From 36af0edec7324206af7d45c04e1e09ebb1730882 Mon Sep 17 00:00:00 2001
From: jillr
Date: Mon, 2 Mar 2020 19:25:18 +0000
Subject: [PATCH 001/683] Initial commit

---
 __init__.py | 0
 aws_acm.py | 397 +++
 aws_acm_facts.py | 1 +
 aws_acm_info.py | 297 +++
 aws_api_gateway.py | 369 +++
 aws_application_scaling_policy.py | 540 ++++
 aws_batch_compute_environment.py | 490 ++++
 aws_batch_job_definition.py | 459 ++++
 aws_batch_job_queue.py | 316 +++
 aws_codebuild.py | 408 +++
 aws_codecommit.py | 247 ++
 aws_codepipeline.py | 320 +++
 aws_config_aggregation_authorization.py | 163 ++
 aws_config_aggregator.py | 232 ++
 aws_config_delivery_channel.py | 219 ++
 aws_config_recorder.py | 213 ++
 aws_config_rule.py | 275 ++
 aws_direct_connect_connection.py | 343 +++
 aws_direct_connect_gateway.py | 374 +++
 aws_direct_connect_link_aggregation_group.py | 470 ++++
 aws_direct_connect_virtual_interface.py | 500 ++++
 aws_eks_cluster.py | 307 +++
 aws_elasticbeanstalk_app.py | 228 ++
 aws_glue_connection.py | 337 +++
 aws_glue_job.py | 373 +++
 aws_inspector_target.py | 248 ++
 aws_kms.py | 1063 ++++++++
 aws_kms_facts.py | 1 +
 aws_kms_info.py | 433 ++++
 aws_region_facts.py | 1 +
 aws_region_info.py | 96 +
 aws_s3_bucket_facts.py | 1 +
 aws_s3_bucket_info.py | 119 +
 aws_s3_cors.py | 168 ++
 aws_secret.py | 404 +++
 aws_ses_identity.py | 546 ++++
 aws_ses_identity_policy.py | 201 ++
 aws_ses_rule_set.py | 254 ++
 aws_sgw_facts.py | 1 +
 aws_sgw_info.py | 361 +++
 aws_ssm_parameter_store.py | 261 ++
 aws_step_functions_state_machine.py | 232 ++
 aws_step_functions_state_machine_execution.py | 197 ++
 aws_waf_condition.py | 735 ++++++
 aws_waf_facts.py | 1 +
 aws_waf_info.py | 148 ++
 aws_waf_rule.py | 354 +++
 aws_waf_web_acl.py | 358 +++
 cloudformation_exports_info.py | 87 +
 cloudformation_stack_set.py | 724 ++++++
 cloudfront_distribution.py | 2264 +++++++++++++++++
 cloudfront_facts.py | 1 +
 cloudfront_info.py | 729 ++++++
 cloudfront_invalidation.py | 276 ++
 cloudfront_origin_access_identity.py | 280 ++
 cloudtrail.py | 609 +++++
 cloudwatchevent_rule.py | 464 ++++
 cloudwatchlogs_log_group.py | 318 +++
 cloudwatchlogs_log_group_facts.py | 1 +
 cloudwatchlogs_log_group_info.py | 132 +
 cloudwatchlogs_log_group_metric_filter.py | 221 ++
 data_pipeline.py | 652 +++++
 dms_endpoint.py | 472 ++++
 dms_replication_subnet_group.py | 238 ++
 dynamodb_table.py | 519 ++++
 dynamodb_ttl.py | 174 ++
 ec2_ami_copy.py | 223 ++
 ec2_asg.py | 1808 +++++++++++++
 ec2_asg_facts.py | 1 +
 ec2_asg_info.py | 414 +++
 ec2_asg_lifecycle_hook.py | 253 ++
 ec2_customer_gateway.py | 275 ++
 ec2_customer_gateway_facts.py | 1 +
 ec2_customer_gateway_info.py | 137 +
 ec2_eip.py | 640 +++++
 ec2_eip_facts.py | 1 +
 ec2_eip_info.py | 145 ++
 ec2_elb.py | 373 +++
 ec2_elb_facts.py | 1 +
 ec2_elb_info.py | 271 ++
 ec2_instance.py | 1803 +++++++++++++
 ec2_instance_facts.py | 1 +
 ec2_instance_info.py | 570 +++++
 ec2_launch_template.py | 702 +++++
 ec2_lc.py | 704 +++++
 ec2_lc_facts.py | 1 +
 ec2_lc_find.py | 217 ++
 ec2_lc_info.py | 237 ++
 ec2_metric_alarm.py | 409 +++
 ec2_placement_group.py | 209 ++
 ec2_placement_group_facts.py | 1 +
 ec2_placement_group_info.py | 129 +
 ec2_scaling_policy.py | 193 ++
 ec2_snapshot_copy.py | 200 ++
 ec2_transit_gateway.py | 578 +++++
 ec2_transit_gateway_info.py | 267 ++
 ec2_vpc_egress_igw.py | 191 ++
 ec2_vpc_endpoint.py | 400 +++
 ec2_vpc_endpoint_facts.py | 1 +
 ec2_vpc_endpoint_info.py | 200 ++
 ec2_vpc_igw.py | 282 ++
 ec2_vpc_igw_facts.py | 1 +
 ec2_vpc_igw_info.py | 159 ++
 ec2_vpc_nacl.py | 633 +++++
 ec2_vpc_nacl_facts.py | 1 +
 ec2_vpc_nacl_info.py | 222 ++
 ec2_vpc_nat_gateway.py | 1020 ++++++++
 ec2_vpc_nat_gateway_facts.py | 1 +
 ec2_vpc_nat_gateway_info.py | 156 ++
 ec2_vpc_peer.py | 447 ++++
 ec2_vpc_peering_facts.py | 1 +
 ec2_vpc_peering_info.py | 149 ++
 ec2_vpc_route_table.py | 747 ++++++
 ec2_vpc_route_table_facts.py | 1 +
 ec2_vpc_route_table_info.py | 134 +
 ec2_vpc_vgw.py | 580 +++++
 ec2_vpc_vgw_facts.py | 1 +
 ec2_vpc_vgw_info.py | 165 ++
 ec2_vpc_vpn.py | 780 ++++++
 ec2_vpc_vpn_facts.py | 1 +
 ec2_vpc_vpn_info.py | 218 ++
 ec2_win_password.py | 204 ++
 ecs_attribute.py | 311 +++
 ecs_cluster.py | 233 ++
 ecs_ecr.py | 528 ++++
 ecs_service.py | 839 ++++++
 ecs_service_facts.py | 1 +
 ecs_service_info.py | 257 ++
 ecs_tag.py | 224 ++
 ecs_task.py | 447 ++++
 ecs_taskdefinition.py | 521 ++++
 ecs_taskdefinition_facts.py | 1 +
 ecs_taskdefinition_info.py | 334 +++
 efs.py | 753 ++++++
 efs_facts.py | 1 +
 efs_info.py | 401 +++
 elasticache.py | 559 ++++
 elasticache_facts.py | 1 +
 elasticache_info.py | 310 +++
 elasticache_parameter_group.py | 356 +++
 elasticache_snapshot.py | 233 ++
 elasticache_subnet_group.py | 149 ++
 elb_application_lb.py | 655 +++++
 elb_application_lb_facts.py | 1 +
 elb_application_lb_info.py | 292 +++
 elb_classic_lb.py | 1349 ++++++++++
 elb_classic_lb_facts.py | 1 +
 elb_classic_lb_info.py | 217 ++
 elb_instance.py | 374 +++
 elb_network_lb.py | 469 ++++
 elb_target.py | 354 +++
 elb_target_facts.py | 1 +
 elb_target_group.py | 857 +++++++
 elb_target_group_facts.py | 1 +
 elb_target_group_info.py | 328 +++
 elb_target_info.py | 439 ++++
 execute_lambda.py | 286 +++
 iam.py | 871 +++++++
 iam_cert.py | 315 +++
 iam_cert_facts.py | 1 +
 iam_group.py | 439 ++++
 iam_managed_policy.py | 384 +++
 iam_mfa_device_facts.py | 1 +
 iam_mfa_device_info.py | 117 +
 iam_password_policy.py | 216 ++
 iam_policy.py | 346 +++
 iam_policy_info.py | 219 ++
 iam_role.py | 665 +++++
 iam_role_facts.py | 1 +
 iam_role_info.py | 258 ++
 iam_saml_federation.py | 249 ++
 iam_server_certificate_facts.py | 1 +
 iam_server_certificate_info.py | 172 ++
 iam_user.py | 370 +++
 iam_user_info.py | 185 ++
 kinesis_stream.py | 1425 +++++++++++
 lambda.py | 624 +++++
 lambda_alias.py | 389 +++
 lambda_event.py | 448 ++++
 lambda_facts.py | 389 +++
 lambda_info.py | 380 +++
 lambda_policy.py | 439 ++++
 lightsail.py | 340 +++
 rds.py | 1400 ++++++++++
 rds_instance.py | 1225 +++++++++
 rds_instance_facts.py | 1 +
 rds_instance_info.py | 407 +++
 rds_param_group.py | 354 +++
 rds_snapshot.py | 352 +++
 rds_snapshot_facts.py | 1 +
 rds_snapshot_info.py | 396 +++
 rds_subnet_group.py | 202 ++
 redshift.py | 623 +++++
 redshift_cross_region_snapshots.py | 205 ++
 redshift_facts.py | 1 +
 redshift_info.py | 354 +++
 redshift_subnet_group.py | 182 ++
 route53.py | 709 ++++++
 route53_facts.py | 1 +
 route53_health_check.py | 375 +++
 route53_info.py | 499 ++++
 route53_zone.py | 440 ++++
 s3_bucket_notification.py | 265 ++
 s3_lifecycle.py | 514 ++++
 s3_logging.py | 178 ++
 s3_sync.py | 565 ++++
 s3_website.py | 335 +++
 sns.py | 234 ++
 sns_topic.py | 529 ++++
 sqs_queue.py | 473 ++++
 sts_assume_role.py | 180 ++
 sts_session_token.py | 158 ++
 212 files changed, 72166 insertions(+)
 create mode 100644 __init__.py
 create mode 100644 aws_acm.py
 create mode 120000 aws_acm_facts.py
 create mode 100644 aws_acm_info.py
 create mode 100644 aws_api_gateway.py
 create mode 100644 aws_application_scaling_policy.py
 create mode 100644 aws_batch_compute_environment.py
 create mode 100644 aws_batch_job_definition.py
 create mode 100644 aws_batch_job_queue.py
 create mode 100644 aws_codebuild.py
 create mode 100644 aws_codecommit.py
 create mode 100644 aws_codepipeline.py
 create mode 100644 aws_config_aggregation_authorization.py
 create mode 100644 aws_config_aggregator.py
 create mode 100644 aws_config_delivery_channel.py
 create mode 100644 aws_config_recorder.py
 create mode 100644 aws_config_rule.py
 create mode 100644 aws_direct_connect_connection.py
 create mode 100644 aws_direct_connect_gateway.py
 create mode 100644 aws_direct_connect_link_aggregation_group.py
 create mode 100644 aws_direct_connect_virtual_interface.py
 create mode 100644 aws_eks_cluster.py
 create mode 100644 aws_elasticbeanstalk_app.py
 create mode 100644 aws_glue_connection.py
 create mode 100644 aws_glue_job.py
 create mode 100644 aws_inspector_target.py
 create mode 100644 aws_kms.py
 create mode 120000 aws_kms_facts.py
 create mode 100644 aws_kms_info.py
 create mode 120000 aws_region_facts.py
 create mode 100644 aws_region_info.py
 create mode 120000 aws_s3_bucket_facts.py
 create mode 100644 aws_s3_bucket_info.py
 create mode 100644 aws_s3_cors.py
 create mode 100644 aws_secret.py
 create mode 100644 aws_ses_identity.py
 create mode 100644 aws_ses_identity_policy.py
 create mode 100644 aws_ses_rule_set.py
 create mode 120000 aws_sgw_facts.py
 create mode 100644 aws_sgw_info.py
 create mode 100644 aws_ssm_parameter_store.py
 create mode 100644 aws_step_functions_state_machine.py
 create mode 100644 aws_step_functions_state_machine_execution.py
 create mode 100644 aws_waf_condition.py
 create mode 120000 aws_waf_facts.py
 create mode 100644 aws_waf_info.py
 create mode 100644 aws_waf_rule.py
 create mode 100644 aws_waf_web_acl.py
 create mode 100644 cloudformation_exports_info.py
 create mode 100644 cloudformation_stack_set.py
 create mode 100644 cloudfront_distribution.py
 create mode 120000 cloudfront_facts.py
 create mode 100644 cloudfront_info.py
 create mode 100644 cloudfront_invalidation.py
 create mode 100644 cloudfront_origin_access_identity.py
 create mode 100644 cloudtrail.py
 create mode 100644 cloudwatchevent_rule.py
 create mode 100644 cloudwatchlogs_log_group.py
 create mode 120000 cloudwatchlogs_log_group_facts.py
 create mode 100644 cloudwatchlogs_log_group_info.py
 create mode 100644 cloudwatchlogs_log_group_metric_filter.py
 create mode 100644 data_pipeline.py
 create mode 100644 dms_endpoint.py
 create mode 100644 dms_replication_subnet_group.py
 create mode 100644 dynamodb_table.py
 create mode 100644 dynamodb_ttl.py
 create mode 100644 ec2_ami_copy.py
 create mode 100644 ec2_asg.py
 create mode 120000 ec2_asg_facts.py
 create mode 100644 ec2_asg_info.py
 create mode 100644 ec2_asg_lifecycle_hook.py
 create mode 100644 ec2_customer_gateway.py
 create mode 120000 ec2_customer_gateway_facts.py
 create mode 100644 ec2_customer_gateway_info.py
 create mode 100644 ec2_eip.py
 create mode 120000 ec2_eip_facts.py
 create mode 100644 ec2_eip_info.py
 create mode 100644 ec2_elb.py
 create mode 120000 ec2_elb_facts.py
 create mode 100644 ec2_elb_info.py
 create mode 100644 ec2_instance.py
 create mode 120000 ec2_instance_facts.py
 create mode 100644 ec2_instance_info.py
 create mode 100644 ec2_launch_template.py
 create mode 100644 ec2_lc.py
 create mode 120000 ec2_lc_facts.py
 create mode 100644 ec2_lc_find.py
 create mode 100644 ec2_lc_info.py
 create mode 100644 ec2_metric_alarm.py
 create mode 100644 ec2_placement_group.py
 create mode 120000 ec2_placement_group_facts.py
 create mode 100644 ec2_placement_group_info.py
 create mode 100644 ec2_scaling_policy.py
 create mode 100644 ec2_snapshot_copy.py
 create mode 100644 ec2_transit_gateway.py
 create mode 100644 ec2_transit_gateway_info.py
 create mode 100644 ec2_vpc_egress_igw.py
 create mode 100644 ec2_vpc_endpoint.py
 create mode 120000 ec2_vpc_endpoint_facts.py
 create mode 100644 ec2_vpc_endpoint_info.py
 create mode 100644 ec2_vpc_igw.py
 create mode 120000 ec2_vpc_igw_facts.py
 create mode 100644 ec2_vpc_igw_info.py
 create mode 100644 ec2_vpc_nacl.py
 create mode 120000 ec2_vpc_nacl_facts.py
 create mode 100644 ec2_vpc_nacl_info.py
 create mode 100644 ec2_vpc_nat_gateway.py
 create mode 120000 ec2_vpc_nat_gateway_facts.py
 create mode 100644 ec2_vpc_nat_gateway_info.py
 create mode 100644 ec2_vpc_peer.py
 create mode 120000 ec2_vpc_peering_facts.py
 create mode 100644 ec2_vpc_peering_info.py
 create mode 100644 ec2_vpc_route_table.py
 create mode 120000 ec2_vpc_route_table_facts.py
 create mode 100644 ec2_vpc_route_table_info.py
 create mode 100644 ec2_vpc_vgw.py
 create mode 120000 ec2_vpc_vgw_facts.py
 create mode 100644 ec2_vpc_vgw_info.py
 create mode 100644 ec2_vpc_vpn.py
 create mode 120000 ec2_vpc_vpn_facts.py
 create mode 100644 ec2_vpc_vpn_info.py
 create mode 100644 ec2_win_password.py
 create mode 100644 ecs_attribute.py
 create mode 100644 ecs_cluster.py
 create mode 100644 ecs_ecr.py
 create mode 100644 ecs_service.py
 create mode 120000 ecs_service_facts.py
 create mode 100644 ecs_service_info.py
 create mode 100644 ecs_tag.py
 create mode 100644 ecs_task.py
 create mode 100644 ecs_taskdefinition.py
 create mode 120000 ecs_taskdefinition_facts.py
 create mode 100644 ecs_taskdefinition_info.py
 create mode 100644 efs.py
 create mode 120000 efs_facts.py
 create mode 100644 efs_info.py
 create mode 100644 elasticache.py
 create mode 120000 elasticache_facts.py
 create mode 100644 elasticache_info.py
 create mode 100644 elasticache_parameter_group.py
 create mode 100644 elasticache_snapshot.py
 create mode 100644 elasticache_subnet_group.py
 create mode 100644 elb_application_lb.py
 create mode 120000 elb_application_lb_facts.py
 create mode 100644 elb_application_lb_info.py
 create mode 100644 elb_classic_lb.py
 create mode 120000 elb_classic_lb_facts.py
 create mode 100644 elb_classic_lb_info.py
 create mode 100644 elb_instance.py
 create mode 100644 elb_network_lb.py
 create mode 100644 elb_target.py
 create mode 120000 elb_target_facts.py
 create mode 100644 elb_target_group.py
 create mode 120000 elb_target_group_facts.py
 create mode 100644 elb_target_group_info.py
 create mode 100644 elb_target_info.py
 create mode 100644 execute_lambda.py
 create mode 100644 iam.py
 create mode 100644 iam_cert.py
 create mode 120000 iam_cert_facts.py
 create mode 100644 iam_group.py
 create mode 100644 iam_managed_policy.py
 create mode 120000 iam_mfa_device_facts.py
 create mode 100644 iam_mfa_device_info.py
 create mode 100644 iam_password_policy.py
 create mode 100644 iam_policy.py
 create mode 100644 iam_policy_info.py
 create mode 100644 iam_role.py
 create mode 120000 iam_role_facts.py
 create mode 100644 iam_role_info.py
 create mode 100644 iam_saml_federation.py
 create mode 120000 iam_server_certificate_facts.py
 create mode 100644 iam_server_certificate_info.py
 create mode 100644 iam_user.py
 create mode 100644 iam_user_info.py
 create mode 100644 kinesis_stream.py
 create mode 100644 lambda.py
 create mode 100644 lambda_alias.py
 create mode 100644 lambda_event.py
 create mode 100644 lambda_facts.py
 create mode 100644 lambda_info.py
 create mode 100644 lambda_policy.py
 create mode 100644 lightsail.py
 create mode 100644 rds.py
 create mode 100644 rds_instance.py
 create mode 120000 rds_instance_facts.py
 create mode 100644 rds_instance_info.py
 create mode 100644 rds_param_group.py
 create mode 100644 rds_snapshot.py
 create mode 120000 rds_snapshot_facts.py
 create mode 100644 rds_snapshot_info.py
 create mode 100644 rds_subnet_group.py
 create mode 100644 redshift.py
 create mode 100644 redshift_cross_region_snapshots.py
 create mode 120000 redshift_facts.py
 create mode 100644 redshift_info.py
 create mode 100644 redshift_subnet_group.py
 create mode 100644 route53.py
 create mode 120000 route53_facts.py
 create mode 100644 route53_health_check.py
 create mode 100644 route53_info.py
 create mode 100644 route53_zone.py
 create mode 100644 s3_bucket_notification.py
 create mode 100644 s3_lifecycle.py
 create mode 100644 s3_logging.py
 create mode 100644 s3_sync.py
 create mode 100644 s3_website.py
 create mode 100644 sns.py
 create mode 100644 sns_topic.py
 create mode 100644 sqs_queue.py
 create mode 100644 sts_assume_role.py
 create mode 100644 sts_session_token.py

diff --git a/__init__.py b/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/aws_acm.py b/aws_acm.py
new file mode 100644
index 00000000000..421c9e6ad76
--- /dev/null
+++ b/aws_acm.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <https://www.gnu.org/licenses/>.
+#
+# Author:
+#   - Matthew Davis
+#     on behalf of Telstra Corporation Limited
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: aws_acm
+short_description: Upload and delete certificates in the AWS Certificate Manager service
+description:
+  - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM).
+  - >
+    This module does not currently interact with AWS-provided certificates.
+    It currently only manages certificates provided to AWS by the user.
+  - The ACM API allows users to upload multiple certificates for the same domain name,
+    and even multiple identical certificates.
+    This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy.
+    It does this through applying AWS resource "Name" tags to ACM certificates.
+  - >
+    When I(state=present),
+    if there is one certificate in ACM
+    with a C(Name) tag equal to the C(name_tag) parameter,
+    and an identical body and chain,
+    this task will succeed without effect.
+  - >
+    When I(state=present),
+    if there is one certificate in ACM
+    with a I(Name) tag equal to the I(name_tag) parameter,
+    and a different body,
+    this task will overwrite that certificate.
+  - >
+    When I(state=present),
+    if there are multiple certificates in ACM
+    with a I(Name) tag equal to the I(name_tag) parameter,
+    this task will fail.
+  - >
+    When I(state=absent) and I(certificate_arn) is defined,
+    this module will delete the ACM resource with that ARN if it exists in this region,
+    and succeed without effect if it doesn't exist.
+  - >
+    When I(state=absent) and I(domain_name) is defined,
+    this module will delete all ACM resources in this AWS region with a corresponding domain name.
+    If there are none, it will succeed without effect.
+  - >
+    When I(state=absent) and I(certificate_arn) is not defined,
+    and I(domain_name) is not defined,
+    this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag.
+    If there are none, it will succeed without effect.
+  - Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API.
+options:
+  certificate:
+    description:
+      - The body of the PEM encoded public certificate.
+      - Required when I(state) is not C(absent).
+      - If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')).
+    type: str
+
+  certificate_arn:
+    description:
+      - The ARN of a certificate in ACM to delete.
+      - Ignored when I(state=present).
+      - If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag).
+      - >
+        If I(state=absent) and no resource exists with this ARN in this region,
+        the task will succeed with no effect.
+      - >
+        If I(state=absent) and the corresponding resource exists in a different region,
+        this task may report success without deleting that resource.
+    type: str
+    aliases: [arn]
+
+  certificate_chain:
+    description:
+      - The body of the PEM encoded chain for your certificate.
+      - If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')).
+      - Ignored when I(state=absent).
+    type: str
+
+  domain_name:
+    description:
+      - The domain name of the certificate.
+      - >
+        If I(state=absent) and I(domain_name) is specified,
+        this task will delete all ACM certificates with this domain.
+      - Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided.
+      - >
+        If I(state=present) this must not be specified.
+        (Since the domain name is encoded within the public certificate's body.)
+    type: str
+    aliases: [domain]
+
+  name_tag:
+    description:
+      - The unique identifier for tagging resources using AWS tags, with key I(Name).
+      - This can be any set of characters accepted by AWS for tag values.
+      - >
+        This is to ensure Ansible can treat certificates idempotently,
+        even though the ACM API allows duplicate certificates.
+      - If I(state=present), this must be specified.
+      - >
+        If I(state=absent), you must provide exactly one of
+        I(certificate_arn), I(domain_name) or I(name_tag).
+    type: str
+    aliases: [name]
+
+  private_key:
+    description:
+      - The body of the PEM encoded private key.
+      - Required when I(state=present).
+      - Ignored when I(state=absent).
+      - If your private key is in a file, use C(lookup('file', 'path/to/key.pem')).
+    type: str
+
+  state:
+    description:
+      - >
+        If I(state=present), the specified public certificate and private key
+        will be uploaded, with I(Name) tag equal to I(name_tag).
+      - >
+        If I(state=absent), any certificates in this region
+        with a corresponding I(domain_name), I(name_tag) or I(certificate_arn)
+        will be deleted.
+    choices: [present, absent]
+    default: present
+    type: str
+requirements:
+  - boto3
+author:
+  - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+
+- name: upload a self-signed certificate
+  aws_acm:
+    certificate: "{{ lookup('file', 'cert.pem' ) }}"
+    private_key: "{{ lookup('file', 'key.pem' ) }}"
+    name_tag: my_cert  # to be applied through an AWS tag as "Name":"my_cert"
+    region: ap-southeast-2  # AWS region
+
+- name: create/update a certificate with a chain
+  aws_acm:
+    certificate: "{{ lookup('file', 'cert.pem' ) }}"
+    private_key: "{{ lookup('file', 'key.pem' ) }}"
+    name_tag: my_cert
+    certificate_chain: "{{ lookup('file', 'chain.pem' ) }}"
+    state: present
+    region: ap-southeast-2
+  register: cert_create
+
+- name: print ARN of cert we just created
+  debug:
+    var: cert_create.certificate.arn
+
+- name: delete the cert we just created
+  aws_acm:
+    name_tag: my_cert
+    state: absent
+    region: ap-southeast-2
+
+- name: delete a certificate with a particular ARN
+  aws_acm:
+    certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+    state: absent
+    region: ap-southeast-2
+
+- name: delete all certificates with a particular domain name
+  aws_acm:
+    domain_name: acm.ansible.com
+    state: absent
+    region: ap-southeast-2
+
+'''
+
+RETURN = '''
+certificate:
+  description: Information about the certificate which was uploaded
+  type: complex
+  returned: when I(state=present)
+  contains:
+    arn:
+      description: The ARN of the certificate in ACM
+      type: str
+      returned: when I(state=present)
+      sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+    domain_name:
+      description: The domain name encoded within the public certificate
+      type: str
+      returned: when I(state=present)
+      sample: acm.ansible.com
+arns:
+  description: A list of the ARNs of the certificates in ACM which were deleted
+  type: list
+  elements: str
+  returned: when I(state=absent)
+  sample:
+    - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
+'''
+
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.acm import ACMServiceManager
+from ansible.module_utils._text import to_text
+import base64
+import re  # regex library
+
+
+# Takes in two text arguments
+# Each a PEM encoded certificate
+# Or a chain of PEM encoded certificates
+# May include some lines between each chain in the cert, e.g. "Subject: ..."
+# Returns True iff the chains/certs are functionally identical (including chain order)
+def chain_compare(module, a, b):
+
+    chain_a_pem = pem_chain_split(module, a)
+    chain_b_pem = pem_chain_split(module, b)
+
+    if len(chain_a_pem) != len(chain_b_pem):
+        return False
+
+    # Chain length is the same
+    for (ca, cb) in zip(chain_a_pem, chain_b_pem):
+        der_a = PEM_body_to_DER(module, ca)
+        der_b = PEM_body_to_DER(module, cb)
+        if der_a != der_b:
+            return False
+
+    return True
+
+
+# Takes in PEM encoded data with no headers
+# returns equivalent DER as byte array
+def PEM_body_to_DER(module, pem):
+    try:
+        der = base64.b64decode(to_text(pem))
+    except (ValueError, TypeError) as e:
+        module.fail_json_aws(e, msg="Unable to decode certificate chain")
+    return der
+
+
+# Store this globally to avoid repeated recompilation
+pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")
+
+
+# Use regex to split up a chain or single cert into an array of base64 encoded data
+# Using "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----"
+# Noting that some chains have non-pem data in between each cert
+# This function returns only what's between the headers, excluding the headers
+def pem_chain_split(module, pem):
+
+    pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
+
+    if len(pem_arr) == 0:
+        # This happens if the regex doesn't match at all
+        module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
+
+    return pem_arr
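# Editorial aside, not part of the patch: a minimal standalone sketch of the
# comparison approach used by chain_compare()/pem_chain_split() above. The PEM
# strings below are hypothetical dummy data, not real certificates; the point
# is that decoding each base64 body to DER makes the comparison insensitive to
# line wrapping and any non-PEM text between certificates.
import base64
import re

SPLIT_RE = re.compile(r"-----BEGIN CERTIFICATE-----([a-zA-Z0-9+/=\s]+)-----END CERTIFICATE-----")

def der_bodies(pem_text):
    # Extract each base64 body between the PEM headers and decode it to DER.
    return [base64.b64decode(body) for body in SPLIT_RE.findall(pem_text)]

# Two "chains" that differ only in line wrapping compare as equal:
chain_a = "-----BEGIN CERTIFICATE-----\naGVsbG8=\n-----END CERTIFICATE-----\n"
chain_b = "-----BEGIN CERTIFICATE-----\naGVs\nbG8=\n-----END CERTIFICATE-----\n"
assert der_bodies(chain_a) == der_bodies(chain_b)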
+
+
+def main():
+    argument_spec = dict(
+        certificate=dict(),
+        certificate_arn=dict(aliases=['arn']),
+        certificate_chain=dict(),
+        domain_name=dict(aliases=['domain']),
+        name_tag=dict(aliases=['name']),
+        private_key=dict(no_log=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+    required_if = [
+        ['state', 'present', ['certificate', 'name_tag', 'private_key']],
+    ]
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+    acm = ACMServiceManager(module)
+
+    # Check argument requirements
+    if module.params['state'] == 'present':
+        if module.params['certificate_arn']:
+            module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'")
+    else:  # absent
+        # exactly one of these should be specified
+        absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+        if sum([(module.params[a] is not None) for a in absent_args]) != 1:
+            for a in absent_args:
+                module.debug("%s is %s" % (a, module.params[a]))
+            module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+
+    if module.params['name_tag']:
+        tags = dict(Name=module.params['name_tag'])
+    else:
+        tags = None
+
+    client = module.client('acm')
+
+    # fetch the list of certificates currently in ACM
+    certificates = acm.get_certificates(client=client,
+                                        module=module,
+                                        domain_name=module.params['domain_name'],
+                                        arn=module.params['certificate_arn'],
+                                        only_tags=tags)
+
+    module.debug("Found %d corresponding certificates in ACM" % len(certificates))
+
+    if module.params['state'] == 'present':
+        if len(certificates) > 1:
+            msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
+            module.fail_json(msg=msg, certificates=certificates)
+        elif len(certificates) == 1:
+            # update the existing certificate
+            module.debug("Existing certificate found in ACM")
+            old_cert = certificates[0]  # existing cert in ACM
+            if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) or (old_cert['tags']['Name'] != module.params['name_tag']):
+                # shouldn't happen
+                module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
+
+            if 'certificate' not in old_cert:
+                # shouldn't happen
+                module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)
+
+            # Is the existing certificate in ACM the same as the local certificate?
+            same = True
+            same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
+            if module.params['certificate_chain']:
+                # Need to test this
+                # not sure if Amazon appends the cert itself to the chain when self-signed
+                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
+            else:
+                # When there is no chain with a cert
+                # it seems Amazon returns the cert itself as the chain
+                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])
+
+            if same:
+                module.debug("Existing certificate in ACM is the same, doing nothing")
+                domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn'])
+                module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']), changed=False)
+            else:
+                module.debug("Existing certificate in ACM is different, overwriting")
+
+                # update cert in ACM
+                arn = acm.import_certificate(client, module,
+                                             certificate=module.params['certificate'],
+                                             private_key=module.params['private_key'],
+                                             certificate_chain=module.params['certificate_chain'],
+                                             arn=old_cert['certificate_arn'],
+                                             tags=tags)
+                domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
+                module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
+        else:  # len(certificates) == 0
+            module.debug("No certificate in ACM. Creating new one.")
+            arn = acm.import_certificate(client=client,
+                                         module=module,
+                                         certificate=module.params['certificate'],
+                                         private_key=module.params['private_key'],
+                                         certificate_chain=module.params['certificate_chain'],
+                                         tags=tags)
+            domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
+
+            module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
+
+    else:  # state == absent
+        for cert in certificates:
+            acm.delete_certificate(client, module, cert['certificate_arn'])
+        module.exit_json(arns=[cert['certificate_arn'] for cert in certificates],
+                         changed=(len(certificates) > 0))
+
+
+if __name__ == '__main__':
+    # tests()
+    main()
diff --git a/aws_acm_facts.py b/aws_acm_facts.py
new file mode 120000
index 00000000000..42dbcf0df95
--- /dev/null
+++ b/aws_acm_facts.py
@@ -0,0 +1 @@
+aws_acm_info.py
\ No newline at end of file
diff --git a/aws_acm_info.py b/aws_acm_info.py
new file mode 100644
index 00000000000..31c4ddef370
--- /dev/null
+++ b/aws_acm_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: aws_acm_info
+short_description: Retrieve certificate information from AWS Certificate Manager service
+description:
+  - Retrieve information for ACM certificates
+  - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
+  - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
+options:
+  certificate_arn:
+    description:
+      - If provided, the results will be filtered to show only the certificate with this ARN.
+      - If no certificate with this ARN exists, this task will fail.
+      - If a certificate with this ARN exists in a different region, this task will fail.
+    aliases:
+      - arn
+    type: str
+  domain_name:
+    description:
+      - The domain name of an ACM certificate to limit the search to
+    aliases:
+      - name
+    type: str
+  statuses:
+    description:
+      - Status to filter the certificate results
+    choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+    type: list
+    elements: str
+  tags:
+    description:
+      - Filter results to show only certificates with tags that match all the tags specified here.
+    type: dict
+requirements:
+  - boto3
+author:
+  - Will Thames (@willthames)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all ACM certificates
+  aws_acm_info:
+
+- name: obtain all information for a single ACM certificate
+  aws_acm_info:
+    domain_name: "*.example.com"
+
+- name: obtain all certificates pending validation
+  aws_acm_info:
+    statuses:
+      - PENDING_VALIDATION
+
+- name: obtain all certificates with tag Name=foo and myTag=bar
+  aws_acm_info:
+    tags:
+      Name: foo
+      myTag: bar
+
+
+# The output is still a list of certificates, just one item long.
+- name: obtain information about a certificate with a particular ARN
+  aws_acm_info:
+    certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+
+'''
+
+RETURN = '''
+certificates:
+  description: A list of certificates
+  returned: always
+  type: complex
+  contains:
+    certificate:
+      description: The ACM Certificate body
+      returned: when certificate creation is complete
+      sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
+      type: str
+    certificate_arn:
+      description: Certificate ARN
+      returned: always
+      sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
+      type: str
+    certificate_chain:
+      description: Full certificate chain for the certificate
+      returned: when certificate creation is complete
+      sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
+      type: str
+    created_at:
+      description: Date certificate was created
+      returned: always
+      sample: '2017-08-15T10:31:19+10:00'
+      type: str
+    domain_name:
+      description: Domain name for the certificate
+      returned: always
+      sample: '*.example.com'
+      type: str
+    domain_validation_options:
+      description: Options used by ACM to validate the certificate
+      returned: when certificate type is AMAZON_ISSUED
+      type: complex
+      contains:
+        domain_name:
+          description: Fully qualified domain name of the certificate
+          returned: always
+          sample: example.com
+          type: str
+        validation_domain:
+          description: The domain name ACM used to send validation emails
+          returned: always
+          sample: example.com
+          type: str
+        validation_emails:
+          description: A list of email addresses that ACM used to send domain validation emails
+          returned: always
+          sample:
+            - admin@example.com
+            - postmaster@example.com
+          type: list
+          elements: str
+        validation_status:
+          description: Validation status of the domain
+          returned: always
+          sample: SUCCESS
+          type: str
+    failure_reason:
+      description: Reason certificate request failed
+      returned: only when certificate issuing failed
+      type: str
+      sample: NO_AVAILABLE_CONTACTS
+    in_use_by:
+      description: A list of ARNs for the AWS resources that are using the certificate.
+      returned: always
+      sample: []
+      type: list
+      elements: str
+    issued_at:
+      description: Date certificate was issued
+      returned: always
+      sample: '2017-01-01T00:00:00+10:00'
+      type: str
+    issuer:
+      description: Issuer of the certificate
+      returned: always
+      sample: Amazon
+      type: str
+    key_algorithm:
+      description: Algorithm used to generate the certificate
+      returned: always
+      sample: RSA-2048
+      type: str
+    not_after:
+      description: Date after which the certificate is not valid
+      returned: always
+      sample: '2019-01-01T00:00:00+10:00'
+      type: str
+    not_before:
+      description: Date before which the certificate is not valid
+      returned: always
+      sample: '2017-01-01T00:00:00+10:00'
+      type: str
+    renewal_summary:
+      description: Information about managed renewal process
+      returned: when certificate is issued by Amazon and a renewal has been started
+      type: complex
+      contains:
+        domain_validation_options:
+          description: Options used by ACM to validate the certificate
+          returned: when certificate type is AMAZON_ISSUED
+          type: complex
+          contains:
+            domain_name:
+              description: Fully qualified domain name of the certificate
+              returned: always
+              sample: example.com
+              type: str
+            validation_domain:
+              description: The domain name ACM used to send validation emails
+              returned: always
+              sample: example.com
+              type: str
+            validation_emails:
+              description: A list of email addresses that ACM used to send domain validation emails
+              returned: always
+              sample:
+                - admin@example.com
+                - postmaster@example.com
+              type: list
+              elements: str
+            validation_status:
+              description: Validation status of the domain
+              returned: always
+              sample: SUCCESS
+              type: str
+        renewal_status:
+          description: Status of the domain renewal
+          returned: always
+          sample: PENDING_AUTO_RENEWAL
+          type: str
+    revocation_reason:
+      description: Reason for certificate revocation
+      returned: when the certificate has been revoked
+      sample: SUPERCEDED
+      type: str
+    revoked_at:
+      description: Date certificate was revoked
+      returned: when the certificate has been revoked
+      sample: '2017-09-01T10:00:00+10:00'
+      type: str
+    serial:
+      description: The serial number of the certificate
+      returned: always
+      sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+      type: str
+    signature_algorithm:
+      description: Algorithm used to sign the certificate
+      returned: always
+      sample: SHA256WITHRSA
+      type: str
+    status:
+      description: Status of the certificate in ACM
+      returned: always
+      sample: ISSUED
+      type: str
+    subject:
+      description: The name of the entity that is associated with the public key contained in the certificate
+      returned: always
+      sample: CN=*.example.com
+      type: str
+    subject_alternative_names:
+      description: Subject Alternative Names for the certificate
+      returned: always
+      sample:
+        - '*.example.com'
+      type: list
+      elements: str
+    tags:
+      description: Tags associated with the certificate
+      returned: always
+      type: dict
+      sample:
+        Application: helloworld
+        Environment: test
+    type:
+      description: The source of the certificate
+      returned: always
+      sample: AMAZON_ISSUED
+      type: str
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.acm import ACMServiceManager
+
+
+def main():
+    argument_spec = dict(
+        certificate_arn=dict(aliases=['arn']),
+        domain_name=dict(aliases=['name']),
+        statuses=dict(type='list', choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']),
+        tags=dict(type='dict'),
+    )
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+    acm_info = ACMServiceManager(module)
+
+    if module._name == 'aws_acm_facts':
+        module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", version='2.13')
+
+    client = module.client('acm')
+
+    certificates = acm_info.get_certificates(client, module,
+                                             domain_name=module.params['domain_name'],
+                                             statuses=module.params['statuses'],
+                                             arn=module.params['certificate_arn'],
+                                             only_tags=module.params['tags'])
+
+    if module.params['certificate_arn'] and len(certificates) != 1:
+        module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
+
+    module.exit_json(certificates=certificates)
+
+
+if __name__ == '__main__':
+    main()
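For orientation, the ACMServiceManager helper used above ultimately drives plain boto3 ACM calls. A rough, hedged sketch of an equivalent listing flow follows; the helper's actual internals may differ, and the function name here is illustrative only:

    import boto3

    def list_acm_certificates(region, statuses=None):
        # Paginate through ACM and attach tags to each certificate's details.
        client = boto3.client('acm', region_name=region)
        paginator = client.get_paginator('list_certificates')
        kwargs = {'CertificateStatuses': statuses} if statuses else {}
        certificates = []
        for page in paginator.paginate(**kwargs):
            for summary in page['CertificateSummaryList']:
                arn = summary['CertificateArn']
                detail = client.describe_certificate(CertificateArn=arn)['Certificate']
                tags = client.list_tags_for_certificate(CertificateArn=arn)['Tags']
                detail['Tags'] = {t['Key']: t['Value'] for t in tags}
                certificates.append(detail)
        return certificates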
diff --git a/aws_api_gateway.py b/aws_api_gateway.py
new file mode 100644
index 00000000000..86cfbf129e0
--- /dev/null
+++ b/aws_api_gateway.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: aws_api_gateway
+short_description: Manage AWS API Gateway APIs
+description:
+     - Allows for the management of API Gateway APIs
+     - Normally you should give the api_id since there is no other
+       stable guaranteed unique identifier for the API. If you do
+       not give api_id then a new API will be created each time
+       this is run.
+     - Beware that there are very hard limits on the rate that
+       you can call API Gateway's REST API. You may need to patch
+       your boto. See U(https://github.com/boto/boto3/issues/876)
+       and discuss with your AWS rep.
+     - swagger_file and swagger_text are passed directly on to AWS
+       transparently whilst swagger_dict is an ansible dict which is
+       converted to JSON before the API definitions are uploaded.
+requirements: [ boto3 ]
+options:
+  api_id:
+    description:
+      - The ID of the API you want to manage.
+    type: str
+  state:
+    description: Create or delete API Gateway.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  swagger_file:
+    description:
+      - JSON or YAML file containing swagger definitions for API.
+        Exactly one of swagger_file, swagger_text or swagger_dict must
+        be present.
+    type: path
+    aliases: ['src', 'api_file']
+  swagger_text:
+    description:
+      - Swagger definitions for API in JSON or YAML as a string direct
+        from playbook.
+    type: str
+  swagger_dict:
+    description:
+      - Swagger definitions API ansible dictionary which will be
+        converted to JSON and uploaded.
+    type: json
+  stage:
+    description:
+      - The name of the stage the API should be deployed to.
+    type: str
+  deploy_desc:
+    description:
+      - Description of the deployment - recorded and visible in the
+        AWS console.
+    default: Automatic deployment by Ansible.
+    type: str
+  cache_enabled:
+    description:
+      - Enable API GW caching of backend responses. Defaults to false.
+    type: bool
+    default: false
+  cache_size:
+    description:
+      - Size in GB of the API GW cache, becomes effective when cache_enabled is true.
+    choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']
+    type: str
+    default: '0.5'
+  stage_variables:
+    description:
+      - ENV variables for the stage. Define a dict of key/value pairs for variables.
+    type: dict
+  stage_canary_settings:
+    description:
+      - Canary settings for the deployment of the stage.
+      - 'Dict with following settings:'
+      - 'percentTraffic: The percent (0-100) of traffic diverted to a canary deployment.'
+      - 'deploymentId: The ID of the canary deployment.'
+      - 'stageVariableOverrides: Stage variables overridden for a canary release deployment.'
+      - 'useStageCache: A Boolean flag to indicate whether the canary deployment uses the stage cache or not.'
+      - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage)
+    type: dict
+  tracing_enabled:
+    description:
+      - Specifies whether active tracing with X-ray is enabled for the API GW stage.
+    type: bool
+  endpoint_type:
+    description:
+      - Type of endpoint configuration, use C(EDGE) for an edge optimized API endpoint,
+      - C(REGIONAL) for just a regional deploy or C(PRIVATE) for a private API.
+      - This flag will only be used when creating a new API Gateway setup, not for updates.
+    choices: ['EDGE', 'REGIONAL', 'PRIVATE']
+    type: str
+    default: EDGE
+author:
+    - 'Michael De La Rue (@mikedlr)'
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+notes:
+   - A future version of this module will probably use tags or another
+     ID so that an API can be created only once.
+   - As an early workaround an intermediate version will probably do
+     the same using a tag embedded in the API name.
+
+'''
+
+EXAMPLES = '''
+- name: Setup AWS API Gateway setup on AWS and deploy API definition
+  aws_api_gateway:
+    swagger_file: my_api.yml
+    stage: production
+    cache_enabled: true
+    cache_size: '1.6'
+    tracing_enabled: true
+    endpoint_type: EDGE
+    state: present
+
+- name: Update API definition to deploy new version
+  aws_api_gateway:
+    api_id: 'abc123321cba'
+    swagger_file: my_api.yml
+    deploy_desc: Make auth fix available.
+    cache_enabled: true
+    cache_size: '1.6'
+    endpoint_type: EDGE
+    state: present
+
+- name: Update API definitions and settings and deploy as canary
+  aws_api_gateway:
+    api_id: 'abc123321cba'
+    swagger_file: my_api.yml
+    cache_enabled: true
+    cache_size: '6.1'
+    stage_canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
+    state: present
+'''
+
+RETURN = '''
+api_id:
+  description: API id of the API endpoint created
+  returned: success
+  type: str
+  sample: '0ln4zq7p86'
+configure_response:
+  description: AWS response from the API configure call
+  returned: success
+  type: dict
+  sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" }
+deploy_response:
+  description: AWS response from the API deploy call
+  returned: success
+  type: dict
+  sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." }
+resource_actions:
+  description: Actions performed against AWS API
+  returned: always
+  type: list
+  sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
+'''
+
+import json
+
+try:
+    import botocore
+except ImportError:
+    # HAS_BOTOCORE taken care of in AnsibleAWSModule
+    pass
+
+import traceback
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict)
+
+
+def main():
+    argument_spec = dict(
+        api_id=dict(type='str', required=False),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
+        swagger_dict=dict(type='json', default=None),
+        swagger_text=dict(type='str', default=None),
+        stage=dict(type='str', default=None),
+        deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
+        cache_enabled=dict(type='bool', default=False),
+        cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
+        stage_variables=dict(type='dict', default={}),
+        stage_canary_settings=dict(type='dict', default={}),
+        tracing_enabled=dict(type='bool', default=False),
+        endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
+    )
+
+    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=False,
+        mutually_exclusive=mutually_exclusive,
+    )
+
+    api_id = module.params.get('api_id')
+    state = module.params.get('state')
+    swagger_file = module.params.get('swagger_file')
+    swagger_dict = module.params.get('swagger_dict')
+    swagger_text = module.params.get('swagger_text')
+    endpoint_type = module.params.get('endpoint_type')
+
+    client = module.client('apigateway')
+
+    changed = True   # for now it will stay that way until we can sometimes avoid change
+    conf_res = None
+    dep_res = None
+    del_res = None
+
+    if state == "present":
+        if api_id is None:
+            api_id = create_empty_api(module, client, endpoint_type)
+        api_data = get_api_definitions(module, swagger_file=swagger_file,
+                                       swagger_dict=swagger_dict, swagger_text=swagger_text)
+        conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
+    if state == "absent":
+        del_res = delete_rest_api(module, client, api_id)
+
+    exit_args = {"changed": changed, "api_id": api_id}
+
+    if conf_res is not None:
+        exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
+    if dep_res is not None:
+        exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
+    if del_res is not None:
+        exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
+
+    module.exit_json(**exit_args)
+
+
+def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
+    apidata = None
+    if swagger_file is not None:
+        try:
+            with open(swagger_file) as f:
+                apidata = f.read()
+        except OSError as e:
+            msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e))
+            module.fail_json(msg=msg, exception=traceback.format_exc())
+    if swagger_dict is not None:
+        apidata = json.dumps(swagger_dict)
+    if swagger_text is not None:
+        apidata = swagger_text
+
+    if apidata is None:
+        module.fail_json(msg='module error - no swagger info provided')
+    return apidata
+
+
+def create_empty_api(module, client, endpoint_type):
+    """
+    creates a new empty API ready to be configured. The description is
+    temporarily set to show the API as incomplete but should be
+    updated when the API is configured.
+    """
+    desc = "Incomplete API creation by ansible aws_api_gateway module"
+    try:
+        awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type)
+    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+        module.fail_json_aws(e, msg="creating API")
+    return awsret["id"]
+
+
+def delete_rest_api(module, client, api_id):
+    """
+    Deletes entire REST API setup
+    """
+    try:
+        delete_response = delete_api(client, api_id)
+    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+        module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
+    return delete_response
+
+
+def ensure_api_in_correct_state(module, client, api_id, api_data):
+    """Make sure that we have the API configured and deployed as instructed.
+
+    This function first configures the API correctly uploading the
+    swagger definitions and then deploys those.  Configuration and
+    deployment should be closely tied because there is only one set of
+    definitions so if we stop, they may be updated by someone else and
+    then we deploy the wrong configuration.
+    """
+
+    configure_response = None
+    try:
+        configure_response = configure_api(client, api_id, api_data=api_data)
+    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+        module.fail_json_aws(e, msg="configuring API {0}".format(api_id))
+
+    deploy_response = None
+
+    stage = module.params.get('stage')
+    if stage:
+        try:
+            deploy_response = create_deployment(client, api_id, **module.params)
+        except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
+            msg = "deploying api {0} to stage {1}".format(api_id, stage)
+            module.fail_json_aws(e, msg)
+
+    return configure_response, deploy_response
+
+
+retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}
+
+
+@AWSRetry.backoff(**retry_params)
+def create_api(client, name=None, description=None, endpoint_type=None):
+    return client.create_rest_api(name=name, description=description, endpointConfiguration={'types': [endpoint_type]})
+
+
+@AWSRetry.backoff(**retry_params)
+def delete_api(client, api_id):
+    return client.delete_rest_api(restApiId=api_id)
+
+
+@AWSRetry.backoff(**retry_params)
+def configure_api(client, api_id, api_data=None, mode="overwrite"):
+    return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data)
+
+
+@AWSRetry.backoff(**retry_params)
+def create_deployment(client, rest_api_id, **params):
+    canary_settings = params.get('stage_canary_settings')
+
+    if canary_settings and len(canary_settings) > 0:
+        result = client.create_deployment(
+            restApiId=rest_api_id,
+            stageName=params.get('stage'),
+            description=params.get('deploy_desc'),
+            cacheClusterEnabled=params.get('cache_enabled'),
+            cacheClusterSize=params.get('cache_size'),
+            variables=params.get('stage_variables'),
+            canarySettings=canary_settings,
+            tracingEnabled=params.get('tracing_enabled')
+        )
+    else:
+        result = client.create_deployment(
+            restApiId=rest_api_id,
+            stageName=params.get('stage'),
+            description=params.get('deploy_desc'),
+            cacheClusterEnabled=params.get('cache_enabled'),
+            cacheClusterSize=params.get('cache_size'),
+            variables=params.get('stage_variables'),
+            tracingEnabled=params.get('tracing_enabled')
+        )
+
+    return result
+
+
+if __name__ == '__main__':
+    main()
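The configure-then-deploy coupling described in ensure_api_in_correct_state() can be reproduced with raw boto3 as a quick smoke test. A minimal sketch, assuming api_id, swagger_text, stage and region are already in hand (function name and description string are illustrative, not part of the patch):

    import boto3

    def configure_and_deploy(api_id, swagger_text, stage, region):
        # Upload the definition first, then create a deployment for the stage --
        # the same two-step flow the module performs above.
        client = boto3.client('apigateway', region_name=region)
        client.put_rest_api(restApiId=api_id, mode='overwrite', body=swagger_text)
        return client.create_deployment(restApiId=api_id, stageName=stage,
                                        description='Example deployment outside Ansible')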
diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py
new file mode 100644
index 00000000000..563a94f2c76
--- /dev/null
+++ b/aws_application_scaling_policy.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_application_scaling_policy
+short_description: Manage Application Auto Scaling Scaling Policies
+notes:
+    - For details of the parameters and returns see
+      U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
+description:
+    - Creates, updates or removes a Scaling Policy
+author:
+    - Gustavo Maia (@gurumaia)
+    - Chen Leibovich (@chenl87)
+requirements: [ json, botocore, boto3 ]
+options:
+    state:
+        description: Whether a policy should be present or absent
+        required: yes
+        choices: ['absent', 'present']
+        type: str
+    policy_name:
+        description: The name of the scaling policy.
+        required: yes
+        type: str
+    service_namespace:
+        description: The namespace of the AWS service.
+        required: yes
+        choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
+        type: str
+    resource_id:
+        description: The identifier of the resource associated with the scalable target.
+        required: yes
+        type: str
+    scalable_dimension:
+        description: The scalable dimension associated with the scalable target.
+        required: yes
+        choices: [ 'ecs:service:DesiredCount',
+                   'ec2:spot-fleet-request:TargetCapacity',
+                   'elasticmapreduce:instancegroup:InstanceCount',
+                   'appstream:fleet:DesiredCapacity',
+                   'dynamodb:table:ReadCapacityUnits',
+                   'dynamodb:table:WriteCapacityUnits',
+                   'dynamodb:index:ReadCapacityUnits',
+                   'dynamodb:index:WriteCapacityUnits']
+        type: str
+    policy_type:
+        description: The policy type.
+        required: yes
+        choices: ['StepScaling', 'TargetTrackingScaling']
+        type: str
+    step_scaling_policy_configuration:
+        description: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling.
+        required: no
+        type: dict
+    target_tracking_scaling_policy_configuration:
+        description:
+            - A target tracking policy. This parameter is required if you are creating a new policy and the policy type is TargetTrackingScaling.
+            - 'Full documentation of the suboptions can be found in the API documentation:'
+            - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)'
+        required: no
+        type: dict
+        suboptions:
+            CustomizedMetricSpecification:
+                description: The metric to use if using a customized metric.
+                type: dict
+            DisableScaleIn:
+                description: Whether scaling-in should be disabled.
+                type: bool
+            PredefinedMetricSpecification:
+                description: The metric to use if using a predefined metric.
+                type: dict
+            ScaleInCooldown:
+                description: The time (in seconds) to wait after scaling-in before another scaling action can occur.
+                type: int
+            ScaleOutCooldown:
+                description: The time (in seconds) to wait after scaling-out before another scaling action can occur.
+                type: int
+            TargetValue:
+                description: The target value for the metric
+                type: float
+    minimum_tasks:
+        description: The minimum value to scale to in response to a scale in event.
+            This parameter is required if you are creating the first policy for the specified service.
+        required: no
+        type: int
+    maximum_tasks:
+        description: The maximum value to scale to in response to a scale out event.
+            This parameter is required if you are creating the first policy for the specified service.
+        required: no
+        type: int
+    override_task_capacity:
+        description: Whether or not to override values of minimum and/or maximum tasks if they are already set.
+        required: no
+        default: no
+        type: bool
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create step scaling policy for ECS Service
+- name: scaling_policy
+  aws_application_scaling_policy:
+    state: present
+    policy_name: test_policy
+    service_namespace: ecs
+    resource_id: service/poc-pricing/test-as
+    scalable_dimension: ecs:service:DesiredCount
+    policy_type: StepScaling
+    minimum_tasks: 1
+    maximum_tasks: 6
+    step_scaling_policy_configuration:
+      AdjustmentType: ChangeInCapacity
+      StepAdjustments:
+        - MetricIntervalUpperBound: 123
+          ScalingAdjustment: 2
+        - MetricIntervalLowerBound: 123
+          ScalingAdjustment: -2
+      Cooldown: 123
+      MetricAggregationType: Average
+
+# Create target tracking scaling policy for ECS Service
+- name: scaling_policy
+  aws_application_scaling_policy:
+    state: present
+    policy_name: test_policy
+    service_namespace: ecs
+    resource_id: service/poc-pricing/test-as
+    scalable_dimension: ecs:service:DesiredCount
+    policy_type: TargetTrackingScaling
+    minimum_tasks: 1
+    maximum_tasks: 6
+    target_tracking_scaling_policy_configuration:
+      TargetValue: 60
+      PredefinedMetricSpecification:
+        PredefinedMetricType: ECSServiceAverageCPUUtilization
+      ScaleOutCooldown: 60
+      ScaleInCooldown: 60
+
+# Remove scalable target for ECS Service
+- name: scaling_policy
+  aws_application_scaling_policy:
+    state: absent
+    policy_name: test_policy
+    policy_type: StepScaling
+    service_namespace: ecs
+    resource_id: service/cluster-name/service-name
+    scalable_dimension: ecs:service:DesiredCount
+'''
+
+RETURN = '''
+alarms:
+    description: List of the CloudWatch alarms associated with the scaling policy
+    returned: when state present
+    type: complex
+    contains:
+        alarm_arn:
+            description: The Amazon Resource Name (ARN) of the alarm
+            returned: when state present
+            type: str
+        alarm_name:
+            description: The name of the alarm
+            returned: when state present
+            type: str
+service_namespace:
+    description: The namespace of the AWS service.
+    returned: when state present
+    type: str
+    sample: ecs
+resource_id:
+    description: The identifier of the resource associated with the scalable target.
+    returned: when state present
+    type: str
+    sample: service/cluster-name/service-name
+scalable_dimension:
+    description: The scalable dimension associated with the scalable target.
+    returned: when state present
+    type: str
+    sample: ecs:service:DesiredCount
+policy_arn:
+    description: The Amazon Resource Name (ARN) of the scaling policy.
+    returned: when state present
+    type: str
+policy_name:
+    description: The name of the scaling policy.
+    returned: when state present
+    type: str
+policy_type:
+    description: The policy type.
+    returned: when state present
+    type: str
+min_capacity:
+    description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
+ returned: when state present + type: int + sample: 1 +max_capacity: + description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present). + returned: when state present + type: int + sample: 2 +role_arn: + description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present). + returned: when state present + type: str + sample: arn:aws:iam::123456789123:role/roleName +step_scaling_policy_configuration: + description: The step scaling policy. + returned: when state present and the policy type is StepScaling + type: complex + contains: + adjustment_type: + description: The adjustment type + returned: when state present and the policy type is StepScaling + type: str + sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity" + cooldown: + description: The amount of time, in seconds, after a scaling activity completes + where previous trigger-related scaling activities can influence future scaling events + returned: when state present and the policy type is StepScaling + type: int + sample: 60 + metric_aggregation_type: + description: The aggregation type for the CloudWatch metrics + returned: when state present and the policy type is StepScaling + type: str + sample: "Average, Minimum, Maximum" + step_adjustments: + description: A set of adjustments that enable you to scale based on the size of the alarm breach + returned: when state present and the policy type is StepScaling + type: list + elements: dict +target_tracking_scaling_policy_configuration: + description: The target tracking policy. + returned: when state present and the policy type is TargetTrackingScaling + type: complex + contains: + predefined_metric_specification: + description: A predefined metric + returned: when state present and the policy type is TargetTrackingScaling + type: complex + contains: + predefined_metric_type: + description: The metric type + returned: when state present and the policy type is TargetTrackingScaling + type: str + sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization" + resource_label: + description: Identifies the resource associated with the metric type + returned: when metric type is ALBRequestCountPerTarget + type: str + scale_in_cooldown: + description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start + returned: when state present and the policy type is TargetTrackingScaling + type: int + sample: 60 + scale_out_cooldown: + description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start + returned: when state present and the policy type is TargetTrackingScaling + type: int + sample: 60 + target_value: + description: The target value for the metric + returned: when state present and the policy type is TargetTrackingScaling + type: int + sample: 70 +creation_time: + description: The Unix timestamp for when the scalable target was created. 
+ returned: when state present + type: str + sample: '2017-09-28T08:22:51.881000-03:00' +''' # NOQA + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +# Merge the results of the scalable target creation and policy deletion/creation +# There's no risk in overriding values since mutual keys have the same values in our case +def merge_results(scalable_target_result, policy_result): + if scalable_target_result['changed'] or policy_result['changed']: + changed = True + else: + changed = False + + merged_response = scalable_target_result['response'].copy() + merged_response.update(policy_result['response']) + + return {"changed": changed, "response": merged_response} + + +def delete_scaling_policy(connection, module): + changed = False + try: + scaling_policy = connection.describe_scaling_policies( + ServiceNamespace=module.params.get('service_namespace'), + ResourceId=module.params.get('resource_id'), + ScalableDimension=module.params.get('scalable_dimension'), + PolicyNames=[module.params.get('policy_name')], + MaxResults=1 + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe scaling policies") + + if scaling_policy['ScalingPolicies']: + try: + connection.delete_scaling_policy( + ServiceNamespace=module.params.get('service_namespace'), + ResourceId=module.params.get('resource_id'), + ScalableDimension=module.params.get('scalable_dimension'), + PolicyName=module.params.get('policy_name'), + ) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete scaling policy") + + return {"changed": changed} + + +def create_scalable_target(connection, module): + changed = False + + try: + scalable_targets = connection.describe_scalable_targets( + ServiceNamespace=module.params.get('service_namespace'), + ResourceIds=[ + module.params.get('resource_id'), + ], + ScalableDimension=module.params.get('scalable_dimension') + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe scalable targets") + + # Scalable target registration will occur if: + # 1. There is no scalable target registered for this service + # 2. 
A scalable target exists, different min/max values are defined and override is set to "yes" + if ( + not scalable_targets['ScalableTargets'] + or ( + module.params.get('override_task_capacity') + and ( + scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks') + or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks') + ) + ) + ): + changed = True + try: + connection.register_scalable_target( + ServiceNamespace=module.params.get('service_namespace'), + ResourceId=module.params.get('resource_id'), + ScalableDimension=module.params.get('scalable_dimension'), + MinCapacity=module.params.get('minimum_tasks'), + MaxCapacity=module.params.get('maximum_tasks') + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to register scalable target") + + try: + response = connection.describe_scalable_targets( + ServiceNamespace=module.params.get('service_namespace'), + ResourceIds=[ + module.params.get('resource_id'), + ], + ScalableDimension=module.params.get('scalable_dimension') + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe scalable targets") + + if (response['ScalableTargets']): + snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0]) + else: + snaked_response = {} + + return {"changed": changed, "response": snaked_response} + + +def create_scaling_policy(connection, module): + try: + scaling_policy = connection.describe_scaling_policies( + ServiceNamespace=module.params.get('service_namespace'), + ResourceId=module.params.get('resource_id'), + ScalableDimension=module.params.get('scalable_dimension'), + PolicyNames=[module.params.get('policy_name')], + MaxResults=1 + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe scaling policies") + + changed = False + + if scaling_policy['ScalingPolicies']: + scaling_policy = scaling_policy['ScalingPolicies'][0] + # check if the input parameters are equal to what's already configured + for attr in ('PolicyName', + 'ServiceNamespace', + 'ResourceId', + 'ScalableDimension', + 'PolicyType', + 'StepScalingPolicyConfiguration', + 'TargetTrackingScalingPolicyConfiguration'): + if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)): + changed = True + scaling_policy[attr] = module.params.get(_camel_to_snake(attr)) + else: + changed = True + scaling_policy = { + 'PolicyName': module.params.get('policy_name'), + 'ServiceNamespace': module.params.get('service_namespace'), + 'ResourceId': module.params.get('resource_id'), + 'ScalableDimension': module.params.get('scalable_dimension'), + 'PolicyType': module.params.get('policy_type'), + 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'), + 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration') + } + + if changed: + try: + if (module.params.get('step_scaling_policy_configuration')): + connection.put_scaling_policy( + PolicyName=scaling_policy['PolicyName'], + ServiceNamespace=scaling_policy['ServiceNamespace'], + ResourceId=scaling_policy['ResourceId'], + ScalableDimension=scaling_policy['ScalableDimension'], + PolicyType=scaling_policy['PolicyType'], + StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration'] + ) + elif 
(module.params.get('target_tracking_scaling_policy_configuration')): + connection.put_scaling_policy( + PolicyName=scaling_policy['PolicyName'], + ServiceNamespace=scaling_policy['ServiceNamespace'], + ResourceId=scaling_policy['ResourceId'], + ScalableDimension=scaling_policy['ScalableDimension'], + PolicyType=scaling_policy['PolicyType'], + TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create scaling policy") + + try: + response = connection.describe_scaling_policies( + ServiceNamespace=module.params.get('service_namespace'), + ResourceId=module.params.get('resource_id'), + ScalableDimension=module.params.get('scalable_dimension'), + PolicyNames=[module.params.get('policy_name')], + MaxResults=1 + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe scaling policies") + + if (response['ScalingPolicies']): + snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0]) + else: + snaked_response = {} + + return {"changed": changed, "response": snaked_response} + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + policy_name=dict(type='str', required=True), + service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']), + resource_id=dict(type='str', required=True), + scalable_dimension=dict(type='str', + required=True, + choices=['ecs:service:DesiredCount', + 'ec2:spot-fleet-request:TargetCapacity', + 'elasticmapreduce:instancegroup:InstanceCount', + 'appstream:fleet:DesiredCapacity', + 'dynamodb:table:ReadCapacityUnits', + 'dynamodb:table:WriteCapacityUnits', + 'dynamodb:index:ReadCapacityUnits', + 'dynamodb:index:WriteCapacityUnits']), + policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']), + step_scaling_policy_configuration=dict(type='dict'), + target_tracking_scaling_policy_configuration=dict( + type='dict', + options=dict( + CustomizedMetricSpecification=dict(type='dict'), + DisableScaleIn=dict(type='bool'), + PredefinedMetricSpecification=dict(type='dict'), + ScaleInCooldown=dict(type='int'), + ScaleOutCooldown=dict(type='int'), + TargetValue=dict(type='float'), + ) + ), + minimum_tasks=dict(type='int'), + maximum_tasks=dict(type='int'), + override_task_capacity=dict(type='bool'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client('application-autoscaling') + + # Remove any target_tracking_scaling_policy_configuration suboptions that are None + policy_config_options = [ + 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue' + ] + if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict): + for option in policy_config_options: + if module.params['target_tracking_scaling_policy_configuration'][option] is None: + module.params['target_tracking_scaling_policy_configuration'].pop(option) + + if module.params.get("state") == 'present': + # A scalable target must be registered prior to creating a scaling policy + scalable_target_result = create_scalable_target(connection, module) + policy_result = create_scaling_policy(connection, module) + # Merge the results of the scalable target 
creation and policy deletion/creation + # There's no risk in overriding values since mutual keys have the same values in our case + merged_result = merge_results(scalable_target_result, policy_result) + module.exit_json(**merged_result) + else: + policy_result = delete_scaling_policy(connection, module) + module.exit_json(**policy_result) + + +if __name__ == '__main__': + main() diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py new file mode 100644 index 00000000000..a41e8249ecd --- /dev/null +++ b/aws_batch_compute_environment.py @@ -0,0 +1,490 @@ +#!/usr/bin/python +# Copyright (c) 2017 Jon Meran +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_batch_compute_environment +short_description: Manage AWS Batch Compute Environments +description: + - This module allows the management of AWS Batch Compute Environments. + It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute + environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions. + + +author: Jon Meran (@jonmer85) +options: + compute_environment_name: + description: + - The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores + are allowed. + required: true + type: str + type: + description: + - The type of the compute environment. + required: true + choices: ["MANAGED", "UNMANAGED"] + type: str + state: + description: + - Describes the desired state. + default: "present" + choices: ["present", "absent"] + type: str + compute_environment_state: + description: + - The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs + from a queue and can scale out automatically based on queues. + default: "ENABLED" + choices: ["ENABLED", "DISABLED"] + type: str + service_role: + description: + - The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS + services on your behalf. + required: true + type: str + compute_resource_type: + description: + - The type of compute resource. + required: true + choices: ["EC2", "SPOT"] + type: str + minv_cpus: + description: + - The minimum number of EC2 vCPUs that an environment should maintain. + required: true + type: int + maxv_cpus: + description: + - The maximum number of EC2 vCPUs that an environment can reach. + required: true + type: int + desiredv_cpus: + description: + - The desired number of EC2 vCPUS in the compute environment. + type: int + instance_types: + description: + - The instance types that may be launched. + required: true + type: list + elements: str + image_id: + description: + - The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. + type: str + subnets: + description: + - The VPC subnets into which the compute resources are launched. + required: true + type: list + elements: str + security_group_ids: + description: + - The EC2 security groups that are associated with instances launched in the compute environment. + required: true + type: list + elements: str + ec2_key_pair: + description: + - The EC2 key pair that is used for instances launched in the compute environment. 
+ type: str + instance_role: + description: + - The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment. + required: true + type: str + tags: + description: + - Key-value pair tags to be applied to resources that are launched in the compute environment. + type: dict + bid_percentage: + description: + - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that + instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price + must be below 20% of the current On-Demand price for that EC2 instance. + type: int + spot_iam_fleet_role: + description: + - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. + type: str + +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: no + vars: + state: present + tasks: + - name: My Batch Compute Environment + aws_batch_compute_environment: + compute_environment_name: computeEnvironmentName + state: present + region: us-east-1 + compute_environment_state: ENABLED + type: MANAGED + compute_resource_type: EC2 + minv_cpus: 0 + maxv_cpus: 2 + desiredv_cpus: 1 + instance_types: + - optimal + subnets: + - my-subnet1 + - my-subnet2 + security_group_ids: + - my-sg1 + - my-sg2 + instance_role: arn:aws:iam:::instance-profile/ + tags: + tag1: value1 + tag2: value2 + service_role: arn:aws:iam:::role/service-role/ + register: aws_batch_compute_environment_action + + - name: show results + debug: + var: aws_batch_compute_environment_action +''' + +RETURN = ''' +--- +output: + description: "returns what action was taken, whether something was changed, invocation and response" + returned: always + sample: + batch_compute_environment_action: none + changed: false + invocation: + module_args: + aws_access_key: ~ + aws_secret_key: ~ + bid_percentage: ~ + compute_environment_name: + compute_environment_state: ENABLED + compute_resource_type: EC2 + desiredv_cpus: 0 + ec2_key_pair: ~ + ec2_url: ~ + image_id: ~ + instance_role: "arn:aws:iam::..." + instance_types: + - optimal + maxv_cpus: 8 + minv_cpus: 0 + profile: ~ + region: us-east-1 + security_group_ids: + - "*******" + security_token: ~ + service_role: "arn:aws:iam::...." + spot_iam_fleet_role: ~ + state: present + subnets: + - "******" + tags: + Environment: + Name: + type: MANAGED + validate_certs: true + response: + computeEnvironmentArn: "arn:aws:batch:...." + computeEnvironmentName: + computeResources: + desiredvCpus: 0 + instanceRole: "arn:aws:iam::..." + instanceTypes: + - optimal + maxvCpus: 8 + minvCpus: 0 + securityGroupIds: + - "******" + subnets: + - "*******" + tags: + Environment: + Name: + type: EC2 + ecsClusterArn: "arn:aws:ecs:....." + serviceRole: "arn:aws:iam::..." 
+        state: ENABLED
+        status: VALID
+        statusReason: "ComputeEnvironment Healthy"
+        type: MANAGED
+    type: dict
+'''
+
+import re
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+def set_api_params(module, module_params):
+    """
+    Sets module parameters to those expected by the boto3 API.
+
+    :param module:
+    :param module_params:
+    :return:
+    """
+    api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+    return snake_dict_to_camel_dict(api_params)
+
+
+def validate_params(module):
+    """
+    Performs basic parameter validation.
+
+    :param module:
+    :return:
+    """
+
+    compute_environment_name = module.params['compute_environment_name']
+
+    # validate compute environment name
+    if not re.search(r'^[\w\_:]+$', compute_environment_name):
+        module.fail_json(
+            msg="compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
+                "and underscores.".format(compute_environment_name)
+        )
+    if not compute_environment_name.startswith('arn:aws:batch:'):
+        if len(compute_environment_name) > 128:
+            module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
+                             .format(compute_environment_name))
+
+    return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   Batch Compute Environment functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_compute_environment(module, client):
+    try:
+        environments = client.describe_compute_environments(
+            computeEnvironments=[module.params['compute_environment_name']]
+        )
+        if len(environments['computeEnvironments']) > 0:
+            return environments['computeEnvironments'][0]
+        else:
+            return None
+    except ClientError:
+        return None
+
+
+def create_compute_environment(module, client):
+    """
+    Adds a Batch compute environment
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    params = (
+        'compute_environment_name', 'type', 'service_role')
+    api_params = set_api_params(module, params)
+
+    if module.params['compute_environment_state'] is not None:
+        api_params['state'] = module.params['compute_environment_state']
+
+    compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
+                                    'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
+                                    'spot_iam_fleet_role')
+    compute_resources_params = set_api_params(module, compute_resources_param_list)
+
+    if module.params['compute_resource_type'] is not None:
+        compute_resources_params['type'] = module.params['compute_resource_type']
+
+    api_params['computeResources'] = compute_resources_params
+
+    try:
+        if not module.check_mode:
+            client.create_compute_environment(**api_params)
+        changed = True
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error creating compute environment')
+
+    return changed
+
+
+def remove_compute_environment(module, client):
+    """
+    Remove a Batch compute environment
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    api_params = {'computeEnvironment': module.params['compute_environment_name']}
+
+    try:
+        if not module.check_mode:
+            client.delete_compute_environment(**api_params)
+        changed = True
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error removing compute environment')
+    return changed
+
+
+def manage_state(module, client):
+    changed = False
+    current_state = 'absent'
+    state = module.params['state']
+    compute_environment_state = module.params['compute_environment_state']
+    compute_environment_name = module.params['compute_environment_name']
+    service_role = module.params['service_role']
+    minv_cpus = module.params['minv_cpus']
+    maxv_cpus = module.params['maxv_cpus']
+    desiredv_cpus = module.params['desiredv_cpus']
+    action_taken = 'none'
+    update_env_response = ''
+
+    check_mode = module.check_mode
+
+    # check if the compute environment exists
+    current_compute_environment = get_current_compute_environment(module, client)
+    response = current_compute_environment
+    if current_compute_environment:
+        current_state = 'present'
+
+    if state == 'present':
+        if current_state == 'present':
+            updates = False
+            # Update Batch Compute Environment configuration
+            compute_kwargs = {'computeEnvironment': compute_environment_name}
+
+            # Update configuration if needed
+            compute_resources = {}
+            if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
+                compute_kwargs.update({'state': compute_environment_state})
+                updates = True
+            if service_role and current_compute_environment['serviceRole'] != service_role:
+                compute_kwargs.update({'serviceRole': service_role})
+                updates = True
+            if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
+                compute_resources['minvCpus'] = minv_cpus
+            if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
+                compute_resources['maxvCpus'] = maxv_cpus
+            if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
+                compute_resources['desiredvCpus'] = desiredv_cpus
+            if len(compute_resources) > 0:
+                compute_kwargs['computeResources'] = compute_resources
+                updates = True
+            if updates:
+                try:
+                    if not check_mode:
+                        update_env_response = client.update_compute_environment(**compute_kwargs)
+                        if not update_env_response:
+                            module.fail_json(msg='Unable to get compute environment information after updating')
+                        # re-describe so the returned response reflects the update
+                        response = get_current_compute_environment(module, client)
+                    changed = True
+                    action_taken = "updated"
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Unable to update environment.")
+
+        else:
+            # Create Batch Compute Environment
+            changed = create_compute_environment(module, client)
+            # Describe compute environment
+            action_taken = 'added'
+            response = get_current_compute_environment(module, client)
+            if not response:
+                module.fail_json(msg='Unable to get compute environment information after creating')
+    else:
+        if current_state == 'present':
+            # remove the compute environment
+            changed = remove_compute_environment(module, client)
+            action_taken = 'deleted'
+    return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   MAIN
+#
+# ---------------------------------------------------------------------------------------------------
+
+def main():
+    """
+    Main entry point.
+
+    :return dict: changed, batch_compute_environment_action, response
+    """
+
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        compute_environment_name=dict(required=True),
+        type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
+        compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+        service_role=dict(required=True),
+        compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
+        minv_cpus=dict(type='int', required=True),
+        maxv_cpus=dict(type='int', required=True),
+        desiredv_cpus=dict(type='int'),
+        instance_types=dict(type='list', required=True),
+        image_id=dict(),
+        subnets=dict(type='list', required=True),
+        security_group_ids=dict(type='list', required=True),
+        ec2_key_pair=dict(),
+        instance_role=dict(required=True),
+        tags=dict(type='dict'),
+        bid_percentage=dict(type='int'),
+        spot_iam_fleet_role=dict(),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    client = module.client('batch')
+
+    validate_params(module)
+
+    results = manage_state(module, client)
+
+    module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py
new file mode 100644
index 00000000000..b443e6199a7
--- /dev/null
+++ b/aws_batch_job_definition.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: aws_batch_job_definition
+short_description: Manage AWS Batch Job Definitions
+description:
+    - This module allows the management of AWS Batch Job Definitions.
+      It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
+      environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
+
+
+author: Jon Meran (@jonmer85)
+options:
+  job_definition_arn:
+    description:
+      - The ARN for the job definition.
+    type: str
+  job_definition_name:
+    description:
+      - The name for the job definition.
+    required: true
+    type: str
+  state:
+    description:
+      - Describes the desired state.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+  type:
+    description:
+      - The type of job definition.
+    required: true
+    type: str
+  parameters:
+    description:
+      - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
+        key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
+        the job definition.
+    type: dict
+  image:
+    description:
+      - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
+        Hub registry are available by default. Other repositories are specified with ``repository-url/image:tag``.
+ Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, + and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker + Remote API and the IMAGE parameter of docker run. + required: true + type: str + vcpus: + description: + - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container + section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to + 1,024 CPU shares. + required: true + type: int + memory: + description: + - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory + specified here, the container is killed. This parameter maps to Memory in the Create a container section of the + Docker Remote API and the --memory option to docker run. + required: true + type: int + command: + description: + - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of + the Docker Remote API and the COMMAND parameter to docker run. For more information, + see U(https://docs.docker.com/engine/reference/builder/#cmd). + type: list + elements: str + job_role_arn: + description: + - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. + type: str + volumes: + description: + - A list of data volumes used in a job. + suboptions: + host: + description: + - The contents of the host parameter determine whether your data volume persists on the host container + instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host + path for your data volume, but the data is not guaranteed to persist after the containers associated with + it stop running. + This is a dictionary with one property, sourcePath - The path on the host container + instance that is presented to the container. If this parameter is empty,then the Docker daemon has assigned + a host path for you. If the host parameter contains a sourcePath file location, then the data volume + persists at the specified location on the host container instance until you delete it manually. If the + sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the + location does exist, the contents of the source path folder are exported. + name: + description: + - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are + allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. + type: list + elements: dict + environment: + description: + - The environment variables to pass to a container. This parameter maps to Env in the Create a container section + of the Docker Remote API and the --env option to docker run. + suboptions: + name: + description: + - The name of the key value pair. For environment variables, this is the name of the environment variable. + value: + description: + - The value of the key value pair. For environment variables, this is the value of the environment variable. + type: list + elements: dict + mount_points: + description: + - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container + section of the Docker Remote API and the --volume option to docker run. + suboptions: + containerPath: + description: + - The path on the container at which to mount the host volume. 
+ readOnly: + description: + - If this value is true , the container has read-only access to the volume; otherwise, the container can write + to the volume. The default value is C(false). + sourceVolume: + description: + - The name of the volume to mount. + type: list + elements: dict + readonly_root_filesystem: + description: + - When this parameter is true, the container is given read-only access to its root file system. This parameter + maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option + to docker run. + type: str + privileged: + description: + - When this parameter is true, the container is given elevated privileges on the host container instance + (similar to the root user). This parameter maps to Privileged in the Create a container section of the + Docker Remote API and the --privileged option to docker run. + type: str + ulimits: + description: + - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section + of the Docker Remote API and the --ulimit option to docker run. + suboptions: + hardLimit: + description: + - The hard limit for the ulimit type. + name: + description: + - The type of the ulimit. + softLimit: + description: + - The soft limit for the ulimit type. + type: list + elements: dict + user: + description: + - The user name to use inside the container. This parameter maps to User in the Create a container section of + the Docker Remote API and the --user option to docker run. + type: str + attempts: + description: + - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 + attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that + many times. + type: int +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: no + vars: + state: present + tasks: +- name: My Batch Job Definition + aws_batch_job_definition: + job_definition_name: My Batch Job Definition + state: present + type: container + parameters: + Param1: Val1 + Param2: Val2 + image: + vcpus: 1 + memory: 512 + command: + - python + - run_my_script.py + - arg1 + job_role_arn: + attempts: 3 + register: job_definition_create_result + +- name: show results + debug: var=job_definition_create_result +''' + +RETURN = ''' +--- +output: + description: "returns what action was taken, whether something was changed, invocation and response" + returned: always + sample: + aws_batch_job_definition_action: none + changed: false + response: + job_definition_arn: "arn:aws:batch:...." 
+ job_definition_name: + status: INACTIVE + type: container + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.batch import cc, set_api_params +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + + +# --------------------------------------------------------------------------------------------------- +# +# Helper Functions & classes +# +# --------------------------------------------------------------------------------------------------- + +# logger = logging.getLogger() +# logging.basicConfig(filename='ansible_debug.log') +# logger.setLevel(logging.DEBUG) + + +def validate_params(module, batch_client): + """ + Performs basic parameter validation. + + :param module: + :param batch_client: + :return: + """ + return + + +# --------------------------------------------------------------------------------------------------- +# +# Batch Job Definition functions +# +# --------------------------------------------------------------------------------------------------- + +def get_current_job_definition(module, batch_client): + try: + environments = batch_client.describe_job_definitions( + jobDefinitionName=module.params['job_definition_name'] + ) + if len(environments['jobDefinitions']) > 0: + latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions'])) + latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision), + None) + return latest_definition + return None + except ClientError: + return None + + +def create_job_definition(module, batch_client): + """ + Adds a Batch job definition + + :param module: + :param batch_client: + :return: + """ + + changed = False + + # set API parameters + api_params = set_api_params(module, get_base_params()) + container_properties_params = set_api_params(module, get_container_property_params()) + retry_strategy_params = set_api_params(module, get_retry_strategy_params()) + + api_params['retryStrategy'] = retry_strategy_params + api_params['containerProperties'] = container_properties_params + + try: + if not module.check_mode: + batch_client.register_job_definition(**api_params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Error registering job definition') + + return changed + + +def get_retry_strategy_params(): + return 'attempts', + + +def get_container_property_params(): + return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points', + 'readonly_root_filesystem', 'privileged', 'ulimits', 'user') + + +def get_base_params(): + return 'job_definition_name', 'type', 'parameters' + + +def get_compute_environment_order_list(module): + compute_environment_order_list = [] + for ceo in module.params['compute_environment_order']: + compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + return compute_environment_order_list + + +def remove_job_definition(module, batch_client): + """ + Remove a Batch job definition + + :param module: + :param batch_client: + :return: + """ + + changed = False + + try: + if not module.check_mode: + batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn']) + changed = True + except (BotoCoreError, 
ClientError) as e: + module.fail_json_aws(e, msg='Error removing job definition') + return changed + + +def job_definition_equal(module, current_definition): + equal = True + + for param in get_base_params(): + if module.params.get(param) != current_definition.get(cc(param)): + equal = False + break + + for param in get_container_property_params(): + if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)): + equal = False + break + + for param in get_retry_strategy_params(): + if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)): + equal = False + break + + return equal + + +def manage_state(module, batch_client): + changed = False + current_state = 'absent' + state = module.params['state'] + job_definition_name = module.params['job_definition_name'] + action_taken = 'none' + response = None + + check_mode = module.check_mode + + # check if the job definition exists + current_job_definition = get_current_job_definition(module, batch_client) + if current_job_definition: + current_state = 'present' + + if state == 'present': + if current_state == 'present': + # check if definition has changed and register a new version if necessary + if not job_definition_equal(module, current_job_definition): + create_job_definition(module, batch_client) + action_taken = 'updated with new version' + changed = True + else: + # Create Job definition + changed = create_job_definition(module, batch_client) + action_taken = 'added' + + response = get_current_job_definition(module, batch_client) + if not response: + module.fail_json(msg='Unable to get job definition information after creating/updating') + else: + if current_state == 'present': + # remove the Job definition + changed = remove_job_definition(module, batch_client) + action_taken = 'deregistered' + return dict(changed=changed, batch_job_definition_action=action_taken, response=response) + + +# --------------------------------------------------------------------------------------------------- +# +# MAIN +# +# --------------------------------------------------------------------------------------------------- + +def main(): + """ + Main entry point. 
+
+    :return dict: ansible facts
+    """
+
+    argument_spec = dict(
+        state=dict(required=False, default='present', choices=['present', 'absent']),
+        job_definition_name=dict(required=True),
+        job_definition_arn=dict(),
+        type=dict(required=True),
+        parameters=dict(type='dict'),
+        image=dict(required=True),
+        vcpus=dict(type='int', required=True),
+        memory=dict(type='int', required=True),
+        command=dict(type='list', default=[]),
+        job_role_arn=dict(),
+        volumes=dict(type='list', default=[]),
+        environment=dict(type='list', default=[]),
+        mount_points=dict(type='list', default=[]),
+        readonly_root_filesystem=dict(),
+        privileged=dict(),
+        ulimits=dict(type='list', default=[]),
+        user=dict(),
+        attempts=dict(type='int')
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    batch_client = module.client('batch')
+
+    validate_params(module, batch_client)
+
+    results = manage_state(module, batch_client)
+
+    module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py
new file mode 100644
index 00000000000..0d4828ac576
--- /dev/null
+++ b/aws_batch_job_queue.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Jon Meran
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: aws_batch_job_queue
+short_description: Manage AWS Batch Job Queues
+description:
+    - This module allows the management of AWS Batch Job Queues.
+      It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
+      environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
+
+
+author: Jon Meran (@jonmer85)
+options:
+  job_queue_name:
+    description:
+      - The name for the job queue.
+    required: true
+    type: str
+  state:
+    description:
+      - Describes the desired state.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+  job_queue_state:
+    description:
+      - The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs.
+    default: "ENABLED"
+    choices: ["ENABLED", "DISABLED"]
+    type: str
+  priority:
+    description:
+      - The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
+        parameter) are evaluated first when associated with the same compute environment. Priority is determined in
+        descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a
+        job queue with a priority value of 1.
+    required: true
+    type: int
+  compute_environment_order:
+    description:
+      - The set of compute environments mapped to a job queue and their order relative to each other. The job
+        scheduler uses this parameter to determine which compute environment should execute a given job. Compute
+        environments must be in the VALID state before you can associate them with a job queue. You can associate up to
+        3 compute environments with a job queue.
+    required: true
+    type: list
+    elements: dict
+    suboptions:
+        order:
+            type: int
+            description: The relative priority of the environment.
+        compute_environment:
+            type: str
+            description: The name of the compute environment.
+requirements:
+    - boto3
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    state: present
+  tasks:
+    - name: My Batch Job Queue
+      aws_batch_job_queue:
+        job_queue_name: jobQueueName
+        state: present
+        region: us-east-1
+        job_queue_state: ENABLED
+        priority: 1
+        compute_environment_order:
+          - order: 1
+            compute_environment: my_compute_env1
+          - order: 2
+            compute_environment: my_compute_env2
+      register: batch_job_queue_action
+
+    - name: show results
+      debug:
+        var: batch_job_queue_action
+'''
+
+RETURN = '''
+---
+output:
+  description: "returns what action was taken, whether something was changed, invocation and response"
+  returned: always
+  sample:
+    batch_job_queue_action: updated
+    changed: false
+    response:
+      job_queue_arn: "arn:aws:batch:...."
+      job_queue_name:
+      priority: 1
+      state: DISABLED
+      status: UPDATING
+      status_reason: "JobQueue Healthy"
+  type: dict
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.batch import set_api_params
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict

+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+def validate_params(module):
+    """
+    Performs basic parameter validation.
+
+    :param module:
+    """
+    return
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   Batch Job Queue functions
+#
+# ---------------------------------------------------------------------------------------------------
+
+def get_current_job_queue(module, client):
+    try:
+        environments = client.describe_job_queues(
+            jobQueues=[module.params['job_queue_name']]
+        )
+        return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
+    except ClientError:
+        return None
+
+
+def create_job_queue(module, client):
+    """
+    Adds a Batch job queue
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    params = ('job_queue_name', 'priority')
+    api_params = set_api_params(module, params)
+
+    if module.params['job_queue_state'] is not None:
+        api_params['state'] = module.params['job_queue_state']
+
+    api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
+
+    try:
+        if not module.check_mode:
+            client.create_job_queue(**api_params)
+        changed = True
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Error creating job queue')
+
+    return changed
+
+
+def get_compute_environment_order_list(module):
+    compute_environment_order_list = []
+    for ceo in module.params['compute_environment_order']:
+        compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
+    return compute_environment_order_list
+
+
+def remove_job_queue(module, client):
+    """
+    Remove a Batch job queue
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    api_params = {'jobQueue': module.params['job_queue_name']}
+
+    try:
+        if not module.check_mode:
+
client.delete_job_queue(**api_params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Error removing job queue') + return changed + + +def manage_state(module, client): + changed = False + current_state = 'absent' + state = module.params['state'] + job_queue_state = module.params['job_queue_state'] + job_queue_name = module.params['job_queue_name'] + priority = module.params['priority'] + action_taken = 'none' + response = None + + check_mode = module.check_mode + + # check if the job queue exists + current_job_queue = get_current_job_queue(module, client) + if current_job_queue: + current_state = 'present' + + if state == 'present': + if current_state == 'present': + updates = False + # Update Batch Job Queue configuration + job_kwargs = {'jobQueue': job_queue_name} + + # Update configuration if needed + if job_queue_state and current_job_queue['state'] != job_queue_state: + job_kwargs.update({'state': job_queue_state}) + updates = True + if priority is not None and current_job_queue['priority'] != priority: + job_kwargs.update({'priority': priority}) + updates = True + + new_compute_environment_order_list = get_compute_environment_order_list(module) + if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']: + job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list + updates = True + + if updates: + try: + if not check_mode: + client.update_job_queue(**job_kwargs) + changed = True + action_taken = "updated" + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update job queue") + + else: + # Create Job Queue + changed = create_job_queue(module, client) + action_taken = 'added' + + # Describe job queue + response = get_current_job_queue(module, client) + if not response: + module.fail_json(msg='Unable to get job queue information after creating/updating') + else: + if current_state == 'present': + # remove the Job Queue + changed = remove_job_queue(module, client) + action_taken = 'deleted' + return dict(changed=changed, batch_job_queue_action=action_taken, response=response) + + +# --------------------------------------------------------------------------------------------------- +# +# MAIN +# +# --------------------------------------------------------------------------------------------------- + +def main(): + """ + Main entry point. 
+
+    :return dict: changed, batch_job_queue_action, response
+    """
+
+    argument_spec = dict(
+        state=dict(required=False, default='present', choices=['present', 'absent']),
+        job_queue_name=dict(required=True),
+        job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+        priority=dict(type='int', required=True),
+        compute_environment_order=dict(type='list', required=True),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    client = module.client('batch')
+
+    validate_params(module)
+
+    results = manage_state(module, client)
+
+    module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_codebuild.py b/aws_codebuild.py
new file mode 100644
index 00000000000..ae0726aa1d4
--- /dev/null
+++ b/aws_codebuild.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_codebuild
+short_description: Create or delete an AWS CodeBuild project
+notes:
+    - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+description:
+    - Create or delete CodeBuild projects on AWS, used for building code artifacts from source code.
+author:
+    - Stefan Horning (@stefanhorning)
+requirements: [ botocore, boto3 ]
+options:
+  name:
+    description:
+      - Name of the CodeBuild project.
+    required: true
+    type: str
+  description:
+    description:
+      - Descriptive text of the CodeBuild project.
+    type: str
+  source:
+    description:
+      - Configure service and location for the build input source.
+    required: true
+    suboptions:
+      type:
+        description:
+          - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
+        required: true
+        type: str
+      location:
+        description:
+          - Information about the location of the source code to be built. For type CODEPIPELINE location should not be specified.
+        type: str
+      git_clone_depth:
+        description:
+          - When using git you can specify the clone depth as an integer here.
+        type: int
+      buildspec:
+        description:
+          - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
+        type: str
+      insecure_ssl:
+        description:
+          - Enable this flag to ignore SSL warnings while connecting to the project source code.
+        type: bool
+    type: dict
+  artifacts:
+    description:
+      - Information about the build output artifacts for the build project.
+    required: true
+    suboptions:
+      type:
+        description:
+          - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
+        required: true
+      location:
+        description:
+          - Information about the build output artifact location. When choosing type S3, set the bucket name here.
+      path:
+        description:
+          - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
+          - Used for path in S3 bucket when type is C(S3).
+      namespace_type:
+        description:
+          - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
+          - Accepts C(BUILD_ID) and C(NONE).
+ - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)." + name: + description: + - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact. + packaging: + description: + - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file. + type: dict + cache: + description: + - Caching params to speed up following builds. + suboptions: + type: + description: + - Cache type. Can be C(NO_CACHE) or C(S3). + required: true + location: + description: + - Caching location on S3. + required: true + type: dict + environment: + description: + - Information about the build environment for the build project. + suboptions: + type: + description: + - The type of build environment to use for the project. Usually C(LINUX_CONTAINER). + required: true + image: + description: + - The ID of the Docker image to use for this build project. + required: true + compute_type: + description: + - Information about the compute resources the build project will use. + - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)." + required: true + environment_variables: + description: + - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields. + - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }" + privileged_mode: + description: + - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is be used to build Docker images. + type: dict + service_role: + description: + - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account. + type: str + timeout_in_minutes: + description: + - How long CodeBuild should wait until timing out any build that has not been marked as completed. + default: 60 + type: int + encryption_key: + description: + - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts. + type: str + tags: + description: + - A set of tags for the build project. + type: list + elements: dict + suboptions: + key: + description: The name of the Tag. + type: str + value: + description: The value of the Tag. + type: str + vpc_config: + description: + - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC. + type: dict + state: + description: + - Create or remove code build project. + default: 'present' + choices: ['present', 'absent'] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +- aws_codebuild: + name: my_project + description: My nice little project + service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role" + source: + # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3 + type: CODEPIPELINE + buildspec: '' + artifacts: + namespaceType: NONE + packaging: NONE + type: CODEPIPELINE + name: my_project + environment: + computeType: BUILD_GENERAL1_SMALL + privilegedMode: "true" + image: "aws/codebuild/docker:17.09.0" + type: LINUX_CONTAINER + environmentVariables: + - { name: 'PROFILE', value: 'staging' } + encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3" + region: us-east-1 + state: present +''' + +RETURN = ''' +project: + description: Returns the dictionary describing the code project configuration. + returned: success + type: complex + contains: + name: + description: Name of the CodeBuild project + returned: always + type: str + sample: my_project + arn: + description: ARN of the CodeBuild project + returned: always + type: str + sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder + description: + description: A description of the build project + returned: always + type: str + sample: My nice little project + source: + description: Information about the build input source code. + returned: always + type: complex + contains: + type: + description: The type of the repository + returned: always + type: str + sample: CODEPIPELINE + location: + description: Location identifier, depending on the source type. + returned: when configured + type: str + git_clone_depth: + description: The git clone depth + returned: when configured + type: int + build_spec: + description: The build spec declaration to use for the builds in this build project. + returned: always + type: str + auth: + description: Information about the authorization settings for AWS CodeBuild to access the source code to be built. + returned: when configured + type: complex + insecure_ssl: + description: True if set to ignore SSL warnings. + returned: when configured + type: bool + artifacts: + description: Information about the output of build artifacts + returned: always + type: complex + contains: + type: + description: The type of build artifact. + returned: always + type: str + sample: CODEPIPELINE + location: + description: Output location for build artifacts + returned: when configured + type: str + # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project + cache: + description: Cache settings for the build project. + returned: when configured + type: dict + environment: + description: Environment settings for the build + returned: always + type: dict + service_role: + description: IAM role to be used during build to access other AWS services. 
+ returned: always + type: str + sample: arn:aws:iam::123123123:role/codebuild-service-role + timeout_in_minutes: + description: The timeout of a build in minutes + returned: always + type: int + sample: 60 + tags: + description: Tags added to the project + returned: when configured + type: list + created: + description: Timestamp of the create time of the project + returned: always + type: str + sample: "2018-04-17T16:56:03.245000+02:00" +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + + +def create_or_update_project(client, params, module): + resp = {} + name = params['name'] + # clean up params + formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) + permitted_create_params = get_boto3_client_method_parameters(client, 'create_project') + permitted_update_params = get_boto3_client_method_parameters(client, 'update_project') + + formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) + formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) + + # Check if project with that name already exists and if so update existing: + found = describe_project(client=client, name=name, module=module) + changed = False + + if 'name' in found: + found_project = found + resp = update_project(client=client, params=formatted_update_params, module=module) + updated_project = resp['project'] + + # Prep both dicts for sensible change comparison: + found_project.pop('lastModified') + updated_project.pop('lastModified') + if 'tags' not in updated_project: + updated_project['tags'] = [] + + if updated_project != found_project: + changed = True + return resp, changed + # Or create new project: + try: + resp = client.create_project(**formatted_create_params) + changed = True + return resp, changed + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create CodeBuild project") + + +def update_project(client, params, module): + name = params['name'] + + try: + resp = client.update_project(**params) + return resp + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update CodeBuild project") + + +def delete_project(client, name, module): + found = describe_project(client=client, name=name, module=module) + changed = False + if 'name' in found: + # Mark as changed when a project with that name existed before calling delete + changed = True + try: + resp = client.delete_project(name=name) + return resp, changed + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete CodeBuild project") + + +def describe_project(client, name, module): + project = {} + try: + projects = client.batch_get_projects(names=[name])['projects'] + if len(projects) > 0: + project = projects[0] + return project + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe CodeBuild projects") + + +def main(): + argument_spec = dict( + name=dict(required=True), + description=dict(), + source=dict(required=True, 
type='dict'), + artifacts=dict(required=True, type='dict'), + cache=dict(type='dict'), + environment=dict(type='dict'), + service_role=dict(), + timeout_in_minutes=dict(type='int', default=60), + encryption_key=dict(), + tags=dict(type='list'), + vpc_config=dict(type='dict'), + state=dict(choices=['present', 'absent'], default='present') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + client_conn = module.client('codebuild') + + state = module.params.get('state') + changed = False + + if state == 'present': + project_result, changed = create_or_update_project( + client=client_conn, + params=module.params, + module=module) + elif state == 'absent': + project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result)) + + +if __name__ == '__main__': + main() diff --git a/aws_codecommit.py b/aws_codecommit.py new file mode 100644 index 00000000000..c946a95aa7d --- /dev/null +++ b/aws_codecommit.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Shuang Wang +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: aws_codecommit +short_description: Manage repositories in AWS CodeCommit +description: + - Supports creation and deletion of CodeCommit repositories. + - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. +author: Shuang Wang (@ptux) + +requirements: + - botocore + - boto3 + - python >= 2.6 + +options: + name: + description: + - name of repository. + required: true + type: str + description: + description: + - description or comment of repository. + required: false + aliases: + - comment + type: str + state: + description: + - Specifies the state of repository. + required: true + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +RETURN = ''' +repository_metadata: + description: "Information about the repository." + returned: always + type: complex + contains: + account_id: + description: "The ID of the AWS account associated with the repository." + returned: when state is present + type: str + sample: "268342293637" + arn: + description: "The Amazon Resource Name (ARN) of the repository." + returned: when state is present + type: str + sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username" + clone_url_http: + description: "The URL to use for cloning the repository over HTTPS." + returned: when state is present + type: str + sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame" + clone_url_ssh: + description: "The URL to use for cloning the repository over SSH." + returned: when state is present + type: str + sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame" + creation_date: + description: "The date and time the repository was created, in timestamp format." + returned: when state is present + type: str + sample: "2018-10-16T13:21:41.261000+09:00" + last_modified_date: + description: "The date and time the repository was last modified, in timestamp format." 
+ returned: when state is present + type: str + sample: "2018-10-16T13:21:41.261000+09:00" + repository_description: + description: "A comment or description about the repository." + returned: when state is present + type: str + sample: "test from ptux" + repository_id: + description: "The ID of the repository that was created or deleted" + returned: always + type: str + sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e" + repository_name: + description: "The repository's name." + returned: when state is present + type: str + sample: "reponame" + +response_metadata: + description: "Information about the response." + returned: always + type: complex + contains: + http_headers: + description: "http headers of http response" + returned: always + type: dict + http_status_code: + description: "http status code of http response" + returned: always + type: str + sample: "200" + request_id: + description: "http request id" + returned: always + type: str + sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef" + retry_attempts: + description: "numbers of retry attempts" + returned: always + type: str + sample: "0" +''' + +EXAMPLES = ''' +# Create a new repository +- aws_codecommit: + name: repo + state: present + +# Delete a repository +- aws_codecommit: + name: repo + state: absent +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +class CodeCommit(object): + def __init__(self, module=None): + self._module = module + self._client = self._module.client('codecommit') + self._check_mode = self._module.check_mode + + def process(self): + result = dict(changed=False) + + if self._module.params['state'] == 'present': + if not self._repository_exists(): + if not self._check_mode: + result = self._create_repository() + result['changed'] = True + else: + metadata = self._get_repository()['repositoryMetadata'] + if metadata['repositoryDescription'] != self._module.params['description']: + if not self._check_mode: + self._update_repository() + result['changed'] = True + result.update(self._get_repository()) + if self._module.params['state'] == 'absent' and self._repository_exists(): + if not self._check_mode: + result = self._delete_repository() + result['changed'] = True + return result + + def _repository_exists(self): + try: + paginator = self._client.get_paginator('list_repositories') + for page in paginator.paginate(): + repositories = page['repositories'] + for item in repositories: + if self._module.params['name'] in item.values(): + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="couldn't get repository") + return False + + def _get_repository(self): + try: + result = self._client.get_repository( + repositoryName=self._module.params['name'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="couldn't get repository") + return result + + def _update_repository(self): + try: + result = self._client.update_repository_description( + repositoryName=self._module.params['name'], + repositoryDescription=self._module.params['description'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="couldn't create repository") + return result + + def 
_create_repository(self): + try: + result = self._client.create_repository( + repositoryName=self._module.params['name'], + repositoryDescription=self._module.params['description'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="couldn't create repository") + return result + + def _delete_repository(self): + try: + result = self._client.delete_repository( + repositoryName=self._module.params['name'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="couldn't delete repository") + return result + + +def main(): + argument_spec = dict( + name=dict(required=True), + state=dict(choices=['present', 'absent'], required=True), + description=dict(default='', aliases=['comment']) + ) + + ansible_aws_module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + aws_codecommit = CodeCommit(module=ansible_aws_module) + result = aws_codecommit.process() + ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result)) + + +if __name__ == '__main__': + main() diff --git a/aws_codepipeline.py b/aws_codepipeline.py new file mode 100644 index 00000000000..703f905af20 --- /dev/null +++ b/aws_codepipeline.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_codepipeline +short_description: Create or delete AWS CodePipelines +notes: + - for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html) +description: + - Create or delete a CodePipeline on AWS. +author: + - Stefan Horning (@stefanhorning) +requirements: [ botocore, boto3 ] +options: + name: + description: + - Name of the pipeline + required: true + type: str + role_arn: + description: + - ARN of the IAM role to use when executing the pipeline + required: true + type: str + artifact_store: + description: + - Location information where artifacts are stored (on S3). Dictionary with fields type and location. + required: true + suboptions: + type: + description: + - Type of the artifacts storage (only 'S3' is currently supported). + type: str + location: + description: + - Bucket name for artifacts. + type: str + type: dict + stages: + description: + - List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage. + required: true + suboptions: + name: + description: + - Name of the stage (step) in the codepipeline + type: str + actions: + description: + - List of action configurations for that stage. + - 'See the boto3 documentation for full documentation of suboptions:' + - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)' + type: list + elements: dict + elements: dict + type: list + version: + description: + - Version number of the pipeline. This number is automatically incremented when a pipeline is updated. 
+ required: false + type: int + state: + description: + - Create or remove code pipeline + default: 'present' + choices: ['present', 'absent'] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) +- aws_codepipeline: + name: my_deploy_pipeline + role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service + artifact_store: + type: S3 + location: my_s3_codepipline_bucket + stages: + - name: Get_source + actions: + - + name: Git_pull + actionTypeId: + category: Source + owner: ThirdParty + provider: GitHub + version: '1' + outputArtifacts: + - { name: my-app-source } + configuration: + Owner: mediapeers + Repo: my_gh_repo + PollForSourceChanges: 'true' + Branch: master + # Generate token like this: + # https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html + # GH Link: https://github.com/settings/tokens + OAuthToken: 'abc123def456' + runOrder: 1 + - name: Build + actions: + - + name: CodeBuild + actionTypeId: + category: Build + owner: AWS + provider: CodeBuild + version: '1' + inputArtifacts: + - { name: my-app-source } + outputArtifacts: + - { name: my-app-build } + configuration: + # A project with that name needs to be setup on AWS CodeBuild already (use code_build module). + ProjectName: codebuild-project-name + runOrder: 1 + - name: ECS_deploy + actions: + - + name: ECS_deploy + actionTypeId: + category: Deploy + owner: AWS + provider: ECS + version: '1' + inputArtifacts: + - { name: vod-api-app-build } + configuration: + # an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module) + ClusterName: ecs-cluster-name + ServiceName: ecs-cluster-service-name + FileName: imagedefinitions.json + region: us-east-1 + state: present +''' + +RETURN = ''' +pipeline: + description: Returns the dictionary describing the code pipeline configuration. + returned: success + type: complex + contains: + name: + description: Name of the CodePipeline + returned: always + type: str + sample: my_deploy_pipeline + role_arn: + description: ARN of the IAM role attached to the code pipeline + returned: always + type: str + sample: arn:aws:iam::123123123:role/codepipeline-service-role + artifact_store: + description: Information about where the build artifacts are stored + returned: always + type: complex + contains: + type: + description: The type of the artifacts store, such as S3 + returned: always + type: str + sample: S3 + location: + description: The location of the artifacts storage (s3 bucket name) + returned: always + type: str + sample: my_s3_codepipline_bucket + encryption_key: + description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key. + returned: when configured + type: str + stages: + description: List of stages configured for this pipeline + returned: always + type: list + version: + description: The version number of the pipeline. This number is auto incremented when pipeline params are changed. 
+      returned: always
+      type: int
+'''
+
+import copy
+import traceback
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
+    pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
+    if version:
+        pipeline_dict['version'] = version
+    try:
+        resp = client.create_pipeline(pipeline=pipeline_dict)
+        return resp
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
+                         exception=traceback.format_exc())
+
+
+def update_pipeline(client, pipeline_dict, module):
+    try:
+        resp = client.update_pipeline(pipeline=pipeline_dict)
+        return resp
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
+                         exception=traceback.format_exc())
+
+
+def delete_pipeline(client, name, module):
+    try:
+        resp = client.delete_pipeline(name=name)
+        return resp
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
+                         exception=traceback.format_exc())
+
+
+def describe_pipeline(client, name, version, module):
+    pipeline = {}
+    try:
+        if version is not None:
+            pipeline = client.get_pipeline(name=name, version=version)
+        else:
+            pipeline = client.get_pipeline(name=name)
+        return pipeline
+    except is_boto3_error_code('PipelineNotFoundException'):
+        return pipeline
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=True, type='str'),
+        role_arn=dict(required=True, type='str'),
+        artifact_store=dict(required=True, type='dict'),
+        stages=dict(required=True, type='list'),
+        version=dict(type='int'),
+        state=dict(choices=['present', 'absent'], default='present')
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec)
+    client_conn = module.client('codepipeline')
+
+    state = module.params.get('state')
+    changed = False
+
+    # Determine if the CodePipeline exists
+    found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
+    pipeline_result = {}
+
+    if state == 'present':
+        if 'pipeline' in found_code_pipeline:
+            pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
+            # Update dictionary with provided module params:
+            pipeline_dict['roleArn'] = module.params['role_arn']
+            pipeline_dict['artifactStore'] = module.params['artifact_store']
+            pipeline_dict['stages'] = module.params['stages']
+            if module.params['version'] is not None:
+                pipeline_dict['version'] = module.params['version']
+
+            pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
+
+            if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
+                changed = True
+        else:
+            pipeline_result = create_pipeline(
+                client=client_conn,
+                name=module.params['name'],
+                role_arn=module.params['role_arn'],
+                artifact_store=module.params['artifact_store'],
+                stages=module.params['stages'],
+                version=module.params['version'],
+                module=module)
+            changed = True
+    elif state == 'absent':
+        if found_code_pipeline:
+            pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
+            changed = True
+
+    module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py
new file mode 100644
index 00000000000..be29aa1a3ad
--- /dev/null
+++ b/aws_config_aggregation_authorization.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_aggregation_authorization
+short_description: Manage cross-account AWS Config authorizations
+description:
+    - Module manages cross-account AWS Config aggregation authorizations.
+requirements: [ 'botocore', 'boto3' ]
+author:
+    - "Aaron Smith (@slapula)"
+options:
+  state:
+    description:
+      - Whether the aggregation authorization should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  authorized_account_id:
+    description:
+      - The 12-digit account ID of the account authorized to aggregate data.
+    type: str
+    required: true
+  authorized_aws_region:
+    description:
+      - The region authorized to collect aggregated data.
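+      - For example, C(us-east-1).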
+    type: str
+    required: true
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Get current account ID
+  aws_caller_info:
+  register: whoami
+- aws_config_aggregation_authorization:
+    state: present
+    authorized_account_id: '{{ whoami.account }}'
+    authorized_aws_region: us-east-1
+'''
+
+RETURN = '''#'''
+
+
+try:
+    import botocore
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry
+
+
+def resource_exists(client, module, params):
+    try:
+        current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+        authorization_exists = next(
+            (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+            None
+        )
+        if authorization_exists:
+            return True
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+        return False
+
+
+def create_resource(client, module, params, result):
+    try:
+        response = client.put_aggregation_authorization(
+            AuthorizedAccountId=params['AuthorizedAccountId'],
+            AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def update_resource(client, module, params, result):
+    current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+    current_params = next(
+        (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+        None
+    )
+
+    del current_params['AggregationAuthorizationArn']
+    del current_params['CreationTime']
+
+    if params != current_params:
+        try:
+            response = client.put_aggregation_authorization(
+                AuthorizedAccountId=params['AuthorizedAccountId'],
+                AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+            )
+            result['changed'] = True
+            return result
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't update AWS Aggregation authorization")
+
+
+def delete_resource(client, module, params, result):
+    try:
+        response = client.delete_aggregation_authorization(
+            AuthorizedAccountId=params['AuthorizedAccountId'],
+            AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
+
+
+def main():
+    module = AnsibleAWSModule(
+        argument_spec={
+            'state': dict(type='str', choices=['present', 'absent'], default='present'),
+            'authorized_account_id': dict(type='str', required=True),
+            'authorized_aws_region': dict(type='str', required=True),
+        },
+        supports_check_mode=False,
+    )
+
+    result = {'changed': False}
+
+    params = {
+        'AuthorizedAccountId': module.params.get('authorized_account_id'),
+        'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
+    }
+
+    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+    resource_status = resource_exists(client, module, params)
+
+    if module.params.get('state') == 'present':
+        if not resource_status:
+            create_resource(client, module, params, result)
+        else:
+            update_resource(client, module, params, result)
+
+    if module.params.get('state') == 'absent':
+        if resource_status:
+            delete_resource(client, module, params, result)
+
+    module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py
new file mode 100644
index 00000000000..065a5505a09
--- /dev/null
+++ b/aws_config_aggregator.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_aggregator
+short_description: Manage AWS Config aggregations across multiple accounts
+description:
+    - Module manages AWS Config aggregators.
+requirements: [ 'botocore', 'boto3' ]
+author:
+    - "Aaron Smith (@slapula)"
+options:
+  name:
+    description:
+      - The name of the AWS Config resource.
+    required: true
+    type: str
+  state:
+    description:
+      - Whether the Config aggregator should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  account_sources:
+    description:
+      - Provides a list of source accounts and regions to be aggregated.
+    suboptions:
+      account_ids:
+        description:
+          - A list of 12-digit account IDs of accounts being aggregated.
+        type: list
+        elements: str
+      aws_regions:
+        description:
+          - A list of source regions being aggregated.
+        type: list
+        elements: str
+      all_aws_regions:
+        description:
+          - If true, aggregate existing AWS Config regions and future regions.
+        type: bool
+    type: list
+    elements: dict
+    required: true
+  organization_source:
+    description:
+      - Provides an organization and a list of regions to be aggregated.
+    suboptions:
+      role_arn:
+        description:
+          - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
+        type: str
+      aws_regions:
+        description:
+          - The source regions being aggregated.
+        type: list
+        elements: str
+      all_aws_regions:
+        description:
+          - If true, aggregate existing AWS Config regions and future regions.
+        type: bool
+    type: dict
+    required: true
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create cross-account aggregator
+  aws_config_aggregator:
+    name: test_config_aggregator
+    state: present
+    account_sources:
+      - account_ids:
+          - 1234567890
+          - 0123456789
+          - 9012345678
+        all_aws_regions: yes
+'''
+
+RETURN = '''#'''
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+
+def resource_exists(client, module, params):
+    try:
+        aggregator = client.describe_configuration_aggregators(
+            ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+        )
+        return aggregator['ConfigurationAggregators'][0]
+    except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
+        return
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+    try:
+        client.put_configuration_aggregator(
+            ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+            AccountAggregationSources=params['AccountAggregationSources'],
+            OrganizationAggregationSource=params['OrganizationAggregationSource']
+        )
+        result['changed'] = True
+        result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def update_resource(client, module, params, result):
+    current_aggregator = client.describe_configuration_aggregators(
+        ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+    )['ConfigurationAggregators'][0]
+
+    del current_aggregator['ConfigurationAggregatorArn']
+    del current_aggregator['CreationTime']
+    del current_aggregator['LastUpdatedTime']
+
+    if params != current_aggregator:
+        try:
+            client.put_configuration_aggregator(
+                ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+                AccountAggregationSources=params['AccountAggregationSources'],
+                OrganizationAggregationSource=params['OrganizationAggregationSource']
+            )
+            result['changed'] = True
+            result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+            return result
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't update AWS Config configuration aggregator")
+
+
+def delete_resource(client, module, params, result):
+    try:
+        client.delete_configuration_aggregator(
+            ConfigurationAggregatorName=params['ConfigurationAggregatorName']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
+
+
+def main():
+    module = AnsibleAWSModule(
+        argument_spec={
+            'name': dict(type='str', required=True),
+            'state': dict(type='str', choices=['present', 'absent'], default='present'),
+            'account_sources': dict(type='list', required=True),
+            'organization_source': dict(type='dict', required=True)
+        },
+        supports_check_mode=False,
+    )
+
+    result = {
+        'changed': False
+    }
+
+    name = module.params.get('name')
+    state = module.params.get('state')
+
+    params = {}
+    if name:
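+        # Remap the snake_case module options onto the CamelCase parameter
+        # shape that boto3's put_configuration_aggregator call expects.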
+        params['ConfigurationAggregatorName'] = name
+    if module.params.get('account_sources'):
+        params['AccountAggregationSources'] = []
+        for i in module.params.get('account_sources'):
+            tmp_dict = {}
+            if i.get('account_ids'):
+                tmp_dict['AccountIds'] = i.get('account_ids')
+            if i.get('aws_regions'):
+                tmp_dict['AwsRegions'] = i.get('aws_regions')
+            if i.get('all_aws_regions') is not None:
+                tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
+            params['AccountAggregationSources'].append(tmp_dict)
+    if module.params.get('organization_source'):
+        params['OrganizationAggregationSource'] = {}
+        if module.params.get('organization_source').get('role_arn'):
+            params['OrganizationAggregationSource'].update({
+                'RoleArn': module.params.get('organization_source').get('role_arn')
+            })
+        if module.params.get('organization_source').get('aws_regions'):
+            params['OrganizationAggregationSource'].update({
+                'AwsRegions': module.params.get('organization_source').get('aws_regions')
+            })
+        if module.params.get('organization_source').get('all_aws_regions') is not None:
+            params['OrganizationAggregationSource'].update({
+                'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
+            })
+
+    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+    resource_status = resource_exists(client, module, params)
+
+    if state == 'present':
+        if not resource_status:
+            create_resource(client, module, params, result)
+        else:
+            update_resource(client, module, params, result)
+
+    if state == 'absent':
+        if resource_status:
+            delete_resource(client, module, params, result)
+
+    module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py
new file mode 100644
index 00000000000..54fdb6f7ede
--- /dev/null
+++ b/aws_config_delivery_channel.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_config_delivery_channel
+short_description: Manage AWS Config delivery channels
+description:
+    - This module manages AWS Config delivery locations for rule checks and configuration info.
+requirements: [ 'botocore', 'boto3' ]
+author:
+    - "Aaron Smith (@slapula)"
+options:
+  name:
+    description:
+      - The name of the AWS Config resource.
+    required: true
+    type: str
+  state:
+    description:
+      - Whether the delivery channel should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  s3_bucket:
+    description:
+      - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
+    type: str
+    required: true
+  s3_prefix:
+    description:
+      - The prefix for the specified Amazon S3 bucket.
+    type: str
+  sns_topic_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
+    type: str
+  delivery_frequency:
+    description:
+      - The frequency with which AWS Config delivers configuration snapshots.
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Create Delivery Channel for AWS Config + aws_config_delivery_channel: + name: test_delivery_channel + state: present + s3_bucket: 'test_aws_config_bucket' + sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' + delivery_frequency: 'Twelve_Hours' +''' + +RETURN = '''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry + + +# this waits for an IAM role to become fully available, at the cost of +# taking a long time to fail when the IAM role/policy really is invalid +retry_unavailable_iam_on_put_delivery = AWSRetry.backoff( + catch_extra_error_codes=['InsufficientDeliveryPolicyException'], +) + + +def resource_exists(client, module, params): + try: + channel = client.describe_delivery_channels( + DeliveryChannelNames=[params['name']], + aws_retry=True, + ) + return channel['DeliveryChannels'][0] + except is_boto3_error_code('NoSuchDeliveryChannelException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + retry_unavailable_iam_on_put_delivery( + client.put_delivery_channel, + )( + DeliveryChannel=params, + ) + result['changed'] = True + result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except is_boto3_error_code('InvalidS3KeyPrefixException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") + except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " + "Make sure the bucket exists and is available") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + + +def update_resource(client, module, params, result): + current_params = client.describe_delivery_channels( + DeliveryChannelNames=[params['name']], + aws_retry=True, + ) + + if params != current_params['DeliveryChannels'][0]: + try: + retry_unavailable_iam_on_put_delivery( + client.put_delivery_channel, + )( + DeliveryChannel=params, + ) + result['changed'] = True + result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except is_boto3_error_code('InvalidS3KeyPrefixException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") + except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
" + "Make sure the bucket exists and is available") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_delivery_channel( + DeliveryChannelName=params['name'] + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 's3_bucket': dict(type='str', required=True), + 's3_prefix': dict(type='str'), + 'sns_topic_arn': dict(type='str'), + 'delivery_frequency': dict( + type='str', + choices=[ + 'One_Hour', + 'Three_Hours', + 'Six_Hours', + 'Twelve_Hours', + 'TwentyFour_Hours' + ] + ), + }, + supports_check_mode=False, + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + state = module.params.get('state') + + params = {} + if name: + params['name'] = name + if module.params.get('s3_bucket'): + params['s3BucketName'] = module.params.get('s3_bucket') + if module.params.get('s3_prefix'): + params['s3KeyPrefix'] = module.params.get('s3_prefix') + if module.params.get('sns_topic_arn'): + params['snsTopicARN'] = module.params.get('sns_topic_arn') + if module.params.get('delivery_frequency'): + params['configSnapshotDeliveryProperties'] = { + 'deliveryFrequency': module.params.get('delivery_frequency') + } + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + resource_status = resource_exists(client, module, params) + + if state == 'present': + if not resource_status: + create_resource(client, module, params, result) + if resource_status: + update_resource(client, module, params, result) + + if state == 'absent': + if resource_status: + delete_resource(client, module, params, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/aws_config_recorder.py b/aws_config_recorder.py new file mode 100644 index 00000000000..7ba1b0db535 --- /dev/null +++ b/aws_config_recorder.py @@ -0,0 +1,213 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_config_recorder +short_description: Manage AWS Config Recorders +description: + - Module manages AWS Config configuration recorder settings. +requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + name: + description: + - The name of the AWS Config resource. + required: true + type: str + state: + description: + - Whether the Config rule should be present or absent. + default: present + choices: ['present', 'absent'] + type: str + role_arn: + description: + - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account. + - Required when I(state=present). + type: str + recording_group: + description: + - Specifies the types of AWS resources for which AWS Config records configuration changes. 
+ - Required when I(state=present) + suboptions: + all_supported: + description: + - Specifies whether AWS Config records configuration changes for every supported type of regional resource. + - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts + recording resources of that type automatically. + - If I(all_supported=true), you cannot enumerate a list of I(resource_types). + include_global_types: + description: + - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) + with the resources that it records. + - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, + you should consider customizing AWS Config in only one region to record global resources. + - If you set I(include_global_types=true), you must also set I(all_supported=true). + - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording + resources of that type automatically. + resource_types: + description: + - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, + C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)). + - Before you can set this option, you must set I(all_supported=false). + type: dict +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Create Configuration Recorder for AWS Config + aws_config_recorder: + name: test_configuration_recorder + state: present + role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' + recording_group: + all_supported: true + include_global_types: true +''' + +RETURN = '''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry + + +def resource_exists(client, module, params): + try: + recorder = client.describe_configuration_recorders( + ConfigurationRecorderNames=[params['name']] + ) + return recorder['ConfigurationRecorders'][0] + except is_boto3_error_code('NoSuchConfigurationRecorderException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + response = client.put_configuration_recorder( + ConfigurationRecorder=params + ) + result['changed'] = True + result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") + + +def update_resource(client, module, params, result): + current_params = client.describe_configuration_recorders( + ConfigurationRecorderNames=[params['name']] + ) + + if params != current_params['ConfigurationRecorders'][0]: + try: + response = client.put_configuration_recorder( + ConfigurationRecorder=params + ) + result['changed'] = True + result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as 
e: + module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_configuration_recorder( + ConfigurationRecorderName=params['name'] + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") + + +def main(): + + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 'role_arn': dict(type='str'), + 'recording_group': dict(type='dict'), + }, + supports_check_mode=False, + required_if=[ + ('state', 'present', ['role_arn', 'recording_group']), + ], + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + state = module.params.get('state') + + params = {} + if name: + params['name'] = name + if module.params.get('role_arn'): + params['roleARN'] = module.params.get('role_arn') + if module.params.get('recording_group'): + params['recordingGroup'] = {} + if module.params.get('recording_group').get('all_supported') is not None: + params['recordingGroup'].update({ + 'allSupported': module.params.get('recording_group').get('all_supported') + }) + if module.params.get('recording_group').get('include_global_types') is not None: + params['recordingGroup'].update({ + 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types') + }) + if module.params.get('recording_group').get('resource_types'): + params['recordingGroup'].update({ + 'resourceTypes': module.params.get('recording_group').get('resource_types') + }) + else: + params['recordingGroup'].update({ + 'resourceTypes': [] + }) + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + resource_status = resource_exists(client, module, params) + + if state == 'present': + if not resource_status: + create_resource(client, module, params, result) + if resource_status: + update_resource(client, module, params, result) + + if state == 'absent': + if resource_status: + delete_resource(client, module, params, result) + + module.exit_json(changed=result['changed']) + + +if __name__ == '__main__': + main() diff --git a/aws_config_rule.py b/aws_config_rule.py new file mode 100644 index 00000000000..d3eed699cab --- /dev/null +++ b/aws_config_rule.py @@ -0,0 +1,275 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_config_rule +short_description: Manage AWS Config resources +description: + - Module manages AWS Config rules +requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + name: + description: + - The name of the AWS Config resource. + required: true + type: str + state: + description: + - Whether the Config rule should be present or absent. + default: present + choices: ['present', 'absent'] + type: str + description: + description: + - The description that you provide for the AWS Config rule. + type: str + scope: + description: + - Defines which resources can trigger an evaluation for the rule. 
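+      - "Example: { compliance_types: ['AWS::S3::Bucket'] }"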
+    suboptions:
+      compliance_types:
+        description:
+          - The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
+            You can only specify one type if you also specify a resource ID for I(compliance_id).
+      compliance_id:
+        description:
+          - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
+            you must specify one resource type for I(compliance_types).
+      tag_key:
+        description:
+          - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
+      tag_value:
+        description:
+          - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
+            If you specify a value for I(tag_value), you must also specify a value for I(tag_key).
+    type: dict
+  source:
+    description:
+      - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
+        evaluate your AWS resources.
+    suboptions:
+      owner:
+        description:
+          - Indicates whether AWS or the customer owns and manages the AWS Config rule.
+            Use C(AWS) for AWS managed rules and C(CUSTOM_LAMBDA) for custom rules.
+      identifier:
+        description:
+          - For AWS managed rules, the predefined identifier of the rule, for example C(IAM_PASSWORD_POLICY).
+            For custom rules, the Amazon Resource Name (ARN) of the rule's AWS Lambda function.
+      details:
+        description:
+          - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
+          - This parameter expects a list of dictionaries.  Each dictionary expects the following key/value pairs.
+          - Key C(EventSource) - The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
+          - Key C(MessageType) - The type of notification that triggers AWS Config to run an evaluation for a rule.
+          - Key C(MaximumExecutionFrequency) - The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
+          - "Example: { EventSource: 'aws.config', MessageType: 'ScheduledNotification', MaximumExecutionFrequency: 'Twelve_Hours' }"
+    type: dict
+    required: true
+  input_parameters:
+    description:
+      - A string, in JSON format, that is passed to the AWS Config rule Lambda function.
+    type: str
+  execution_frequency:
+    description:
+      - The maximum frequency with which AWS Config runs evaluations for a rule.
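+      - For example, C(Six_Hours) causes the rule to be evaluated every six hours.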
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Create Config Rule for AWS Config + aws_config_rule: + name: test_config_rule + state: present + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' + +''' + +RETURN = '''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict + + +def rule_exists(client, module, params): + try: + rule = client.describe_config_rules( + ConfigRuleNames=[params['ConfigRuleName']], + aws_retry=True, + ) + return rule['ConfigRules'][0] + except is_boto3_error_code('NoSuchConfigRuleException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + client.put_config_rule( + ConfigRule=params + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config rule") + + +def update_resource(client, module, params, result): + current_params = client.describe_config_rules( + ConfigRuleNames=[params['ConfigRuleName']], + aws_retry=True, + ) + + del current_params['ConfigRules'][0]['ConfigRuleArn'] + del current_params['ConfigRules'][0]['ConfigRuleId'] + + if params != current_params['ConfigRules'][0]: + try: + client.put_config_rule( + ConfigRule=params + ) + result['changed'] = True + result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config rule") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_config_rule( + ConfigRuleName=params['ConfigRuleName'], + aws_retry=True, + ) + result['changed'] = True + result['rule'] = {} + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 'description': dict(type='str'), + 'scope': dict(type='dict'), + 'source': dict(type='dict', required=True), + 'input_parameters': dict(type='str'), + 'execution_frequency': dict( + type='str', + choices=[ + 'One_Hour', + 'Three_Hours', + 'Six_Hours', + 'Twelve_Hours', + 'TwentyFour_Hours' + ] + ), + }, + supports_check_mode=False, + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + resource_type = module.params.get('resource_type') + state = module.params.get('state') + + params = {} + if name: + params['ConfigRuleName'] = name + if module.params.get('description'): + params['Description'] = module.params.get('description') + if 
module.params.get('scope'): + params['Scope'] = {} + if module.params.get('scope').get('compliance_types'): + params['Scope'].update({ + 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types') + }) + if module.params.get('scope').get('tag_key'): + params['Scope'].update({ + 'TagKey': module.params.get('scope').get('tag_key') + }) + if module.params.get('scope').get('tag_value'): + params['Scope'].update({ + 'TagValue': module.params.get('scope').get('tag_value') + }) + if module.params.get('scope').get('compliance_id'): + params['Scope'].update({ + 'ComplianceResourceId': module.params.get('scope').get('compliance_id') + }) + if module.params.get('source'): + params['Source'] = {} + if module.params.get('source').get('owner'): + params['Source'].update({ + 'Owner': module.params.get('source').get('owner') + }) + if module.params.get('source').get('identifier'): + params['Source'].update({ + 'SourceIdentifier': module.params.get('source').get('identifier') + }) + if module.params.get('source').get('details'): + params['Source'].update({ + 'SourceDetails': module.params.get('source').get('details') + }) + if module.params.get('input_parameters'): + params['InputParameters'] = module.params.get('input_parameters') + if module.params.get('execution_frequency'): + params['MaximumExecutionFrequency'] = module.params.get('execution_frequency') + params['ConfigRuleState'] = 'ACTIVE' + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + existing_rule = rule_exists(client, module, params) + + if state == 'present': + if not existing_rule: + create_resource(client, module, params, result) + else: + update_resource(client, module, params, result) + + if state == 'absent': + if existing_rule: + delete_resource(client, module, params, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py new file mode 100644 index 00000000000..2dd6c839934 --- /dev/null +++ b/aws_direct_connect_connection.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_direct_connect_connection +short_description: Creates, deletes, modifies a DirectConnect connection +description: + - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location. + Upon creation the connection may be added to a link aggregation group or established as a standalone connection. + The connection may later be associated or disassociated with a link aggregation group. +author: "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 + - botocore +options: + state: + description: + - The state of the Direct Connect connection. + choices: + - present + - absent + type: str + required: true + name: + description: + - The name of the Direct Connect connection. This is required to create a + new connection. + - One of I(connection_id) or I(name) must be specified. + type: str + connection_id: + description: + - The ID of the Direct Connect connection. 
+      - Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID.
+      - One of I(connection_id) or I(name) must be specified.
+    type: str
+  location:
+    description:
+      - Where the Direct Connect connection is located.
+      - Required when I(state=present).
+    type: str
+  bandwidth:
+    description:
+      - The bandwidth of the Direct Connect connection.
+      - Required when I(state=present).
+    choices:
+      - 1Gbps
+      - 10Gbps
+    type: str
+  link_aggregation_group:
+    description:
+      - The ID of the link aggregation group you want to associate with the connection.
+      - This is optional when a stand-alone connection is desired.
+    type: str
+  forced_update:
+    description:
+      - To modify bandwidth or location the connection will need to be deleted and recreated.
+        By default this will not happen - this option must be set to True.
+    type: bool
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect connection
+- aws_direct_connect_connection:
+    name: ansible-test-connection
+    state: present
+    location: EqDC2
+    link_aggregation_group: dxlag-xxxxxxxx
+    bandwidth: 1Gbps
+  register: dc
+
+# disassociate the LAG from the connection
+- aws_direct_connect_connection:
+    state: present
+    connection_id: "{{ dc.connection.connection_id }}"
+    location: EqDC2
+    bandwidth: 1Gbps
+
+# replace the connection with one with more bandwidth
+- aws_direct_connect_connection:
+    state: present
+    name: ansible-test-connection
+    location: EqDC2
+    bandwidth: 10Gbps
+    forced_update: True
+
+# delete the connection
+- aws_direct_connect_connection:
+    state: absent
+    name: ansible-test-connection
+"""
+
+RETURN = """
+connection:
+  description: The attributes of the direct connect connection.
+  type: complex
+  returned: I(state=present)
+  contains:
+    aws_device:
+      description: The endpoint which the physical connection terminates on.
+      returned: when the requested state is no longer 'requested'
+      type: str
+      sample: EqDC2-12pmo7hemtz1z
+    bandwidth:
+      description: The bandwidth of the connection.
+      returned: always
+      type: str
+      sample: 1Gbps
+    connection_id:
+      description: The ID of the connection.
+      returned: always
+      type: str
+      sample: dxcon-ffy9ywed
+    connection_name:
+      description: The name of the connection.
+      returned: always
+      type: str
+      sample: ansible-test-connection
+    connection_state:
+      description: The state of the connection.
+      returned: always
+      type: str
+      sample: pending
+    loa_issue_time:
+      description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
+      returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
+      type: str
+      sample: '2018-03-20T17:36:26-04:00'
+    location:
+      description: The location of the connection.
+      returned: always
+      type: str
+      sample: EqDC2
+    owner_account:
+      description: The account that owns the direct connect connection.
+      returned: always
+      type: str
+      sample: '123456789012'
+    region:
+      description: The region in which the connection exists.
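+# Illustrative note: with "register: dc" as in the first example above, later
+# tasks can reference these fields, e.g. "{{ dc.connection.connection_id }}".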
+ returned: always + type: str + sample: us-east-1 +""" + +import traceback +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) +from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, delete_connection, + associate_connection_and_lag, disassociate_connection_and_lag) + +try: + from botocore.exceptions import BotoCoreError, ClientError +except Exception: + pass + # handled by imported AnsibleAWSModule + +retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} + + +def connection_status(client, connection_id): + return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False) + + +def connection_exists(client, connection_id=None, connection_name=None, verify=True): + params = {} + if connection_id: + params['connectionId'] = connection_id + try: + response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params) + except (BotoCoreError, ClientError) as e: + if connection_id: + msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + else: + msg = "Failed to describe DirectConnect connections" + raise DirectConnectError(msg=msg, + last_traceback=traceback.format_exc(), + exception=e) + + match = [] + connection = [] + + # look for matching connections + + if len(response.get('connections', [])) == 1 and connection_id: + if response['connections'][0]['connectionState'] != 'deleted': + match.append(response['connections'][0]['connectionId']) + connection.extend(response['connections']) + + for conn in response.get('connections', []): + if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': + match.append(conn['connectionId']) + connection.append(conn) + + # verifying if the connections exists; if true, return connection identifier, otherwise return False + if verify and len(match) == 1: + return match[0] + elif verify: + return False + # not verifying if the connection exists; just return current connection info + elif len(connection) == 1: + return {'connection': connection[0]} + return {'connection': {}} + + +def create_connection(client, location, bandwidth, name, lag_id): + if not name: + raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.") + params = { + 'location': location, + 'bandwidth': bandwidth, + 'connectionName': name, + } + if lag_id: + params['lagId'] = lag_id + + try: + connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params) + except (BotoCoreError, ClientError) as e: + raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e) + return connection['connectionId'] + + +def changed_properties(current_status, location, bandwidth): + current_bandwidth = current_status['bandwidth'] + current_location = current_status['location'] + + return current_bandwidth != bandwidth or current_location != location + + +@AWSRetry.backoff(**retry_params) +def update_associations(client, latest_state, connection_id, lag_id): + changed = False + if 'lagId' in latest_state and lag_id != latest_state['lagId']: + disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId']) + changed = True + if (changed and lag_id) or (lag_id and 'lagId' not in latest_state): + 
associate_connection_and_lag(client, connection_id, lag_id) + changed = True + return changed + + +def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update): + # the connection is found; get the latest state and see if it needs to be updated + if connection_id: + latest_state = connection_status(client, connection_id=connection_id)['connection'] + if changed_properties(latest_state, location, bandwidth) and forced_update: + ensure_absent(client, connection_id) + return ensure_present(client=client, + connection_id=None, + connection_name=connection_name, + location=location, + bandwidth=bandwidth, + lag_id=lag_id, + forced_update=forced_update) + elif update_associations(client, latest_state, connection_id, lag_id): + return True, connection_id + + # no connection found; create a new one + else: + return True, create_connection(client, location, bandwidth, connection_name, lag_id) + + return False, connection_id + + +@AWSRetry.backoff(**retry_params) +def ensure_absent(client, connection_id): + changed = False + if connection_id: + delete_connection(client, connection_id) + changed = True + + return changed + + +def main(): + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(), + location=dict(), + bandwidth=dict(choices=['1Gbps', '10Gbps']), + link_aggregation_group=dict(), + connection_id=dict(), + forced_update=dict(type='bool', default=False) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[('connection_id', 'name')], + required_if=[('state', 'present', ('location', 'bandwidth'))] + ) + + connection = module.client('directconnect') + + state = module.params.get('state') + try: + connection_id = connection_exists( + connection, + connection_id=module.params.get('connection_id'), + connection_name=module.params.get('name') + ) + if not connection_id and module.params.get('connection_id'): + module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id'))) + + if state == 'present': + changed, connection_id = ensure_present(connection, + connection_id=connection_id, + connection_name=module.params.get('name'), + location=module.params.get('location'), + bandwidth=module.params.get('bandwidth'), + lag_id=module.params.get('link_aggregation_group'), + forced_update=module.params.get('forced_update')) + response = connection_status(connection, connection_id) + elif state == 'absent': + changed = ensure_absent(connection, connection_id) + response = {} + except DirectConnectError as e: + if e.last_traceback: + module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) + else: + module.fail_json(msg=e.msg) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + +if __name__ == '__main__': + main() diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py new file mode 100644 index 00000000000..6918c125de3 --- /dev/null +++ b/aws_direct_connect_gateway.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: aws_direct_connect_gateway +author: Gobin Sougrakpam (@gobins) +short_description: Manage AWS 
Direct Connect gateway
+description:
+  - Creates AWS Direct Connect Gateway.
+  - Deletes AWS Direct Connect Gateway.
+  - Attaches Virtual Gateways to Direct Connect Gateway.
+  - Detaches Virtual Gateways from Direct Connect Gateway.
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements: [ boto3 ]
+options:
+  state:
+    description:
+      - Set I(state=present) to ensure a resource is created.
+      - Set I(state=absent) to remove a resource.
+    default: present
+    choices: ["present", "absent"]
+    type: str
+  name:
+    description:
+      - Name of the Direct Connect Gateway to be created or deleted.
+    type: str
+  amazon_asn:
+    description:
+      - The Amazon side ASN.
+      - Required when I(state=present).
+    type: str
+  direct_connect_gateway_id:
+    description:
+      - The ID of an existing Direct Connect Gateway.
+      - Required when I(state=absent).
+    type: str
+  virtual_gateway_id:
+    description:
+      - The VPN gateway ID of an existing virtual gateway.
+    type: str
+  wait_timeout:
+    description:
+      - How long to wait for the association to be deleted.
+    type: int
+    default: 320
+'''
+
+EXAMPLES = '''
+- name: Create a new direct connect gateway attached to virtual private gateway
+  aws_direct_connect_gateway:
+    state: present
+    name: my-dx-gateway
+    amazon_asn: 7224
+    virtual_gateway_id: vgw-12345
+  register: created_dxgw
+
+- name: Create a new unattached dxgw
+  aws_direct_connect_gateway:
+    state: present
+    name: my-dx-gateway
+    amazon_asn: 7224
+  register: created_dxgw
+
+'''
+
+RETURN = '''
+result:
+  description:
+    - The attributes of the Direct Connect Gateway.
+  type: complex
+  returned: I(state=present)
+  contains:
+    amazon_side_asn:
+      description: ASN on the Amazon side.
+      type: str
+    direct_connect_gateway_id:
+      description: The ID of the direct connect gateway.
+      type: str
+    direct_connect_gateway_name:
+      description: The name of the direct connect gateway.
+      type: str
+    direct_connect_gateway_state:
+      description: The state of the direct connect gateway.
+      type: str
+    owner_account:
+      description: The AWS account ID of the owner of the direct connect gateway.
+ type: str +''' + +import time +import traceback + +try: + import botocore + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, + get_aws_connection_info, boto3_conn) +from ansible.module_utils._text import to_native + + +def dx_gateway_info(client, gateway_id, module): + try: + resp = client.describe_direct_connect_gateways( + directConnectGatewayId=gateway_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + if resp['directConnectGateways']: + return resp['directConnectGateways'][0] + + +def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): + polling_increment_secs = 15 + max_retries = 3 + status_achieved = False + + for x in range(0, max_retries): + try: + response = check_dxgw_association( + client, + module, + gateway_id=gateway_id, + virtual_gateway_id=virtual_gateway_id) + if response['directConnectGatewayAssociations']: + if response['directConnectGatewayAssociations'][0]['associationState'] == status: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + else: + status_achieved = True + break + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return status_achieved, result + + +def associate_direct_connect_gateway(client, module, gateway_id): + params = dict() + params['virtual_gateway_id'] = module.params.get('virtual_gateway_id') + try: + response = client.create_direct_connect_gateway_association( + directConnectGatewayId=gateway_id, + virtualGatewayId=params['virtual_gateway_id']) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating') + if not status_achieved: + module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console') + + result = response + return result + + +def delete_association(client, module, gateway_id, virtual_gateway_id): + try: + response = client.delete_direct_connect_gateway_association( + directConnectGatewayId=gateway_id, + virtualGatewayId=virtual_gateway_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating') + if not status_achieved: + module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console') + + result = response + return result + + +def create_dx_gateway(client, module): + params = dict() + params['name'] = module.params.get('name') + params['amazon_asn'] = module.params.get('amazon_asn') + try: + response = client.create_direct_connect_gateway( + directConnectGatewayName=params['name'], + amazonSideAsn=int(params['amazon_asn'])) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return result + + +def find_dx_gateway(client, module, gateway_id=None): + params = dict() + 
    gateways = list()
+    if gateway_id is not None:
+        params['directConnectGatewayId'] = gateway_id
+    while True:
+        try:
+            resp = client.describe_direct_connect_gateways(**params)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        gateways.extend(resp['directConnectGateways'])
+        if 'nextToken' in resp:
+            params['nextToken'] = resp['nextToken']
+        else:
+            break
+    if gateways != []:
+        for gateway in gateways:
+            # when an explicit gateway_id was given, describe_direct_connect_gateways
+            # already filtered on it, so the result is a match regardless of name
+            if gateway_id is not None or module.params.get('name') == gateway['directConnectGatewayName']:
+                return gateway
+    return None
+
+
+def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
+    try:
+        if virtual_gateway_id is None:
+            resp = client.describe_direct_connect_gateway_associations(
+                directConnectGatewayId=gateway_id
+            )
+        else:
+            resp = client.describe_direct_connect_gateway_associations(
+                directConnectGatewayId=gateway_id,
+                virtualGatewayId=virtual_gateway_id,
+            )
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    return resp
+
+
+def ensure_present(client, module):
+    # If an existing direct connect gateway matches our args
+    # then a match is considered to have been found and we will not create another dxgw.
+
+    changed = False
+    params = dict()
+    result = dict()
+    params['name'] = module.params.get('name')
+    params['amazon_asn'] = module.params.get('amazon_asn')
+    params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+
+    # check if a gateway matching our module args already exists
+    existing_dxgw = find_dx_gateway(client, module)
+
+    if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
+        gateway_id = existing_dxgw['directConnectGatewayId']
+        # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
+        if params['virtual_gateway_id']:
+            resp = check_dxgw_association(
+                client,
+                module,
+                gateway_id=gateway_id,
+                virtual_gateway_id=params['virtual_gateway_id'])
+            if not resp["directConnectGatewayAssociations"]:
+                # attach the dxgw to the supplied virtual_gateway_id
+                associate_direct_connect_gateway(client, module, gateway_id)
+                changed = True
+        # if params['virtual_gateway_id'] is not provided, check whether the dxgw is attached to a VPG. If so, detach it.
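+        # For illustration, the association check used throughout this module
+        # keys off the describe_direct_connect_gateway_associations response,
+        # which looks roughly like (abridged; values are placeholders):
+        #   {'directConnectGatewayAssociations': [
+        #       {'virtualGatewayId': 'vgw-12345678',
+        #        'associationState': 'associated'}]}
+        # An empty associations list means no VGW is attached to the DXGW.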
+        else:
+            resp = check_dxgw_association(client, module, gateway_id=gateway_id)
+            if resp["directConnectGatewayAssociations"]:
+                for association in resp['directConnectGatewayAssociations']:
+                    if association['associationState'] not in ['disassociating', 'disassociated']:
+                        delete_association(
+                            client,
+                            module,
+                            gateway_id=gateway_id,
+                            virtual_gateway_id=association['virtualGatewayId'])
+                        changed = True
+    else:
+        # create a new dxgw
+        new_dxgw = create_dx_gateway(client, module)
+        changed = True
+        gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
+
+        # if a virtual_gateway_id was supplied, attempt to attach it to the dxgw
+        if params['virtual_gateway_id']:
+            associate_direct_connect_gateway(client, module, gateway_id)
+            resp = check_dxgw_association(client,
+                                          module,
+                                          gateway_id=gateway_id
+                                          )
+            if resp["directConnectGatewayAssociations"]:
+                changed = True
+
+    result = dx_gateway_info(client, gateway_id, module)
+    return changed, result
+
+
+def ensure_absent(client, module):
+    # Delete the Direct Connect Gateway if it exists, first detaching any
+    # virtual gateways that are still associated with it.
+
+    changed = False
+    result = dict()
+    dx_gateway_id = module.params.get('direct_connect_gateway_id')
+    existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
+    if existing_dxgw is not None:
+        resp = check_dxgw_association(client, module,
+                                      gateway_id=dx_gateway_id)
+        if resp["directConnectGatewayAssociations"]:
+            for association in resp['directConnectGatewayAssociations']:
+                if association['associationState'] not in ['disassociating', 'disassociated']:
+                    delete_association(client, module,
+                                       gateway_id=dx_gateway_id,
+                                       virtual_gateway_id=association['virtualGatewayId'])
+        # wait for the associations to be deleted
+        timeout = time.time() + module.params.get('wait_timeout')
+        while time.time() < timeout:
+            resp = check_dxgw_association(client,
+                                          module,
+                                          gateway_id=dx_gateway_id)
+            if resp["directConnectGatewayAssociations"] != []:
+                time.sleep(15)
+            else:
+                break
+
+        try:
+            resp = client.delete_direct_connect_gateway(
+                directConnectGatewayId=dx_gateway_id
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        result = resp['directConnectGateway']
+        changed = True
+    return changed
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']),
+                              name=dict(),
+                              amazon_asn=dict(),
+                              virtual_gateway_id=dict(),
+                              direct_connect_gateway_id=dict(),
+                              wait_timeout=dict(type='int', default=320)))
+    required_if = [('state', 'present', ['name', 'amazon_asn']),
+                   ('state', 'absent', ['direct_connect_gateway_id'])]
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_if=required_if)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required for this module')
+
+    state = module.params.get('state')
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+    client = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+
+    if state == 'present':
+        (changed, results) = ensure_present(client, module)
+    elif state == 'absent':
+        changed = ensure_absent(client, module)
+        results = {}
+
+    module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py
new file mode 100644
index 00000000000..2e4b34d0ca7
--- /dev/null
+++ b/aws_direct_connect_link_aggregation_group.py
@@ -0,0 +1,470 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_direct_connect_link_aggregation_group
+short_description: Manage Direct Connect LAG bundles
+description:
+  - Create, delete, or modify a Direct Connect link aggregation group.
+author: "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements:
+  - boto3
+  - botocore
+options:
+  state:
+    description:
+      - The state of the Direct Connect link aggregation group.
+    choices:
+      - present
+      - absent
+    type: str
+    required: true
+  name:
+    description:
+      - The name of the Direct Connect link aggregation group.
+    type: str
+  link_aggregation_group_id:
+    description:
+      - The ID of the Direct Connect link aggregation group.
+    type: str
+  num_connections:
+    description:
+      - The number of connections with which to initialize the link aggregation group.
+    type: int
+  min_links:
+    description:
+      - The minimum number of physical connections that must be operational for the LAG itself to be operational.
+    type: int
+  location:
+    description:
+      - The location of the link aggregation group.
+    type: str
+  bandwidth:
+    description:
+      - The bandwidth of the link aggregation group.
+    type: str
+  force_delete:
+    description:
+      - This allows the minimum number of links to be set to 0, any hosted connections disassociated,
+        and any virtual interfaces associated with the LAG deleted.
+    type: bool
+  connection_id:
+    description:
+      - A connection ID to link with the link aggregation group upon creation.
+    type: str
+  delete_with_disassociation:
+    description:
+      - To be used with I(state=absent) to delete connections after disassociating them from the LAG.
+    type: bool
+  wait:
+    description:
+      - Whether or not to wait for the operation to complete.
+      - May be useful when waiting for virtual interfaces to be deleted.
+      - The time to wait can be controlled by setting I(wait_timeout).
+    type: bool
+  wait_timeout:
+    description:
+      - The duration in seconds to wait if I(wait=true).
+    default: 120
+    type: int
+'''
+
+EXAMPLES = """
+
+# create a Direct Connect link aggregation group
+- aws_direct_connect_link_aggregation_group:
+    state: present
+    name: ansible-test-lag
+    location: EqDC2
+    num_connections: 1
+    bandwidth: 1Gbps
+
+"""
+
+RETURN = """
+changed:
+  type: bool
+  description: Whether or not the LAG has changed.
+  returned: always
+aws_device:
+  type: str
+  description: The AWS Direct Connection endpoint that hosts the LAG.
+  sample: "EqSe2-1bwfvazist2k0"
+  returned: when I(state=present)
+connections:
+  type: list
+  description: A list of connections bundled by this LAG.
+ sample: + "connections": [ + { + "aws_device": "EqSe2-1bwfvazist2k0", + "bandwidth": "1Gbps", + "connection_id": "dxcon-fgzjah5a", + "connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h", + "connection_state": "down", + "lag_id": "dxlag-fgnsp4rq", + "location": "EqSe2", + "owner_account": "448830907657", + "region": "us-west-2" + } + ] + returned: when I(state=present) +connections_bandwidth: + type: str + description: The individual bandwidth of the physical connections bundled by the LAG. + sample: "1Gbps" + returned: when I(state=present) +lag_id: + type: str + description: Unique identifier for the link aggregation group. + sample: "dxlag-fgnsp4rq" + returned: when I(state=present) +lag_name: + type: str + description: User-provided name for the link aggregation group. + returned: when I(state=present) +lag_state: + type: str + description: State of the LAG. + sample: "pending" + returned: when I(state=present) +location: + type: str + description: Where the connection is located. + sample: "EqSe2" + returned: when I(state=present) +minimum_links: + type: int + description: The minimum number of physical connections that must be operational for the LAG itself to be operational. + returned: when I(state=present) +number_of_connections: + type: int + description: The number of physical connections bundled by the LAG. + returned: when I(state=present) +owner_account: + type: str + description: Owner account ID of the LAG. + returned: when I(state=present) +region: + type: str + description: The region in which the LAG exists. + returned: when I(state=present) +""" + +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3, + get_aws_connection_info, boto3_conn, AWSRetry) +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, + delete_connection, + delete_virtual_interface, + disassociate_connection_and_lag) +import traceback +import time + +try: + import botocore +except Exception: + pass + # handled by imported HAS_BOTO3 + + +def lag_status(client, lag_id): + return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False) + + +def lag_exists(client, lag_id=None, lag_name=None, verify=True): + """ If verify=True, returns the LAG ID or None + If verify=False, returns the LAG's data (or an empty dict) + """ + try: + if lag_id: + response = client.describe_lags(lagId=lag_id) + else: + response = client.describe_lags() + except botocore.exceptions.ClientError as e: + if lag_id and verify: + return False + elif lag_id: + return {} + else: + failed_op = "Failed to describe DirectConnect link aggregation groups." 
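+            # Only failures of the unfiltered describe_lags call reach this
+            # raise; a ClientError for a specific lagId was already mapped
+            # above to False (when verifying) or {} (when fetching status).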
+ raise DirectConnectError(msg=failed_op, + last_traceback=traceback.format_exc(), + exception=e) + + match = [] # List of LAG IDs that are exact matches + lag = [] # List of LAG data that are exact matches + + # look for matching connections + if len(response.get('lags', [])) == 1 and lag_id: + if response['lags'][0]['lagState'] != 'deleted': + match.append(response['lags'][0]['lagId']) + lag.append(response['lags'][0]) + else: + for each in response.get('lags', []): + if each['lagState'] != 'deleted': + if not lag_id: + if lag_name == each['lagName']: + match.append(each['lagId']) + else: + match.append(each['lagId']) + + # verifying if the connections exists; if true, return connection identifier, otherwise return False + if verify and len(match) == 1: + return match[0] + elif verify: + return False + + # not verifying if the connection exists; just return current connection info + else: + if len(lag) == 1: + return lag[0] + else: + return {} + + +def create_lag(client, num_connections, location, bandwidth, name, connection_id): + if not name: + raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.", + last_traceback=None, + exception="") + + parameters = dict(numberOfConnections=num_connections, + location=location, + connectionsBandwidth=bandwidth, + lagName=name) + if connection_id: + parameters.update(connectionId=connection_id) + try: + lag = client.create_lag(**parameters) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e) + + return lag['lagId'] + + +def delete_lag(client, lag_id): + try: + client.delete_lag(lagId=lag_id) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e) + + +@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +def _update_lag(client, lag_id, lag_name, min_links): + params = {} + if min_links: + params.update(minimumLinks=min_links) + if lag_name: + params.update(lagName=lag_name) + + client.update_lag(lagId=lag_id, **params) + + +def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout): + start = time.time() + + if min_links and min_links > num_connections: + raise DirectConnectError( + msg="The number of connections {0} must be greater than the minimum number of links " + "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + last_traceback=None, + exception=None + ) + + while True: + try: + _update_lag(client, lag_id, lag_name, min_links) + except botocore.exceptions.ClientError as e: + if wait and time.time() - start <= wait_timeout: + continue + msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) + if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']: + msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links) + raise DirectConnectError(msg=msg, + last_traceback=traceback.format_exc(), + exception=e) + else: + break + + +def lag_changed(current_status, name, min_links): + """ Determines if a modifiable link aggregation group attribute has been modified. 
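+        e.g. (illustrative values) lag_changed({'lagName': 'old', 'minimumLinks': 0}, 'new', None) -> True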
""" + return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks']) + + +def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout): + exists = lag_exists(client, lag_id, lag_name) + if not exists and lag_id: + raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), + last_traceback=None, + exception="") + + # the connection is found; get the latest state and see if it needs to be updated + if exists: + lag_id = exists + latest_state = lag_status(client, lag_id) + if lag_changed(latest_state, lag_name, min_links): + update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout) + return True, lag_id + return False, lag_id + + # no connection found; create a new one + else: + lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id) + update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout) + return True, lag_id + + +def describe_virtual_interfaces(client, lag_id): + try: + response = client.describe_virtual_interfaces(connectionId=lag_id) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e) + return response.get('virtualInterfaces', []) + + +def get_connections_and_virtual_interfaces(client, lag_id): + virtual_interfaces = describe_virtual_interfaces(client, lag_id) + connections = lag_status(client, lag_id=lag_id).get('connections', []) + return virtual_interfaces, connections + + +def disassociate_vis(client, lag_id, virtual_interfaces): + for vi in virtual_interfaces: + delete_virtual_interface(client, vi['virtualInterfaceId']) + try: + response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId']) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), + last_traceback=traceback.format_exc(), + exception=e) + + +def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout): + lag_id = lag_exists(client, lag_id, lag_name) + if not lag_id: + return False + + latest_status = lag_status(client, lag_id) + + # determine the associated connections and virtual interfaces to disassociate + virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id) + + # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete + if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete: + raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " + "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). 
" + "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " + "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), + last_traceback=None, + exception=None) + + # update min_links to be 0 so we can remove the LAG + update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout) + + # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached + for connection in connections: + disassociate_connection_and_lag(client, connection['connectionId'], lag_id) + if delete_with_disassociation: + delete_connection(client, connection['connectionId']) + + for vi in virtual_interfaces: + delete_virtual_interface(client, vi['virtualInterfaceId']) + + start_time = time.time() + while True: + try: + delete_lag(client, lag_id) + except DirectConnectError as e: + if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait: + continue + else: + return True + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(), + link_aggregation_group_id=dict(), + num_connections=dict(type='int'), + min_links=dict(type='int'), + location=dict(), + bandwidth=dict(), + connection_id=dict(), + delete_with_disassociation=dict(type='bool', default=False), + force_delete=dict(type='bool', default=False), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), + )) + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=[('link_aggregation_group_id', 'name')], + required_if=[('state', 'present', ('location', 'bandwidth'))]) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.") + + connection = boto3_conn(module, conn_type='client', + resource='directconnect', region=region, + endpoint=ec2_url, **aws_connect_kwargs) + + state = module.params.get('state') + response = {} + try: + if state == 'present': + changed, lag_id = ensure_present(connection, + num_connections=module.params.get("num_connections"), + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + connection_id=module.params.get("connection_id"), + min_links=module.params.get("min_links"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout")) + response = lag_status(connection, lag_id) + elif state == "absent": + changed = ensure_absent(connection, + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + force_delete=module.params.get("force_delete"), + delete_with_disassociation=module.params.get("delete_with_disassociation"), + wait=module.params.get('wait'), + wait_timeout=module.params.get('wait_timeout')) + except DirectConnectError as e: + if e.last_traceback: + module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception)) + else: + module.fail_json(msg=e.msg) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + +if __name__ == '__main__': + main() diff --git 
a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py new file mode 100644 index 00000000000..96616532587 --- /dev/null +++ b/aws_direct_connect_virtual_interface.py @@ -0,0 +1,500 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_direct_connect_virtual_interface +short_description: Manage Direct Connect virtual interfaces +description: + - Create, delete, or modify a Direct Connect public or private virtual interface. +author: "Sloane Hertel (@s-hertel)" +requirements: + - boto3 + - botocore +options: + state: + description: + - The desired state of the Direct Connect virtual interface. + choices: [present, absent] + type: str + required: true + id_to_associate: + description: + - The ID of the link aggregation group or connection to associate with the virtual interface. + aliases: [link_aggregation_group_id, connection_id] + type: str + required: true + public: + description: + - The type of virtual interface. + type: bool + name: + description: + - The name of the virtual interface. + type: str + vlan: + description: + - The VLAN ID. + default: 100 + type: int + bgp_asn: + description: + - The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + default: 65000 + type: int + authentication_key: + description: + - The authentication key for BGP configuration. + type: str + amazon_address: + description: + - The amazon address CIDR with which to create the virtual interface. + type: str + customer_address: + description: + - The customer address CIDR with which to create the virtual interface. + type: str + address_type: + description: + - The type of IP address for the BGP peer. + type: str + cidr: + description: + - A list of route filter prefix CIDRs with which to create the public virtual interface. + type: list + elements: str + virtual_gateway_id: + description: + - The virtual gateway ID required for creating a private virtual interface. + type: str + virtual_interface_id: + description: + - The virtual interface ID. + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +RETURN = ''' +address_family: + description: The address family for the BGP peer. + returned: always + type: str + sample: ipv4 +amazon_address: + description: IP address assigned to the Amazon interface. + returned: always + type: str + sample: 169.254.255.1/30 +asn: + description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + returned: always + type: int + sample: 65000 +auth_key: + description: The authentication key for BGP configuration. + returned: always + type: str + sample: 0xZ59Y1JZ2oDOSh6YriIlyRE +bgp_peers: + description: A list of the BGP peers configured on this virtual interface. + returned: always + type: complex + contains: + address_family: + description: The address family for the BGP peer. + returned: always + type: str + sample: ipv4 + amazon_address: + description: IP address assigned to the Amazon interface. + returned: always + type: str + sample: 169.254.255.1/30 + asn: + description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. 
+      returned: always
+      type: int
+      sample: 65000
+    auth_key:
+      description: The authentication key for BGP configuration.
+      returned: always
+      type: str
+      sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
+    bgp_peer_state:
+      description: The state of the BGP peer (verifying, pending, available).
+      returned: always
+      type: str
+      sample: available
+    bgp_status:
+      description: The up/down state of the BGP peer.
+      returned: always
+      type: str
+      sample: up
+    customer_address:
+      description: IP address assigned to the customer interface.
+      returned: always
+      type: str
+      sample: 169.254.255.2/30
+changed:
+  description: Indicates whether the virtual interface has been created/modified/deleted.
+  returned: always
+  type: bool
+  sample: false
+connection_id:
+  description:
+    - The ID of the connection. This field is also used as the ID type for operations that
+      use multiple connection types (LAG, interconnect, and/or connection).
+  returned: always
+  type: str
+  sample: dxcon-fgb175av
+customer_address:
+  description: IP address assigned to the customer interface.
+  returned: always
+  type: str
+  sample: 169.254.255.2/30
+customer_router_config:
+  description: Information for generating the customer router configuration.
+  returned: always
+  type: str
+location:
+  description: Where the connection is located.
+  returned: always
+  type: str
+  sample: EqDC2
+owner_account:
+  description: The AWS account that will own the new virtual interface.
+  returned: always
+  type: str
+  sample: '123456789012'
+route_filter_prefixes:
+  description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
+  returned: always
+  type: complex
+  contains:
+    cidr:
+      description: A route to be advertised to the AWS network in this region.
+      returned: always
+      type: str
+      sample: 54.227.92.216/30
+virtual_gateway_id:
+  description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
+  returned: when I(public=False)
+  type: str
+  sample: vgw-f3ce259a
+virtual_interface_id:
+  description: The ID of the virtual interface.
+  returned: always
+  type: str
+  sample: dxvif-fh0w7cex
+virtual_interface_name:
+  description: The name of the virtual interface assigned by the customer.
+  returned: always
+  type: str
+  sample: test_virtual_interface
+virtual_interface_state:
+  description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
+  returned: always
+  type: str
+  sample: available
+virtual_interface_type:
+  description: The type of virtual interface (private, public).
+  returned: always
+  type: str
+  sample: private
+vlan:
+  description: The VLAN ID.
+  returned: always
+  type: int
+  sample: 100
+'''
+
+EXAMPLES = '''
+---
+- name: create a private virtual interface on a link aggregation group
+  aws_direct_connect_virtual_interface:
+    state: present
+    name: "{{ name }}"
+    link_aggregation_group_id: dxlag-XXXXXXXX
+    public: False
+    virtual_gateway_id: vgw-XXXXXXXX
+
+- name: delete the virtual interface associated with a connection
+  aws_direct_connect_virtual_interface:
+    state: absent
+    connection_id: dxcon-XXXXXXXX
+    virtual_interface_id: dxvif-XXXXXXXX
+
+'''
+
+import traceback
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    # handled by AnsibleAWSModule
+    pass
+
+
+def try_except_ClientError(failure_msg):
+    '''
+    Wrapper for boto3 calls that uses AWSRetry and handles exceptions
+    '''
+    def wrapper(f):
+        def run_func(*args, **kwargs):
+            try:
+                result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
+            except (ClientError, BotoCoreError) as e:
+                raise DirectConnectError(failure_msg, traceback.format_exc(), e)
+            return result
+        return run_func
+    return wrapper
+
+
+def find_unique_vi(client, connection_id, virtual_interface_id, name):
+    '''
+    Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
+    If multiple matches are found False is returned. If no matches are found None is returned.
+    '''
+
+    # Get the virtual interfaces, filtering by the ID if provided.
+    vi_params = {}
+    if virtual_interface_id:
+        vi_params = {'virtualInterfaceId': virtual_interface_id}
+
+    virtual_interfaces = try_except_ClientError(
+        failure_msg="Failed to describe virtual interface")(
+            client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
+
+    # Remove deleting/deleted matches from the results.
+    virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
+
+    matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
+    return exact_match(matching_virtual_interfaces)
+
+
+def exact_match(virtual_interfaces):
+    '''
+    Returns the virtual interface ID if one was found,
+    None if the virtual interface ID needs to be created,
+    False if an exact match was not found
+    '''
+
+    if not virtual_interfaces:
+        return None
+    if len(virtual_interfaces) == 1:
+        return virtual_interfaces[0]['virtualInterfaceId']
+    else:
+        return False
+
+
+def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
+    '''
+    Filters the available virtual interfaces to try to find a unique match
+    '''
+    # Filter by name if provided.
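+    # Illustrative note: if two interfaces share the name 'prod' but sit on
+    # different connections, the name filter alone returns both; the
+    # connection_id filter below then narrows the candidates to at most one.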
+ if name: + matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name) + if len(matching_by_name) == 1: + return matching_by_name + else: + matching_by_name = virtual_interfaces + + # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated) + if connection_id and len(matching_by_name) > 1: + matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id) + if len(matching_by_connection_id) == 1: + return matching_by_connection_id + else: + matching_by_connection_id = matching_by_name + + return matching_by_connection_id + + +def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id): + ''' + Return virtual interfaces that have the connection_id associated + ''' + return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id] + + +def find_virtual_interface_by_name(virtual_interfaces, name): + ''' + Return virtual interfaces that match the provided name + ''' + return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name] + + +def vi_state(client, virtual_interface_id): + ''' + Returns the state of the virtual interface. + ''' + err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) + vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id) + return vi['virtualInterfaces'][0] + + +def assemble_params_for_creating_vi(params): + ''' + Returns kwargs to use in the call to create the virtual interface + + Params for public virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr + Params for private virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId + ''' + + public = params['public'] + name = params['name'] + vlan = params['vlan'] + bgp_asn = params['bgp_asn'] + auth_key = params['authentication_key'] + amazon_addr = params['amazon_address'] + customer_addr = params['customer_address'] + family_addr = params['address_type'] + cidr = params['cidr'] + virtual_gateway_id = params['virtual_gateway_id'] + + parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) + opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr) + + for name, value in opt_params.items(): + if value: + parameters[name] = value + + # virtual interface type specific parameters + if public and cidr: + parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr] + if not public: + parameters['virtualGatewayId'] = virtual_gateway_id + + return parameters + + +def create_vi(client, public, associated_id, creation_params): + ''' + :param public: a boolean + :param associated_id: a link aggregation group ID or connection ID to associate + with the virtual interface. 
+ :param creation_params: a dict of parameters to use in the boto call + :return The ID of the created virtual interface + ''' + err_msg = "Failed to create virtual interface" + if public: + vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id, + newPublicVirtualInterface=creation_params) + else: + vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id, + newPrivateVirtualInterface=creation_params) + return vi['virtualInterfaceId'] + + +def modify_vi(client, virtual_interface_id, connection_id): + ''' + Associate a new connection ID + ''' + err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) + try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id, + connectionId=connection_id) + + +def needs_modification(client, virtual_interface_id, connection_id): + ''' + Determine if the associated connection ID needs to be updated + ''' + return vi_state(client, virtual_interface_id).get('connectionId') != connection_id + + +def ensure_state(connection, module): + changed = False + + state = module.params['state'] + connection_id = module.params['id_to_associate'] + public = module.params['public'] + name = module.params['name'] + + virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name) + + if virtual_interface_id is False: + module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match.") + + if state == 'present': + + if not virtual_interface_id and module.params['virtual_interface_id']: + module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id'])) + + elif not virtual_interface_id: + assembled_params = assemble_params_for_creating_vi(module.params) + virtual_interface_id = create_vi(connection, public, connection_id, assembled_params) + changed = True + + if needs_modification(connection, virtual_interface_id, connection_id): + modify_vi(connection, virtual_interface_id, connection_id) + changed = True + + latest_state = vi_state(connection, virtual_interface_id) + + else: + if virtual_interface_id: + delete_virtual_interface(connection, virtual_interface_id) + changed = True + + latest_state = {} + + return changed, latest_state + + +def main(): + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent']), + id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']), + public=dict(type='bool'), + name=dict(), + vlan=dict(type='int', default=100), + bgp_asn=dict(type='int', default=65000), + authentication_key=dict(), + amazon_address=dict(), + customer_address=dict(), + address_type=dict(), + cidr=dict(type='list'), + virtual_gateway_id=dict(), + virtual_interface_id=dict() + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_one_of=[['virtual_interface_id', 'name']], + required_if=[['state', 'present', ['public']], + ['public', False, ['virtual_gateway_id']], + ['public', True, ['amazon_address']], + ['public', True, ['customer_address']], + ['public', True, ['cidr']]]) + + connection = module.client('directconnect') + + try: + changed, latest_state = ensure_state(connection, module) + except DirectConnectError as e: + if e.exception: + 
module.fail_json_aws(exception=e.exception, msg=e.msg) + else: + module.fail_json(msg=e.msg) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state)) + + +if __name__ == '__main__': + main() diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py new file mode 100644 index 00000000000..19bf5ed62d5 --- /dev/null +++ b/aws_eks_cluster.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aws_eks_cluster +short_description: Manage Elastic Kubernetes Service Clusters +description: + - Manage Elastic Kubernetes Service Clusters + +author: Will Thames (@willthames) + +options: + name: + description: Name of EKS cluster + required: True + type: str + version: + description: Kubernetes version - defaults to latest + type: str + role_arn: + description: ARN of IAM role used by the EKS cluster + type: str + subnets: + description: list of subnet IDs for the Kubernetes cluster + type: list + elements: str + security_groups: + description: list of security group names or IDs + type: list + elements: str + state: + description: desired state of the EKS cluster + choices: + - absent + - present + default: present + type: str + wait: + description: >- + Specifies whether the module waits until the cluster is active or deleted + before moving on. It takes "usually less than 10 minutes" per AWS documentation. + type: bool + default: false + wait_timeout: + description: >- + The duration in seconds to wait for the cluster to become active. Defaults + to 1200 seconds (20 minutes). + default: 1200 + type: int + +requirements: [ 'botocore', 'boto3' ] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
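+# An illustrative aside (not an upstream example): when pinning I(version),
+# quote the value so YAML keeps it a string rather than a float, e.g.
+#
+# - aws_eks_cluster:
+#     name: my_cluster
+#     version: '1.14'
+#     role_arn: my_eks_role
+#     subnets:
+#       - subnet-aaaa1111
+#     security_groups:
+#       - my_eks_sg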
+ +- name: Create an EKS cluster + aws_eks_cluster: + name: my_cluster + version: 1.14 + role_arn: my_eks_role + subnets: + - subnet-aaaa1111 + security_groups: + - my_eks_sg + - sg-abcd1234 + register: caller_facts + +- name: Remove an EKS cluster + aws_eks_cluster: + name: my_cluster + wait: yes + state: absent +''' + +RETURN = ''' +arn: + description: ARN of the EKS cluster + returned: when state is present + type: str + sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster +certificate_authority: + description: Dictionary containing Certificate Authority Data for cluster + returned: after creation + type: complex + contains: + data: + description: Base-64 encoded Certificate Authority Data for cluster + returned: when the cluster has been created and is active + type: str +endpoint: + description: Kubernetes API server endpoint + returned: when the cluster has been created and is active + type: str + sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com +created_at: + description: Cluster creation date and time + returned: when state is present + type: str + sample: '2018-06-06T11:56:56.242000+00:00' +name: + description: EKS cluster name + returned: when state is present + type: str + sample: my-eks-cluster +resources_vpc_config: + description: VPC configuration of the cluster + returned: when state is present + type: complex + contains: + security_group_ids: + description: List of security group IDs + returned: always + type: list + sample: + - sg-abcd1234 + - sg-aaaa1111 + subnet_ids: + description: List of subnet IDs + returned: always + type: list + sample: + - subnet-abcdef12 + - subnet-345678ab + - subnet-cdef1234 + vpc_id: + description: VPC id + returned: always + type: str + sample: vpc-a1b2c3d4 +role_arn: + description: ARN of the IAM role used by the cluster + returned: when state is present + type: str + sample: arn:aws:iam::111111111111:role/aws_eks_cluster_role +status: + description: status of the EKS cluster + returned: when state is present + type: str + sample: + - CREATING + - ACTIVE +version: + description: Kubernetes version of the cluster + returned: when state is present + type: str + sample: '1.10' +''' + + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter + +try: + import botocore.exceptions +except ImportError: + pass # caught by AnsibleAWSModule + + +def ensure_present(client, module): + name = module.params.get('name') + subnets = module.params['subnets'] + groups = module.params['security_groups'] + wait = module.params.get('wait') + cluster = get_cluster(client, module) + try: + ec2 = module.client('ec2') + vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId'] + groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't lookup security groups") + + if cluster: + if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets): + module.fail_json(msg="Cannot modify subnets of existing cluster") + if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups): + module.fail_json(msg="Cannot modify security groups of existing cluster") + if module.params.get('version') and 
module.params.get('version') != cluster['version']: + module.fail_json(msg="Cannot modify version of existing cluster") + + if wait: + wait_until(client, module, 'cluster_active') + # Ensure that fields that are only available for active clusters are + # included in the returned value + cluster = get_cluster(client, module) + + module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster)) + + if module.check_mode: + module.exit_json(changed=True) + try: + params = dict(name=name, + roleArn=module.params['role_arn'], + resourcesVpcConfig=dict( + subnetIds=subnets, + securityGroupIds=groups), + clientRequestToken='ansible-create-%s' % name) + if module.params['version']: + params['version'] = module.params['version'] + cluster = client.create_cluster(**params)['cluster'] + except botocore.exceptions.EndpointConnectionError as e: + module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't create cluster %s" % name) + + if wait: + wait_until(client, module, 'cluster_active') + # Ensure that fields that are only available for active clusters are + # included in the returned value + cluster = get_cluster(client, module) + + module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster)) + + +def ensure_absent(client, module): + name = module.params.get('name') + existing = get_cluster(client, module) + wait = module.params.get('wait') + if not existing: + module.exit_json(changed=False) + if not module.check_mode: + try: + client.delete_cluster(name=module.params['name']) + except botocore.exceptions.EndpointConnectionError as e: + module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) + + if wait: + wait_until(client, module, 'cluster_deleted') + + module.exit_json(changed=True) + + +def get_cluster(client, module): + name = module.params.get('name') + try: + return client.describe_cluster(name=name)['cluster'] + except is_boto3_error_code('ResourceNotFoundException'): + return None + except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except + module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) + + +def wait_until(client, module, waiter_name='cluster_active'): + name = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + + waiter = get_waiter(client, waiter_name) + attempts = 1 + int(wait_timeout / waiter.config.delay) + waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts}) + + +def main(): + argument_spec = dict( + name=dict(required=True), + version=dict(), + role_arn=dict(), + subnets=dict(type='list'), + security_groups=dict(type='list'), + state=dict(choices=['absent', 'present'], default='present'), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=1200, type='int') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]], + supports_check_mode=True, + ) + + if not module.botocore_at_least("1.10.32"): + module.fail_json(msg='aws_eks_cluster module requires botocore >= 
1.10.32')
+
+    if (not module.botocore_at_least("1.12.38") and
+            module.params.get('state') == 'absent' and
+            module.params.get('wait')):
+        module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')
+
+    client = module.client('eks')
+
+    if module.params.get('state') == 'present':
+        ensure_present(client, module)
+    else:
+        ensure_absent(client, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py
new file mode 100644
index 00000000000..ae69e45092c
--- /dev/null
+++ b/aws_elasticbeanstalk_app.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'
+                    }
+
+DOCUMENTATION = '''
+---
+module: aws_elasticbeanstalk_app
+
+short_description: Create, update, and delete an Elastic Beanstalk application
+
+description:
+    - Creates, updates, or deletes Elastic Beanstalk applications.
+
+options:
+  app_name:
+    description:
+      - Name of the Elastic Beanstalk application you wish to manage.
+    aliases: [ 'name' ]
+    type: str
+  description:
+    description:
+      - The description of the application.
+    type: str
+  state:
+    description:
+      - Whether to ensure the application is present or absent.
+    default: present
+    choices: ['absent','present']
+    type: str
+  terminate_by_force:
+    description:
+      - When I(terminate_by_force=true), running environments will be terminated before deleting the application.
+    default: false
+    type: bool
+author:
+    - Harpreet Singh (@hsingh)
+    - Stephen Granger (@viper233)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Create or update an application
+- aws_elasticbeanstalk_app:
+    app_name: Sample_App
+    description: "Hello World App"
+    state: present
+
+# Delete application
+- aws_elasticbeanstalk_app:
+    app_name: Sample_App
+    state: absent
+
+'''
+
+RETURN = '''
+app:
+    description: Beanstalk application.
+    returned: always
+    type: dict
+    sample: {
+        "ApplicationName": "app-name",
+        "ConfigurationTemplates": [],
+        "DateCreated": "2016-12-28T14:50:03.185000+00:00",
+        "DateUpdated": "2016-12-28T14:50:03.185000+00:00",
+        "Description": "description",
+        "Versions": [
+            "1.0.0",
+            "1.0.1"
+        ]
+    }
+output:
+    description: Message indicating what change will occur.
+    returned: in check mode
+    type: str
+    sample: App is up-to-date
+'''
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+
+
+def describe_app(ebs, app_name, module):
+    apps = list_apps(ebs, app_name, module)
+
+    return None if len(apps) != 1 else apps[0]
+
+
+def list_apps(ebs, app_name, module):
+    try:
+        if app_name is not None:
+            apps = ebs.describe_applications(ApplicationNames=[app_name])
+        else:
+            apps = ebs.describe_applications()
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Could not describe application")
+
+    return apps.get("Applications", [])
+
+
+def check_app(ebs, app, module):
+    app_name = module.params['app_name']
+    description = module.params['description']
+    state = module.params['state']
+    terminate_by_force = module.params['terminate_by_force']
+
+    result = {}
+
+    if state == 'present' and app is None:
+        result = dict(changed=True, output="App would be created")
+    elif state == 'present' and app.get("Description", None) != description:
+        result = dict(changed=True, output="App would be updated", app=app)
+    elif state == 'present' and app.get("Description", None) == description:
+        result = dict(changed=False, output="App is up-to-date", app=app)
+    elif state == 'absent' and app is None:
+        result = dict(changed=False, output="App does not exist", app={})
+    # The terminate_by_force case must be checked before the plain
+    # "app is not None" case, otherwise this branch can never be reached.
+    elif state == 'absent' and app is not None and terminate_by_force is True:
+        result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app)
+    elif state == 'absent' and app is not None:
+        result = dict(changed=True, output="App will be deleted", app=app)
+
+    module.exit_json(**result)
+
+
+def filter_empty(**kwargs):
+    retval = {}
+    for k, v in kwargs.items():
+        if v:
+            retval[k] = v
+    return retval
+
+
+def main():
+    argument_spec = dict(
+        app_name=dict(aliases=['name'], type='str', required=False),
+        description=dict(),
+        state=dict(choices=['present', 'absent'], default='present'),
+        terminate_by_force=dict(type='bool', default=False, required=False)
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    app_name = module.params['app_name']
+    description = module.params['description']
+    state = module.params['state']
+    terminate_by_force = module.params['terminate_by_force']
+
+    if app_name is None:
+        module.fail_json(msg='Module parameter "app_name" is required')
+
+    result = {}
+
+    ebs = module.client('elasticbeanstalk')
+
+    app = describe_app(ebs, app_name, module)
+
+    if module.check_mode:
+        check_app(ebs, app, module)
+        module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
+
+    if state == 'present':
+        if app is None:
+            try:
+                # the response from create_application is discarded; the app is
+                # re-described below so the return value matches later updates
+                ebs.create_application(**filter_empty(ApplicationName=app_name,
+                                                      Description=description))
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Could not create application")
+
+            app = describe_app(ebs, app_name, module)
+
+            result =
dict(changed=True, app=app) + else: + result = dict(changed=False, app=app) + + else: + if app is None: + result = dict(changed=False, output='Application not found', app={}) + else: + try: + if terminate_by_force: + # Running environments will be terminated before deleting the application + ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force) + else: + ebs.delete_application(ApplicationName=app_name) + changed = True + except BotoCoreError as e: + module.fail_json_aws(e, msg="Cannot terminate app") + except ClientError as e: + if 'It is currently pending deletion.' not in e.response['Error']['Message']: + module.fail_json_aws(e, msg="Cannot terminate app") + else: + changed = False + + result = dict(changed=changed, app=app) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/aws_glue_connection.py b/aws_glue_connection.py new file mode 100644 index 00000000000..ae9446a7963 --- /dev/null +++ b/aws_glue_connection.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Rob White (@wimnat) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_glue_connection +short_description: Manage an AWS Glue connection +description: + - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + catalog_id: + description: + - The ID of the Data Catalog in which to create the connection. If none is supplied, + the AWS account ID is used by default. + type: str + connection_properties: + description: + - A dict of key-value pairs used as parameters for this connection. + - Required when I(state=present). + type: dict + connection_type: + description: + - The type of the connection. Currently, only JDBC is supported; SFTP is not supported. + default: JDBC + choices: [ 'JDBC', 'SFTP' ] + type: str + description: + description: + - The description of the connection. + type: str + match_criteria: + description: + - A list of UTF-8 strings that specify the criteria that you can use in selecting this connection. + type: list + elements: str + name: + description: + - The name of the connection. + required: true + type: str + security_groups: + description: + - A list of security groups to be used by the connection. Use either security group name or ID. + type: list + elements: str + state: + description: + - Create or delete the AWS Glue connection. + required: true + choices: [ 'present', 'absent' ] + type: str + subnet_id: + description: + - The subnet ID used by the connection. + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an AWS Glue connection +- aws_glue_connection: + name: my-glue-connection + connection_properties: + JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename + USERNAME: my-username + PASSWORD: my-password + state: present + +# Delete an AWS Glue connection +- aws_glue_connection: + name: my-glue-connection + state: absent + +''' + +RETURN = ''' +connection_properties: + description: A dict of key-value pairs used as parameters for this connection. 
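+    # Note: connection properties are echoed back as supplied, which may
+    # include credentials such as PASSWORD; treat this value as sensitive.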
+    returned: when state is present
+    type: dict
+    sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
+connection_type:
+    description: The type of the connection.
+    returned: when state is present
+    type: str
+    sample: JDBC
+creation_time:
+    description: The time this connection definition was created.
+    returned: when state is present
+    type: str
+    sample: "2018-04-21T05:19:58.326000+00:00"
+description:
+    description: The description of the connection.
+    returned: when state is present
+    type: str
+    sample: My first Glue connection
+last_updated_time:
+    description: The last time this connection definition was updated.
+    returned: when state is present
+    type: str
+    sample: "2018-04-21T05:19:58.326000+00:00"
+match_criteria:
+    description: A list of criteria that can be used in selecting this connection.
+    returned: when state is present
+    type: list
+    sample: []
+name:
+    description: The name of the connection definition.
+    returned: when state is present
+    type: str
+    sample: my-glue-connection
+physical_connection_requirements:
+    description: A dict of physical connection requirements, such as VPC and SecurityGroup,
+                 needed to make this connection successfully.
+    returned: when state is present
+    type: dict
+    sample: {'subnet_id': 'subnet-aabbccddee'}
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
+
+# Non-ansible imports
+import copy
+import time
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+
+def _get_glue_connection(connection, module):
+    """
+    Get an AWS Glue connection based on name. If not found, return None.
+
+    :param connection: AWS boto3 glue connection
+    :param module: Ansible module
+    :return: boto3 Glue connection dict or None if not found
+    """
+
+    connection_name = module.params.get("name")
+    connection_catalog_id = module.params.get("catalog_id")
+
+    params = {'Name': connection_name}
+    if connection_catalog_id is not None:
+        params['CatalogId'] = connection_catalog_id
+
+    try:
+        return connection.get_connection(**params)['Connection']
+    except ClientError as e:
+        # Only ClientError carries a parsed error response; catching
+        # BotoCoreError here as well would crash on the e.response lookup.
+        if e.response['Error']['Code'] == 'EntityNotFoundException':
+            return None
+        raise e
+
+
+def _compare_glue_connection_params(user_params, current_params):
+    """
+    Compare Glue connection params. If there is a difference, return True immediately else return False
+
+    :param user_params: the Glue connection parameters passed by the user
+    :param current_params: the Glue connection parameters currently configured
+    :return: True if any parameter is mismatched else False
+    """
+
+    # Weirdly, boto3 doesn't return some keys if the value is empty e.g.
Description + # To counter this, add the key if it's missing with a blank value + + if 'Description' not in current_params: + current_params['Description'] = "" + if 'MatchCriteria' not in current_params: + current_params['MatchCriteria'] = list() + if 'PhysicalConnectionRequirements' not in current_params: + current_params['PhysicalConnectionRequirements'] = dict() + current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = [] + current_params['PhysicalConnectionRequirements']['SubnetId'] = "" + + if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \ + != current_params['ConnectionProperties']: + return True + if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \ + != current_params['ConnectionType']: + return True + if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']: + return True + if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']): + return True + if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']: + if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ + set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \ + != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']): + return True + if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ + user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \ + != current_params['PhysicalConnectionRequirements']['SubnetId']: + return True + + return False + + +def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): + """ + Create or update an AWS Glue connection + + :param connection: AWS boto3 glue connection + :param module: Ansible module + :param glue_connection: a dict of AWS Glue connection parameters or None + :return: + """ + + changed = False + params = dict() + params['ConnectionInput'] = dict() + params['ConnectionInput']['Name'] = module.params.get("name") + params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type") + params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties") + if module.params.get("catalog_id") is not None: + params['CatalogId'] = module.params.get("catalog_id") + if module.params.get("description") is not None: + params['ConnectionInput']['Description'] = module.params.get("description") + if module.params.get("match_criteria") is not None: + params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria") + if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None: + params['ConnectionInput']['PhysicalConnectionRequirements'] = dict() + if module.params.get("security_groups") is not None: + # Get security group IDs from names + security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True) + params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids + if module.params.get("subnet_id") is not None: + params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id") + + # If glue_connection is not None then check if it needs to be 
modified, else create it + if glue_connection: + if _compare_glue_connection_params(params, glue_connection): + try: + # We need to slightly modify the params for an update + update_params = copy.deepcopy(params) + update_params['Name'] = update_params['ConnectionInput']['Name'] + connection.update_connection(**update_params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + else: + try: + connection.create_connection(**params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + # If changed, get the Glue connection again + if changed: + glue_connection = None + for i in range(10): + glue_connection = _get_glue_connection(connection, module) + if glue_connection is not None: + break + time.sleep(10) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection)) + + +def delete_glue_connection(connection, module, glue_connection): + """ + Delete an AWS Glue connection + + :param connection: AWS boto3 glue connection + :param module: Ansible module + :param glue_connection: a dict of AWS Glue connection parameters or None + :return: + """ + + changed = False + + params = {'ConnectionName': module.params.get("name")} + if module.params.get("catalog_id") is not None: + params['CatalogId'] = module.params.get("catalog_id") + + if glue_connection: + try: + connection.delete_connection(**params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ( + dict( + catalog_id=dict(type='str'), + connection_properties=dict(type='dict'), + connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']), + description=dict(type='str'), + match_criteria=dict(type='list'), + name=dict(required=True, type='str'), + security_groups=dict(type='list'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + subnet_id=dict(type='str') + ) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['connection_properties']) + ] + ) + + connection_glue = module.client('glue') + connection_ec2 = module.client('ec2') + + glue_connection = _get_glue_connection(connection_glue, module) + + if module.params.get("state") == 'present': + create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection) + else: + delete_glue_connection(connection_glue, module, glue_connection) + + +if __name__ == '__main__': + main() diff --git a/aws_glue_job.py b/aws_glue_job.py new file mode 100644 index 00000000000..1bd8e8eaf64 --- /dev/null +++ b/aws_glue_job.py @@ -0,0 +1,373 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Rob White (@wimnat) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_glue_job +short_description: Manage an AWS Glue job +description: + - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + allocated_capacity: + description: + - The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs + can be allocated; the default is 10. 
A DPU is a relative measure of processing power that consists + of 4 vCPUs of compute capacity and 16 GB of memory. + type: int + command_name: + description: + - The name of the job command. This must be 'glueetl'. + default: glueetl + type: str + command_script_location: + description: + - The S3 path to a script that executes a job. + - Required when I(state=present). + type: str + connections: + description: + - A list of Glue connections used for this job. + type: list + elements: str + default_arguments: + description: + - A dict of default arguments for this job. You can specify arguments here that your own job-execution + script consumes, as well as arguments that AWS Glue itself consumes. + type: dict + description: + description: + - Description of the job being defined. + type: str + max_concurrent_runs: + description: + - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when + this threshold is reached. The maximum value you can specify is controlled by a service limit. + type: int + max_retries: + description: + - The maximum number of times to retry this job if it fails. + type: int + name: + description: + - The name you assign to this job definition. It must be unique in your account. + required: true + type: str + role: + description: + - The name or ARN of the IAM role associated with this job. + - Required when I(state=present). + type: str + state: + description: + - Create or delete the AWS Glue job. + required: true + choices: [ 'present', 'absent' ] + type: str + timeout: + description: + - The job timeout in minutes. + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an AWS Glue job +- aws_glue_job: + command_script_location: s3bucket/script.py + name: my-glue-job + role: my-iam-role + state: present + +# Delete an AWS Glue job +- aws_glue_job: + name: my-glue-job + state: absent + +''' + +RETURN = ''' +allocated_capacity: + description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to + 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power + that consists of 4 vCPUs of compute capacity and 16 GB of memory. + returned: when state is present + type: int + sample: 10 +command: + description: The JobCommand that executes this job. + returned: when state is present + type: complex + contains: + name: + description: The name of the job command. + returned: when state is present + type: str + sample: glueetl + script_location: + description: Specifies the S3 path to a script that executes a job. + returned: when state is present + type: str + sample: mybucket/myscript.py +connections: + description: The connections used for this job. + returned: when state is present + type: dict + sample: "{ Connections: [ 'list', 'of', 'connections' ] }" +created_on: + description: The time and date that this job definition was created. + returned: when state is present + type: str + sample: "2018-04-21T05:19:58.326000+00:00" +default_arguments: + description: The default arguments for this job, specified as name-value pairs. + returned: when state is present + type: dict + sample: "{ 'mykey1': 'myvalue1' }" +description: + description: Description of the job being defined. 
+    returned: when state is present
+    type: str
+    sample: My first Glue job
+job_name:
+    description: The name of the AWS Glue job.
+    returned: always
+    type: str
+    sample: my-glue-job
+execution_property:
+    description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
+    returned: always
+    type: complex
+    contains:
+        max_concurrent_runs:
+            description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
+                         returned when this threshold is reached. The maximum value you can specify is controlled by
+                         a service limit.
+            returned: when state is present
+            type: int
+            sample: 1
+last_modified_on:
+    description: The last point in time when this job definition was modified.
+    returned: when state is present
+    type: str
+    sample: "2018-04-21T05:19:58.326000+00:00"
+max_retries:
+    description: The maximum number of times to retry this job after a JobRun fails.
+    returned: when state is present
+    type: int
+    sample: 5
+name:
+    description: The name assigned to this job definition.
+    returned: when state is present
+    type: str
+    sample: my-glue-job
+role:
+    description: The name or ARN of the IAM role associated with this job.
+    returned: when state is present
+    type: str
+    sample: my-iam-role
+timeout:
+    description: The job timeout in minutes.
+    returned: when state is present
+    type: int
+    sample: 300
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+# Non-ansible imports
+import copy
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+
+def _get_glue_job(connection, module, glue_job_name):
+    """
+    Get an AWS Glue job based on name. If not found, return None.
+
+    :param connection: AWS boto3 glue connection
+    :param module: Ansible module
+    :param glue_job_name: Name of Glue job to get
+    :return: boto3 Glue job dict or None if not found
+    """
+
+    try:
+        return connection.get_job(JobName=glue_job_name)['Job']
+    except ClientError as e:
+        # Only ClientError carries a parsed error response; catching
+        # BotoCoreError here as well would crash on the e.response lookup.
+        if e.response['Error']['Code'] == 'EntityNotFoundException':
+            return None
+        module.fail_json_aws(e)
+    except BotoCoreError as e:
+        module.fail_json_aws(e)
+
+
+def _compare_glue_job_params(user_params, current_params):
+    """
+    Compare Glue job params. If there is a difference, return True immediately else return False
+
+    :param user_params: the Glue job parameters passed by the user
+    :param current_params: the Glue job parameters currently configured
+    :return: True if any parameter is mismatched else False
+    """
+
+    # Weirdly, boto3 doesn't return some keys if the value is empty e.g.
Description + # To counter this, add the key if it's missing with a blank value + + if 'Description' not in current_params: + current_params['Description'] = "" + if 'DefaultArguments' not in current_params: + current_params['DefaultArguments'] = dict() + + if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']: + return True + if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: + return True + if 'Connections' in user_params and set(user_params['Connections']) != set(current_params['Connections']): + return True + if 'DefaultArguments' in user_params and set(user_params['DefaultArguments']) != set(current_params['DefaultArguments']): + return True + if 'Description' in user_params and user_params['Description'] != current_params['Description']: + return True + if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']: + return True + if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']: + return True + if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: + return True + + return False + + +def create_or_update_glue_job(connection, module, glue_job): + """ + Create or update an AWS Glue job + + :param connection: AWS boto3 glue connection + :param module: Ansible module + :param glue_job: a dict of AWS Glue job parameters or None + :return: + """ + + changed = False + params = dict() + params['Name'] = module.params.get("name") + params['Role'] = module.params.get("role") + if module.params.get("allocated_capacity") is not None: + params['AllocatedCapacity'] = module.params.get("allocated_capacity") + if module.params.get("command_script_location") is not None: + params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")} + if module.params.get("connections") is not None: + params['Connections'] = {'Connections': module.params.get("connections")} + if module.params.get("default_arguments") is not None: + params['DefaultArguments'] = module.params.get("default_arguments") + if module.params.get("description") is not None: + params['Description'] = module.params.get("description") + if module.params.get("max_concurrent_runs") is not None: + params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")} + if module.params.get("max_retries") is not None: + params['MaxRetries'] = module.params.get("max_retries") + if module.params.get("timeout") is not None: + params['Timeout'] = module.params.get("timeout") + + # If glue_job is not None then check if it needs to be modified, else create it + if glue_job: + if _compare_glue_job_params(params, glue_job): + try: + # Update job needs slightly modified params + update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)} + del update_params['JobUpdate']['Name'] + connection.update_job(**update_params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + else: + try: + connection.create_job(**params) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + # If changed, get the Glue job again + if changed: + glue_job = _get_glue_job(connection, module, params['Name']) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job)) + + +def 
delete_glue_job(connection, module, glue_job): + """ + Delete an AWS Glue job + + :param connection: AWS boto3 glue connection + :param module: Ansible module + :param glue_job: a dict of AWS Glue job parameters or None + :return: + """ + + changed = False + + if glue_job: + try: + connection.delete_job(JobName=glue_job['Name']) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ( + dict( + allocated_capacity=dict(type='int'), + command_name=dict(type='str', default='glueetl'), + command_script_location=dict(type='str'), + connections=dict(type='list'), + default_arguments=dict(type='dict'), + description=dict(type='str'), + max_concurrent_runs=dict(type='int'), + max_retries=dict(type='int'), + name=dict(required=True, type='str'), + role=dict(type='str'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + timeout=dict(type='int') + ) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['role', 'command_script_location']) + ] + ) + + connection = module.client('glue') + + state = module.params.get("state") + + glue_job = _get_glue_job(connection, module, module.params.get("name")) + + if state == 'present': + create_or_update_glue_job(connection, module, glue_job) + else: + delete_glue_job(connection, module, glue_job) + + +if __name__ == '__main__': + main() diff --git a/aws_inspector_target.py b/aws_inspector_target.py new file mode 100644 index 00000000000..c31456ccff7 --- /dev/null +++ b/aws_inspector_target.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# Copyright (c) 2018 Dennis Conrad for Sainsbury's +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_inspector_target +short_description: Create, Update and Delete Amazon Inspector Assessment + Targets +description: Creates, updates, or deletes Amazon Inspector Assessment Targets + and manages the required Resource Groups. +author: "Dennis Conrad (@dennisconrad)" +options: + name: + description: + - The user-defined name that identifies the assessment target. The name + must be unique within the AWS account. + required: true + type: str + state: + description: + - The state of the assessment target. + choices: + - absent + - present + default: present + type: str + tags: + description: + - Tags of the EC2 instances to be added to the assessment target. + - Required if C(state=present). + type: dict +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 + - botocore +''' + +EXAMPLES = ''' +- name: Create my_target Assessment Target + aws_inspector_target: + name: my_target + tags: + role: scan_target + +- name: Update Existing my_target Assessment Target with Additional Tags + aws_inspector_target: + name: my_target + tags: + env: dev + role: scan_target + +- name: Delete my_target Assessment Target + aws_inspector_target: + name: my_target + state: absent +''' + +RETURN = ''' +arn: + description: The ARN that specifies the Amazon Inspector assessment target. 
+ returned: success + type: str + sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1" +created_at: + description: The time at which the assessment target was created. + returned: success + type: str + sample: "2018-01-29T13:48:51.958000+00:00" +name: + description: The name of the Amazon Inspector assessment target. + returned: success + type: str + sample: "my_target" +resource_group_arn: + description: The ARN that specifies the resource group that is associated + with the assessment target. + returned: success + type: str + sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8" +tags: + description: The tags of the resource group that is associated with the + assessment target. + returned: success + type: list + sample: {"role": "scan_target", "env": "dev"} +updated_at: + description: The time at which the assessment target was last updated. + returned: success + type: str + sample: "2018-01-29T13:48:51.958000+00:00" +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + compare_aws_tags, +) + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def main(): + argument_spec = dict( + name=dict(required=True), + state=dict(choices=['absent', 'present'], default='present'), + tags=dict(type='dict'), + ) + + required_if = [['state', 'present', ['tags']]] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=False, + required_if=required_if, + ) + + name = module.params.get('name') + state = module.params.get('state').lower() + tags = module.params.get('tags') + if tags: + tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + + client = module.client('inspector') + + try: + existing_target_arn = client.list_assessment_targets( + filter={'assessmentTargetNamePattern': name}, + ).get('assessmentTargetArns')[0] + + existing_target = camel_dict_to_snake_dict( + client.describe_assessment_targets( + assessmentTargetArns=[existing_target_arn], + ).get('assessmentTargets')[0] + ) + + existing_resource_group_arn = existing_target.get('resource_group_arn') + existing_resource_group_tags = client.describe_resource_groups( + resourceGroupArns=[existing_resource_group_arn], + ).get('resourceGroups')[0].get('tags') + + target_exists = True + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, msg="trying to retrieve targets") + except IndexError: + target_exists = False + + if state == 'present' and target_exists: + ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags) + ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict( + existing_resource_group_tags + ) + tags_to_add, tags_to_remove = compare_aws_tags( + ansible_dict_tags, + ansible_dict_existing_tags + ) + if not (tags_to_add or tags_to_remove): + existing_target.update({'tags': ansible_dict_existing_tags}) + module.exit_json(changed=False, **existing_target) + else: + try: + updated_resource_group_arn = client.create_resource_group( + resourceGroupTags=tags, + ).get('resourceGroupArn') + + client.update_assessment_target( + assessmentTargetArn=existing_target_arn, + assessmentTargetName=name, + 
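+                    # Inspector resource groups cannot be edited in place, so a
+                    # new group was created above and the target is re-pointed
+                    # at it here.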
resourceGroupArn=updated_resource_group_arn, + ) + + updated_target = camel_dict_to_snake_dict( + client.describe_assessment_targets( + assessmentTargetArns=[existing_target_arn], + ).get('assessmentTargets')[0] + ) + + updated_target.update({'tags': ansible_dict_tags}) + module.exit_json(changed=True, **updated_target), + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, msg="trying to update target") + + elif state == 'present' and not target_exists: + try: + new_resource_group_arn = client.create_resource_group( + resourceGroupTags=tags, + ).get('resourceGroupArn') + + new_target_arn = client.create_assessment_target( + assessmentTargetName=name, + resourceGroupArn=new_resource_group_arn, + ).get('assessmentTargetArn') + + new_target = camel_dict_to_snake_dict( + client.describe_assessment_targets( + assessmentTargetArns=[new_target_arn], + ).get('assessmentTargets')[0] + ) + + new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)}) + module.exit_json(changed=True, **new_target) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, msg="trying to create target") + + elif state == 'absent' and target_exists: + try: + client.delete_assessment_target( + assessmentTargetArn=existing_target_arn, + ) + module.exit_json(changed=True) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, msg="trying to delete target") + + elif state == 'absent' and not target_exists: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/aws_kms.py b/aws_kms.py new file mode 100644 index 00000000000..2ba02bf70f7 --- /dev/null +++ b/aws_kms.py @@ -0,0 +1,1063 @@ +#!/usr/bin/python +# -*- coding: utf-8 -* +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_kms +short_description: Perform various KMS management tasks. +description: + - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. +options: + alias: + description: An alias for a key. For safety, even though KMS does not require keys + to have an alias, this module expects all new keys to be given an alias + to make them easier to manage. Existing keys without an alias may be + referred to by I(key_id). Use M(aws_kms_info) to find key ids. Required + if I(key_id) is not given. Note that passing a I(key_id) and I(alias) + will only cause a new alias to be added, an alias will never be renamed. + The 'alias/' prefix is optional. + required: false + aliases: + - key_alias + type: str + key_id: + description: + - Key ID or ARN of the key. + - One of I(alias) or I(key_id) are required. + required: false + aliases: + - key_arn + type: str + enable_key_rotation: + description: + - Whether the key should be automatically rotated every year. + required: false + type: bool + policy_mode: + description: + - (deprecated) Grant or deny access. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. 
+ default: grant + choices: [ grant, deny ] + aliases: + - mode + type: str + policy_role_name: + description: + - (deprecated) Role to allow/deny access. + - One of I(policy_role_name) or I(policy_role_arn) are required. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + required: false + aliases: + - role_name + type: str + policy_role_arn: + description: + - (deprecated) ARN of role to allow/deny access. + - One of I(policy_role_name) or I(policy_role_arn) are required. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + type: str + required: false + aliases: + - role_arn + policy_grant_types: + description: + - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". + - Required when I(policy_mode=grant). + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + required: false + aliases: + - grant_types + type: list + elements: str + policy_clean_invalid_entries: + description: + - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases. + - Only cleans if changes are being made. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + type: bool + default: true + aliases: + - clean_invalid_entries + state: + description: Whether a key should be present or absent. Note that making an + existing key absent only schedules a key for deletion. Passing a key that + is scheduled for deletion with state present will cancel key deletion. + required: False + choices: + - present + - absent + default: present + type: str + enabled: + description: Whether or not a key is enabled + default: True + type: bool + description: + description: + A description of the CMK. Use a description that helps you decide + whether the CMK is appropriate for a task. + type: str + tags: + description: A dictionary of tags to apply to a key. + type: dict + purge_tags: + description: Whether the I(tags) argument should cause tags not in the list to + be removed + default: False + type: bool + purge_grants: + description: Whether the I(grants) argument should cause grants not in the list to + be removed + default: False + type: bool + grants: + description: + - A list of grants to apply to the key. Each item must contain I(grantee_principal). + Each item can optionally contain I(retiring_principal), I(operations), I(constraints), + I(name). + - I(grantee_principal) and I(retiring_principal) must be ARNs + - 'For full documentation of suboptions see the boto3 documentation:' + - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)' + type: list + elements: dict + suboptions: + grantee_principal: + description: The full ARN of the principal being granted permissions. 
+ required: true + type: str + retiring_principal: + description: The full ARN of the principal permitted to revoke/retire the grant. + type: str + operations: + type: list + elements: str + description: + - A list of operations that the grantee may perform using the CMK. + choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo', + 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign'] + constraints: + description: + - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals), + either or both being a dict specifying an encryption context match. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or + U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant) + type: dict + policy: + description: + - policy to apply to the KMS key + - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + type: str +author: + - Ted Timmons (@tedder) + - Will Thames (@willthames) + - Mark Chappell (@tremble) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile +# and has been deprecated in favour of the policy option. +- name: grant user-style access to production secrets + aws_kms: + args: + alias: "alias/my_production_secrets" + policy_mode: grant + policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" + policy_grant_types: "role,role grant" +- name: remove access to production secrets from role + aws_kms: + args: + alias: "alias/my_production_secrets" + policy_mode: deny + policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" + +# Create a new KMS key +- aws_kms: + alias: mykey + tags: + Name: myKey + Purpose: protect_stuff + +# Update previous key with more tags +- aws_kms: + alias: mykey + tags: + Name: myKey + Purpose: protect_stuff + Owner: security_team + +# Update a known key with grants allowing an instance with the billing-prod IAM profile +# to decrypt data encrypted with the environment: production, application: billing +# encryption context +- aws_kms: + key_id: abcd1234-abcd-1234-5678-ef1234567890 + grants: + - name: billing_prod + grantee_principal: arn:aws:iam::1234567890123:role/billing_prod + constraints: + encryption_context_equals: + environment: production + application: billing + operations: + - Decrypt + - RetireGrant +''' + +RETURN = ''' +key_id: + description: ID of key + type: str + returned: always + sample: abcd1234-abcd-1234-5678-ef1234567890 +key_arn: + description: ARN of key + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 +key_state: + description: The state of the key + type: str + returned: always + sample: PendingDeletion +key_usage: + description: The cryptographic operations for which you can use the key. + type: str + returned: always + sample: ENCRYPT_DECRYPT +origin: + description: The source of the key's key material. When this value is C(AWS_KMS), + AWS KMS created the key material. When this value is C(EXTERNAL), the + key material was imported or the CMK lacks key material. 
+ type: str + returned: always + sample: AWS_KMS +aws_account_id: + description: The AWS Account ID that the key belongs to + type: str + returned: always + sample: 1234567890123 +creation_date: + description: Date of creation of the key + type: str + returned: always + sample: "2017-04-18T15:12:08.551000+10:00" +description: + description: Description of the key + type: str + returned: always + sample: "My Key for Protecting important stuff" +enabled: + description: Whether the key is enabled. True if C(KeyState) is true. + type: str + returned: always + sample: false +aliases: + description: list of aliases associated with the key + type: list + returned: always + sample: + - aws/acm + - aws/ebs +policies: + description: list of policy documents for the keys. Empty when access is denied even if there are policies. + type: list + returned: always + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "111111111111" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::111111111111:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" +tags: + description: dictionary of tags applied to the key + type: dict + returned: always + sample: + Name: myKey + Purpose: protecting_stuff +grants: + description: list of grants associated with a key + type: complex + returned: always + contains: + constraints: + description: Constraints on the encryption context that the grant allows. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details + type: dict + returned: always + sample: + encryption_context_equals: + "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" + creation_date: + description: Date of creation of the grant + type: str + returned: always + sample: "2017-04-18T15:12:08+10:00" + grant_id: + description: The unique ID for the grant + type: str + returned: always + sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 + grantee_principal: + description: The principal that receives the grant's permissions + type: str + returned: always + sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz + issuing_account: + description: The AWS account under which the grant was issued + type: str + returned: always + sample: arn:aws:iam::01234567890:root + key_id: + description: The key ARN to which the grant applies. + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 + name: + description: The friendly name that identifies the grant + type: str + returned: always + sample: xyz + operations: + description: The list of operations permitted by the grant + type: list + returned: always + sample: + - Decrypt + - RetireGrant + retiring_principal: + description: The principal that can retire the grant + type: str + returned: always + sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz +changes_needed: + description: grant types that would be changed/were changed. 
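+  # Note: this reflects the deprecated policy_mode/policy_grant_types
+  # workflow (see the statement_label mapping defined below).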
+ type: dict + returned: always + sample: { "role": "add", "role grant": "add" } +had_invalid_entries: + description: there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made. + type: bool + returned: always +''' + +# these mappings are used to go from simple labels to the actual 'Sid' values returned +# by get_policy. They seem to be magic values. +statement_label = { + 'role': 'Allow use of the key', + 'role grant': 'Allow attachment of persistent resources', + 'admin': 'Allow access for Key Administrators' +} + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, compare_policies +from ansible.module_utils.six import string_types + +import json + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_iam_roles_with_backoff(connection): + paginator = connection.get_paginator('list_roles') + return paginator.paginate().build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_keys_with_backoff(connection): + paginator = connection.get_paginator('list_keys') + return paginator.paginate().build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_aliases_with_backoff(connection): + paginator = connection.get_paginator('list_aliases') + return paginator.paginate().build_full_result() + + +def get_kms_aliases_lookup(connection): + _aliases = dict() + for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + # Not all aliases are actually associated with a key + if 'TargetKeyId' in alias: + # strip off leading 'alias/' and add it to key's aliases + if alias['TargetKeyId'] in _aliases: + _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + else: + _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + return _aliases + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_tags_with_backoff(connection, key_id, **kwargs): + return connection.list_resource_tags(KeyId=key_id, **kwargs) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_grants_with_backoff(connection, key_id): + params = dict(KeyId=key_id) + paginator = connection.get_paginator('list_grants') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_metadata_with_backoff(connection, key_id): + return connection.describe_key(KeyId=key_id) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def list_key_policies_with_backoff(connection, key_id): + paginator = connection.get_paginator('list_key_policies') + return paginator.paginate(KeyId=key_id).build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_key_policy_with_backoff(connection, key_id, policy_name): + return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) + + +def get_kms_tags(connection, module, key_id): + # Handle pagination here as list_resource_tags does not have + # a paginator + kwargs = {} + tags = [] + more = True + while more: + try: + tag_response = get_kms_tags_with_backoff(connection, key_id, 
**kwargs) + tags.extend(tag_response['Tags']) + except is_boto3_error_code('AccessDeniedException'): + tag_response = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key tags") + if tag_response.get('NextMarker'): + kwargs['Marker'] = tag_response['NextMarker'] + else: + more = False + return tags + + +def get_kms_policies(connection, module, key_id): + try: + policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] + return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for + policy in policies] + except is_boto3_error_code('AccessDeniedException'): + return [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key policies") + + +def camel_to_snake_grant(grant): + ''' camel_to_snake_grant snakifies everything except the encryption context ''' + constraints = grant.get('Constraints', {}) + result = camel_dict_to_snake_dict(grant) + if 'EncryptionContextEquals' in constraints: + result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals'] + if 'EncryptionContextSubset' in constraints: + result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset'] + return result + + +def get_key_details(connection, module, key_id): + try: + result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain key metadata") + result['KeyArn'] = result.pop('Arn') + + try: + aliases = get_kms_aliases_lookup(connection) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain aliases") + + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') + result['aliases'] = aliases.get(result['KeyId'], []) + + result = camel_dict_to_snake_dict(result) + + # grants and tags get snakified differently + try: + result['grants'] = [camel_to_snake_grant(grant) for grant in + get_kms_grants_with_backoff(connection, key_id)['Grants']] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain key grants") + tags = get_kms_tags(connection, module, key_id) + result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') + result['policies'] = get_kms_policies(connection, module, key_id) + return result + + +def get_kms_facts(connection, module): + try: + keys = get_kms_keys_with_backoff(connection)['Keys'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain keys") + + return [get_key_details(connection, module, key['KeyId']) for key in keys] + + +def convert_grant_params(grant, key): + grant_params = dict(KeyId=key['key_arn'], + GranteePrincipal=grant['grantee_principal']) + if grant.get('operations'): + grant_params['Operations'] = grant['operations'] + if grant.get('retiring_principal'): + grant_params['RetiringPrincipal'] = grant['retiring_principal'] + if grant.get('name'): + grant_params['Name'] = grant['name'] + if grant.get('constraints'): + grant_params['Constraints'] = dict() + if 
grant['constraints'].get('encryption_context_subset'): + grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset'] + if grant['constraints'].get('encryption_context_equals'): + grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals'] + return grant_params + + +def different_grant(existing_grant, desired_grant): + if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'): + return True + if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'): + return True + if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations')): + return True + if existing_grant.get('constraints') != desired_grant.get('constraints'): + return True + return False + + +def compare_grants(existing_grants, desired_grants, purge_grants=False): + existing_dict = dict((eg['name'], eg) for eg in existing_grants) + desired_dict = dict((dg['name'], dg) for dg in desired_grants) + to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys()) + if purge_grants: + to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys()) + else: + to_remove_keys = set() + to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys()) + for candidate in to_change_candidates: + if different_grant(existing_dict[candidate], desired_dict[candidate]): + to_add_keys.add(candidate) + to_remove_keys.add(candidate) + + to_add = [] + to_remove = [] + for key in to_add_keys: + grant = desired_dict[key] + to_add.append(grant) + for key in to_remove_keys: + grant = existing_dict[key] + to_remove.append(grant) + return to_add, to_remove + + +def start_key_deletion(connection, module, key_metadata): + if key_metadata['KeyState'] == 'PendingDeletion': + return False + + if module.check_mode: + return True + + try: + connection.schedule_key_deletion(KeyId=key_metadata['Arn']) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to schedule key for deletion") + + +def cancel_key_deletion(connection, module, key): + key_id = key['key_arn'] + if key['key_state'] != 'PendingDeletion': + return False + + if module.check_mode: + return True + + try: + connection.cancel_key_deletion(KeyId=key_id) + # key is disabled after deletion cancellation + # set this so that ensure_enabled_disabled works correctly + key['key_state'] = 'Disabled' + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to cancel key deletion") + + return True + + +def ensure_enabled_disabled(connection, module, key, enabled): + desired_state = 'Enabled' + if not enabled: + desired_state = 'Disabled' + + if key['key_state'] == desired_state: + return False + + key_id = key['key_arn'] + if not module.check_mode: + if enabled: + try: + connection.enable_key(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to enable key") + else: + try: + connection.disable_key(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to disable key") + + return True + + +def update_alias(connection, module, key, alias): + alias = canonicalize_alias_name(alias) + + if alias is None: + return False + + key_id = key['key_arn'] + aliases = get_kms_aliases_with_backoff(connection)['Aliases'] + # We will only 
add new aliases, not rename existing ones
+    if alias in [_alias['AliasName'] for _alias in aliases]:
+        return False
+
+    if not module.check_mode:
+        try:
+            connection.create_alias(TargetKeyId=key_id, AliasName=alias)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to create key alias")
+
+    return True
+
+
+def update_description(connection, module, key, description):
+    if description is None:
+        return False
+    if key['description'] == description:
+        return False
+
+    key_id = key['key_arn']
+    if not module.check_mode:
+        try:
+            connection.update_key_description(KeyId=key_id, Description=description)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to update key description")
+
+    return True
+
+
+def update_tags(connection, module, key, desired_tags, purge_tags):
+    # purge_tags needs to be explicitly set, so an empty tags list means remove
+    # all tags
+
+    to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags)
+    if not (bool(to_add) or bool(to_remove)):
+        return False
+
+    key_id = key['key_arn']
+    if not module.check_mode:
+        if to_remove:
+            try:
+                connection.untag_resource(KeyId=key_id, TagKeys=to_remove)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Unable to remove tag")
+        if to_add:
+            try:
+                # tag_resource() accepts the full desired set; re-applying
+                # unchanged tags is a no-op, so this adds and updates in one call
+                tags = ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue')
+                connection.tag_resource(KeyId=key_id, Tags=tags)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Unable to add tag to key")
+
+    return True
+
+
+def update_policy(connection, module, key, policy):
+    if policy is None:
+        return False
+    try:
+        new_policy = json.loads(policy)
+    except ValueError as e:
+        module.fail_json_aws(e, msg="Unable to parse new policy as JSON")
+
+    key_id = key['key_arn']
+    try:
+        keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default')
+        original_policy = json.loads(keyret['Policy'])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+        # If we can't fetch the current policy assume we're making a change
+        # Could occur if we have PutKeyPolicy without GetKeyPolicy
+        original_policy = {}
+
+    if not compare_policies(original_policy, new_policy):
+        return False
+
+    if not module.check_mode:
+        try:
+            connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Unable to update key policy")

+    return True
+
+
+def update_key_rotation(connection, module, key, enable_key_rotation):
+    if enable_key_rotation is None:
+        return False
+    key_id = key['key_arn']
+    current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+    if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation:
+        return False
+
+    if enable_key_rotation:
+        connection.enable_key_rotation(KeyId=key_id)
+    else:
+        connection.disable_key_rotation(KeyId=key_id)
+    return True
+
+
+def update_grants(connection, module, key, desired_grants, purge_grants):
+    existing_grants = key['grants']
+
+    to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants)
+    if not (bool(to_add) or bool(to_remove)):
+        return False
+
+    key_id = key['key_arn']
+    if not module.check_mode:
+        for grant in to_remove:
+            try:
+ connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to retire grant") + for grant in to_add: + grant_params = convert_grant_params(grant, key) + try: + connection.create_grant(**grant_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create grant") + + return True + + +def update_key(connection, module, key): + changed = False + + changed |= cancel_key_deletion(connection, module, key) + changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled']) + changed |= update_alias(connection, module, key, module.params['alias']) + changed |= update_description(connection, module, key, module.params['description']) + changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags')) + changed |= update_policy(connection, module, key, module.params.get('policy')) + changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants')) + changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + + # make results consistent with kms_facts before returning + result = get_key_details(connection, module, key['key_arn']) + result['changed'] = changed + return result + + +def create_key(connection, module): + params = dict(BypassPolicyLockoutSafetyCheck=False, + Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'), + KeyUsage='ENCRYPT_DECRYPT', + Origin='AWS_KMS') + if module.params.get('description'): + params['Description'] = module.params['description'] + if module.params.get('policy'): + params['Policy'] = module.params['policy'] + + try: + result = connection.create_key(**params)['KeyMetadata'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create initial key") + key = get_key_details(connection, module, result['KeyId']) + + update_alias(connection, module, key, module.params['alias']) + update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + + ensure_enabled_disabled(connection, module, key, module.params.get('enabled')) + update_grants(connection, module, key, module.params.get('grants'), False) + + # make results consistent with kms_facts + result = get_key_details(connection, module, key['key_id']) + result['changed'] = True + return result + + +def delete_key(connection, module, key_metadata): + changed = False + + changed |= start_key_deletion(connection, module, key_metadata) + + result = get_key_details(connection, module, key_metadata['Arn']) + result['changed'] = changed + return result + + +def get_arn_from_role_name(iam, rolename): + ret = iam.get_role(RoleName=rolename) + if ret.get('Role') and ret['Role'].get('Arn'): + return ret['Role']['Arn'] + raise Exception('could not find arn for name {0}.'.format(rolename)) + + +def _clean_statement_principals(statement, clean_invalid_entries): + + # create Principal and 'AWS' so we can safely use them later. 
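# A standalone illustration of the normalisation performed below: IAM policy
# documents allow Principal.AWS to be either a single ARN string or a list of
# ARNs, so the statement is coerced to a list before entries are added or
# removed:
#
#     >>> stmt = {'Principal': {'AWS': 'arn:aws:iam::111111111111:root'}}
#     >>> if isinstance(stmt['Principal']['AWS'], string_types):
#     ...     stmt['Principal']['AWS'] = [stmt['Principal']['AWS']]
#     >>> stmt['Principal']['AWS']
#     ['arn:aws:iam::111111111111:root']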
+ if not isinstance(statement.get('Principal'), dict): + statement['Principal'] = dict() + + # If we have a single AWS Principal, ensure we still have a list (to manipulate) + if 'AWS' in statement['Principal'] and isinstance(statement['Principal']['AWS'], string_types): + statement['Principal']['AWS'] = [statement['Principal']['AWS']] + if not isinstance(statement['Principal'].get('AWS'), list): + statement['Principal']['AWS'] = list() + + invalid_entries = [item for item in statement['Principal']['AWS'] if not item.startswith('arn:aws:iam::')] + valid_entries = [item for item in statement['Principal']['AWS'] if item.startswith('arn:aws:iam::')] + + if bool(invalid_entries) and clean_invalid_entries: + statement['Principal']['AWS'] = valid_entries + return True + + return False + + +def _do_statement_grant(statement, role_arn, grant_types, mode, grant_type): + + if mode == 'grant': + if grant_type in grant_types: + if role_arn not in statement['Principal']['AWS']: # needs to be added. + statement['Principal']['AWS'].append(role_arn) + return 'add' + elif role_arn in statement['Principal']['AWS']: # not one the places the role should be + statement['Principal']['AWS'].remove(role_arn) + return 'remove' + return None + + if mode == 'deny' and role_arn in statement['Principal']['AWS']: + # we don't selectively deny. that's a grant with a + # smaller list. so deny=remove all of this arn. + statement['Principal']['AWS'].remove(role_arn) + return 'remove' + return None + + +def do_policy_grant(module, kms, keyarn, role_arn, grant_types, mode='grant', dry_run=True, clean_invalid_entries=True): + ret = {} + policy = json.loads(get_key_policy_with_backoff(kms, keyarn, 'default')['Policy']) + + changes_needed = {} + assert_policy_shape(module, policy) + had_invalid_entries = False + for statement in policy['Statement']: + # We already tested that these are the only types in the statements + for grant_type in statement_label: + # Are we on this grant type's statement? + if statement['Sid'] != statement_label[grant_type]: + continue + + had_invalid_entries |= _clean_statement_principals(statement, clean_invalid_entries) + change = _do_statement_grant(statement, role_arn, grant_types, mode, grant_type) + if change: + changes_needed[grant_type] = change + + ret['changes_needed'] = changes_needed + ret['had_invalid_entries'] = had_invalid_entries + ret['new_policy'] = policy + ret['changed'] = bool(changes_needed) + + if dry_run or not ret['changed']: + return ret + + try: + policy_json_string = json.dumps(policy) + kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not update key_policy', new_policy=policy_json_string) + + return ret + + +def assert_policy_shape(module, policy): + '''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.''' + errors = [] + if policy['Version'] != "2012-10-17": + errors.append('Unknown version/date ({0}) of policy. 
Things are probably different than we assumed they were.'.format(policy['Version']))
+
+    found_statement_type = {}
+    for statement in policy['Statement']:
+        for label, sidlabel in statement_label.items():
+            if statement['Sid'] == sidlabel:
+                found_statement_type[label] = True
+
+    for statementtype in statement_label:
+        if not found_statement_type.get(statementtype):
+            errors.append('Policy is missing {0}.'.format(statementtype))
+
+    if errors:
+        module.fail_json(msg='Problems asserting policy shape. Cowardly refusing to modify it', errors=errors, policy=policy)
+
+
+def canonicalize_alias_name(alias):
+    if alias is None:
+        return None
+    if alias.startswith('alias/'):
+        return alias
+    return 'alias/' + alias
+
+
+def fetch_key_metadata(connection, module, key_id, alias):
+
+    # Canonicalize the alias passed in from main() rather than re-reading
+    # (and shadowing it with) module.params
+    alias = canonicalize_alias_name(alias)
+
+    try:
+        # Fetch by key_id where possible
+        if key_id:
+            return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+        # Or try alias as a backup
+        return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata']
+
+    except connection.exceptions.NotFoundException:
+        return None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, 'Failed to fetch key metadata.')
+
+
+def update_policy_grants(connection, module, key_metadata, mode):
+    iam = module.client('iam')
+    key_id = key_metadata['Arn']
+
+    if module.params.get('policy_role_name') and not module.params.get('policy_role_arn'):
+        module.params['policy_role_arn'] = get_arn_from_role_name(iam, module.params['policy_role_name'])
+    if not module.params.get('policy_role_arn'):
+        module.fail_json(msg='policy_role_arn or policy_role_name is required to {0}'.format(module.params['policy_mode']))
+
+    # check the grant types for 'grant' only.
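# For reference, the valid grant types are the keys of statement_label
# (defined above), each tied to one of the magic policy Sids:
#
#     'role'       -> 'Allow use of the key'
#     'role grant' -> 'Allow attachment of persistent resources'
#     'admin'      -> 'Allow access for Key Administrators'
#
# Anything outside that mapping is rejected below.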
+    if mode == 'grant':
+        for grant_type in module.params['policy_grant_types']:
+            if grant_type not in statement_label:
+                module.fail_json(msg='{0} is an unknown grant type.'.format(grant_type))
+
+    return do_policy_grant(module, connection,
+                           key_id,
+                           module.params['policy_role_arn'],
+                           module.params['policy_grant_types'],
+                           mode=mode,
+                           dry_run=module.check_mode,
+                           clean_invalid_entries=module.params['policy_clean_invalid_entries'])
+
+
+def main():
+    argument_spec = dict(
+        alias=dict(aliases=['key_alias']),
+        policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'),
+        policy_role_name=dict(aliases=['role_name']),
+        policy_role_arn=dict(aliases=['role_arn']),
+        policy_grant_types=dict(aliases=['grant_types'], type='list'),
+        policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True),
+        key_id=dict(aliases=['key_arn']),
+        description=dict(),
+        enabled=dict(type='bool', default=True),
+        tags=dict(type='dict', default={}),
+        purge_tags=dict(type='bool', default=False),
+        grants=dict(type='list', default=[]),
+        policy=dict(),
+        purge_grants=dict(type='bool', default=False),
+        state=dict(default='present', choices=['present', 'absent']),
+        enable_key_rotation=dict(type='bool')
+    )
+
+    module = AnsibleAWSModule(
+        supports_check_mode=True,
+        argument_spec=argument_spec,
+        required_one_of=[['alias', 'key_id']],
+    )
+
+    mode = module.params['policy_mode']
+
+    kms = module.client('kms')
+
+    key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias'))
+    # We can't create keys with a specific ID; if we can't access the key we'll have to fail
+    if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata:
+        module.fail_json(msg="Could not find key with id {0} to update".format(module.params.get('key_id')))
+
+    if module.params.get('policy_grant_types') or mode == 'deny':
+        module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
+                         ' and has been deprecated in favour of the policy option.', version='2.13')
+        result = update_policy_grants(kms, module, key_metadata, mode)
+        module.exit_json(**result)
+
+    if module.params.get('state') == 'absent':
+        if key_metadata is None:
+            module.exit_json(changed=False)
+        result = delete_key(kms, module, key_metadata)
+        module.exit_json(**result)
+
+    if key_metadata:
+        key_details = get_key_details(kms, module, key_metadata['Arn'])
+        result = update_key(kms, module, key_details)
+        module.exit_json(**result)
+
+    result = create_key(kms, module)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_kms_facts.py b/aws_kms_facts.py
new file mode 120000
index 00000000000..ccd052f5199
--- /dev/null
+++ b/aws_kms_facts.py
@@ -0,0 +1 @@
+aws_kms_info.py
\ No newline at end of file
diff --git a/aws_kms_info.py b/aws_kms_info.py
new file mode 100644
index 00000000000..3e47206ecab
--- /dev/null
+++ b/aws_kms_info.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_kms_info
+short_description: Gather information about AWS KMS keys
+description:
+    - Gather information about AWS KMS keys including tags and grants
+    - This module was called C(aws_kms_facts) before Ansible 2.9.
The usage did not change.
+author: "Will Thames (@willthames)"
+options:
+    filters:
+      description:
+        - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+          The filters aren't natively supported by boto3, but are supported to provide similar
+          functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
+          C(tag:tagName)) are available, as are C(key-id) and C(alias).
+      type: dict
+    pending_deletion:
+      description: Whether to get full details (tags, grants etc.) of keys pending deletion
+      default: False
+      type: bool
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all KMS keys
+- aws_kms_info:
+
+# Gather information about all keys with a Name tag
+- aws_kms_info:
+    filters:
+      tag-key: Name
+
+# Gather information about all keys with a specific name
+- aws_kms_info:
+    filters:
+      "tag:Name": Example
+'''
+
+RETURN = '''
+keys:
+  description: list of keys
+  type: complex
+  returned: always
+  contains:
+    key_id:
+      description: ID of key
+      type: str
+      returned: always
+      sample: abcd1234-abcd-1234-5678-ef1234567890
+    key_arn:
+      description: ARN of key
+      type: str
+      returned: always
+      sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+    key_state:
+      description: The state of the key
+      type: str
+      returned: always
+      sample: PendingDeletion
+    key_usage:
+      description: The cryptographic operations for which you can use the key.
+      type: str
+      returned: always
+      sample: ENCRYPT_DECRYPT
+    origin:
+      description:
+        The source of the key's key material. When this value is C(AWS_KMS),
+        AWS KMS created the key material. When this value is C(EXTERNAL), the
+        key material was imported or the CMK lacks key material.
+      type: str
+      returned: always
+      sample: AWS_KMS
+    aws_account_id:
+      description: The AWS Account ID that the key belongs to
+      type: str
+      returned: always
+      sample: 1234567890123
+    creation_date:
+      description: Date of creation of the key
+      type: str
+      returned: always
+      sample: "2017-04-18T15:12:08.551000+10:00"
+    description:
+      description: Description of the key
+      type: str
+      returned: always
+      sample: "My Key for Protecting important stuff"
+    enabled:
+      description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
+      type: str
+      returned: always
+      sample: false
+    enable_key_rotation:
+      description: Whether automatic annual key rotation is enabled.
+      type: bool
+      returned: always
+      sample: false
+    aliases:
+      description: list of aliases associated with the key
+      type: list
+      returned: always
+      sample:
+        - aws/acm
+        - aws/ebs
+    tags:
+      description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
+      type: dict
+      returned: always
+      sample:
+        Name: myKey
+        Purpose: protecting_stuff
+    policies:
+      description: list of policy documents for the keys. Empty when access is denied even if there are policies.
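An additional illustrative pattern, assuming the documented C(keys) return value above: register the result and extract the key ARNs.

- aws_kms_info:
    filters:
      tag-key: Name
  register: kms

- debug:
    msg: "{{ kms.keys | map(attribute='key_arn') | list }}"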
+ type: list + returned: always + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "111111111111" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::111111111111:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" + grants: + description: list of grants associated with a key + type: complex + returned: always + contains: + constraints: + description: Constraints on the encryption context that the grant allows. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details + type: dict + returned: always + sample: + encryption_context_equals: + "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" + creation_date: + description: Date of creation of the grant + type: str + returned: always + sample: "2017-04-18T15:12:08+10:00" + grant_id: + description: The unique ID for the grant + type: str + returned: always + sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 + grantee_principal: + description: The principal that receives the grant's permissions + type: str + returned: always + sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz + issuing_account: + description: The AWS account under which the grant was issued + type: str + returned: always + sample: arn:aws:iam::01234567890:root + key_id: + description: The key ARN to which the grant applies. 
+ type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 + name: + description: The friendly name that identifies the grant + type: str + returned: always + sample: xyz + operations: + description: The list of operations permitted by the grant + type: list + returned: always + sample: + - Decrypt + - RetireGrant + retiring_principal: + description: The principal that can retire the grant + type: str + returned: always + sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3 +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +import traceback + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +# Caching lookup for aliases +_aliases = dict() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_keys_with_backoff(connection): + paginator = connection.get_paginator('list_keys') + return paginator.paginate().build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_aliases_with_backoff(connection): + paginator = connection.get_paginator('list_aliases') + return paginator.paginate().build_full_result() + + +def get_kms_aliases_lookup(connection): + if not _aliases: + for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + # Not all aliases are actually associated with a key + if 'TargetKeyId' in alias: + # strip off leading 'alias/' and add it to key's aliases + if alias['TargetKeyId'] in _aliases: + _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + else: + _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + return _aliases + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_tags_with_backoff(connection, key_id, **kwargs): + return connection.list_resource_tags(KeyId=key_id, **kwargs) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_grants_with_backoff(connection, key_id, **kwargs): + params = dict(KeyId=key_id) + if kwargs.get('tokens'): + params['GrantTokens'] = kwargs['tokens'] + paginator = connection.get_paginator('list_grants') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_kms_metadata_with_backoff(connection, key_id): + return connection.describe_key(KeyId=key_id) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def list_key_policies_with_backoff(connection, key_id): + paginator = connection.get_paginator('list_key_policies') + return paginator.paginate(KeyId=key_id).build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_key_policy_with_backoff(connection, key_id, policy_name): + return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_enable_key_rotation_with_backoff(connection, key_id): + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + return current_rotation_status.get('KeyRotationEnabled') + + +def get_kms_tags(connection, module, key_id): + # Handle pagination here as list_resource_tags does not have + # a paginator + kwargs = {} + tags = [] + more = True + while more: + try: + tag_response = 
get_kms_tags_with_backoff(connection, key_id, **kwargs) + tags.extend(tag_response['Tags']) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'AccessDeniedException': + module.fail_json(msg="Failed to obtain key tags", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + else: + tag_response = {} + if tag_response.get('NextMarker'): + kwargs['Marker'] = tag_response['NextMarker'] + else: + more = False + return tags + + +def get_kms_policies(connection, module, key_id): + try: + policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] + return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for + policy in policies] + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'AccessDeniedException': + module.fail_json(msg="Failed to obtain key policies", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + else: + return [] + + +def key_matches_filter(key, filtr): + if filtr[0] == 'key-id': + return filtr[1] == key['key_id'] + if filtr[0] == 'tag-key': + return filtr[1] in key['tags'] + if filtr[0] == 'tag-value': + return filtr[1] in key['tags'].values() + if filtr[0] == 'alias': + return filtr[1] in key['aliases'] + if filtr[0].startswith('tag:'): + return key['tags'][filtr[0][4:]] == filtr[1] + + +def key_matches_filters(key, filters): + if not filters: + return True + else: + return all([key_matches_filter(key, filtr) for filtr in filters.items()]) + + +def get_key_details(connection, module, key_id, tokens=None): + if not tokens: + tokens = [] + try: + result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to obtain key metadata", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + result['KeyArn'] = result.pop('Arn') + + try: + aliases = get_kms_aliases_lookup(connection) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to obtain aliases", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + result['aliases'] = aliases.get(result['KeyId'], []) + result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + + if module.params.get('pending_deletion'): + return camel_dict_to_snake_dict(result) + + try: + result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to obtain key grants", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + tags = get_kms_tags(connection, module, key_id) + + result = camel_dict_to_snake_dict(result) + result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') + result['policies'] = get_kms_policies(connection, module, key_id) + return result + + +def get_kms_info(connection, module): + try: + keys = get_kms_keys_with_backoff(connection)['Keys'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to obtain keys", + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + return [get_key_details(connection, module, key['KeyId']) for key in keys] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters=dict(type='dict'), + pending_deletion=dict(type='bool', default=False) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + if 
module._name == 'aws_kms_facts': + module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + all_keys = get_kms_info(connection, module) + module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])]) + + +if __name__ == '__main__': + main() diff --git a/aws_region_facts.py b/aws_region_facts.py new file mode 120000 index 00000000000..03b0d29932e --- /dev/null +++ b/aws_region_facts.py @@ -0,0 +1 @@ +aws_region_info.py \ No newline at end of file diff --git a/aws_region_info.py b/aws_region_info.py new file mode 100644 index 00000000000..2427beb0841 --- /dev/null +++ b/aws_region_info.py @@ -0,0 +1,96 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = ''' +module: aws_region_info +short_description: Gather information about AWS regions. +description: + - Gather information about AWS regions. + - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change. +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for + possible filters. Filter names and values are case sensitive. You can also use underscores + instead of dashes (-) in the filter keys, which will take precedence in case of conflict. + default: {} + type: dict +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [botocore, boto3] +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all regions +- aws_region_info: + +# Gather information about a single region +- aws_region_info: + filters: + region-name: eu-west-1 +''' + +RETURN = ''' +regions: + returned: on success + description: > + Regions that match the provided filters. Each element consists of a dict with all the information related + to that region. 
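Because underscores in filter keys are converted to dashes internally, the following two tasks are equivalent; an illustrative sketch based on the documented filters option:

- aws_region_info:
    filters:
      region-name: us-west-1

- aws_region_info:
    filters:
      region_name: us-west-1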
+ type: list + sample: "[{ + 'endpoint': 'ec2.us-west-1.amazonaws.com', + 'region_name': 'us-west-1' + }]" +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + if module._name == 'aws_region_facts': + module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", version='2.13') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items()) + + try: + regions = connection.describe_regions( + Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe regions.") + + module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']]) + + +if __name__ == '__main__': + main() diff --git a/aws_s3_bucket_facts.py b/aws_s3_bucket_facts.py new file mode 120000 index 00000000000..88f68b437a0 --- /dev/null +++ b/aws_s3_bucket_facts.py @@ -0,0 +1 @@ +aws_s3_bucket_info.py \ No newline at end of file diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py new file mode 100644 index 00000000000..9916e297eaa --- /dev/null +++ b/aws_s3_bucket_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_s3_bucket_info +short_description: Lists S3 buckets in AWS +requirements: + - boto3 >= 1.4.4 + - python >= 2.6 +description: + - Lists S3 buckets in AWS + - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(aws_s3_bucket_info) module no longer returns C(ansible_facts)! +author: "Gerben Geijteman (@hyperized)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
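# Illustrative (assumes the documented 'buckets' return value): report the
# most recently created bucket first.
- aws_s3_bucket_info:
  register: result

- debug:
    msg: "{{ result.buckets | sort(attribute='creation_date', reverse=True) | map(attribute='name') | list }}"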
+ +# Note: Only AWS S3 is currently supported + +# Lists all s3 buckets +- aws_s3_bucket_info: + register: result + +- name: List buckets + debug: + msg: "{{ result['buckets'] }}" +''' + +RETURN = ''' +buckets: + description: "List of buckets" + returned: always + sample: + - creation_date: 2017-07-06 15:05:12 +00:00 + name: my_bucket + type: list +''' + +import traceback + +try: + import botocore +except ImportError: + pass # will be detected by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, + get_aws_connection_info) + + +def get_bucket_list(module, connection): + """ + Return result of list_buckets json encoded + :param module: + :param connection: + :return: + """ + try: + buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + return buckets + + +def main(): + """ + Get list of S3 buckets + :return: + """ + + # Ensure we have an empty dict + result = {} + + # Including ec2 argument spec + module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True) + is_old_facts = module._name == 'aws_s3_bucket_facts' + if is_old_facts: + module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + # Verify Boto3 is used + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + # Set up connection + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3) + connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, + **aws_connect_params) + + # Gather results + result['buckets'] = get_bucket_list(module, connection) + + # Send exit + if is_old_facts: + module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result) + else: + module.exit_json(msg="Retrieved s3 info.", **result) + + +if __name__ == '__main__': + main() diff --git a/aws_s3_cors.py b/aws_s3_cors.py new file mode 100644 index 00000000000..5bb05bc59fb --- /dev/null +++ b/aws_s3_cors.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_s3_cors +short_description: Manage CORS for S3 buckets in AWS +description: + - Manage CORS for S3 buckets in AWS +author: "Oyvind Saltvik (@fivethreeo)" +options: + name: + description: + - Name of the s3 bucket + required: true + type: str + rules: + description: + - Cors rules to put on the s3 bucket + type: list + state: + description: + - Create or remove cors on the s3 bucket + required: true + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
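# Illustrative: a minimal wildcard rule permitting cross-origin GETs from any
# origin (option values assumed from the documentation above).
- aws_s3_cors:
    name: mys3bucket
    state: present
    rules:
      - allowed_origins: ['*']
        allowed_methods: ['GET']
        max_age_seconds: 3000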
+ +# Create a simple cors for s3 bucket +- aws_s3_cors: + name: mys3bucket + state: present + rules: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - Authorization + expose_headers: + - x-amz-server-side-encryption + - x-amz-request-id + max_age_seconds: 30000 + +# Remove cors for s3 bucket +- aws_s3_cors: + name: mys3bucket + state: absent +''' + +RETURN = ''' +changed: + description: check to see if a change was made to the rules + returned: always + type: bool + sample: true +name: + description: name of bucket + returned: always + type: str + sample: 'bucket-name' +rules: + description: list of current rules + returned: always + type: list + sample: [ + { + "allowed_headers": [ + "Authorization" + ], + "allowed_methods": [ + "GET" + ], + "allowed_origins": [ + "*" + ], + "max_age_seconds": 30000 + } + ] +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except Exception: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies + + +def create_or_update_bucket_cors(connection, module): + + name = module.params.get("name") + rules = module.params.get("rules", []) + changed = False + + try: + current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules'] + except ClientError: + current_camel_rules = [] + + new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True) + # compare_policies() takes two dicts and makes them hashable for comparison + if compare_policies(new_camel_rules, current_camel_rules): + changed = True + + if changed: + try: + cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules}) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) + + module.exit_json(changed=changed, name=name, rules=rules) + + +def destroy_bucket_cors(connection, module): + + name = module.params.get("name") + changed = False + + try: + cors = connection.delete_bucket_cors(Bucket=name) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name)) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = dict( + name=dict(required=True, type='str'), + rules=dict(type='list'), + state=dict(type='str', choices=['present', 'absent'], required=True) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + client = module.client('s3') + + state = module.params.get("state") + + if state == 'present': + create_or_update_bucket_cors(client, module) + elif state == 'absent': + destroy_bucket_cors(client, module) + + +if __name__ == '__main__': + main() diff --git a/aws_secret.py b/aws_secret.py new file mode 100644 index 00000000000..ff6fb88358e --- /dev/null +++ b/aws_secret.py @@ -0,0 +1,404 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, REY Remi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: aws_secret +short_description: Manage secrets stored in AWS Secrets Manager. 
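An additional illustrative task for the module documented below, assuming its rotation_lambda and rotation_interval options and a hypothetical Lambda ARN:

- name: Store a secret and rotate it every 30 days
  aws_secret:
    name: 'test_secret_string'
    state: present
    secret: "{{ super_secret_string }}"
    rotation_lambda: 'arn:aws:lambda:eu-west-1:123456789012:function:rotate-secret'  # hypothetical
    rotation_interval: 30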
+description:
+    - Create, update, and delete secrets stored in AWS Secrets Manager.
+author: "REY Remi (@rrey)"
+requirements: [ 'botocore>=1.10.0', 'boto3' ]
+options:
+  name:
+    description:
+    - Friendly name for the secret you are creating.
+    required: true
+    type: str
+  state:
+    description:
+    - Whether the secret should exist or not.
+    default: 'present'
+    choices: ['present', 'absent']
+    type: str
+  recovery_window:
+    description:
+    - Only used if state is absent.
+    - Specifies the number of days that Secrets Manager waits before it can delete the secret.
+    - If set to 0, the deletion is forced without recovery.
+    default: 30
+    type: int
+  description:
+    description:
+    - Specifies a user-provided description of the secret.
+    type: str
+  kms_key_id:
+    description:
+    - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
+      used to encrypt the `secret_string` or `secret_binary` values in the versions stored in this secret.
+    type: str
+  secret_type:
+    description:
+    - Specifies the type of data that you want to encrypt.
+    choices: ['binary', 'string']
+    default: 'string'
+    type: str
+  secret:
+    description:
+    - Specifies string or binary data that you want to encrypt and store in the new version of the secret.
+    default: ""
+    type: str
+  tags:
+    description:
+    - Specifies a dict of user-defined tags that are attached to the secret.
+    type: dict
+  rotation_lambda:
+    description:
+    - Specifies the ARN of the Lambda function that can rotate the secret.
+    type: str
+  rotation_interval:
+    description:
+    - Specifies the number of days between automatic scheduled rotations of the secret.
+    default: 30
+    type: int
+extends_documentation_fragment:
+- ansible.amazon.ec2
+- ansible.amazon.aws
+
+'''
+
+
+EXAMPLES = r'''
+- name: Add string to AWS Secrets Manager
+  aws_secret:
+    name: 'test_secret_string'
+    state: present
+    secret_type: 'string'
+    secret: "{{ super_secret_string }}"
+
+- name: remove string from AWS Secrets Manager
+  aws_secret:
+    name: 'test_secret_string'
+    state: absent
+    secret_type: 'string'
+    secret: "{{ super_secret_string }}"
+'''
+
+
+RETURN = r'''
+secret:
+  description: The secret information
+  returned: always
+  type: complex
+  contains:
+    arn:
+      description: The ARN of the secret
+      returned: always
+      type: str
+      sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
+    last_accessed_date:
+      description: The date the secret was last accessed
+      returned: always
+      type: str
+      sample: '2018-11-20T01:00:00+01:00'
+    last_changed_date:
+      description: The date the secret was last modified.
+      returned: always
+      type: str
+      sample: '2018-11-20T12:16:38.433000+01:00'
+    name:
+      description: The secret name.
+      returned: always
+      type: str
+      sample: my_secret
+    rotation_enabled:
+      description: The secret rotation status.
+      returned: always
+      type: bool
+      sample: false
+    version_ids_to_stages:
+      description: Provide the secret version ids and the associated secret stage.
+      returned: always
+      type: dict
+      sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
+'''
+
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+
+class Secret(object):
+    """An object representation of the Secret described by the self.module args"""
+    def __init__(self, name, secret_type, secret, description="", kms_key_id=None,
+                 tags=None, lambda_arn=None, rotation_interval=None):
+        self.name = name
+        self.description = description
+        self.kms_key_id = kms_key_id
+        if secret_type == "binary":
+            self.secret_type = "SecretBinary"
+        else:
+            self.secret_type = "SecretString"
+        self.secret = secret
+        self.tags = tags or {}
+        self.rotation_enabled = False
+        if lambda_arn:
+            self.rotation_enabled = True
+            self.rotation_lambda_arn = lambda_arn
+            self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}
+
+    @property
+    def create_args(self):
+        args = {
+            "Name": self.name
+        }
+        if self.description:
+            args["Description"] = self.description
+        if self.kms_key_id:
+            args["KmsKeyId"] = self.kms_key_id
+        if self.tags:
+            args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
+        args[self.secret_type] = self.secret
+        return args
+
+    @property
+    def update_args(self):
+        args = {
+            "SecretId": self.name
+        }
+        if self.description:
+            args["Description"] = self.description
+        if self.kms_key_id:
+            args["KmsKeyId"] = self.kms_key_id
+        args[self.secret_type] = self.secret
+        return args
+
+    @property
+    def boto3_tags(self):
+        return ansible_dict_to_boto3_tag_list(self.tags)
+
+    def as_dict(self):
+        result = self.__dict__
+        result.pop("tags")
+        return snake_dict_to_camel_dict(result)
+
+
+class SecretsManagerInterface(object):
+    """An interface with SecretsManager"""
+
+    def __init__(self, module):
+        self.module = module
+        self.client = self.module.client('secretsmanager')
+
+    def get_secret(self, name):
+        try:
+            secret = self.client.describe_secret(SecretId=name)
+        except self.client.exceptions.ResourceNotFoundException:
+            secret = None
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Failed to describe secret")
+        return secret
+
+    def create_secret(self, secret):
+        if self.module.check_mode:
+            self.module.exit_json(changed=True)
+        try:
+            created_secret = self.client.create_secret(**secret.create_args)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to create secret")
+
+        if secret.rotation_enabled:
+            response = self.update_rotation(secret)
+            created_secret["VersionId"] = response.get("VersionId")
+        return created_secret
+
+    def update_secret(self, secret):
+        if self.module.check_mode:
+            self.module.exit_json(changed=True)
+
+        try:
+            response = self.client.update_secret(**secret.update_args)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to update secret")
+        return response
+
+    def restore_secret(self, name):
+        if self.module.check_mode:
+            self.module.exit_json(changed=True)
+        try:
+            response = self.client.restore_secret(SecretId=name)
+        except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Failed to restore secret") + return response + + def delete_secret(self, name, recovery_window): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + if recovery_window == 0: + response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True) + else: + response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to delete secret") + return response + + def update_rotation(self, secret): + if secret.rotation_enabled: + try: + response = self.client.rotate_secret( + SecretId=secret.name, + RotationLambdaARN=secret.rotation_lambda_arn, + RotationRules=secret.rotation_rules) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to rotate secret secret") + else: + try: + response = self.client.cancel_rotate_secret(SecretId=secret.name) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to cancel rotation") + return response + + def tag_secret(self, secret_name, tags): + try: + self.client.tag_resource(SecretId=secret_name, Tags=tags) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret") + + def untag_secret(self, secret_name, tag_keys): + try: + self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret") + + def secrets_match(self, desired_secret, current_secret): + """Compare secrets except tags and rotation + + Args: + desired_secret: camel dict representation of the desired secret state. + current_secret: secret reference as returned by the secretsmanager api. + + Returns: bool + """ + if desired_secret.description != current_secret.get("Description", ""): + return False + if desired_secret.kms_key_id != current_secret.get("KmsKeyId"): + return False + current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name")) + if desired_secret.secret_type == 'SecretBinary': + desired_value = to_bytes(desired_secret.secret) + else: + desired_value = desired_secret.secret + if desired_value != current_secret_value.get(desired_secret.secret_type): + return False + return True + + +def rotation_match(desired_secret, current_secret): + """Compare secrets rotation configuration + + Args: + desired_secret: camel dict representation of the desired secret state. + current_secret: secret reference as returned by the secretsmanager api. 
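+
+    Example shapes (illustrative; based on how Secret builds its rotation
+    attributes and on the describe_secret response):
+        desired_secret.rotation_rules -> {"AutomaticallyAfterDays": 30}
+        current_secret.get("RotationRules") -> {"AutomaticallyAfterDays": 30}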
+ + Returns: bool + """ + if desired_secret.rotation_enabled != current_secret.get("RotationEnabled", False): + return False + if desired_secret.rotation_enabled: + if desired_secret.rotation_lambda_arn != current_secret.get("RotationLambdaARN"): + return False + if desired_secret.rotation_rules != current_secret.get("RotationRules"): + return False + return True + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(required=True), + 'state': dict(choices=['present', 'absent'], default='present'), + 'description': dict(default=""), + 'kms_key_id': dict(), + 'secret_type': dict(choices=['binary', 'string'], default="string"), + 'secret': dict(default=""), + 'tags': dict(type='dict', default={}), + 'rotation_lambda': dict(), + 'rotation_interval': dict(type='int', default=30), + 'recovery_window': dict(type='int', default=30), + }, + supports_check_mode=True, + ) + + changed = False + state = module.params.get('state') + secrets_mgr = SecretsManagerInterface(module) + recovery_window = module.params.get('recovery_window') + secret = Secret( + module.params.get('name'), + module.params.get('secret_type'), + module.params.get('secret'), + description=module.params.get('description'), + kms_key_id=module.params.get('kms_key_id'), + tags=module.params.get('tags'), + lambda_arn=module.params.get('rotation_lambda'), + rotation_interval=module.params.get('rotation_interval') + ) + + current_secret = secrets_mgr.get_secret(secret.name) + + if state == 'absent': + if current_secret: + if not current_secret.get("DeletedDate"): + result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + changed = True + elif current_secret.get("DeletedDate") and recovery_window == 0: + result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + changed = True + else: + result = "secret does not exist" + if state == 'present': + if current_secret is None: + result = secrets_mgr.create_secret(secret) + changed = True + else: + if current_secret.get("DeletedDate"): + secrets_mgr.restore_secret(secret.name) + changed = True + if not secrets_mgr.secrets_match(secret, current_secret): + result = secrets_mgr.update_secret(secret) + changed = True + if not rotation_match(secret, current_secret): + result = secrets_mgr.update_rotation(secret) + changed = True + current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags) + if tags_to_add: + secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) + changed = True + if tags_to_remove: + secrets_mgr.untag_secret(secret.name, tags_to_remove) + changed = True + result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) + result.pop("response_metadata") + module.exit_json(changed=changed, secret=result) + + +if __name__ == '__main__': + main() diff --git a/aws_ses_identity.py b/aws_ses_identity.py new file mode 100644 index 00000000000..2afc5d6abfd --- /dev/null +++ b/aws_ses_identity.py @@ -0,0 +1,546 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aws_ses_identity +short_description: Manages 
SES email and domain identity +description: + - This module allows the user to manage verified email and domain identity for SES. + - This covers verifying and removing identities as well as setting up complaint, bounce + and delivery notification settings. +author: Ed Costello (@orthanc) + +options: + identity: + description: + - This is the email address or domain to verify / delete. + - If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain. + required: true + type: str + state: + description: Whether to create(or update) or delete the identity. + default: present + choices: [ 'present', 'absent' ] + type: str + bounce_notifications: + description: + - Setup the SNS topic used to report bounce notifications. + - If omitted, bounce notifications will not be delivered to a SNS topic. + - If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled. + suboptions: + topic: + description: + - The ARN of the topic to send notifications to. + - If omitted, notifications will not be delivered to a SNS topic. + include_headers: + description: + - Whether or not to include headers when delivering to the SNS topic. + - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. + type: bool + default: No + type: dict + complaint_notifications: + description: + - Setup the SNS topic used to report complaint notifications. + - If omitted, complaint notifications will not be delivered to a SNS topic. + - If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled. + suboptions: + topic: + description: + - The ARN of the topic to send notifications to. + - If omitted, notifications will not be delivered to a SNS topic. + include_headers: + description: + - Whether or not to include headers when delivering to the SNS topic. + - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. + type: bool + default: No + type: dict + delivery_notifications: + description: + - Setup the SNS topic used to report delivery notifications. + - If omitted, delivery notifications will not be delivered to a SNS topic. + suboptions: + topic: + description: + - The ARN of the topic to send notifications to. + - If omitted, notifications will not be delivered to a SNS topic. + include_headers: + description: + - Whether or not to include headers when delivering to the SNS topic. + - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. + type: bool + default: No + type: dict + feedback_forwarding: + description: + - Whether or not to enable feedback forwarding. + - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics. + type: 'bool' + default: True +requirements: [ 'botocore', 'boto3' ] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
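+
+# Illustrative pattern: publish the returned verification token for a domain
+# identity as a DNS TXT record so SES can complete verification. The zone and
+# record values below are assumptions for the example.
+- name: Request verification for example.com and capture the token
+  aws_ses_identity:
+    identity: example.com
+    state: present
+  register: domain_identity
+
+- name: Publish the SES verification token in DNS
+  route53:
+    state: present
+    zone: example.com
+    record: _amazonses.example.com
+    type: TXT
+    ttl: 300
+    value: '"{{ domain_identity.verification_attributes.verification_token }}"'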
+
+- name: Ensure example@example.com email identity exists
+  aws_ses_identity:
+    identity: example@example.com
+    state: present
+
+- name: Delete example@example.com email identity
+  aws_ses_identity:
+    identity: example@example.com
+    state: absent
+
+- name: Ensure example.com domain identity exists
+  aws_ses_identity:
+    identity: example.com
+    state: present
+
+# Create an SNS topic and send bounce and complaint notifications to it
+# instead of emailing the identity owner
+- name: Ensure complaints-topic exists
+  sns_topic:
+    name: "complaints-topic"
+    state: present
+    purge_subscriptions: False
+  register: topic_info
+
+- name: Deliver feedback to topic instead of owner email
+  aws_ses_identity:
+    identity: example@example.com
+    state: present
+    complaint_notifications:
+      topic: "{{ topic_info.sns_arn }}"
+      include_headers: True
+    bounce_notifications:
+      topic: "{{ topic_info.sns_arn }}"
+      include_headers: False
+    feedback_forwarding: False
+
+# Create an SNS topic for delivery notifications and leave complaints
+# being forwarded to the identity owner email
+- name: Ensure delivery-notifications-topic exists
+  sns_topic:
+    name: "delivery-notifications-topic"
+    state: present
+    purge_subscriptions: False
+  register: topic_info
+
+- name: Send delivery notifications to topic
+  aws_ses_identity:
+    identity: example@example.com
+    state: present
+    delivery_notifications:
+      topic: "{{ topic_info.sns_arn }}"
+'''
+
+RETURN = '''
+identity:
+  description: The identity being modified.
+  returned: success
+  type: str
+  sample: example@example.com
+identity_arn:
+  description: The ARN of the identity being modified.
+  returned: success
+  type: str
+  sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
+verification_attributes:
+  description: The verification information for the identity.
+  returned: success
+  type: complex
+  sample: {
+    "verification_status": "Pending",
+    "verification_token": "...."
+  }
+  contains:
+    verification_status:
+      description: The verification status of the identity.
+      type: str
+      sample: "Pending"
+    verification_token:
+      description: The verification token for a domain identity.
+      type: str
+notification_attributes:
+  description: The notification setup for the identity.
+  returned: success
+  type: complex
+  sample: {
+    "bounce_topic": "arn:aws:sns:....",
+    "complaint_topic": "arn:aws:sns:....",
+    "delivery_topic": "arn:aws:sns:....",
+    "forwarding_enabled": false,
+    "headers_in_bounce_notifications_enabled": true,
+    "headers_in_complaint_notifications_enabled": true,
+    "headers_in_delivery_notifications_enabled": true
+  }
+  contains:
+    bounce_topic:
+      description:
+        - The ARN of the topic bounce notifications are delivered to.
+        - Omitted if bounce notifications are not delivered to a topic.
+      type: str
+    complaint_topic:
+      description:
+        - The ARN of the topic complaint notifications are delivered to.
+        - Omitted if complaint notifications are not delivered to a topic.
+      type: str
+    delivery_topic:
+      description:
+        - The ARN of the topic delivery notifications are delivered to.
+        - Omitted if delivery notifications are not delivered to a topic.
+      type: str
+    forwarding_enabled:
+      description: Whether or not feedback forwarding is enabled.
+      type: bool
+    headers_in_bounce_notifications_enabled:
+      description: Whether or not headers are included in messages delivered to the bounce topic.
+      type: bool
+    headers_in_complaint_notifications_enabled:
+      description: Whether or not headers are included in messages delivered to the complaint topic.
+      type: bool
+    headers_in_delivery_notifications_enabled:
+      description: Whether or not headers are included in messages delivered to the delivery topic.
+      type: bool
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info
+
+import time
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
+    # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
+    # just registered it. Suspect this is an eventual consistency issue on the AWS side.
+    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
+    # a consistent return from the module.
+    # To avoid this we have an internal retry that we use only after registering the identity.
+    for attempt in range(0, retries + 1):
+        try:
+            response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
+        identity_verification = response['VerificationAttributes']
+        if identity in identity_verification:
+            break
+        time.sleep(retryDelay)
+    if identity not in identity_verification:
+        return None
+    return identity_verification[identity]
+
+
+def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
+    # Unpredictably get_identity_notifications doesn't include the notifications when we've
+    # just registered the identity.
+    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
+    # a consistent return from the module.
+    # To avoid this we have an internal retry that we use only when getting the current notification
+    # status for return.
+    for attempt in range(0, retries + 1):
+        try:
+            response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
+        notification_attributes = response['NotificationAttributes']
+
+        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
+        # in the notification attributes when the identity is first registered. Suspect that this is caused by
+        # eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
+        #
+        # When this occurs, just return None and we'll assume no identity notification settings have been changed
+        # from the default, which is reasonable if this is just eventual consistency on creation.
+        # See: https://github.com/ansible/ansible/issues/36065
+        if identity in notification_attributes:
+            break
+        else:
+            # Paranoia check for coding errors, we only requested one identity, so if we get a different one
+            # something has gone very wrong.
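+            # For reference, notification_attributes is keyed by identity and
+            # shaped roughly like (identity and values illustrative):
+            #   {'example@example.com': {'ForwardingEnabled': True, ...}}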
+            if len(notification_attributes) != 0:
+                module.fail_json(
+                    msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
+                        identity,
+                        notification_attributes.keys(),
+                    )
+                )
+        time.sleep(retryDelay)
+    if identity not in notification_attributes:
+        return None
+    return notification_attributes[identity]
+
+
+def desired_topic(module, notification_type):
+    arg_dict = module.params.get(notification_type.lower() + '_notifications')
+    if arg_dict:
+        return arg_dict.get('topic', None)
+    else:
+        return None
+
+
+def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
+    topic_key = notification_type + 'Topic'
+    if identity_notifications is None:
+        # If there is no notification configuration at all, notifications cannot be
+        # sent to topics, hence assume None as the current state.
+        current = None
+    elif topic_key in identity_notifications:
+        current = identity_notifications[topic_key]
+    else:
+        # If there is information on the notifications setup but no information on the
+        # particular notification topic it's pretty safe to assume there's no topic for
+        # this notification. AWS API docs suggest this information will always be
+        # included but best to be defensive.
+        current = None
+
+    required = desired_topic(module, notification_type)
+
+    if current != required:
+        try:
+            if not module.check_mode:
+                connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
+                identity=identity,
+                notification_type=notification_type,
+            ))
+        return True
+    return False
+
+
+def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
+    arg_dict = module.params.get(notification_type.lower() + '_notifications')
+    header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+    if identity_notifications is None:
+        # If there is no configuration for topic notifications, headers cannot be
+        # included, hence assume False.
+        current = False
+    elif header_key in identity_notifications:
+        current = identity_notifications[header_key]
+    else:
+        # AWS API doc indicates that the headers in fields are optional. Unfortunately
+        # it's not clear on what this means. But it's a pretty safe assumption that it means
+        # headers are not included since most API consumers would interpret absence as false.
+        current = False
+
+    if arg_dict is not None and 'include_headers' in arg_dict:
+        required = arg_dict['include_headers']
+    else:
+        required = False
+
+    if current != required:
+        try:
+            if not module.check_mode:
+                connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
+                                                                         aws_retry=True)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
+                identity=identity,
+                notification_type=notification_type,
+            ))
+        return True
+    return False
+
+
+def update_feedback_forwarding(connection, module, identity, identity_notifications):
+    if identity_notifications is None:
+        # AWS requires feedback forwarding to be enabled unless bounces and complaints
+        # are being handled by SNS topics. So in the absence of identity_notifications
+        # information, existing feedback forwarding must be on.
+ current = True + elif 'ForwardingEnabled' in identity_notifications: + current = identity_notifications['ForwardingEnabled'] + else: + # If there is information on the notifications setup but no information on the + # forwarding state it's pretty safe to assume forwarding is off. AWS API docs + # suggest this information will always be included but best to be defensive + current = False + + required = module.params.get('feedback_forwarding') + + if current != required: + try: + if not module.check_mode: + connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity)) + return True + return False + + +def create_mock_notifications_response(module): + resp = { + "ForwardingEnabled": module.params.get('feedback_forwarding'), + } + for notification_type in ('Bounce', 'Complaint', 'Delivery'): + arg_dict = module.params.get(notification_type.lower() + '_notifications') + if arg_dict is not None and 'topic' in arg_dict: + resp[notification_type + 'Topic'] = arg_dict['topic'] + + header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' + if arg_dict is not None and 'include_headers' in arg_dict: + resp[header_key] = arg_dict['include_headers'] + else: + resp[header_key] = False + return resp + + +def update_identity_notifications(connection, module): + identity = module.params.get('identity') + changed = False + identity_notifications = get_identity_notifications(connection, module, identity) + + for notification_type in ('Bounce', 'Complaint', 'Delivery'): + changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type) + changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type) + + changed |= update_feedback_forwarding(connection, module, identity, identity_notifications) + + if changed or identity_notifications is None: + if module.check_mode: + identity_notifications = create_mock_notifications_response(module) + else: + identity_notifications = get_identity_notifications(connection, module, identity, retries=4) + return changed, identity_notifications + + +def validate_params_for_identity_present(module): + if module.params.get('feedback_forwarding') is False: + if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')): + module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. 
AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics") + + +def create_or_update_identity(connection, module, region, account_id): + identity = module.params.get('identity') + changed = False + verification_attributes = get_verification_attributes(connection, module, identity) + if verification_attributes is None: + try: + if not module.check_mode: + if '@' in identity: + connection.verify_email_identity(EmailAddress=identity, aws_retry=True) + else: + connection.verify_domain_identity(Domain=identity, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity)) + if module.check_mode: + verification_attributes = { + "VerificationStatus": "Pending", + } + else: + verification_attributes = get_verification_attributes(connection, module, identity, retries=4) + changed = True + elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'): + module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'], + verification_attributes=camel_dict_to_snake_dict(verification_attributes)) + + if verification_attributes is None: + module.fail_json(msg='Unable to load identity verification attributes after registering identity.') + + notifications_changed, notification_attributes = update_identity_notifications(connection, module) + changed |= notifications_changed + + if notification_attributes is None: + module.fail_json(msg='Unable to load identity notification attributes.') + + identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity + + module.exit_json( + changed=changed, + identity=identity, + identity_arn=identity_arn, + verification_attributes=camel_dict_to_snake_dict(verification_attributes), + notification_attributes=camel_dict_to_snake_dict(notification_attributes), + ) + + +def destroy_identity(connection, module): + identity = module.params.get('identity') + changed = False + verification_attributes = get_verification_attributes(connection, module, identity) + if verification_attributes is not None: + try: + if not module.check_mode: + connection.delete_identity(Identity=identity, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity)) + changed = True + + module.exit_json( + changed=changed, + identity=identity, + ) + + +def get_account_id(module): + sts = module.client('sts') + try: + caller_identity = sts.get_caller_identity() + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to retrieve caller identity') + return caller_identity['Account'] + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + "identity": dict(required=True, type='str'), + "state": dict(default='present', choices=['present', 'absent']), + "bounce_notifications": dict(type='dict'), + "complaint_notifications": dict(type='dict'), + "delivery_notifications": dict(type='dict'), + "feedback_forwarding": dict(default=True, type='bool'), + }, + supports_check_mode=True, + ) + + for notification_type in ('bounce', 'complaint', 'delivery'): + param_name = notification_type + '_notifications' + arg_dict = module.params.get(param_name) + if arg_dict: + extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')] + if extra_keys: + module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are 
topic or include_headers') + + # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. + # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but + # the ansible build runs multiple instances of the test in parallel that's caused throttling + # failures so apply a jittered backoff to call SES calls. + connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + + if state == 'present': + region = get_aws_connection_info(module, boto3=True)[0] + account_id = get_account_id(module) + validate_params_for_identity_present(module) + create_or_update_identity(connection, module, region, account_id) + else: + destroy_identity(connection, module) + + +if __name__ == '__main__': + main() diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py new file mode 100644 index 00000000000..08cf4a7b7ac --- /dev/null +++ b/aws_ses_identity_policy.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aws_ses_identity_policy +short_description: Manages SES sending authorization policies +description: + - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain). + - SES authorization sending policies can be used to control what actors are able to send email + on behalf of the validated identity and what conditions must be met by the sent emails. +author: Ed Costello (@orthanc) + +options: + identity: + description: | + The SES identity to attach or remove a policy from. This can be either the full ARN or just + the verified email or domain. + required: true + type: str + policy_name: + description: The name used to identify the policy within the scope of the identity it's attached to. + required: true + type: str + policy: + description: A properly formatted JSON sending authorization policy. Required when I(state=present). + type: json + state: + description: Whether to create(or update) or delete the authorization policy on the identity. + default: present + choices: [ 'present', 'absent' ] + type: str +requirements: [ 'botocore', 'boto3' ] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
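+
+# Illustrative only: the account id and policy document below are assumptions.
+# The policy can be passed inline as JSON instead of a template file.
+- name: add sending authorization policy from an inline document
+  aws_ses_identity_policy:
+    identity: example.com
+    policy_name: ExampleInlinePolicy
+    policy: "{{ my_policy | to_json }}"
+    state: present
+  vars:
+    my_policy:
+      Version: "2012-10-17"
+      Statement:
+        - Sid: AllowPartnerSending
+          Effect: Allow
+          Principal:
+            AWS: "arn:aws:iam::123456789012:root"
+          Action:
+            - "ses:SendEmail"
+            - "ses:SendRawEmail"
+          Resource: "arn:aws:ses:us-east-1:123456789012:identity/example.com"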
+ +- name: add sending authorization policy to domain identity + aws_ses_identity_policy: + identity: example.com + policy_name: ExamplePolicy + policy: "{{ lookup('template', 'policy.json.j2') }}" + state: present + +- name: add sending authorization policy to email identity + aws_ses_identity_policy: + identity: example@example.com + policy_name: ExamplePolicy + policy: "{{ lookup('template', 'policy.json.j2') }}" + state: present + +- name: add sending authorization policy to identity using ARN + aws_ses_identity_policy: + identity: "arn:aws:ses:us-east-1:12345678:identity/example.com" + policy_name: ExamplePolicy + policy: "{{ lookup('template', 'policy.json.j2') }}" + state: present + +- name: remove sending authorization policy + aws_ses_identity_policy: + identity: example.com + policy_name: ExamplePolicy + state: absent +''' + +RETURN = ''' +policies: + description: A list of all policies present on the identity after the operation. + returned: success + type: list + sample: [ExamplePolicy] +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies, AWSRetry + +import json + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_identity_policy(connection, module, identity, policy_name): + try: + response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name)) + policies = response['Policies'] + if policy_name in policies: + return policies[policy_name] + return None + + +def create_or_update_identity_policy(connection, module): + identity = module.params.get('identity') + policy_name = module.params.get('policy_name') + required_policy = module.params.get('policy') + required_policy_dict = json.loads(required_policy) + + changed = False + policy = get_identity_policy(connection, module, identity, policy_name) + policy_dict = json.loads(policy) if policy else None + if compare_policies(policy_dict, required_policy_dict): + changed = True + try: + if not module.check_mode: + connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name)) + + # Load the list of applied policies to include in the response. + # In principle we should be able to just return the response, but given + # eventual consistency behaviours in AWS it's plausible that we could + # end up with a list that doesn't contain the policy we just added. + # So out of paranoia check for this case and if we're missing the policy + # just make sure it's present. 
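+    # (For reference, list_identity_policies responds with a shape like
+    # {'PolicyNames': ['ExamplePolicy']} -- the name shown is illustrative.)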
+ # + # As a nice side benefit this also means the return is correct in check mode + try: + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to list identity policies') + if policy_name is not None and policy_name not in policies_present: + policies_present = list(policies_present) + policies_present.append(policy_name) + module.exit_json( + changed=changed, + policies=policies_present, + ) + + +def delete_identity_policy(connection, module): + identity = module.params.get('identity') + policy_name = module.params.get('policy_name') + + changed = False + try: + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to list identity policies') + if policy_name in policies_present: + try: + if not module.check_mode: + connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name)) + changed = True + policies_present = list(policies_present) + policies_present.remove(policy_name) + + module.exit_json( + changed=changed, + policies=policies_present, + ) + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'identity': dict(required=True, type='str'), + 'state': dict(default='present', choices=['present', 'absent']), + 'policy_name': dict(required=True, type='str'), + 'policy': dict(type='json', default=None), + }, + required_if=[['state', 'present', ['policy']]], + supports_check_mode=True, + ) + + # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. + # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but + # the ansible build runs multiple instances of the test in parallel that's caused throttling + # failures so apply a jittered backoff to call SES calls. + connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + + if state == 'present': + create_or_update_identity_policy(connection, module) + else: + delete_identity_policy(connection, module) + + +if __name__ == '__main__': + main() diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py new file mode 100644 index 00000000000..4c9fd730a11 --- /dev/null +++ b/aws_ses_rule_set.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# Copyright (c) 2017, Ben Tomasik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: aws_ses_rule_set +short_description: Manages SES inbound receipt rule sets +description: + - The M(aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets +author: + - "Ben Tomasik (@tomislacker)" + - "Ed Costello (@orthanc)" +requirements: [ boto3, botocore ] +options: + name: + description: + - The name of the receipt rule set. + required: True + type: str + state: + description: + - Whether to create (or update) or destroy the receipt rule set. 
+    required: False
+    default: present
+    choices: ["absent", "present"]
+    type: str
+  active:
+    description:
+      - Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
+      - If omitted, the active rule set will not be changed.
+      - If C(True) then this rule set will be made active and all others inactive.
+      - If C(False) then this rule set will be deactivated. Be careful with this as you can end up with no active rule set.
+    type: bool
+    required: False
+  force:
+    description:
+      - When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
+    type: bool
+    required: False
+    default: False
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+---
+- name: Create default rule set and activate it if not already
+  aws_ses_rule_set:
+    name: default-rule-set
+    state: present
+    active: yes
+
+- name: Create some arbitrary rule set but do not activate it
+  aws_ses_rule_set:
+    name: arbitrary-rule-set
+    state: present
+
+- name: Explicitly deactivate the default rule set leaving no active rule set
+  aws_ses_rule_set:
+    name: default-rule-set
+    state: present
+    active: no
+
+- name: Remove an arbitrary inactive rule set
+  aws_ses_rule_set:
+    name: arbitrary-rule-set
+    state: absent
+
+- name: Remove a rule set even if we have to first deactivate it to remove it
+  aws_ses_rule_set:
+    name: default-rule-set
+    state: absent
+    force: yes
+"""
+
+RETURN = """
+active:
+  description: If the SES rule set is active.
+  returned: success if I(state) is C(present)
+  type: bool
+  sample: true
+rule_sets:
+  description: The list of SES receipt rule sets that exist after any changes.
+ returned: success + type: list + sample: [{ + "created_timestamp": "2018-02-25T01:20:32.690000+00:00", + "name": "default-rule-set" + }] +""" + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + + +def list_rule_sets(client, module): + try: + response = client.list_receipt_rule_sets(aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't list rule sets.") + return response['RuleSets'] + + +def rule_set_in(name, rule_sets): + return any([s for s in rule_sets if s['Name'] == name]) + + +def ruleset_active(client, module, name): + try: + active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't get the active rule set.") + if active_rule_set is not None and 'Metadata' in active_rule_set: + return name == active_rule_set['Metadata']['Name'] + else: + # Metadata was not set meaning there is no active rule set + return False + + +def deactivate_rule_set(client, module): + try: + # No ruleset name deactivates all rulesets + client.set_active_receipt_rule_set(aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't set active rule set to None.") + + +def update_active_rule_set(client, module, name, desired_active): + check_mode = module.check_mode + + active = ruleset_active(client, module, name) + + changed = False + if desired_active is not None: + if desired_active and not active: + if not check_mode: + try: + client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name)) + changed = True + active = True + elif not desired_active and active: + if not check_mode: + deactivate_rule_set(client, module) + changed = True + active = False + return changed, active + + +def create_or_update_rule_set(client, module): + name = module.params.get('name') + check_mode = module.check_mode + changed = False + + rule_sets = list_rule_sets(client, module) + if not rule_set_in(name, rule_sets): + if not check_mode: + try: + client.create_receipt_rule_set(RuleSetName=name, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) + changed = True + rule_sets = list(rule_sets) + rule_sets.append({ + 'Name': name, + }) + + (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active')) + changed |= active_changed + + module.exit_json( + changed=changed, + active=active, + rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets], + ) + + +def remove_rule_set(client, module): + name = module.params.get('name') + check_mode = module.check_mode + changed = False + + rule_sets = list_rule_sets(client, module) + if rule_set_in(name, rule_sets): + active = ruleset_active(client, module, name) + if active and not module.params.get('force'): + module.fail_json( + msg="Couldn't delete rule set {0} because it is currently active. 
Set force=true to delete an active ruleset.".format(name), + error={ + "code": "CannotDelete", + "message": "Cannot delete active rule set: {0}".format(name), + } + ) + if not check_mode: + if active and module.params.get('force'): + deactivate_rule_set(client, module) + try: + client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) + changed = True + rule_sets = [x for x in rule_sets if x['Name'] != name] + + module.exit_json( + changed=changed, + rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets], + ) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + active=dict(type='bool'), + force=dict(type='bool', default=False), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + state = module.params.get('state') + + # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. + # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but + # the ansible build runs multiple instances of the test in parallel that's caused throttling + # failures so apply a jittered backoff to call SES calls. + client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + + if state == 'absent': + remove_rule_set(client, module) + else: + create_or_update_rule_set(client, module) + + +if __name__ == '__main__': + main() diff --git a/aws_sgw_facts.py b/aws_sgw_facts.py new file mode 120000 index 00000000000..0af0560a3b2 --- /dev/null +++ b/aws_sgw_facts.py @@ -0,0 +1 @@ +aws_sgw_info.py \ No newline at end of file diff --git a/aws_sgw_info.py b/aws_sgw_info.py new file mode 100644 index 00000000000..2a734cbde2a --- /dev/null +++ b/aws_sgw_info.py @@ -0,0 +1,361 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Loic BLOT (@nerzhul) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# This module is sponsored by E.T.A.I. (www.etai.fr) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_sgw_info +short_description: Fetch AWS Storage Gateway information +description: + - Fetch AWS Storage Gateway information + - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: Loic Blot (@nerzhul) +options: + gather_local_disks: + description: + - Gather local disks attached to the storage gateway. + type: bool + required: false + default: true + gather_tapes: + description: + - Gather tape information for storage gateways in tape mode. + type: bool + required: false + default: true + gather_file_shares: + description: + - Gather file share information for storage gateways in s3 mode. + type: bool + required: false + default: true + gather_volumes: + description: + - Gather volume information for storage gateways in iSCSI (cached & stored) modes. 
+ type: bool + required: false + default: true +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +RETURN = ''' +gateways: + description: list of gateway objects + returned: always + type: complex + contains: + gateway_arn: + description: "Storage Gateway ARN" + returned: always + type: str + sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888" + gateway_id: + description: "Storage Gateway ID" + returned: always + type: str + sample: "sgw-9999F888" + gateway_name: + description: "Storage Gateway friendly name" + returned: always + type: str + sample: "my-sgw-01" + gateway_operational_state: + description: "Storage Gateway operational state" + returned: always + type: str + sample: "ACTIVE" + gateway_type: + description: "Storage Gateway type" + returned: always + type: str + sample: "FILE_S3" + file_shares: + description: "Storage gateway file shares" + returned: when gateway_type == "FILE_S3" + type: complex + contains: + file_share_arn: + description: "File share ARN" + returned: always + type: str + sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88" + file_share_id: + description: "File share ID" + returned: always + type: str + sample: "share-AF999C88" + file_share_status: + description: "File share status" + returned: always + type: str + sample: "AVAILABLE" + tapes: + description: "Storage Gateway tapes" + returned: when gateway_type == "VTL" + type: complex + contains: + tape_arn: + description: "Tape ARN" + returned: always + type: str + sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88" + tape_barcode: + description: "Tape ARN" + returned: always + type: str + sample: "tape-AF999C88" + tape_size_in_bytes: + description: "Tape ARN" + returned: always + type: int + sample: 555887569 + tape_status: + description: "Tape ARN" + returned: always + type: str + sample: "AVAILABLE" + local_disks: + description: "Storage gateway local disks" + returned: always + type: complex + contains: + disk_allocation_type: + description: "Disk allocation type" + returned: always + type: str + sample: "CACHE STORAGE" + disk_id: + description: "Disk ID on the system" + returned: always + type: str + sample: "pci-0000:00:1f.0" + disk_node: + description: "Disk parent block device" + returned: always + type: str + sample: "/dev/sdb" + disk_path: + description: "Disk path used for the cache" + returned: always + type: str + sample: "/dev/nvme1n1" + disk_size_in_bytes: + description: "Disk size in bytes" + returned: always + type: int + sample: 107374182400 + disk_status: + description: "Disk status" + returned: always + type: str + sample: "present" +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
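+
+# Illustrative only: limit what is gathered and use the registered result.
+- name: "Get gateway inventory without tape or volume details"
+  aws_sgw_info:
+    gather_tapes: false
+    gather_volumes: false
+  register: sgw_info
+
+- name: "Show the ARN of each gateway"
+  debug:
+    msg: "{{ item.gateway_arn }}"
+  loop: "{{ sgw_info.gateways }}"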
+
+- name: "Get AWS storage gateway information"
+  aws_sgw_info:
+
+- name: "Get AWS storage gateway information for region eu-west-3"
+  aws_sgw_info:
+    region: eu-west-3
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+class SGWInformationManager(object):
+    def __init__(self, client, module):
+        self.client = client
+        self.module = module
+        self.name = self.module.params.get('name')
+
+    def fetch(self):
+        gateways = self.list_gateways()
+        for gateway in gateways:
+            if self.module.params.get('gather_local_disks'):
+                self.list_local_disks(gateway)
+            # File share gateway
+            if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
+                self.list_gateway_file_shares(gateway)
+            # Volume tape gateway
+            elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
+                self.list_gateway_vtl(gateway)
+            # iSCSI gateway
+            elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
+                self.list_gateway_volumes(gateway)
+
+        self.module.exit_json(gateways=gateways)
+
+    """
+    List all storage gateways for the AWS endpoint.
+    """
+    def list_gateways(self):
+        try:
+            paginator = self.client.get_paginator('list_gateways')
+            response = paginator.paginate(
+                PaginationConfig={
+                    'PageSize': 100,
+                }
+            ).build_full_result()
+
+            gateways = []
+            for gw in response["Gateways"]:
+                gateways.append(camel_dict_to_snake_dict(gw))
+
+            return gateways
+
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
+
+    """
+    Read file share objects from the AWS API response.
+    Drop the gateway_arn attribute from the response, as it would be a duplicate of the parent object.
+    """
+    @staticmethod
+    def _read_gateway_fileshare_response(fileshares, aws_response):
+        for share in aws_response["FileShareInfoList"]:
+            share_obj = camel_dict_to_snake_dict(share)
+            if "gateway_arn" in share_obj:
+                del share_obj["gateway_arn"]
+            fileshares.append(share_obj)
+
+        return aws_response["NextMarker"] if "NextMarker" in aws_response else None
+
+    """
+    List file shares attached to the AWS storage gateway when in S3 mode.
+    """
+    def list_gateway_file_shares(self, gateway):
+        try:
+            response = self.client.list_file_shares(
+                GatewayARN=gateway["gateway_arn"],
+                Limit=100
+            )
+
+            gateway["file_shares"] = []
+            marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+
+            while marker is not None:
+                response = self.client.list_file_shares(
+                    GatewayARN=gateway["gateway_arn"],
+                    Marker=marker,
+                    Limit=100
+                )
+
+                marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
+
+    """
+    List storage gateway local disks.
+    """
+    def list_local_disks(self, gateway):
+        try:
+            gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
+                                      self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
+
+    """
+    Read tape objects from the AWS API response.
+    Drop the gateway_arn attribute from the response, as it would be a duplicate of the parent object.
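+    Returns the pagination marker ("Marker") when more tapes remain, otherwise None.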
+ """ + @staticmethod + def _read_gateway_tape_response(tapes, aws_response): + for tape in aws_response["TapeInfos"]: + tape_obj = camel_dict_to_snake_dict(tape) + if "gateway_arn" in tape_obj: + del tape_obj["gateway_arn"] + tapes.append(tape_obj) + + return aws_response["Marker"] if "Marker" in aws_response else None + + """ + List VTL & VTS attached to AWS storage gateway in VTL mode + """ + def list_gateway_vtl(self, gateway): + try: + response = self.client.list_tapes( + Limit=100 + ) + + gateway["tapes"] = [] + marker = self._read_gateway_tape_response(gateway["tapes"], response) + + while marker is not None: + response = self.client.list_tapes( + Marker=marker, + Limit=100 + ) + + marker = self._read_gateway_tape_response(gateway["tapes"], response) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes") + + """ + List volumes attached to AWS storage gateway in CACHED or STORAGE mode + """ + def list_gateway_volumes(self, gateway): + try: + paginator = self.client.get_paginator('list_volumes') + response = paginator.paginate( + GatewayARN=gateway["gateway_arn"], + PaginationConfig={ + 'PageSize': 100, + } + ).build_full_result() + + gateway["volumes"] = [] + for volume in response["VolumeInfos"]: + volume_obj = camel_dict_to_snake_dict(volume) + if "gateway_arn" in volume_obj: + del volume_obj["gateway_arn"] + if "gateway_id" in volume_obj: + del volume_obj["gateway_id"] + + gateway["volumes"].append(volume_obj) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes") + + +def main(): + argument_spec = dict( + gather_local_disks=dict(type='bool', default=True), + gather_tapes=dict(type='bool', default=True), + gather_file_shares=dict(type='bool', default=True), + gather_volumes=dict(type='bool', default=True) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + if module._name == 'aws_sgw_facts': + module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", version='2.13') + client = module.client('storagegateway') + + if client is None: # this should never happen + module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.') + + SGWInformationManager(client, module).fetch() + + +if __name__ == '__main__': + main() diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py new file mode 100644 index 00000000000..ebfafee4c41 --- /dev/null +++ b/aws_ssm_parameter_store.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: aws_ssm_parameter_store +short_description: Manage key-value pairs in aws parameter store. +description: + - Manage key-value pairs in aws parameter store. +options: + name: + description: + - Parameter key name. + required: true + type: str + description: + description: + - Parameter key description. + required: false + type: str + value: + description: + - Parameter value. + required: false + type: str + state: + description: + - Creates or modifies an existing parameter. + - Deletes a parameter. 
+ required: false + choices: ['present', 'absent'] + default: present + type: str + string_type: + description: + - Parameter String type. + required: false + choices: ['String', 'StringList', 'SecureString'] + default: String + type: str + decryption: + description: + - Work with SecureString type to get plain text secrets + type: bool + required: false + default: true + key_id: + description: + - AWS KMS key to decrypt the secrets. + - The default key (C(alias/aws/ssm)) is automatically generated the first + time it's requested. + required: false + default: alias/aws/ssm + type: str + overwrite_value: + description: + - Option to overwrite an existing value if it already exists. + required: false + choices: ['never', 'changed', 'always'] + default: changed + type: str +author: + - Nathan Webster (@nathanwebsterdotme) + - Bill Wang (@ozbillwang) + - Michael De La Rue (@mikedlr) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ botocore, boto3 ] +''' + +EXAMPLES = ''' +- name: Create or update key/value pair in aws parameter store + aws_ssm_parameter_store: + name: "Hello" + description: "This is your first key" + value: "World" + +- name: Delete the key + aws_ssm_parameter_store: + name: "Hello" + state: absent + +- name: Create or update secure key/value pair with default kms key (aws/ssm) + aws_ssm_parameter_store: + name: "Hello" + description: "This is your first key" + string_type: "SecureString" + value: "World" + +- name: Create or update secure key/value pair with nominated kms key + aws_ssm_parameter_store: + name: "Hello" + description: "This is your first key" + string_type: "SecureString" + key_id: "alias/demo" + value: "World" + +- name: Always update a parameter store value and create a new version + aws_ssm_parameter_store: + name: "overwrite_example" + description: "This example will always overwrite the value" + string_type: "String" + value: "Test1234" + overwrite_value: "always" + +- name: recommend to use with aws_ssm lookup plugin + debug: msg="{{ lookup('aws_ssm', 'hello') }}" +''' + +RETURN = ''' +put_parameter: + description: Add one or more parameters to the system. + returned: success + type: dict +delete_parameter: + description: Delete a parameter from the system. 
+ returned: success + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + + +def update_parameter(client, module, args): + changed = False + response = {} + + try: + response = client.put_parameter(**args) + changed = True + except ClientError as e: + module.fail_json_aws(e, msg="setting parameter") + + return changed, response + + +def create_update_parameter(client, module): + changed = False + existing_parameter = None + response = {} + + args = dict( + Name=module.params.get('name'), + Value=module.params.get('value'), + Type=module.params.get('string_type') + ) + + if (module.params.get('overwrite_value') in ("always", "changed")): + args.update(Overwrite=True) + else: + args.update(Overwrite=False) + + if module.params.get('description'): + args.update(Description=module.params.get('description')) + + if module.params.get('string_type') == 'SecureString': + args.update(KeyId=module.params.get('key_id')) + + try: + existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True) + except Exception: + pass + + if existing_parameter: + if (module.params.get('overwrite_value') == 'always'): + + (changed, response) = update_parameter(client, module, args) + + elif (module.params.get('overwrite_value') == 'changed'): + if existing_parameter['Parameter']['Type'] != args['Type']: + (changed, response) = update_parameter(client, module, args) + + if existing_parameter['Parameter']['Value'] != args['Value']: + (changed, response) = update_parameter(client, module, args) + + if args.get('Description'): + # Description field not available from get_parameter function so get it from describe_parameters + describe_existing_parameter = None + try: + describe_existing_parameter_paginator = client.get_paginator('describe_parameters') + describe_existing_parameter = describe_existing_parameter_paginator.paginate( + Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result() + + except ClientError as e: + module.fail_json_aws(e, msg="getting description value") + + if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']: + (changed, response) = update_parameter(client, module, args) + else: + (changed, response) = update_parameter(client, module, args) + + return changed, response + + +def delete_parameter(client, module): + response = {} + + try: + response = client.delete_parameter( + Name=module.params.get('name') + ) + except ClientError as e: + if e.response['Error']['Code'] == 'ParameterNotFound': + return False, {} + module.fail_json_aws(e, msg="deleting parameter") + + return True, response + + +def setup_client(module): + connection = module.client('ssm') + return connection + + +def setup_module_object(): + argument_spec = dict( + name=dict(required=True), + description=dict(), + value=dict(required=False, no_log=True), + state=dict(default='present', choices=['present', 'absent']), + string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']), + decryption=dict(default=True, type='bool'), + key_id=dict(default="alias/aws/ssm"), + overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), + ) + + return AnsibleAWSModule( + argument_spec=argument_spec, + ) + + +def main(): + module = setup_module_object() + state = module.params.get('state') + client = setup_client(module) + + invocations = { + "present": 
create_update_parameter, + "absent": delete_parameter, + } + (changed, response) = invocations[state](client, module) + module.exit_json(changed=changed, response=response) + + +if __name__ == '__main__': + main() diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py new file mode 100644 index 00000000000..d1933fcafe7 --- /dev/null +++ b/aws_step_functions_state_machine.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# Copyright (c) 2019, Tom De Keyser (@tdekeyser) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aws_step_functions_state_machine + +short_description: Manage AWS Step Functions state machines + + +description: + - Create, update and delete state machines in AWS Step Functions. + - Calling the module in C(state=present) for an existing AWS Step Functions state machine + will attempt to update the state machine definition, IAM Role, or tags with the provided data. + +options: + name: + description: + - Name of the state machine + required: true + type: str + definition: + description: + - The Amazon States Language definition of the state machine. See + U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more + information on the Amazon States Language. + - "This parameter is required when C(state=present)." + type: json + role_arn: + description: + - The ARN of the IAM Role that will be used by the state machine for its executions. + - "This parameter is required when C(state=present)." + type: str + state: + description: + - Desired state for the state machine + default: present + choices: [ present, absent ] + type: str + tags: + description: + - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one. + type: dict + purge_tags: + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + If the I(tags) parameter is not set then tags will not be modified. 
+ default: yes + type: bool + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + + +author: + - Tom De Keyser (@tdekeyser) +''' + +EXAMPLES = ''' +# Create a new AWS Step Functions state machine +- name: Setup HelloWorld state machine + aws_step_functions_state_machine: + name: "HelloWorldStateMachine" + definition: "{{ lookup('file','state_machine.json') }}" + role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole + tags: + project: helloWorld + +# Update an existing state machine +- name: Change IAM Role and tags of HelloWorld state machine + aws_step_functions_state_machine: + name: HelloWorldStateMachine + definition: "{{ lookup('file','state_machine.json') }}" + role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole + tags: + otherTag: aDifferentTag + +# Remove the AWS Step Functions state machine +- name: Delete HelloWorld state machine + aws_step_functions_state_machine: + name: HelloWorldStateMachine + state: absent +''' + +RETURN = ''' +state_machine_arn: + description: ARN of the AWS Step Functions state machine + type: str + returned: always +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + + +def manage_state_machine(state, sfn_client, module): + state_machine_arn = get_state_machine_arn(sfn_client, module) + + if state == 'present': + if state_machine_arn is None: + create(sfn_client, module) + else: + update(state_machine_arn, sfn_client, module) + elif state == 'absent': + if state_machine_arn is not None: + remove(state_machine_arn, sfn_client, module) + + check_mode(module, msg='State is up-to-date.') + module.exit_json(changed=False) + + +def create(sfn_client, module): + check_mode(module, msg='State machine would be created.', changed=True) + + tags = module.params.get('tags') + sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else [] + + state_machine = sfn_client.create_state_machine( + name=module.params.get('name'), + definition=module.params.get('definition'), + roleArn=module.params.get('role_arn'), + tags=sfn_tags + ) + module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn')) + + +def remove(state_machine_arn, sfn_client, module): + check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True) + + sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) + module.exit_json(changed=True, state_machine_arn=state_machine_arn) + + +def update(state_machine_arn, sfn_client, module): + tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) + + if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: + check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True) + + sfn_client.update_state_machine( + stateMachineArn=state_machine_arn, + definition=module.params.get('definition'), + roleArn=module.params.get('role_arn') + ) + sfn_client.untag_resource( + resourceArn=state_machine_arn, + tagKeys=tags_to_remove + ) + sfn_client.tag_resource( + resourceArn=state_machine_arn, + 
tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value') + ) + + module.exit_json(changed=True, state_machine_arn=state_machine_arn) + + +def compare_tags(state_machine_arn, sfn_client, module): + new_tags = module.params.get('tags') + current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags') + return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags')) + + +def params_changed(state_machine_arn, sfn_client, module): + """ + Check whether the state machine definition or IAM Role ARN is different + from the existing state machine parameters. + """ + current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn) + return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn') + + +def get_state_machine_arn(sfn_client, module): + """ + Finds the state machine ARN based on the name parameter. Returns None if + there is no state machine with this name. + """ + target_name = module.params.get('name') + all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines') + + for state_machine in all_state_machines: + if state_machine.get('name') == target_name: + return state_machine.get('stateMachineArn') + + +def check_mode(module, msg='', changed=False): + if module.check_mode: + module.exit_json(changed=changed, output=msg) + + +def main(): + module_args = dict( + name=dict(type='str', required=True), + definition=dict(type='json'), + role_arn=dict(type='str'), + state=dict(choices=['present', 'absent'], default='present'), + tags=dict(default=None, type='dict'), + purge_tags=dict(default=True, type='bool'), + ) + module = AnsibleAWSModule( + argument_spec=module_args, + required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])], + supports_check_mode=True + ) + + sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5)) + state = module.params.get('state') + + try: + manage_state_machine(state, sfn_client, module) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to manage state machine') + + +if __name__ == '__main__': + main() diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py new file mode 100644 index 00000000000..29ed1634ee7 --- /dev/null +++ b/aws_step_functions_state_machine_execution.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# Copyright (c) 2019, Prasad Katti (@prasadkatti) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aws_step_functions_state_machine_execution + +short_description: Start or stop execution of an AWS Step Functions state machine. + + +description: + - Start or stop execution of a state machine in AWS Step Functions. + +options: + action: + description: Desired action (start or stop) for a state machine execution. + default: start + choices: [ start, stop ] + type: str + name: + description: Name of the execution. + type: str + execution_input: + description: The JSON input data for the execution. 
+    type: json
+    default: {}
+  state_machine_arn:
+    description: The ARN of the state machine that will be executed.
+    type: str
+  execution_arn:
+    description: The ARN of the execution you wish to stop.
+    type: str
+  cause:
+    description: A detailed explanation of the cause for stopping the execution.
+    type: str
+    default: ''
+  error:
+    description: The error code of the failure to pass in when stopping the execution.
+    type: str
+    default: ''
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+
+author:
+  - Prasad Katti (@prasadkatti)
+'''
+
+EXAMPLES = '''
+- name: Start an execution of a state machine
+  aws_step_functions_state_machine_execution:
+    name: an_execution_name
+    execution_input: '{ "IsHelloWorldExample": true }'
+    state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
+
+- name: Stop an execution of a state machine
+  aws_step_functions_state_machine_execution:
+    action: stop
+    execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+    cause: "cause of task failure"
+    error: "error code of the failure"
+'''
+
+RETURN = '''
+execution_arn:
+  description: ARN of the AWS Step Functions state machine execution.
+  type: str
+  returned: if action == start and changed == True
+  sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+start_date:
+  description: The date the execution was started.
+  type: str
+  returned: if action == start and changed == True
+  sample: "2019-11-02T22:39:49.071000-07:00"
+stop_date:
+  description: The date the execution was stopped.
+  type: str
+  returned: if action == stop
+  sample: "2019-11-02T22:39:49.071000-07:00"
+'''
+
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def start_execution(module, sfn_client):
+    '''
+    start_execution uses the execution name to determine whether a previous
+    execution already exists. If an execution with the provided name exists,
+    client.start_execution will not be called.
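+    Note that list_executions is only eventually consistent, so a duplicate
+    name can occasionally slip past this check; that case surfaces as an
+    ExecutionAlreadyExists error and is treated as unchanged.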
+    '''
+
+    state_machine_arn = module.params.get('state_machine_arn')
+    name = module.params.get('name')
+    execution_input = module.params.get('execution_input')
+
+    try:
+        # list_executions is eventually consistent
+        page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
+
+        for execution in page_iterators.build_full_result()['executions']:
+            if name == execution['name']:
+                check_mode(module, msg='State machine execution already exists.', changed=False)
+                module.exit_json(changed=False)
+
+        check_mode(module, msg='State machine execution would be started.', changed=True)
+        res_execution = sfn_client.start_execution(
+            stateMachineArn=state_machine_arn,
+            name=name,
+            input=execution_input
+        )
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'ExecutionAlreadyExists':
+            # the eventually consistent idempotency check above can miss a
+            # recently started execution; it surfaces here instead
+            module.exit_json(changed=False)
+        module.fail_json_aws(e, msg="Failed to start execution.")
+    except BotoCoreError as e:
+        # BotoCoreError has no .response attribute, so handle it separately
+        module.fail_json_aws(e, msg="Failed to start execution.")
+
+    module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
+
+
+def stop_execution(module, sfn_client):
+
+    cause = module.params.get('cause')
+    error = module.params.get('error')
+    execution_arn = module.params.get('execution_arn')
+
+    try:
+        # describe_execution is eventually consistent
+        execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
+        if execution_status != 'RUNNING':
+            check_mode(module, msg='State machine execution is not running.', changed=False)
+            module.exit_json(changed=False)
+
+        check_mode(module, msg='State machine execution would be stopped.', changed=True)
+        res = sfn_client.stop_execution(
+            executionArn=execution_arn,
+            cause=cause,
+            error=error
+        )
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to stop execution.")
+
+    module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
+
+
+def check_mode(module, msg='', changed=False):
+    if module.check_mode:
+        module.exit_json(changed=changed, output=msg)
+
+
+def main():
+    module_args = dict(
+        action=dict(choices=['start', 'stop'], default='start'),
+        name=dict(type='str'),
+        execution_input=dict(type='json', default={}),
+        state_machine_arn=dict(type='str'),
+        cause=dict(type='str', default=''),
+        error=dict(type='str', default=''),
+        execution_arn=dict(type='str')
+    )
+    module = AnsibleAWSModule(
+        argument_spec=module_args,
+        required_if=[('action', 'start', ['name', 'state_machine_arn']),
+                     ('action', 'stop', ['execution_arn']),
+                     ],
+        supports_check_mode=True
+    )
+
+    sfn_client = module.client('stepfunctions')
+
+    action = module.params.get('action')
+    if action == "start":
+        start_execution(module, sfn_client)
+    else:
+        stop_execution(module, sfn_client)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_waf_condition.py b/aws_waf_condition.py
new file mode 100644
index 00000000000..1b2c887f67f
--- /dev/null
+++ b/aws_waf_condition.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Will Thames
+# Copyright (c) 2015 Mike Mochan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: aws_waf_condition
+short_description: Create and delete WAF Conditions
+description:
+  - Read the AWS documentation for WAF
+    U(https://aws.amazon.com/documentation/waf/).
+
+author:
+  - Will
Thames (@willthames) + - Mike Mochan (@mmochan) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +options: + name: + description: Name of the Web Application Firewall condition to manage. + required: true + type: str + type: + description: The type of matching to perform. + choices: + - byte + - geo + - ip + - regex + - size + - sql + - xss + type: str + required: true + filters: + description: + - A list of the filters against which to match. + - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string). + - For I(type=geo), the only valid key is I(country). + - For I(type=ip), the only valid key is I(ip_address). + - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern). + - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size). + - For I(type=sql), valid keys are I(field_to_match) and I(transformation). + - For I(type=xss), valid keys are I(field_to_match) and I(transformation). + - Required when I(state=present). + type: list + elements: dict + suboptions: + field_to_match: + description: + - The field upon which to perform the match. + - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). + type: str + choices: ['uri', 'query_string', 'header', 'method', 'body'] + position: + description: + - Where in the field the match needs to occur. + - Only valid when I(type=byte). + type: str + choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word'] + header: + description: + - Which specific header should be matched. + - Required when I(field_to_match=header). + - Valid when I(type=byte). + type: str + transformation: + description: + - A transform to apply on the field prior to performing the match. + - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). + type: str + choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode'] + country: + description: + - Value of geo constraint (typically a two letter country code). + - The only valid key when I(type=geo). + type: str + ip_address: + description: + - An IP Address or CIDR to match. + - The only valid key when I(type=ip). + type: str + regex_pattern: + description: + - A dict describing the regular expressions used to perform the match. + - Only valid when I(type=regex). + type: dict + suboptions: + name: + description: A name to describe the set of patterns. + type: str + regex_strings: + description: A list of regular expressions to match. + type: list + elements: str + comparison: + description: + - What type of comparison to perform. + - Only valid key when I(type=size). + type: str + choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT'] + size: + description: + - The size of the field (in bytes). + - Only valid key when I(type=size). + type: int + target_string: + description: + - The string to search for. + - May be up to 50 bytes. + - Valid when I(type=byte). + type: str + purge_filters: + description: + - Whether to remove existing filters from a condition if not passed in I(filters). + default: false + type: bool + waf_regional: + description: Whether to use waf-regional module. + default: false + required: no + type: bool + state: + description: Whether the condition should be C(present) or C(absent). 
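+    # Orientation note: each I(type) above manages one AWS WAF set, matching
+    # the IDs in the RETURN block below: byte -> ByteMatchSet, geo -> GeoMatchSet,
+    # ip -> IPSet, regex -> RegexMatchSet, size -> SizeConstraintSet,
+    # sql -> SqlInjectionMatchSet, xss -> XssMatchSet.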
+ choices: + - present + - absent + default: present + type: str + +''' + +EXAMPLES = ''' + - name: create WAF byte condition + aws_waf_condition: + name: my_byte_condition + filters: + - field_to_match: header + position: STARTS_WITH + target_string: Hello + header: Content-type + type: byte + + - name: create WAF geo condition + aws_waf_condition: + name: my_geo_condition + filters: + - country: US + - country: AU + - country: AT + type: geo + + - name: create IP address condition + aws_waf_condition: + name: "{{ resource_prefix }}_ip_condition" + filters: + - ip_address: "10.0.0.0/8" + - ip_address: "192.168.0.0/24" + type: ip + + - name: create WAF regex condition + aws_waf_condition: + name: my_regex_condition + filters: + - field_to_match: query_string + regex_pattern: + name: greetings + regex_strings: + - '[hH]ello' + - '^Hi there' + - '.*Good Day to You' + type: regex + + - name: create WAF size condition + aws_waf_condition: + name: my_size_condition + filters: + - field_to_match: query_string + size: 300 + comparison: GT + type: size + + - name: create WAF sql injection condition + aws_waf_condition: + name: my_sql_condition + filters: + - field_to_match: query_string + transformation: url_decode + type: sql + + - name: create WAF xss condition + aws_waf_condition: + name: my_xss_condition + filters: + - field_to_match: query_string + transformation: url_decode + type: xss + +''' + +RETURN = ''' +condition: + description: Condition returned by operation. + returned: always + type: complex + contains: + condition_id: + description: Type-agnostic ID for the condition. + returned: when state is present + type: str + sample: dd74b1ff-8c06-4a4f-897a-6b23605de413 + byte_match_set_id: + description: ID for byte match set. + returned: always + type: str + sample: c4882c96-837b-44a2-a762-4ea87dbf812b + byte_match_tuples: + description: List of byte match tuples. + returned: always + type: complex + contains: + field_to_match: + description: Field to match. + returned: always + type: complex + contains: + data: + description: Which specific header (if type is header). + type: str + sample: content-type + type: + description: Type of field + type: str + sample: HEADER + positional_constraint: + description: Position in the field to match. + type: str + sample: STARTS_WITH + target_string: + description: String to look for. + type: str + sample: Hello + text_transformation: + description: Transformation to apply to the field before matching. + type: str + sample: NONE + geo_match_constraints: + description: List of geographical constraints. + returned: when type is geo and state is present + type: complex + contains: + type: + description: Type of geo constraint. + type: str + sample: Country + value: + description: Value of geo constraint (typically a country code). + type: str + sample: AT + geo_match_set_id: + description: ID of the geo match set. + returned: when type is geo and state is present + type: str + sample: dd74b1ff-8c06-4a4f-897a-6b23605de413 + ip_set_descriptors: + description: list of IP address filters + returned: when type is ip and state is present + type: complex + contains: + type: + description: Type of IP address (IPV4 or IPV6). + returned: always + type: str + sample: IPV4 + value: + description: IP address. + returned: always + type: str + sample: 10.0.0.0/8 + ip_set_id: + description: ID of condition. + returned: when type is ip and state is present + type: str + sample: 78ad334a-3535-4036-85e6-8e11e745217b + name: + description: Name of condition. 
+ returned: when state is present + type: str + sample: my_waf_condition + regex_match_set_id: + description: ID of the regex match set. + returned: when type is regex and state is present + type: str + sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c + regex_match_tuples: + description: List of regex matches. + returned: when type is regex and state is present + type: complex + contains: + field_to_match: + description: Field on which the regex match is applied. + type: complex + contains: + type: + description: The field name. + returned: when type is regex and state is present + type: str + sample: QUERY_STRING + regex_pattern_set_id: + description: ID of the regex pattern. + type: str + sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e + text_transformation: + description: transformation applied to the text before matching + type: str + sample: NONE + size_constraint_set_id: + description: ID of the size constraint set. + returned: when type is size and state is present + type: str + sample: de84b4b3-578b-447e-a9a0-0db35c995656 + size_constraints: + description: List of size constraints to apply. + returned: when type is size and state is present + type: complex + contains: + comparison_operator: + description: Comparison operator to apply. + type: str + sample: GT + field_to_match: + description: Field on which the size constraint is applied. + type: complex + contains: + type: + description: Field name. + type: str + sample: QUERY_STRING + size: + description: Size to compare against the field. + type: int + sample: 300 + text_transformation: + description: Transformation applied to the text before matching. + type: str + sample: NONE + sql_injection_match_set_id: + description: ID of the SQL injection match set. + returned: when type is sql and state is present + type: str + sample: de84b4b3-578b-447e-a9a0-0db35c995656 + sql_injection_match_tuples: + description: List of SQL injection match sets. + returned: when type is sql and state is present + type: complex + contains: + field_to_match: + description: Field on which the SQL injection match is applied. + type: complex + contains: + type: + description: Field name. + type: str + sample: QUERY_STRING + text_transformation: + description: Transformation applied to the text before matching. + type: str + sample: URL_DECODE + xss_match_set_id: + description: ID of the XSS match set. + returned: when type is xss and state is present + type: str + sample: de84b4b3-578b-447e-a9a0-0db35c995656 + xss_match_tuples: + description: List of XSS match sets. + returned: when type is xss and state is present + type: complex + contains: + field_to_match: + description: Field on which the XSS match is applied. + type: complex + contains: + type: + description: Field name + type: str + sample: QUERY_STRING + text_transformation: + description: transformation applied to the text before matching. 
+ type: str + sample: URL_DECODE +''' + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff + + +class Condition(object): + + def __init__(self, client, module): + self.client = client + self.module = module + self.type = module.params['type'] + self.method_suffix = MATCH_LOOKUP[self.type]['method'] + self.conditionset = MATCH_LOOKUP[self.type]['conditionset'] + self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's' + self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id' + self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple'] + self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's' + self.conditiontype = MATCH_LOOKUP[self.type]['type'] + + def format_for_update(self, condition_set_id): + # Prep kwargs + kwargs = dict() + kwargs['Updates'] = list() + + for filtr in self.module.params.get('filters'): + # Only for ip_set + if self.type == 'ip': + # there might be a better way of detecting an IPv6 address + if ':' in filtr.get('ip_address'): + ip_type = 'IPV6' + else: + ip_type = 'IPV4' + condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')} + + # Specific for geo_match_set + if self.type == 'geo': + condition_insert = dict(Type='Country', Value=filtr.get('country')) + + # Common For everything but ip_set and geo_match_set + if self.type not in ('ip', 'geo'): + + condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()), + TextTransformation=filtr.get('transformation', 'none').upper()) + + if filtr.get('field_to_match').upper() == "HEADER": + if filtr.get('header'): + condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower() + else: + self.module.fail_json(msg=str("DATA required when HEADER requested")) + + # Specific for byte_match_set + if self.type == 'byte': + condition_insert['TargetString'] = filtr.get('target_string') + condition_insert['PositionalConstraint'] = filtr.get('position') + + # Specific for size_constraint_set + if self.type == 'size': + condition_insert['ComparisonOperator'] = filtr.get('comparison') + condition_insert['Size'] = filtr.get('size') + + # Specific for regex_match_set + if self.type == 'regex': + condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId'] + + kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert}) + + kwargs[self.conditionsetid] = condition_set_id + return kwargs + + def format_for_deletion(self, condition): + return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple} + for current_condition_tuple in condition[self.conditiontuples]], + self.conditionsetid: condition[self.conditionsetid]} + + @AWSRetry.exponential_backoff() + def list_regex_patterns_with_backoff(self, **params): + return self.client.list_regex_pattern_sets(**params) + + @AWSRetry.exponential_backoff() + def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id): + return 
self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id) + + def list_regex_patterns(self): + # at time of writing(2017-11-20) no regex pattern paginator exists + regex_patterns = [] + params = {} + while True: + try: + response = self.list_regex_patterns_with_backoff(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not list regex patterns') + regex_patterns.extend(response['RegexPatternSets']) + if 'NextMarker' in response: + params['NextMarker'] = response['NextMarker'] + else: + break + return regex_patterns + + def get_regex_pattern_by_name(self, name): + existing_regex_patterns = self.list_regex_patterns() + regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns) + if name in regex_lookup: + return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet'] + else: + return None + + def ensure_regex_pattern_present(self, regex_pattern): + name = regex_pattern['name'] + + pattern_set = self.get_regex_pattern_by_name(name) + if not pattern_set: + pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name}, + self.client.create_regex_pattern_set)['RegexPatternSet'] + missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings']) + extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings']) + if not missing and not extra: + return pattern_set + updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing] + updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra]) + run_func_with_change_token_backoff(self.client, self.module, + {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates}, + self.client.update_regex_pattern_set, wait=True) + return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet'] + + def delete_unused_regex_pattern(self, regex_pattern_set_id): + try: + regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet'] + updates = list() + for regex_pattern_string in regex_pattern_set['RegexPatternStrings']: + updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string}) + run_func_with_change_token_backoff(self.client, self.module, + {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates}, + self.client.update_regex_pattern_set) + + run_func_with_change_token_backoff(self.client, self.module, + {'RegexPatternSetId': regex_pattern_set_id}, + self.client.delete_regex_pattern_set, wait=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if e.response['Error']['Code'] == 'WAFNonexistentItemException': + return + self.module.fail_json_aws(e, msg='Could not delete regex pattern') + + def get_condition_by_name(self, name): + all_conditions = [d for d in self.list_conditions() if d['Name'] == name] + if all_conditions: + return all_conditions[0][self.conditionsetid] + + @AWSRetry.exponential_backoff() + def get_condition_by_id_with_backoff(self, condition_set_id): + params = dict() + params[self.conditionsetid] = condition_set_id + func = getattr(self.client, 'get_' + self.method_suffix) + return func(**params)[self.conditionset] + + def get_condition_by_id(self, condition_set_id): + try: + return self.get_condition_by_id_with_backoff(condition_set_id) + except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not get condition') + + def list_conditions(self): + method = 'list_' + self.method_suffix + 's' + try: + paginator = self.client.get_paginator(method) + func = paginator.paginate().build_full_result + except botocore.exceptions.OperationNotPageableError: + # list_geo_match_sets and list_regex_match_sets do not have a paginator + func = getattr(self.client, method) + try: + return func()[self.conditionsets] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type) + + def tidy_up_regex_patterns(self, regex_match_set): + all_regex_match_sets = self.list_conditions() + all_match_set_patterns = list() + for rms in all_regex_match_sets: + all_match_set_patterns.extend(conditiontuple['RegexPatternSetId'] + for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples]) + for filtr in regex_match_set[self.conditiontuples]: + if filtr['RegexPatternSetId'] not in all_match_set_patterns: + self.delete_unused_regex_pattern(filtr['RegexPatternSetId']) + + def find_condition_in_rules(self, condition_set_id): + rules_in_use = [] + try: + if self.client.__class__.__name__ == 'WAF': + all_rules = list_rules_with_backoff(self.client) + elif self.client.__class__.__name__ == 'WAFRegional': + all_rules = list_regional_rules_with_backoff(self.client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not list rules') + for rule in all_rules: + try: + rule_details = get_rule_with_backoff(self.client, rule['RuleId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not get rule details') + if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]: + rules_in_use.append(rule_details['Name']) + return rules_in_use + + def find_and_delete_condition(self, condition_set_id): + current_condition = self.get_condition_by_id(condition_set_id) + in_use_rules = self.find_condition_in_rules(condition_set_id) + if in_use_rules: + rulenames = ', '.join(in_use_rules) + self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames)) + if current_condition[self.conditiontuples]: + # Filters are deleted using update with the DELETE action + func = getattr(self.client, 'update_' + self.method_suffix) + params = self.format_for_deletion(current_condition) + try: + # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call + run_func_with_change_token_backoff(self.client, self.module, params, func) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not delete filters from condition') + func = getattr(self.client, 'delete_' + self.method_suffix) + params = dict() + params[self.conditionsetid] = condition_set_id + try: + run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not delete condition') + # tidy up regex patterns + if self.type == 'regex': + self.tidy_up_regex_patterns(current_condition) + return True, {} + + def find_missing(self, update, current_condition): + missing = [] + for desired in update['Updates']: + found = False 
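+            # compare_policies() returns a truthy value when the two structures
+            # differ, so a falsy result means the desired tuple already exists.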
+ desired_condition = desired[self.conditiontuple] + current_conditions = current_condition[self.conditiontuples] + for condition in current_conditions: + if not compare_policies(condition, desired_condition): + found = True + if not found: + missing.append(desired) + return missing + + def find_and_update_condition(self, condition_set_id): + current_condition = self.get_condition_by_id(condition_set_id) + update = self.format_for_update(condition_set_id) + missing = self.find_missing(update, current_condition) + if self.module.params.get('purge_filters'): + extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple} + for current_tuple in current_condition[self.conditiontuples] + if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]] + else: + extra = [] + changed = bool(missing or extra) + if changed: + update['Updates'] = missing + extra + func = getattr(self.client, 'update_' + self.method_suffix) + try: + result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not update condition') + return changed, self.get_condition_by_id(condition_set_id) + + def ensure_condition_present(self): + name = self.module.params['name'] + condition_set_id = self.get_condition_by_name(name) + if condition_set_id: + return self.find_and_update_condition(condition_set_id) + else: + params = dict() + params['Name'] = name + func = getattr(self.client, 'create_' + self.method_suffix) + try: + condition = run_func_with_change_token_backoff(self.client, self.module, params, func) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Could not create condition') + return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid]) + + def ensure_condition_absent(self): + condition_set_id = self.get_condition_by_name(self.module.params['name']) + if condition_set_id: + return self.find_and_delete_condition(condition_set_id) + return False, {} + + +def main(): + filters_subspec = dict( + country=dict(), + field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']), + header=dict(), + transformation=dict(choices=['none', 'compress_white_space', + 'html_entity_decode', 'lowercase', + 'cmd_line', 'url_decode']), + position=dict(choices=['exactly', 'starts_with', 'ends_with', + 'contains', 'contains_word']), + comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']), + target_string=dict(), # Bytes + size=dict(type='int'), + ip_address=dict(), + regex_pattern=dict(), + ) + argument_spec = dict( + name=dict(required=True), + type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']), + filters=dict(type='list'), + purge_filters=dict(type='bool', default=False), + waf_regional=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['filters']]]) + state = module.params.get('state') + + resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + client = module.client(resource) + + condition = Condition(client, module) + + if state == 'present': + (changed, results) = condition.ensure_condition_present() + # return a condition agnostic ID for use by aws_waf_rule + results['ConditionId'] = results[condition.conditionsetid] + 
    else:
+        (changed, results) = condition.ensure_condition_absent()
+
+    module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aws_waf_facts.py b/aws_waf_facts.py
new file mode 120000
index 00000000000..3fd538387ac
--- /dev/null
+++ b/aws_waf_facts.py
@@ -0,0 +1 @@
+aws_waf_info.py
\ No newline at end of file
diff --git a/aws_waf_info.py b/aws_waf_info.py
new file mode 100644
index 00000000000..5da7e6cff9f
--- /dev/null
+++ b/aws_waf_info.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: aws_waf_info
+short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters
+description:
+  - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
+  - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+  name:
+    description:
+      - The name of a Web Application Firewall.
+    type: str
+  waf_regional:
+    description: Whether to use the waf-regional API.
+    default: false
+    required: no
+    type: bool
+
+author:
+  - Mike Mochan (@mmochan)
+  - Will Thames (@willthames)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: obtain all WAF information
+  aws_waf_info:
+
+- name: obtain all information for a single WAF
+  aws_waf_info:
+    name: test_waf
+
+- name: obtain all information for a single WAF Regional
+  aws_waf_info:
+    name: test_waf
+    waf_regional: true
+'''
+
+RETURN = '''
+wafs:
+  description: The WAFs that match the passed arguments.
+  returned: success
+  type: complex
+  contains:
+    name:
+      description: A friendly name or description of the WebACL.
+      returned: always
+      type: str
+      sample: test_waf
+    default_action:
+      description: The action to perform if none of the Rules contained in the WebACL match.
+      returned: always
+      type: str
+      sample: BLOCK
+    metric_name:
+      description: A friendly name or description for the metrics for this WebACL.
+      returned: always
+      type: str
+      sample: test_waf_metric
+    rules:
+      description: An array that contains the action and priority for each Rule in the WebACL.
+      returned: always
+      type: complex
+      contains:
+        action:
+          description: The action to perform if the Rule matches.
+          returned: always
+          type: str
+          sample: BLOCK
+        metric_name:
+          description: A friendly name or description for the metrics for this Rule.
+          returned: always
+          type: str
+          sample: ipblockrule
+        name:
+          description: A friendly name or description of the Rule.
+          returned: always
+          type: str
+          sample: ip_block_rule
+        predicates:
+          description: The Predicates list contains a Predicate for each
+            ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
+            object in a Rule.
+ returned: always + type: list + sample: + [ + { + "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890", + "byte_match_tuples": [ + { + "field_to_match": { + "type": "QUERY_STRING" + }, + "positional_constraint": "STARTS_WITH", + "target_string": "bobbins", + "text_transformation": "NONE" + } + ], + "name": "bobbins", + "negated": false, + "type": "ByteMatch" + } + ] +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import list_web_acls, get_web_acl + + +def main(): + argument_spec = dict( + name=dict(required=False), + waf_regional=dict(type='bool', default=False) + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'aws_waf_facts': + module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", version='2.13') + + resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + client = module.client(resource) + web_acls = list_web_acls(client, module) + name = module.params['name'] + if name: + web_acls = [web_acl for web_acl in web_acls if + web_acl['Name'] == name] + if not web_acls: + module.fail_json(msg="WAF named %s not found" % name) + module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId']) + for web_acl in web_acls]) + + +if __name__ == '__main__': + main() diff --git a/aws_waf_rule.py b/aws_waf_rule.py new file mode 100644 index 00000000000..b32e26999ce --- /dev/null +++ b/aws_waf_rule.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# Copyright (c) 2017 Will Thames +# Copyright (c) 2015 Mike Mochan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: aws_waf_rule +short_description: Create and delete WAF Rules +description: + - Read the AWS documentation for WAF + U(https://aws.amazon.com/documentation/waf/). + +author: + - Mike Mochan (@mmochan) + - Will Thames (@willthames) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +options: + name: + description: Name of the Web Application Firewall rule. + required: yes + type: str + metric_name: + description: + - A friendly name or description for the metrics for the rule. + - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. + - You can't change I(metric_name) after you create the rule. + - Defaults to the same as I(name) with disallowed characters removed. + type: str + state: + description: Whether the rule should be present or absent. + choices: + - present + - absent + default: present + type: str + conditions: + description: > + List of conditions used in the rule. M(aws_waf_condition) can be used to + create new conditions. + type: list + elements: dict + suboptions: + type: + required: true + type: str + choices: ['byte','geo','ip','size','sql','xss'] + description: The type of rule to match. + negated: + required: true + type: bool + description: Whether the condition should be negated. + condition: + required: true + type: str + description: The name of the condition. The condition must already exist. + purge_conditions: + description: + - Whether or not to remove conditions that are not passed when updating `conditions`. 
+ default: false + type: bool + waf_regional: + description: Whether to use waf-regional module. + default: false + required: false + type: bool +''' + +EXAMPLES = ''' + + - name: create WAF rule + aws_waf_rule: + name: my_waf_rule + conditions: + - name: my_regex_condition + type: regex + negated: no + - name: my_geo_condition + type: geo + negated: no + - name: my_byte_condition + type: byte + negated: yes + + - name: remove WAF rule + aws_waf_rule: + name: "my_waf_rule" + state: absent + +''' + +RETURN = ''' +rule: + description: WAF rule contents + returned: always + type: complex + contains: + metric_name: + description: Metric name for the rule. + returned: always + type: str + sample: ansibletest1234rule + name: + description: Friendly name for the rule. + returned: always + type: str + sample: ansible-test-1234_rule + predicates: + description: List of conditions used in the rule. + returned: always + type: complex + contains: + data_id: + description: ID of the condition. + returned: always + type: str + sample: 8251acdb-526c-42a8-92bc-d3d13e584166 + negated: + description: Whether the sense of the condition is negated. + returned: always + type: bool + sample: false + type: + description: type of the condition. + returned: always + type: str + sample: ByteMatch + rule_id: + description: ID of the WAF rule. + returned: always + type: str + sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261 +''' + +import re + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, MATCH_LOOKUP +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff + + +def get_rule_by_name(client, module, name): + rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name] + if rules: + return rules[0] + + +def get_rule(client, module, rule_id): + try: + return client.get_rule(RuleId=rule_id)['Rule'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not get WAF rule') + + +def list_rules(client, module): + if client.__class__.__name__ == 'WAF': + try: + return list_rules_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list WAF rules') + elif client.__class__.__name__ == 'WAFRegional': + try: + return list_regional_rules_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list WAF Regional rules') + + +def list_regional_rules(client, module): + try: + return list_regional_rules_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list WAF rules') + + +def find_and_update_rule(client, module, rule_id): + rule = get_rule(client, module, rule_id) + rule_id = rule['RuleId'] + + existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) + desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) + 
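+    # Illustrative shape: existing_conditions and desired_conditions are keyed
+    # by condition type, e.g. {'byte': {}, 'geo': {}, ...}; all_conditions is
+    # populated below so each condition is reachable by both name and DataId.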
all_conditions = dict() + + for condition_type in MATCH_LOOKUP: + method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's' + all_conditions[condition_type] = dict() + try: + paginator = client.get_paginator(method) + func = paginator.paginate().build_full_result + except (KeyError, botocore.exceptions.OperationNotPageableError): + # list_geo_match_sets and list_regex_match_sets do not have a paginator + # and throw different exceptions + func = getattr(client, method) + try: + pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type) + for pred in pred_results: + pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id'] + all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred) + all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred) + + for condition in module.params['conditions']: + desired_conditions[condition['type']][condition['name']] = condition + + reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items()) + for condition in rule['Predicates']: + existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition) + + insertions = list() + deletions = list() + + for condition_type in desired_conditions: + for (condition_name, condition) in desired_conditions[condition_type].items(): + if condition_name not in all_conditions[condition_type]: + module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type)) + condition['data_id'] = all_conditions[condition_type][condition_name]['data_id'] + if condition['data_id'] not in existing_conditions[condition_type]: + insertions.append(format_for_insertion(condition)) + + if module.params['purge_conditions']: + for condition_type in existing_conditions: + deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values() + if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]]) + + changed = bool(insertions or deletions) + update = { + 'RuleId': rule_id, + 'Updates': insertions + deletions + } + if changed: + try: + run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not update rule conditions') + + return changed, get_rule(client, module, rule_id) + + +def format_for_insertion(condition): + return dict(Action='INSERT', + Predicate=dict(Negated=condition['negated'], + Type=MATCH_LOOKUP[condition['type']]['type'], + DataId=condition['data_id'])) + + +def format_for_deletion(condition): + return dict(Action='DELETE', + Predicate=dict(Negated=condition['negated'], + Type=condition['type'], + DataId=condition['data_id'])) + + +def remove_rule_conditions(client, module, rule_id): + conditions = get_rule(client, module, rule_id)['Predicates'] + updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions] + try: + run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not remove rule conditions') + + +def ensure_rule_present(client, 
module): + name = module.params['name'] + rule_id = get_rule_by_name(client, module, name) + params = dict() + if rule_id: + return find_and_update_rule(client, module, rule_id) + else: + params['Name'] = module.params['name'] + metric_name = module.params['metric_name'] + if not metric_name: + metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name']) + params['MetricName'] = metric_name + try: + new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not create rule') + return find_and_update_rule(client, module, new_rule['RuleId']) + + +def find_rule_in_web_acls(client, module, rule_id): + web_acls_in_use = [] + try: + if client.__class__.__name__ == 'WAF': + all_web_acls = list_web_acls_with_backoff(client) + elif client.__class__.__name__ == 'WAFRegional': + all_web_acls = list_regional_web_acls_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list Web ACLs') + for web_acl in all_web_acls: + try: + web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not get Web ACL details') + if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]: + web_acls_in_use.append(web_acl_details['Name']) + return web_acls_in_use + + +def ensure_rule_absent(client, module): + rule_id = get_rule_by_name(client, module, module.params['name']) + in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) + if in_use_web_acls: + web_acl_names = ', '.join(in_use_web_acls) + module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % + (module.params['name'], web_acl_names)) + if rule_id: + remove_rule_conditions(client, module, rule_id) + try: + return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not delete rule') + return False, {} + + +def main(): + argument_spec = dict( + name=dict(required=True), + metric_name=dict(), + state=dict(default='present', choices=['present', 'absent']), + conditions=dict(type='list'), + purge_conditions=dict(type='bool', default=False), + waf_regional=dict(type='bool', default=False), + ) + module = AnsibleAWSModule(argument_spec=argument_spec) + state = module.params.get('state') + + resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + client = module.client(resource) + if state == 'present': + (changed, results) = ensure_rule_present(client, module) + else: + (changed, results) = ensure_rule_absent(client, module) + + module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) + + +if __name__ == '__main__': + main() diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py new file mode 100644 index 00000000000..539bac2f8cb --- /dev/null +++ b/aws_waf_web_acl.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' 
+module: aws_waf_web_acl +short_description: Create and delete WAF Web ACLs. +description: + - Read the AWS documentation for WAF + U(https://aws.amazon.com/documentation/waf/). + +author: + - Mike Mochan (@mmochan) + - Will Thames (@willthames) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +options: + name: + description: Name of the Web Application Firewall ACL to manage. + required: yes + type: str + default_action: + description: The action that you want AWS WAF to take when a request doesn't + match the criteria specified in any of the Rule objects that are associated with the WebACL. + choices: + - block + - allow + - count + type: str + state: + description: Whether the Web ACL should be present or absent. + choices: + - present + - absent + default: present + type: str + metric_name: + description: + - A friendly name or description for the metrics for this WebACL. + - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. + - You can't change I(metric_name) after you create the WebACL. + - Metric name will default to I(name) with disallowed characters stripped out. + type: str + rules: + description: + - A list of rules that the Web ACL will enforce. + type: list + elements: dict + suboptions: + name: + description: Name of the rule. + type: str + required: true + action: + description: The action to perform. + type: str + required: true + priority: + description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first. + type: int + required: true + type: + description: The type of rule. + choices: + - rate_based + - regular + type: str + purge_rules: + description: + - Whether to remove rules that aren't passed with I(rules). + default: False + type: bool + waf_regional: + description: Whether to use waf-regional module. + default: false + required: no + type: bool +''' + +EXAMPLES = ''' + - name: create web ACL + aws_waf_web_acl: + name: my_web_acl + rules: + - name: my_rule + priority: 1 + action: block + default_action: block + purge_rules: yes + state: present + + - name: delete the web acl + aws_waf_web_acl: + name: my_web_acl + state: absent +''' + +RETURN = ''' +web_acl: + description: contents of the Web ACL. + returned: always + type: complex + contains: + default_action: + description: Default action taken by the Web ACL if no rules match. + returned: always + type: dict + sample: + type: BLOCK + metric_name: + description: Metric name used as an identifier. + returned: always + type: str + sample: mywebacl + name: + description: Friendly name of the Web ACL. + returned: always + type: str + sample: my web acl + rules: + description: List of rules. + returned: always + type: complex + contains: + action: + description: Action taken by the WAF when the rule matches. + returned: always + type: complex + sample: + type: ALLOW + priority: + description: priority number of the rule (lower numbers are run first). + returned: always + type: int + sample: 2 + rule_id: + description: Rule ID. + returned: always + type: str + sample: a6fc7ab5-287b-479f-8004-7fd0399daf75 + type: + description: Type of rule (either REGULAR or RATE_BASED). + returned: always + type: str + sample: REGULAR + web_acl_id: + description: Unique identifier of Web ACL. 
+ returned: always + type: str + sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c +''' + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +import re + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import list_rules_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff, \ + run_func_with_change_token_backoff, list_regional_rules_with_backoff + + +def get_web_acl_by_name(client, module, name): + acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name] + if acls: + return acls[0] + else: + return acls + + +def create_rule_lookup(client, module): + if client.__class__.__name__ == 'WAF': + try: + rules = list_rules_with_backoff(client) + return dict((rule['Name'], rule) for rule in rules) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list rules') + elif client.__class__.__name__ == 'WAFRegional': + try: + rules = list_regional_rules_with_backoff(client) + return dict((rule['Name'], rule) for rule in rules) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not list regional rules') + + +def get_web_acl(client, module, web_acl_id): + try: + return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id) + + +def list_web_acls(client, module,): + if client.__class__.__name__ == 'WAF': + try: + return list_web_acls_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not get Web ACLs') + elif client.__class__.__name__ == 'WAFRegional': + try: + return list_regional_web_acls_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not get Web ACLs') + + +def find_and_update_web_acl(client, module, web_acl_id): + acl = get_web_acl(client, module, web_acl_id) + rule_lookup = create_rule_lookup(client, module) + existing_rules = acl['Rules'] + desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'], + 'Priority': rule['priority'], + 'Action': {'Type': rule['action'].upper()}, + 'Type': rule.get('type', 'regular').upper()} + for rule in module.params['rules']] + missing = [rule for rule in desired_rules if rule not in existing_rules] + extras = [] + if module.params['purge_rules']: + extras = [rule for rule in existing_rules if rule not in desired_rules] + + insertions = [format_for_update(rule, 'INSERT') for rule in missing] + deletions = [format_for_update(rule, 'DELETE') for rule in extras] + changed = bool(insertions + deletions) + + # Purge rules before adding new ones in case a deletion shares the same + # priority as an insertion. 
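+ # Each update_web_acl call consumes a fresh WAF change token, and the
+ # change_token_in_sync waiter below blocks until the deletions have
+ # propagated, so the later insertions cannot collide with them.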
+ params = { + 'WebACLId': acl['WebACLId'], + 'DefaultAction': acl['DefaultAction'] + } + change_tokens = [] + if deletions: + try: + params['Updates'] = deletions + result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) + change_tokens.append(result['ChangeToken']) + get_waiter( + client, 'change_token_in_sync', + ).wait( + ChangeToken=result['ChangeToken'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not update Web ACL') + if insertions: + try: + params['Updates'] = insertions + result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) + change_tokens.append(result['ChangeToken']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not update Web ACL') + if change_tokens: + for token in change_tokens: + get_waiter( + client, 'change_token_in_sync', + ).wait( + ChangeToken=token + ) + if changed: + acl = get_web_acl(client, module, web_acl_id) + return changed, acl + + +def format_for_update(rule, action): + return dict( + Action=action, + ActivatedRule=dict( + Priority=rule['Priority'], + RuleId=rule['RuleId'], + Action=dict( + Type=rule['Action']['Type'] + ) + ) + ) + + +def remove_rules_from_web_acl(client, module, web_acl_id): + acl = get_web_acl(client, module, web_acl_id) + deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']] + try: + params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions} + run_func_with_change_token_backoff(client, module, params, client.update_web_acl) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not remove rule') + + +def ensure_web_acl_present(client, module): + changed = False + result = None + name = module.params['name'] + web_acl_id = get_web_acl_by_name(client, module, name) + if web_acl_id: + (changed, result) = find_and_update_web_acl(client, module, web_acl_id) + else: + metric_name = module.params['metric_name'] + if not metric_name: + metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name']) + default_action = module.params['default_action'].upper() + try: + params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}} + new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not create Web ACL') + (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId']) + return changed, result + + +def ensure_web_acl_absent(client, module): + web_acl_id = get_web_acl_by_name(client, module, module.params['name']) + if web_acl_id: + web_acl = get_web_acl(client, module, web_acl_id) + if web_acl['Rules']: + remove_rules_from_web_acl(client, module, web_acl_id) + try: + run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True) + return True, {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Could not delete Web ACL') + return False, {} + + +def main(): + argument_spec = dict( + name=dict(required=True), + default_action=dict(choices=['block', 'allow', 'count']), + metric_name=dict(), + state=dict(default='present', choices=['present', 'absent']), + 
rules=dict(type='list'), + purge_rules=dict(type='bool', default=False), + waf_regional=dict(type='bool', default=False) + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['default_action', 'rules']]]) + state = module.params.get('state') + + resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + client = module.client(resource) + if state == 'present': + (changed, results) = ensure_web_acl_present(client, module) + else: + (changed, results) = ensure_web_acl_absent(client, module) + + module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results)) + + +if __name__ == '__main__': + main() diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py new file mode 100644 index 00000000000..465608151aa --- /dev/null +++ b/cloudformation_exports_info.py @@ -0,0 +1,87 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: cloudformation_exports_info +short_description: Read a value from CloudFormation Exports +description: + - Module retrieves a value from CloudFormation Exports +requirements: ['boto3 >= 1.11.15'] +author: + - "Michael Moyle (@mmoyle)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Get Exports + cloudformation_exports_info: + profile: 'my_aws_profile' + region: 'my_region' + register: cf_exports +- debug: + msg: "{{ cf_exports }}" +''' + +RETURN = ''' +export_items: + description: A dictionary of Exports items names and values. 
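+ # Illustrative shape (the export name and value are hypothetical):
+ # export_items:
+ # MyVpcId: vpc-0123456789abcdef0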
+ returned: Always + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry + +try: + from botocore.exceptions import ClientError + from botocore.exceptions import BotoCoreError +except ImportError: + pass # handled by AnsibleAWSModule + + +@AWSRetry.exponential_backoff() +def list_exports(cloudformation_client): + '''Get Exports Names and Values and return in dictionary ''' + list_exports_paginator = cloudformation_client.get_paginator('list_exports') + exports = list_exports_paginator.paginate().build_full_result()['Exports'] + export_items = dict() + + for item in exports: + export_items[item['Name']] = item['Value'] + + return export_items + + +def main(): + argument_spec = dict() + result = dict( + changed=False, + original_message='' + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) + cloudformation_client = module.client('cloudformation') + + try: + result['export_items'] = list_exports(cloudformation_client) + + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e) + + result.update() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py new file mode 100644 index 00000000000..5b6eb2198df --- /dev/null +++ b/cloudformation_stack_set.py @@ -0,0 +1,724 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloudformation_stack_set +short_description: Manage groups of CloudFormation stacks +description: + - Launches/updates/deletes AWS CloudFormation Stack Sets. +notes: + - To make an individual stack, you want the M(cloudformation) module. +options: + name: + description: + - Name of the CloudFormation stack set. + required: true + type: str + description: + description: + - A description of what this stack set creates. + type: str + parameters: + description: + - A list of hashes of all the template variables for the stack. The value can be a string or a dict. + - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example). + default: {} + type: dict + state: + description: + - If I(state=present), stack will be created. If I(state=present) and if stack exists and template has changed, it will be updated. + If I(state=absent), stack will be removed. + default: present + choices: [ present, absent ] + type: str + template: + description: + - The local path of the CloudFormation template. + - This must be the full path to the file, relative to the working directory. If using roles this may look + like C(roles/cloudformation/files/cloudformation-example.json). + - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) + must be specified (but only one of them). + - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) + are specified, the previous template will be reused. + type: path + template_body: + description: + - Template body. Use this to pass in the actual body of the CloudFormation template. 
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ template_url:
+ description:
+ - Location of file containing the template body.
+ - The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
+ as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ purge_stacks:
+ description:
+ - Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
+ - By default, instances will be deleted. To keep stacks when the stack set is deleted, set I(purge_stacks=false).
+ type: bool
+ default: true
+ wait:
+ description:
+ - Whether or not to wait for stack operation to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
+ - If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for stacks to complete create/update/delete operations.
+ default: 900
+ type: int
+ capabilities:
+ description:
+ - Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
+ - Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
+ - >
+ The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
+ AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
+ type: list
+ elements: str
+ choices:
+ - 'CAPABILITY_IAM'
+ - 'CAPABILITY_NAMED_IAM'
+ regions:
+ description:
+ - A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
+ specifies the region for stack instances.
+ - At least one region must be specified to create a stack set. On updates, if fewer regions are specified, only the specified regions will
+ have their stack instances updated.
+ type: list
+ elements: str
+ accounts:
+ description:
+ - A list of AWS accounts in which to create instances of CloudFormation stacks.
+ - At least one account must be specified to create a stack set. On updates, if fewer accounts are specified, only the specified accounts will
+ have their stack instances updated.
+ type: list
+ elements: str
+ administration_role_arn:
+ description:
+ - ARN of the administration role, meaning the role that CloudFormation Stack Sets uses to assume the roles in your child accounts.
+ - This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
+ account number of the current IAM role/user/STS credentials.
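+ # For example (123456789012 is an illustrative account ID):
+ # arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole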
+ aliases:
+ - admin_role_arn
+ - admin_role
+ - administration_role
+ type: str
+ execution_role_name:
+ description:
+ - Name of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
+ - This MUST NOT be an ARN, and the roles must exist in each child account specified.
+ - The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole).
+ aliases:
+ - exec_role_name
+ - exec_role
+ - execution_role
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ type: dict
+ failure_tolerance:
+ description:
+ - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
+ type: dict
+ suboptions:
+ fail_count:
+ description:
+ - The number of accounts, per region, for which this operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ type: int
+ fail_percentage:
+ type: int
+ description:
+ - The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
+ stops the operation in that region.
+ - You must specify one of I(fail_count) and I(fail_percentage).
+ parallel_percentage:
+ type: int
+ description:
+ - The maximum percentage of accounts in which to perform this operation at one time.
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual percentage may be lower.
+ parallel_count:
+ type: int
+ description:
+ - The maximum number of accounts in which to perform this operation at one time.
+ - I(parallel_count) may be at most one more than the I(fail_count).
+ - You must specify one of I(parallel_count) and I(parallel_percentage).
+ - Note that this setting lets you specify the maximum for operations.
+ For large deployments, under certain circumstances the actual count may be lower.
+
+author: "Ryan Scott Brown (@ryansb)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements: [ boto3>=1.6, botocore>=1.10.26 ]
+'''
+
+EXAMPLES = '''
+- name: Create a stack set with instances in two accounts
+ cloudformation_stack_set:
+ name: my-stack
+ description: Test stack in two accounts
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
+
+- name: On subsequent calls, templates are optional but parameters and tags can be altered
+ cloudformation_stack_set:
+ name: my-stack
+ state: present
+ parameters:
+ InstanceName: my_stacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
+
+- name: The same type of update, but wait for the update to complete in all stacks
+ cloudformation_stack_set:
+ name: my-stack
+ state: present
+ wait: true
+ parameters:
+ InstanceName: my_restacked_instance
+ tags:
+ foo: bar
+ test: stack
+ accounts: [1234567890, 2345678901]
+ regions:
+ - us-east-1
+'''
+
+RETURN = '''
+operations_log:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always + sample: + - action: CREATE + creation_timestamp: '2018-06-18T17:40:46.372000+00:00' + end_timestamp: '2018-06-18T17:41:24.560000+00:00' + operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8 + status: FAILED + stack_instances: + - account: '1234567890' + region: us-east-1 + stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 + status: OUTDATED + status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service. + +operations: + description: All operations initiated by this run of the cloudformation_stack_set module + returned: always + type: list + sample: + - action: CREATE + administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole + creation_timestamp: '2018-06-18T17:40:46.372000+00:00' + end_timestamp: '2018-06-18T17:41:24.560000+00:00' + execution_role_name: AWSCloudFormationStackSetExecutionRole + operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8 + operation_preferences: + region_order: + - us-east-1 + - us-east-2 + stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 + status: FAILED +stack_instances: + description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID. + returned: state == present + type: list + sample: + - account: '1234567890' + region: us-east-1 + stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 + status: OUTDATED + status_reason: > + Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service. + - account: '1234567890' + region: us-east-2 + stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 + status: OUTDATED + status_reason: Cancelled since failure tolerance has exceeded +stack_set: + type: dict + description: Facts about the currently deployed stack set, its parameters, and its tags + returned: state == present + sample: + administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole + capabilities: [] + description: test stack PRIME + execution_role_name: AWSCloudFormationStackSetExecutionRole + parameters: [] + stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-467-ba36-e09f92cf5929 + stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 + stack_set_name: TestStackPrime + status: ACTIVE + tags: + Some: Thing + an: other + template_body: | + AWSTemplateFormatVersion: "2010-09-09" + Parameters: {} + Resources: + Bukkit: + Type: "AWS::S3::Bucket" + Properties: {} + other: + Type: "AWS::SNS::Topic" + Properties: {} + +''' # NOQA + +import time +import datetime +import uuid +import itertools + +try: + import boto3 + import botocore.exceptions + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible.module_utils._text import to_native + + +def create_stack_set(module, stack_params, cfn): + try: + cfn.create_stack_set(aws_retry=True, **stack_params) + return await_stack_set_exists(cfn, stack_params['StackSetName']) + except 
(ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName'))) + + +def update_stack_set(module, stack_params, cfn): + # if the state is present and the stack already exists, we try to update it. + # AWS will tell us if the stack template and parameters are the same and + # don't need to be updated. + try: + cfn.update_stack_set(**stack_params) + except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except + module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.") + except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except + module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters.") + except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except + module.fail_json_aws( + err, msg="Another operation is already in progress on this stack set - please try again later. When making " + "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.") + except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except + module.fail_json_aws(err, msg="Could not update stack set.") + if module.params.get('wait'): + await_stack_set_operation( + module, cfn, operation_id=stack_params['OperationId'], + stack_set_name=stack_params['StackSetName'], + max_wait=module.params.get('wait_timeout'), + ) + + return True + + +def compare_stack_instances(cfn, stack_set_name, accounts, regions): + instance_list = cfn.list_stack_instances( + aws_retry=True, + StackSetName=stack_set_name, + )['Summaries'] + desired_stack_instances = set(itertools.product(accounts, regions)) + existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list) + # new stacks, existing stacks, unspecified stacks + return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) + + +@AWSRetry.backoff(tries=3, delay=4) +def stack_set_facts(cfn, stack_set_name): + try: + ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] + ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) + return ss + except cfn.exceptions.from_code('StackSetNotFound'): + # Return None if the stack doesn't exist + return + + +def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait): + wait_start = datetime.datetime.now() + operation = None + for i in range(max_wait // 15): + try: + operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id) + if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'): + # Stack set has completed operation + break + except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + pass + except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except + pass + time.sleep(15) + + if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'): + await_stack_instance_completion( + module, cfn, + stack_set_name=stack_set_name, + # subtract however long we waited already + max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()), + ) + elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'): + pass + else: + module.warn( + "Timed out waiting for operation {0} on stack set {1} 
after {2} seconds. Returning unfinished operation".format( + operation_id, stack_set_name, max_wait + ) + ) + + +def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): + to_await = None + for i in range(max_wait // 15): + try: + stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name) + to_await = [inst for inst in stack_instances['Summaries'] + if inst['Status'] != 'CURRENT'] + if not to_await: + return stack_instances['Summaries'] + except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + # this means the deletion beat us, or the stack set is not yet propagated + pass + time.sleep(15) + + module.warn( + "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format( + stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait + ) + ) + + +def await_stack_set_exists(cfn, stack_set_name): + # AWSRetry will retry on `StackSetNotFound` errors for us + ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet'] + ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) + return camel_dict_to_snake_dict(ss, ignore_list=('Tags',)) + + +def describe_stack_tree(module, stack_set_name, operation_ids=None): + jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound']) + cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) + result = dict() + result['stack_set'] = camel_dict_to_snake_dict( + cfn.describe_stack_set( + StackSetName=stack_set_name, + aws_retry=True, + )['StackSet'] + ) + result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags']) + result['operations_log'] = sorted( + camel_dict_to_snake_dict( + cfn.list_stack_set_operations( + StackSetName=stack_set_name, + aws_retry=True, + ) + )['summaries'], + key=lambda x: x['creation_timestamp'] + ) + result['stack_instances'] = sorted( + [ + camel_dict_to_snake_dict(i) for i in + cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries'] + ], + key=lambda i: i['region'] + i['account'] + ) + + if operation_ids: + result['operations'] = [] + for op_id in operation_ids: + try: + result['operations'].append(camel_dict_to_snake_dict( + cfn.describe_stack_set_operation( + StackSetName=stack_set_name, + OperationId=op_id, + )['StackSetOperation'] + )) + except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except + pass + return result + + +def get_operation_preferences(module): + params = dict() + if module.params.get('regions'): + params['RegionOrder'] = list(module.params['regions']) + for param, api_name in { + 'fail_count': 'FailureToleranceCount', + 'fail_percentage': 'FailureTolerancePercentage', + 'parallel_percentage': 'MaxConcurrentPercentage', + 'parallel_count': 'MaxConcurrentCount', + }.items(): + if module.params.get('failure_tolerance', {}).get(param): + params[api_name] = module.params.get('failure_tolerance', {}).get(param) + return params + + +def main(): + argument_spec = dict( + name=dict(required=True), + description=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=900), + state=dict(default='present', choices=['present', 'absent']), + purge_stacks=dict(type='bool', default=True), + parameters=dict(type='dict', default={}), + template=dict(type='path'), + template_url=dict(), + template_body=dict(), + capabilities=dict(type='list', choices=['CAPABILITY_IAM', 
'CAPABILITY_NAMED_IAM']), + regions=dict(type='list'), + accounts=dict(type='list'), + failure_tolerance=dict( + type='dict', + default={}, + options=dict( + fail_count=dict(type='int'), + fail_percentage=dict(type='int'), + parallel_percentage=dict(type='int'), + parallel_count=dict(type='int'), + ), + mutually_exclusive=[ + ['fail_count', 'fail_percentage'], + ['parallel_count', 'parallel_percentage'], + ], + ), + administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']), + execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']), + tags=dict(type='dict'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['template_url', 'template', 'template_body']], + supports_check_mode=True + ) + if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')): + module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26") + + # Wrap the cloudformation client methods that this module uses with + # automatic backoff / retry for throttling error codes + jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound']) + cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) + existing_stack_set = stack_set_facts(cfn, module.params['name']) + + operation_uuid = to_native(uuid.uuid4()) + operation_ids = [] + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = {} + state = module.params['state'] + if state == 'present' and not module.params['accounts']: + module.fail_json( + msg="Can't create a stack set without choosing at least one account. " + "To get the ID of the current account, use the aws_caller_info module." + ) + + module.params['accounts'] = [to_native(a) for a in module.params['accounts']] + + stack_params['StackSetName'] = module.params['name'] + if module.params.get('description'): + stack_params['Description'] = module.params['description'] + + if module.params.get('capabilities'): + stack_params['Capabilities'] = module.params['capabilities'] + + if module.params['template'] is not None: + with open(module.params['template'], 'r') as tpl: + stack_params['TemplateBody'] = tpl.read() + elif module.params['template_body'] is not None: + stack_params['TemplateBody'] = module.params['template_body'] + elif module.params['template_url'] is not None: + stack_params['TemplateURL'] = module.params['template_url'] + else: + # no template is provided, but if the stack set exists already, we can use the existing one. + if existing_stack_set: + stack_params['UsePreviousTemplate'] = True + else: + module.fail_json( + msg="The Stack Set {0} does not exist, and no template was provided. 
Provide one of `template`, " + "`template_body`, or `template_url`".format(module.params['name']) + ) + + stack_params['Parameters'] = [] + for k, v in module.params['parameters'].items(): + if isinstance(v, dict): + # set parameter based on a dict to allow additional CFN Parameter Attributes + param = dict(ParameterKey=k) + + if 'value' in v: + param['ParameterValue'] = to_native(v['value']) + + if 'use_previous_value' in v and bool(v['use_previous_value']): + param['UsePreviousValue'] = True + param.pop('ParameterValue', None) + + stack_params['Parameters'].append(param) + else: + # allow default k/v configuration to set a template parameter + stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + + if module.params.get('tags') and isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + + if module.params.get('administration_role_arn'): + # TODO loosen the semantics here to autodetect the account ID and build the ARN + stack_params['AdministrationRoleARN'] = module.params['administration_role_arn'] + if module.params.get('execution_role_name'): + stack_params['ExecutionRoleName'] = module.params['execution_role_name'] + + result = {} + + if module.check_mode: + if state == 'absent' and existing_stack_set: + module.exit_json(changed=True, msg='Stack set would be deleted', meta=[]) + elif state == 'absent' and not existing_stack_set: + module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[]) + elif state == 'present' and not existing_stack_set: + module.exit_json(changed=True, msg='New stack set would be created', meta=[]) + elif state == 'present' and existing_stack_set: + new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances( + cfn, + module.params['name'], + module.params['accounts'], + module.params['regions'], + ) + if new_stacks: + module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[]) + elif unspecified_stacks and module.params.get('purge_stack_instances'): + module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[]) + else: + # TODO: need to check the template and other settings for correct check mode + module.exit_json(changed=False, msg='No changes detected', meta=[]) + + changed = False + if state == 'present': + if not existing_stack_set: + # on create this parameter has a different name, and cannot be referenced later in the job log + stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid) + changed = True + create_stack_set(module, stack_params, cfn) + else: + stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid) + operation_ids.append(stack_params['OperationId']) + if module.params.get('regions'): + stack_params['OperationPreferences'] = get_operation_preferences(module) + changed |= update_stack_set(module, stack_params, cfn) + + # now create/update any appropriate stack instances + new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( + cfn, + module.params['name'], + module.params['accounts'], + module.params['regions'], + ) + if new_stack_instances: + operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid)) + changed = True + cfn.create_stack_instances( + StackSetName=module.params['name'], + Accounts=list(set(acct for acct, region in new_stack_instances)), + Regions=list(set(region for acct, region in new_stack_instances)), + 
OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ else:
+ operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
+ cfn.update_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=list(set(acct for acct, region in existing_stack_instances)),
+ Regions=list(set(region for acct, region in existing_stack_instances)),
+ OperationPreferences=get_operation_preferences(module),
+ OperationId=operation_ids[-1],
+ )
+ for op in operation_ids:
+ await_stack_set_operation(
+ module, cfn, operation_id=op,
+ stack_set_name=module.params['name'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+
+ elif state == 'absent':
+ if not existing_stack_set:
+ module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+ module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
+ except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
+ except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except
+ delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
+ cfn.delete_stack_instances(
+ StackSetName=module.params['name'],
+ Accounts=module.params['accounts'],
+ Regions=module.params['regions'],
+ RetainStacks=(not module.params.get('purge_stacks')),
+ OperationId=delete_instances_op
+ )
+ await_stack_set_operation(
+ module, cfn, operation_id=delete_instances_op,
+ stack_set_name=stack_params['StackSetName'],
+ max_wait=module.params.get('wait_timeout'),
+ )
+ try:
+ cfn.delete_stack_set(
+ StackSetName=module.params['name'],
+ )
+ except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except
+ # this time, it is likely that either the delete failed or there are more stacks.
+ instances = cfn.list_stack_instances(
+ StackSetName=module.params['name'],
+ )
+ stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
+ module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
+ module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
+
+ result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
+ if any(o['status'] == 'FAILED' for o in result.get('operations', [])):
+ module.fail_json(msg="One or more operations failed to execute", **result)
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py
new file mode 100644
index 00000000000..e4dce8cd27a
--- /dev/null
+++ b/cloudfront_distribution.py
@@ -0,0 +1,2264 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+
+module: cloudfront_distribution
+
+short_description: Create, update and delete AWS CloudFront distributions.
+
+description:
+ - Allows for easy creation, updating and deletion of CloudFront distributions.
+
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+
+
+author:
+ - Willem van Ketwich (@wilvk)
+ - Will Thames (@willthames)
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+
+options:
+
+ state:
+ description:
+ - The desired state of the distribution.
+ - I(state=present) creates a new distribution or updates an existing distribution.
+ - I(state=absent) deletes an existing distribution.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ distribution_id:
+ description:
+ - The ID of the CloudFront distribution.
+ - This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
+ type: str
+
+ e_tag:
+ description:
+ - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
+ - Is determined automatically if not specified.
+ type: str
+
+ caller_reference:
+ description:
+ - A unique identifier for creating and updating CloudFront distributions.
+ - Each caller reference must be unique across all distributions. e.g. a caller reference used in a web
+ distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
+ to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
+ C(YYYY-MM-DDTHH:MM:SS.ffffff).
+ type: str
+
+ tags:
+ description:
+ - Should be input as a dict of key-value pairs.
+ - Note that numeric keys or values must be wrapped in quotes, e.g. C(Priority: '1').
+ type: dict
+
+ purge_tags:
+ description:
+ - Specifies whether existing tags will be removed before adding new tags.
+ - When I(purge_tags=yes), existing tags are removed and I(tags) are added, if specified.
+ If no tags are specified, it removes all existing tags for the distribution.
+ - When I(purge_tags=no), existing tags are kept and I(tags) are added, if specified.
+ default: false
+ type: bool
+
+ alias:
+ description:
+ - The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only
+ be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as
+ the I(e_tag), or I(caller_reference) of an existing distribution.
+ type: str
+
+ aliases:
+ description:
+ - A list of domain name aliases (CNAMEs) as strings to be used for the distribution.
+ - Each alias must be unique across all distributions for the AWS account.
+ type: list
+ elements: str
+
+ purge_aliases:
+ description:
+ - Specifies whether existing aliases will be removed before adding new aliases.
+ - When I(purge_aliases=yes), existing aliases are removed and I(aliases) are added.
+ default: false
+ type: bool
+
+ default_root_object:
+ description:
+ - A config element that specifies the path to request when the user requests the origin.
+ - e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is called by the user.
+ - This prevents the entire distribution origin from being exposed at the root.
+ type: str
+
+ default_origin_domain_name:
+ description:
+ - The domain name to use for an origin if no I(origins) have been specified.
+ - Should only be used on a first run of generating a distribution and not on
+ subsequent runs.
+ - Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
+ type: str + + default_origin_path: + description: + - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified. + type: str + + origins: + type: list + elements: dict + description: + - A config element that is a list of complex origin objects to be specified for the distribution. Used for creating and updating distributions. + suboptions: + id: + description: A unique identifier for the origin or origin group. I(id) must be unique within the distribution. + type: str + domain_name: + description: + - The domain name which CloudFront will query as the origin. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) + type: str + origin_path: + description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. + type: str + custom_headers: + description: + - Custom headers you wish to add to the request before passing it to the origin. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) + type: list + elements: dict + suboptions: + header_name: + description: The name of a header that you want CloudFront to forward to your origin. + type: str + header_value: + description: The value for the header that you specified in the I(header_name) field. + type: str + s3_origin_access_identity_enabled: + description: + - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. + - Will automatically create an Identity for you. + - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html). + type: bool + custom_origin_config: + description: Connection information about the origin. + type: dict + suboptions: + http_port: + description: The HTTP port the custom origin listens on. + type: int + https_port: + description: The HTTPS port the custom origin listens on. + type: int + origin_protocol_policy: + description: The origin protocol policy to apply to your origin. + type: str + origin_ssl_protocols: + description: A list of SSL/TLS protocols that you want CloudFront to use when communicating to the origin over HTTPS. + type: list + elements: str + origin_read_timeout: + description: A timeout (in seconds) when reading from your origin. + type: int + origin_keepalive_timeout: + description: A keep-alive timeout (in seconds). + type: int + + purge_origins: + description: Whether to remove any origins that aren't listed in I(origins). + default: false + type: bool + + default_cache_behavior: + type: dict + description: + - A dict specifying the default cache behavior of the distribution. + - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid + I(cache_behavior) in I(cache_behaviors) with defaults. + suboptions: + target_origin_id: + description: + - The ID of the origin that you want CloudFront to route requests to + by default. + type: str + forwarded_values: + description: + - A dict that specifies how CloudFront handles query strings and cookies. + type: dict + suboptions: + query_string: + description: + - Indicates whether you want CloudFront to forward query strings + to the origin that is associated with this cache behavior. 
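+ # Forwarded query strings become part of the cache key unless
+ # query_string_cache_keys narrows them down.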
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers use a list containing a single element '*' (C(['*'])).
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(target_origin_id).
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+ cache_behaviors:
+ type: list
+ elements: dict
+ description:
+ - A list of dictionaries describing the cache behaviors for the distribution.
+ - The order of the list is preserved across runs unless I(purge_cache_behaviors) is enabled.
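+ # CloudFront evaluates cache behaviors in the order listed here, so place
+ # more specific path patterns before broader catch-alls.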
+ suboptions:
+ path_pattern:
+ description:
+ - The pattern that specifies which requests to apply the behavior to.
+ type: str
+ target_origin_id:
+ description:
+ - The ID of the origin that you want CloudFront to route requests to
+ by default.
+ type: str
+ forwarded_values:
+ description:
+ - A dict that specifies how CloudFront handles query strings and cookies.
+ type: dict
+ suboptions:
+ query_string:
+ description:
+ - Indicates whether you want CloudFront to forward query strings
+ to the origin that is associated with this cache behavior.
+ type: bool
+ cookies:
+ description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
+ type: dict
+ suboptions:
+ forward:
+ description:
+ - Specifies which cookies to forward to the origin for this cache behavior.
+ - Valid values are C(all), C(none), or C(whitelist).
+ type: str
+ whitelisted_names:
+ type: list
+ elements: str
+ description: A list of cookies to forward to the origin for this cache behavior.
+ headers:
+ description:
+ - A list of headers to forward to the origin for this cache behavior.
+ - To forward all headers use a list containing a single element '*' (C(['*'])).
+ type: list
+ elements: str
+ query_string_cache_keys:
+ description:
+ - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
+ type: list
+ elements: str
+ trusted_signers:
+ description:
+ - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
+ type: dict
+ suboptions:
+ enabled:
+ description: Whether you want to require viewers to use signed URLs to access the files specified by I(path_pattern) and I(target_origin_id).
+ type: bool
+ items:
+ description: A list of trusted signers for this cache behavior.
+ elements: str
+ type: list
+ viewer_protocol_policy:
+ description:
+ - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id) when a request matches I(path_pattern).
+ - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
+ type: str
+ default_ttl:
+ description: The default amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ max_ttl:
+ description: The maximum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ min_ttl:
+ description: The minimum amount of time that you want objects to stay in CloudFront caches.
+ type: int
+ allowed_methods:
+ description: A dict that controls which HTTP methods CloudFront processes and forwards.
+ type: dict
+ suboptions:
+ items:
+ description: A list of HTTP methods that you want CloudFront to process and forward.
+ type: list
+ elements: str
+ cached_methods:
+ description:
+ - A list of HTTP methods that you want CloudFront to apply caching to.
+ - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
+ type: list
+ elements: str
+ smooth_streaming:
+ description:
+ - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
+ type: bool
+ compress:
+ description:
+ - Whether you want CloudFront to automatically compress files.
+ type: bool
+ lambda_function_associations:
+ description:
+ - A list of Lambda function associations to use for this cache behavior.
+ type: list
+ elements: dict
+ suboptions:
+ lambda_function_arn:
+ description: The ARN of the Lambda function.
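+ # Lambda@Edge associations require the ARN of a numbered function
+ # version, not $LATEST or an alias.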
+ type: str
+ event_type:
+ description:
+ - Specifies the event type that triggers a Lambda function invocation.
+ - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
+ type: str
+ field_level_encryption_id:
+ description:
+ - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
+ type: str
+
+
+ purge_cache_behaviors:
+ description:
+ - Whether to remove any cache behaviors that aren't listed in I(cache_behaviors).
+ - This switch also allows the reordering of I(cache_behaviors).
+ default: false
+ type: bool
+
+ custom_error_responses:
+ type: list
+ elements: dict
+ description:
+ - A config element that is a list of complex custom error responses to be specified for the distribution.
+ - This attribute configures custom HTTP error messages returned to the user.
+ suboptions:
+ error_code:
+ type: int
+ description: The error code the custom error page is for.
+ error_caching_min_ttl:
+ type: int
+ description: The length of time (in seconds) that CloudFront will cache status codes for.
+ response_code:
+ type: int
+ description:
+ - The HTTP status code that CloudFront should return to a user when the origin returns the HTTP status code specified by I(error_code).
+ response_page_path:
+ type: str
+ description:
+ - The path to the custom error page that you want CloudFront to return to a viewer when your origin returns
+ the HTTP status code specified by I(error_code).
+
+ purge_custom_error_responses:
+ description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses).
+ default: false
+ type: bool
+
+ comment:
+ description:
+ - A comment that describes the CloudFront distribution.
+ - If not specified, it defaults to a generic message noting that it was created with Ansible, plus a datetime stamp.
+ type: str
+
+ logging:
+ description:
+ - A config element that is a complex object that defines logging for the distribution.
+ suboptions:
+ enabled:
+ description: When I(enabled=true) CloudFront will log access to an S3 bucket.
+ type: bool
+ include_cookies:
+ description: When I(include_cookies=true) CloudFront will include cookies in the logs.
+ type: bool
+ bucket:
+ description: The S3 bucket to store the log in.
+ type: str
+ prefix:
+ description: A prefix to include in the S3 object names.
+ type: str
+ type: dict
+
+ price_class:
+ description:
+ - A string that specifies the pricing class of the distribution, as per
+ U(https://aws.amazon.com/cloudfront/pricing/).
+ - I(price_class=PriceClass_100) consists of the areas United States, Canada and Europe.
+ - I(price_class=PriceClass_200) consists of the areas United States, Canada, Europe, Japan, India,
+ Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - I(price_class=PriceClass_All) consists of the areas United States, Canada, Europe, Japan, India,
+ South America, Australia, Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
+ - AWS defaults this to C(PriceClass_All).
+ - Valid values are C(PriceClass_100), C(PriceClass_200) and C(PriceClass_All).
+ type: str
+
+ enabled:
+ description:
+ - A boolean value that specifies whether the distribution is enabled or disabled.
+ default: false
+ type: bool
+
+ viewer_certificate:
+ type: dict
+ description:
+ - A dict that specifies the encryption details of the distribution.
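+ # A minimal sketch (the ARN is illustrative); ACM certificates used by
+ # CloudFront must be in us-east-1:
+ # viewer_certificate:
+ # acm_certificate_arn: arn:aws:acm:us-east-1:123456789012:certificate/example
+ # ssl_support_method: sni-only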
+ suboptions:
+ cloudfront_default_certificate:
+ type: bool
+ description:
+ - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net),
+ you should set I(cloudfront_default_certificate=true).
+ - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method).
+ iam_certificate_id:
+ type: str
+ description:
+ - The ID of a certificate stored in IAM to use for HTTPS connections.
+ - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method).
+ acm_certificate_arn:
+ type: str
+ description:
+ - The ARN of a certificate stored in ACM to use for HTTPS connections.
+ - If I(acm_certificate_arn) is set then you must also specify I(ssl_support_method).
+ ssl_support_method:
+ type: str
+ description:
+ - How CloudFront should serve SSL certificates.
+ - Valid values are C(sni-only) for SNI, and C(vip) if CloudFront is configured to use a dedicated IP for your content.
+ minimum_protocol_version:
+ type: str
+ description:
+ - The security policy that you want CloudFront to use for HTTPS connections.
+ - See U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html)
+ for supported security policies.
+
+ restrictions:
+ type: dict
+ description:
+ - A config element that is a complex object that describes how a distribution should restrict its content.
+ suboptions:
+ geo_restriction:
+ description: Apply a restriction based on the location of the requester.
+ type: dict
+ suboptions:
+ restriction_type:
+ type: str
+ description:
+ - The method that you want to use to restrict distribution of your content by country.
+ - Valid values are C(none), C(whitelist) and C(blacklist).
+ items:
+ description:
+ - A list of ISO 3166-1 two letter (Alpha 2) country codes that the
+ restriction should apply to.
+ - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/)'
+ type: list
+
+ web_acl_id:
+ description:
+ - The ID of a Web Application Firewall (WAF) Access Control List (ACL).
+ type: str
+
+ http_version:
+ description:
+ - The version of the HTTP protocol to use for the distribution.
+ - AWS defaults this to C(http2).
+ - Valid values are C(http1.1) and C(http2).
+ type: str
+
+ ipv6_enabled:
+ description:
+ - Determines whether IPv6 support is enabled or not.
+ type: bool
+ default: false
+
+ wait:
+ description:
+ - Specifies whether the module waits until the distribution has completed processing the creation or update.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Specifies the duration in seconds to wait for a timeout of a CloudFront create or update.
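+ # Distribution changes can take 15 minutes or more to fully deploy,
+ # hence the generous default below.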
+ default: 1800 + type: int + +''' + +EXAMPLES = ''' + +# create a basic distribution with defaults and tags + +- cloudfront_distribution: + state: present + default_origin_domain_name: www.my-cloudfront-origin.com + tags: + Name: example distribution + Project: example project + Priority: '1' + +# update a distribution comment by distribution_id + +- cloudfront_distribution: + state: present + distribution_id: E1RP5A2MJ8073O + comment: modified by ansible cloudfront.py + +# update a distribution comment by caller_reference + +- cloudfront_distribution: + state: present + caller_reference: my cloudfront distribution 001 + comment: modified by ansible cloudfront.py + +# update a distribution's aliases and comment using the distribution_id as a reference + +- cloudfront_distribution: + state: present + distribution_id: E1RP5A2MJ8073O + comment: modified by cloudfront.py again + aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ] + +# update a distribution's aliases and comment using an alias as a reference + +- cloudfront_distribution: + state: present + caller_reference: my test distribution + comment: modified by cloudfront.py again + aliases: + - www.my-distribution-source.com + - zzz.aaa.io + +# update a distribution's comment and aliases and tags and remove existing tags + +- cloudfront_distribution: + state: present + distribution_id: E15BU8SDCGSG57 + comment: modified by cloudfront.py again + aliases: + - tested.com + tags: + Project: distribution 1.2 + purge_tags: yes + +# create a distribution with an origin, logging and default cache behavior + +- cloudfront_distribution: + state: present + caller_reference: unique test distribution ID + origins: + - id: 'my test origin-000111' + domain_name: www.example.com + origin_path: /production + custom_headers: + - header_name: MyCustomHeaderName + header_value: MyCustomHeaderValue + default_cache_behavior: + target_origin_id: 'my test origin-000111' + forwarded_values: + query_string: true + cookies: + forward: all + headers: + - '*' + viewer_protocol_policy: allow-all + smooth_streaming: true + compress: true + allowed_methods: + items: + - GET + - HEAD + cached_methods: + - GET + - HEAD + logging: + enabled: true + include_cookies: false + bucket: mylogbucket.s3.amazonaws.com + prefix: myprefix/ + enabled: false + comment: this is a CloudFront distribution with logging + +# delete a distribution + +- cloudfront_distribution: + state: absent + caller_reference: replaceable distribution +''' + +RETURN = ''' +active_trusted_signers: + description: Key pair IDs that CloudFront is aware of for each trusted signer. + returned: always + type: complex + contains: + enabled: + description: Whether trusted signers are in use. + returned: always + type: bool + sample: false + quantity: + description: Number of trusted signers. + returned: always + type: int + sample: 1 + items: + description: Number of trusted signers. + returned: when there are trusted signers + type: list + sample: + - key_pair_id +aliases: + description: Aliases that refer to the distribution. + returned: always + type: complex + contains: + items: + description: List of aliases. + returned: always + type: list + sample: + - test.example.com + quantity: + description: Number of aliases. + returned: always + type: int + sample: 1 +arn: + description: Amazon Resource Name of the distribution. + returned: always + type: str + sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI +cache_behaviors: + description: CloudFront cache behaviors. 
+ returned: always + type: complex + contains: + items: + description: List of cache behaviors. + returned: always + type: complex + contains: + allowed_methods: + description: Methods allowed by the cache behavior. + returned: always + type: complex + contains: + cached_methods: + description: Methods cached by the cache behavior. + returned: always + type: complex + contains: + items: + description: List of cached methods. + returned: always + type: list + sample: + - HEAD + - GET + quantity: + description: Count of cached methods. + returned: always + type: int + sample: 2 + items: + description: List of methods allowed by the cache behavior. + returned: always + type: list + sample: + - HEAD + - GET + quantity: + description: Count of methods allowed by the cache behavior. + returned: always + type: int + sample: 2 + compress: + description: Whether compression is turned on for the cache behavior. + returned: always + type: bool + sample: false + default_ttl: + description: Default Time to Live of the cache behavior. + returned: always + type: int + sample: 86400 + forwarded_values: + description: Values forwarded to the origin for this cache behavior. + returned: always + type: complex + contains: + cookies: + description: Cookies to forward to the origin. + returned: always + type: complex + contains: + forward: + description: Which cookies to forward to the origin for this cache behavior. + returned: always + type: str + sample: none + whitelisted_names: + description: The names of the cookies to forward to the origin for this cache behavior. + returned: when I(forward=whitelist) + type: complex + contains: + quantity: + description: Count of cookies to forward. + returned: always + type: int + sample: 1 + items: + description: List of cookies to forward. + returned: when list is not empty + type: list + sample: my_cookie + headers: + description: Which headers are used to vary on cache retrievals. + returned: always + type: complex + contains: + quantity: + description: Count of headers to vary on. + returned: always + type: int + sample: 1 + items: + description: List of headers to vary on. + returned: when list is not empty + type: list + sample: + - Host + query_string: + description: Whether the query string is used in cache lookups. + returned: always + type: bool + sample: false + query_string_cache_keys: + description: Which query string keys to use in cache lookups. + returned: always + type: complex + contains: + quantity: + description: Count of query string cache keys to use in cache lookups. + returned: always + type: int + sample: 1 + items: + description: List of query string cache keys to use in cache lookups. + returned: when list is not empty + type: list + sample: + lambda_function_associations: + description: Lambda function associations for a cache behavior. + returned: always + type: complex + contains: + quantity: + description: Count of lambda function associations. + returned: always + type: int + sample: 1 + items: + description: List of lambda function associations. + returned: when list is not empty + type: list + sample: + - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function + event_type: viewer-response + max_ttl: + description: Maximum Time to Live. + returned: always + type: int + sample: 31536000 + min_ttl: + description: Minimum Time to Live. + returned: always + type: int + sample: 0 + path_pattern: + description: Path pattern that determines this cache behavior. 
+ returned: always + type: str + sample: /path/to/files/* + smooth_streaming: + description: Whether smooth streaming is enabled. + returned: always + type: bool + sample: false + target_origin_id: + description: ID of origin reference by this cache behavior. + returned: always + type: str + sample: origin_abcd + trusted_signers: + description: Trusted signers. + returned: always + type: complex + contains: + enabled: + description: Whether trusted signers are enabled for this cache behavior. + returned: always + type: bool + sample: false + quantity: + description: Count of trusted signers. + returned: always + type: int + sample: 1 + viewer_protocol_policy: + description: Policy of how to handle http/https. + returned: always + type: str + sample: redirect-to-https + quantity: + description: Count of cache behaviors. + returned: always + type: int + sample: 1 + +caller_reference: + description: Idempotency reference given when creating CloudFront distribution. + returned: always + type: str + sample: '1484796016700' +comment: + description: Any comments you want to include about the distribution. + returned: always + type: str + sample: 'my first CloudFront distribution' +custom_error_responses: + description: Custom error responses to use for error handling. + returned: always + type: complex + contains: + items: + description: List of custom error responses. + returned: always + type: complex + contains: + error_caching_min_ttl: + description: Minimum time to cache this error response. + returned: always + type: int + sample: 300 + error_code: + description: Origin response code that triggers this error response. + returned: always + type: int + sample: 500 + response_code: + description: Response code to return to the requester. + returned: always + type: str + sample: '500' + response_page_path: + description: Path that contains the error page to display. + returned: always + type: str + sample: /errors/5xx.html + quantity: + description: Count of custom error response items + returned: always + type: int + sample: 1 +default_cache_behavior: + description: Default cache behavior. + returned: always + type: complex + contains: + allowed_methods: + description: Methods allowed by the cache behavior. + returned: always + type: complex + contains: + cached_methods: + description: Methods cached by the cache behavior. + returned: always + type: complex + contains: + items: + description: List of cached methods. + returned: always + type: list + sample: + - HEAD + - GET + quantity: + description: Count of cached methods. + returned: always + type: int + sample: 2 + items: + description: List of methods allowed by the cache behavior. + returned: always + type: list + sample: + - HEAD + - GET + quantity: + description: Count of methods allowed by the cache behavior. + returned: always + type: int + sample: 2 + compress: + description: Whether compression is turned on for the cache behavior. + returned: always + type: bool + sample: false + default_ttl: + description: Default Time to Live of the cache behavior. + returned: always + type: int + sample: 86400 + forwarded_values: + description: Values forwarded to the origin for this cache behavior. + returned: always + type: complex + contains: + cookies: + description: Cookies to forward to the origin. + returned: always + type: complex + contains: + forward: + description: Which cookies to forward to the origin for this cache behavior. 
+ returned: always + type: str + sample: none + whitelisted_names: + description: The names of the cookies to forward to the origin for this cache behavior. + returned: when I(forward=whitelist) + type: complex + contains: + quantity: + description: Count of cookies to forward. + returned: always + type: int + sample: 1 + items: + description: List of cookies to forward. + returned: when list is not empty + type: list + sample: my_cookie + headers: + description: Which headers are used to vary on cache retrievals. + returned: always + type: complex + contains: + quantity: + description: Count of headers to vary on. + returned: always + type: int + sample: 1 + items: + description: List of headers to vary on. + returned: when list is not empty + type: list + sample: + - Host + query_string: + description: Whether the query string is used in cache lookups. + returned: always + type: bool + sample: false + query_string_cache_keys: + description: Which query string keys to use in cache lookups. + returned: always + type: complex + contains: + quantity: + description: Count of query string cache keys to use in cache lookups. + returned: always + type: int + sample: 1 + items: + description: List of query string cache keys to use in cache lookups. + returned: when list is not empty + type: list + sample: + lambda_function_associations: + description: Lambda function associations for a cache behavior. + returned: always + type: complex + contains: + quantity: + description: Count of lambda function associations. + returned: always + type: int + sample: 1 + items: + description: List of lambda function associations. + returned: when list is not empty + type: list + sample: + - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function + event_type: viewer-response + max_ttl: + description: Maximum Time to Live. + returned: always + type: int + sample: 31536000 + min_ttl: + description: Minimum Time to Live. + returned: always + type: int + sample: 0 + path_pattern: + description: Path pattern that determines this cache behavior. + returned: always + type: str + sample: /path/to/files/* + smooth_streaming: + description: Whether smooth streaming is enabled. + returned: always + type: bool + sample: false + target_origin_id: + description: ID of origin reference by this cache behavior. + returned: always + type: str + sample: origin_abcd + trusted_signers: + description: Trusted signers. + returned: always + type: complex + contains: + enabled: + description: Whether trusted signers are enabled for this cache behavior. + returned: always + type: bool + sample: false + quantity: + description: Count of trusted signers. + returned: always + type: int + sample: 1 + viewer_protocol_policy: + description: Policy of how to handle http/https. + returned: always + type: str + sample: redirect-to-https +default_root_object: + description: The object that you want CloudFront to request from your origin (for example, index.html) + when a viewer requests the root URL for your distribution. + returned: always + type: str + sample: '' +diff: + description: Difference between previous configuration and new configuration. + returned: always + type: dict + sample: {} +domain_name: + description: Domain name of CloudFront distribution. + returned: always + type: str + sample: d1vz8pzgurxosf.cloudfront.net +enabled: + description: Whether the CloudFront distribution is enabled or not. 
+ returned: always + type: bool + sample: true +http_version: + description: Version of HTTP supported by the distribution. + returned: always + type: str + sample: http2 +id: + description: CloudFront distribution ID. + returned: always + type: str + sample: E123456ABCDEFG +in_progress_invalidation_batches: + description: The number of invalidation batches currently in progress. + returned: always + type: int + sample: 0 +is_ipv6_enabled: + description: Whether IPv6 is enabled. + returned: always + type: bool + sample: true +last_modified_time: + description: Date and time distribution was last modified. + returned: always + type: str + sample: '2017-10-13T01:51:12.656000+00:00' +logging: + description: Logging information. + returned: always + type: complex + contains: + bucket: + description: S3 bucket logging destination. + returned: always + type: str + sample: logs-example-com.s3.amazonaws.com + enabled: + description: Whether logging is enabled. + returned: always + type: bool + sample: true + include_cookies: + description: Whether to log cookies. + returned: always + type: bool + sample: false + prefix: + description: Prefix added to logging object names. + returned: always + type: str + sample: cloudfront/test +origins: + description: Origins in the CloudFront distribution. + returned: always + type: complex + contains: + items: + description: List of origins. + returned: always + type: complex + contains: + custom_headers: + description: Custom headers passed to the origin. + returned: always + type: complex + contains: + quantity: + description: Count of headers. + returned: always + type: int + sample: 1 + custom_origin_config: + description: Configuration of the origin. + returned: always + type: complex + contains: + http_port: + description: Port on which HTTP is listening. + returned: always + type: int + sample: 80 + https_port: + description: Port on which HTTPS is listening. + returned: always + type: int + sample: 443 + origin_keepalive_timeout: + description: Keep-alive timeout. + returned: always + type: int + sample: 5 + origin_protocol_policy: + description: Policy of which protocols are supported. + returned: always + type: str + sample: https-only + origin_read_timeout: + description: Timeout for reads to the origin. + returned: always + type: int + sample: 30 + origin_ssl_protocols: + description: SSL protocols allowed by the origin. + returned: always + type: complex + contains: + items: + description: List of SSL protocols. + returned: always + type: list + sample: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + quantity: + description: Count of SSL protocols. + returned: always + type: int + sample: 3 + domain_name: + description: Domain name of the origin. + returned: always + type: str + sample: test-origin.example.com + id: + description: ID of the origin. + returned: always + type: str + sample: test-origin.example.com + origin_path: + description: Subdirectory to prefix the request from the S3 or HTTP origin. + returned: always + type: str + sample: '' + quantity: + description: Count of origins. + returned: always + type: int + sample: 1 +price_class: + description: Price class of CloudFront distribution. + returned: always + type: str + sample: PriceClass_All +restrictions: + description: Restrictions in use by CloudFront. + returned: always + type: complex + contains: + geo_restriction: + description: Controls the countries in which your content is distributed. + returned: always + type: complex + contains: + quantity: + description: Count of restrictions. 
+ returned: always + type: int + sample: 1 + items: + description: List of country codes allowed or disallowed. + returned: always + type: list + sample: xy + restriction_type: + description: Type of restriction. + returned: always + type: str + sample: blacklist +status: + description: Status of the CloudFront distribution. + returned: always + type: str + sample: InProgress +tags: + description: Distribution tags. + returned: always + type: dict + sample: + Hello: World +viewer_certificate: + description: Certificate used by CloudFront distribution. + returned: always + type: complex + contains: + acm_certificate_arn: + description: ARN of ACM certificate. + returned: when certificate comes from ACM + type: str + sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef + certificate: + description: Reference to certificate. + returned: always + type: str + sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef + certificate_source: + description: Where certificate comes from. + returned: always + type: str + sample: acm + minimum_protocol_version: + description: Minimum SSL/TLS protocol supported by this distribution. + returned: always + type: str + sample: TLSv1 + ssl_support_method: + description: Support for pre-SNI browsers or not. + returned: always + type: str + sample: sni-only +web_acl_id: + description: ID of Web Access Control List (from WAF service). + returned: always + type: str + sample: abcd1234-1234-abcd-abcd-abcd12345678 +''' + +from ansible.module_utils._text import to_text, to_native +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible.module_utils.common.dict_transformations import recursive_diff +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +import datetime + +try: + from collections import OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict + except ImportError: + pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed) + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +def change_dict_key_name(dictionary, old_key, new_key): + if old_key in dictionary: + dictionary[new_key] = dictionary.get(old_key) + dictionary.pop(old_key, None) + return dictionary + + +def merge_validation_into_config(config, validated_node, node_name): + if validated_node is not None: + if isinstance(validated_node, dict): + config_node = config.get(node_name) + if config_node is not None: + config_node_items = list(config_node.items()) + else: + config_node_items = [] + config[node_name] = dict(config_node_items + list(validated_node.items())) + if isinstance(validated_node, list): + config[node_name] = list(set(config.get(node_name) + validated_node)) + return config + + +def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): + if list_items is None: + list_items = [] + if not isinstance(list_items, list): + raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items))) + result = {} + if include_quantity: + result['quantity'] = len(list_items) + if 
len(list_items) > 0: + result['items'] = list_items + return result + + +def create_distribution(client, module, config, tags): + try: + if not tags: + return client.create_distribution(DistributionConfig=config)['Distribution'] + else: + distribution_config_with_tags = { + 'DistributionConfig': config, + 'Tags': { + 'Items': tags + } + } + return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error creating distribution") + + +def delete_distribution(client, module, distribution): + try: + return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) + + +def update_distribution(client, module, config, distribution_id, e_tag): + try: + return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) + + +def tag_resource(client, module, arn, tags): + try: + return client.tag_resource(Resource=arn, Tags=dict(Items=tags)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error tagging resource") + + +def untag_resource(client, module, arn, tag_keys): + try: + return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error untagging resource") + + +def list_tags_for_resource(client, module, arn): + try: + response = client.list_tags_for_resource(Resource=arn) + return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error listing tags for resource") + + +def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn): + changed = False + to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags) + if to_remove: + untag_resource(client, module, arn, to_remove) + changed = True + if to_add: + tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add)) + changed = True + return changed + + +class CloudFrontValidationManager(object): + """ + Manages CloudFront validations + """ + + def __init__(self, module): + self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + self.module = module + self.__default_distribution_enabled = True + self.__default_http_port = 80 + self.__default_https_port = 443 + self.__default_ipv6_enabled = False + self.__default_origin_ssl_protocols = [ + 'TLSv1', + 'TLSv1.1', + 'TLSv1.2' + ] + self.__default_custom_origin_protocol_policy = 'match-viewer' + self.__default_custom_origin_read_timeout = 30 + self.__default_custom_origin_keepalive_timeout = 5 + self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + self.__default_cache_behavior_min_ttl = 0 + self.__default_cache_behavior_max_ttl = 31536000 + self.__default_cache_behavior_default_ttl = 86400 + self.__default_cache_behavior_compress = False + 
self.__default_cache_behavior_viewer_protocol_policy = 'allow-all' + self.__default_cache_behavior_smooth_streaming = False + self.__default_cache_behavior_forwarded_values_forward_cookies = 'none' + self.__default_cache_behavior_forwarded_values_query_string = True + self.__default_trusted_signers_enabled = False + self.__valid_price_classes = set([ + 'PriceClass_100', + 'PriceClass_200', + 'PriceClass_All' + ]) + self.__valid_origin_protocol_policies = set([ + 'http-only', + 'match-viewer', + 'https-only' + ]) + self.__valid_origin_ssl_protocols = set([ + 'SSLv3', + 'TLSv1', + 'TLSv1.1', + 'TLSv1.2' + ]) + self.__valid_cookie_forwarding = set([ + 'none', + 'whitelist', + 'all' + ]) + self.__valid_viewer_protocol_policies = set([ + 'allow-all', + 'https-only', + 'redirect-to-https' + ]) + self.__valid_methods = set([ + 'GET', + 'HEAD', + 'POST', + 'PUT', + 'PATCH', + 'OPTIONS', + 'DELETE' + ]) + self.__valid_methods_cached_methods = [ + set([ + 'GET', + 'HEAD' + ]), + set([ + 'GET', + 'HEAD', + 'OPTIONS' + ]) + ] + self.__valid_methods_allowed_methods = [ + self.__valid_methods_cached_methods[0], + self.__valid_methods_cached_methods[1], + self.__valid_methods + ] + self.__valid_lambda_function_association_event_types = set([ + 'viewer-request', + 'viewer-response', + 'origin-request', + 'origin-response' + ]) + self.__valid_viewer_certificate_ssl_support_methods = set([ + 'sni-only', + 'vip' + ]) + self.__valid_viewer_certificate_minimum_protocol_versions = set([ + 'SSLv3', + 'TLSv1', + 'TLSv1_2016', + 'TLSv1.1_2016', + 'TLSv1.2_2018' + ]) + self.__valid_viewer_certificate_certificate_sources = set([ + 'cloudfront', + 'iam', + 'acm' + ]) + self.__valid_http_versions = set([ + 'http1.1', + 'http2' + ]) + self.__s3_bucket_domain_identifier = '.s3.amazonaws.com' + + def add_missing_key(self, dict_object, key_to_set, value_to_set): + if key_to_set not in dict_object and value_to_set is not None: + dict_object[key_to_set] = value_to_set + return dict_object + + def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set): + if old_key not in dict_object and value_to_set is not None: + dict_object[new_key] = value_to_set + else: + dict_object = change_dict_key_name(dict_object, old_key, new_key) + return dict_object + + def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False): + if key_name in dict_object: + self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) + else: + if to_aws_list: + dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set) + elif value_to_set is not None: + dict_object[key_name] = value_to_set + return dict_object + + def validate_logging(self, logging): + try: + if logging is None: + return None + valid_logging = {} + if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging): + self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.") + valid_logging['include_cookies'] = logging.get('include_cookies') + valid_logging['enabled'] = logging.get('enabled') + valid_logging['bucket'] = logging.get('bucket') + valid_logging['prefix'] = logging.get('prefix') + return valid_logging + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating distribution logging") + + def validate_is_list(self, list_to_validate, list_name): + if not isinstance(list_to_validate, list): + self.module.fail_json(msg='%s is of type %s. Must be a list.' 
% (list_name, type(list_to_validate).__name__))
+
+    def validate_required_key(self, key_name, full_key_name, dict_object):
+        if key_name not in dict_object:
+            self.module.fail_json(msg="%s must be specified." % full_key_name)
+
+    def validate_origins(self, client, config, origins, default_origin_domain_name,
+                         default_origin_path, create_distribution, purge_origins=False):
+        try:
+            if origins is None:
+                if default_origin_domain_name is None and not create_distribution:
+                    if purge_origins:
+                        return None
+                    else:
+                        return ansible_list_to_cloudfront_list(config)
+                if default_origin_domain_name is not None:
+                    origins = [{
+                        'domain_name': default_origin_domain_name,
+                        'origin_path': default_origin_path or ''
+                    }]
+                else:
+                    origins = []
+            self.validate_is_list(origins, 'origins')
+            if not origins and default_origin_domain_name is None and create_distribution:
+                self.module.fail_json(msg="Neither origins[] nor default_origin_domain_name has been specified. Please specify at least one.")
+            all_origins = OrderedDict()
+            new_domains = list()
+            for origin in config:
+                all_origins[origin.get('domain_name')] = origin
+            for origin in origins:
+                origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
+                all_origins[origin['domain_name']] = origin
+                new_domains.append(origin['domain_name'])
+            if purge_origins:
+                for domain in list(all_origins.keys()):
+                    if domain not in new_domains:
+                        del(all_origins[domain])
+            return ansible_list_to_cloudfront_list(list(all_origins.values()))
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating distribution origins")
+
+    def validate_s3_origin_configuration(self, client, existing_config, origin):
+        if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
+            return existing_config['s3_origin_config']['origin_access_identity']
+        if not origin['s3_origin_access_identity_enabled']:
+            return None
+        try:
+            comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+            caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
+            cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
+                                                                          Comment=comment))
+            oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
+        return "origin-access-identity/cloudfront/%s" % oai
+
+    def validate_origin(self, client, existing_config, origin, default_origin_path):
+        try:
+            origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
+            self.validate_required_key('origin_path', 'origins[].origin_path', origin)
+            origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
+            if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
+                for custom_header in origin.get('custom_headers'):
+                    if 'header_name' not in custom_header or 'header_value' not in custom_header:
+                        self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
+                origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
+            else:
+                origin['custom_headers'] = ansible_list_to_cloudfront_list()
+            if self.__s3_bucket_domain_identifier in
origin.get('domain_name').lower(): + if origin.get("s3_origin_access_identity_enabled") is not None: + s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) + if s3_origin_config: + oai = s3_origin_config + else: + oai = "" + origin["s3_origin_config"] = dict(origin_access_identity=oai) + del(origin["s3_origin_access_identity_enabled"]) + if 'custom_origin_config' in origin: + self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") + else: + origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {})) + custom_origin_config = origin.get('custom_origin_config') + custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy', + 'origins[].custom_origin_config.origin_protocol_policy', + self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies) + custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout) + custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout) + custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port) + custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port) + if custom_origin_config.get('origin_ssl_protocols', {}).get('items'): + custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items'] + if custom_origin_config.get('origin_ssl_protocols'): + self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols', + self.__valid_origin_ssl_protocols) + else: + custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols + custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols']) + return origin + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error validating distribution origin") + + def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False): + try: + if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False: + return ansible_list_to_cloudfront_list(config) + all_cache_behaviors = OrderedDict() + # cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors + # is true (if purge_cache_behaviors is not true, we can't really know the full new order) + if not purge_cache_behaviors: + for behavior in config: + all_cache_behaviors[behavior['path_pattern']] = behavior + for cache_behavior in cache_behaviors: + valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}), + cache_behavior, valid_origins) + all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior + if purge_cache_behaviors: + for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]): + del(all_cache_behaviors[target_origin_id]) + return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating distribution cache 
behaviors") + + def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False): + if is_default_cache and cache_behavior is None: + cache_behavior = {} + if cache_behavior is None and valid_origins is not None: + return config + cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache) + cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior) + cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior) + cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior) + cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior) + cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior) + return cache_behavior + + def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): + try: + cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l', + config.get('min_t_t_l', self.__default_cache_behavior_min_ttl)) + cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l', + config.get('max_t_t_l', self.__default_cache_behavior_max_ttl)) + cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l', + config.get('default_t_t_l', self.__default_cache_behavior_default_ttl)) + cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress)) + target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id')) + if not target_origin_id: + target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins) + if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]: + if is_default_cache: + cache_behavior_name = 'Default cache behavior' + else: + cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern'] + self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." 
% + cache_behavior_name) + cache_behavior['target_origin_id'] = target_origin_id + cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy', + config.get('viewer_protocol_policy', + self.__default_cache_behavior_viewer_protocol_policy), + self.__valid_viewer_protocol_policies) + cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming', + config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming)) + return cache_behavior + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys") + + def validate_forwarded_values(self, config, forwarded_values, cache_behavior): + try: + if not forwarded_values: + forwarded_values = dict() + existing_config = config.get('forwarded_values', {}) + headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items')) + if headers: + headers.sort() + forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers) + if 'cookies' not in forwarded_values: + forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies) + forwarded_values['cookies'] = {'forward': forward} + else: + existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items') + whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist) + if whitelist: + self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names') + forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist) + cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward')) + self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward', + self.__valid_cookie_forwarding) + forwarded_values['cookies']['forward'] = cookie_forwarding + query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', [])) + self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys') + forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys) + forwarded_values = self.add_missing_key(forwarded_values, 'query_string', + existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string)) + cache_behavior['forwarded_values'] = forwarded_values + return cache_behavior + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating forwarded values") + + def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior): + try: + if lambda_function_associations is not None: + self.validate_is_list(lambda_function_associations, 'lambda_function_associations') + for association in lambda_function_associations: + association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n') + self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type', + self.__valid_lambda_function_association_event_types) + cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations) + else: + if 'lambda_function_associations' in config: + cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations') + else: + 
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
+            return cache_behavior
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating lambda function associations")
+
+    def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
+        if field_level_encryption_id is not None:
+            cache_behavior['field_level_encryption_id'] = field_level_encryption_id
+        elif 'field_level_encryption_id' in config:
+            cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
+        else:
+            cache_behavior['field_level_encryption_id'] = ""
+        return cache_behavior
+
+    def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
+        try:
+            if allowed_methods is not None:
+                self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
+                temp_allowed_items = allowed_methods.get('items')
+                self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
+                self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
+                                                               self.__valid_methods_allowed_methods)
+                cached_items = allowed_methods.get('cached_methods')
+                if 'cached_methods' in allowed_methods:
+                    self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
+                    self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_methods.cached_methods[]',
+                                                                   self.__valid_methods_cached_methods)
+                # we don't care if the order of how cloudfront stores the methods differs - preserving existing
+                # order reduces likelihood of making unnecessary changes
+                if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
+                    cache_behavior['allowed_methods'] = config['allowed_methods']
+                else:
+                    cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
+
+                if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
+                    cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
+                else:
+                    cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
+            else:
+                if 'allowed_methods' in config:
+                    cache_behavior['allowed_methods'] = config.get('allowed_methods')
+            return cache_behavior
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating allowed methods")
+
+    def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
+        try:
+            if trusted_signers is None:
+                trusted_signers = {}
+            if 'items' in trusted_signers:
+                valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
+            else:
+                valid_trusted_signers = dict(quantity=config.get('quantity', 0))
+                if 'items' in config:
+                    # carry over the existing items together with their quantity
+                    valid_trusted_signers = dict(items=config['items'], quantity=config.get('quantity', len(config['items'])))
+            valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
+            cache_behavior['trusted_signers'] = valid_trusted_signers
+            return cache_behavior
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating trusted signers")
+
+    def validate_viewer_certificate(self, viewer_certificate):
+        try:
+            if viewer_certificate is None:
+                return None
+            if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
+                self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with "
+                                          "viewer_certificate.cloudfront_default_certificate set to true.")
+            self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
+                                                        self.__valid_viewer_certificate_ssl_support_methods)
+            self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
+                                                        self.__valid_viewer_certificate_minimum_protocol_versions)
+            self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
+                                                        self.__valid_viewer_certificate_certificate_sources)
+            viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
+            viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
+            viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
+            viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
+            return viewer_certificate
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating viewer certificate")
+
+    def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
+        try:
+            if custom_error_responses is None and not purge_custom_error_responses:
+                return ansible_list_to_cloudfront_list(config)
+            self.validate_is_list(custom_error_responses, 'custom_error_responses')
+            result = list()
+            # key the existing (config) responses by error_code so that responses which are
+            # not being replaced can be retained when purge_custom_error_responses is false
+            existing_responses = dict((response['error_code'], response) for response in config)
+            for custom_error_response in custom_error_responses:
+                self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
+                custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
+                if 'response_code' in custom_error_response:
+                    custom_error_response['response_code'] = str(custom_error_response['response_code'])
+                if custom_error_response['error_code'] in existing_responses:
+                    del(existing_responses[custom_error_response['error_code']])
+                result.append(custom_error_response)
+            if not purge_custom_error_responses:
+                result.extend(existing_responses.values())
+
+            return ansible_list_to_cloudfront_list(result)
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating custom error responses")
+
+    def validate_restrictions(self, config, restrictions, purge_restrictions=False):
+        try:
+            if restrictions is None:
+                if purge_restrictions:
+                    return None
+                else:
+                    return config
+            self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
+            geo_restriction = restrictions.get('geo_restriction')
+            self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
+            existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
+            # items may legitimately be omitted (for example with restriction_type=none)
+            geo_restriction_items = geo_restriction.get('items') or []
+            if not purge_restrictions:
+                geo_restriction_items.extend([rest for rest in existing_restrictions if
+                                              rest not in geo_restriction_items])
+            valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
+            valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
+            return {'geo_restriction': valid_restrictions}
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating restrictions")
+
+    def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
+        try:
+            config['default_root_object'] = default_root_object or config.get('default_root_object', '')
+            # look up the existing value under its post-conversion key ('is_i_p_v_6_enabled') so an
+            # unspecified ipv6_enabled parameter preserves the distribution's current setting
+            config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('is_i_p_v_6_enabled', self.__default_ipv6_enabled)
+            if http_version is not None or config.get('http_version'):
+                self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
+                config['http_version'] = http_version or config.get('http_version')
+            if web_acl_id or config.get('web_a_c_l_id'):
+                config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
+            return config
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
+
+    def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
+        try:
+            if config is None:
+                config = {}
+            if aliases is not None:
+                if not purge_aliases:
+                    aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
+                                    if alias not in aliases])
+                config['aliases'] = ansible_list_to_cloudfront_list(aliases)
+            if logging is not None:
+                config['logging'] = self.validate_logging(logging)
+            config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
+            if price_class is not None:
+                self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
+                config['price_class'] = price_class
+            return config
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
+
+    def validate_comment(self, config, comment):
+        config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
+        return config
+
+    def validate_caller_reference(self, caller_reference):
+        return caller_reference or self.__default_datetime_string
+
+    def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
+        try:
+            if valid_origins is not None:
+                valid_origins_list = valid_origins.get('items')
+                if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
+                    return str(valid_origins_list[0].get('id'))
+            self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
+
+    def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
+        try:
+            self.validate_is_list(attribute_list, attribute_list_name)
+            if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
+                    isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
+                self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
+        except Exception as e:
+            self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
+
+    def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
+        if attribute is not None and attribute not in allowed_list:
+            self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
+
+    def validate_distribution_from_caller_reference(self,
caller_reference): + try: + distributions = self.__cloudfront_facts_mgr.list_distributions(False) + distribution_name = 'Distribution' + distribution_config_name = 'DistributionConfig' + distribution_ids = [dist.get('Id') for dist in distributions] + for distribution_id in distribution_ids: + distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id) + if distribution is not None: + distribution_config = distribution[distribution_name].get(distribution_config_name) + if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference: + distribution[distribution_name][distribution_config_name] = distribution_config + return distribution + + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating distribution from caller reference") + + def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference): + try: + if caller_reference is not None: + return self.validate_distribution_from_caller_reference(caller_reference) + else: + if aliases: + distribution_id = self.validate_distribution_id_from_alias(aliases) + if distribution_id: + return self.__cloudfront_facts_mgr.get_distribution(distribution_id) + return None + except Exception as e: + self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference") + + def validate_distribution_id_from_alias(self, aliases): + distributions = self.__cloudfront_facts_mgr.list_distributions(False) + if distributions: + for distribution in distributions: + distribution_aliases = distribution.get('Aliases', {}).get('Items', []) + if set(aliases) & set(distribution_aliases): + return distribution['Id'] + return None + + def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): + if distribution_id is None: + distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id'] + + try: + waiter = client.get_waiter('distribution_deployed') + attempts = 1 + int(wait_timeout / 60) + waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts}) + except botocore.exceptions.WaiterError as e: + self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action." 
+ " Waited for {0} seconds before timeout.".format(to_text(wait_timeout))) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) + + +def main(): + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + caller_reference=dict(), + comment=dict(), + distribution_id=dict(), + e_tag=dict(), + tags=dict(type='dict', default={}), + purge_tags=dict(type='bool', default=False), + alias=dict(), + aliases=dict(type='list', default=[]), + purge_aliases=dict(type='bool', default=False), + default_root_object=dict(), + origins=dict(type='list'), + purge_origins=dict(type='bool', default=False), + default_cache_behavior=dict(type='dict'), + cache_behaviors=dict(type='list'), + purge_cache_behaviors=dict(type='bool', default=False), + custom_error_responses=dict(type='list'), + purge_custom_error_responses=dict(type='bool', default=False), + logging=dict(type='dict'), + price_class=dict(), + enabled=dict(type='bool'), + viewer_certificate=dict(type='dict'), + restrictions=dict(type='dict'), + web_acl_id=dict(), + http_version=dict(), + ipv6_enabled=dict(type='bool'), + default_origin_domain_name=dict(), + default_origin_path=dict(), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=1800, type='int') + ) + + result = {} + changed = True + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=False, + mutually_exclusive=[ + ['distribution_id', 'alias'], + ['default_origin_domain_name', 'distribution_id'], + ['default_origin_domain_name', 'alias'], + ] + ) + + client = module.client('cloudfront') + + validation_mgr = CloudFrontValidationManager(module) + + state = module.params.get('state') + caller_reference = module.params.get('caller_reference') + comment = module.params.get('comment') + e_tag = module.params.get('e_tag') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + distribution_id = module.params.get('distribution_id') + alias = module.params.get('alias') + aliases = module.params.get('aliases') + purge_aliases = module.params.get('purge_aliases') + default_root_object = module.params.get('default_root_object') + origins = module.params.get('origins') + purge_origins = module.params.get('purge_origins') + default_cache_behavior = module.params.get('default_cache_behavior') + cache_behaviors = module.params.get('cache_behaviors') + purge_cache_behaviors = module.params.get('purge_cache_behaviors') + custom_error_responses = module.params.get('custom_error_responses') + purge_custom_error_responses = module.params.get('purge_custom_error_responses') + logging = module.params.get('logging') + price_class = module.params.get('price_class') + enabled = module.params.get('enabled') + viewer_certificate = module.params.get('viewer_certificate') + restrictions = module.params.get('restrictions') + purge_restrictions = module.params.get('purge_restrictions') + web_acl_id = module.params.get('web_acl_id') + http_version = module.params.get('http_version') + ipv6_enabled = module.params.get('ipv6_enabled') + default_origin_domain_name = module.params.get('default_origin_domain_name') + default_origin_path = module.params.get('default_origin_path') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + if alias and alias not in aliases: + aliases.append(alias) + + distribution = 
+    update = state == 'present' and distribution
+    create = state == 'present' and not distribution
+    delete = state == 'absent' and distribution
+
+    if not (update or create or delete):
+        module.exit_json(changed=False)
+
+    if update or delete:
+        config = distribution['Distribution']['DistributionConfig']
+        e_tag = distribution['ETag']
+        distribution_id = distribution['Distribution']['Id']
+    else:
+        config = dict()
+    if update:
+        config = camel_dict_to_snake_dict(config, reversible=True)
+
+    if create or update:
+        config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
+        config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
+        config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
+                                                            default_origin_path, create, purge_origins)
+        config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
+                                                                            cache_behaviors, config['origins'], purge_cache_behaviors)
+        config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
+                                                                                  default_cache_behavior, config['origins'], True)
+        config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
+                                                                                          custom_error_responses, purge_custom_error_responses)
+        valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
+        if valid_restrictions:
+            config['restrictions'] = valid_restrictions
+        valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
+        config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
+        config = validation_mgr.validate_comment(config, comment)
+        config = snake_dict_to_camel_dict(config, capitalize_first=True)
+
+    if create:
+        config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
+        result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
+        result = camel_dict_to_snake_dict(result)
+        result['tags'] = list_tags_for_resource(client, module, result['arn'])
+
+    if delete:
+        if config['Enabled']:
+            config['Enabled'] = False
+            result = update_distribution(client, module, config, distribution_id, e_tag)
+            validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+        distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+        # e_tag = distribution['ETag']
+        result = delete_distribution(client, module, distribution)
+
+    if update:
+        changed = config != distribution['Distribution']['DistributionConfig']
+        if changed:
+            result = update_distribution(client, module, config, distribution_id, e_tag)
+        else:
+            result = distribution['Distribution']
+        existing_tags = list_tags_for_resource(client, module, result['ARN'])
+        distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
+        changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
+        result = camel_dict_to_snake_dict(result)
+        result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
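+        # Record the before/after config delta (if any) under 'diff' in the result.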
+        result['diff'] = dict()
+        diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
+        if diff:
+            result['diff']['before'] = diff[0]
+            result['diff']['after'] = diff[1]
+
+    if wait and (create or update):
+        validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+
+    if 'distribution_config' in result:
+        result.update(result['distribution_config'])
+        del result['distribution_config']
+
+    module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloudfront_facts.py b/cloudfront_facts.py
new file mode 120000
index 00000000000..700056e714c
--- /dev/null
+++ b/cloudfront_facts.py
@@ -0,0 +1 @@
+cloudfront_info.py
\ No newline at end of file
diff --git a/cloudfront_info.py b/cloudfront_info.py
new file mode 100644
index 00000000000..1850b027c43
--- /dev/null
+++ b/cloudfront_info.py
@@ -0,0 +1,729 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudfront_info
+short_description: Obtain facts about an AWS CloudFront distribution
+description:
+  - Gets information about an AWS CloudFront distribution.
+  - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts).
+    Note that the M(cloudfront_info) module no longer returns C(ansible_facts)!
+requirements:
+  - boto3 >= 1.0.0
+  - python >= 2.6
+author: Willem van Ketwich (@wilvk)
+options:
+    distribution_id:
+      description:
+        - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
+          I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
+      required: false
+      type: str
+    invalidation_id:
+      description:
+        - The id of the invalidation to get information about.
+        - Used with I(invalidation).
+      required: false
+      type: str
+    origin_access_identity_id:
+      description:
+        - The id of the CloudFront origin access identity to get information about.
+      required: false
+      type: str
+    web_acl_id:
+      description:
+        - Used with I(list_distributions_by_web_acl_id).
+      required: false
+      type: str
+    domain_name_alias:
+      description:
+        - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
+          distribution to get the distribution id where required.
+      required: false
+      type: str
+    all_lists:
+      description:
+        - Get all CloudFront lists that do not require parameters.
+      required: false
+      default: false
+      type: bool
+    origin_access_identity:
+      description:
+        - Get information about an origin access identity.
+        - Requires I(origin_access_identity_id) to be specified.
+      required: false
+      default: false
+      type: bool
+    origin_access_identity_config:
+      description:
+        - Get the configuration information about an origin access identity.
+        - Requires I(origin_access_identity_id) to be specified.
+      required: false
+      default: false
+      type: bool
+    distribution:
+      description:
+        - Get information about a distribution.
+        - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+      required: false
+      default: false
+      type: bool
+    distribution_config:
+      description:
+        - Get the configuration information about a distribution.
+        - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+      required: false
+      default: false
+      type: bool
+    invalidation:
+      description:
+        - Get information about an invalidation.
+        - Requires I(invalidation_id) to be specified.
+      required: false
+      default: false
+      type: bool
+    streaming_distribution:
+      description:
+        - Get information about a specified RTMP distribution.
+        - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+      required: false
+      default: false
+      type: bool
+    streaming_distribution_config:
+      description:
+        - Get the configuration information about a specified RTMP distribution.
+        - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+      required: false
+      default: false
+      type: bool
+    list_origin_access_identities:
+      description:
+        - Get a list of CloudFront origin access identities.
+      required: false
+      default: false
+      type: bool
+    list_distributions:
+      description:
+        - Get a list of CloudFront distributions.
+      required: false
+      default: false
+      type: bool
+    list_distributions_by_web_acl_id:
+      description:
+        - Get a list of distributions using web acl id as a filter.
+        - Requires I(web_acl_id) to be set.
+      required: false
+      default: false
+      type: bool
+    list_invalidations:
+      description:
+        - Get a list of invalidations.
+        - Requires I(distribution_id) or I(domain_name_alias) to be specified.
+      required: false
+      default: false
+      type: bool
+    list_streaming_distributions:
+      description:
+        - Get a list of streaming distributions.
+      required: false
+      default: false
+      type: bool
+    summary:
+      description:
+        - Returns a summary of all distributions, streaming distributions and origin_access_identities.
+        - This is the default behaviour if no option is selected.
+      required: false
+      default: false
+      type: bool
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get a summary of distributions
+- cloudfront_info:
+    summary: true
+  register: result
+
+# Get information about a distribution
+- cloudfront_info:
+    distribution: true
+    distribution_id: my-cloudfront-distribution-id
+  register: result_did
+- debug:
+    msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+# Get information about a distribution using the CNAME of the CloudFront distribution.
+- cloudfront_info:
+    distribution: true
+    domain_name_alias: www.my-website.com
+  register: result_website
+- debug:
+    msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
+
+# When the module is called as cloudfront_facts, return values are published
+# in ansible_facts['cloudfront'][<distribution_id>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+- cloudfront_facts:
+    distribution: true
+    distribution_id: my-cloudfront-distribution-id
+- debug:
+    msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
+
+- cloudfront_facts:
+    distribution: true
+    domain_name_alias: www.my-website.com
+- debug:
+    msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
+
+# Get all information about an invalidation for a distribution.
+- cloudfront_facts:
+    invalidation: true
+    distribution_id: my-cloudfront-distribution-id
+    invalidation_id: my-cloudfront-invalidation-id
+
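+# Get a list of distributions filtered by a WAF web ACL id (hypothetical
+# id shown; web_acl_id must be set when using list_distributions_by_web_acl_id).
+- cloudfront_info:
+    list_distributions_by_web_acl_id: true
+    web_acl_id: a48183ea-6e76-4d0a-8ae7-4fab8dcd0ff8
+  register: result
+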
+# Get all information about a CloudFront origin access identity.
+- cloudfront_facts:
+    origin_access_identity: true
+    origin_access_identity_id: my-cloudfront-origin-access-identity-id
+
+# Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
+- cloudfront_facts:
+    all_lists: true
+'''
+
+RETURN = '''
+origin_access_identity:
+    description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
+    returned: only if I(origin_access_identity) is true
+    type: dict
+origin_access_identity_configuration:
+    description: Describes the origin access identity's configuration information. Requires I(origin_access_identity_id) to be set.
+    returned: only if I(origin_access_identity_config) is true
+    type: dict
+distribution:
+    description: >
+      Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
+      to be specified.
+    returned: only if I(distribution) is true
+    type: dict
+distribution_config:
+    description: >
+      Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
+      to be specified.
+    returned: only if I(distribution_config) is true
+    type: dict
+invalidation:
+    description: >
+      Describes the invalidation information for the distribution. Requires
+      I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias).
+    returned: only if I(invalidation) is true
+    type: dict
+streaming_distribution:
+    description: >
+      Describes the streaming information for the distribution. Requires
+      I(distribution_id) or I(domain_name_alias) to be specified.
+    returned: only if I(streaming_distribution) is true
+    type: dict
+streaming_distribution_config:
+    description: >
+      Describes the streaming configuration information for the distribution.
+      Requires I(distribution_id) or I(domain_name_alias) to be specified.
+    returned: only if I(streaming_distribution_config) is true
+    type: dict
+summary:
+    description: Gives a summary of distributions, streaming distributions and origin access identities.
+    returned: as default or if I(summary) is true
+    type: dict
+result:
+    description: >
+      Result dict not nested under the CloudFront ID, so that the module's results can be accessed
+      without knowing that ID, since determining the DistributionId is usually the reason one uses
+      this module in the first place.
+ returned: always + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3 +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict +from ansible.module_utils.basic import AnsibleModule +from functools import partial +import traceback + +try: + import botocore +except ImportError: + pass # will be caught by imported HAS_BOTO3 + + +class CloudFrontServiceManager: + """Handles CloudFront Services""" + + def __init__(self, module): + self.module = module + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + self.client = boto3_conn(module, conn_type='client', + resource='cloudfront', region=region, + endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoRegionError: + self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION " + "environment variable or in boto configuration file") + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Can't establish connection - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_distribution(self, distribution_id): + try: + func = partial(self.client.get_distribution, Id=distribution_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing distribution - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_distribution_config(self, distribution_id): + try: + func = partial(self.client.get_distribution_config, Id=distribution_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing distribution configuration - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_origin_access_identity(self, origin_access_identity_id): + try: + func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing origin access identity - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_origin_access_identity_config(self, origin_access_identity_id): + try: + func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_invalidation(self, distribution_id, invalidation_id): + try: + func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing invalidation - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_streaming_distribution(self, distribution_id): + try: + func = partial(self.client.get_streaming_distribution, Id=distribution_id) + return self.paginated_response(func) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error describing 
streaming distribution - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def get_streaming_distribution_config(self, distribution_id):
+        try:
+            func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
+            return self.paginated_response(func)
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error describing streaming distribution configuration - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def list_origin_access_identities(self):
+        try:
+            func = partial(self.client.list_cloud_front_origin_access_identities)
+            origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList')
+            if origin_access_identity_list['Quantity'] > 0:
+                return origin_access_identity_list['Items']
+            return {}
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error listing CloudFront origin access identities - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def list_distributions(self, keyed=True):
+        try:
+            func = partial(self.client.list_distributions)
+            distribution_list = self.paginated_response(func, 'DistributionList')
+            if distribution_list['Quantity'] == 0:
+                return {}
+            else:
+                distribution_list = distribution_list['Items']
+                if not keyed:
+                    return distribution_list
+            return self.keyed_list_helper(distribution_list)
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error listing distributions - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def list_distributions_by_web_acl_id(self, web_acl_id):
+        try:
+            func = partial(self.client.list_distributions_by_web_acl_id, WebACLId=web_acl_id)
+            distribution_list = self.paginated_response(func, 'DistributionList')
+            if distribution_list['Quantity'] == 0:
+                return {}
+            else:
+                distribution_list = distribution_list['Items']
+            return self.keyed_list_helper(distribution_list)
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def list_invalidations(self, distribution_id):
+        try:
+            func = partial(self.client.list_invalidations, DistributionId=distribution_id)
+            invalidation_list = self.paginated_response(func, 'InvalidationList')
+            if invalidation_list['Quantity'] > 0:
+                return invalidation_list['Items']
+            return {}
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error listing invalidations - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def list_streaming_distributions(self, keyed=True):
+        try:
+            func = partial(self.client.list_streaming_distributions)
+            streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList')
+            if streaming_distribution_list['Quantity'] == 0:
+                return {}
+            else:
+                streaming_distribution_list = streaming_distribution_list['Items']
+                if not keyed:
+                    return streaming_distribution_list
+            return self.keyed_list_helper(streaming_distribution_list)
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def summary(self):
+        summary_dict = {}
+        summary_dict.update(self.summary_get_distribution_list(False))
+        
summary_dict.update(self.summary_get_distribution_list(True)) + summary_dict.update(self.summary_get_origin_access_identity_list()) + return summary_dict + + def summary_get_origin_access_identity_list(self): + try: + origin_access_identity_list = {'origin_access_identities': []} + origin_access_identities = self.list_origin_access_identities() + for origin_access_identity in origin_access_identities: + oai_id = origin_access_identity['Id'] + oai_full_response = self.get_origin_access_identity(oai_id) + oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} + origin_access_identity_list['origin_access_identities'].append(oai_summary) + return origin_access_identity_list + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def summary_get_distribution_list(self, streaming=False): + try: + list_name = 'streaming_distributions' if streaming else 'distributions' + key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] + distribution_list = {list_name: []} + distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False) + for dist in distributions: + temp_distribution = {} + for key_name in key_list: + temp_distribution[key_name] = dist[key_name] + temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])] + temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) + if not streaming: + temp_distribution['WebACLId'] = dist['WebACLId'] + invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) + if invalidation_ids: + temp_distribution['Invalidations'] = invalidation_ids + resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN']) + temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) + distribution_list[list_name].append(temp_distribution) + return distribution_list + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error generating summary of distributions - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + except Exception as e: + self.module.fail_json(msg="Error generating summary of distributions - " + str(e), + exception=traceback.format_exc()) + + def get_etag_from_distribution_id(self, distribution_id, streaming): + distribution = {} + if not streaming: + distribution = self.get_distribution(distribution_id) + else: + distribution = self.get_streaming_distribution(distribution_id) + return distribution['ETag'] + + def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): + try: + invalidation_ids = [] + invalidations = self.list_invalidations(distribution_id) + for invalidation in invalidations: + invalidation_ids.append(invalidation['Id']) + return invalidation_ids + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + def get_distribution_id_from_domain_name(self, domain_name): + try: + distribution_id = "" + distributions = self.list_distributions(False) + distributions += self.list_streaming_distributions(False) + for dist in distributions: + if 'Items' in dist['Aliases']: + for alias in dist['Aliases']['Items']: + if str(alias).lower() 
== domain_name.lower():
+                        # CloudFront aliases (CNAMEs) are unique across distributions,
+                        # so the first match is the only possible match.
+                        return dist['Id']
+            return distribution_id
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def get_aliases_from_distribution_id(self, distribution_id):
+        aliases = []
+        try:
+            distributions = self.list_distributions(False)
+            for dist in distributions:
+                if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
+                    for alias in dist['Aliases']['Items']:
+                        aliases.append(alias)
+                    break
+            return aliases
+        except botocore.exceptions.ClientError as e:
+            self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
+                                  exception=traceback.format_exc(),
+                                  **camel_dict_to_snake_dict(e.response))
+
+    def paginated_response(self, func, result_key=""):
+        '''
+        Returns expanded response for paginated operations.
+        The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+        '''
+        args = dict()
+        results = dict()
+        loop = True
+        while loop:
+            response = func(**args)
+            if result_key == "":
+                result = response
+                result.pop('ResponseMetadata', None)
+            else:
+                result = response.get(result_key)
+            results.update(result)
+            # 'NextMarker' is either at the top level of the response or nested
+            # inside the '*List' entry, depending on the operation.
+            args['Marker'] = response.get('NextMarker')
+            for key in response.keys():
+                if key.endswith('List'):
+                    args['Marker'] = response[key].get('NextMarker')
+                    break
+            loop = args['Marker'] is not None
+        return results
+
+    def keyed_list_helper(self, list_to_key):
+        keyed_list = dict()
+        for item in list_to_key:
+            distribution_id = item['Id']
+            if 'Items' in item['Aliases']:
+                aliases = item['Aliases']['Items']
+                for alias in aliases:
+                    keyed_list.update({alias: item})
+            keyed_list.update({distribution_id: item})
+        return keyed_list
+
+
+def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
+    facts[distribution_id].update(details)
+    # also have a fixed key for accessing results/details returned
+    facts['result'] = details
+    facts['result']['DistributionId'] = distribution_id
+
+    for alias in aliases:
+        facts[alias].update(details)
+    return facts
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        distribution_id=dict(required=False, type='str'),
+        invalidation_id=dict(required=False, type='str'),
+        origin_access_identity_id=dict(required=False, type='str'),
+        web_acl_id=dict(required=False, type='str'),
+        domain_name_alias=dict(required=False, type='str'),
+        all_lists=dict(required=False, default=False, type='bool'),
+        distribution=dict(required=False, default=False, type='bool'),
+        distribution_config=dict(required=False, default=False, type='bool'),
+        origin_access_identity=dict(required=False, default=False, type='bool'),
+        origin_access_identity_config=dict(required=False, default=False, type='bool'),
+        invalidation=dict(required=False, default=False, type='bool'),
+        streaming_distribution=dict(required=False, default=False, type='bool'),
+        streaming_distribution_config=dict(required=False, default=False, type='bool'),
+        list_origin_access_identities=dict(required=False, default=False, type='bool'),
+        list_distributions=dict(required=False, default=False, type='bool'),
+        list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
+        list_invalidations=dict(required=False, default=False, type='bool'),
+        list_streaming_distributions=dict(required=False, default=False, type='bool'),
+        summary=dict(required=False, default=False, type='bool')
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    is_old_facts = module._name == 'cloudfront_facts'
+    if is_old_facts:
+        module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    service_mgr = CloudFrontServiceManager(module)
+
+    distribution_id = module.params.get('distribution_id')
+    invalidation_id = module.params.get('invalidation_id')
+    origin_access_identity_id = module.params.get('origin_access_identity_id')
+    web_acl_id = module.params.get('web_acl_id')
+    domain_name_alias = module.params.get('domain_name_alias')
+    all_lists = module.params.get('all_lists')
+    distribution = module.params.get('distribution')
+    distribution_config = module.params.get('distribution_config')
+    origin_access_identity = module.params.get('origin_access_identity')
+    origin_access_identity_config = module.params.get('origin_access_identity_config')
+    invalidation = module.params.get('invalidation')
+    streaming_distribution = module.params.get('streaming_distribution')
+    streaming_distribution_config = module.params.get('streaming_distribution_config')
+    list_origin_access_identities = module.params.get('list_origin_access_identities')
+    list_distributions = module.params.get('list_distributions')
+    list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
+    list_invalidations = module.params.get('list_invalidations')
+    list_streaming_distributions = module.params.get('list_streaming_distributions')
+    summary = module.params.get('summary')
+
+    aliases = []
+    result = {'cloudfront': {}}
+    facts = {}
+
+    require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
+                               streaming_distribution_config or list_invalidations)
+
+    # set default to summary if no option specified
+    summary = summary or not (distribution or distribution_config or origin_access_identity or
+                              origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
+                              list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
+                              list_streaming_distributions or list_distributions)
+
+    # validations
+    if require_distribution_id and distribution_id is None and domain_name_alias is None:
+        module.fail_json(msg='Error: distribution_id or domain_name_alias has not been specified.')
+    if invalidation and invalidation_id is None:
+        module.fail_json(msg='Error: invalidation_id has not been specified.')
+    if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
+        module.fail_json(msg='Error: origin_access_identity_id has not been specified.')
+    if list_distributions_by_web_acl_id and web_acl_id is None:
+        module.fail_json(msg='Error: web_acl_id has not been specified.')
+
+    # get distribution id from domain name alias
+    if require_distribution_id and distribution_id is None:
+        distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
+        if not distribution_id:
+            module.fail_json(msg='Error: unable to source a distribution id from domain_name_alias')
+
+    # set appropriate cloudfront id
+    if distribution_id and not list_invalidations:
+        facts = {distribution_id: {}}
+        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
+        for alias in aliases:
+            facts.update({alias: {}})
+        if invalidation_id:
+            facts.update({invalidation_id: {}})
+    
elif distribution_id and list_invalidations: + facts = {distribution_id: {}} + aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) + for alias in aliases: + facts.update({alias: {}}) + elif origin_access_identity_id: + facts = {origin_access_identity_id: {}} + elif web_acl_id: + facts = {web_acl_id: {}} + + # get details based on options + if distribution: + facts_to_set = service_mgr.get_distribution(distribution_id) + if distribution_config: + facts_to_set = service_mgr.get_distribution_config(distribution_id) + if origin_access_identity: + facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id)) + if origin_access_identity_config: + facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id)) + if invalidation: + facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id) + facts[invalidation_id].update(facts_to_set) + if streaming_distribution: + facts_to_set = service_mgr.get_streaming_distribution(distribution_id) + if streaming_distribution_config: + facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id) + if list_invalidations: + facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)} + if 'facts_to_set' in vars(): + facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases) + + # get list based on options + if all_lists or list_origin_access_identities: + facts['origin_access_identities'] = service_mgr.list_origin_access_identities() + if all_lists or list_distributions: + facts['distributions'] = service_mgr.list_distributions() + if all_lists or list_streaming_distributions: + facts['streaming_distributions'] = service_mgr.list_streaming_distributions() + if list_distributions_by_web_acl_id: + facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id) + if list_invalidations: + facts['invalidations'] = service_mgr.list_invalidations(distribution_id) + + # default summary option + if summary: + facts['summary'] = service_mgr.summary() + + result['changed'] = False + result['cloudfront'].update(facts) + if is_old_facts: + module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result) + else: + module.exit_json(msg="Retrieved CloudFront info.", **result) + + +if __name__ == '__main__': + main() diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py new file mode 100644 index 00000000000..5de20501939 --- /dev/null +++ b/cloudfront_invalidation.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: cloudfront_invalidation + +short_description: create invalidations for AWS CloudFront distributions +description: + - Allows for invalidation of a batch of paths for a CloudFront distribution. + +requirements: + - boto3 >= 1.0.0 + - python >= 2.6 + + +author: Willem van Ketwich (@wilvk) + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + + +options: + distribution_id: + description: + - The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias. 
+ required: false + type: str + alias: + description: + - The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id. + required: false + type: str + caller_reference: + description: + - A unique reference identifier for the invalidation paths. + - Defaults to current datetime stamp. + required: false + default: + type: str + target_paths: + description: + - A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed. eg. '/foo/bar/*' + required: true + type: list + elements: str + +notes: + - does not support check mode + +''' + +EXAMPLES = ''' + +- name: create a batch of invalidations using a distribution_id for a reference + cloudfront_invalidation: + distribution_id: E15BU8SDCGSG57 + caller_reference: testing 123 + target_paths: + - /testpathone/test1.css + - /testpathtwo/test2.js + - /testpaththree/test3.ss + +- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match + cloudfront_invalidation: + alias: alias.test.com + caller_reference: testing 123 + target_paths: + - /testpathone/test4.css + - /testpathtwo/test5.js + - /testpaththree/* + +''' + +RETURN = ''' +invalidation: + description: The invalidation's information. + returned: always + type: complex + contains: + create_time: + description: The date and time the invalidation request was first made. + returned: always + type: str + sample: '2018-02-01T15:50:41.159000+00:00' + id: + description: The identifier for the invalidation request. + returned: always + type: str + sample: I2G9MOWJZFV612 + invalidation_batch: + description: The current invalidation information for the batch request. + returned: always + type: complex + contains: + caller_reference: + description: The value used to uniquely identify an invalidation request. + returned: always + type: str + sample: testing 123 + paths: + description: A dict that contains information about the objects that you want to invalidate. + returned: always + type: complex + contains: + items: + description: A list of the paths that you want to invalidate. + returned: always + type: list + sample: + - /testpathtwo/test2.js + - /testpathone/test1.css + - /testpaththree/test3.ss + quantity: + description: The number of objects that you want to invalidate. + returned: always + type: int + sample: 3 + status: + description: The status of the invalidation request. + returned: always + type: str + sample: Completed +location: + description: The fully qualified URI of the distribution and invalidation batch request. 
+ returned: always + type: str + sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +import datetime + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by imported AnsibleAWSModule + + +class CloudFrontInvalidationServiceManager(object): + """ + Handles CloudFront service calls to AWS for invalidations + """ + + def __init__(self, module): + self.module = module + self.client = module.client('cloudfront') + + def create_invalidation(self, distribution_id, invalidation_batch): + current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) + try: + response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch) + response.pop('ResponseMetadata', None) + if current_invalidation_response: + return response, False + else: + return response, True + except BotoCoreError as e: + self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") + except ClientError as e: + if ('Your request contains a caller reference that was used for a previous invalidation batch ' + 'for the same distribution.' in e.response['Error']['Message']): + self.module.warn("InvalidationBatch target paths are not modifiable. " + "To make a new invalidation please update caller_reference.") + return current_invalidation_response, False + else: + self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") + + def get_invalidation(self, distribution_id, caller_reference): + current_invalidation = {} + # find all invalidations for the distribution + try: + paginator = self.client.get_paginator('list_invalidations') + invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', []) + invalidation_ids = [inv['Id'] for inv in invalidations] + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.") + + # check if there is an invalidation with the same caller reference + for inv_id in invalidation_ids: + try: + invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation'] + caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference') + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id)) + if caller_ref == caller_reference: + current_invalidation = invalidation + break + + current_invalidation.pop('ResponseMetadata', None) + return current_invalidation + + +class CloudFrontInvalidationValidationManager(object): + """ + Manages CloudFront validations for invalidation batches + """ + + def __init__(self, module): + self.module = module + self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + + def validate_distribution_id(self, distribution_id, alias): + try: + if distribution_id is None and alias is None: + self.module.fail_json(msg="distribution_id or alias must be specified") + if distribution_id is 
None: + distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias) + return distribution_id + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error validating parameters.") + + def create_aws_list(self, invalidation_batch): + aws_list = {} + aws_list["Quantity"] = len(invalidation_batch) + aws_list["Items"] = invalidation_batch + return aws_list + + def validate_invalidation_batch(self, invalidation_batch, caller_reference): + try: + if caller_reference is not None: + valid_caller_reference = caller_reference + else: + valid_caller_reference = datetime.datetime.now().isoformat() + valid_invalidation_batch = { + 'paths': self.create_aws_list(invalidation_batch), + 'caller_reference': valid_caller_reference + } + return valid_invalidation_batch + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error validating invalidation batch.") + + +def main(): + argument_spec = dict( + caller_reference=dict(), + distribution_id=dict(), + alias=dict(), + target_paths=dict(required=True, type='list') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) + + validation_mgr = CloudFrontInvalidationValidationManager(module) + service_mgr = CloudFrontInvalidationServiceManager(module) + + caller_reference = module.params.get('caller_reference') + distribution_id = module.params.get('distribution_id') + alias = module.params.get('alias') + target_paths = module.params.get('target_paths') + + result = {} + + distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias) + valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference) + valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True) + result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) + + +if __name__ == '__main__': + main() diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py new file mode 100644 index 00000000000..d175483d271 --- /dev/null +++ b/cloudfront_origin_access_identity.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: cloudfront_origin_access_identity + +short_description: Create, update and delete origin access identities for a + CloudFront distribution + +description: + - Allows for easy creation, updating and deletion of origin access + identities. + +requirements: + - boto3 >= 1.0.0 + - python >= 2.6 + + +author: Willem van Ketwich (@wilvk) + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + + +options: + state: + description: If the named resource should exist. + choices: + - present + - absent + default: present + type: str + origin_access_identity_id: + description: + - The origin_access_identity_id of the CloudFront distribution. + required: false + type: str + comment: + description: + - A comment to describe the CloudFront origin access identity. 
+ required: false + type: str + caller_reference: + description: + - A unique identifier to reference the origin access identity by. + required: false + type: str + +notes: + - Does not support check mode. + +''' + +EXAMPLES = ''' + +- name: create an origin access identity + cloudfront_origin_access_identity: + state: present + caller_reference: this is an example reference + comment: this is an example comment + +- name: update an existing origin access identity using caller_reference as an identifier + cloudfront_origin_access_identity: + origin_access_identity_id: E17DRN9XUOAHZX + caller_reference: this is an example reference + comment: this is a new comment + +- name: delete an existing origin access identity using caller_reference as an identifier + cloudfront_origin_access_identity: + state: absent + caller_reference: this is an example reference + comment: this is a new comment + +''' + +RETURN = ''' +cloud_front_origin_access_identity: + description: The origin access identity's information. + returned: always + type: complex + contains: + cloud_front_origin_access_identity_config: + description: describes a url specifying the origin access identity. + returned: always + type: complex + contains: + caller_reference: + description: a caller reference for the oai + returned: always + type: str + comment: + description: a comment describing the oai + returned: always + type: str + id: + description: a unique identifier of the oai + returned: always + type: str + s3_canonical_user_id: + description: the canonical user ID of the user who created the oai + returned: always + type: str +e_tag: + description: The current version of the origin access identity created. + returned: always + type: str +location: + description: The fully qualified URI of the new origin access identity just created. 
+ returned: when initially created + type: str + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +import datetime +from functools import partial +import json +import traceback + +try: + import botocore + from botocore.signers import CloudFrontSigner + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by imported AnsibleAWSModule + + +class CloudFrontOriginAccessIdentityServiceManager(object): + """ + Handles CloudFront origin access identity service calls to aws + """ + + def __init__(self, module): + self.module = module + self.client = module.client('cloudfront') + + def create_origin_access_identity(self, caller_reference, comment): + try: + return self.client.create_cloud_front_origin_access_identity( + CloudFrontOriginAccessIdentityConfig={ + 'CallerReference': caller_reference, + 'Comment': comment + } + ) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.") + + def delete_origin_access_identity(self, origin_access_identity_id, e_tag): + try: + return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.") + + def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): + changed = False + new_config = { + 'CallerReference': caller_reference, + 'Comment': comment + } + + try: + current_config = self.client.get_cloud_front_origin_access_identity_config( + Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig'] + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.") + + if new_config != current_config: + changed = True + + try: + # If the CallerReference is a value already sent in a previous identity request + # the returned value is that of the original request + result = self.client.update_cloud_front_origin_access_identity( + CloudFrontOriginAccessIdentityConfig=new_config, + Id=origin_access_identity_id, + IfMatch=e_tag, + ) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.") + + return result, changed + + +class CloudFrontOriginAccessIdentityValidationManager(object): + """ + Manages CloudFront Origin Access Identities + """ + + def __init__(self, module): + self.module = module + self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + + def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id): + try: + if origin_access_identity_id is None: + return + oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id) + if oai is not None: + return oai.get('ETag') + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") + + def validate_origin_access_identity_id_from_caller_reference( + self, caller_reference): + try: + origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() + origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] + for 
origin_access_identity_id in origin_origin_access_identity_ids:
+                oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id))
+                temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
+                if temp_caller_reference == caller_reference:
+                    return origin_access_identity_id
+        except (ClientError, BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")
+
+    def validate_comment(self, comment):
+        if comment is None:
+            return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+        return comment
+
+
+def main():
+    argument_spec = dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        origin_access_identity_id=dict(),
+        caller_reference=dict(),
+        comment=dict(),
+    )
+
+    result = {}
+    e_tag = None
+    changed = False
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
+    service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
+    validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)
+
+    state = module.params.get('state')
+    caller_reference = module.params.get('caller_reference')
+    comment = module.params.get('comment')
+    origin_access_identity_id = module.params.get('origin_access_identity_id')
+
+    if origin_access_identity_id is None and caller_reference is not None:
+        origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)
+
+    e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
+    comment = validation_mgr.validate_comment(comment)
+
+    if state == 'present':
+        if origin_access_identity_id is not None and e_tag is not None:
+            result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
+        else:
+            result = service_mgr.create_origin_access_identity(caller_reference, comment)
+            changed = True
+    elif (state == 'absent' and origin_access_identity_id is not None and
+          e_tag is not None):
+        result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
+        changed = True
+
+    result.pop('ResponseMetadata', None)
+
+    module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloudtrail.py b/cloudtrail.py
new file mode 100644
index 00000000000..0dc8feb64af
--- /dev/null
+++ b/cloudtrail.py
@@ -0,0 +1,609 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail
+short_description: manage CloudTrail create, delete, update
+description:
+  - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
+author:
+  - Ansible Core Team
+  - Ted Timmons (@tedder)
+  - Daniel Shepherd (@shepdelacreme)
+requirements:
+  - boto3
+  - botocore
+options:
+  state:
+    description:
+      - Add or remove CloudTrail configuration.
+      - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
+      - I(state=enabled) is equivalent to I(state=present).
+      - I(state=disabled) is equivalent to I(state=absent).
+    type: str
+    choices: ['present', 'absent', 'enabled', 'disabled']
+    default: present
+  name:
+    description:
+      - Name for the CloudTrail.
+      - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
+    type: str
+    default: default
+  enable_logging:
+    description:
+      - Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
+    default: true
+    type: bool
+  s3_bucket_name:
+    description:
+      - An existing S3 bucket where CloudTrail will deliver log files.
+      - This bucket should exist and have the proper policy.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
+      - Required when I(state=present).
+    type: str
+  s3_key_prefix:
+    description:
+      - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
+    type: str
+  is_multi_region_trail:
+    description:
+      - Specify whether the trail belongs only to one region or exists in all regions.
+    default: false
+    type: bool
+  enable_log_file_validation:
+    description:
+      - Specifies whether log file integrity validation is enabled.
+      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
+    type: bool
+    aliases: [ "log_file_validation_enabled" ]
+  include_global_events:
+    description:
+      - Record API calls from global services such as IAM and STS.
+    default: true
+    type: bool
+    aliases: [ "include_global_service_events" ]
+  sns_topic_name:
+    description:
+      - SNS Topic name to send notifications to when a log file is delivered.
+    type: str
+  cloudwatch_logs_role_arn:
+    description:
+      - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_log_group_arn) is specified.
+    type: str
+  cloudwatch_logs_log_group_arn:
+    description:
+      - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_role_arn) is specified.
+    type: str
+  kms_key_id:
+    description:
+      - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
+      - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
+    type: str
+  tags:
+    description:
+      - A hash/dictionary of tags to be applied to the CloudTrail resource.
+      - Remove completely or specify an empty dictionary to remove all tags.
+ default: {} + type: dict + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: create single region cloudtrail + cloudtrail: + state: present + name: default + s3_bucket_name: mylogbucket + s3_key_prefix: cloudtrail + region: us-east-1 + +- name: create multi-region trail with validation and tags + cloudtrail: + state: present + name: default + s3_bucket_name: mylogbucket + region: us-east-1 + is_multi_region_trail: true + enable_log_file_validation: true + cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role" + cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*" + kms_key_id: "alias/MyAliasName" + tags: + environment: dev + Name: default + +- name: show another valid kms_key_id + cloudtrail: + state: present + name: default + s3_bucket_name: mylogbucket + kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012" + # simply "12345678-1234-1234-1234-123456789012" would be valid too. + +- name: pause logging the trail we just created + cloudtrail: + state: present + name: default + enable_logging: false + s3_bucket_name: mylogbucket + region: us-east-1 + is_multi_region_trail: true + enable_log_file_validation: true + tags: + environment: dev + Name: default + +- name: delete a trail + cloudtrail: + state: absent + name: default +''' + +RETURN = ''' +exists: + description: whether the resource exists + returned: always + type: bool + sample: true +trail: + description: CloudTrail resource details + returned: always + type: complex + sample: hash/dictionary of values + contains: + trail_arn: + description: Full ARN of the CloudTrail resource + returned: success + type: str + sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default + name: + description: Name of the CloudTrail resource + returned: success + type: str + sample: default + is_logging: + description: Whether logging is turned on or paused for the Trail + returned: success + type: bool + sample: True + s3_bucket_name: + description: S3 bucket name where log files are delivered + returned: success + type: str + sample: myBucket + s3_key_prefix: + description: Key prefix in bucket where log files are delivered (if any) + returned: success when present + type: str + sample: myKeyPrefix + log_file_validation_enabled: + description: Whether log file validation is enabled on the trail + returned: success + type: bool + sample: true + include_global_service_events: + description: Whether global services (IAM, STS) are logged with this trail + returned: success + type: bool + sample: true + is_multi_region_trail: + description: Whether the trail applies to all regions or just one + returned: success + type: bool + sample: true + has_custom_event_selectors: + description: Whether any custom event selectors are used for this trail. + returned: success + type: bool + sample: False + home_region: + description: The home region where the trail was originally created and must be edited. + returned: success + type: str + sample: us-east-1 + sns_topic_name: + description: The SNS topic name where log delivery notifications are sent. + returned: success when present + type: str + sample: myTopic + sns_topic_arn: + description: Full ARN of the SNS topic where log delivery notifications are sent. 
+ returned: success when present + type: str + sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic + cloud_watch_logs_log_group_arn: + description: Full ARN of the CloudWatch Logs log group where events are delivered. + returned: success when present + type: str + sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:* + cloud_watch_logs_role_arn: + description: Full ARN of the IAM role that CloudTrail assumes to deliver events. + returned: success when present + type: str + sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role + kms_key_id: + description: Full ARN of the KMS Key used to encrypt log files. + returned: success when present + type: str + sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012 + tags: + description: hash/dictionary of tags applied to this resource + returned: success + type: dict + sample: {'environment': 'dev', 'Name': 'default'} +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict) + + +def create_trail(module, client, ct_params): + """ + Creates a CloudTrail + + module : AnsibleAWSModule object + client : boto3 client connection object + ct_params : The parameters for the Trail to create + """ + resp = {} + try: + resp = client.create_trail(**ct_params) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to create Trail") + + return resp + + +def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False): + """ + Creates, updates, removes tags on a CloudTrail resource + + module : AnsibleAWSModule object + client : boto3 client connection object + tags : Dict of tags converted from ansible_dict to boto3 list of dicts + trail_arn : The ARN of the CloudTrail to operate on + curr_tags : Dict of the current tags on resource, if any + dry_run : true/false to determine if changes will be made if needed + """ + adds = [] + removes = [] + updates = [] + changed = False + + if curr_tags is None: + # No current tags so just convert all to a tag list + adds = ansible_dict_to_boto3_tag_list(tags) + else: + curr_keys = set(curr_tags.keys()) + new_keys = set(tags.keys()) + add_keys = new_keys - curr_keys + remove_keys = curr_keys - new_keys + update_keys = dict() + for k in curr_keys.intersection(new_keys): + if curr_tags[k] != tags[k]: + update_keys.update({k: tags[k]}) + + adds = get_tag_list(add_keys, tags) + removes = get_tag_list(remove_keys, curr_tags) + updates = get_tag_list(update_keys, tags) + + if removes or updates: + changed = True + if not dry_run: + try: + client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to remove tags from Trail") + + if updates or adds: + changed = True + if not dry_run: + try: + client.add_tags(ResourceId=trail_arn, TagsList=updates + adds) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to add tags to Trail") + + return changed + + +def get_tag_list(keys, tags): + """ + Returns a list of dicts with tags to act on + keys : set of keys to get the values for + tags : the dict of tags to turn into a list + """ + tag_list = [] + for k in keys: + 
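        # each tag key/value pair becomes a boto3 Tag dict,
+        # e.g. {'environment': 'dev'} -> [{'Key': 'environment', 'Value': 'dev'}]
+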
        tag_list.append({'Key': k, 'Value': tags[k]})
+
+    return tag_list
+
+
+def set_logging(module, client, name, action):
+    """
+    Starts or stops logging based on given state
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    name : The name or ARN of the CloudTrail to operate on
+    action : start or stop
+    """
+    if action == 'start':
+        try:
+            client.start_logging(Name=name)
+            return client.get_trail_status(Name=name)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to start logging")
+    elif action == 'stop':
+        try:
+            client.stop_logging(Name=name)
+            return client.get_trail_status(Name=name)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to stop logging")
+    else:
+        module.fail_json(msg="Unsupported logging action")
+
+
+def get_trail_facts(module, client, name):
+    """
+    Describes existing trail in an account
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    name : Name of the trail
+    """
+    # get Trail info
+    try:
+        trail_resp = client.describe_trails(trailNameList=[name])
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to describe Trail")
+
+    # Now check to see if our trail exists and get status and tags
+    if len(trail_resp['trailList']):
+        trail = trail_resp['trailList'][0]
+        try:
+            status_resp = client.get_trail_status(Name=trail['Name'])
+            tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to describe Trail status and tags")
+
+        trail['IsLogging'] = status_resp['IsLogging']
+        trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
+        # Check for non-existent values and populate with None
+        optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
+        for v in optional_vals - set(trail.keys()):
+            trail[v] = None
+        return trail
+
+    else:
+        # trail doesn't exist, so return None
+        return None
+
+
+def delete_trail(module, client, trail_arn):
+    """
+    Delete a CloudTrail
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    trail_arn : Full CloudTrail ARN
+    """
+    try:
+        client.delete_trail(Name=trail_arn)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to delete Trail")
+
+
+def update_trail(module, client, ct_params):
+    """
+    Update a CloudTrail
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    ct_params : The parameters for the Trail to update
+    """
+    try:
+        client.update_trail(**ct_params)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to update Trail")
+
+
+def main():
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+        name=dict(default='default'),
+        enable_logging=dict(default=True, type='bool'),
+        s3_bucket_name=dict(),
+        s3_key_prefix=dict(),
+        sns_topic_name=dict(),
+        is_multi_region_trail=dict(default=False, type='bool'),
+        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
+        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
+        cloudwatch_logs_role_arn=dict(),
+        cloudwatch_logs_log_group_arn=dict(),
+        kms_key_id=dict(),
+        tags=dict(default={}, type='dict'),
+    )
+
+    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled',
['s3_bucket_name'])] + required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if) + + # collect parameters + if module.params['state'] in ('present', 'enabled'): + state = 'present' + elif module.params['state'] in ('absent', 'disabled'): + state = 'absent' + tags = module.params['tags'] + enable_logging = module.params['enable_logging'] + ct_params = dict( + Name=module.params['name'], + S3BucketName=module.params['s3_bucket_name'], + IncludeGlobalServiceEvents=module.params['include_global_events'], + IsMultiRegionTrail=module.params['is_multi_region_trail'], + ) + + if module.params['s3_key_prefix']: + ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/') + + if module.params['sns_topic_name']: + ct_params['SnsTopicName'] = module.params['sns_topic_name'] + + if module.params['cloudwatch_logs_role_arn']: + ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn'] + + if module.params['cloudwatch_logs_log_group_arn']: + ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn'] + + if module.params['enable_log_file_validation'] is not None: + ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation'] + + if module.params['kms_key_id']: + ct_params['KmsKeyId'] = module.params['kms_key_id'] + + client = module.client('cloudtrail') + region = module.region + + results = dict( + changed=False, + exists=False + ) + + # Get existing trail facts + trail = get_trail_facts(module, client, ct_params['Name']) + + # If the trail exists set the result exists variable + if trail is not None: + results['exists'] = True + + if state == 'absent' and results['exists']: + # If Trail exists go ahead and delete + results['changed'] = True + results['exists'] = False + results['trail'] = dict() + if not module.check_mode: + delete_trail(module, client, trail['TrailARN']) + + elif state == 'present' and results['exists']: + # If Trail exists see if we need to update it + do_update = False + for key in ct_params: + tkey = str(key) + # boto3 has inconsistent parameter naming so we handle it here + if key == 'EnableLogFileValidation': + tkey = 'LogFileValidationEnabled' + # We need to make an empty string equal None + if ct_params.get(key) == '': + val = None + else: + val = ct_params.get(key) + if val != trail.get(tkey): + do_update = True + results['changed'] = True + # If we are in check mode copy the changed values to the trail facts in result output to show what would change. 
+                if module.check_mode:
+                    trail.update({tkey: ct_params.get(key)})
+
+        if not module.check_mode and do_update:
+            update_trail(module, client, ct_params)
+            trail = get_trail_facts(module, client, ct_params['Name'])
+
+        # Check if we need to start/stop logging
+        if enable_logging and not trail['IsLogging']:
+            results['changed'] = True
+            trail['IsLogging'] = True
+            if not module.check_mode:
+                set_logging(module, client, name=ct_params['Name'], action='start')
+        if not enable_logging and trail['IsLogging']:
+            results['changed'] = True
+            trail['IsLogging'] = False
+            if not module.check_mode:
+                set_logging(module, client, name=ct_params['Name'], action='stop')
+
+        # Check if we need to update tags on resource
+        tag_dry_run = False
+        if module.check_mode:
+            tag_dry_run = True
+        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
+        if tags_changed:
+            results['changed'] = True
+            trail['tags'] = tags
+        # Populate trail facts in output
+        results['trail'] = camel_dict_to_snake_dict(trail)
+
+    elif state == 'present' and not results['exists']:
+        # Trail doesn't exist, so go ahead and create it
+        results['changed'] = True
+        if not module.check_mode:
+            # If we aren't in check_mode then actually create it
+            created_trail = create_trail(module, client, ct_params)
+            # Apply tags
+            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
+            # Get the trail status
+            try:
+                status_resp = client.get_trail_status(Name=created_trail['Name'])
+            except (BotoCoreError, ClientError) as err:
+                module.fail_json_aws(err, msg="Failed to fetch Trail status")
+            # Set the logging state for the trail to desired value
+            if enable_logging and not status_resp['IsLogging']:
+                set_logging(module, client, name=ct_params['Name'], action='start')
+            if not enable_logging and status_resp['IsLogging']:
+                set_logging(module, client, name=ct_params['Name'], action='stop')
+            # Get facts for newly created Trail
+            trail = get_trail_facts(module, client, ct_params['Name'])
+
+        # If we are in check mode create a fake return structure for the newly minted trail
+        if module.check_mode:
+            acct_id = '123456789012'
+            try:
+                sts_client = module.client('sts')
+                acct_id = sts_client.get_caller_identity()['Account']
+            except (BotoCoreError, ClientError):
+                pass
+            trail = dict()
+            trail.update(ct_params)
+            if 'EnableLogFileValidation' not in ct_params:
+                ct_params['EnableLogFileValidation'] = False
+            # the describe API reports this field as LogFileValidationEnabled, so rename the key
+            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
+            trail.pop('EnableLogFileValidation')
+            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
+            trail['HasCustomEventSelectors'] = False
+            trail['HomeRegion'] = region
+            trail['TrailARN'] = fake_arn
+            trail['IsLogging'] = enable_logging
+            trail['tags'] = tags
+        # Populate trail facts in output
+        results['trail'] = camel_dict_to_snake_dict(trail)
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py
new file mode 100644
index 00000000000..49c6acc65fd
--- /dev/null
+++ b/cloudwatchevent_rule.py
@@ -0,0 +1,464 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: cloudwatchevent_rule
+short_description: Manage CloudWatch Event rules and targets
+description:
+  - This module creates and manages CloudWatch event rules and targets.
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+author: "Jim Dalton (@jsdalton)"
+requirements:
+  - python >= 2.6
+  - boto3
+notes:
+  - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
+    rule can have both an I(event_pattern) and a I(schedule_expression), in which
+    case the rule will trigger on matching events as well as on a schedule.
+  - When specifying targets, I(input) and I(input_path) are mutually exclusive
+    and optional parameters.
+options:
+  name:
+    description:
+      - The name of the rule you are creating, updating or deleting. No spaces
+        or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
+    required: true
+    type: str
+  schedule_expression:
+    description:
+      - A cron or rate expression that defines the schedule the rule will
+        trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
+    required: false
+    type: str
+  event_pattern:
+    description:
+      - A string pattern (in valid JSON format) that is used to match against
+        incoming events to determine if the rule should be triggered.
+    required: false
+    type: str
+  state:
+    description:
+      - Whether the rule is present (and enabled), disabled, or absent.
+    choices: ["present", "disabled", "absent"]
+    default: present
+    required: false
+    type: str
+  description:
+    description:
+      - A description of the rule.
+    required: false
+    type: str
+  role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the IAM role associated with the rule.
+    required: false
+    type: str
+  targets:
+    type: list
+    elements: dict
+    description:
+      - A list of targets to add to or update for the rule.
+    suboptions:
+      id:
+        type: str
+        required: true
+        description: The unique target assignment ID.
+      arn:
+        type: str
+        required: true
+        description: The ARN associated with the target.
+      role_arn:
+        type: str
+        description: The ARN of the IAM role to be used for this target when the rule is triggered.
+      input:
+        type: str
+        description:
+          - A JSON object that will override the event data when passed to the target.
+          - If neither I(input) nor I(input_path) is specified, then the entire
+            event is passed to the target in JSON form.
+      input_path:
+        type: str
+        description:
+          - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
+            passed to the target.
+          - If neither I(input) nor I(input_path) is specified, then the entire
+            event is passed to the target in JSON form.
+      ecs_parameters:
+        type: dict
+        description:
+          - Contains the ECS task definition and task count to be used, if the event target is an ECS task.
+        suboptions:
+          task_definition_arn:
+            type: str
+            description: The full ARN of the task definition.
+          task_count:
+            type: int
+            description: The number of tasks to create based on I(task_definition_arn).
+    required: false
+'''
+
+EXAMPLES = '''
+- cloudwatchevent_rule:
+    name: MyCronTask
+    schedule_expression: "cron(0 20 * * ?
*)" + description: Run my scheduled task + targets: + - id: MyTargetId + arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction + +- cloudwatchevent_rule: + name: MyDisabledCronTask + schedule_expression: "rate(5 minutes)" + description: Run my disabled scheduled task + state: disabled + targets: + - id: MyOtherTargetId + arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction + input: '{"foo": "bar"}' + +- cloudwatchevent_rule: + name: MyCronTask + state: absent +''' + +RETURN = ''' +rule: + description: CloudWatch Event rule data. + returned: success + type: dict + sample: + arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask' + description: 'Run my scheduled task' + name: 'MyCronTask' + schedule_expression: 'cron(0 20 * * ? *)' + state: 'ENABLED' +targets: + description: CloudWatch Event target(s) assigned to the rule. + returned: success + type: list + sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" +''' + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +class CloudWatchEventRule(object): + def __init__(self, module, name, client, schedule_expression=None, + event_pattern=None, description=None, role_arn=None): + self.name = name + self.client = client + self.changed = False + self.schedule_expression = schedule_expression + self.event_pattern = event_pattern + self.description = description + self.role_arn = role_arn + self.module = module + + def describe(self): + """Returns the existing details of the rule in AWS""" + try: + rule_info = self.client.describe_rule(Name=self.name) + except botocore.exceptions.ClientError as e: + error_code = e.response.get('Error', {}).get('Code') + if error_code == 'ResourceNotFoundException': + return {} + self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) + except botocore.exceptions.BotoCoreError as e: + self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) + return self._snakify(rule_info) + + def put(self, enabled=True): + """Creates or updates the rule in AWS""" + request = { + 'Name': self.name, + 'State': "ENABLED" if enabled else "DISABLED", + } + if self.schedule_expression: + request['ScheduleExpression'] = self.schedule_expression + if self.event_pattern: + request['EventPattern'] = self.event_pattern + if self.description: + request['Description'] = self.description + if self.role_arn: + request['RoleArn'] = self.role_arn + try: + response = self.client.put_rule(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) + self.changed = True + return response + + def delete(self): + """Deletes the rule in AWS""" + self.remove_all_targets() + + try: + response = self.client.delete_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) + self.changed = True + return response + + def enable(self): + """Enables the rule in AWS""" + try: + response = self.client.enable_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) + 
self.changed = True + return response + + def disable(self): + """Disables the rule in AWS""" + try: + response = self.client.disable_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) + self.changed = True + return response + + def list_targets(self): + """Lists the existing targets for the rule in AWS""" + try: + targets = self.client.list_targets_by_rule(Rule=self.name) + except botocore.exceptions.ClientError as e: + error_code = e.response.get('Error', {}).get('Code') + if error_code == 'ResourceNotFoundException': + return [] + self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) + except botocore.exceptions.BotoCoreError as e: + self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) + return self._snakify(targets)['targets'] + + def put_targets(self, targets): + """Creates or updates the provided targets on the rule in AWS""" + if not targets: + return + request = { + 'Rule': self.name, + 'Targets': self._targets_request(targets), + } + try: + response = self.client.put_targets(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) + self.changed = True + return response + + def remove_targets(self, target_ids): + """Removes the provided targets from the rule in AWS""" + if not target_ids: + return + request = { + 'Rule': self.name, + 'Ids': target_ids + } + try: + response = self.client.remove_targets(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) + self.changed = True + return response + + def remove_all_targets(self): + """Removes all targets on rule""" + targets = self.list_targets() + return self.remove_targets([t['id'] for t in targets]) + + def _targets_request(self, targets): + """Formats each target for the request""" + targets_request = [] + for target in targets: + target_request = { + 'Id': target['id'], + 'Arn': target['arn'] + } + if 'input' in target: + target_request['Input'] = target['input'] + if 'input_path' in target: + target_request['InputPath'] = target['input_path'] + if 'role_arn' in target: + target_request['RoleArn'] = target['role_arn'] + if 'ecs_parameters' in target: + target_request['EcsParameters'] = {} + ecs_parameters = target['ecs_parameters'] + if 'task_definition_arn' in target['ecs_parameters']: + target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn'] + if 'task_count' in target['ecs_parameters']: + target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count'] + targets_request.append(target_request) + return targets_request + + def _snakify(self, dict): + """Converts camel case to snake case""" + return camel_dict_to_snake_dict(dict) + + +class CloudWatchEventRuleManager(object): + RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn'] + + def __init__(self, rule, targets): + self.rule = rule + self.targets = targets + + def ensure_present(self, enabled=True): + """Ensures the rule and targets are present and synced""" + rule_description = self.rule.describe() + if rule_description: + # Rule exists so update rule, targets and state + self._sync_rule(enabled) + self._sync_targets() + 
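            # finally, align the rule's ENABLED/DISABLED state with the requested state
+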
self._sync_state(enabled) + else: + # Rule does not exist, so create new rule and targets + self._create(enabled) + + def ensure_disabled(self): + """Ensures the rule and targets are present, but disabled, and synced""" + self.ensure_present(enabled=False) + + def ensure_absent(self): + """Ensures the rule and targets are absent""" + rule_description = self.rule.describe() + if not rule_description: + # Rule doesn't exist so don't need to delete + return + self.rule.delete() + + def fetch_aws_state(self): + """Retrieves rule and target state from AWS""" + aws_state = { + 'rule': {}, + 'targets': [], + 'changed': self.rule.changed + } + rule_description = self.rule.describe() + if not rule_description: + return aws_state + + # Don't need to include response metadata noise in response + del rule_description['response_metadata'] + + aws_state['rule'] = rule_description + aws_state['targets'].extend(self.rule.list_targets()) + return aws_state + + def _sync_rule(self, enabled=True): + """Syncs local rule state with AWS""" + if not self._rule_matches_aws(): + self.rule.put(enabled) + + def _sync_targets(self): + """Syncs local targets with AWS""" + # Identify and remove extraneous targets on AWS + target_ids_to_remove = self._remote_target_ids_to_remove() + if target_ids_to_remove: + self.rule.remove_targets(target_ids_to_remove) + + # Identify targets that need to be added or updated on AWS + targets_to_put = self._targets_to_put() + if targets_to_put: + self.rule.put_targets(targets_to_put) + + def _sync_state(self, enabled=True): + """Syncs local rule state with AWS""" + remote_state = self._remote_state() + if enabled and remote_state != 'ENABLED': + self.rule.enable() + elif not enabled and remote_state != 'DISABLED': + self.rule.disable() + + def _create(self, enabled=True): + """Creates rule and targets on AWS""" + self.rule.put(enabled) + self.rule.put_targets(self.targets) + + def _rule_matches_aws(self): + """Checks if the local rule data matches AWS""" + aws_rule_data = self.rule.describe() + + # The rule matches AWS only if all rule data fields are equal + # to their corresponding local value defined in the task + return all([ + getattr(self.rule, field) == aws_rule_data.get(field, None) + for field in self.RULE_FIELDS + ]) + + def _targets_to_put(self): + """Returns a list of targets that need to be updated or added remotely""" + remote_targets = self.rule.list_targets() + return [t for t in self.targets if t not in remote_targets] + + def _remote_target_ids_to_remove(self): + """Returns a list of targets that need to be removed remotely""" + target_ids = [t['id'] for t in self.targets] + remote_targets = self.rule.list_targets() + return [ + rt['id'] for rt in remote_targets if rt['id'] not in target_ids + ] + + def _remote_state(self): + """Returns the remote state from AWS""" + description = self.rule.describe() + if not description: + return + return description['state'] + + +def main(): + argument_spec = dict( + name=dict(required=True), + schedule_expression=dict(), + event_pattern=dict(), + state=dict(choices=['present', 'disabled', 'absent'], + default='present'), + description=dict(), + role_arn=dict(), + targets=dict(type='list', default=[]), + ) + module = AnsibleAWSModule(argument_spec=argument_spec) + + rule_data = dict( + [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS] + ) + targets = module.params.get('targets') + state = module.params.get('state') + client = module.client('events') + + cwe_rule = CloudWatchEventRule(module, 
client=client, **rule_data)
+    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+    if state == 'present':
+        cwe_rule_manager.ensure_present()
+    elif state == 'disabled':
+        cwe_rule_manager.ensure_disabled()
+    elif state == 'absent':
+        cwe_rule_manager.ensure_absent()
+    else:
+        module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+    module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py
new file mode 100644
index 00000000000..289a3f5efbe
--- /dev/null
+++ b/cloudwatchlogs_log_group.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group
+short_description: Create or delete log groups in CloudWatch Logs
+notes:
+  - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
+description:
+  - Create or delete log groups in CloudWatch Logs.
+author:
+  - Willian Ricardo (@willricardo)
+requirements: [ json, botocore, boto3 ]
+options:
+  state:
+    description:
+      - Whether the log group is present or absent.
+    choices: ["present", "absent"]
+    default: present
+    required: false
+    type: str
+  log_group_name:
+    description:
+      - The name of the log group.
+    required: true
+    type: str
+  kms_key_id:
+    description:
+      - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+    required: false
+    type: str
+  tags:
+    description:
+      - The key-value pairs to use for the tags.
+    required: false
+    type: dict
+  retention:
+    description:
+      - The number of days to retain the log events in the specified log group.
+      - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
+      - Mutually exclusive with I(purge_retention_policy).
+    required: false
+    type: int
+  purge_retention_policy:
+    description:
+      - "Whether to purge the retention policy or not."
+      - "Mutually exclusive with I(retention) and I(overwrite)."
+    default: false
+    required: false
+    type: bool
+  overwrite:
+    description:
+      - Whether an existing log group should be overwritten on create.
+      - Mutually exclusive with I(purge_retention_policy).
+    default: false
+    required: false
+    type: bool
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- cloudwatchlogs_log_group:
+    log_group_name: test-log-group
+
+- cloudwatchlogs_log_group:
+    state: present
+    log_group_name: test-log-group
+    tags: { "Name": "test-log-group", "Env" : "QA" }
+
+- cloudwatchlogs_log_group:
+    state: present
+    log_group_name: test-log-group
+    tags: { "Name": "test-log-group", "Env" : "QA" }
+    kms_key_id: arn:aws:kms:region:account-id:key/key-id
+
+- cloudwatchlogs_log_group:
+    state: absent
+    log_group_name: test-log-group
+
+'''
+
+RETURN = '''
+log_groups:
+    description: Return the list of complex objects representing log groups
+    returned: success
+    type: complex
+    contains:
+        log_group_name:
+            description: The name of the log group.
+            returned: always
+            type: str
+        creation_time:
+            description: The creation time of the log group.
+            returned: always
+            type: int
+        retention_in_days:
+            description: The number of days to retain the log events in the specified log group.
+            returned: always
+            type: int
+        metric_filter_count:
+            description: The number of metric filters.
+            returned: always
+            type: int
+        arn:
+            description: The Amazon Resource Name (ARN) of the log group.
+            returned: always
+            type: str
+        stored_bytes:
+            description: The number of bytes stored.
+            returned: always
+            type: str
+        kms_key_id:
+            description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+            returned: always
+            type: str
+'''
+
+import traceback
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be detected by imported HAS_BOTO3
+
+
+def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
+    request = {'logGroupName': log_group_name}
+    if kms_key_id:
+        request['kmsKeyId'] = kms_key_id
+    if tags:
+        request['tags'] = tags
+
+    try:
+        client.create_log_group(**request)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
+                         exception=traceback.format_exc())
+
+    if retention:
+        input_retention_policy(client=client,
+                               log_group_name=log_group_name,
+                               retention=retention, module=module)
+
+    desc_log_group = describe_log_group(client=client,
+                                        log_group_name=log_group_name,
+                                        module=module)
+
+    if 'logGroups' in desc_log_group:
+        for i in desc_log_group['logGroups']:
+            if log_group_name == i['logGroupName']:
+                return i
+    module.fail_json(msg="The CloudWatch Logs log group was not created. Please try again.")
+
+
+def input_retention_policy(client, log_group_name, retention, module):
+    try:
+        permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
+
+        if retention in permitted_values:
+            response = client.put_retention_policy(logGroupName=log_group_name,
+                                                   retentionInDays=retention)
+        else:
+            delete_log_group(client=client, log_group_name=log_group_name, module=module)
+            module.fail_json(msg="Invalid retention value.
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc()) + + +def delete_retention_policy(client, log_group_name, module): + try: + client.delete_retention_policy(logGroupName=log_group_name) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc()) + + +def delete_log_group(client, log_group_name, module): + desc_log_group = describe_log_group(client=client, + log_group_name=log_group_name, + module=module) + + try: + if 'logGroups' in desc_log_group: + for i in desc_log_group['logGroups']: + if log_group_name == i['logGroupName']: + client.delete_log_group(logGroupName=log_group_name) + + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc()) + + +def describe_log_group(client, log_group_name, module): + try: + desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) + return desc_log_group + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc()) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + log_group_name=dict(required=True, type='str'), + state=dict(choices=['present', 'absent'], + default='present'), + kms_key_id=dict(required=False, type='str'), + tags=dict(required=False, type='dict'), + retention=dict(required=False, type='int'), + purge_retention_policy=dict(required=False, type='bool', default=False), + overwrite=dict(required=False, type='bool', default=False) + )) + + mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] + module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + + state = module.params.get('state') + changed = False + + # Determine if the log group exists + desc_log_group = describe_log_group(client=logs, 
log_group_name=module.params['log_group_name'], module=module) + found_log_group = {} + for i in desc_log_group.get('logGroups', []): + if module.params['log_group_name'] == i['logGroupName']: + found_log_group = i + break + + if state == 'present': + if found_log_group: + if module.params['overwrite'] is True: + changed = True + delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) + found_log_group = create_log_group(client=logs, + log_group_name=module.params['log_group_name'], + kms_key_id=module.params['kms_key_id'], + tags=module.params['tags'], + retention=module.params['retention'], + module=module) + elif module.params['purge_retention_policy']: + if found_log_group.get('retentionInDays'): + changed = True + delete_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + elif module.params['retention'] != found_log_group.get('retentionInDays'): + if module.params['retention'] is not None: + changed = True + input_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + retention=module.params['retention'], + module=module) + found_log_group['retentionInDays'] = module.params['retention'] + + elif not found_log_group: + changed = True + found_log_group = create_log_group(client=logs, + log_group_name=module.params['log_group_name'], + kms_key_id=module.params['kms_key_id'], + tags=module.params['tags'], + retention=module.params['retention'], + module=module) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group)) + + elif state == 'absent': + if found_log_group: + changed = True + delete_log_group(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/cloudwatchlogs_log_group_facts.py b/cloudwatchlogs_log_group_facts.py new file mode 120000 index 00000000000..402937478ad --- /dev/null +++ b/cloudwatchlogs_log_group_facts.py @@ -0,0 +1 @@ +cloudwatchlogs_log_group_info.py \ No newline at end of file diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py new file mode 100644 index 00000000000..a4f85673bf3 --- /dev/null +++ b/cloudwatchlogs_log_group_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cloudwatchlogs_log_group_info +short_description: Get information about log_group in CloudWatchLogs +description: + - Lists the specified log groups. You can list all your log groups or filter the results by prefix. + - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change. +author: + - Willian Ricardo (@willricardo) +requirements: [ botocore, boto3 ] +options: + log_group_name: + description: + - The name or prefix of the log group to filter by. + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
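+
+# List all log groups by omitting log_group_name (it only acts as a prefix filter);
+# "all_log_groups" is an illustrative register name.
+- cloudwatchlogs_log_group_info:
+  register: all_log_groups
+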
+- cloudwatchlogs_log_group_info: + log_group_name: test-log-group +''' + +RETURN = ''' +log_groups: + description: Return the list of complex objects representing log groups + returned: success + type: complex + contains: + log_group_name: + description: The name of the log group. + returned: always + type: str + creation_time: + description: The creation time of the log group. + returned: always + type: int + retention_in_days: + description: The number of days to retain the log events in the specified log group. + returned: always + type: int + metric_filter_count: + description: The number of metric filters. + returned: always + type: int + arn: + description: The Amazon Resource Name (ARN) of the log group. + returned: always + type: str + stored_bytes: + description: The number of bytes stored. + returned: always + type: str + kms_key_id: + description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + returned: always + type: str +''' + +import traceback +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info + +try: + import botocore +except ImportError: + pass # will be detected by imported HAS_BOTO3 + + +def describe_log_group(client, log_group_name, module): + params = {} + if log_group_name: + params['logGroupNamePrefix'] = log_group_name + try: + paginator = client.get_paginator('describe_log_groups') + desc_log_group = paginator.paginate(**params).build_full_result() + return desc_log_group + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), + exception=traceback.format_exc()) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + log_group_name=dict(), + )) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'cloudwatchlogs_log_group_facts': + module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + + desc_log_group = describe_log_group(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + final_log_group_snake = [] + + for log_group in desc_log_group['logGroups']: + final_log_group_snake.append(camel_dict_to_snake_dict(log_group)) + + desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) + module.exit_json(**desc_log_group_result) + + +if __name__ == '__main__': + main() diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py new file mode 100644 index 00000000000..512e49a3cba --- /dev/null +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from 
__future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_metric_filter
+author:
+  - "Markus Bergholz (@markuman)"
+short_description: Manage CloudWatch log group metric filter
+description:
+  - Create, modify and delete CloudWatch log group metric filter.
+  - CloudWatch log group metric filter can be used with M(ec2_metric_alarm).
+requirements:
+  - boto3
+  - botocore
+options:
+  state:
+    description:
+      - Whether the metric filter is present or absent.
+    choices: ["present", "absent"]
+    required: true
+    type: str
+  log_group_name:
+    description:
+      - The name of the log group the metric filter is applied to.
+    required: true
+    type: str
+  filter_name:
+    description:
+      - A name for the metric filter you create.
+    required: true
+    type: str
+  filter_pattern:
+    description:
+      - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present).
+    type: str
+  metric_transformation:
+    description:
+      - A collection of information that defines how metric data gets emitted. Required when I(state=present).
+    type: dict
+    suboptions:
+      metric_name:
+        description:
+          - The name of the CloudWatch metric.
+        type: str
+      metric_namespace:
+        description:
+          - The namespace of the CloudWatch metric.
+        type: str
+      metric_value:
+        description:
+          - The value to publish to the CloudWatch metric when a filter pattern matches a log event.
+        type: str
+      default_value:
+        description:
+          - The value to emit when a filter pattern does not match a log event.
+        type: float
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: set metric filter on log group /fluentd/testcase
+  cloudwatchlogs_log_group_metric_filter:
+    log_group_name: /fluentd/testcase
+    filter_name: BoxFreeStorage
+    filter_pattern: '{($.value = *) && ($.hostname = "box")}'
+    state: present
+    metric_transformation:
+      metric_name: box_free_space
+      metric_namespace: fluentd_metrics
+      metric_value: "$.value"
+
+- name: delete metric filter on log group /fluentd/testcase
+  cloudwatchlogs_log_group_metric_filter:
+    log_group_name: /fluentd/testcase
+    filter_name: BoxFreeStorage
+    state: absent
+'''
+
+RETURN = """
+metric_filters:
+    description: Return the original response value
+    returned: success
+    type: list
+    contains:
+        creation_time:
+            description: The creation time of the metric filter.
+            type: int
+        filter_name:
+            description: The name of the metric filter.
+            type: str
+        filter_pattern:
+            description: The pattern of the metric filter.
+            type: str
+        log_group_name:
+            description: The name of the log group the metric filter belongs to.
+            type: str
+        metric_filter_count:
+            description: The number of metric filters.
+            type: int
+"""
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def metricTransformationHandler(metricTransformations, originMetricTransformations=None):
+
+    if originMetricTransformations:
+        change = False
+        originMetricTransformations = camel_dict_to_snake_dict(
+            originMetricTransformations)
+        for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]:
+            if metricTransformations.get(item) != originMetricTransformations.get(item):
+                change = True
+    else:
+        change = True
+
+    defaultValue = metricTransformations.get("default_value")
+    if isinstance(defaultValue, (int, float)):
+        retval = [
+            {
+
'metricName': metricTransformations.get("metric_name"), + 'metricNamespace': metricTransformations.get("metric_namespace"), + 'metricValue': metricTransformations.get("metric_value"), + 'defaultValue': defaultValue + } + ] + else: + retval = [ + { + 'metricName': metricTransformations.get("metric_name"), + 'metricNamespace': metricTransformations.get("metric_namespace"), + 'metricValue': metricTransformations.get("metric_value"), + } + ] + + return retval, change + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + log_group_name=dict(type='str', required=True), + filter_name=dict(type='str', required=True), + filter_pattern=dict(type='str'), + metric_transformation=dict(type='dict', options=dict( + metric_name=dict(type='str'), + metric_namespace=dict(type='str'), + metric_value=dict(type='str'), + default_value=dict(type='float') + )), + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])] + ) + + log_group_name = module.params.get("log_group_name") + filter_name = module.params.get("filter_name") + filter_pattern = module.params.get("filter_pattern") + metric_transformation = module.params.get("metric_transformation") + state = module.params.get("state") + + cwl = module.client('logs') + + # check if metric filter exists + response = cwl.describe_metric_filters( + logGroupName=log_group_name, + filterNamePrefix=filter_name + ) + + if len(response.get("metricFilters")) == 1: + originMetricTransformations = response.get( + "metricFilters")[0].get("metricTransformations")[0] + originFilterPattern = response.get("metricFilters")[ + 0].get("filterPattern") + else: + originMetricTransformations = None + originFilterPattern = None + change = False + metricTransformation = None + + if state == "absent" and originMetricTransformations: + if not module.check_mode: + response = cwl.delete_metric_filter( + logGroupName=log_group_name, + filterName=filter_name + ) + change = True + metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]] + + elif state == "present": + metricTransformation, change = metricTransformationHandler( + metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations) + + change = change or filter_pattern != originFilterPattern + + if change: + if not module.check_mode: + response = cwl.put_metric_filter( + logGroupName=log_group_name, + filterName=filter_name, + filterPattern=filter_pattern, + metricTransformations=metricTransformation + ) + + metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation] + + module.exit_json(changed=change, metric_filters=metricTransformation) + + +if __name__ == '__main__': + main() diff --git a/data_pipeline.py b/data_pipeline.py new file mode 100644 index 00000000000..d734e32e249 --- /dev/null +++ b/data_pipeline.py @@ -0,0 +1,652 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: data_pipeline +author: + - Raghu Udiyar (@raags) + - Sloane Hertel (@s-hertel) +requirements: [ "boto3" ] +short_description: Create and manage AWS 
Datapipelines +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +description: + - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) + given to the datapipeline. + - The pipeline definition must be in the format given here + U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax). + - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state. +options: + name: + description: + - The name of the Datapipeline to create/modify/delete. + required: true + type: str + description: + description: + - An optional description for the pipeline being created. + default: '' + type: str + objects: + type: list + elements: dict + description: + - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields). + suboptions: + id: + description: + - The ID of the object. + type: str + name: + description: + - The name of the object. + type: str + fields: + description: + - Key-value pairs that define the properties of the object. + - The value is specified as a reference to another object I(refValue) or as a string value I(stringValue) + but not as both. + type: list + elements: dict + suboptions: + key: + type: str + description: + - The field identifier. + stringValue: + type: str + description: + - The field value. + - Exactly one of I(stringValue) and I(refValue) may be specified. + refValue: + type: str + description: + - The field value, expressed as the identifier of another object. + - Exactly one of I(stringValue) and I(refValue) may be specified. + parameters: + description: + - A list of parameter objects (dicts) in the pipeline definition. + type: list + elements: dict + suboptions: + id: + description: + - The ID of the parameter object. + attributes: + description: + - A list of attributes (dicts) of the parameter object. + type: list + elements: dict + suboptions: + key: + description: The field identifier. + type: str + stringValue: + description: The field value. + type: str + + values: + description: + - A list of parameter values (dicts) in the pipeline definition. + type: list + elements: dict + suboptions: + id: + description: The ID of the parameter value + type: str + stringValue: + description: The field value + type: str + timeout: + description: + - Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise. + default: 300 + type: int + state: + description: + - The requested state of the pipeline. + choices: ['present', 'absent', 'active', 'inactive'] + default: present + type: str + tags: + description: + - A dict of key:value pair(s) to add to the pipeline. + type: dict + version: + description: + - The version option has never had any effect and will be removed in + Ansible 2.14 + type: str +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
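+
+# Deactivate a pipeline (sketch: 'inactive' is one of the accepted state choices)
+- data_pipeline:
+    name: test-dp
+    region: us-west-2
+    state: inactive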
+ +# Create pipeline +- data_pipeline: + name: test-dp + region: us-west-2 + objects: "{{pipelineObjects}}" + parameters: "{{pipelineParameters}}" + values: "{{pipelineValues}}" + tags: + key1: val1 + key2: val2 + state: present + +# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects +- data_pipeline: + name: test-dp + objects: + - "id": "DefaultSchedule" + "name": "Every 1 day" + "fields": + - "key": "period" + "stringValue": "1 days" + - "key": "type" + "stringValue": "Schedule" + - "key": "startAt" + "stringValue": "FIRST_ACTIVATION_DATE_TIME" + - "id": "Default" + "name": "Default" + "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" }, + { "key": "role", "stringValue": "DataPipelineDefaultRole" }, + { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" }, + { "key": "scheduleType", "stringValue": "cron" }, + { "key": "schedule", "refValue": "DefaultSchedule" }, + { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ] + state: active + +# Activate pipeline +- data_pipeline: + name: test-dp + region: us-west-2 + state: active + +# Delete pipeline +- data_pipeline: + name: test-dp + region: us-west-2 + state: absent + +''' + +RETURN = ''' +changed: + description: whether the data pipeline has been modified + type: bool + returned: always + sample: + changed: true +result: + description: + - Contains the data pipeline data (data_pipeline) and a return message (msg). + If the data pipeline exists data_pipeline will contain the keys description, name, + pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then + data_pipeline will be an empty dict. The msg describes the status of the operation. + returned: always + type: dict +''' + +import hashlib +import json +import time +import traceback + +try: + import boto3 + from botocore.exceptions import ClientError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict +from ansible.module_utils._text import to_text + + +DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] +DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] +DP_ACTIVATING_STATE = 'ACTIVATING' +DP_DEACTIVATING_STATE = 'DEACTIVATING' +PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$' + + +class DataPipelineNotFound(Exception): + pass + + +class TimeOutException(Exception): + pass + + +def pipeline_id(client, name): + """Return pipeline id for the given pipeline name + + :param object client: boto3 datapipeline client + :param string name: pipeline name + :returns: pipeline id + :raises: DataPipelineNotFound + + """ + pipelines = client.list_pipelines() + for dp in pipelines['pipelineIdList']: + if dp['name'] == name: + return dp['id'] + raise DataPipelineNotFound + + +def pipeline_description(client, dp_id): + """Return pipeline description list + + :param object client: boto3 datapipeline client + :returns: pipeline description dictionary + :raises: DataPipelineNotFound + + """ + try: + return client.describe_pipelines(pipelineIds=[dp_id]) + except ClientError as e: + raise DataPipelineNotFound + + +def pipeline_field(client, dp_id, field): + """Return a pipeline field from the pipeline description. + + The available fields are listed in describe_pipelines output. 
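+    For example, the '@pipelineState' field holds the pipeline's current state.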
+
+    :param object client: boto3 datapipeline client
+    :param string dp_id: pipeline id
+    :param string field: pipeline description field
+    :returns: pipeline field information
+
+    """
+    dp_description = pipeline_description(client, dp_id)
+    for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
+        if field_key['key'] == field:
+            return field_key['stringValue']
+    raise KeyError("Field key {0} not found!".format(field))
+
+
+def run_with_timeout(timeout, func, *func_args, **func_kwargs):
+    """Run func with the provided args and kwargs, and wait until
+    timeout for a truthy return value
+
+    :param int timeout: time to wait for status
+    :param function func: function to run, should return True or False
+    :param args func_args: function args to pass to func
+    :param kwargs func_kwargs: function keyword args
+    :returns: True if func returns truthy within timeout
+    :raises: TimeOutException
+
+    """
+
+    for count in range(timeout // 10):
+        if func(*func_args, **func_kwargs):
+            return True
+        else:
+            # check every 10s
+            time.sleep(10)
+
+    raise TimeOutException
+
+
+def check_dp_exists(client, dp_id):
+    """Check if datapipeline exists
+
+    :param object client: boto3 datapipeline client
+    :param string dp_id: pipeline id
+    :returns: True or False
+
+    """
+    try:
+        # pipeline_description raises DataPipelineNotFound
+        if pipeline_description(client, dp_id):
+            return True
+        else:
+            return False
+    except DataPipelineNotFound:
+        return False
+
+
+def check_dp_status(client, dp_id, status):
+    """Checks if datapipeline matches states in status list
+
+    :param object client: boto3 datapipeline client
+    :param string dp_id: pipeline id
+    :param list status: list of states to check against
+    :returns: True or False
+
+    """
+    if not isinstance(status, list):
+        raise AssertionError()
+    if pipeline_field(client, dp_id, field="@pipelineState") in status:
+        return True
+    else:
+        return False
+
+
+def pipeline_status_timeout(client, dp_id, status, timeout):
+    args = (client, dp_id, status)
+    return run_with_timeout(timeout, check_dp_status, *args)
+
+
+def pipeline_exists_timeout(client, dp_id, timeout):
+    args = (client, dp_id)
+    return run_with_timeout(timeout, check_dp_exists, *args)
+
+
+def activate_pipeline(client, module):
+    """Activates pipeline
+
+    """
+    dp_name = module.params.get('name')
+    timeout = module.params.get('timeout')
+
+    try:
+        dp_id = pipeline_id(client, dp_name)
+    except DataPipelineNotFound:
+        module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+    if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
+        changed = False
+    else:
+        try:
+            client.activate_pipeline(pipelineId=dp_id)
+        except ClientError as e:
+            if e.response["Error"]["Code"] == "InvalidRequestException":
+                module.fail_json(msg="You need to populate your pipeline before activation.")
+        try:
+            pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
+                                    timeout=timeout)
+        except TimeOutException:
+            if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+                # activated but completed more rapidly than it was checked
+                pass
+            else:
+                module.fail_json(msg=('Data Pipeline {0} failed to activate '
+                                      'within timeout {1} seconds').format(dp_name, timeout))
+        changed = True
+
+    data_pipeline = get_result(client, dp_id)
+    result = {'data_pipeline': data_pipeline,
+              'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
+
+    return (changed, result)
+
+
+def deactivate_pipeline(client, module):
+    """Deactivates pipeline
+
+    """
+    dp_name = module.params.get('name')
+    timeout = module.params.get('timeout')
+
+    try:
+        dp_id = pipeline_id(client, dp_name)
+    except DataPipelineNotFound:
+        module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+
+    if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
+        changed = False
+    else:
+        client.deactivate_pipeline(pipelineId=dp_id)
+        try:
+            pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
+                                    timeout=timeout)
+        except TimeOutException:
+            module.fail_json(msg=('Data Pipeline {0} failed to deactivate '
+                                  'within timeout {1} seconds').format(dp_name, timeout))
+        changed = True
+
+    data_pipeline = get_result(client, dp_id)
+    result = {'data_pipeline': data_pipeline,
+              'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
+
+    return (changed, result)
+
+
+def _delete_dp_with_check(dp_id, client, timeout):
+    client.delete_pipeline(pipelineId=dp_id)
+    try:
+        # pipeline_field raises DataPipelineNotFound once the pipeline is gone
+        pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout)
+    except DataPipelineNotFound:
+        return True
+
+
+def delete_pipeline(client, module):
+    """Deletes pipeline
+
+    """
+    dp_name = module.params.get('name')
+    timeout = module.params.get('timeout')
+
+    try:
+        dp_id = pipeline_id(client, dp_name)
+        _delete_dp_with_check(dp_id, client, timeout)
+        changed = True
+    except DataPipelineNotFound:
+        changed = False
+    except TimeOutException:
+        module.fail_json(msg=('Data Pipeline {0} failed to delete '
+                              'within timeout {1} seconds').format(dp_name, timeout))
+    result = {'data_pipeline': {},
+              'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
+
+    return (changed, result)
+
+
+def build_unique_id(module):
+    data = dict(module.params)
+    # Exclude objects and timeout from the hash so the definition can be
+    # updated, or the pipeline populated after creation, without forcing a
+    # new pipeline to be created.
+    for each in ('objects', 'timeout'):
+        data.pop(each, None)
+    json_data = json.dumps(data, sort_keys=True).encode("utf-8")
+    hashed_data = hashlib.md5(json_data).hexdigest()
+    return hashed_data
+
+
+def format_tags(tags):
+    """ Reformats tags
+
+    :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
+    :returns: list of dicts (e.g.
[{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
+
+    """
+    return [dict(key=k, value=v) for k, v in tags.items()]
+
+
+def get_result(client, dp_id):
+    """ Get the current state of the data pipeline and reformat it to snake_case for exit_json
+
+    :param object client: boto3 datapipeline client
+    :param string dp_id: pipeline id
+    :returns: reformatted dict of pipeline description
+
+    """
+    # pipeline_description returns a pipelineDescriptionList of length 1
+    # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict)
+    dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
+
+    # Get uniqueId and pipelineState in fields to add to the exit_json result
+    dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
+    dp["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState")
+
+    # Remove fields; can't make a list snake_case and most of the data is redundant
+    del dp["fields"]
+
+    # Note: tags is already formatted fine so we don't need to do anything with it
+
+    # Reformat data pipeline and add reformatted fields back
+    dp = camel_dict_to_snake_dict(dp)
+    return dp
+
+
+def diff_pipeline(client, module, objects, unique_id, dp_name):
+    """Check whether a pipeline with the given unique_id already exists and,
+    if so, whether its definition needs to be updated.
+    """
+    result = {}
+    changed = False
+    create_dp = False
+
+    # See if there is already a pipeline with the same unique_id
+    try:
+        dp_id = pipeline_id(client, dp_name)
+        dp_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId"))
+        if dp_unique_id != unique_id:
+            # A change is expected but not determined. Updated to a bool in create_pipeline().
+            changed = "NEW_VERSION"
+            create_dp = True
+        # Unique ids are the same - check if pipeline needs modification
+        else:
+            dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
+            # Definition needs to be updated
+            if dp_objects != objects:
+                changed, msg = define_pipeline(client, module, objects, dp_id)
+            # No changes
+            else:
+                msg = 'Data Pipeline {0} is present'.format(dp_name)
+            data_pipeline = get_result(client, dp_id)
+            result = {'data_pipeline': data_pipeline,
+                      'msg': msg}
+    except DataPipelineNotFound:
+        create_dp = True
+
+    return create_dp, changed, result
+
+
+def define_pipeline(client, module, objects, dp_id):
+    """Puts pipeline definition
+
+    """
+    dp_name = module.params.get('name')
+
+    if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
+        msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
+        changed = False
+
+    elif objects:
+        parameters = module.params.get('parameters')
+        values = module.params.get('values')
+
+        try:
+            client.put_pipeline_definition(pipelineId=dp_id,
+                                           pipelineObjects=objects,
+                                           parameterObjects=parameters,
+                                           parameterValues=values)
+            msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
+            changed = True
+        except ClientError as e:
+            module.fail_json(msg="Failed to put the definition for pipeline {0}. Check that string/reference fields "
+                                 "are not empty and that the number of objects in the pipeline does not exceed the "
+                                 "maximum allowed objects".format(dp_name), exception=traceback.format_exc())
+    else:
+        changed = False
+        msg = ""
+
+    return changed, msg
+
+
+def create_pipeline(client, module):
+    """Creates datapipeline. Uses uniqueId to achieve idempotency.
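+
+    If a pipeline with the same name already exists but was created with
+    different parameters (a different uniqueId hash), the old pipeline is
+    deleted and a new one is created; if only the definition differs, the
+    definition is updated in place.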
+
+    """
+    dp_name = module.params.get('name')
+    objects = module.params.get('objects', None)
+    description = module.params.get('description', '')
+    tags = module.params.get('tags')
+    timeout = module.params.get('timeout')
+
+    unique_id = build_unique_id(module)
+    create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
+
+    if changed == "NEW_VERSION":
+        # delete old version
+        changed, creation_result = delete_pipeline(client, module)
+
+    # There isn't a pipeline or it has different parameters than the pipeline in existence.
+    if create_dp:
+        # Make pipeline
+        try:
+            tags = format_tags(tags)
+            dp = client.create_pipeline(name=dp_name,
+                                        uniqueId=unique_id,
+                                        description=description,
+                                        tags=tags)
+            dp_id = dp['pipelineId']
+            pipeline_exists_timeout(client, dp_id, timeout)
+        except ClientError as e:
+            module.fail_json(msg="Failed to create the data pipeline {0}.".format(dp_name), exception=traceback.format_exc())
+        except TimeOutException:
+            module.fail_json(msg=('Data Pipeline {0} failed to create '
+                                  'within timeout {1} seconds').format(dp_name, timeout))
+        # Put pipeline definition
+        changed, msg = define_pipeline(client, module, objects, dp_id)
+
+        changed = True
+        data_pipeline = get_result(client, dp_id)
+        result = {'data_pipeline': data_pipeline,
+                  'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
+
+    return (changed, result)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            version=dict(removed_in_version='2.14'),
+            description=dict(required=False, default=''),
+            objects=dict(required=False, type='list', default=[]),
+            parameters=dict(required=False, type='list', default=[]),
+            timeout=dict(required=False, type='int', default=300),
+            state=dict(default='present', choices=['present', 'absent',
+                                                   'active', 'inactive']),
+            tags=dict(required=False, type='dict', default={}),
+            values=dict(required=False, type='list', default=[])
+        )
+    )
+    module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required for the datapipeline module!')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        if not region:
+            module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+        client = boto3_conn(module, conn_type='client',
+                            resource='datapipeline', region=region,
+                            endpoint=ec2_url, **aws_connect_kwargs)
+    except ClientError as e:
+        module.fail_json(msg="Can't authorize connection - " + str(e))
+
+    state = module.params.get('state')
+    if state == 'present':
+        changed, result = create_pipeline(client, module)
+    elif state == 'absent':
+        changed, result = delete_pipeline(client, module)
+    elif state == 'active':
+        changed, result = activate_pipeline(client, module)
+    elif state == 'inactive':
+        changed, result = deactivate_pipeline(client, module)
+
+    module.exit_json(result=result, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/dms_endpoint.py b/dms_endpoint.py
new file mode 100644
index 00000000000..b4f24643c66
--- /dev/null
+++ b/dms_endpoint.py
@@ -0,0 +1,472 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: dms_endpoint
+short_description: Creates or destroys a data migration services endpoint
+description:
+  - Creates or destroys a data migration services endpoint that can be
+    used to replicate data.
+options:
+  state:
+    description:
+      - State of the endpoint.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  endpointidentifier:
+    description:
+      - An identifier name for the endpoint.
+    type: str
+    required: true
+  endpointtype:
+    description:
+      - Type of endpoint to manage.
+    choices: ['source', 'target']
+    type: str
+    required: true
+  enginename:
+    description:
+      - Database engine to use. Please refer to the AWS DMS documentation
+        for more information on the supported engines and their limitations.
+    choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
+              'redshift', 's3', 'db2', 'azuredb', 'sybase',
+              'dynamodb', 'mongodb', 'sqlserver']
+    type: str
+    required: true
+  username:
+    description:
+      - Username the endpoint will use to connect to the database.
+    type: str
+  password:
+    description:
+      - Password used to connect to the database.
+      - This attribute is write only; the AWS API does not return it.
+    type: str
+  servername:
+    description:
+      - Name of the server the endpoint will connect to.
+    type: str
+  port:
+    description:
+      - TCP port for access to the database.
+    type: int
+  databasename:
+    description:
+      - Name for the database on the origin or target side.
+    type: str
+  extraconnectionattributes:
+    description:
+      - Extra attributes for the database connection. The AWS documentation
+        states "For more information about extra connection attributes,
+        see the documentation section for your data store."
+    type: str
+  kmskeyid:
+    description:
+      - Encryption key to use to encrypt replication storage and
+        connection information.
+    type: str
+  tags:
+    description:
+      - A dict of tags to add to the endpoint.
+    type: dict
+  certificatearn:
+    description:
+      - Amazon Resource Name (ARN) for the certificate.
+    type: str
+  sslmode:
+    description:
+      - Mode used for the SSL connection.
+    default: none
+    choices: ['none', 'require', 'verify-ca', 'verify-full']
+    type: str
+  serviceaccessrolearn:
+    description:
+      - Amazon Resource Name (ARN) for the service access role that you
+        want to use to create the endpoint.
+    type: str
+  externaltabledefinition:
+    description:
+      - The external table definition.
+    type: str
+  dynamodbsettings:
+    description:
+      - Settings in JSON format for the target Amazon DynamoDB endpoint
+        if source or target is dynamodb.
+    type: dict
+  s3settings:
+    description:
+      - S3 buckets settings for the target Amazon S3 endpoint.
+    type: dict
+  dmstransfersettings:
+    description:
+      - The settings in JSON format for the DMS transfer type of
+        source endpoint.
+    type: dict
+  mongodbsettings:
+    description:
+      - Settings in JSON format for the source MongoDB endpoint.
+    type: dict
+  kinesissettings:
+    description:
+      - Settings in JSON format for the target Amazon Kinesis
+        Data Streams endpoint.
+    type: dict
+  elasticsearchsettings:
+    description:
+      - Settings in JSON format for the target Elasticsearch endpoint.
+    type: dict
+  wait:
+    description:
+      - Whether Ansible should wait for the object to be deleted when I(state=absent).
+    type: bool
+    default: false
+  timeout:
+    description:
+      - Time in seconds to wait for the endpoint to be deleted.
+      - Required when I(wait=true).
+    type: int
+  retries:
+    description:
+      - Number of times to retry when deleting a resource.
+      - Required when I(wait=true).
+    type: int
+author:
+  - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details
+# Endpoint Creation
+- dms_endpoint:
+    state: present
+    endpointidentifier: 'testsource'
+    endpointtype: source
+    enginename: aurora
+    username: testing1
+    password: testint1234
+    servername: testing.domain.com
+    port: 3306
+    databasename: 'testdb'
+    sslmode: none
+    wait: false
+'''
+
+RETURN = ''' # '''
+
+import traceback
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+backoff_params = dict(tries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_endpoints(connection, endpoint_identifier):
+    """ checks if the endpoint exists """
+    try:
+        endpoint_filter = dict(Name='endpoint-id',
+                               Values=[endpoint_identifier])
+        return connection.describe_endpoints(Filters=[endpoint_filter])
+    except botocore.exceptions.ClientError:
+        return {'Endpoints': []}
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_delete_endpoint(client, **params):
+    """deletes the DMS endpoint based on the EndpointArn"""
+    if module.params.get('wait'):
+        return delete_dms_endpoint(client)
+    else:
+        return client.delete_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_create_endpoint(client, **params):
+    """ creates the DMS endpoint"""
+    return client.create_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def dms_modify_endpoint(client, **params):
+    """ updates the endpoint"""
+    return client.modify_endpoint(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def get_endpoint_deleted_waiter(client):
+    return client.get_waiter('endpoint_deleted')
+
+
+def endpoint_exists(endpoint):
+    """ Returns boolean based on the existence of the endpoint
+    :param endpoint: dict containing the described endpoint
+    :return: bool
+    """
+    return bool(len(endpoint['Endpoints']))
+
+
+def delete_dms_endpoint(connection):
+    try:
+        endpoint = describe_endpoints(connection,
+                                      module.params.get('endpointidentifier'))
+        endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
+        delete_arn = dict(
+            EndpointArn=endpoint_arn
+        )
+        if module.params.get('wait'):
+            delete_output = connection.delete_endpoint(**delete_arn)
+            delete_waiter = get_endpoint_deleted_waiter(connection)
+            delete_waiter.wait(
+                Filters=[{
+                    'Name': 'endpoint-arn',
+                    'Values': [endpoint_arn]
+                }],
+                WaiterConfig={
+                    'Delay': module.params.get('timeout'),
+                    'MaxAttempts': module.params.get('retries')
+                }
+            )
+            return delete_output
+        else:
+            return connection.delete_endpoint(**delete_arn)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to delete the DMS endpoint.",
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Failed to delete the DMS endpoint.",
+                         exception=traceback.format_exc())
+
+
+def create_module_params():
+    """
+    Reads the module parameters and returns a dict
+    :return: dict
+    """
+    endpoint_parameters = dict(
+        EndpointIdentifier=module.params.get('endpointidentifier'),
+        EndpointType=module.params.get('endpointtype'),
+        EngineName=module.params.get('enginename'),
+        Username=module.params.get('username'),
+        Password=module.params.get('password'),
+        ServerName=module.params.get('servername'),
+        Port=module.params.get('port'),
+        DatabaseName=module.params.get('databasename'),
+        SslMode=module.params.get('sslmode')
+    )
+    if module.params.get('EndpointArn'):
+        endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
+    if module.params.get('certificatearn'):
+        endpoint_parameters['CertificateArn'] = \
+            module.params.get('certificatearn')
+
+    if module.params.get('dmstransfersettings'):
+        endpoint_parameters['DmsTransferSettings'] = \
+            module.params.get('dmstransfersettings')
+
+    if module.params.get('extraconnectionattributes'):
+        endpoint_parameters['ExtraConnectionAttributes'] =\
+            module.params.get('extraconnectionattributes')
+
+    if module.params.get('kmskeyid'):
+        endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
+
+    if module.params.get('tags'):
+        endpoint_parameters['Tags'] = module.params.get('tags')
+
+    if module.params.get('serviceaccessrolearn'):
+        endpoint_parameters['ServiceAccessRoleArn'] = \
+            module.params.get('serviceaccessrolearn')
+
+    if module.params.get('externaltabledefinition'):
+        endpoint_parameters['ExternalTableDefinition'] = \
+            module.params.get('externaltabledefinition')
+
+    if module.params.get('dynamodbsettings'):
+        endpoint_parameters['DynamoDbSettings'] = \
+            module.params.get('dynamodbsettings')
+
+    if module.params.get('s3settings'):
+        endpoint_parameters['S3Settings'] = module.params.get('s3settings')
+
+    if module.params.get('mongodbsettings'):
+        endpoint_parameters['MongoDbSettings'] = \
+            module.params.get('mongodbsettings')
+
+    if module.params.get('kinesissettings'):
+        endpoint_parameters['KinesisSettings'] = \
+            module.params.get('kinesissettings')
+
+    if module.params.get('elasticsearchsettings'):
+        endpoint_parameters['ElasticsearchSettings'] = \
+            module.params.get('elasticsearchsettings')
+
+    # Note: wait, timeout and retries are module-level options, not API
+    # parameters, so they must not be added to endpoint_parameters; the
+    # create_endpoint/modify_endpoint calls would reject them.
+
+    return endpoint_parameters
+
+
+def compare_params(param_described):
+    """
+    Compares the dict obtained from the describe DMS endpoint call with
+    the values built from the module parameters. The password can never
+    be compared, as boto3's method for describing a DMS endpoint does
+    not return it (presumably for security reasons).
+    """
+    modparams = create_module_params()
+    changed = False
+    for paramname in modparams:
+        if (paramname == 'Password' or
+                (paramname in param_described and
+                 param_described[paramname] == modparams[paramname]) or
+                str(param_described.get(paramname)).lower() == modparams[paramname]):
+            pass
+        else:
+            changed = True
+    return changed
+
+
+def modify_dms_endpoint(connection):
+    try:
+        params = create_module_params()
+        return dms_modify_endpoint(connection, **params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to update DMS endpoint.",
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Failed to update DMS endpoint.",
+                         exception=traceback.format_exc())
+
+
+def create_dms_endpoint(connection):
+    """
+    Function to
create the dms endpoint
+    :param connection: boto3 aws connection
+    :return: information about the dms endpoint object
+    """
+
+    try:
+        params = create_module_params()
+        return dms_create_endpoint(connection, **params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to create DMS endpoint.",
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Failed to create DMS endpoint.",
+                         exception=traceback.format_exc())
+
+
+def main():
+    argument_spec = dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        endpointidentifier=dict(required=True),
+        endpointtype=dict(choices=['source', 'target'], required=True),
+        enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
+                                 'aurora', 'redshift', 's3', 'db2', 'azuredb',
+                                 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
+                        required=True),
+        username=dict(),
+        password=dict(no_log=True),
+        servername=dict(),
+        port=dict(type='int'),
+        databasename=dict(),
+        extraconnectionattributes=dict(),
+        kmskeyid=dict(),
+        tags=dict(type='dict'),
+        certificatearn=dict(),
+        sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
+                     default='none'),
+        serviceaccessrolearn=dict(),
+        externaltabledefinition=dict(),
+        dynamodbsettings=dict(type='dict'),
+        s3settings=dict(type='dict'),
+        dmstransfersettings=dict(type='dict'),
+        mongodbsettings=dict(type='dict'),
+        kinesissettings=dict(type='dict'),
+        elasticsearchsettings=dict(type='dict'),
+        wait=dict(type='bool', default=False),
+        timeout=dict(type='int'),
+        retries=dict(type='int')
+    )
+    global module
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ["state", "absent", ["wait"]],
+            ["wait", True, ["timeout"]],
+            ["wait", True, ["retries"]],
+        ],
+        supports_check_mode=False
+    )
+    exit_message = None
+    changed = False
+
+    state = module.params.get('state')
+
+    dmsclient = module.client('dms')
+    endpoint = describe_endpoints(dmsclient,
+                                  module.params.get('endpointidentifier'))
+    if state == 'present':
+        if endpoint_exists(endpoint):
+            module.params['EndpointArn'] = \
+                endpoint['Endpoints'][0].get('EndpointArn')
+            params_changed = compare_params(endpoint["Endpoints"][0])
+            if params_changed:
+                updated_dms = modify_dms_endpoint(dmsclient)
+                exit_message = updated_dms
+                changed = True
+            else:
+                module.exit_json(changed=False, msg="Endpoint Already Exists")
+        else:
+            dms_properties = create_dms_endpoint(dmsclient)
+            exit_message = dms_properties
+            changed = True
+    elif state == 'absent':
+        if endpoint_exists(endpoint):
+            delete_results = delete_dms_endpoint(dmsclient)
+            exit_message = delete_results
+            changed = True
+        else:
+            changed = False
+            exit_message = 'DMS Endpoint does not exist'
+
+    module.exit_json(changed=changed, msg=exit_message)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py
new file mode 100644
index 00000000000..2a786ff78fd
--- /dev/null
+++ b/dms_replication_subnet_group.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: dms_replication_subnet_group
+short_description: Creates or destroys a
data migration services subnet group
+description:
+  - Creates or destroys a data migration services subnet group.
+options:
+  state:
+    description:
+      - State of the subnet group.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  identifier:
+    description:
+      - The name for the replication subnet group.
+        This value is stored as a lowercase string.
+        Must contain no more than 255 alphanumeric characters,
+        periods, spaces, underscores, or hyphens. Must not be "default".
+    type: str
+    required: true
+  description:
+    description:
+      - The description for the subnet group.
+    type: str
+    required: true
+  subnet_ids:
+    description:
+      - A list containing the subnet ids for the replication subnet group.
+      - At least two subnets must be specified.
+    type: list
+    elements: str
+    required: true
+author:
+  - "Rui Moreira (@ruimoreira)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- dms_replication_subnet_group:
+    state: present
+    identifier: "dev-sngroup"
+    description: "Development subnet group"
+    subnet_ids: ['subnet-id1', 'subnet-id2']
+'''
+
+RETURN = ''' # '''
+
+import traceback
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+backoff_params = dict(tries=5, delay=1, backoff=1.5)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_subnet_group(connection, subnet_group):
+    """checks if the replication subnet group exists"""
+    try:
+        subnet_group_filter = dict(Name='replication-subnet-group-id',
+                                   Values=[subnet_group])
+        return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter])
+    except botocore.exceptions.ClientError:
+        return {'ReplicationSubnetGroups': []}
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_create(connection, **params):
+    """ creates the replication subnet group """
+    return connection.create_replication_subnet_group(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_modify(connection, **modify_params):
+    return connection.modify_replication_subnet_group(**modify_params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def replication_subnet_group_delete(module, connection):
+    subnetid = module.params.get('identifier')
+    delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid)
+    return connection.delete_replication_subnet_group(**delete_parameters)
+
+
+def replication_subnet_exists(subnet):
+    """ Returns boolean based on the existence of the replication subnet group
+    :param subnet: dict containing the described replication subnet groups
+    :return: bool
+    """
+    return bool(len(subnet['ReplicationSubnetGroups']))
+
+
+def create_module_params(module):
+    """
+    Reads the module parameters and returns a dict
+    :return: dict
+    """
+    instance_parameters = dict(
+        # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
+        ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
+        ReplicationSubnetGroupDescription=module.params.get('description'),
+        SubnetIds=module.params.get('subnet_ids'),
+    )
+
+    return instance_parameters
+
+
+def compare_params(module, param_described):
+    """
+    Compares the dict obtained from the describe function with the
+    values built from the module parameters.
+    """
+    modparams = create_module_params(module)
+    changed = False
+    # need to sanitize values that get returned from the API
+    if 'VpcId' in param_described.keys():
+        param_described.pop('VpcId')
+    if 'SubnetGroupStatus' in param_described.keys():
+        param_described.pop('SubnetGroupStatus')
+    for paramname in modparams.keys():
+        if paramname in param_described.keys() and \
+                param_described.get(paramname) == modparams[paramname]:
+            pass
+        elif paramname == 'SubnetIds':
+            subnets = []
+            for subnet in param_described.get('Subnets'):
+                subnets.append(subnet.get('SubnetIdentifier'))
+            for modulesubnet in modparams['SubnetIds']:
+                if modulesubnet not in subnets:
+                    changed = True
+        else:
+            changed = True
+    return changed
+
+
+def create_replication_subnet_group(module, connection):
+    try:
+        params = create_module_params(module)
+        return replication_subnet_group_create(connection, **params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to create DMS replication subnet group.",
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Failed to create DMS replication subnet group.",
+                         exception=traceback.format_exc())
+
+
+def modify_replication_subnet_group(module, connection):
+    try:
+        modify_params = create_module_params(module)
+        return replication_subnet_group_modify(connection, **modify_params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to modify the DMS replication subnet group.",
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Failed to modify the DMS replication subnet group.",
+                         exception=traceback.format_exc())
+
+
+def main():
+    argument_spec = dict(
+        state=dict(type='str', choices=['present', 'absent'], default='present'),
+        identifier=dict(type='str', required=True),
+        description=dict(type='str', required=True),
+        subnet_ids=dict(type='list', elements='str', required=True),
+    )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+    exit_message = None
+    changed = False
+
+    state = module.params.get('state')
+    dmsclient = module.client('dms')
+    subnet_group = describe_subnet_group(dmsclient,
+                                         module.params.get('identifier'))
+    if state == 'present':
+        if replication_subnet_exists(subnet_group):
+            if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
+                if not module.check_mode:
+                    exit_message = modify_replication_subnet_group(module, dmsclient)
+                else:
+                    exit_message = "Check mode enabled: subnet group would be modified"
+                changed = True
+            else:
+                exit_message = "No changes to Subnet group"
+        else:
+            if not module.check_mode:
+                exit_message = create_replication_subnet_group(module, dmsclient)
+            else:
+                exit_message = "Check mode enabled: subnet group would be created"
+            changed = True
+
+    elif state == 'absent':
+        if replication_subnet_exists(subnet_group):
+            if not module.check_mode:
+                replication_subnet_group_delete(module, dmsclient)
+                exit_message = "Replication subnet group deleted"
+            else:
+                exit_message = "Check mode enabled: subnet group would be deleted"
+            changed = True
+
+        else:
+            changed = False
+            exit_message = "Replication subnet group does not exist"
+
+    module.exit_json(changed=changed, msg=exit_message)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/dynamodb_table.py b/dynamodb_table.py
new file mode 100644
index 00000000000..1ecaf22184f
--- /dev/null
+++ b/dynamodb_table.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dynamodb_table +short_description: Create, update or delete AWS Dynamo DB tables +description: + - Create or delete AWS Dynamo DB tables. + - Can update the provisioned throughput on existing tables. + - Returns the status of the specified table. +author: Alan Loi (@loia) +requirements: + - "boto >= 2.37.0" + - "boto3 >= 1.4.4 (for tagging)" +options: + state: + description: + - Create or delete the table. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the table. + required: true + type: str + hash_key_name: + description: + - Name of the hash key. + - Required when C(state=present). + type: str + hash_key_type: + description: + - Type of the hash key. + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + type: str + range_key_name: + description: + - Name of the range key. + type: str + range_key_type: + description: + - Type of the range key. + choices: ['STRING', 'NUMBER', 'BINARY'] + default: 'STRING' + type: str + read_capacity: + description: + - Read throughput capacity (units) to provision. + default: 1 + type: int + write_capacity: + description: + - Write throughput capacity (units) to provision. + default: 1 + type: int + indexes: + description: + - list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput. + - "required options: ['name', 'type', 'hash_key_name']" + - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']" + suboptions: + name: + description: The name of the index. + type: str + required: true + type: + description: + - The type of index. + - "Valid types: C(all), C(global_all), C(global_include), C(global_keys_only), C(include), C(keys_only)" + type: str + required: true + hash_key_name: + description: The name of the hash-based key. + required: true + type: str + hash_key_type: + description: The type of the hash-based key. + type: str + range_key_name: + description: The name of the range-based key. + type: str + range_key_type: + type: str + description: The type of the range-based key. + includes: + type: list + description: A list of fields to include when using C(global_include) or C(include) indexes. + read_capacity: + description: + - Read throughput capacity (units) to provision for the index. + type: int + write_capacity: + description: + - Write throughput capacity (units) to provision for the index. + type: int + default: [] + type: list + elements: dict + tags: + description: + - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag. + - 'For example: C({"key":"value"}) and C({"key":"value","key2":"value2"})' + type: dict + wait_for_active_timeout: + description: + - how long before wait gives up, in seconds. 
only used when tags is set + default: 60 + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Create dynamo table with hash and range primary key +- dynamodb_table: + name: my-table + region: us-east-1 + hash_key_name: id + hash_key_type: STRING + range_key_name: create_time + range_key_type: NUMBER + read_capacity: 2 + write_capacity: 2 + tags: + tag_name: tag_value + +# Update capacity on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + read_capacity: 10 + write_capacity: 10 + +# set index on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + indexes: + - name: NamedIndex + type: global_include + hash_key_name: id + range_key_name: create_time + includes: + - other_field + - other_field2 + read_capacity: 10 + write_capacity: 10 + +# Delete dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + state: absent +''' + +RETURN = ''' +table_status: + description: The current status of the table. + returned: success + type: str + sample: ACTIVE +''' + +import time +import traceback + +try: + import boto + import boto.dynamodb2 + from boto.dynamodb2.table import Table + from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex + from boto.dynamodb2.types import STRING, NUMBER, BINARY + from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError + from boto.dynamodb2.exceptions import ValidationException + HAS_BOTO = True + + DYNAMO_TYPE_MAP = { + 'STRING': STRING, + 'NUMBER': NUMBER, + 'BINARY': BINARY + } + +except ImportError: + HAS_BOTO = False + +try: + import botocore + from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +DYNAMO_TYPE_DEFAULT = 'STRING' +INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] +INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] +INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] + + +def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None): + table_name = module.params.get('name') + hash_key_name = module.params.get('hash_key_name') + hash_key_type = module.params.get('hash_key_type') + range_key_name = module.params.get('range_key_name') + range_key_type = module.params.get('range_key_type') + read_capacity = module.params.get('read_capacity') + write_capacity = module.params.get('write_capacity') + all_indexes = module.params.get('indexes') + tags = module.params.get('tags') + wait_for_active_timeout = module.params.get('wait_for_active_timeout') + + for index in all_indexes: + validate_index(index, module) + + schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type) + + throughput = { + 'read': read_capacity, + 'write': write_capacity + } + + indexes, global_indexes = get_indexes(all_indexes) + + result = dict( + region=region, + table_name=table_name, + hash_key_name=hash_key_name, + hash_key_type=hash_key_type, + range_key_name=range_key_name, + 
range_key_type=range_key_type, + read_capacity=read_capacity, + write_capacity=write_capacity, + indexes=all_indexes, + ) + + try: + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes) + else: + if not module.check_mode: + Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes) + result['changed'] = True + + if not module.check_mode: + result['table_status'] = table.describe()['Table']['TableStatus'] + + if tags: + # only tables which are active can be tagged + wait_until_table_active(module, table, wait_for_active_timeout) + account_id = get_account_id(boto3_sts) + boto3_dynamodb.tag_resource( + ResourceArn='arn:aws:dynamodb:' + + region + + ':' + + account_id + + ':table/' + + table_name, + Tags=ansible_dict_to_boto3_tag_list(tags)) + result['tags'] = tags + + except BotoServerError: + result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def get_account_id(boto3_sts): + return boto3_sts.get_caller_identity()["Account"] + + +def wait_until_table_active(module, table, wait_timeout): + max_wait_time = time.time() + wait_timeout + while (max_wait_time > time.time()) and (table.describe()['Table']['TableStatus'] != 'ACTIVE'): + time.sleep(5) + if max_wait_time <= time.time(): + # waiting took too long + module.fail_json(msg="timed out waiting for table to exist") + + +def delete_dynamo_table(connection, module): + table_name = module.params.get('name') + + result = dict( + region=module.params.get('region'), + table_name=table_name, + ) + + try: + table = Table(table_name, connection=connection) + + if dynamo_table_exists(table): + if not module.check_mode: + table.delete() + result['changed'] = True + + else: + result['changed'] = False + + except BotoServerError: + result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def dynamo_table_exists(table): + try: + table.describe() + return True + + except JSONResponseError as e: + if e.message and e.message.startswith('Requested resource not found'): + return False + else: + raise e + + +def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None): + table.describe() # populate table details + throughput_changed = False + global_indexes_changed = False + if has_throughput_changed(table, throughput): + if not check_mode: + throughput_changed = table.update(throughput=throughput) + else: + throughput_changed = True + + removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes) + if removed_indexes: + if not check_mode: + for name, index in removed_indexes.items(): + global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed + else: + global_indexes_changed = True + + if added_indexes: + if not check_mode: + for name, index in added_indexes.items(): + global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed + else: + global_indexes_changed = True + + if index_throughput_changes: + if not check_mode: + # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed + try: + global_indexes_changed = 
table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed + except ValidationException: + pass + else: + global_indexes_changed = True + + return throughput_changed or global_indexes_changed + + +def has_throughput_changed(table, new_throughput): + if not new_throughput: + return False + + return new_throughput['read'] != table.throughput['read'] or \ + new_throughput['write'] != table.throughput['write'] + + +def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type): + if range_key_name: + schema = [ + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])), + RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) + ] + else: + schema = [ + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) + ] + return schema + + +def get_changed_global_indexes(table, global_indexes): + table.describe() + + table_index_info = dict((index.name, index.schema()) for index in table.global_indexes) + table_index_objects = dict((index.name, index) for index in table.global_indexes) + set_index_info = dict((index.name, index.schema()) for index in global_indexes) + set_index_objects = dict((index.name, index) for index in global_indexes) + + removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info) + added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info) + # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed + # for name, index in set_index_objects.items(): + # if (name not in added_indexes and + # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or + # index.throughput['write'] != str(table_index_objects[name].throughput['write']))): + # index_throughput_changes[name] = index.throughput + # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed + index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes) + + return removed_indexes, added_indexes, index_throughput_changes + + +def validate_index(index, module): + for key, val in index.items(): + if key not in INDEX_OPTIONS: + module.fail_json(msg='%s is not a valid option for an index' % key) + for required_option in INDEX_REQUIRED_OPTIONS: + if required_option not in index: + module.fail_json(msg='%s is a required option for an index' % required_option) + if index['type'] not in INDEX_TYPE_OPTIONS: + module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS)) + + +def get_indexes(all_indexes): + indexes = [] + global_indexes = [] + for index in all_indexes: + name = index['name'] + schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type')) + throughput = { + 'read': index.get('read_capacity', 1), + 'write': index.get('write_capacity', 1) + } + + if index['type'] == 'all': + indexes.append(AllIndex(name, parts=schema)) + + elif index['type'] == 'global_all': + global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput)) + + elif index['type'] == 'global_include': + global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes'])) + + elif index['type'] == 'global_keys_only': + 
global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput)) + + elif index['type'] == 'include': + indexes.append(IncludeIndex(name, parts=schema, includes=index['includes'])) + + elif index['type'] == 'keys_only': + indexes.append(KeysOnlyIndex(name, parts=schema)) + + return indexes, global_indexes + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='str'), + hash_key_name=dict(type='str'), + hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + range_key_name=dict(type='str'), + range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + read_capacity=dict(default=1, type='int'), + write_capacity=dict(default=1, type='int'), + indexes=dict(default=[], type='list'), + tags=dict(type='dict'), + wait_for_active_timeout=dict(default=60, type='int'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + if not HAS_BOTO3 and module.params.get('tags'): + module.fail_json(msg='boto3 required when using tags for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg='region must be specified') + + try: + connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) + except (NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + if module.params.get('tags'): + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs) + if not hasattr(boto3_dynamodb, 'tag_resource'): + module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version') + boto3_sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc()) + else: + boto3_dynamodb = None + boto3_sts = None + + state = module.params.get('state') + if state == 'present': + create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region) + elif state == 'absent': + delete_dynamo_table(connection, module) + + +if __name__ == '__main__': + main() diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py new file mode 100644 index 00000000000..5ed0488d686 --- /dev/null +++ b/dynamodb_ttl.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: dynamodb_ttl +short_description: Set TTL for a given DynamoDB table +description: +- Uses boto3 to set TTL. +- Requires botocore version 1.5.24 or higher. +options: + state: + description: + - State to set DynamoDB table to. + choices: ['enable', 'disable'] + required: false + type: str + table_name: + description: + - Name of the DynamoDB table to work on. 
+ required: true + type: str + attribute_name: + description: + - The name of the Time To Live attribute used to store the expiration time for items in the table. + - This appears to be required by the API even when disabling TTL. + required: true + type: str + +author: Ted Timmons (@tedder) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ botocore>=1.5.24, boto3 ] +''' + +EXAMPLES = ''' +- name: enable TTL on my cowfacts table + dynamodb_ttl: + state: enable + table_name: cowfacts + attribute_name: cow_deleted_date + +- name: disable TTL on my cowfacts table + dynamodb_ttl: + state: disable + table_name: cowfacts + attribute_name: cow_deleted_date +''' + +RETURN = ''' +current_status: + description: current or new TTL specification. + type: dict + returned: always + sample: + - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" } + - { "AttributeName": "deploy_timestamp", "Enabled": true } +''' + +import distutils.version +import traceback + +try: + import botocore +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info + + +def get_current_ttl_state(c, table_name): + '''Fetch the state dict for a table.''' + current_state = c.describe_time_to_live(TableName=table_name) + return current_state.get('TimeToLiveDescription') + + +def does_state_need_changing(attribute_name, desired_state, current_spec): + '''Run checks to see if the table needs to be modified. Basically a dirty check.''' + if not current_spec: + # we don't have an entry (or a table?) + return True + + if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']: + return True + if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']: + return True + if attribute_name != current_spec.get('AttributeName'): + return True + + return False + + +def set_ttl_state(c, table_name, state, attribute_name): + '''Set our specification. Returns the update_time_to_live specification dict, + which is different than the describe_* call.''' + is_enabled = False + if state.lower() == 'enable': + is_enabled = True + + ret = c.update_time_to_live( + TableName=table_name, + TimeToLiveSpecification={ + 'Enabled': is_enabled, + 'AttributeName': attribute_name + } + ) + + return ret.get('TimeToLiveSpecification') + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(choices=['enable', 'disable']), + table_name=dict(required=True), + attribute_name=dict(required=True)) + ) + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + elif distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'): + # TTL was added in this version. 
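+        # The calls this module makes (describe_time_to_live and
+        # update_time_to_live) first appeared in botocore 1.5.24.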
+ module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24')) + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + dbclient = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg=str(e)) + + result = {'changed': False} + state = module.params['state'] + + # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the + # methods so it's easier to do here. + try: + current_state = get_current_ttl_state(dbclient, module.params['table_name']) + + if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state): + # changes needed + new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name']) + result['current_status'] = new_state + result['changed'] = True + else: + # no changes needed + result['current_status'] = current_state + + except botocore.exceptions.ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.ParamValidationError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc()) + except ValueError as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py new file mode 100644 index 00000000000..2cffae21342 --- /dev/null +++ b/ec2_ami_copy.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_ami_copy +short_description: copies AMI between AWS regions, return new image id +description: + - Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.) +options: + source_region: + description: + - The source region the AMI should be copied from. + required: true + type: str + source_image_id: + description: + - The ID of the AMI in source region that should be copied. + required: true + type: str + name: + description: + - The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.) + default: "default" + type: str + description: + description: + - An optional human-readable string describing the contents and purpose of the new AMI. + type: str + encrypted: + description: + - Whether or not the destination snapshots of the copied AMI should be encrypted. + type: bool + kms_key_id: + description: + - KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account. + type: str + wait: + description: + - Wait for the copied AMI to be in state 'available' before returning. + type: bool + default: 'no' + wait_timeout: + description: + - How long before wait gives up, in seconds. Prior to 2.3 the default was 1200. + - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults. + This was reenabled in 2.6 to allow timeouts greater than 10 minutes. 
+ default: 600 + type: int + tags: + description: + - 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})' + type: dict + tag_equality: + description: + - Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match + in an existing AMI, the AMI will not be copied again. + default: false + type: bool +author: +- Amir Moulavi (@amir343) +- Tim C (@defunctio) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 +''' + +EXAMPLES = ''' +# Basic AMI Copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + +# AMI copy wait until available +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + wait: yes + wait_timeout: 1200 # Default timeout is 600 + register: image_id + +# Named AMI copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + name: My-Awesome-AMI + description: latest patch + +# Tagged AMI copy (will not copy the same AMI twice) +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + tags: + Name: My-Super-AMI + Patch: 1.2.3 + tag_equality: yes + +# Encrypted AMI copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + encrypted: yes + +# Encrypted AMI copy with specified key +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + encrypted: yes + kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b +''' + +RETURN = ''' +image_id: + description: AMI ID of the copied AMI + returned: always + type: str + sample: ami-e689729e +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible.module_utils._text import to_native + +try: + from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + + +def copy_image(module, ec2): + """ + Copies an AMI + + module : AnsibleModule object + ec2: ec2 connection object + """ + + image = None + changed = False + tags = module.params.get('tags') + + params = {'SourceRegion': module.params.get('source_region'), + 'SourceImageId': module.params.get('source_image_id'), + 'Name': module.params.get('name'), + 'Description': module.params.get('description'), + 'Encrypted': module.params.get('encrypted'), + } + if module.params.get('kms_key_id'): + params['KmsKeyId'] = module.params.get('kms_key_id') + + try: + if module.params.get('tag_equality'): + filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()] + filters.append(dict(Name='state', Values=['available', 'pending'])) + images = ec2.describe_images(Filters=filters) + if len(images['Images']) > 0: + image = images['Images'][0] + if not image: + image = ec2.copy_image(**params) + image_id = image['ImageId'] + if tags: + ec2.create_tags(Resources=[image_id], + Tags=ansible_dict_to_boto3_tag_list(tags)) + changed = True + + if module.params.get('wait'): + delay = 15 + max_attempts = module.params.get('wait_timeout') // delay + image_id = image.get('ImageId') + ec2.get_waiter('image_available').wait( + ImageIds=[image_id], + WaiterConfig={'Delay': 
delay, 'MaxAttempts': max_attempts} + ) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(image)) + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the image to become available') + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Could not copy AMI") + except Exception as e: + module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + + +def main(): + argument_spec = dict( + source_region=dict(required=True), + source_image_id=dict(required=True), + name=dict(default='default'), + description=dict(default=''), + encrypted=dict(type='bool', default=False, required=False), + kms_key_id=dict(type='str', required=False), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=600), + tags=dict(type='dict'), + tag_equality=dict(type='bool', default=False)) + + module = AnsibleAWSModule(argument_spec=argument_spec) + # TODO: Check botocore version + ec2 = module.client('ec2') + copy_image(module, ec2) + + +if __name__ == '__main__': + main() diff --git a/ec2_asg.py b/ec2_asg.py new file mode 100644 index 00000000000..136fb4e3cde --- /dev/null +++ b/ec2_asg.py @@ -0,0 +1,1808 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_asg +short_description: Create or delete AWS AutoScaling Groups (ASGs) +description: + - Can create or delete AWS AutoScaling Groups. + - Can be used with the M(ec2_lc) module to manage Launch Configurations. +author: "Gareth Rushgrove (@garethr)" +requirements: [ "boto3", "botocore" ] +options: + state: + description: + - Register or deregister the instance. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for group to be created or deleted. + required: true + type: str + load_balancers: + description: + - List of ELB names to use for the group. Use for classic load balancers. + type: list + elements: str + target_group_arns: + description: + - List of target group ARNs to use for the group. Use for application load balancers. + type: list + elements: str + availability_zones: + description: + - List of availability zone names in which to create the group. + - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set. + type: list + elements: str + launch_config_name: + description: + - Name of the Launch configuration to use for the group. See the M(ec2_lc) module for managing these. + - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided. + type: str + launch_template: + description: + - Dictionary describing the Launch Template to use + suboptions: + version: + description: + - The version number of the launch template to use. + - Defaults to latest version if not provided. + type: str + launch_template_name: + description: + - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required. + type: str + launch_template_id: + description: + - The id of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required. 
+        type: str
+    type: dict
+  min_size:
+    description:
+      - Minimum number of instances in group. If unspecified, the current group value will be used.
+    type: int
+  max_size:
+    description:
+      - Maximum number of instances in group. If unspecified, the current group value will be used.
+    type: int
+  max_instance_lifetime:
+    description:
+      - The maximum amount of time, in seconds, that an instance can be in service.
+      - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
+      - A value of 0 removes the lifetime restriction.
+    type: int
+  mixed_instances_policy:
+    description:
+      - A mixed instance policy to use for the ASG.
+      - Only used when the ASG is configured to use a Launch Template (I(launch_template)).
+      - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)'
+    required: false
+    suboptions:
+      instance_types:
+        description:
+          - A list of instance_types.
+        type: list
+        elements: str
+    type: dict
+  placement_group:
+    description:
+      - Physical location of your cluster placement group created in Amazon EC2.
+    type: str
+  desired_capacity:
+    description:
+      - Desired number of instances in group. If unspecified, the current group value will be used.
+    type: int
+  replace_all_instances:
+    description:
+      - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration.
+        It increases the ASG size by I(replace_batch_size), waits for the new instances to be up and running,
+        then terminates a batch of old instances, waits for the replacements, and repeats until all old instances are replaced.
+        Once that's done, the ASG size is reduced back to the expected size.
+    default: false
+    type: bool
+  replace_batch_size:
+    description:
+      - Number of instances you'd like to replace at a time. Used with I(replace_all_instances).
+    required: false
+    default: 1
+    type: int
+  replace_instances:
+    description:
+      - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances
+        matching the current launch configuration.
+    type: list
+    elements: str
+  lc_check:
+    description:
+      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config).
+    default: true
+    type: bool
+  lt_check:
+    description:
+      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
+        I(launch_template) or I(launch_template) I(version).
+    default: true
+    type: bool
+  vpc_zone_identifier:
+    description:
+      - List of VPC subnets to use.
+    type: list
+    elements: str
+  tags:
+    description:
+      - A list of tags to add to the Auto Scaling Group.
+      - Optional key is I(propagate_at_launch), which defaults to true.
+      - When I(propagate_at_launch) is true the tags will be propagated to the Instances created.
+    type: list
+    elements: dict
+  health_check_period:
+    description:
+      - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+    required: false
+    default: 300
+    type: int
+  health_check_type:
+    description:
+      - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
+    required: false
+    default: EC2
+    choices: ['EC2', 'ELB']
+    type: str
+  default_cooldown:
+    description:
+      - The number of seconds after a scaling activity completes before another can begin.
+ default: 300 + type: int + wait_timeout: + description: + - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy", + try increasing this value. + default: 300 + type: int + wait_for_instances: + description: + - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all + instances have a lifecycle_state of "InService" and a health_status of "Healthy". + default: true + type: bool + termination_policies: + description: + - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. + - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained + instead of changed to C(Default). + - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)' + - 'Full documentation of valid values can be found in the AWS documentation:' + - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)' + default: Default + type: list + elements: str + notification_topic: + description: + - A SNS topic ARN to send auto scaling notifications to. + type: str + notification_types: + description: + - A list of auto scaling events to trigger notifications on. + default: + - 'autoscaling:EC2_INSTANCE_LAUNCH' + - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR' + - 'autoscaling:EC2_INSTANCE_TERMINATE' + - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + required: false + type: list + elements: str + suspend_processes: + description: + - A list of scaling processes to suspend. + - 'Valid values include:' + - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer) + - 'Full documentation of valid values can be found in the AWS documentation:' + - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)' + default: [] + type: list + elements: str + metrics_collection: + description: + - Enable ASG metrics collection. + type: bool + default: false + metrics_granularity: + description: + - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch. + default: "1Minute" + type: str + metrics_list: + description: + - List of autoscaling metrics to collect when I(metrics_collection=true). + default: + - 'GroupMinSize' + - 'GroupMaxSize' + - 'GroupDesiredCapacity' + - 'GroupInServiceInstances' + - 'GroupPendingInstances' + - 'GroupStandbyInstances' + - 'GroupTerminatingInstances' + - 'GroupTotalInstances' + type: list + elements: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Basic configuration with Launch Configuration + +- ec2_asg: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_config_name: 'lc-1' + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: no + +# Rolling ASG Updates + +# Below is an example of how to assign a new launch config to an ASG and terminate old instances. 
+# +# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in +# a rolling fashion with instances using the current launch configuration, "my_new_lc". +# +# This could also be considered a rolling deploy of a pre-baked AMI. +# +# If this is a newly created group, the instances will not be replaced since all instances +# will have the current launch configuration. + +- name: create launch config + ec2_lc: + name: my_new_lc + image_id: ami-lkajsf + key_name: mykey + region: us-east-1 + security_groups: sg-23423 + instance_type: m1.small + assign_public_ip: yes + +- ec2_asg: + name: myasg + launch_config_name: my_new_lc + health_check_period: 60 + health_check_type: ELB + replace_all_instances: yes + min_size: 5 + max_size: 5 + desired_capacity: 5 + region: us-east-1 + +# To only replace a couple of instances instead of all of them, supply a list +# to "replace_instances": + +- ec2_asg: + name: myasg + launch_config_name: my_new_lc + health_check_period: 60 + health_check_type: ELB + replace_instances: + - i-b345231 + - i-24c2931 + min_size: 5 + max_size: 5 + desired_capacity: 5 + region: us-east-1 + +# Basic Configuration with Launch Template + +- ec2_asg: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_template: + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: no + +# Basic Configuration with Launch Template using mixed instance policy + +- ec2_asg: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_template: + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' + mixed_instances_policy: + instance_types: + - t3a.large + - t3.large + - t2.large + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: no +''' + +RETURN = ''' +--- +auto_scaling_group_name: + description: The unique name of the auto scaling group + returned: success + type: str + sample: "myasg" +auto_scaling_group_arn: + description: The unique ARN of the autoscaling group + returned: success + type: str + sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg" +availability_zones: + description: The availability zones for the auto scaling group + returned: success + type: list + sample: [ + "us-east-1d" + ] +created_time: + description: Timestamp of create time of the auto scaling group + returned: success + type: str + sample: "2017-11-08T14:41:48.272000+00:00" +default_cooldown: + description: The default cooldown time in seconds. + returned: success + type: int + sample: 300 +desired_capacity: + description: The number of EC2 instances that should be running in this group. + returned: success + type: int + sample: 3 +healthcheck_period: + description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. + returned: success + type: int + sample: 30 +healthcheck_type: + description: The service you want the health status from, one of "EC2" or "ELB". 
+    returned: success
+    type: str
+    sample: "ELB"
+healthy_instances:
+    description: Number of instances in a healthy state
+    returned: success
+    type: int
+    sample: 5
+in_service_instances:
+    description: Number of instances in service
+    returned: success
+    type: int
+    sample: 3
+instance_facts:
+    description: Dictionary of EC2 instances and their status as it relates to the ASG.
+    returned: success
+    type: dict
+    sample: {
+        "i-0123456789012": {
+            "health_status": "Healthy",
+            "launch_config_name": "public-webapp-production-1",
+            "lifecycle_state": "InService"
+        }
+    }
+instances:
+    description: List of instance IDs in the ASG
+    returned: success
+    type: list
+    sample: [
+        "i-0123456789012"
+    ]
+launch_config_name:
+    description: >
+      Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+      provided for compatibility with ec2_asg module.
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+load_balancers:
+    description: List of load balancer names attached to the ASG.
+    returned: success
+    type: list
+    sample: ["elb-webapp-prod"]
+max_instance_lifetime:
+    description: The maximum amount of time, in seconds, that an instance can be in service.
+    returned: success
+    type: int
+    sample: 604800
+max_size:
+    description: Maximum size of group
+    returned: success
+    type: int
+    sample: 3
+min_size:
+    description: Minimum size of group
+    returned: success
+    type: int
+    sample: 1
+mixed_instances_policy:
+    description: Returns the list of instance types if a mixed instances policy is set.
+    returned: success
+    type: list
+    sample: ["t3.micro", "t3a.micro"]
+pending_instances:
+    description: Number of instances in pending state
+    returned: success
+    type: int
+    sample: 1
+tags:
+    description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+    returned: success
+    type: list
+    sample: [
+        {
+            "key": "Name",
+            "value": "public-webapp-production-1",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        },
+        {
+            "key": "env",
+            "value": "production",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        }
+    ]
+target_group_arns:
+    description: List of ARNs of the target groups that the ASG populates
+    returned: success
+    type: list
+    sample: [
+        "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+        "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+    ]
+target_group_names:
+    description: List of names of the target groups that the ASG populates
+    returned: success
+    type: list
+    sample: [
+        "target-group-host-hello",
+        "target-group-path-world"
+    ]
+termination_policies:
+    description: A list of termination policies for the group.
+    returned: success
+    type: list
+    sample: ["Default"]
+unhealthy_instances:
+    description: Number of instances in an unhealthy state
+    returned: success
+    type: int
+    sample: 0
+viable_instances:
+    description: Number of instances in a viable state
+    returned: success
+    type: int
+    sample: 1
+vpc_zone_identifier:
+    description: VPC zone ID / subnet id for the auto scaling group
+    returned: success
+    type: str
+    sample: "subnet-a31ef45f"
+metrics_collection:
+    description: List of enabled AutoScalingGroup metrics
+    returned: success
+    type: list
+    sample: [
+        {
+            "Granularity": "1Minute",
+            "Metric": "GroupInServiceInstances"
+        }
+    ]
+'''
+
+import time
+import traceback
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (
+    AWSRetry,
+    camel_dict_to_snake_dict
+)
+
+try:
+    import botocore
+except ImportError:
+    pass  # handled by AnsibleAWSModule (imported above)
+
+ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
+                  'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
+                  'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
+                  'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
+                  'VPCZoneIdentifier')

+INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
+
+backoff_params = dict(tries=10, delay=3, backoff=1.5)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_autoscaling_groups(connection, group_name):
+    pg = connection.get_paginator('describe_auto_scaling_groups')
+    return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
+
+
+@AWSRetry.backoff(**backoff_params)
+def deregister_lb_instances(connection, lb_name, instance_id):
+    connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_instance_health(connection, lb_name, instances):
+    params = dict(LoadBalancerName=lb_name)
+    if instances:
+        params.update(Instances=instances)
+    return connection.describe_instance_health(**params)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_target_health(connection, target_group_arn, instances):
+    return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
+
+
+@AWSRetry.backoff(**backoff_params)
+def suspend_asg_processes(connection, asg_name, processes):
+    connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.backoff(**backoff_params)
+def resume_asg_processes(connection, asg_name, processes):
+    connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_launch_configurations(connection, launch_config_name):
+    pg = connection.get_paginator('describe_launch_configurations')
+    return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
+
+
+@AWSRetry.backoff(**backoff_params)
+def describe_launch_templates(connection, launch_template):
+    if launch_template['launch_template_id'] is not None:
+        try:
+            lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']])
+            return lt
+        except (botocore.exceptions.ClientError) as e:
+            
module.fail_json(msg="No launch template found matching: %s" % launch_template) + else: + try: + lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) + return lt + except (botocore.exceptions.ClientError) as e: + module.fail_json(msg="No launch template found matching: %s" % launch_template) + + +@AWSRetry.backoff(**backoff_params) +def create_asg(connection, **params): + connection.create_auto_scaling_group(**params) + + +@AWSRetry.backoff(**backoff_params) +def put_notification_config(connection, asg_name, topic_arn, notification_types): + connection.put_notification_configuration( + AutoScalingGroupName=asg_name, + TopicARN=topic_arn, + NotificationTypes=notification_types + ) + + +@AWSRetry.backoff(**backoff_params) +def del_notification_config(connection, asg_name, topic_arn): + connection.delete_notification_configuration( + AutoScalingGroupName=asg_name, + TopicARN=topic_arn + ) + + +@AWSRetry.backoff(**backoff_params) +def attach_load_balancers(connection, asg_name, load_balancers): + connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) + + +@AWSRetry.backoff(**backoff_params) +def detach_load_balancers(connection, asg_name, load_balancers): + connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) + + +@AWSRetry.backoff(**backoff_params) +def attach_lb_target_groups(connection, asg_name, target_group_arns): + connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) + + +@AWSRetry.backoff(**backoff_params) +def detach_lb_target_groups(connection, asg_name, target_group_arns): + connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) + + +@AWSRetry.backoff(**backoff_params) +def update_asg(connection, **params): + connection.update_auto_scaling_group(**params) + + +@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +def delete_asg(connection, asg_name, force_delete): + connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) + + +@AWSRetry.backoff(**backoff_params) +def terminate_asg_instance(connection, instance_id, decrement_capacity): + connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, + ShouldDecrementDesiredCapacity=decrement_capacity) + + +def enforce_required_arguments_for_create(): + ''' As many arguments are not required for autoscale group deletion + they cannot be mandatory arguments for the module, so we enforce + them here ''' + missing_args = [] + if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: + module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") + for arg in ('min_size', 'max_size'): + if module.params[arg] is None: + missing_args.append(arg) + if missing_args: + module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) + + +def get_properties(autoscaling_group): + properties = dict( + healthy_instances=0, + in_service_instances=0, + unhealthy_instances=0, + pending_instances=0, + viable_instances=0, + terminating_instances=0 + ) + instance_facts = dict() + autoscaling_group_instances = autoscaling_group.get('Instances') + + if autoscaling_group_instances: + properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] + for i in 
autoscaling_group_instances:
+            instance_facts[i['InstanceId']] = {
+                'health_status': i['HealthStatus'],
+                'lifecycle_state': i['LifecycleState']
+            }
+            if 'LaunchConfigurationName' in i:
+                instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
+            elif 'LaunchTemplate' in i:
+                instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
+
+            if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
+                properties['viable_instances'] += 1
+
+            if i['HealthStatus'] == 'Healthy':
+                properties['healthy_instances'] += 1
+            else:
+                properties['unhealthy_instances'] += 1
+
+            if i['LifecycleState'] == 'InService':
+                properties['in_service_instances'] += 1
+            if i['LifecycleState'] == 'Terminating':
+                properties['terminating_instances'] += 1
+            if i['LifecycleState'] == 'Pending':
+                properties['pending_instances'] += 1
+    else:
+        properties['instances'] = []
+
+    properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
+    properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
+    properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
+    properties['created_time'] = autoscaling_group.get('CreatedTime')
+    properties['instance_facts'] = instance_facts
+    properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
+    if 'LaunchConfigurationName' in autoscaling_group:
+        properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
+    else:
+        properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
+    properties['tags'] = autoscaling_group.get('Tags')
+    properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
+    properties['min_size'] = autoscaling_group.get('MinSize')
+    properties['max_size'] = autoscaling_group.get('MaxSize')
+    properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
+    properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
+    properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
+    properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
+    properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
+    properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
+    properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
+    raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy')
+    if raw_mixed_instance_object:
+        properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')]
+
+    metrics = autoscaling_group.get('EnabledMetrics')
+    if metrics:
+        metrics.sort(key=lambda x: x["Metric"])
+        properties['metrics_collection'] = metrics
+
+    if properties['target_group_arns']:
+        elbv2_connection = module.client('elbv2')
+        tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
+        tg_result = tg_paginator.paginate(
+            TargetGroupArns=properties['target_group_arns']
+        ).build_full_result()
+        target_groups = tg_result['TargetGroups']
+    else:
+        target_groups = []
+
+    properties['target_group_names'] = [
+        tg['TargetGroupName']
+        for tg in target_groups
+    ]
+
+    return properties
+
+
+def get_launch_object(connection, ec2_connection):
+    launch_object = dict()
+    launch_config_name = module.params.get('launch_config_name')
+    launch_template = module.params.get('launch_template')
+    
mixed_instances_policy = module.params.get('mixed_instances_policy') + if launch_config_name is None and launch_template is None: + return launch_object + elif launch_config_name: + try: + launch_configs = describe_launch_configurations(connection, launch_config_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to describe launch configurations", + exception=traceback.format_exc()) + if len(launch_configs['LaunchConfigurations']) == 0: + module.fail_json(msg="No launch config found with name %s" % launch_config_name) + launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} + return launch_object + elif launch_template: + lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] + if launch_template['version'] is not None: + launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} + else: + launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} + + if mixed_instances_policy: + instance_types = mixed_instances_policy.get('instance_types', []) + policy = { + 'LaunchTemplate': { + 'LaunchTemplateSpecification': launch_object['LaunchTemplate'] + } + } + if instance_types: + policy['LaunchTemplate']['Overrides'] = [] + for instance_type in instance_types: + instance_type_dict = {'InstanceType': instance_type} + policy['LaunchTemplate']['Overrides'].append(instance_type_dict) + launch_object['MixedInstancesPolicy'] = policy + return launch_object + + +def elb_dreg(asg_connection, group_name, instance_id): + as_group = describe_autoscaling_groups(asg_connection, group_name)[0] + wait_timeout = module.params.get('wait_timeout') + count = 1 + if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': + elb_connection = module.client('elb') + else: + return + + for lb in as_group['LoadBalancerNames']: + deregister_lb_instances(elb_connection, lb, instance_id) + module.debug("De-registering %s from ELB %s" % (instance_id, lb)) + + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and count > 0: + count = 0 + for lb in as_group['LoadBalancerNames']: + lb_instances = describe_instance_health(elb_connection, lb, []) + for i in lb_instances['InstanceStates']: + if i['InstanceId'] == instance_id and i['State'] == "InService": + count += 1 + module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description'])) + time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for instance to deregister. 
{0}".format(time.asctime())) + + +def elb_healthy(asg_connection, elb_connection, group_name): + healthy_instances = set() + as_group = describe_autoscaling_groups(asg_connection, group_name)[0] + props = get_properties(as_group) + # get healthy, inservice instances from ASG + instances = [] + for instance, settings in props['instance_facts'].items(): + if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + instances.append(dict(InstanceId=instance)) + module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug("ELB instance status:") + lb_instances = list() + for lb in as_group.get('LoadBalancerNames'): + # we catch a race condition that sometimes happens if the instance exists in the ASG + # but has not yet show up in the ELB + try: + lb_instances = describe_instance_health(elb_connection, lb, instances) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidInstance': + return None + + module.fail_json(msg="Failed to get load balancer.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Failed to get load balancer.", + exception=traceback.format_exc()) + + for i in lb_instances.get('InstanceStates'): + if i['State'] == "InService": + healthy_instances.add(i['InstanceId']) + module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State'])) + return len(healthy_instances) + + +def tg_healthy(asg_connection, elbv2_connection, group_name): + healthy_instances = set() + as_group = describe_autoscaling_groups(asg_connection, group_name)[0] + props = get_properties(as_group) + # get healthy, inservice instances from ASG + instances = [] + for instance, settings in props['instance_facts'].items(): + if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + instances.append(dict(Id=instance)) + module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug("Target Group instance status:") + tg_instances = list() + for tg in as_group.get('TargetGroupARNs'): + # we catch a race condition that sometimes happens if the instance exists in the ASG + # but has not yet show up in the ELB + try: + tg_instances = describe_target_health(elbv2_connection, tg, instances) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidInstance': + return None + + module.fail_json(msg="Failed to get target group.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Failed to get target group.", + exception=traceback.format_exc()) + + for i in tg_instances.get('TargetHealthDescriptions'): + if i['TargetHealth']['State'] == "healthy": + healthy_instances.add(i['Target']['Id']) + module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State'])) + return len(healthy_instances) + + +def wait_for_elb(asg_connection, group_name): + wait_timeout = module.params.get('wait_timeout') + + # if the health_check_type is ELB, we want to query the ELBs directly for instance + # status as to avoid health_check_grace period that is awarded to ASG instances + as_group = describe_autoscaling_groups(asg_connection, group_name)[0] + + if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB': + module.debug("Waiting for ELB to consider instances healthy.") 
+        elb_connection = module.client('elb')
+
+        wait_timeout = time.time() + wait_timeout
+        healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+
+        # elb_healthy() returns None when it hits the InvalidInstance race above,
+        # so guard against comparing None with MinSize and simply poll again.
+        while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
+            healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+            module.debug("ELB thinks %s instances are healthy." % healthy_instances)
+            time.sleep(10)
+        if wait_timeout <= time.time():
+            # waiting took too long
+            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+        module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
+
+
+def wait_for_target_group(asg_connection, group_name):
+    wait_timeout = module.params.get('wait_timeout')
+
+    # if the health_check_type is ELB, we want to query the target groups directly for instance
+    # status so as to avoid the health_check_grace period that is granted to ASG instances
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+    if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
+        module.debug("Waiting for Target Group to consider instances healthy.")
+        elbv2_connection = module.client('elbv2')
+
+        wait_timeout = time.time() + wait_timeout
+        healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+
+        # tg_healthy() can likewise return None on the InvalidInstance race.
+        while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
+            healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+            module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
+            time.sleep(10)
+        if wait_timeout <= time.time():
+            # waiting took too long
+            module.fail_json(msg="Waited too long for target group instances to be healthy. %s" % time.asctime())
+        module.debug("Waiting complete. Target Group thinks %s instances are healthy." 
% healthy_instances) + + +def suspend_processes(ec2_connection, as_group): + suspend_processes = set(module.params.get('suspend_processes')) + + try: + suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) + except AttributeError: + # New ASG being created, no suspended_processes defined yet + suspended_processes = set() + + if suspend_processes == suspended_processes: + return False + + resume_processes = list(suspended_processes - suspend_processes) + if resume_processes: + resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) + + if suspend_processes: + suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) + + return True + + +def create_autoscaling_group(connection): + group_name = module.params.get('name') + load_balancers = module.params['load_balancers'] + target_group_arns = module.params['target_group_arns'] + availability_zones = module.params['availability_zones'] + launch_config_name = module.params.get('launch_config_name') + launch_template = module.params.get('launch_template') + mixed_instances_policy = module.params.get('mixed_instances_policy') + min_size = module.params['min_size'] + max_size = module.params['max_size'] + max_instance_lifetime = module.params.get('max_instance_lifetime') + placement_group = module.params.get('placement_group') + desired_capacity = module.params.get('desired_capacity') + vpc_zone_identifier = module.params.get('vpc_zone_identifier') + set_tags = module.params.get('tags') + health_check_period = module.params.get('health_check_period') + health_check_type = module.params.get('health_check_type') + default_cooldown = module.params.get('default_cooldown') + wait_for_instances = module.params.get('wait_for_instances') + wait_timeout = module.params.get('wait_timeout') + termination_policies = module.params.get('termination_policies') + notification_topic = module.params.get('notification_topic') + notification_types = module.params.get('notification_types') + metrics_collection = module.params.get('metrics_collection') + metrics_granularity = module.params.get('metrics_granularity') + metrics_list = module.params.get('metrics_list') + + try: + as_groups = describe_autoscaling_groups(connection, group_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to describe auto scaling groups.", + exception=traceback.format_exc()) + + ec2_connection = module.client('ec2') + + if vpc_zone_identifier: + vpc_zone_identifier = ','.join(vpc_zone_identifier) + + asg_tags = [] + for tag in set_tags: + for k, v in tag.items(): + if k != 'propagate_at_launch': + asg_tags.append(dict(Key=k, + Value=to_native(v), + PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), + ResourceType='auto-scaling-group', + ResourceId=group_name)) + if not as_groups: + if not vpc_zone_identifier and not availability_zones: + availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for + zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] + + enforce_required_arguments_for_create() + + if desired_capacity is None: + desired_capacity = min_size + ag = dict( + AutoScalingGroupName=group_name, + MinSize=min_size, + MaxSize=max_size, + DesiredCapacity=desired_capacity, + Tags=asg_tags, + HealthCheckGracePeriod=health_check_period, + HealthCheckType=health_check_type, + DefaultCooldown=default_cooldown, + TerminationPolicies=termination_policies) + if vpc_zone_identifier: + 
ag['VPCZoneIdentifier'] = vpc_zone_identifier
+        if availability_zones:
+            ag['AvailabilityZones'] = availability_zones
+        if placement_group:
+            ag['PlacementGroup'] = placement_group
+        if load_balancers:
+            ag['LoadBalancerNames'] = load_balancers
+        if target_group_arns:
+            ag['TargetGroupARNs'] = target_group_arns
+        if max_instance_lifetime:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+        launch_object = get_launch_object(connection, ec2_connection)
+        if 'LaunchConfigurationName' in launch_object:
+            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+        elif 'LaunchTemplate' in launch_object:
+            if 'MixedInstancesPolicy' in launch_object:
+                ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+            else:
+                ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+        else:
+            module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate",
+                             exception=traceback.format_exc())
+
+        try:
+            create_asg(connection, **ag)
+            if metrics_collection:
+                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+
+            all_ag = describe_autoscaling_groups(connection, group_name)
+            if len(all_ag) == 0:
+                module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
+            as_group = all_ag[0]
+            suspend_processes(connection, as_group)
+            if wait_for_instances:
+                wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+                if load_balancers:
+                    wait_for_elb(connection, group_name)
+                # Wait for target group health if target group(s) defined
+                if target_group_arns:
+                    wait_for_target_group(connection, group_name)
+            if notification_topic:
+                put_notification_config(connection, group_name, notification_topic, notification_types)
+            as_group = describe_autoscaling_groups(connection, group_name)[0]
+            asg_properties = get_properties(as_group)
+            changed = True
+            return changed, asg_properties
+        except botocore.exceptions.ClientError as e:
+            module.fail_json(msg="Failed to create Autoscaling Group.",
+                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+        except botocore.exceptions.BotoCoreError as e:
+            module.fail_json(msg="Failed to create Autoscaling Group.",
+                             exception=traceback.format_exc())
+    else:
+        as_group = as_groups[0]
+        initial_asg_properties = get_properties(as_group)
+        changed = False
+
+        if suspend_processes(connection, as_group):
+            changed = True
+
+        # process tag changes
+        if len(set_tags) > 0:
+            have_tags = as_group.get('Tags')
+            want_tags = asg_tags
+            if have_tags:
+                have_tags.sort(key=lambda x: x["Key"])
+            if want_tags:
+                want_tags.sort(key=lambda x: x["Key"])
+            dead_tags = []
+            have_tag_keyvals = [x['Key'] for x in have_tags]
+            want_tag_keyvals = [x['Key'] for x in want_tags]
+
+            for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
+                changed = True
+                dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
+                                      ResourceType='auto-scaling-group', Key=dead_tag))
+                have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
+            if dead_tags:
+                connection.delete_tags(Tags=dead_tags)
+
+            zipped = zip(have_tags, want_tags)
+            if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
+                changed = True
+                connection.create_or_update_tags(Tags=asg_tags)
+
+        # Handle load balancer attachments/detachments
+        # Attach load balancers if they are specified but none currently exist
+        if load_balancers and not as_group['LoadBalancerNames']:
+            changed = True
+            try:
+                
attach_load_balancers(connection, group_name, load_balancers) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to update Autoscaling Group.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Failed to update Autoscaling Group.", + exception=traceback.format_exc()) + + # Update load balancers if they are specified and one or more already exists + elif as_group['LoadBalancerNames']: + change_load_balancers = load_balancers is not None + # Get differences + if not load_balancers: + load_balancers = list() + wanted_elbs = set(load_balancers) + + has_elbs = set(as_group['LoadBalancerNames']) + # check if all requested are already existing + if has_elbs - wanted_elbs and change_load_balancers: + # if wanted contains less than existing, then we need to delete some + elbs_to_detach = has_elbs.difference(wanted_elbs) + if elbs_to_detach: + changed = True + try: + detach_load_balancers(connection, group_name, list(elbs_to_detach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)), + exception=traceback.format_exc()) + if wanted_elbs - has_elbs: + # if has contains less than wanted, then we need to add some + elbs_to_attach = wanted_elbs.difference(has_elbs) + if elbs_to_attach: + changed = True + try: + attach_load_balancers(connection, group_name, list(elbs_to_attach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)), + exception=traceback.format_exc()) + + # Handle target group attachments/detachments + # Attach target groups if they are specified but none currently exist + if target_group_arns and not as_group['TargetGroupARNs']: + changed = True + try: + attach_lb_target_groups(connection, group_name, target_group_arns) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to update Autoscaling Group.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Failed to update Autoscaling Group.", + exception=traceback.format_exc()) + # Update target groups if they are specified and one or more already exists + elif target_group_arns is not None and as_group['TargetGroupARNs']: + # Get differences + wanted_tgs = set(target_group_arns) + has_tgs = set(as_group['TargetGroupARNs']) + # check if all requested are already existing + if has_tgs.issuperset(wanted_tgs): + # if wanted contains less than existing, then we need to delete some + tgs_to_detach = has_tgs.difference(wanted_tgs) + if tgs_to_detach: + changed = True + try: + detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)), + exception=traceback.format_exc()) + if wanted_tgs.issuperset(has_tgs): + # if has contains less than wanted, then we need to add some + tgs_to_attach = wanted_tgs.difference(has_tgs) + if tgs_to_attach: + changed = True + try: + attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to 
attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
+                                         exception=traceback.format_exc())
+
+        # check for attributes that aren't required for updating an existing ASG
+        # check if min_size/max_size/desired capacity have been specified and if not use ASG values
+        if min_size is None:
+            min_size = as_group['MinSize']
+        if max_size is None:
+            max_size = as_group['MaxSize']
+        if desired_capacity is None:
+            desired_capacity = as_group['DesiredCapacity']
+        ag = dict(
+            AutoScalingGroupName=group_name,
+            MinSize=min_size,
+            MaxSize=max_size,
+            DesiredCapacity=desired_capacity,
+            HealthCheckGracePeriod=health_check_period,
+            HealthCheckType=health_check_type,
+            DefaultCooldown=default_cooldown,
+            TerminationPolicies=termination_policies)
+
+        # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
+        launch_object = get_launch_object(connection, ec2_connection)
+        if 'LaunchConfigurationName' in launch_object:
+            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+        elif 'LaunchTemplate' in launch_object:
+            if 'MixedInstancesPolicy' in launch_object:
+                ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+            else:
+                ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+        else:
+            try:
+                ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
+            except Exception:
+                launch_template = as_group['LaunchTemplate']
+                # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
+                ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
+
+        if availability_zones:
+            ag['AvailabilityZones'] = availability_zones
+        if vpc_zone_identifier:
+            ag['VPCZoneIdentifier'] = vpc_zone_identifier
+        if max_instance_lifetime is not None:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+        try:
+            update_asg(connection, **ag)
+
+            if metrics_collection:
+                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+            else:
+                connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
+
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
+                             exception=traceback.format_exc())
+        if notification_topic:
+            try:
+                put_notification_config(connection, group_name, notification_topic, notification_types)
+            except botocore.exceptions.ClientError as e:
+                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
+                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+            except botocore.exceptions.BotoCoreError as e:
+                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
+                                 exception=traceback.format_exc())
+        if wait_for_instances:
+            wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+            # Wait for ELB health if ELB(s) defined
+            if load_balancers:
+                module.debug('\tWAITING FOR ELB HEALTH')
+                wait_for_elb(connection, group_name)
+            # Wait for target group health if target group(s) defined
+
+            if target_group_arns:
+                module.debug('\tWAITING FOR TG HEALTH')
+                wait_for_target_group(connection, group_name)
+
+        try:
+            as_group = describe_autoscaling_groups(connection, group_name)[0]
+            asg_properties = get_properties(as_group)
+            if asg_properties != initial_asg_properties:
+                changed = True
+        except botocore.exceptions.ClientError 
as e: + module.fail_json(msg="Failed to read existing Autoscaling Groups.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json(msg="Failed to read existing Autoscaling Groups.", + exception=traceback.format_exc()) + return changed, asg_properties + + +def delete_autoscaling_group(connection): + group_name = module.params.get('name') + notification_topic = module.params.get('notification_topic') + wait_for_instances = module.params.get('wait_for_instances') + wait_timeout = module.params.get('wait_timeout') + + if notification_topic: + del_notification_config(connection, group_name, notification_topic) + groups = describe_autoscaling_groups(connection, group_name) + if groups: + wait_timeout = time.time() + wait_timeout + if not wait_for_instances: + delete_asg(connection, group_name, force_delete=True) + else: + updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0) + update_asg(connection, **updated_params) + instances = True + while instances and wait_for_instances and wait_timeout >= time.time(): + tmp_groups = describe_autoscaling_groups(connection, group_name) + if tmp_groups: + tmp_group = tmp_groups[0] + if not tmp_group.get('Instances'): + instances = False + time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + + delete_asg(connection, group_name, force_delete=False) + while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): + time.sleep(5) + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) + return True + + return False + + +def get_chunks(l, n): + for i in range(0, len(l), n): + yield l[i:i + n] + + +def update_size(connection, group, max_size, min_size, dc): + module.debug("setting ASG sizes") + module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) + updated_group = dict() + updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] + updated_group['MinSize'] = min_size + updated_group['MaxSize'] = max_size + updated_group['DesiredCapacity'] = dc + update_asg(connection, **updated_group) + + +def replace(connection): + batch_size = module.params.get('replace_batch_size') + wait_timeout = module.params.get('wait_timeout') + wait_for_instances = module.params.get('wait_for_instances') + group_name = module.params.get('name') + max_size = module.params.get('max_size') + min_size = module.params.get('min_size') + desired_capacity = module.params.get('desired_capacity') + launch_config_name = module.params.get('launch_config_name') + # Required to maintain the default value being set to 'true' + if launch_config_name: + lc_check = module.params.get('lc_check') + else: + lc_check = False + # Mirror above behavior for Launch Templates + launch_template = module.params.get('launch_template') + if launch_template: + lt_check = module.params.get('lt_check') + else: + lt_check = False + replace_instances = module.params.get('replace_instances') + replace_all_instances = module.params.get('replace_all_instances') + + as_group = describe_autoscaling_groups(connection, group_name)[0] + if desired_capacity is None: + desired_capacity = as_group['DesiredCapacity'] + + if wait_for_instances: + wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 
'viable_instances')
+
+    props = get_properties(as_group)
+    instances = props['instances']
+    if replace_all_instances:
+        # If replacing all instances, then set replace_instances to current set
+        # This allows replace_instances and replace_all_instances to behave the same
+        replace_instances = instances
+    if replace_instances:
+        instances = replace_instances
+
+    # check to see if instances are replaceable if checking launch configs
+    if launch_config_name:
+        new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
+    elif launch_template:
+        new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)
+
+    num_new_inst_needed = desired_capacity - len(new_instances)
+
+    if lc_check or lt_check:
+        if num_new_inst_needed == 0 and old_instances:
+            module.debug("No new instances needed, but old instances are present. Removing old instances")
+            terminate_batch(connection, old_instances, instances, True)
+            as_group = describe_autoscaling_groups(connection, group_name)[0]
+            props = get_properties(as_group)
+            changed = True
+            return changed, props
+
+        # we don't want to spin up extra instances if not necessary
+        if num_new_inst_needed < batch_size:
+            module.debug("Overriding batch size to %s" % num_new_inst_needed)
+            batch_size = num_new_inst_needed
+
+    if not old_instances:
+        changed = False
+        return changed, props
+
+    # check if min_size/max_size/desired capacity have been specified and if not use ASG values
+    if min_size is None:
+        min_size = as_group['MinSize']
+    if max_size is None:
+        max_size = as_group['MaxSize']
+
+    # set temporary settings and wait for them to be reached
+    # This should get overwritten if the number of instances left is less than the batch size.
+
+    as_group = describe_autoscaling_groups(connection, group_name)[0]
+    update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
+
+    if wait_for_instances:
+        wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
+        wait_for_elb(connection, group_name)
+        wait_for_target_group(connection, group_name)
+
+    as_group = describe_autoscaling_groups(connection, group_name)[0]
+    props = get_properties(as_group)
+    instances = props['instances']
+    if replace_instances:
+        instances = replace_instances
+
+    module.debug("beginning main loop")
+    for i in get_chunks(instances, batch_size):
+        # break out of this loop if we have enough new instances
+        break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
+
+        if wait_for_instances:
+            wait_for_term_inst(connection, term_instances)
+            wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
+            wait_for_elb(connection, group_name)
+            wait_for_target_group(connection, group_name)
+
+        if break_early:
+            module.debug("breaking loop")
+            break
+
+    update_size(connection, as_group, max_size, min_size, desired_capacity)
+    as_group = describe_autoscaling_groups(connection, group_name)[0]
+    asg_properties = get_properties(as_group)
+    module.debug("Rolling update complete.")
+    changed = True
+    return changed, asg_properties
+
+
+def get_instances_by_launch_config(props, lc_check, initial_instances):
+    new_instances = []
+    old_instances = []
+    # old instances are those that have the old launch config
+    if lc_check:
+        for i in props['instances']:
+            # Check if migrating from launch_template to launch_config first
+            if 'launch_template' in props['instance_facts'][i]:
+                
old_instances.append(i) + elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']: + new_instances.append(i) + else: + old_instances.append(i) + + else: + module.debug("Comparing initial instances with current: %s" % initial_instances) + for i in props['instances']: + if i not in initial_instances: + new_instances.append(i) + else: + old_instances.append(i) + + module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) + module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + + return new_instances, old_instances + + +def get_instances_by_launch_template(props, lt_check, initial_instances): + new_instances = [] + old_instances = [] + # old instances are those that have the old launch template or version of the same launch template + if lt_check: + for i in props['instances']: + # Check if migrating from launch_config_name to launch_template_name first + if 'launch_config_name' in props['instance_facts'][i]: + old_instances.append(i) + elif props['instance_facts'][i].get('launch_template') == props['launch_template']: + new_instances.append(i) + else: + old_instances.append(i) + else: + module.debug("Comparing initial instances with current: %s" % initial_instances) + for i in props['instances']: + if i not in initial_instances: + new_instances.append(i) + else: + old_instances.append(i) + + module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) + module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + + return new_instances, old_instances + + +def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances): + instances_to_terminate = [] + instances = (inst_id for inst_id in replace_instances if inst_id in props['instances']) + # check to make sure instances given are actually in the given ASG + # and they have a non-current launch config + if 'launch_config_name' in module.params: + if lc_check: + for i in instances: + if ( + 'launch_template' in props['instance_facts'][i] + or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name'] + ): + instances_to_terminate.append(i) + else: + for i in instances: + if i in initial_instances: + instances_to_terminate.append(i) + elif 'launch_template' in module.params: + if lt_check: + for i in instances: + if ( + 'launch_config_name' in props['instance_facts'][i] + or props['instance_facts'][i]['launch_template'] != props['launch_template'] + ): + instances_to_terminate.append(i) + else: + for i in instances: + if i in initial_instances: + instances_to_terminate.append(i) + + return instances_to_terminate + + +def terminate_batch(connection, replace_instances, initial_instances, leftovers=False): + batch_size = module.params.get('replace_batch_size') + min_size = module.params.get('min_size') + desired_capacity = module.params.get('desired_capacity') + group_name = module.params.get('name') + lc_check = module.params.get('lc_check') + lt_check = module.params.get('lt_check') + decrement_capacity = False + break_loop = False + + as_group = describe_autoscaling_groups(connection, group_name)[0] + if desired_capacity is None: + desired_capacity = as_group['DesiredCapacity'] + + props = get_properties(as_group) + desired_size = as_group['MinSize'] + if module.params.get('launch_config_name'): + new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances) + else: + new_instances, old_instances = get_instances_by_launch_template(props, lt_check, 
initial_instances) + num_new_inst_needed = desired_capacity - len(new_instances) + + # check to make sure instances given are actually in the given ASG + # and they have a non-current launch config + instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) + + module.debug("new instances needed: %s" % num_new_inst_needed) + module.debug("new instances: %s" % new_instances) + module.debug("old instances: %s" % old_instances) + module.debug("batch instances: %s" % ",".join(instances_to_terminate)) + + if num_new_inst_needed == 0: + decrement_capacity = True + if as_group['MinSize'] != min_size: + if min_size is None: + min_size = as_group['MinSize'] + updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size) + update_asg(connection, **updated_params) + module.debug("Updating minimum size back to original of %s" % min_size) + # if are some leftover old instances, but we are already at capacity with new ones + # we don't want to decrement capacity + if leftovers: + decrement_capacity = False + break_loop = True + instances_to_terminate = old_instances + desired_size = min_size + module.debug("No new instances needed") + + if num_new_inst_needed < batch_size and num_new_inst_needed != 0: + instances_to_terminate = instances_to_terminate[:num_new_inst_needed] + decrement_capacity = False + break_loop = False + module.debug("%s new instances needed" % num_new_inst_needed) + + module.debug("decrementing capacity: %s" % decrement_capacity) + + for instance_id in instances_to_terminate: + elb_dreg(connection, group_name, instance_id) + module.debug("terminating instance: %s" % instance_id) + terminate_asg_instance(connection, instance_id, decrement_capacity) + + # we wait to make sure the machines we marked as Unhealthy are + # no longer in the list + + return break_loop, desired_size, instances_to_terminate + + +def wait_for_term_inst(connection, term_instances): + wait_timeout = module.params.get('wait_timeout') + group_name = module.params.get('name') + as_group = describe_autoscaling_groups(connection, group_name)[0] + count = 1 + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and count > 0: + module.debug("waiting for instances to terminate") + count = 0 + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + instance_facts = props['instance_facts'] + instances = (i for i in instance_facts if i in term_instances) + for i in instances: + lifecycle = instance_facts[i]['lifecycle_state'] + health = instance_facts[i]['health_status'] + module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) + if lifecycle.startswith('Terminating') or health == 'Unhealthy': + count += 1 + time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + + +def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): + # make sure we have the latest stats after that last loop. 
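+    # 'prop' names one of the instance counters computed by get_properties()
+    # (e.g. 'viable_instances'); the loop below polls every ten seconds until
+    # the group reports at least desired_size instances in that state.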
+ as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + # now we make sure that we have enough instances in a viable state + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and desired_size > props[prop]: + module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + time.sleep(10) + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) + module.debug("Reached %s: %s" % (prop, desired_size)) + return props + + +def asg_exists(connection): + group_name = module.params.get('name') + as_group = describe_autoscaling_groups(connection, group_name) + return bool(len(as_group)) + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + load_balancers=dict(type='list'), + target_group_arns=dict(type='list'), + availability_zones=dict(type='list'), + launch_config_name=dict(type='str'), + launch_template=dict( + type='dict', + default=None, + options=dict( + version=dict(type='str'), + launch_template_name=dict(type='str'), + launch_template_id=dict(type='str'), + ) + ), + min_size=dict(type='int'), + max_size=dict(type='int'), + max_instance_lifetime=dict(type='int'), + mixed_instances_policy=dict( + type='dict', + default=None, + options=dict( + instance_types=dict( + type='list', + elements='str' + ), + ) + ), + placement_group=dict(type='str'), + desired_capacity=dict(type='int'), + vpc_zone_identifier=dict(type='list'), + replace_batch_size=dict(type='int', default=1), + replace_all_instances=dict(type='bool', default=False), + replace_instances=dict(type='list', default=[]), + lc_check=dict(type='bool', default=True), + lt_check=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='list', default=[]), + health_check_period=dict(type='int', default=300), + health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), + default_cooldown=dict(type='int', default=300), + wait_for_instances=dict(type='bool', default=True), + termination_policies=dict(type='list', default='Default'), + notification_topic=dict(type='str', default=None), + notification_types=dict( + type='list', + default=[ + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + ] + ), + suspend_processes=dict(type='list', default=[]), + metrics_collection=dict(type='bool', default=False), + metrics_granularity=dict(type='str', default='1Minute'), + metrics_list=dict( + type='list', + default=[ + 'GroupMinSize', + 'GroupMaxSize', + 'GroupDesiredCapacity', + 'GroupInServiceInstances', + 'GroupPendingInstances', + 'GroupStandbyInstances', + 'GroupTerminatingInstances', + 'GroupTotalInstances' + ] + ) + ) + + global module + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['replace_all_instances', 'replace_instances'], + ['launch_config_name', 'launch_template'] + ] + ) + + if ( + module.params.get('max_instance_lifetime') is not None + and not module.botocore_at_least('1.13.21') + ): + module.fail_json( + msg='Botocore needs to be version 1.13.21 or higher to use 
max_instance_lifetime.' + ) + + if ( + module.params.get('mixed_instances_policy') is not None + and not module.botocore_at_least('1.12.45') + ): + module.fail_json( + msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.' + ) + + state = module.params.get('state') + replace_instances = module.params.get('replace_instances') + replace_all_instances = module.params.get('replace_all_instances') + + connection = module.client('autoscaling') + changed = create_changed = replace_changed = False + exists = asg_exists(connection) + + if state == 'present': + create_changed, asg_properties = create_autoscaling_group(connection) + elif state == 'absent': + changed = delete_autoscaling_group(connection) + module.exit_json(changed=changed) + + # Only replace instances if asg existed at start of call + if ( + exists + and (replace_all_instances or replace_instances) + and (module.params.get('launch_config_name') or module.params.get('launch_template')) + ): + replace_changed, asg_properties = replace(connection) + + if create_changed or replace_changed: + changed = True + + module.exit_json(changed=changed, **asg_properties) + + +if __name__ == '__main__': + main() diff --git a/ec2_asg_facts.py b/ec2_asg_facts.py new file mode 120000 index 00000000000..88ec9524588 --- /dev/null +++ b/ec2_asg_facts.py @@ -0,0 +1 @@ +ec2_asg_info.py \ No newline at end of file diff --git a/ec2_asg_info.py b/ec2_asg_info.py new file mode 100644 index 00000000000..cf3a10b90f5 --- /dev/null +++ b/ec2_asg_info.py @@ -0,0 +1,414 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_asg_info +short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS +description: + - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS + - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + name: + description: + - The prefix or name of the auto scaling group(s) you are searching for. + - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match." + type: str + required: false + tags: + description: + - > + A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling + group(s) you are searching for. + required: false + type: dict +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
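+
+# (Illustrative addition) Find all groups and print each group's name
+- ec2_asg_info:
+  register: asgs
+- debug:
+    msg: "{{ item.auto_scaling_group_name }}"
+  loop: "{{ asgs.results }}"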
+ +# Find all groups +- ec2_asg_info: + register: asgs + +# Find a group with matching name/prefix +- ec2_asg_info: + name: public-webserver-asg + register: asgs + +# Find a group with matching tags +- ec2_asg_info: + tags: + project: webapp + env: production + register: asgs + +# Find a group with matching name/prefix and tags +- ec2_asg_info: + name: myproject + tags: + env: production + register: asgs + +# Fail if no groups are found +- ec2_asg_info: + name: public-webserver-asg + register: asgs + failed_when: "{{ asgs.results | length == 0 }}" + +# Fail if more than 1 group is found +- ec2_asg_info: + name: public-webserver-asg + register: asgs + failed_when: "{{ asgs.results | length > 1 }}" +''' + +RETURN = ''' +--- +auto_scaling_group_arn: + description: The Amazon Resource Name of the ASG + returned: success + type: str + sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1" +auto_scaling_group_name: + description: Name of autoscaling group + returned: success + type: str + sample: "public-webapp-production-1" +availability_zones: + description: List of Availability Zones that are enabled for this ASG. + returned: success + type: list + sample: ["us-west-2a", "us-west-2b", "us-west-2a"] +created_time: + description: The date and time this ASG was created, in ISO 8601 format. + returned: success + type: str + sample: "2015-11-25T00:05:36.309Z" +default_cooldown: + description: The default cooldown time in seconds. + returned: success + type: int + sample: 300 +desired_capacity: + description: The number of EC2 instances that should be running in this group. + returned: success + type: int + sample: 3 +health_check_period: + description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. + returned: success + type: int + sample: 30 +health_check_type: + description: The service you want the health status from, one of "EC2" or "ELB". + returned: success + type: str + sample: "ELB" +instances: + description: List of EC2 instances and their status as it relates to the ASG. + returned: success + type: list + sample: [ + { + "availability_zone": "us-west-2a", + "health_status": "Healthy", + "instance_id": "i-es22ad25", + "launch_configuration_name": "public-webapp-production-1", + "lifecycle_state": "InService", + "protected_from_scale_in": "false" + } + ] +launch_config_name: + description: > + Name of launch configuration associated with the ASG. Same as launch_configuration_name, + provided for compatibility with ec2_asg module. + returned: success + type: str + sample: "public-webapp-production-1" +launch_configuration_name: + description: Name of launch configuration associated with the ASG. + returned: success + type: str + sample: "public-webapp-production-1" +load_balancer_names: + description: List of load balancers names attached to the ASG. + returned: success + type: list + sample: ["elb-webapp-prod"] +max_size: + description: Maximum size of group + returned: success + type: int + sample: 3 +min_size: + description: Minimum size of group + returned: success + type: int + sample: 1 +new_instances_protected_from_scale_in: + description: Whether or not new instances a protected from automatic scaling in. + returned: success + type: bool + sample: "false" +placement_group: + description: Placement group into which instances are launched, if any. 
+ returned: success + type: str + sample: None +status: + description: The current state of the group when DeleteAutoScalingGroup is in progress. + returned: success + type: str + sample: None +tags: + description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. + returned: success + type: list + sample: [ + { + "key": "Name", + "value": "public-webapp-production-1", + "resource_id": "public-webapp-production-1", + "resource_type": "auto-scaling-group", + "propagate_at_launch": "true" + }, + { + "key": "env", + "value": "production", + "resource_id": "public-webapp-production-1", + "resource_type": "auto-scaling-group", + "propagate_at_launch": "true" + } + ] +target_group_arns: + description: List of ARNs of the target groups that the ASG populates + returned: success + type: list + sample: [ + "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b", + "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234" + ] +target_group_names: + description: List of names of the target groups that the ASG populates + returned: success + type: list + sample: [ + "target-group-host-hello", + "target-group-path-world" + ] +termination_policies: + description: A list of termination policies for the group. + returned: success + type: str + sample: ["Default"] +''' + +import re + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +def match_asg_tags(tags_to_match, asg): + for key, value in tags_to_match.items(): + for tag in asg['Tags']: + if key == tag['Key'] and value == tag['Value']: + break + else: + return False + return True + + +def find_asgs(conn, module, name=None, tags=None): + """ + Args: + conn (boto3.AutoScaling.Client): Valid Boto3 ASG client. + name (str): Optional name of the ASG you are looking for. + tags (dict): Optional dictionary of tags and values to search for. 
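+        module (AnsibleAWSModule): Initialized Ansible AWS module, used here
+            for error reporting and to build the optional elbv2 client.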
+
+    Basic Usage:
+        >>> name = 'public-webapp-production'
+        >>> tags = { 'env': 'production' }
+        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+        >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+    Returns:
+        List
+        [
+            {
+                "auto_scaling_group_arn": (
+                    "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+                    "autoScalingGroupName/public-webapp-production"
+                ),
+                "auto_scaling_group_name": "public-webapp-production",
+                "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+                "created_time": "2016-02-02T23:28:42.481000+00:00",
+                "default_cooldown": 300,
+                "desired_capacity": 2,
+                "enabled_metrics": [],
+                "health_check_grace_period": 300,
+                "health_check_type": "ELB",
+                "instances":
+                [
+                    {
+                        "availability_zone": "us-west-2c",
+                        "health_status": "Healthy",
+                        "instance_id": "i-047a12cb",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    },
+                    {
+                        "availability_zone": "us-west-2a",
+                        "health_status": "Healthy",
+                        "instance_id": "i-7a29df2c",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    }
+                ],
+                "launch_config_name": "public-webapp-production-1",
+                "launch_configuration_name": "public-webapp-production-1",
+                "load_balancer_names": ["public-webapp-production-lb"],
+                "max_size": 4,
+                "min_size": 2,
+                "new_instances_protected_from_scale_in": false,
+                "placement_group": None,
+                "status": None,
+                "suspended_processes": [],
+                "tags":
+                [
+                    {
+                        "key": "Name",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "public-webapp-production"
+                    },
+                    {
+                        "key": "env",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "production"
+                    }
+                ],
+                "target_group_names": [],
+                "target_group_arns": [],
+                "termination_policies":
+                [
+                    "Default"
+                ],
+                "vpc_zone_identifier":
+                [
+                    "subnet-a1b1c1d1",
+                    "subnet-a2b2c2d2",
+                    "subnet-a3b3c3d3"
+                ]
+            }
+        ]
+    """
+
+    try:
+        asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
+        asgs = asgs_paginator.paginate().build_full_result()
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
+
+    if not asgs:
+        return asgs
+
+    try:
+        elbv2 = module.client('elbv2')
+    except ClientError:
+        # This is nice to have, not essential; carry on without target group names
+        elbv2 = None
+    matched_asgs = []
+
+    if name is not None:
+        # the name is a prefix match, implicitly anchored at the start of the group name
+        name_prog = re.compile(r'^' + name)
+
+    for asg in asgs['AutoScalingGroups']:
+        if name:
+            matched_name = name_prog.search(asg['AutoScalingGroupName'])
+        else:
+            matched_name = True
+
+        if tags:
+            matched_tags = match_asg_tags(tags, asg)
+        else:
+            matched_tags = True
+
+        if matched_name and matched_tags:
+            asg = camel_dict_to_snake_dict(asg)
+            # compatibility with ec2_asg module
+            if 'launch_configuration_name' in asg:
+                asg['launch_config_name'] = asg['launch_configuration_name']
+            # workaround for https://github.com/ansible/ansible/pull/25015
+            if 'target_group_ar_ns' in asg:
+                asg['target_group_arns'] = asg['target_group_ar_ns']
+                del(asg['target_group_ar_ns'])
+            if asg.get('target_group_arns'):
+                if elbv2:
+                    try:
+                        tg_paginator = elbv2.get_paginator('describe_target_groups')
+                        tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
+                        asg['target_group_names'] =
[tg['TargetGroupName'] for tg in tg_result['TargetGroups']] + except ClientError as e: + if e.response['Error']['Code'] == 'TargetGroupNotFound': + asg['target_group_names'] = [] + else: + module.fail_json_aws(e, msg="Failed to describe Target Groups") + except BotoCoreError as e: + module.fail_json_aws(e, msg="Failed to describe Target Groups") + else: + asg['target_group_names'] = [] + matched_asgs.append(asg) + + return matched_asgs + + +def main(): + + argument_spec = dict( + name=dict(type='str'), + tags=dict(type='dict'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec) + if module._name == 'ec2_asg_facts': + module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", version='2.13') + + asg_name = module.params.get('name') + asg_tags = module.params.get('tags') + + autoscaling = module.client('autoscaling') + + results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) + module.exit_json(results=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py new file mode 100644 index 00000000000..f648a699207 --- /dev/null +++ b/ec2_asg_lifecycle_hook.py @@ -0,0 +1,253 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ec2_asg_lifecycle_hook +short_description: Create, delete or update AWS ASG Lifecycle Hooks. +description: + - Will create a new hook when I(state=present) and no given Hook is found. + - Will update an existing hook when I(state=present) and a Hook is found, but current and provided parameters differ. + - Will delete the hook when I(state=absent) and a Hook is found. +author: Igor 'Tsigankov' Eyrich (@tsiganenok) +options: + state: + description: + - Create or delete Lifecycle Hook. + - When I(state=present) updates existing hook or creates a new hook if not found. + choices: ['present', 'absent'] + default: present + type: str + lifecycle_hook_name: + description: + - The name of the lifecycle hook. + required: true + type: str + autoscaling_group_name: + description: + - The name of the Auto Scaling group to which you want to assign the lifecycle hook. + required: true + type: str + transition: + description: + - The instance state to which you want to attach the lifecycle hook. + - Required when I(state=present). + choices: ['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING'] + type: str + role_arn: + description: + - The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. + type: str + notification_target_arn: + description: + - The ARN of the notification target that Auto Scaling will use to notify you when an + instance is in the transition state for the lifecycle hook. + - This target can be either an SQS queue or an SNS topic. + - If you specify an empty string, this overrides the current ARN. + type: str + notification_meta_data: + description: + - Contains additional information that you want to include any time Auto Scaling sends a message to the notification target. + type: str + heartbeat_timeout: + description: + - The amount of time, in seconds, that can elapse before the lifecycle hook times out. 
+ When the lifecycle hook times out, Auto Scaling performs the default action. + You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. + - By default Amazon AWS will use 3600 (1 hour) + type: int + default_result: + description: + - Defines the action the Auto Scaling group should take when the lifecycle hook timeout + elapses or if an unexpected failure occurs. + choices: ['ABANDON', 'CONTINUE'] + default: ABANDON + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ boto3>=1.4.4 ] + +''' + +EXAMPLES = ''' +# Create / Update lifecycle hook +- ec2_asg_lifecycle_hook: + region: eu-central-1 + state: present + autoscaling_group_name: example + lifecycle_hook_name: example + transition: autoscaling:EC2_INSTANCE_LAUNCHING + heartbeat_timeout: 7000 + default_result: ABANDON + +# Delete lifecycle hook +- ec2_asg_lifecycle_hook: + region: eu-central-1 + state: absent + autoscaling_group_name: example + lifecycle_hook_name: example + +''' + +RETURN = ''' + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +def create_lifecycle_hook(connection, module): + changed = False + + lch_name = module.params.get('lifecycle_hook_name') + asg_name = module.params.get('autoscaling_group_name') + transition = module.params.get('transition') + role_arn = module.params.get('role_arn') + notification_target_arn = module.params.get('notification_target_arn') + notification_meta_data = module.params.get('notification_meta_data') + heartbeat_timeout = module.params.get('heartbeat_timeout') + default_result = module.params.get('default_result') + + lch_params = { + 'LifecycleHookName': lch_name, + 'AutoScalingGroupName': asg_name, + 'LifecycleTransition': transition + } + + if role_arn: + lch_params['RoleARN'] = role_arn + + if notification_target_arn: + lch_params['NotificationTargetARN'] = notification_target_arn + + if notification_meta_data: + lch_params['NotificationMetadata'] = notification_meta_data + + if heartbeat_timeout: + lch_params['HeartbeatTimeout'] = heartbeat_timeout + + if default_result: + lch_params['DefaultResult'] = default_result + + try: + existing_hook = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, + LifecycleHookNames=[lch_name] + )['LifecycleHooks'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") + + if not existing_hook: + changed = True + else: + # GlobalTimeout is not configurable, but exists in response. + # Removing it helps to compare both dicts in order to understand + # what changes were done. 
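+        # (put_lifecycle_hook accepts no GlobalTimeout parameter; AWS derives
+        # it from the heartbeat timeout, so it would always show as a change.)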
+ del(existing_hook[0]['GlobalTimeout']) + added, removed, modified, same = dict_compare(lch_params, existing_hook[0]) + if added or removed or modified: + changed = True + + if changed: + try: + connection.put_lifecycle_hook(**lch_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create LifecycleHook") + + return(changed) + + +def dict_compare(d1, d2): + d1_keys = set(d1.keys()) + d2_keys = set(d2.keys()) + intersect_keys = d1_keys.intersection(d2_keys) + added = d1_keys - d2_keys + removed = d2_keys - d1_keys + modified = False + for key in d1: + if d1[key] != d2[key]: + modified = True + break + + same = set(o for o in intersect_keys if d1[o] == d2[o]) + return added, removed, modified, same + + +def delete_lifecycle_hook(connection, module): + changed = False + + lch_name = module.params.get('lifecycle_hook_name') + asg_name = module.params.get('autoscaling_group_name') + + try: + all_hooks = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks") + + for hook in all_hooks['LifecycleHooks']: + if hook['LifecycleHookName'] == lch_name: + lch_params = { + 'LifecycleHookName': lch_name, + 'AutoScalingGroupName': asg_name + } + + try: + connection.delete_lifecycle_hook(**lch_params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete LifecycleHook") + else: + pass + + return(changed) + + +def main(): + argument_spec = dict( + autoscaling_group_name=dict(required=True, type='str'), + lifecycle_hook_name=dict(required=True, type='str'), + transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']), + role_arn=dict(type='str'), + notification_target_arn=dict(type='str'), + notification_meta_data=dict(type='str'), + heartbeat_timeout=dict(type='int'), + default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']), + state=dict(default='present', choices=['present', 'absent']) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['transition']]]) + state = module.params.get('state') + + connection = module.client('autoscaling') + + changed = False + + if state == 'present': + changed = create_lifecycle_hook(connection, module) + elif state == 'absent': + changed = delete_lifecycle_hook(connection, module) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py new file mode 100644 index 00000000000..fe4d95f3a81 --- /dev/null +++ b/ec2_customer_gateway.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_customer_gateway +short_description: Manage an AWS customer gateway +description: + - Manage an AWS customer gateway. +author: Michael Baydoun (@MichaelBaydoun) +requirements: [ botocore, boto3 ] +notes: + - You cannot create more than one customer gateway with the same IP address. 
If you run an identical request more than one time, the + first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent + requests do not create new customer gateway resources. + - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use + customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. +options: + bgp_asn: + description: + - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + type: int + ip_address: + description: + - Internet-routable IP address for customers gateway, must be a static address. + required: true + type: str + name: + description: + - Name of the customer gateway. + required: true + type: str + routing: + description: + - The type of routing. + choices: ['static', 'dynamic'] + default: dynamic + type: str + state: + description: + - Create or terminate the Customer Gateway. + default: present + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' + +# Create Customer Gateway +- ec2_customer_gateway: + bgp_asn: 12345 + ip_address: 1.2.3.4 + name: IndianapolisOffice + region: us-east-1 + register: cgw + +# Delete Customer Gateway +- ec2_customer_gateway: + ip_address: 1.2.3.4 + name: IndianapolisOffice + state: absent + region: us-east-1 + register: cgw +''' + +RETURN = ''' +gateway.customer_gateways: + description: details about the gateway that was created. + returned: success + type: complex + contains: + bgp_asn: + description: The Border Gateway Autonomous System Number. + returned: when exists and gateway is available. + sample: 65123 + type: str + customer_gateway_id: + description: gateway id assigned by amazon. + returned: when exists and gateway is available. + sample: cgw-cb6386a2 + type: str + ip_address: + description: ip address of your gateway device. + returned: when exists and gateway is available. + sample: 1.2.3.4 + type: str + state: + description: state of gateway. + returned: when gateway exists and is available. + sample: available + type: str + tags: + description: Any tags on the gateway. + returned: when gateway exists and is available, and when tags exist. + type: list + type: + description: encryption type. + returned: when gateway exists and is available. 
+ sample: ipsec.1 + type: str +''' + +try: + from botocore.exceptions import ClientError + HAS_BOTOCORE = True +except ImportError: + HAS_BOTOCORE = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, AWSRetry, camel_dict_to_snake_dict, + ec2_argument_spec, get_aws_connection_info) + + +class Ec2CustomerGatewayManager: + + def __init__(self, module): + self.module = module + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except ClientError as e: + module.fail_json(msg=e.message) + + @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) + def ensure_cgw_absent(self, gw_id): + response = self.ec2.delete_customer_gateway( + DryRun=False, + CustomerGatewayId=gw_id + ) + return response + + def ensure_cgw_present(self, bgp_asn, ip_address): + if not bgp_asn: + bgp_asn = 65000 + response = self.ec2.create_customer_gateway( + DryRun=False, + Type='ipsec.1', + PublicIp=ip_address, + BgpAsn=bgp_asn, + ) + return response + + def tag_cgw_name(self, gw_id, name): + response = self.ec2.create_tags( + DryRun=False, + Resources=[ + gw_id, + ], + Tags=[ + { + 'Key': 'Name', + 'Value': name + }, + ] + ) + return response + + def describe_gateways(self, ip_address): + response = self.ec2.describe_customer_gateways( + DryRun=False, + Filters=[ + { + 'Name': 'state', + 'Values': [ + 'available', + ] + }, + { + 'Name': 'ip-address', + 'Values': [ + ip_address, + ] + } + ] + ) + return response + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + bgp_asn=dict(required=False, type='int'), + ip_address=dict(required=True), + name=dict(required=True), + routing=dict(default='dynamic', choices=['dynamic', 'static']), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ('routing', 'dynamic', ['bgp_asn']) + ] + ) + + if not HAS_BOTOCORE: + module.fail_json(msg='botocore is required.') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + gw_mgr = Ec2CustomerGatewayManager(module) + + name = module.params.get('name') + + existing = gw_mgr.describe_gateways(module.params['ip_address']) + + results = dict(changed=False) + if module.params['state'] == 'present': + if existing['CustomerGateways']: + existing['CustomerGateway'] = existing['CustomerGateways'][0] + results['gateway'] = existing + if existing['CustomerGateway']['Tags']: + tag_array = existing['CustomerGateway']['Tags'] + for key, value in enumerate(tag_array): + if value['Key'] == 'Name': + current_name = value['Value'] + if current_name != name: + results['name'] = gw_mgr.tag_cgw_name( + results['gateway']['CustomerGateway']['CustomerGatewayId'], + module.params['name'], + ) + results['changed'] = True + else: + if not module.check_mode: + results['gateway'] = gw_mgr.ensure_cgw_present( + module.params['bgp_asn'], + module.params['ip_address'], + ) + results['name'] = gw_mgr.tag_cgw_name( + 
results['gateway']['CustomerGateway']['CustomerGatewayId'], + module.params['name'], + ) + results['changed'] = True + + elif module.params['state'] == 'absent': + if existing['CustomerGateways']: + existing['CustomerGateway'] = existing['CustomerGateways'][0] + results['gateway'] = existing + if not module.check_mode: + results['gateway'] = gw_mgr.ensure_cgw_absent( + existing['CustomerGateway']['CustomerGatewayId'] + ) + results['changed'] = True + + pretty_results = camel_dict_to_snake_dict(results) + module.exit_json(**pretty_results) + + +if __name__ == '__main__': + main() diff --git a/ec2_customer_gateway_facts.py b/ec2_customer_gateway_facts.py new file mode 120000 index 00000000000..2e1aec0aba5 --- /dev/null +++ b/ec2_customer_gateway_facts.py @@ -0,0 +1 @@ +ec2_customer_gateway_info.py \ No newline at end of file diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py new file mode 100644 index 00000000000..456b3f226df --- /dev/null +++ b/ec2_customer_gateway_info.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ec2_customer_gateway_info +short_description: Gather information about customer gateways in AWS +description: + - Gather information about customer gateways in AWS. + - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: Madhura Naniwadekar (@Madhura-CSI) +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters. + type: dict + customer_gateway_ids: + description: + - Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list. + type: list + elements: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# # Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all customer gateways + ec2_customer_gateway_info: + +- name: Gather information about a filtered list of customer gateways, based on tags + ec2_customer_gateway_info: + region: ap-southeast-2 + filters: + "tag:Name": test-customer-gateway + "tag:AltName": test-customer-gateway-alt + register: cust_gw_info + +- name: Gather information about a specific customer gateway by specifying customer gateway ID + ec2_customer_gateway_info: + region: ap-southeast-2 + customer_gateway_ids: + - 'cgw-48841a09' + - 'cgw-fec021ce' + register: cust_gw_info +''' + +RETURN = ''' +customer_gateways: + description: List of one or more customer gateways. 
+ returned: always + type: list + sample: [ + { + "bgp_asn": "65000", + "customer_gateway_id": "cgw-fec844ce", + "customer_gateway_name": "test-customer-gw", + "ip_address": "110.112.113.120", + "state": "available", + "tags": [ + { + "key": "Name", + "value": "test-customer-gw" + } + ], + "type": "ipsec.1" + } + ] +''' + +import json +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict + + +def date_handler(obj): + return obj.isoformat() if hasattr(obj, 'isoformat') else obj + + +def list_customer_gateways(connection, module): + params = dict() + + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids') + + try: + result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler)) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Could not describe customer gateways") + snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']] + if snaked_customer_gateways: + for customer_gateway in snaked_customer_gateways: + customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', [])) + customer_gateway_name = customer_gateway['tags'].get('Name') + if customer_gateway_name: + customer_gateway['customer_gateway_name'] = customer_gateway_name + module.exit_json(changed=False, customer_gateways=snaked_customer_gateways) + + +def main(): + + argument_spec = dict( + customer_gateway_ids=dict(default=[], type='list'), + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + mutually_exclusive=[['customer_gateway_ids', 'filters']], + supports_check_mode=True) + if module._module._name == 'ec2_customer_gateway_facts': + module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'", version='2.13') + + connection = module.client('ec2') + + list_customer_gateways(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_eip.py b/ec2_eip.py new file mode 100644 index 00000000000..dba01639c32 --- /dev/null +++ b/ec2_eip.py @@ -0,0 +1,640 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_eip +short_description: manages EC2 elastic IP (EIP) addresses. +description: + - This module can allocate or release an EIP. + - This module can associate/disassociate an EIP with instances or network interfaces. +options: + device_id: + description: + - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. + required: false + aliases: [ instance_id ] + type: str + public_ip: + description: + - The IP address of a previously allocated EIP. + - When I(public_ip=present) and device is specified, the EIP is associated with the device. 
+ - When I(public_ip=absent) and device is specified, the EIP is disassociated from the device. + aliases: [ ip ] + type: str + state: + description: + - When C(state=present), allocate an EIP or associate an existing EIP with a device. + - When C(state=absent), disassociate the EIP from the device and optionally release it. + choices: ['present', 'absent'] + default: present + type: str + in_vpc: + description: + - Allocate an EIP inside a VPC or not. + - Required if specifying an ENI with I(device_id). + default: false + type: bool + reuse_existing_ip_allowed: + description: + - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one. + default: false + type: bool + release_on_disassociation: + description: + - Whether or not to automatically release the EIP when it is disassociated. + default: false + type: bool + private_ip_address: + description: + - The primary or secondary private IP address to associate with the Elastic IP address. + type: str + allow_reassociation: + description: + - Specify this option to allow an Elastic IP address that is already associated with another + network interface or instance to be re-associated with the specified instance or interface. + default: false + type: bool + tag_name: + description: + - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse + an Elastic IP if it is tagged with I(tag_name). + type: str + tag_value: + description: + - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value). + type: str + public_ipv4_pool: + description: + - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP) + only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). + type: str + wait_timeout: + description: + - The I(wait_timeout) option does nothing and will be removed in Ansible 2.14. + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +author: "Rick Mendes (@rickmendes) " +notes: + - There may be a delay between the time the EIP is assigned and when + the cloud instance is reachable via the new address. Use wait_for and + pause to delay further playbook execution until the instance is reachable, + if necessary. + - This module returns multiple changed statuses on disassociation or release. + It returns an overall status based on any changes occurring. It also returns + individual changed statuses for disassociation and release. +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
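+
+# (Illustrative addition; the ids and addresses below are placeholders.)
+- name: disassociate an elastic IP from an instance and release it
+  ec2_eip:
+    device_id: i-1212f003
+    ip: 93.184.216.119
+    state: absent
+    release_on_disassociation: true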
+ +- name: associate an elastic IP with an instance + ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + +- name: associate an elastic IP with a device + ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + +- name: associate an elastic IP with a device and allow reassociation + ec2_eip: + device_id: eni-c8ad70f3 + public_ip: 93.184.216.119 + allow_reassociation: true + +- name: disassociate an elastic IP from an instance + ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + state: absent + +- name: disassociate an elastic IP with a device + ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + state: absent + +- name: allocate a new elastic IP and associate it with an instance + ec2_eip: + device_id: i-1212f003 + +- name: allocate a new elastic IP without associating it to anything + ec2_eip: + state: present + register: eip + +- name: output the IP + debug: + msg: "Allocated IP is {{ eip.public_ip }}" + +- name: provision new instances with ec2 + ec2: + keypair: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: true + group: webserver + count: 3 + register: ec2 + +- name: associate new elastic IPs with each of the instances + ec2_eip: + device_id: "{{ item }}" + loop: "{{ ec2.instance_ids }}" + +- name: allocate a new elastic IP inside a VPC in us-west-2 + ec2_eip: + region: us-west-2 + in_vpc: true + register: eip + +- name: output the IP + debug: + msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" + +- name: allocate eip - reuse unallocated ips (if found) with FREE tag + ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + +- name: allocate eip - reuse unallocted ips if tag reserved is nope + ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: reserved + tag_value: nope + +- name: allocate new eip - from servers given ipv4 pool + ec2_eip: + region: us-east-1 + in_vpc: true + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 + +- name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic) + ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: dev-servers + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 + +- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname + ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: reserved_for + tag_value: "{{ inventory_hostname }}" + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 +''' + +RETURN = ''' +allocation_id: + description: allocation_id of the elastic ip + returned: on success + type: str + sample: eipalloc-51aa3a6c +public_ip: + description: an elastic ip address + returned: on success + type: str + sample: 52.88.159.209 +''' + +try: + import botocore.exceptions +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list + + +def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): + if address_is_associated_with_device(ec2, module, address, device_id, is_instance): + return {'changed': False} + + # If we're in check mode, nothing else to do + if not check_mode: + if is_instance: + try: + params = dict( + InstanceId=device_id, + AllowReassociation=allow_reassociation, + ) + if 
private_ip_address:
+                    # the EC2 API parameter is 'PrivateIpAddress' (lowercase 'p' in 'Ip')
+                    params['PrivateIpAddress'] = private_ip_address
+                if address['Domain'] == 'vpc':
+                    params['AllocationId'] = address['AllocationId']
+                else:
+                    params['PublicIp'] = address['PublicIp']
+                res = ec2.associate_address(aws_retry=True, **params)
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id)
+                module.fail_json_aws(e, msg=msg)
+        else:
+            params = dict(
+                NetworkInterfaceId=device_id,
+                AllocationId=address['AllocationId'],
+                AllowReassociation=allow_reassociation,
+            )
+
+            if private_ip_address:
+                params['PrivateIpAddress'] = private_ip_address
+
+            try:
+                res = ec2.associate_address(aws_retry=True, **params)
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id)
+                module.fail_json_aws(e, msg=msg)
+        if not res:
+            # no exception is in flight here, so report the failure directly
+            module.fail_json(msg='Association failed.')
+
+    return {'changed': True}
+
+
+def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
+    if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
+        return {'changed': False}
+
+    # If we're in check mode, nothing else to do
+    if not check_mode:
+        try:
+            if address['Domain'] == 'vpc':
+                res = ec2.disassociate_address(
+                    AssociationId=address['AssociationId'], aws_retry=True
+                )
+            else:
+                res = ec2.disassociate_address(
+                    PublicIp=address['PublicIp'], aws_retry=True
+                )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Disassociation of Elastic IP failed")
+
+    return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def find_address(ec2, module, public_ip, device_id, is_instance=True):
+    """ Find an existing Elastic IP address """
+    filters = []
+    kwargs = {}
+
+    if public_ip:
+        kwargs["PublicIps"] = [public_ip]
+    elif device_id:
+        if is_instance:
+            filters.append({"Name": 'instance-id', "Values": [device_id]})
+        else:
+            filters.append({'Name': 'network-interface-id', "Values": [device_id]})
+
+    if len(filters) > 0:
+        kwargs["Filters"] = filters
+    elif len(filters) == 0 and public_ip is None:
+        return None
+
+    try:
+        addresses = ec2.describe_addresses(**kwargs)
+    except is_boto3_error_code('InvalidAddress.NotFound') as e:
+        # If we're releasing and we can't find it, it's already gone...
+        if module.params.get('state') == 'absent':
+            module.exit_json(changed=False)
+        module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+    addresses = addresses["Addresses"]
+    if len(addresses) == 1:
+        return addresses[0]
+    elif len(addresses) > 1:
+        msg = "Found more than one address using args {0}".format(kwargs)
+        msg += " Addresses found: {0}".format(addresses)
+        module.fail_json(msg=msg)
+
+
+def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True):
+    """ Check if the elastic IP is currently associated with the device """
+    address = find_address(ec2, module, address["PublicIp"], device_id, is_instance)
+    if address:
+        if is_instance:
+            if "InstanceId" in address and address["InstanceId"] == device_id:
+                return address
+        else:
+            if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id:
+                return address
+    return False
+
+
+def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None):
+    """ Allocate a new elastic IP address (when needed) and return it """
+    if reuse_existing_ip_allowed:
+        filters = []
+        if not domain:
+            domain = 'standard'
+        filters.append({'Name': 'domain', "Values": [domain]})
+
+        if tag_dict is not None:
+            filters += ansible_dict_to_boto3_filter_list(tag_dict)
+
+        try:
+            all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+        all_addresses = all_addresses["Addresses"]
+
+        if domain == 'vpc':
+            unassociated_addresses = [a for a in all_addresses
+                                      if not a.get('AssociationId', None)]
+        else:
+            unassociated_addresses = [a for a in all_addresses
+                                      if not a.get('InstanceId')]
+        if unassociated_addresses:
+            return unassociated_addresses[0], False
+
+    if public_ipv4_pool:
+        return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True
+
+    try:
+        # Domain may only be passed when it is set; None fails parameter validation
+        if domain:
+            result = ec2.allocate_address(Domain=domain, aws_retry=True), True
+        else:
+            result = ec2.allocate_address(aws_retry=True), True
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+    return result
+
+
+def release_address(ec2, module, address, check_mode):
+    """ Release a previously allocated elastic IP address """
+
+    # If we're in check mode, nothing else to do
+    if not check_mode:
+        try:
+            result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Couldn't release Elastic IP address")
+
+    return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def describe_eni_with_backoff(ec2, module, device_id):
+    try:
+        return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id])
+    except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e:
+        module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+
+
+def find_device(ec2, module, device_id, is_instance=True):
+    """ Attempt to find the EC2 instance and return it """
+
+    if is_instance:
+        try:
+            paginator = ec2.get_paginator('describe_instances')
+            reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]'))
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Couldn't get list of instances")
+
+        # Each reservation wraps the instances launched together; filtering on a
+        # single instance ID yields at most one reservation with one instance.
+        if len(reservations) == 1:
+            instances = reservations[0]['Instances']
+            if len(instances) == 1:
+                return instances[0]
+    else:
+        try:
+            interfaces = describe_eni_with_backoff(ec2, module, device_id)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+        interfaces = interfaces["NetworkInterfaces"]
+        if len(interfaces) == 1:
+            return interfaces[0]
+
+
+def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
+                   reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True):
+    changed = False
+
+    # Allocate an address (when needed) since no existing one was found
+    if not address:
+        if check_mode:
+            return {'changed': True}
+
+        address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode)
+
+    if device_id:
+        # Allocate an IP for instance since no public_ip was provided
+        if is_instance:
+            instance = find_device(ec2, module, device_id)
+            if reuse_existing_ip_allowed:
+                if instance.get('VpcId') and domain is None:
+                    msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc"
+                    module.fail_json(msg=msg)
+
+            # Associate address object (provided or allocated) with instance
+            assoc_result = associate_ip_and_device(
+                ec2, module, address, private_ip_address, device_id, allow_reassociation,
+                check_mode
+            )
+        else:
+            instance = find_device(ec2, module, device_id, is_instance=False)
+            # Associate address object (provided or allocated) with the network interface
+            assoc_result = associate_ip_and_device(
+                ec2, module, address, private_ip_address, device_id, allow_reassociation,
+                check_mode, is_instance=False
+            )
+
+        changed = changed or assoc_result['changed']
+
+    return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']}
+
+
+def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True):
+    if not address:
+        return {'changed': False}
+
+    # disassociating address from instance
+    if device_id:
+        if is_instance:
+            return disassociate_ip_and_device(
+                ec2, module, address, device_id, check_mode
+            )
+        else:
+            return disassociate_ip_and_device(
+                ec2, module, address, device_id, check_mode, is_instance=False
+            )
+    # releasing address
+    else:
+        return release_address(ec2, module, address, check_mode)
+
+
+def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool):
+    # type: (EC2Connection, AnsibleModule, str, bool, str) -> Address
+    """ Allocate a new elastic IP address, optionally from a BYOIP pool """
+    params = {}
+
+    if domain is not None:
+        params['Domain'] = domain
+
+    if public_ipv4_pool is not None:
+        params['PublicIpv4Pool'] = public_ipv4_pool
+
+    if check_mode:
+        params['DryRun'] = True
+
+    try:
+        result = ec2.allocate_address(aws_retry=True, **params)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+    return result
+
+
+def generate_tag_dict(module, tag_name, tag_value):
+    # type: (AnsibleModule, str, str) -> Optional[Dict]
+    """ Generates a dictionary to be passed as a filter to Amazon """
+    if tag_name and not tag_value:
+        if tag_name.startswith('tag:'):
+            tag_name = tag_name[len('tag:'):]
+        return {'tag-key': tag_name}
+
+    elif tag_name and tag_value:
+        if not tag_name.startswith('tag:'):
+            tag_name = 'tag:' + tag_name
+        return {tag_name: tag_value}
+
+    elif tag_value and not tag_name:
+        module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')")
+
+
+def main():
+    argument_spec = dict(
+        device_id=dict(required=False, aliases=['instance_id']),
+        public_ip=dict(required=False, aliases=['ip']),
+        state=dict(required=False, default='present',
+                   choices=['present', 'absent']),
+        in_vpc=dict(required=False, type='bool', default=False),
+        reuse_existing_ip_allowed=dict(required=False, type='bool',
+                                       default=False),
+        release_on_disassociation=dict(required=False, type='bool', default=False),
+        allow_reassociation=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', removed_in_version='2.14'),
+        private_ip_address=dict(),
+        tag_name=dict(),
+        tag_value=dict(),
+        public_ipv4_pool=dict()
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_by={
+            'private_ip_address': ['device_id'],
+        },
+    )
+
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    device_id = module.params.get('device_id')
+    instance_id = module.params.get('instance_id')
+    public_ip = module.params.get('public_ip')
+    private_ip_address = module.params.get('private_ip_address')
+    state = module.params.get('state')
+    in_vpc = module.params.get('in_vpc')
+    domain = 'vpc' if in_vpc else None
+    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
+    release_on_disassociation = module.params.get('release_on_disassociation')
+    allow_reassociation = module.params.get('allow_reassociation')
+    tag_name = module.params.get('tag_name')
+    tag_value = module.params.get('tag_value')
+    public_ipv4_pool = module.params.get('public_ipv4_pool')
+
+    if instance_id:
+        warnings = ["instance_id is no longer used, please use device_id going forward"]
+        is_instance = True
+        device_id = instance_id
+    else:
+        if device_id and device_id.startswith('i-'):
+            is_instance = True
+        elif device_id:
+            if device_id.startswith('eni-') and not in_vpc:
+                module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
+            is_instance = False
+
+    tag_dict = generate_tag_dict(module, tag_name, tag_value)
+
+    try:
+        if device_id:
+            address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
+        else:
+            address = find_address(ec2, module, public_ip, None)
+
+        if state == 'present':
+            if device_id:
+                result = ensure_present(
+                    ec2, module, domain, address, private_ip_address, device_id,
+                    reuse_existing_ip_allowed, allow_reassociation,
+                    module.check_mode, is_instance=is_instance
+                )
+            else:
+                if address:
+                    changed = False
+                else:
+                    address, changed = allocate_address(
+                        ec2, module, domain, reuse_existing_ip_allowed,
+                        module.check_mode, tag_dict, public_ipv4_pool
+                    )
+                result = {
+                    'changed': changed,
+                    'public_ip': address['PublicIp'],
+                    'allocation_id': address['AllocationId']
+                }
+        else:
+            if device_id:
+                disassociated = ensure_absent(
+                    ec2, module, address, device_id, module.check_mode, is_instance=is_instance
+                )
+
+                if release_on_disassociation and disassociated['changed']:
+                    released = release_address(ec2, module, address, module.check_mode)
+                    result = {
+                        'changed': True,
+                        'disassociated': disassociated,
+                        'released': released
+                    }
+                else:
+                    result = {
+                        'changed': disassociated['changed'],
+                        'disassociated': disassociated,
+                        'released': {'changed': False}
+                    }
+            else:
+                # Nothing to release if the address no longer exists
+                if address:
+                    released = release_address(ec2, module, address, module.check_mode)
+                else:
+                    released = {'changed': False}
+                result = {
+                    'changed': released['changed'],
+                    'disassociated': {'changed': False},
+                    'released': released
+                }
+
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e)
+
+    if instance_id:
+        result['warnings'] = warnings
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_eip_facts.py b/ec2_eip_facts.py
new file mode 120000
index 00000000000..0ba519697bd
--- /dev/null
+++ b/ec2_eip_facts.py
@@ -0,0 +1 @@
+ec2_eip_info.py
\ No newline at end of file
diff --git a/ec2_eip_info.py b/ec2_eip_info.py
new file mode 100644
index 00000000000..de76a29b414
--- /dev/null
+++ b/ec2_eip_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip_info
+short_description: List EC2 EIP details
+description:
+    - List details of EC2 Elastic IP addresses.
+    - This module was called C(ec2_eip_facts) before Ansible 2.9. The usage did not change.
+author: "Brad Macpherson (@iiibrad)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and filter
+        value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
+        for possible filters. Filter names and values are case sensitive.
+    required: false
+    default: {}
+    type: dict
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+# List all EIP addresses in the current region.
+- ec2_eip_info:
+  register: regional_eip_addresses
+
+# List all EIP addresses for a VM.
+- ec2_eip_info:
+    filters:
+      instance-id: i-123456789
+  register: my_vm_eips
+
+- debug: msg="{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}"
+
+# List all EIP addresses for several VMs.
+- ec2_eip_info:
+    filters:
+      instance-id:
+        - i-123456789
+        - i-987654321
+  register: my_vms_eips
+
+# List all EIP addresses using the 'Name' tag as a filter.
+- ec2_eip_info:
+    filters:
+      tag:Name: www.example.com
+  register: my_vms_eips
+
+# List all EIP addresses using the Allocation-id as a filter
+- ec2_eip_info:
+    filters:
+      allocation-id: eipalloc-64de1b01
+  register: my_vms_eips
+
+# Set the variable eip_alloc to the value of the first allocation_id
+# and set the variable my_pub_ip to the value of the first public_ip
+- set_fact:
+    eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
+    my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
+
+'''
+
+
+RETURN = '''
+addresses:
+  description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
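+  # The keys below mirror the boto3 DescribeAddresses response converted to
+  # snake_case; tag lists are additionally flattened into a plain dictionary.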
+ returned: on success + type: list + sample: [{ + "allocation_id": "eipalloc-64de1b01", + "association_id": "eipassoc-0fe9ce90d6e983e97", + "domain": "vpc", + "instance_id": "i-01020cfeb25b0c84f", + "network_interface_id": "eni-02fdeadfd4beef9323b", + "network_interface_owner_id": "0123456789", + "private_ip_address": "10.0.0.1", + "public_ip": "54.81.104.1", + "tags": { + "Name": "test-vm-54.81.104.1" + } + }] + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict) +try: + from botocore.exceptions import (BotoCoreError, ClientError) +except ImportError: + pass # caught by imported AnsibleAWSModule + + +def get_eips_details(module): + connection = module.client('ec2') + filters = module.params.get("filters") + try: + response = connection.describe_addresses( + Filters=ansible_dict_to_boto3_filter_list(filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws( + e, + msg="Error retrieving EIPs") + + addresses = camel_dict_to_snake_dict(response)['addresses'] + for address in addresses: + if 'tags' in address: + address['tags'] = boto3_tag_list_to_ansible_dict(address['tags']) + return addresses + + +def main(): + module = AnsibleAWSModule( + argument_spec=dict( + filters=dict(type='dict', default={}) + ), + supports_check_mode=True + ) + if module._module._name == 'ec2_eip_facts': + module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", version='2.13') + + module.exit_json(changed=False, addresses=get_eips_details(module)) + + +if __name__ == '__main__': + main() diff --git a/ec2_elb.py b/ec2_elb.py new file mode 100644 index 00000000000..a74887ee41d --- /dev/null +++ b/ec2_elb.py @@ -0,0 +1,373 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_elb +short_description: De-registers or registers instances from EC2 ELBs +description: + - This module de-registers or registers an AWS EC2 instance from the ELBs + that it belongs to. + - Returns fact "ec2_elbs" which is a list of elbs attached to the instance + if state=absent is passed as an argument. + - Will be marked changed when called only if there are ELBs found to operate on. +author: "John Jarvis (@jarv)" +options: + state: + description: + - register or deregister the instance + required: true + choices: ['present', 'absent'] + type: str + instance_id: + description: + - EC2 Instance ID + required: true + type: str + ec2_elbs: + description: + - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. + type: list + enable_availability_zone: + description: + - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already + been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. + type: bool + default: 'yes' + wait: + description: + - Wait for instance registration or deregistration to complete successfully before returning. 
+    type: bool
+    default: 'yes'
+  wait_timeout:
+    description:
+      - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
+        If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
+    default: 0
+    type: int
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = """
+# basic pre_task and post_task example
+pre_tasks:
+  - name: Gathering ec2 facts
+    action: ec2_facts
+  - name: Instance De-register
+    local_action:
+      module: ec2_elb
+      instance_id: "{{ ansible_ec2_instance_id }}"
+      state: absent
+roles:
+  - myrole
+post_tasks:
+  - name: Instance Register
+    local_action:
+      module: ec2_elb
+      instance_id: "{{ ansible_ec2_instance_id }}"
+      ec2_elbs: "{{ item }}"
+      state: present
+    loop: "{{ ec2_elbs }}"
+"""
+
+import time
+
+try:
+    import boto
+    import boto.ec2
+    import boto.ec2.autoscale
+    import boto.ec2.elb
+    from boto.regioninfo import RegionInfo
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
+                                                                         get_aws_connection_info)
+
+
+class ElbManager:
+    """Handles EC2 instance ELB registration and de-registration"""
+
+    def __init__(self, module, instance_id=None, ec2_elbs=None,
+                 region=None, **aws_connect_params):
+        self.module = module
+        self.instance_id = instance_id
+        self.region = region
+        self.aws_connect_params = aws_connect_params
+        self.lbs = self._get_instance_lbs(ec2_elbs)
+        self.changed = False
+
+    def deregister(self, wait, timeout):
+        """De-register the instance from all ELBs and wait for the ELB
+        to report it out-of-service"""
+
+        for lb in self.lbs:
+            initial_state = self._get_instance_health(lb)
+            if initial_state is None:
+                # Instance isn't registered with this load
+                # balancer. Ignore it and try the next one.
+                continue
+
+            # The instance is not associated with any load balancer so nothing to do
+            if not self._get_instance_lbs():
+                return
+
+            lb.deregister_instances([self.instance_id])
+
+            # The ELB is changing state in some way. Either an instance that's
+            # InService is moving to OutOfService, or an instance that's
+            # already OutOfService is being deregistered.
+            self.changed = True
+
+            if wait:
+                self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
+
+    def register(self, wait, enable_availability_zone, timeout):
+        """Register the instance for all ELBs and wait for the ELB
+        to report the instance in-service"""
+        for lb in self.lbs:
+            initial_state = self._get_instance_health(lb)
+
+            if enable_availability_zone:
+                self._enable_availability_zone(lb)
+
+            lb.register_instances([self.instance_id])
+
+            if wait:
+                self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
+            else:
+                # We cannot assume no change was made if we don't wait
+                # to find out
+                self.changed = True
+
+    def exists(self, lbtest):
+        """ Verify that the named ELB actually exists """
+
+        found = False
+        for lb in self.lbs:
+            if lb.name == lbtest:
+                found = True
+                break
+        return found
+
+    def _enable_availability_zone(self, lb):
+        """Enable the current instance's availability zone in the provided lb.
+        Returns True if the zone was enabled or False if no change was made.
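+        The result is determined by re-reading lb.availability_zones after the
+        enable_zones call rather than trusting the call's return value.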
+        lb: load balancer"""
+        instance = self._get_instance()
+        if instance.placement in lb.availability_zones:
+            return False
+
+        lb.enable_zones(zones=instance.placement)
+
+        # If successful, the new zone will have been added to
+        # lb.availability_zones
+        return instance.placement in lb.availability_zones
+
+    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
+        """Wait for an ELB to change state
+        lb: load balancer
+        awaited_state : state to poll for (string)"""
+
+        wait_timeout = time.time() + timeout
+        while True:
+            instance_state = self._get_instance_health(lb)
+
+            if not instance_state:
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: Invalid Instance")
+                self.module.fail_json(msg=msg % (self.instance_id, lb))
+
+            if instance_state.state == awaited_state:
+                # Check the current state against the initial state, and only set
+                # changed if they are different.
+                if (initial_state is None) or (instance_state.state != initial_state.state):
+                    self.changed = True
+                break
+            elif self._is_instance_state_pending(instance_state):
+                # If it's pending, we'll skip further checks and continue waiting
+                pass
+            elif (awaited_state == 'InService'
+                  and instance_state.reason_code == "Instance"
+                  and time.time() >= wait_timeout):
+                # If the reason_code for the instance being out of service is
+                # "Instance" this indicates a failure state, e.g. the instance
+                # has failed a health check or the ELB does not have the
+                # instance's availability zone enabled. The exact reason why is
+                # described in InstanceState.description.
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: %s")
+                self.module.fail_json(msg=msg % (self.instance_id,
+                                                 lb,
+                                                 instance_state.description))
+            time.sleep(1)
+
+    def _is_instance_state_pending(self, instance_state):
+        """
+        Determines whether the instance_state is "pending", meaning there is
+        an operation under way to bring it in service.
+        """
+        # This is messy, because AWS provides no way to distinguish between
+        # an instance that is OutOfService because it's pending vs. OutOfService
+        # because it's failing health checks. So we're forced to analyze the
+        # description, which is likely to be brittle.
+        return (instance_state and 'pending' in instance_state.description)
+
+    def _get_instance_health(self, lb):
+        """
+        Check instance health, should return status object or None under
+        certain error conditions.
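+        Returns None when the ELB reports InvalidInstance for self.instance_id.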
+        """
+        try:
+            status = lb.get_instance_health([self.instance_id])[0]
+        except boto.exception.BotoServerError as e:
+            if e.error_code == 'InvalidInstance':
+                return None
+            else:
+                raise
+        return status
+
+    def _get_instance_lbs(self, ec2_elbs=None):
+        """Returns a list of ELBs attached to self.instance_id
+        ec2_elbs: an optional list of elb names that will be used
+                  for elb lookup instead of returning what elbs
+                  are attached to self.instance_id"""
+
+        if not ec2_elbs:
+            ec2_elbs = self._get_auto_scaling_group_lbs()
+
+        try:
+            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+
+        elbs = []
+        marker = None
+        while True:
+            try:
+                newelbs = elb.get_all_load_balancers(marker=marker)
+                marker = newelbs.next_marker
+                elbs.extend(newelbs)
+                if not marker:
+                    break
+            except TypeError:
+                # Older versions of boto do not allow for params
+                elbs = elb.get_all_load_balancers()
+                break
+
+        if ec2_elbs:
+            lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
+        else:
+            lbs = []
+            for lb in elbs:
+                for info in lb.instances:
+                    if self.instance_id == info.id:
+                        lbs.append(lb)
+        return lbs
+
+    def _get_auto_scaling_group_lbs(self):
+        """Returns a list of ELBs associated with self.instance_id
+           indirectly through its auto scaling group membership"""
+
+        try:
+            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+
+        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
+        if len(asg_instances) > 1:
+            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+        if not asg_instances:
+            asg_elbs = []
+        else:
+            asg_name = asg_instances[0].group_name
+
+            asgs = asg.get_all_groups([asg_name])
+            if len(asgs) != 1:
+                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+            asg_elbs = asgs[0].load_balancers
+
+        return asg_elbs
+
+    def _get_instance(self):
+        """Returns a boto.ec2.InstanceObject for self.instance_id"""
+        try:
+            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+            self.module.fail_json(msg=str(e))
+        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state={'required': True, 'choices': ['present', 'absent']},
+        instance_id={'required': True},
+        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
+        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+        wait={'required': False, 'default': True, 'type': 'bool'},
+        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+    if not region:
+        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+    ec2_elbs = module.params['ec2_elbs']
+    wait = module.params['wait']
+    enable_availability_zone = module.params['enable_availability_zone']
+    timeout = module.params['wait_timeout']
+
+    if module.params['state'] == 'present' and ec2_elbs is None:
+        module.fail_json(msg="ELBs are required for registration")
+
+    instance_id = module.params['instance_id']
+    elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
+
+    if ec2_elbs is not None:
+        for elb in ec2_elbs:
+            if not elb_man.exists(elb):
+                msg = "ELB %s does not exist" % elb
+                module.fail_json(msg=msg)
+
+    if not module.check_mode:
+        if module.params['state'] == 'present':
+            elb_man.register(wait, enable_availability_zone, timeout)
+        elif module.params['state'] == 'absent':
+            elb_man.deregister(wait, timeout)
+
+    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
+    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
+
+    module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_elb_facts.py b/ec2_elb_facts.py
new file mode 120000
index 00000000000..a029c6d0b08
--- /dev/null
+++ b/ec2_elb_facts.py
@@ -0,0 +1 @@
+ec2_elb_info.py
\ No newline at end of file
diff --git a/ec2_elb_info.py b/ec2_elb_info.py
new file mode 100644
index 00000000000..b431c9c98e5
--- /dev/null
+++ b/ec2_elb_info.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_elb_info
+short_description: Gather information about EC2 Elastic Load Balancers in AWS
+description:
+    - Gather information about EC2 Elastic Load Balancers in AWS.
+    - This module was called C(ec2_elb_facts) before Ansible 2.9. The usage did not change.
+author:
+  - "Michael Schultz (@mjschultz)"
+  - "Fernando Jose Pando (@nand0p)"
+options:
+  names:
+    description:
+      - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
+    type: list
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
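+
+# The names parameter also accepts a list held in a variable; `my_elb_names`
+# below is a placeholder you would define elsewhere (illustrative only).
+- action:
+    module: ec2_elb_info
+    names: "{{ my_elb_names }}"
+  register: elb_info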
+# Output format tries to match ec2_elb_lb module input parameters + +# Gather information about all ELBs +- action: + module: ec2_elb_info + register: elb_info + +- action: + module: debug + msg: "{{ item.dns_name }}" + loop: "{{ elb_info.elbs }}" + +# Gather information about a particular ELB +- action: + module: ec2_elb_info + names: frontend-prod-elb + register: elb_info + +- action: + module: debug + msg: "{{ elb_info.elbs.0.dns_name }}" + +# Gather information about a set of ELBs +- action: + module: ec2_elb_info + names: + - frontend-prod-elb + - backend-prod-elb + register: elb_info + +- action: + module: debug + msg: "{{ item.dns_name }}" + loop: "{{ elb_info.elbs }}" + +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + AWSRetry, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, +) + +try: + import boto.ec2.elb + from boto.ec2.tag import Tag + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +class ElbInformation(object): + """Handles ELB information.""" + + def __init__(self, + module, + names, + region, + **aws_connect_params): + + self.module = module + self.names = names + self.region = region + self.aws_connect_params = aws_connect_params + self.connection = self._get_elb_connection() + + def _get_tags(self, elbname): + params = {'LoadBalancerNames.member.1': elbname} + elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) + return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key')) + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def _get_elb_connection(self): + return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) + + def _get_elb_listeners(self, listeners): + listener_list = [] + + for listener in listeners: + listener_dict = { + 'load_balancer_port': listener[0], + 'instance_port': listener[1], + 'protocol': listener[2], + 'instance_protocol': listener[3] + } + + try: + ssl_certificate_id = listener[4] + except IndexError: + pass + else: + if ssl_certificate_id: + listener_dict['ssl_certificate_id'] = ssl_certificate_id + + listener_list.append(listener_dict) + + return listener_list + + def _get_health_check(self, health_check): + protocol, port_path = health_check.target.split(':') + try: + port, path = port_path.split('/', 1) + path = '/{0}'.format(path) + except ValueError: + port = port_path + path = None + + health_check_dict = { + 'ping_protocol': protocol.lower(), + 'ping_port': int(port), + 'response_timeout': health_check.timeout, + 'interval': health_check.interval, + 'unhealthy_threshold': health_check.unhealthy_threshold, + 'healthy_threshold': health_check.healthy_threshold, + } + + if path: + health_check_dict['ping_path'] = path + return health_check_dict + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def _get_elb_info(self, elb): + elb_info = { + 'name': elb.name, + 'zones': elb.availability_zones, + 'dns_name': elb.dns_name, + 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name, + 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id, + 'hosted_zone_name': elb.canonical_hosted_zone_name, + 'hosted_zone_id': elb.canonical_hosted_zone_name_id, + 'instances': [instance.id for instance in elb.instances], + 'listeners': self._get_elb_listeners(elb.listeners), + 'scheme': elb.scheme, + 'security_groups': elb.security_groups, + 'health_check': self._get_health_check(elb.health_check), + 
'subnets': elb.subnets, + 'instances_inservice': [], + 'instances_inservice_count': 0, + 'instances_outofservice': [], + 'instances_outofservice_count': 0, + 'instances_inservice_percent': 0.0, + 'tags': self._get_tags(elb.name) + } + + if elb.vpc_id: + elb_info['vpc_id'] = elb.vpc_id + + if elb.instances: + instance_health = self.connection.describe_instance_health(elb.name) + elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService'] + elb_info['instances_inservice_count'] = len(elb_info['instances_inservice']) + elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService'] + elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice']) + try: + elb_info['instances_inservice_percent'] = ( + float(elb_info['instances_inservice_count']) / + float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count']) + ) * 100. + except ZeroDivisionError: + elb_info['instances_inservice_percent'] = 0. + return elb_info + + def list_elbs(self): + elb_array, token = [], None + get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) + while True: + all_elbs = get_elb_with_backoff(marker=token) + token = all_elbs.next_marker + + if all_elbs: + if self.names: + for existing_lb in all_elbs: + if existing_lb.name in self.names: + elb_array.append(existing_lb) + else: + elb_array.extend(all_elbs) + else: + break + + if token is None: + break + + return list(map(self._get_elb_info, elb_array)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + names={'default': [], 'type': 'list'} + ) + ) + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + if module._name == 'ec2_elb_facts': + module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", version='2.13') + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + try: + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region must be specified") + + names = module.params['names'] + elb_information = ElbInformation( + module, names, region, **aws_connect_params) + + ec2_info_result = dict(changed=False, + elbs=elb_information.list_elbs()) + + except BotoServerError as err: + module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message), + exception=traceback.format_exc()) + + module.exit_json(**ec2_info_result) + + +if __name__ == '__main__': + main() diff --git a/ec2_instance.py b/ec2_instance.py new file mode 100644 index 00000000000..ea7f49c5f32 --- /dev/null +++ b/ec2_instance.py @@ -0,0 +1,1803 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ec2_instance +short_description: Create & manage EC2 instances +description: + - Create and manage AWS EC2 instances. + - > + Note: This module does not support creating + L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(ec2) module + can create and manage spot instances. 
+author:
+  - Ryan Scott Brown (@ryansb)
+requirements: [ "boto3", "botocore" ]
+options:
+  instance_ids:
+    description:
+      - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+    type: list
+  state:
+    description:
+      - Goal state for the instances.
+    choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
+    default: present
+    type: str
+  wait:
+    description:
+      - Whether or not to wait for the desired state (use wait_timeout to customize this).
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - How long to wait (in seconds) for the instance to finish booting/terminating.
+    default: 600
+    type: int
+  instance_type:
+    description:
+      - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+        Only required when instance is not already present.
+    default: t2.micro
+    type: str
+  user_data:
+    description:
+      - Opaque blob of data which is made available to the ec2 instance.
+    type: str
+  tower_callback:
+    description:
+      - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
+      - Mutually exclusive with I(user_data).
+      - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
+      - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
+    type: dict
+    suboptions:
+      tower_address:
+        description:
+          - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
+        type: str
+      job_template_id:
+        description:
+          - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+).
+        type: str
+      host_config_key:
+        description:
+          - Host configuration secret key generated by the Tower job template.
+        type: str
+  tags:
+    description:
+      - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
+    type: dict
+  purge_tags:
+    description:
+      - Delete any tags not specified in the task that are on the instance.
+        This means you have to specify all the desired tags on each task affecting an instance.
+    default: false
+    type: bool
+  image:
+    description:
+      - An image to use for the instance. The M(ec2_ami_info) module may be used to retrieve images.
+        One of I(image) or I(image_id) is required when instance is not already present.
+    type: dict
+    suboptions:
+      id:
+        description:
+          - The AMI ID.
+        type: str
+      ramdisk:
+        description:
+          - Overrides the AMI's default ramdisk ID.
+        type: str
+      kernel:
+        description:
+          - a string AKI to override the AMI kernel.
+  image_id:
+    description:
+      - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when instance is not already present.
+      - This is an alias for I(image.id).
+    type: str
+  security_groups:
+    description:
+      - A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
+    type: list
+  security_group:
+    description:
+      - A security group ID or name. Mutually exclusive with I(security_groups).
+    type: str
+  name:
+    description:
+      - The Name tag for the instance.
+    type: str
+  vpc_subnet_id:
+    description:
+      - The subnet ID in which to launch the instance (VPC).
+        If none is provided, ec2_instance will choose the default zone of the default VPC.
+    aliases: ['subnet_id']
+    type: str
+  network:
+    description:
+      - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
+        containing specifications for a single network interface.
+      - Use the ec2_eni module to create ENIs with special settings.
+    type: dict
+    suboptions:
+      interfaces:
+        description:
+          - a list of ENI IDs (strings) or a list of objects containing the key I(id).
+        type: list
+      assign_public_ip:
+        description:
+          - when true assigns a public IP address to the interface
+        type: bool
+      private_ip_address:
+        description:
+          - an IPv4 address to assign to the interface
+        type: str
+      ipv6_addresses:
+        description:
+          - a list of IPv6 addresses to assign to the network interface
+        type: list
+      source_dest_check:
+        description:
+          - controls whether source/destination checking is enabled on the interface
+        type: bool
+      description:
+        description:
+          - a description for the network interface
+        type: str
+      private_ip_addresses:
+        description:
+          - a list of IPv4 addresses to assign to the network interface
+        type: list
+      subnet_id:
+        description:
+          - the subnet to connect the network interface to
+        type: str
+      delete_on_termination:
+        description:
+          - Delete the interface when the instance it is attached to is
+            terminated.
+        type: bool
+      device_index:
+        description:
+          - The index of the interface to modify
+        type: int
+      groups:
+        description:
+          - a list of security group IDs to attach to the interface
+        type: list
+  volumes:
+    description:
+      - A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage.
+      - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
+        ebs.iops, and ebs.delete_on_termination.
+      - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+    type: list
+  launch_template:
+    description:
+      - The EC2 launch template to base instance configuration on.
+    type: dict
+    suboptions:
+      id:
+        description:
+          - the ID of the launch template (optional if name is specified).
+        type: str
+      name:
+        description:
+          - the pretty name of the launch template (optional if id is specified).
+        type: str
+      version:
+        description:
+          - the specific version of the launch template to use. If unspecified, the template default is chosen.
+  key_name:
+    description:
+      - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
+    type: str
+  availability_zone:
+    description:
+      - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
+      - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
+    type: str
+  instance_initiated_shutdown_behavior:
+    description:
+      - Whether to stop or terminate an instance upon shutdown.
+    choices: ['stop', 'terminate']
+    type: str
+  tenancy:
+    description:
+      - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+    choices: ['dedicated', 'default']
+    type: str
+  termination_protection:
+    description:
+      - Whether to enable termination protection.
+        This module will not terminate an instance with termination protection active, it must be turned off first.
+    type: bool
+  cpu_credit_specification:
+    description:
+      - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
+      - Choose I(unlimited) to enable buying additional CPU credits.
+    choices: ['unlimited', 'standard']
+    type: str
+  cpu_options:
+    description:
+      - Reduce the number of vCPUs exposed to the instance.
+      - Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
+      - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
+      - Requires botocore >= 1.10.16
+    type: dict
+    suboptions:
+      threads_per_core:
+        description:
+          - Select the number of threads per core to enable. Disable or Enable Intel HT.
+        choices: [1, 2]
+        required: true
+        type: int
+      core_count:
+        description:
+          - Set the number of cores to enable.
+        required: true
+        type: int
+  detailed_monitoring:
+    description:
+      - Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting.
+    type: bool
+  ebs_optimized:
+    description:
+      - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+    type: bool
+  filters:
+    description:
+      - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
+        consists of a filter key and a filter value. See
+        U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
+        for possible filters. Filter names and values are case sensitive.
+      - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
+        subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
+    type: dict
+  instance_role:
+    description:
+      - The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in arn format
+        then the ListInstanceProfiles permission must also be granted.
+        U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided,
+        the role with a matching name will be used from the active AWS account.
+    type: str
+  placement_group:
+    description:
+      - The placement group that needs to be assigned to the instance.
+    type: str
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Terminate every running instance in a region. Use with EXTREME caution.
+- ec2_instance: + state: absent + filters: + instance-state-name: running + +# restart a particular instance by its ID +- ec2_instance: + state: restarted + instance_ids: + - i-12345678 + +# start an instance with a public IP address +- ec2_instance: + name: "public-compute-instance" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + instance_type: c5.large + security_group: default + network: + assign_public_ip: true + image_id: ami-123456 + tags: + Environment: Testing + +# start an instance and Add EBS +- ec2_instance: + name: "public-withebs-instance" + vpc_subnet_id: subnet-5ca1ab1e + instance_type: t2.micro + key_name: "prod-ssh-key" + security_group: default + volumes: + - device_name: /dev/sda1 + ebs: + volume_size: 16 + delete_on_termination: true + +# start an instance with a cpu_options +- ec2_instance: + name: "public-cpuoption-instance" + vpc_subnet_id: subnet-5ca1ab1e + tags: + Environment: Testing + instance_type: c4.large + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + cpu_options: + core_count: 1 + threads_per_core: 1 + +# start an instance and have it begin a Tower callback on boot +- ec2_instance: + name: "tower-callback-test" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + security_group: default + tower_callback: + # IP or hostname of tower server + tower_address: 1.2.3.4 + job_template_id: 876 + host_config_key: '[secret config key goes here]' + network: + assign_public_ip: true + image_id: ami-123456 + cpu_credit_specification: unlimited + tags: + SomeThing: "A value" + +# start an instance with ENI (An existing ENI ID is required) +- ec2_instance: + name: "public-eni-instance" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + network: + interfaces: + - id: "eni-12345" + tags: + Env: "eni_on" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + instance_type: t2.micro + image_id: ami-123456 + +# add second ENI interface +- ec2_instance: + name: "public-eni-instance" + network: + interfaces: + - id: "eni-12345" + - id: "eni-67890" + image_id: ami-123456 + tags: + Env: "eni_on" + instance_type: t2.micro +''' + +RETURN = ''' +instances: + description: a list of ec2 instances + returned: when wait == true + type: complex + contains: + ami_launch_index: + description: The AMI launch index, which can be used to find this instance in the launch group. + returned: always + type: int + sample: 0 + architecture: + description: The architecture of the image + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/sdh or xvdh). + returned: always + type: str + sample: /dev/sdh + ebs: + description: Parameters used to automatically set up EBS volumes when the instance is launched. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. 
+ returned: always + type: str + sample: attached + volume_id: + description: The ID of the EBS volume + returned: always + type: str + sample: vol-12345678 + client_token: + description: The idempotency token you provided when you launched the instance, if applicable. + returned: always + type: str + sample: mytoken + ebs_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + hypervisor: + description: The hypervisor type of the instance. + returned: always + type: str + sample: xen + iam_instance_profile: + description: The IAM instance profile associated with the instance, if applicable. + returned: always + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the instance profile. + returned: always + type: str + sample: "arn:aws:iam::000012345678:instance-profile/myprofile" + id: + description: The ID of the instance profile + returned: always + type: str + sample: JFJ397FDG400FG9FD1N + image_id: + description: The ID of the AMI used to launch the instance. + returned: always + type: str + sample: ami-0011223344 + instance_id: + description: The ID of the instance. + returned: always + type: str + sample: i-012345678 + instance_type: + description: The instance type size of the running instance. + returned: always + type: str + sample: t2.micro + key_name: + description: The name of the key pair, if this instance was launched with an associated key pair. + returned: always + type: str + sample: my-key + launch_time: + description: The time the instance was launched. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + monitoring: + description: The monitoring for the instance. + returned: always + type: complex + contains: + state: + description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. + returned: always + type: str + sample: disabled + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + association: + description: The association information for an Elastic IPv4 associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + attachment: + description: The network interface attachment. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + attachment_id: + description: The ID of the network interface attachment. + returned: always + type: str + sample: eni-attach-3aff3f + delete_on_termination: + description: Indicates whether the network interface is deleted when the instance is terminated. + returned: always + type: bool + sample: true + device_index: + description: The index of the device on the instance for the network interface attachment. + returned: always + type: int + sample: 0 + status: + description: The attachment state. + returned: always + type: str + sample: attached + description: + description: The description. 
+ returned: always + type: str + sample: My interface + groups: + description: One or more security groups. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-abcdef12 + group_name: + description: The name of the security group. + returned: always + type: str + sample: mygroup + ipv6_addresses: + description: One or more IPv6 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + ipv6_address: + description: The IPv6 address. + returned: always + type: str + sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + owner_id: + description: The AWS account ID of the owner of the network interface. + returned: always + type: str + sample: 01234567890 + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + private_ip_addresses: + description: The private IPv4 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + association: + description: The association information for an Elastic IP address (IPv4) associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + primary: + description: Indicates whether this IPv4 address is the primary private IP address of the network interface. + returned: always + type: bool + sample: true + private_ip_address: + description: The private IPv4 address of the network interface. + returned: always + type: str + sample: 10.0.0.1 + source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + status: + description: The status of the network interface. + returned: always + type: str + sample: in-use + subnet_id: + description: The ID of the subnet for the network interface. + returned: always + type: str + sample: subnet-0123456 + vpc_id: + description: The ID of the VPC for the network interface. + returned: always + type: str + sample: vpc-0123456 + placement: + description: The location where the instance launched, if applicable. + returned: always + type: complex + contains: + availability_zone: + description: The Availability Zone of the instance. + returned: always + type: str + sample: ap-southeast-2a + group_name: + description: The name of the placement group the instance is in (for cluster compute instances). + returned: always + type: str + sample: "" + tenancy: + description: The tenancy of the instance (if the instance is running in a VPC). + returned: always + type: str + sample: default + private_dns_name: + description: The private DNS name. + returned: always + type: str + sample: ip-10-0-0-1.ap-southeast-2.compute.internal + private_ip_address: + description: The IPv4 address of the network interface within the subnet. 
+ returned: always + type: str + sample: 10.0.0.1 + product_codes: + description: One or more product codes. + returned: always + type: list + elements: dict + contains: + product_code_id: + description: The product code. + returned: always + type: str + sample: aw0evgkw8ef3n2498gndfgasdfsd5cce + product_code_type: + description: The type of product code. + returned: always + type: str + sample: marketplace + public_dns_name: + description: The public DNS name assigned to the instance. + returned: always + type: str + sample: + public_ip_address: + description: The public IPv4 address assigned to the instance + returned: always + type: str + sample: 52.0.0.1 + root_device_name: + description: The device name of the root device + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + network.source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + state: + description: The current state of the instance. + returned: always + type: complex + contains: + code: + description: The low byte represents the state. + returned: always + type: int + sample: 16 + name: + description: The name of the state. + returned: always + type: str + sample: running + state_transition_reason: + description: The reason for the most recent state transition. + returned: always + type: str + sample: + subnet_id: + description: The ID of the subnet in which the instance is running. + returned: always + type: str + sample: subnet-00abcdef + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm + vpc_id: + description: The ID of the VPC the instance is in. 
+      returned: always
+      type: str
+      sample: vpc-0011223344
+'''
+
+import re
+import uuid
+import string
+import textwrap
+import time
+from collections import namedtuple
+
+try:
+    import boto3
+    import botocore.exceptions
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils.six import text_type, string_types
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+from ansible.module_utils._text import to_bytes, to_native
+import ansible_collections.ansible.amazon.plugins.module_utils.ec2 as ec2_utils
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry,
+                                                                         ansible_dict_to_boto3_filter_list,
+                                                                         compare_aws_tags,
+                                                                         boto3_tag_list_to_ansible_dict,
+                                                                         ansible_dict_to_boto3_tag_list,
+                                                                         camel_dict_to_snake_dict)
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+
+module = None
+
+
+def tower_callback_script(tower_conf, windows=False, passwd=None):
+    script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+    if windows and passwd is not None:
+        script_tpl = """
+        $admin = [adsi]("WinNT://./administrator, user")
+        $admin.PSBase.Invoke("SetPassword", "{PASS}")
+        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+
+        """
+        return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
+    elif windows and passwd is None:
+        script_tpl = """
+        $admin = [adsi]("WinNT://./administrator, user")
+        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+
+        """
+        return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
+    elif not windows:
+        for p in ['tower_address', 'job_template_id', 'host_config_key']:
+            if p not in tower_conf:
+                module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))
+
+        if isinstance(tower_conf['job_template_id'], string_types):
+            tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
+        tpl = string.Template(textwrap.dedent("""#!/bin/bash
+        set -x
+
+        retry_attempts=10
+        attempt=0
+        while [[ $attempt -lt $retry_attempts ]]
+        do
+          status_code=`curl --max-time 10 -v -k -s -i \
+            --data "host_config_key=${host_config_key}" \
+            'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
+            | head -n 1 \
+            | awk '{print $2}'`
+          if [[ $status_code == 404 ]]
+          then
+            status_code=`curl --max-time 10 -v -k -s -i \
+              --data "host_config_key=${host_config_key}" \
+              'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
+              | head -n 1 \
+              | awk '{print $2}'`
+            # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
+          fi
+          if [[ $status_code == 201 ]]
+          then
+            exit 0
+          fi
+          attempt=$(( attempt + 1 ))
+          echo "$${status_code} received... retrying in 1 minute. 
(Attempt $${attempt})" + sleep 60 + done + exit 1 + """)) + return tpl.safe_substitute(tower_address=tower_conf['tower_address'], + template_id=tower_conf['job_template_id'], + host_config_key=tower_conf['host_config_key']) + raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.") + + +@AWSRetry.jittered_backoff() +def manage_tags(match, new_tags, purge_tags, ec2): + changed = False + old_tags = boto3_tag_list_to_ansible_dict(match['Tags']) + tags_to_set, tags_to_delete = compare_aws_tags( + old_tags, new_tags, + purge_tags=purge_tags, + ) + if tags_to_set: + ec2.create_tags( + Resources=[match['InstanceId']], + Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) + changed |= True + if tags_to_delete: + delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) + ec2.delete_tags( + Resources=[match['InstanceId']], + Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) + changed |= True + return changed + + +def build_volume_spec(params): + volumes = params.get('volumes') or [] + for volume in volumes: + if 'ebs' in volume: + for int_value in ['volume_size', 'iops']: + if int_value in volume['ebs']: + volume['ebs'][int_value] = int(volume['ebs'][int_value]) + return [ec2_utils.snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] + + +def add_or_update_instance_profile(instance, desired_profile_name): + instance_profile_setting = instance.get('IamInstanceProfile') + if instance_profile_setting and desired_profile_name: + if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')): + # great, the profile we asked for is what's there + return False + else: + desired_arn = determine_iam_role(desired_profile_name) + if instance_profile_setting.get('Arn') == desired_arn: + return False + # update association + ec2 = module.client('ec2') + try: + association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) + except botocore.exceptions.ClientError as e: + # check for InvalidAssociationID.NotFound + module.fail_json_aws(e, "Could not find instance profile association") + try: + resp = ec2.replace_iam_instance_profile_association( + AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'], + IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)} + ) + return True + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e, "Could not associate instance profile") + + if not instance_profile_setting and desired_profile_name: + # create association + ec2 = module.client('ec2') + try: + resp = ec2.associate_iam_instance_profile( + IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}, + InstanceId=instance['InstanceId'] + ) + return True + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e, "Could not associate new instance profile") + + return False + + +def build_network_spec(params, ec2=None): + """ + Returns list of interfaces [complex] + Interface type: { + 'AssociatePublicIpAddress': True|False, + 'DeleteOnTermination': True|False, + 'Description': 'string', + 'DeviceIndex': 123, + 'Groups': [ + 'string', + ], + 'Ipv6AddressCount': 123, + 'Ipv6Addresses': [ + { + 'Ipv6Address': 'string' + }, + ], + 'NetworkInterfaceId': 'string', + 'PrivateIpAddress': 'string', + 'PrivateIpAddresses': [ + { + 'Primary': True|False, + 'PrivateIpAddress': 'string' + }, + ], + 'SecondaryPrivateIpAddressCount': 123, + 
'SubnetId': 'string'
+        },
+    """
+    if ec2 is None:
+        ec2 = module.client('ec2')
+
+    interfaces = []
+    network = params.get('network') or {}
+    if not network.get('interfaces'):
+        # they only specified one interface
+        spec = {
+            'DeviceIndex': 0,
+        }
+        if network.get('assign_public_ip') is not None:
+            spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+        if params.get('vpc_subnet_id'):
+            spec['SubnetId'] = params['vpc_subnet_id']
+        else:
+            default_vpc = get_default_vpc(ec2)
+            if default_vpc is None:
+                module.fail_json(
+                    msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+            else:
+                sub = get_default_subnet(ec2, default_vpc)
+                spec['SubnetId'] = sub['SubnetId']
+
+        if network.get('private_ip_address'):
+            spec['PrivateIpAddress'] = network['private_ip_address']
+
+        if params.get('security_group') or params.get('security_groups'):
+            groups = discover_security_groups(
+                group=params.get('security_group'),
+                groups=params.get('security_groups'),
+                subnet_id=spec['SubnetId'],
+                ec2=ec2
+            )
+            spec['Groups'] = [g['GroupId'] for g in groups]
+        if network.get('description') is not None:
+            spec['Description'] = network['description']
+        # TODO more special snowflake network things
+
+        return [spec]
+
+    # handle list of `network.interfaces` options
+    for idx, interface_params in enumerate(network.get('interfaces', [])):
+        spec = {
+            'DeviceIndex': idx,
+        }
+
+        if isinstance(interface_params, string_types):
+            # naive case where user gave
+            # network_interfaces: [eni-1234, eni-4567, ....]
+            # put into normal data structure so we don't dupe code
+            interface_params = {'id': interface_params}
+
+        if interface_params.get('id') is not None:
+            # if an ID is provided, we don't want to set any other parameters.
+            spec['NetworkInterfaceId'] = interface_params['id']
+            interfaces.append(spec)
+            continue
+
+        spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+        if interface_params.get('ipv6_addresses'):
+            spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+        if interface_params.get('private_ip_address'):
+            spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+        if interface_params.get('description'):
+            spec['Description'] = interface_params.get('description')
+
+        if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+            spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+        elif not spec.get('SubnetId') and not interface_params.get('id'):
+            # .get() rather than ['id'] so plain dict interfaces without an 'id' key don't raise KeyError
+            # TODO grab a subnet from default VPC
+            raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+        interfaces.append(spec)
+    return interfaces
+
+
+def warn_if_public_ip_assignment_changed(instance):
+    # This is a non-modifiable attribute.
+    assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+    if assign_public_ip is None:
+        return
+
+    # Check that public ip assignment is the same and warn if not
+    public_dns_name = instance.get('PublicDnsName')
+    if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
+        module.warn(
+            "Unable to modify public ip assignment to {0} for instance {1}. "
+            "Whether or not to assign a public IP is determined during instance creation.".format(
+                assign_public_ip, instance['InstanceId']))
+
+
+def warn_if_cpu_options_changed(instance):
+    # This is a non-modifiable attribute.
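+    # CpuOptions can only be chosen in the original RunInstances call; EC2
+    # offers no API to modify them afterwards, so all this module can do for
+    # an existing instance is compare the values and warn about a mismatch.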
+    cpu_options = module.params.get('cpu_options')
+    if cpu_options is None:
+        return
+
+    # Check that the CpuOptions set are the same and warn if not
+    core_count_curr = instance['CpuOptions'].get('CoreCount')
+    core_count = cpu_options.get('core_count')
+    threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
+    threads_per_core = cpu_options.get('threads_per_core')
+    if core_count_curr != core_count:
+        module.warn(
+            "Unable to modify core_count from {0} to {1}. "
+            "Assigning a number of cores is determined during instance creation.".format(
+                core_count_curr, core_count))
+
+    if threads_per_core_curr != threads_per_core:
+        module.warn(
+            "Unable to modify threads_per_core from {0} to {1}. "
+            "Assigning a number of threads per core is determined during instance creation.".format(
+                threads_per_core_curr, threads_per_core))
+
+
+def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
+    if ec2 is None:
+        ec2 = module.client('ec2')
+
+    if subnet_id is not None:
+        try:
+            sub = ec2.describe_subnets(SubnetIds=[subnet_id])
+        except botocore.exceptions.ClientError as e:
+            # describe_subnets reports a missing subnet as InvalidSubnetID.NotFound
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                module.fail_json(
+                    msg="Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
+                        subnet_id
+                    )
+                )
+            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+        except botocore.exceptions.BotoCoreError as e:
+            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+        parent_vpc_id = sub['Subnets'][0]['VpcId']
+
+    vpc = {
+        'Name': 'vpc-id',
+        'Values': [parent_vpc_id]
+    }
+
+    # because filter lists are AND'd together in the security groups API,
+    # make two separate requests for groups by ID and by name
+    id_filters = [vpc]
+    name_filters = [vpc]
+
+    if group:
+        name_filters.append(
+            dict(
+                Name='group-name',
+                Values=[group]
+            )
+        )
+        if group.startswith('sg-'):
+            id_filters.append(
+                dict(
+                    Name='group-id',
+                    Values=[group]
+                )
+            )
+    if groups:
+        name_filters.append(
+            dict(
+                Name='group-name',
+                Values=groups
+            )
+        )
+        if [g for g in groups if g.startswith('sg-')]:
+            id_filters.append(
+                dict(
+                    Name='group-id',
+                    Values=[g for g in groups if g.startswith('sg-')]
+                )
+            )
+
+    found_groups = []
+    for f_set in (id_filters, name_filters):
+        if len(f_set) > 1:
+            found_groups.extend(ec2.get_paginator(
+                'describe_security_groups'
+            ).paginate(
+                Filters=f_set
+            ).search('SecurityGroups[]'))
+    return list(dict((g['GroupId'], g) for g in found_groups).values())
+
+
+def build_top_level_options(params):
+    spec = {}
+    if params.get('image_id'):
+        spec['ImageId'] = params['image_id']
+    elif isinstance(params.get('image'), dict):
+        image = params.get('image', {})
+        spec['ImageId'] = image.get('id')
+        if 'ramdisk' in image:
+            spec['RamdiskId'] = image['ramdisk']
+        if 'kernel' in image:
+            spec['KernelId'] = image['kernel']
+    if not spec.get('ImageId') and not params.get('launch_template'):
+        module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
+
+    if params.get('key_name') is not None:
+        spec['KeyName'] = params.get('key_name')
+    if params.get('user_data') is not None:
+        spec['UserData'] = to_native(params.get('user_data'))
+    elif params.get('tower_callback') is not None:
+        spec['UserData'] = tower_callback_script(
+            tower_conf=params.get('tower_callback'),
+            windows=params.get('tower_callback').get('windows', False),
+            
passwd=params.get('tower_callback').get('set_password'),
+        )
+
+    if params.get('launch_template') is not None:
+        spec['LaunchTemplate'] = {}
+        # at least one of id/name is needed to identify the template
+        if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
+            module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
+
+        if params.get('launch_template').get('id') is not None:
+            spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
+        if params.get('launch_template').get('name') is not None:
+            spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
+        if params.get('launch_template').get('version') is not None:
+            spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
+
+    if params.get('detailed_monitoring', False):
+        spec['Monitoring'] = {'Enabled': True}
+    if params.get('cpu_credit_specification') is not None:
+        spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
+    if params.get('tenancy') is not None:
+        spec['Placement'] = {'Tenancy': params.get('tenancy')}
+    if params.get('placement_group'):
+        if 'Placement' in spec:
+            spec['Placement']['GroupName'] = str(params.get('placement_group'))
+        else:
+            spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
+    if params.get('ebs_optimized') is not None:
+        spec['EbsOptimized'] = params.get('ebs_optimized')
+    if params.get('instance_initiated_shutdown_behavior'):
+        spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
+    if params.get('termination_protection') is not None:
+        spec['DisableApiTermination'] = params.get('termination_protection')
+    if params.get('cpu_options') is not None:
+        spec['CpuOptions'] = {}
+        spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
+        spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
+    return spec
+
+
+def build_instance_tags(params, propagate_tags_to_volumes=True):
+    tags = params.get('tags', {})
+    if params.get('name') is not None:
+        if tags is None:
+            tags = {}
+        tags['Name'] = params.get('name')
+    return [
+        {
+            'ResourceType': 'volume',
+            'Tags': ansible_dict_to_boto3_tag_list(tags),
+        },
+        {
+            'ResourceType': 'instance',
+            'Tags': ansible_dict_to_boto3_tag_list(tags),
+        },
+    ]
+
+
+def build_run_instance_spec(params, ec2=None):
+    if ec2 is None:
+        ec2 = module.client('ec2')
+
+    spec = dict(
+        ClientToken=uuid.uuid4().hex,
+        MaxCount=1,
+        MinCount=1,
+    )
+    # network parameters
+    spec['NetworkInterfaces'] = build_network_spec(params, ec2)
+    spec['BlockDeviceMappings'] = build_volume_spec(params)
+    spec.update(**build_top_level_options(params))
+    spec['TagSpecifications'] = build_instance_tags(params)
+
+    # IAM profile
+    if params.get('instance_role'):
+        spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
+
+    spec['InstanceType'] = params['instance_type']
+    return spec
+
+
+def await_instances(ids, state='OK'):
+    if not module.params.get('wait', True):
+        # the user asked not to wait for anything
+        return
+
+    if module.check_mode:
+        # In check mode, there is no change even if you wait.
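+        # (No instances were actually created or modified in check mode, so
+        # the EC2 waiters below would never observe the requested state.)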
+ return + + state_opts = { + 'OK': 'instance_status_ok', + 'STOPPED': 'instance_stopped', + 'TERMINATED': 'instance_terminated', + 'EXISTS': 'instance_exists', + 'RUNNING': 'instance_running', + } + if state not in state_opts: + module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state)) + waiter = module.client('ec2').get_waiter(state_opts[state]) + try: + waiter.wait( + InstanceIds=ids, + WaiterConfig={ + 'Delay': 15, + 'MaxAttempts': module.params.get('wait_timeout', 600) // 15, + } + ) + except botocore.exceptions.WaiterConfigError as e: + module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format( + to_native(e), ', '.join(ids), state)) + except botocore.exceptions.WaiterError as e: + module.warn("Instances {0} took too long to reach state {1}. {2}".format( + ', '.join(ids), state, to_native(e))) + + +def diff_instance_and_params(instance, params, ec2=None, skip=None): + """boto3 instance obj, module params""" + if ec2 is None: + ec2 = module.client('ec2') + + if skip is None: + skip = [] + + changes_to_apply = [] + id_ = instance['InstanceId'] + + ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value']) + + def value_wrapper(v): + return {'Value': v} + + param_mappings = [ + ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper), + ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper), + # user data is an immutable property + # ParamMapper('user_data', 'UserData', 'userData', value_wrapper), + ] + + for mapping in param_mappings: + if params.get(mapping.param_key) is not None and mapping.instance_key not in skip: + value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_) + if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key): + arguments = dict( + InstanceId=instance['InstanceId'], + # Attribute=mapping.attribute_name, + ) + arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) + changes_to_apply.append(arguments) + + if (params.get('network') or {}).get('source_dest_check') is not None: + # network.source_dest_check is nested, so needs to be treated separately + check = bool(params.get('network').get('source_dest_check')) + if instance['SourceDestCheck'] != check: + changes_to_apply.append(dict( + InstanceId=instance['InstanceId'], + SourceDestCheck={'Value': check}, + )) + + return changes_to_apply + + +def change_network_attachments(instance, params, ec2): + if (params.get('network') or {}).get('interfaces') is not None: + new_ids = [] + for inty in params.get('network').get('interfaces'): + if isinstance(inty, dict) and 'id' in inty: + new_ids.append(inty['id']) + elif isinstance(inty, string_types): + new_ids.append(inty) + # network.interfaces can create the need to attach new interfaces + old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] + to_attach = set(new_ids) - set(old_ids) + for eni_id in to_attach: + ec2.attach_network_interface( + DeviceIndex=new_ids.index(eni_id), + InstanceId=instance['InstanceId'], + NetworkInterfaceId=eni_id, + ) + return bool(len(to_attach)) + return False + + +def find_instances(ec2, ids=None, filters=None): + paginator = ec2.get_paginator('describe_instances') + if ids: + return list(paginator.paginate( + InstanceIds=ids, + ).search('Reservations[].Instances[]')) + elif filters is None: + 
module.fail_json(msg="No filters provided when they were required") + elif filters is not None: + for key in list(filters.keys()): + if not key.startswith("tag:"): + filters[key.replace("_", "-")] = filters.pop(key) + return list(paginator.paginate( + Filters=ansible_dict_to_boto3_filter_list(filters) + ).search('Reservations[].Instances[]')) + return [] + + +@AWSRetry.jittered_backoff() +def get_default_vpc(ec2): + vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + if len(vpcs.get('Vpcs', [])): + return vpcs.get('Vpcs')[0] + return None + + +@AWSRetry.jittered_backoff() +def get_default_subnet(ec2, vpc, availability_zone=None): + subnets = ec2.describe_subnets( + Filters=ansible_dict_to_boto3_filter_list({ + 'vpc-id': vpc['VpcId'], + 'state': 'available', + 'default-for-az': 'true', + }) + ) + if len(subnets.get('Subnets', [])): + if availability_zone is not None: + subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) + if availability_zone in subs_by_az: + return subs_by_az[availability_zone] + + # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first + # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list + by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) + return by_az[0] + return None + + +def ensure_instance_state(state, ec2=None): + if ec2 is None: + module.client('ec2') + if state in ('running', 'started'): + changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING') + + if failed: + module.fail_json( + msg="Unable to start instances: {0}".format(failure_reason), + reboot_success=list(changed), + reboot_failed=failed) + + module.exit_json( + msg='Instances started', + reboot_success=list(changed), + changed=bool(len(changed)), + reboot_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + elif state in ('restarted', 'rebooted'): + changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_state='STOPPED') + changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_state='RUNNING') + + if failed: + module.fail_json( + msg="Unable to restart instances: {0}".format(failure_reason), + reboot_success=list(changed), + reboot_failed=failed) + + module.exit_json( + msg='Instances restarted', + reboot_success=list(changed), + changed=bool(len(changed)), + reboot_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + elif state in ('stopped',): + changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_state='STOPPED') + + if failed: + module.fail_json( + msg="Unable to stop instances: {0}".format(failure_reason), + stop_success=list(changed), + stop_failed=failed) + + module.exit_json( + msg='Instances stopped', + stop_success=list(changed), + changed=bool(len(changed)), + stop_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + elif state in ('absent', 'terminated'): + terminated, terminate_failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_state='TERMINATED') + + if terminate_failed: + module.fail_json( + msg="Unable to terminate instances: {0}".format(failure_reason), + terminate_success=list(terminated), + terminate_failed=terminate_failed) + 
module.exit_json(
+            msg='Instances terminated',
+            terminate_success=list(terminated),
+            changed=bool(len(terminated)),
+            terminate_failed=[],
+            instances=[pretty_instance(i) for i in instances],
+        )
+
+
+@AWSRetry.jittered_backoff()
+def change_instance_state(filters, desired_state, ec2=None):
+    """Takes STOPPED/RUNNING/TERMINATED"""
+    if ec2 is None:
+        ec2 = module.client('ec2')
+
+    changed = set()
+    instances = find_instances(ec2, filters=filters)
+    to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state)
+    unchanged = set()
+    failure_reason = ""
+
+    for inst in instances:
+        try:
+            if desired_state == 'TERMINATED':
+                if module.check_mode:
+                    changed.add(inst['InstanceId'])
+                    continue
+
+                # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
+                # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+                resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['TerminatingInstances']:
+                    changed.add(i['InstanceId'])
+            if desired_state == 'STOPPED':
+                if inst['State']['Name'] in ('stopping', 'stopped'):
+                    unchanged.add(inst['InstanceId'])
+                    continue
+
+                if module.check_mode:
+                    changed.add(inst['InstanceId'])
+                    continue
+
+                resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['StoppingInstances']:
+                    changed.add(i['InstanceId'])
+            if desired_state == 'RUNNING':
+                if module.check_mode:
+                    changed.add(inst['InstanceId'])
+                    continue
+
+                resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
+                for i in resp['StartingInstances']:
+                    changed.add(i['InstanceId'])
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            try:
+                failure_reason = to_native(e.message)
+            except AttributeError:
+                failure_reason = to_native(e)
+
+    if changed:
+        await_instances(ids=list(changed) + list(unchanged), state=desired_state)
+
+    change_failed = list(to_change - changed)
+    instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances))
+    return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+    instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+    instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
+    return instance
+
+
+def determine_iam_role(name_or_arn):
+    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+        return name_or_arn
+    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+    try:
+        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+        return role['InstanceProfile']['Arn']
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchEntity':
+            module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def handle_existing(existing_matches, changed, ec2, state):
+    if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
+        ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+        if failed:
+            module.fail_json(msg="Couldn't start instances: {0}. 
Failure reason: {1}".format(instances, failure_reason)) + module.exit_json( + changed=bool(len(ins_changed)) or changed, + instances=[pretty_instance(i) for i in instances], + instance_ids=[i['InstanceId'] for i in instances], + ) + changes = diff_instance_and_params(existing_matches[0], module.params) + for c in changes: + AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c) + changed |= bool(changes) + changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role')) + changed |= change_network_attachments(existing_matches[0], module.params, ec2) + altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches]) + module.exit_json( + changed=bool(len(changes)) or changed, + instances=[pretty_instance(i) for i in altered], + instance_ids=[i['InstanceId'] for i in altered], + changes=changes, + ) + + +def ensure_present(existing_matches, changed, ec2, state): + if len(existing_matches): + try: + handle_existing(existing_matches, changed, ec2, state) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])), + # instances=[pretty_instance(i) for i in existing_matches], + # instance_ids=[i['InstanceId'] for i in existing_matches], + ) + try: + instance_spec = build_run_instance_spec(module.params) + # If check mode is enabled,suspend 'ensure function'. + if module.check_mode: + module.exit_json( + changed=True, + spec=instance_spec, + ) + instance_response = run_instances(ec2, **instance_spec) + instances = instance_response['Instances'] + instance_ids = [i['InstanceId'] for i in instances] + + for ins in instances: + changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized']) + for c in changes: + try: + AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) + + if not module.params.get('wait'): + module.exit_json( + changed=True, + instance_ids=instance_ids, + spec=instance_spec, + ) + await_instances(instance_ids) + instances = ec2.get_paginator('describe_instances').paginate( + InstanceIds=instance_ids + ).search('Reservations[].Instances[]') + + module.exit_json( + changed=True, + instances=[pretty_instance(i) for i in instances], + instance_ids=instance_ids, + spec=instance_spec, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to create new EC2 instance") + + +@AWSRetry.jittered_backoff() +def run_instances(ec2, **instance_spec): + try: + return ec2.run_instances(**instance_spec) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']: + # If the instance profile has just been created, it takes some time to be visible by ec2 + # So we wait 10 second and retry the run_instances + time.sleep(10) + return ec2.run_instances(**instance_spec) + else: + raise e + + +def main(): + global module + argument_spec = dict( + state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']), + wait=dict(default=True, type='bool'), + wait_timeout=dict(default=600, type='int'), + # count=dict(default=1, type='int'), + image=dict(type='dict'), + 
image_id=dict(type='str'), + instance_type=dict(default='t2.micro', type='str'), + user_data=dict(type='str'), + tower_callback=dict(type='dict'), + ebs_optimized=dict(type='bool'), + vpc_subnet_id=dict(type='str', aliases=['subnet_id']), + availability_zone=dict(type='str'), + security_groups=dict(default=[], type='list'), + security_group=dict(type='str'), + instance_role=dict(type='str'), + name=dict(type='str'), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), + filters=dict(type='dict', default=None), + launch_template=dict(type='dict'), + key_name=dict(type='str'), + cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']), + cpu_options=dict(type='dict', options=dict( + core_count=dict(type='int', required=True), + threads_per_core=dict(type='int', choices=[1, 2], required=True) + )), + tenancy=dict(type='str', choices=['dedicated', 'default']), + placement_group=dict(type='str'), + instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), + termination_protection=dict(type='bool'), + detailed_monitoring=dict(type='bool'), + instance_ids=dict(default=[], type='list'), + network=dict(default=None, type='dict'), + volumes=dict(default=None, type='list'), + ) + # running/present are synonyms + # as are terminated/absent + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['security_groups', 'security_group'], + ['availability_zone', 'vpc_subnet_id'], + ['tower_callback', 'user_data'], + ['image_id', 'image'], + ], + supports_check_mode=True + ) + + if module.params.get('network'): + if module.params.get('network').get('interfaces'): + if module.params.get('security_group'): + module.fail_json(msg="Parameter network.interfaces can't be used with security_group") + if module.params.get('security_groups'): + module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") + + state = module.params.get('state') + ec2 = module.client('ec2') + if module.params.get('filters') is None: + filters = { + # all states except shutting-down and terminated + 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'] + } + if state == 'stopped': + # only need to change instances that aren't already stopped + filters['instance-state-name'] = ['stopping', 'pending', 'running'] + + if isinstance(module.params.get('instance_ids'), string_types): + filters['instance-id'] = [module.params.get('instance_ids')] + elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')): + filters['instance-id'] = module.params.get('instance_ids') + else: + if not module.params.get('vpc_subnet_id'): + if module.params.get('network'): + # grab AZ from one of the ENIs + ints = module.params.get('network').get('interfaces') + if ints: + filters['network-interface.network-interface-id'] = [] + for i in ints: + if isinstance(i, dict): + i = i['id'] + filters['network-interface.network-interface-id'].append(i) + else: + sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone')) + filters['subnet-id'] = sub['SubnetId'] + else: + filters['subnet-id'] = [module.params.get('vpc_subnet_id')] + + if module.params.get('name'): + filters['tag:Name'] = [module.params.get('name')] + + if module.params.get('image_id'): + filters['image-id'] = [module.params.get('image_id')] + elif (module.params.get('image') or {}).get('id'): + filters['image-id'] = [module.params.get('image', {}).get('id')] + + 
module.params['filters'] = filters + + if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'): + module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16") + + existing_matches = find_instances(ec2, filters=module.params.get('filters')) + changed = False + + if state not in ('terminated', 'absent') and existing_matches: + for match in existing_matches: + warn_if_public_ip_assignment_changed(match) + warn_if_cpu_options_changed(match) + tags = module.params.get('tags') or {} + name = module.params.get('name') + if name: + tags['Name'] = name + changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2) + + if state in ('present', 'running', 'started'): + ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state) + elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'): + if existing_matches: + ensure_instance_state(state, ec2) + else: + module.exit_json( + msg='No matching instances found', + changed=False, + instances=[], + ) + else: + module.fail_json(msg="We don't handle the state {0}".format(state)) + + +if __name__ == '__main__': + main() diff --git a/ec2_instance_facts.py b/ec2_instance_facts.py new file mode 120000 index 00000000000..7010fdcb95f --- /dev/null +++ b/ec2_instance_facts.py @@ -0,0 +1 @@ +ec2_instance_info.py \ No newline at end of file diff --git a/ec2_instance_info.py b/ec2_instance_info.py new file mode 100644 index 00000000000..e16a3c2f164 --- /dev/null +++ b/ec2_instance_info.py @@ -0,0 +1,570 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ec2_instance_info +short_description: Gather information about ec2 instances in AWS +description: + - Gather information about ec2 instances in AWS + - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change. +author: + - Michael Schuett (@michaeljs1990) + - Rob White (@wimnat) +requirements: [ "boto3", "botocore" ] +options: + instance_ids: + description: + - If you specify one or more instance IDs, only instances that have the specified IDs are returned. + required: false + type: list + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter + names and values are case sensitive. + required: false + default: {} + type: dict + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
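+
+# Results come back under the 'instances' key, so the task can be registered
+# and reused. A minimal sketch (the 'ec2_info' variable name is arbitrary):
+- ec2_instance_info:
+    filters:
+      "tag:Name": Example
+  register: ec2_info
+
+- debug:
+    msg: "{{ ec2_info.instances | map(attribute='instance_id') | list }}"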
+ +# Gather information about all instances +- ec2_instance_info: + +# Gather information about all instances in AZ ap-southeast-2a +- ec2_instance_info: + filters: + availability-zone: ap-southeast-2a + +# Gather information about a particular instance using ID +- ec2_instance_info: + instance_ids: + - i-12345678 + +# Gather information about any instance with a tag key Name and value Example +- ec2_instance_info: + filters: + "tag:Name": Example + +# Gather information about any instance in states "shutting-down", "stopping", "stopped" +- ec2_instance_info: + filters: + instance-state-name: [ "shutting-down", "stopping", "stopped" ] + +''' + +RETURN = ''' +instances: + description: a list of ec2 instances + returned: always + type: complex + contains: + ami_launch_index: + description: The AMI launch index, which can be used to find this instance in the launch group. + returned: always + type: int + sample: 0 + architecture: + description: The architecture of the image + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/sdh or xvdh). + returned: always + type: str + sample: /dev/sdh + ebs: + description: Parameters used to automatically set up EBS volumes when the instance is launched. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: attached + volume_id: + description: The ID of the EBS volume + returned: always + type: str + sample: vol-12345678 + cpu_options: + description: The CPU options set for the instance. + returned: always if botocore version >= 1.10.16 + type: complex + contains: + core_count: + description: The number of CPU cores for the instance. + returned: always + type: int + sample: 1 + threads_per_core: + description: The number of threads per CPU core. On supported instance, a value of 1 means Intel Hyper-Threading Technology is disabled. + returned: always + type: int + sample: 1 + client_token: + description: The idempotency token you provided when you launched the instance, if applicable. + returned: always + type: str + sample: mytoken + ebs_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + hypervisor: + description: The hypervisor type of the instance. + returned: always + type: str + sample: xen + iam_instance_profile: + description: The IAM instance profile associated with the instance, if applicable. + returned: always + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the instance profile. + returned: always + type: str + sample: "arn:aws:iam::000012345678:instance-profile/myprofile" + id: + description: The ID of the instance profile + returned: always + type: str + sample: JFJ397FDG400FG9FD1N + image_id: + description: The ID of the AMI used to launch the instance. + returned: always + type: str + sample: ami-0011223344 + instance_id: + description: The ID of the instance. 
+ returned: always + type: str + sample: i-012345678 + instance_type: + description: The instance type size of the running instance. + returned: always + type: str + sample: t2.micro + key_name: + description: The name of the key pair, if this instance was launched with an associated key pair. + returned: always + type: str + sample: my-key + launch_time: + description: The time the instance was launched. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + monitoring: + description: The monitoring for the instance. + returned: always + type: complex + contains: + state: + description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. + returned: always + type: str + sample: disabled + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + association: + description: The association information for an Elastic IPv4 associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + attachment: + description: The network interface attachment. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + attachment_id: + description: The ID of the network interface attachment. + returned: always + type: str + sample: eni-attach-3aff3f + delete_on_termination: + description: Indicates whether the network interface is deleted when the instance is terminated. + returned: always + type: bool + sample: true + device_index: + description: The index of the device on the instance for the network interface attachment. + returned: always + type: int + sample: 0 + status: + description: The attachment state. + returned: always + type: str + sample: attached + description: + description: The description. + returned: always + type: str + sample: My interface + groups: + description: One or more security groups. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-abcdef12 + group_name: + description: The name of the security group. + returned: always + type: str + sample: mygroup + ipv6_addresses: + description: One or more IPv6 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + ipv6_address: + description: The IPv6 address. + returned: always + type: str + sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + owner_id: + description: The AWS account ID of the owner of the network interface. + returned: always + type: str + sample: 01234567890 + private_ip_address: + description: The IPv4 address of the network interface within the subnet. 
+ returned: always + type: str + sample: 10.0.0.1 + private_ip_addresses: + description: The private IPv4 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + association: + description: The association information for an Elastic IP address (IPv4) associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + primary: + description: Indicates whether this IPv4 address is the primary private IP address of the network interface. + returned: always + type: bool + sample: true + private_ip_address: + description: The private IPv4 address of the network interface. + returned: always + type: str + sample: 10.0.0.1 + source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + status: + description: The status of the network interface. + returned: always + type: str + sample: in-use + subnet_id: + description: The ID of the subnet for the network interface. + returned: always + type: str + sample: subnet-0123456 + vpc_id: + description: The ID of the VPC for the network interface. + returned: always + type: str + sample: vpc-0123456 + placement: + description: The location where the instance launched, if applicable. + returned: always + type: complex + contains: + availability_zone: + description: The Availability Zone of the instance. + returned: always + type: str + sample: ap-southeast-2a + group_name: + description: The name of the placement group the instance is in (for cluster compute instances). + returned: always + type: str + sample: "" + tenancy: + description: The tenancy of the instance (if the instance is running in a VPC). + returned: always + type: str + sample: default + private_dns_name: + description: The private DNS name. + returned: always + type: str + sample: ip-10-0-0-1.ap-southeast-2.compute.internal + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + product_codes: + description: One or more product codes. + returned: always + type: list + elements: dict + contains: + product_code_id: + description: The product code. + returned: always + type: str + sample: aw0evgkw8ef3n2498gndfgasdfsd5cce + product_code_type: + description: The type of product code. + returned: always + type: str + sample: marketplace + public_dns_name: + description: The public DNS name assigned to the instance. + returned: always + type: str + sample: + public_ip_address: + description: The public IPv4 address assigned to the instance + returned: always + type: str + sample: 52.0.0.1 + root_device_name: + description: The device name of the root device + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. 
+            returned: always
+            type: str
+            sample: sg-0123456
+        group_name:
+            description: The name of the security group.
+            returned: always
+            type: str
+            sample: my-security-group
+        source_dest_check:
+            description: Indicates whether source/destination checking is enabled.
+            returned: always
+            type: bool
+            sample: true
+        state:
+            description: The current state of the instance.
+            returned: always
+            type: complex
+            contains:
+                code:
+                    description: The low byte represents the state.
+                    returned: always
+                    type: int
+                    sample: 16
+                name:
+                    description: The name of the state.
+                    returned: always
+                    type: str
+                    sample: running
+        state_transition_reason:
+            description: The reason for the most recent state transition.
+            returned: always
+            type: str
+            sample:
+        subnet_id:
+            description: The ID of the subnet in which the instance is running.
+            returned: always
+            type: str
+            sample: subnet-00abcdef
+        tags:
+            description: Any tags assigned to the instance.
+            returned: always
+            type: dict
+            sample:
+        virtualization_type:
+            description: The type of virtualization of the AMI.
+            returned: always
+            type: str
+            sample: hvm
+        vpc_id:
+            description: The ID of the VPC the instance is in.
+            returned: always
+            type: str
+            sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+                                                                         boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+                                                                         ec2_argument_spec, get_aws_connection_info)
+
+
+def list_ec2_instances(connection, module):
+
+    instance_ids = module.params.get("instance_ids")
+    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+    try:
+        reservations_paginator = connection.get_paginator('describe_instances')
+        reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
+    except ClientError as e:
+        # ClientError has no .message attribute on Python 3, so stringify the exception itself
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+    # Get instances from reservations
+    instances = []
+    for reservation in reservations['Reservations']:
+        instances.extend(reservation['Instances'])
+
+    # Turn the boto3 result in to ansible_friendly_snaked_names
+    snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+    # Turn the boto3 result in to ansible friendly tag dictionary
+    for instance in snaked_instances:
+        instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+    module.exit_json(instances=snaked_instances)
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            instance_ids=dict(default=[], type='list'),
+            filters=dict(default={}, type='dict')
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=[
+                               ['instance_ids', 'filters']
+                           ],
+                           supports_check_mode=True
+                           )
+    if module._name == 'ec2_instance_facts':
+        module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", version='2.13')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+    if region:
+        connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+    else:
+        module.fail_json(msg="region must 
be specified") + + list_ec2_instances(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_launch_template.py b/ec2_launch_template.py new file mode 100644 index 00000000000..94cf2404cdf --- /dev/null +++ b/ec2_launch_template.py @@ -0,0 +1,702 @@ +#!/usr/bin/python +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ec2_launch_template +short_description: Manage EC2 launch templates +description: + - Create, modify, and delete EC2 Launch Templates, which can be used to + create individual instances or with Autoscaling Groups. + - The I(ec2_instance) and I(ec2_asg) modules can, instead of specifying all + parameters on those tasks, be passed a Launch Template which contains + settings like instance size, disk type, subnet, and more. +requirements: + - botocore + - boto3 >= 1.6.0 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +author: + - Ryan Scott Brown (@ryansb) +options: + template_id: + description: + - The ID for the launch template, can be used for all cases except creating a new Launch Template. + aliases: [id] + type: str + template_name: + description: + - The template name. This must be unique in the region-account combination you are using. + aliases: [name] + type: str + default_version: + description: + - Which version should be the default when users spin up new instances based on this template? By default, the latest version will be made the default. + type: str + default: latest + state: + description: + - Whether the launch template should exist or not. + - Deleting specific versions of a launch template is not supported at this time. + choices: [present, absent] + default: present + type: str + block_device_mappings: + description: + - The block device mapping. Supplying both a snapshot ID and an encryption + value as arguments for block-device mapping results in an error. This is + because only blank volumes can be encrypted on start, and these are not + created from a snapshot. If a snapshot is the basis for the volume, it + contains data by definition and its encryption status cannot be changed + using this action. + type: list + elements: dict + suboptions: + device_name: + description: The device name (for example, /dev/sdh or xvdh). + type: str + no_device: + description: Suppresses the specified device included in the block device mapping of the AMI. + type: str + virtual_name: + description: > + The virtual device name (ephemeralN). Instance store volumes are + numbered starting from 0. An instance type with 2 available instance + store volumes can specify mappings for ephemeral0 and ephemeral1. The + number of available instance store volumes depends on the instance + type. After you connect to the instance, you must mount the volume. + type: str + ebs: + description: Parameters used to automatically set up EBS volumes when the instance is launched. + type: dict + suboptions: + delete_on_termination: + description: Indicates whether the EBS volume is deleted on instance termination. + type: bool + encrypted: + description: > + Indicates whether the EBS volume is encrypted. Encrypted volumes + can only be attached to instances that support Amazon EBS + encryption. 
If you are creating a volume from a snapshot, you + can't specify an encryption value. + type: bool + iops: + description: + - The number of I/O operations per second (IOPS) that the volume + supports. For io1, this represents the number of IOPS that are + provisioned for the volume. For gp2, this represents the baseline + performance of the volume and the rate at which the volume + accumulates I/O credits for bursting. For more information about + General Purpose SSD baseline performance, I/O credits, and + bursting, see Amazon EBS Volume Types in the Amazon Elastic + Compute Cloud User Guide. + - > + Condition: This parameter is required for requests to create io1 + volumes; it is not used in requests to create gp2, st1, sc1, or + standard volumes. + type: int + kms_key_id: + description: The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption. + type: str + snapshot_id: + description: The ID of the snapshot to create the volume from. + type: str + volume_size: + description: + - The size of the volume, in GiB. + - "Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size." + type: int + volume_type: + description: The volume type + type: str + cpu_options: + description: + - Choose CPU settings for the EC2 instances that will be created with this template. + - For more information, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + type: dict + suboptions: + core_count: + description: The number of CPU cores for the instance. + type: int + threads_per_core: + description: > + The number of threads per CPU core. To disable Intel Hyper-Threading + Technology for the instance, specify a value of 1. Otherwise, specify + the default value of 2. + type: int + credit_specification: + description: The credit option for CPU usage of the instance. Valid for T2 or T3 instances only. + type: dict + suboptions: + cpu_credits: + description: > + The credit option for CPU usage of a T2 or T3 instance. Valid values + are C(standard) and C(unlimited). + type: str + disable_api_termination: + description: > + This helps protect instances from accidental termination. If set to true, + you can't terminate the instance using the Amazon EC2 console, CLI, or + API. To change this attribute to false after launch, use + I(ModifyInstanceAttribute). + type: bool + ebs_optimized: + description: > + Indicates whether the instance is optimized for Amazon EBS I/O. This + optimization provides dedicated throughput to Amazon EBS and an optimized + configuration stack to provide optimal Amazon EBS I/O performance. This + optimization isn't available with all instance types. Additional usage + charges apply when using an EBS-optimized instance. + type: bool + elastic_gpu_specifications: + type: list + elements: dict + description: Settings for Elastic GPU attachments. See U(https://aws.amazon.com/ec2/elastic-gpus/) for details. + suboptions: + type: + description: The type of Elastic GPU to attach + type: str + iam_instance_profile: + description: > + The name or ARN of an IAM instance profile. Requires permissions to + describe existing instance roles to confirm ARN is properly formed. + type: str + image_id: + description: > + The AMI ID to use for new instances launched with this template. This + value is region-dependent since AMIs are not global resources. 
+ type: str + instance_initiated_shutdown_behavior: + description: > + Indicates whether an instance stops or terminates when you initiate + shutdown from the instance using the operating system shutdown command. + choices: [stop, terminate] + type: str + instance_market_options: + description: Options for alternative instance markets, currently only the spot market is supported. + type: dict + suboptions: + market_type: + description: The market type. This should always be 'spot'. + type: str + spot_options: + description: Spot-market specific settings. + type: dict + suboptions: + block_duration_minutes: + description: > + The required duration for the Spot Instances (also known as Spot + blocks), in minutes. This value must be a multiple of 60 (60, + 120, 180, 240, 300, or 360). + type: int + instance_interruption_behavior: + description: The behavior when a Spot Instance is interrupted. The default is C(terminate). + choices: [hibernate, stop, terminate] + type: str + max_price: + description: The highest hourly price you're willing to pay for this Spot Instance. + type: str + spot_instance_type: + description: The request type to send. + choices: [one-time, persistent] + type: str + instance_type: + description: > + The instance type, such as C(c5.2xlarge). For a full list of instance types, see + U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + type: str + kernel_id: + description: > + The ID of the kernel. We recommend that you use PV-GRUB instead of + kernels and RAM disks. For more information, see + U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + type: str + key_name: + description: + - The name of the key pair. You can create a key pair using M(ec2_key). + - If you do not specify a key pair, you can't connect to the instance + unless you choose an AMI that is configured to allow users another way to + log in. + type: str + monitoring: + description: Settings for instance monitoring. + type: dict + suboptions: + enabled: + type: bool + description: Whether to turn on detailed monitoring for new instances. This will incur extra charges. + network_interfaces: + description: One or more network interfaces. + type: list + elements: dict + suboptions: + associate_public_ip_address: + description: Associates a public IPv4 address with eth0 for a new network interface. + type: bool + delete_on_termination: + description: Indicates whether the network interface is deleted when the instance is terminated. + type: bool + description: + description: A description for the network interface. + type: str + device_index: + description: The device index for the network interface attachment. + type: int + groups: + description: List of security group IDs to include on this instance. + type: list + elements: str + ipv6_address_count: + description: > + The number of IPv6 addresses to assign to a network interface. Amazon + EC2 automatically selects the IPv6 addresses from the subnet range. + You can't use this option if specifying the I(ipv6_addresses) option. + type: int + ipv6_addresses: + description: > + A list of one or more specific IPv6 addresses from the IPv6 CIDR + block range of your subnet. You can't use this option if you're + specifying the I(ipv6_address_count) option. + type: list + elements: str + network_interface_id: + description: The eni ID of a network interface to attach. + type: str + private_ip_address: + description: The primary private IPv4 address of the network interface. 
+ type: str + subnet_id: + description: The ID of the subnet for the network interface. + type: str + placement: + description: The placement group settings for the instance. + type: dict + suboptions: + affinity: + description: The affinity setting for an instance on a Dedicated Host. + type: str + availability_zone: + description: The Availability Zone for the instance. + type: str + group_name: + description: The name of the placement group for the instance. + type: str + host_id: + description: The ID of the Dedicated Host for the instance. + type: str + tenancy: + description: > + The tenancy of the instance (if the instance is running in a VPC). An + instance with a tenancy of dedicated runs on single-tenant hardware. + type: str + ram_disk_id: + description: > + The ID of the RAM disk to launch the instance with. We recommend that you + use PV-GRUB instead of kernels and RAM disks. For more information, see + U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + type: str + security_group_ids: + description: A list of security group IDs (VPC or EC2-Classic) that the new instances will be added to. + type: list + elements: str + security_groups: + description: A list of security group names (VPC or EC2-Classic) that the new instances will be added to. + type: list + elements: str + tags: + type: dict + description: + - A set of key-value pairs to be applied to resources when this Launch Template is used. + - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)" + - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters." + user_data: + description: > + The Base64-encoded user data to make available to the instance. For more information, see the Linux + U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Windows + U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + documentation on user-data. + type: str +''' + +EXAMPLES = ''' +- name: Create an ec2 launch template + ec2_launch_template: + name: "my_template" + image_id: "ami-04b762b4289fba92b" + key_name: my_ssh_key + instance_type: t2.micro + iam_instance_profile: myTestProfile + disable_api_termination: true + +- name: > + Create a new version of an existing ec2 launch template with a different instance type, + while leaving an older version as the default version + ec2_launch_template: + name: "my_template" + default_version: 1 + instance_type: c5.4xlarge + +- name: Delete an ec2 launch template + ec2_launch_template: + name: "my_template" + state: absent + +# This module does not yet allow deletion of specific versions of launch templates +''' + +RETURN = ''' +latest_version: + description: Latest available version of the launch template + returned: when state=present + type: int +default_version: + description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always. 
+  returned: when state=present
+  type: int
+'''
+import re
+from uuid import uuid4
+
+from ansible.module_utils._text import to_text
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_tag_list_to_ansible_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def determine_iam_role(module, name_or_arn):
+    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+        return {'arn': name_or_arn}
+    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+    try:
+        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+        return {'arn': role['InstanceProfile']['Arn']}
+    except is_boto3_error_code('NoSuchEntity') as e:
+        module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def existing_templates(module):
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    matches = None
+    try:
+        if module.params.get('template_id'):
+            matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
+        elif module.params.get('template_name'):
+            matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
+    except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
+        # no named template was found, return nothing/empty versions
+        return None, []
+    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
+            module.params.get('template_id')))
+    except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(
+            e, msg='Launch template with ID {0} could not be found, please supply a name '
+                   'instead so that a new template can be created'.format(module.params.get('template_id')))
+    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
+    else:
+        template = matches['LaunchTemplates'][0]
+        template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
+        try:
+            return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
+        except (ClientError, BotoCoreError, WaiterError) as e:
+            module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
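The error handling above leans entirely on matching botocore's ClientError by service error code, so that "template not found" is treated as a normal outcome while anything else fails the task. A minimal standalone sketch of that pattern (assuming boto3/botocore are installed; the helper name describe_or_none and the client argument are illustrative, not part of this module):

    from botocore.exceptions import ClientError

    def describe_or_none(client, name):
        # boto3 raises ClientError for API-level failures; the service
        # error code lives in e.response['Error']['Code'].
        try:
            return client.describe_launch_templates(LaunchTemplateNames=[name])
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidLaunchTemplateName.NotFoundException':
                return None  # an absent template is an expected result, not a failure
            raise  # any other error is a real problem and should propagate

The collection's is_boto3_error_code() helper wraps the same error-code check so it can be used directly in except clauses, which is what keeps the chain above flat.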
+
+
+def params_to_launch_data(module, template_params):
+    if template_params.get('tags'):
+        template_params['tag_specifications'] = [
+            {
+                'resource_type': r_type,
+                'tags': [
+                    {'Key': k, 'Value': v} for k, v
+                    in template_params['tags'].items()
+                ]
+            }
+            for r_type in ('instance', 'volume')
+        ]
+        del template_params['tags']
+    if module.params.get('iam_instance_profile'):
+        template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile'])
+    params = snake_dict_to_camel_dict(
+        dict((k, v) for k, v in template_params.items() if v is not None),
+        capitalize_first=True,
+    )
+    return params
+
+
+def delete_template(module):
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    template, template_versions = existing_templates(module)
+    deleted_versions = []
+    if template or template_versions:
+        non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
+        if non_default_versions:
+            try:
+                v_resp = ec2.delete_launch_template_versions(
+                    LaunchTemplateId=template['LaunchTemplateId'],
+                    Versions=non_default_versions,
+                )
+                if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
+                    module.warn('Failed to delete template versions {0} on launch template {1}'.format(
+                        v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
+                        template['LaunchTemplateId'],
+                    ))
+                deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
+            except (ClientError, BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
+        try:
+            resp = ec2.delete_launch_template(
+                LaunchTemplateId=template['LaunchTemplateId'],
+            )
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
+        return {
+            'deleted_versions': deleted_versions,
+            'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
+            'changed': True,
+        }
+    else:
+        return {'changed': False}
+
+
+def create_or_update(module, template_options):
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound']))
+    template, template_versions = existing_templates(module)
+    out = {}
+    lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
+    if not (template or template_versions):
+        # create a full new one
+        try:
+            resp = ec2.create_launch_template(
+                LaunchTemplateName=module.params['template_name'],
+                LaunchTemplateData=lt_data,
+                ClientToken=uuid4().hex,
+                aws_retry=True,
+            )
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't create launch template")
+        template, template_versions = existing_templates(module)
+        out['changed'] = True
+    elif template and template_versions:
+        most_recent = sorted(template_versions,
key=lambda x: x['VersionNumber'])[-1] + if lt_data == most_recent['LaunchTemplateData']: + out['changed'] = False + return out + try: + resp = ec2.create_launch_template_version( + LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateData=lt_data, + ClientToken=uuid4().hex, + aws_retry=True, + ) + if module.params.get('default_version') in (None, ''): + # no need to do anything, leave the existing version as default + pass + elif module.params.get('default_version') == 'latest': + set_default = ec2.modify_launch_template( + LaunchTemplateId=template['LaunchTemplateId'], + DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']), + ClientToken=uuid4().hex, + aws_retry=True, + ) + else: + try: + int(module.params.get('default_version')) + except ValueError: + module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version'))) + set_default = ec2.modify_launch_template( + LaunchTemplateId=template['LaunchTemplateId'], + DefaultVersion=to_text(int(module.params.get('default_version'))), + ClientToken=uuid4().hex, + aws_retry=True, + ) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create subsequent launch template version") + template, template_versions = existing_templates(module) + out['changed'] = True + return out + + +def format_module_output(module): + output = {} + template, template_versions = existing_templates(module) + template = camel_dict_to_snake_dict(template) + template_versions = [camel_dict_to_snake_dict(v) for v in template_versions] + for v in template_versions: + for ts in (v['launch_template_data'].get('tag_specifications') or []): + ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags')) + output.update(dict(template=template, versions=template_versions)) + output['default_template'] = [ + v for v in template_versions + if v.get('default_version') + ][0] + output['latest_template'] = [ + v for v in template_versions + if ( + v.get('version_number') and + int(v['version_number']) == int(template['latest_version_number']) + ) + ][0] + if "version_number" in output['default_template']: + output['default_version'] = output['default_template']['version_number'] + if "version_number" in output['latest_template']: + output['latest_version'] = output['latest_template']['version_number'] + return output + + +def main(): + template_options = dict( + block_device_mappings=dict( + type='list', + options=dict( + device_name=dict(), + ebs=dict( + type='dict', + options=dict( + delete_on_termination=dict(type='bool'), + encrypted=dict(type='bool'), + iops=dict(type='int'), + kms_key_id=dict(), + snapshot_id=dict(), + volume_size=dict(type='int'), + volume_type=dict(), + ), + ), + no_device=dict(), + virtual_name=dict(), + ), + ), + cpu_options=dict( + type='dict', + options=dict( + core_count=dict(type='int'), + threads_per_core=dict(type='int'), + ), + ), + credit_specification=dict( + dict(type='dict'), + options=dict( + cpu_credits=dict(), + ), + ), + disable_api_termination=dict(type='bool'), + ebs_optimized=dict(type='bool'), + elastic_gpu_specifications=dict( + options=dict(type=dict()), + type='list', + ), + iam_instance_profile=dict(), + image_id=dict(), + instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']), + instance_market_options=dict( + type='dict', + options=dict( + market_type=dict(), + spot_options=dict( + type='dict', + options=dict( + block_duration_minutes=dict(type='int'), + 
instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']), + max_price=dict(), + spot_instance_type=dict(choices=['one-time', 'persistent']), + ), + ), + ), + ), + instance_type=dict(), + kernel_id=dict(), + key_name=dict(), + monitoring=dict( + type='dict', + options=dict( + enabled=dict(type='bool') + ), + ), + network_interfaces=dict( + type='list', + options=dict( + associate_public_ip_address=dict(type='bool'), + delete_on_termination=dict(type='bool'), + description=dict(), + device_index=dict(type='int'), + groups=dict(type='list'), + ipv6_address_count=dict(type='int'), + ipv6_addresses=dict(type='list'), + network_interface_id=dict(), + private_ip_address=dict(), + subnet_id=dict(), + ), + ), + placement=dict( + options=dict( + affinity=dict(), + availability_zone=dict(), + group_name=dict(), + host_id=dict(), + tenancy=dict(), + ), + type='dict', + ), + ram_disk_id=dict(), + security_group_ids=dict(type='list'), + security_groups=dict(type='list'), + tags=dict(type='dict'), + user_data=dict(), + ) + + arg_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + template_name=dict(aliases=['name']), + template_id=dict(aliases=['id']), + default_version=dict(default='latest'), + ) + + arg_spec.update(template_options) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + required_one_of=[ + ('template_name', 'template_id') + ], + supports_check_mode=True + ) + + if not module.boto3_at_least('1.6.0'): + module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0") + + for interface in (module.params.get('network_interfaces') or []): + if interface.get('ipv6_addresses'): + interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']] + + if module.params.get('state') == 'present': + out = create_or_update(module, template_options) + out.update(format_module_output(module)) + elif module.params.get('state') == 'absent': + out = delete_template(module) + else: + module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state'))) + + module.exit_json(**out) + + +if __name__ == '__main__': + main() diff --git a/ec2_lc.py b/ec2_lc.py new file mode 100644 index 00000000000..8764514b0a9 --- /dev/null +++ b/ec2_lc.py @@ -0,0 +1,704 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_lc + +short_description: Create or delete AWS Autoscaling Launch Configurations + +description: + - Can create or delete AWS Autoscaling Configurations. + - Works with the ec2_asg module to manage Autoscaling Groups. + +notes: + - Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration after it is changed will not modify the + launch configuration on AWS. You must create a new config and assign it to the ASG instead. + - encrypted volumes are supported on versions >= 2.4 + + +author: + - "Gareth Rushgrove (@garethr)" + - "Willem van Ketwich (@wilvk)" + +options: + state: + description: + - Register or deregister the instance. + default: present + choices: ['present', 'absent'] + type: str + name: + description: + - Unique name for configuration. 
+    required: true
+    type: str
+  instance_type:
+    description:
+      - Instance type to use for the instance.
+      - Required when creating a new Launch Configuration.
+    type: str
+  image_id:
+    description:
+      - The AMI unique identifier to be used for the group.
+    type: str
+  key_name:
+    description:
+      - The SSH key name to be used for access to managed instances.
+    type: str
+  security_groups:
+    description:
+      - A list of security groups to apply to the instances. Since version 2.4 you can specify either security group names or IDs or a mix. Previous
+        to 2.4, for VPC instances, specify security group IDs and for EC2-Classic, specify either security group names or IDs.
+    type: list
+    elements: str
+  volumes:
+    description:
+      - A list of dictionaries defining the volumes to create.
+      - For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+    type: list
+    elements: dict
+    suboptions:
+      device_name:
+        type: str
+        description:
+          - The name for the volume (for example C(/dev/sda)).
+        required: true
+      no_device:
+        type: bool
+        description:
+          - When I(no_device=true) the device will not be created.
+      snapshot:
+        type: str
+        description:
+          - The ID of an EBS snapshot to copy when creating the volume.
+          - Mutually exclusive with the I(ephemeral) parameter.
+      ephemeral:
+        type: str
+        description:
+          - The name of the instance-store (ephemeral) device to map, for example C(ephemeral0).
+          - Data on ephemeral volumes is lost when the instance is stopped.
+          - Mutually exclusive with the I(snapshot) parameter.
+      volume_size:
+        type: int
+        description:
+          - The size of the volume (in GiB).
+          - Required unless one of I(ephemeral), I(snapshot) or I(no_device) is set.
+      volume_type:
+        type: str
+        description:
+          - The type of volume to create.
+          - See
+            U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+      delete_on_termination:
+        type: bool
+        default: false
+        description:
+          - Whether the volume should be automatically deleted when the instance
+            is terminated.
+      iops:
+        type: int
+        description:
+          - The number of IOPS to provision for the volume.
+          - Required when I(volume_type=io1).
+      encrypted:
+        type: bool
+        default: false
+        description:
+          - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+  user_data:
+    description:
+      - Opaque blob of data which is made available to the ec2 instance. Mutually exclusive with I(user_data_path).
+    type: str
+  user_data_path:
+    description:
+      - Path to the file that contains userdata for the ec2 instances. Mutually exclusive with I(user_data).
+    type: path
+  kernel_id:
+    description:
+      - Kernel ID for the EC2 instance.
+    type: str
+  spot_price:
+    description:
+      - The spot price you are bidding. Only applies for an autoscaling group with spot instances.
+    type: float
+  instance_monitoring:
+    description:
+      - Specifies whether instances are launched with detailed monitoring.
+    type: bool
+    default: false
+  assign_public_ip:
+    description:
+      - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address
+        to each instance launched in an Amazon VPC.
+    type: bool
+  ramdisk_id:
+    description:
+      - A RAM disk ID for the instances.
+    type: str
+  instance_profile_name:
+    description:
+      - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
+    type: str
+  ebs_optimized:
+    description:
+      - Specifies whether the instance is optimized for EBS I/O (true) or not (false).
+    default: false
+    type: bool
+  classic_link_vpc_id:
+    description:
+      - ID of the ClassicLink-enabled VPC.
+    type: str
+  classic_link_vpc_security_groups:
+    description:
+      - A list of security group IDs with which to associate the ClassicLink VPC instances.
+    type: list
+    elements: str
+  vpc_id:
+    description:
+      - VPC ID, used when resolving security group names to IDs.
+    type: str
+  instance_id:
+    description:
+      - The ID of a running instance to use as a basis for a launch configuration. Can be used in place of I(image_id) and I(instance_type).
+    type: str
+  placement_tenancy:
+    description:
+      - Determines whether the instance runs on single-tenant hardware or not.
+      - When not set AWS will default to C(default).
+    type: str
+    choices: ['default', 'dedicated']
+  associate_public_ip_address:
+    description:
+      - The I(associate_public_ip_address) option does nothing and will be removed in Ansible 2.14.
+    type: bool
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+
+requirements:
+    - boto3 >= 1.4.4
+
+'''
+
+EXAMPLES = '''
+
+# create a launch configuration using an AMI image and instance type as a basis
+
+- name: note that encrypted volumes are only supported in >= Ansible 2.4
+  ec2_lc:
+    name: special
+    image_id: ami-XXX
+    key_name: default
+    security_groups: ['group', 'group2']
+    instance_type: t1.micro
+    volumes:
+      - device_name: /dev/sda1
+        volume_size: 100
+        volume_type: io1
+        iops: 3000
+        delete_on_termination: true
+        encrypted: true
+      - device_name: /dev/sdb
+        ephemeral: ephemeral0
+
+# create a launch configuration using a running instance id as a basis
+
+- ec2_lc:
+    name: special
+    instance_id: i-00a48b207ec59e948
+    key_name: default
+    security_groups: ['launch-wizard-2']
+    volumes:
+      - device_name: /dev/sda1
+        volume_size: 120
+        volume_type: io1
+        iops: 3000
+        delete_on_termination: true
+
+# create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
+
+- ec2_lc:
+    name: special
+    image_id: ami-XXX
+    key_name: default
+    security_groups: ['group', 'group2']
+    instance_type: t1.micro
+    volumes:
+      - device_name: /dev/sdf
+        no_device: true
+
+- name: Use EBS snapshot ID for volume
+  block:
+  - name: Set Volume Facts
+    set_fact:
+      volumes:
+        - device_name: /dev/sda1
+          volume_size: 20
+          snapshot: snap-XXXX
+          volume_type: gp2
+          delete_on_termination: true
+          encrypted: no
+
+  - name: Create launch configuration
+    ec2_lc:
+      name: lc1
+      image_id: ami-xxxx
+      assign_public_ip: yes
+      instance_type: t2.medium
+      key_name: my-key
+      security_groups: ['sg-xxxx']
+      volumes: "{{ volumes }}"
+    register: lc_info
+'''
+
+RETURN = '''
+arn:
+  description: The Amazon Resource Name of the launch configuration.
+  returned: when I(state=present)
+  type: str
+  sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+changed:
+  description: Whether the state of the launch configuration has changed.
+  returned: always
+  type: bool
+  sample: false
+created_time:
+  description: The creation date and time for the launch configuration.
+  returned: when I(state=present)
+  type: str
+  sample: '2017-11-03 23:46:44.841000'
+image_id:
+  description: The ID of the Amazon Machine Image used by the launch configuration.
+ returned: when I(state=present) + type: str + sample: ami-9be6f38c +instance_type: + description: The instance type for the instances. + returned: when I(state=present) + type: str + sample: t1.micro +name: + description: The name of the launch configuration. + returned: when I(state=present) + type: str + sample: launch_config_name +result: + description: The specification details for the launch configuration. + returned: when I(state=present) + type: complex + contains: + PlacementTenancy: + description: The tenancy of the instances, either default or dedicated. + returned: when I(state=present) + type: str + sample: default + associate_public_ip_address: + description: (EC2-VPC) Indicates whether to assign a public IP address to each instance. + returned: when I(state=present) + type: bool + sample: false + block_device_mappings: + description: A block device mapping, which specifies the block devices. + returned: when I(state=present) + type: complex + contains: + device_name: + description: The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh). + returned: when I(state=present) + type: str + sample: /dev/sda1 + ebs: + description: The information about the Amazon EBS volume. + returned: when I(state=present) + type: complex + contains: + snapshot_id: + description: The ID of the snapshot. + returned: when I(state=present) + type: str + volume_size: + description: The volume size, in GiB. + returned: when I(state=present) + type: str + sample: '100' + virtual_name: + description: The name of the virtual device (for example, ephemeral0). + returned: when I(state=present) + type: str + sample: ephemeral0 + classic_link_vpc_id: + description: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + returned: when I(state=present) + type: str + classic_link_vpc_security_groups: + description: The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + returned: when I(state=present) + type: list + sample: [] + created_time: + description: The creation date and time for the launch configuration. + returned: when I(state=present) + type: str + sample: '2017-11-03 23:46:44.841000' + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: when I(state=present) + type: bool + sample: true + ebs_optimized: + description: Indicates whether the instance is optimized for EBS I/O (true) or not (false). + returned: when I(state=present) + type: bool + sample: false + image_id: + description: The ID of the Amazon Machine Image used by the launch configuration. + returned: when I(state=present) + type: str + sample: ami-9be6f38c + instance_monitoring: + description: Indicates whether instances in this group are launched with detailed (true) or basic (false) monitoring. + returned: when I(state=present) + type: bool + sample: true + instance_profile_name: + description: The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. + returned: when I(state=present) + type: str + sample: null + instance_type: + description: The instance type for the instances. + returned: when I(state=present) + type: str + sample: t1.micro + iops: + description: The number of I/O operations per second (IOPS) to provision for the volume. + returned: when I(state=present) + type: int + kernel_id: + description: The ID of the kernel associated with the AMI. 
+      returned: when I(state=present)
+      type: str
+      sample: ''
+    key_name:
+      description: The name of the key pair.
+      returned: when I(state=present)
+      type: str
+      sample: testkey
+    launch_configuration_arn:
+      description: The Amazon Resource Name (ARN) of the launch configuration.
+      returned: when I(state=present)
+      type: str
+      sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
+    member:
+      description: ""
+      returned: when I(state=present)
+      type: str
+      sample: "\n "
+    name:
+      description: The name of the launch configuration.
+      returned: when I(state=present)
+      type: str
+      sample: launch_config_name
+    ramdisk_id:
+      description: The ID of the RAM disk associated with the AMI.
+      returned: when I(state=present)
+      type: str
+      sample: ''
+    security_groups:
+      description: The security groups to associate with the instances.
+      returned: when I(state=present)
+      type: list
+      sample:
+        - sg-5e27db2f
+    spot_price:
+      description: The price to bid when launching Spot Instances.
+      returned: when I(state=present)
+      type: float
+    use_block_device_types:
+      description: Indicates whether to suppress a device mapping.
+      returned: when I(state=present)
+      type: bool
+      sample: false
+    user_data:
+      description: The user data available to the instances.
+      returned: when I(state=present)
+      type: str
+      sample: ''
+    volume_type:
+      description: The volume type (one of standard, io1, gp2).
+      returned: when I(state=present)
+      type: str
+      sample: io1
+security_groups:
+  description: The security groups to associate with the instances.
+  returned: when I(state=present)
+  type: list
+  sample:
+    - sg-5e27db2f
+
+'''
+
+
+import traceback
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict,
+                                                                         get_ec2_security_group_ids_from_names, boto3_conn, snake_dict_to_camel_dict,
+                                                                         HAS_BOTO3)
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+
+def create_block_device_meta(module, volume):
+    if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume:
+        if 'volume_size' not in volume:
+            module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+    if 'snapshot' in volume:
+        if volume.get('volume_type') == 'io1' and 'iops' not in volume:
+            module.fail_json(msg='io1 volumes must have an iops value set')
+    if 'ephemeral' in volume:
+        if 'snapshot' in volume:
+            module.fail_json(msg='Cannot set both ephemeral and snapshot')
+
+    return_object = {}
+
+    if 'ephemeral' in volume:
+        return_object['VirtualName'] = volume.get('ephemeral')
+
+    if 'device_name' in volume:
+        return_object['DeviceName'] = volume.get('device_name')
+
+    if 'no_device' in volume:
+        return_object['NoDevice'] = volume.get('no_device')
+
+    if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']):
+        return_object['Ebs'] = {}
+
+    if 'snapshot' in volume:
+        return_object['Ebs']['SnapshotId'] = volume.get('snapshot')
+
+    if 'volume_size' in volume:
+        return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0))
+
+    if 'volume_type' in volume:
+        return_object['Ebs']['VolumeType'] = volume.get('volume_type')
+
+    if 'delete_on_termination' in volume:
+        return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False)
+
+    if 'iops' in volume:
+        return_object['Ebs']['Iops'] = volume.get('iops')
+
+    if 'encrypted' in volume:
+        return_object['Ebs']['Encrypted'] = volume.get('encrypted')
+
+    return return_object
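To make the mapping concrete, here is the shape create_block_device_meta() produces for the io1 volume used in the EXAMPLES section above. The dict literals are illustrative (a sketch of the AutoScaling CreateLaunchConfiguration BlockDeviceMappings structure, not captured module output):

    # One entry from the module's `volumes` parameter...
    volume = {'device_name': '/dev/sda1', 'volume_size': 100, 'volume_type': 'io1',
              'iops': 3000, 'delete_on_termination': True, 'encrypted': True}

    # ...becomes one BlockDeviceMappings entry for the AutoScaling API:
    expected = {
        'DeviceName': '/dev/sda1',
        'Ebs': {
            'VolumeSize': 100,
            'VolumeType': 'io1',
            'Iops': 3000,
            'DeleteOnTermination': True,
            'Encrypted': True,
        },
    }

Keys that address the device itself (DeviceName, VirtualName, NoDevice) stay at the top level, while everything describing an EBS volume is nested under 'Ebs'.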
+
+
+def create_launch_config(connection, module):
+    name = module.params.get('name')
+    vpc_id = module.params.get('vpc_id')
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        ec2_connection = boto3_conn(module, 'client', 'ec2', region, ec2_url, **aws_connect_kwargs)
+        security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except ValueError as e:
+        module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc())
+    user_data = module.params.get('user_data')
+    user_data_path = module.params.get('user_data_path')
+    volumes = module.params['volumes']
+    instance_monitoring = module.params.get('instance_monitoring')
+    assign_public_ip = module.params.get('assign_public_ip')
+    instance_profile_name = module.params.get('instance_profile_name')
+    ebs_optimized = module.params.get('ebs_optimized')
+    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
+    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
+
+    block_device_mapping = []
+
+    convert_list = ['image_id', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price']
+
+    launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)))
+
+    if user_data_path:
+        try:
+            with open(user_data_path, 'r') as user_data_file:
+                user_data = user_data_file.read()
+        except IOError as e:
+            module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc())
+
+    if volumes:
+        for volume in volumes:
+            if 'device_name' not in volume:
+                module.fail_json(msg='Device name must be set for volume')
+            # Minimum volume size is 1GiB.
We'll use volume size explicitly set to 0 to be a signal not to create this volume + if 'volume_size' not in volume or int(volume['volume_size']) > 0: + block_device_mapping.append(create_block_device_meta(module, volume)) + + try: + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to describe launch configuration by name", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + changed = False + result = {} + + launch_config['LaunchConfigurationName'] = name + + if security_groups is not None: + launch_config['SecurityGroups'] = security_groups + + if classic_link_vpc_id is not None: + launch_config['ClassicLinkVPCId'] = classic_link_vpc_id + + if instance_monitoring is not None: + launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring} + + if classic_link_vpc_security_groups is not None: + launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups + + if block_device_mapping: + launch_config['BlockDeviceMappings'] = block_device_mapping + + if instance_profile_name is not None: + launch_config['IamInstanceProfile'] = instance_profile_name + + if assign_public_ip is not None: + launch_config['AssociatePublicIpAddress'] = assign_public_ip + + if user_data is not None: + launch_config['UserData'] = user_data + + if ebs_optimized is not None: + launch_config['EbsOptimized'] = ebs_optimized + + if len(launch_configs) == 0: + try: + connection.create_launch_configuration(**launch_config) + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + changed = True + if launch_configs: + launch_config = launch_configs[0] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + result = (dict((k, v) for k, v in launch_config.items() + if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings'])) + + result['CreatedTime'] = to_text(launch_config.get('CreatedTime')) + + try: + result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled')) + except AttributeError: + result['InstanceMonitoring'] = False + + result['BlockDeviceMappings'] = [] + + for block_device_mapping in launch_config.get('BlockDeviceMappings', []): + result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName'))) + if block_device_mapping.get('Ebs') is not None: + result['BlockDeviceMappings'][-1]['ebs'] = dict( + snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize')) + + if user_data_path: + result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal + + return_object = { + 'Name': result.get('LaunchConfigurationName'), + 'CreatedTime': result.get('CreatedTime'), + 'ImageId': result.get('ImageId'), + 'Arn': result.get('LaunchConfigurationARN'), + 'SecurityGroups': result.get('SecurityGroups'), + 'InstanceType': result.get('InstanceType'), + 'Result': result + } + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object)) + + +def delete_launch_config(connection, module): + try: + name = module.params.get('name') + launch_configs = 
connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+        if launch_configs:
+            connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName'))
+            module.exit_json(changed=True)
+        else:
+            module.exit_json(changed=False)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed to delete launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            image_id=dict(),
+            instance_id=dict(),
+            key_name=dict(),
+            security_groups=dict(default=[], type='list'),
+            user_data=dict(),
+            user_data_path=dict(type='path'),
+            kernel_id=dict(),
+            volumes=dict(type='list'),
+            instance_type=dict(),
+            state=dict(default='present', choices=['present', 'absent']),
+            spot_price=dict(type='float'),
+            ramdisk_id=dict(),
+            instance_profile_name=dict(),
+            ebs_optimized=dict(default=False, type='bool'),
+            associate_public_ip_address=dict(type='bool', removed_in_version='2.14'),
+            instance_monitoring=dict(default=False, type='bool'),
+            assign_public_ip=dict(type='bool'),
+            classic_link_vpc_security_groups=dict(type='list'),
+            classic_link_vpc_id=dict(),
+            vpc_id=dict(),
+            placement_tenancy=dict(choices=['default', 'dedicated'])
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[['user_data', 'user_data_path']]
+    )
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoRegionError:
+        module.fail_json(msg=("region must be specified as a parameter, in the AWS_DEFAULT_REGION environment variable, or in the boto configuration file"))
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="unable to establish connection - " + str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+    state = module.params.get('state')
+
+    if state == 'present':
+        create_launch_config(connection, module)
+    elif state == 'absent':
+        delete_launch_config(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_lc_facts.py b/ec2_lc_facts.py
new file mode 120000
index 00000000000..cb62597c074
--- /dev/null
+++ b/ec2_lc_facts.py
@@ -0,0 +1 @@
+ec2_lc_info.py
\ No newline at end of file
diff --git a/ec2_lc_find.py b/ec2_lc_find.py
new file mode 100644
index 00000000000..1ba21ae382f
--- /dev/null
+++ b/ec2_lc_find.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_lc_find
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+  - Returns list of matching Launch Configurations for a given name, along with other useful information.
+  - Results can be sorted and sliced.
+  - It depends on boto3.
+  - Based on the work by Tom Bamford U(https://github.com/tombamford)
+
+author: "Jose Armesto (@fiunchinho)"
+options:
+  name_regex:
+    description:
+      - A Launch Configuration to match.
+      - It'll be compiled as regex.
+    required: True
+    type: str
+  sort_order:
+    description:
+      - Order in which to sort results.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    type: str
+  limit:
+    description:
+      - How many results to show.
+      - Corresponds to Python slice notation like list[:limit].
+    type: int
+requirements:
+  - "python >= 2.6"
+  - boto3
+extends_documentation_fragment:
+- ansible.amazon.ec2
+- ansible.amazon.aws

+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the Launch Configurations that start with "app"
+- ec2_lc_find:
+    name_regex: app.*
+    sort_order: descending
+    limit: 2
+'''
+
+RETURN = '''
+image_id:
+  description: AMI id
+  returned: when Launch Configuration was found
+  type: str
+  sample: "ami-0d75df7e"
+user_data:
+  description: User data used to start instance
+  returned: when Launch Configuration was found
+  type: str
+  sample: "ZXhwb3J0IENMT1VE"
+name:
+  description: Name of the Launch Configuration
+  returned: when Launch Configuration was found
+  type: str
+  sample: "myapp-v123"
+arn:
+  description: The Amazon Resource Name (ARN) of the Launch Configuration
+  returned: when Launch Configuration was found
+  type: str
+  sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+  description: Type of ec2 instance
+  returned: when Launch Configuration was found
+  type: str
+  sample: "t2.small"
+created_time:
+  description: When it was created
+  returned: when Launch Configuration was found
+  type: str
+  sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+  description: Launch Configuration EBS optimized property
+  returned: when Launch Configuration was found
+  type: bool
+  sample: False
+instance_monitoring:
+  description: Launch Configuration instance monitoring property
+  returned: when Launch Configuration was found
+  type: dict
+  sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+  description: Launch Configuration classic link vpc security groups property
+  returned: when Launch Configuration was found
+  type: list
+  sample: []
+block_device_mappings:
+  description: Launch Configuration block device mappings property
+  returned: when Launch Configuration was found
+  type: list
+  sample: []
+keyname:
+  description: Launch Configuration ssh key
+  returned: when Launch Configuration was found
+  type: str
+  sample: mykey
+security_groups:
+  description: Launch Configuration security groups
+  returned: when Launch Configuration was found
+  type: list
+  sample: []
+kernel_id:
+  description: Launch Configuration kernel to use
+  returned: when Launch Configuration was found
+  type: str
+  sample: ''
+ram_disk_id:
+  description: Launch Configuration ram disk property
+  returned: when Launch Configuration was found
+  type: str
+  sample: ''
+associate_public_address:
+  description: Assign public address or not
+  returned: when Launch Configuration was found
+  type: bool
+  sample: True
+...
+'''
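find_launch_configs() below is essentially a paginate-then-filter loop over the AutoScaling API. A minimal sketch of the same pattern outside Ansible (assuming boto3 is installed and credentials/region are configured; the function name is illustrative, not part of this module):

    import re

    import boto3

    def matching_launch_config_names(pattern):
        client = boto3.client('autoscaling')
        regex = re.compile(pattern)
        names = []
        # The paginator transparently follows NextToken across result pages.
        for page in client.get_paginator('describe_launch_configurations').paginate():
            names.extend(lc['LaunchConfigurationName']
                         for lc in page['LaunchConfigurations']
                         if regex.match(lc['LaunchConfigurationName']))
        return names

Sorting and slicing then happen client-side, which is why the module can offer sort_order and limit even though the API itself has no such parameters.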
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+def find_launch_configs(client, module):
+    name_regex = module.params.get('name_regex')
+    sort_order = module.params.get('sort_order')
+    limit = module.params.get('limit')
+
+    paginator = client.get_paginator('describe_launch_configurations')
+
+    response_iterator = paginator.paginate(
+        PaginationConfig={
+            'MaxItems': 1000,
+            'PageSize': 100
+        }
+    )
+
+    results = []
+    name_filter = re.compile(name_regex)
+
+    for response in response_iterator:
+        response['LaunchConfigurations'] = filter(lambda lc: name_filter.match(lc['LaunchConfigurationName']),
+                                                  response['LaunchConfigurations'])
+
+        for lc in response['LaunchConfigurations']:
+            data = {
+                'name': lc['LaunchConfigurationName'],
+                'arn': lc['LaunchConfigurationARN'],
+                'created_time': lc['CreatedTime'],
+                'user_data': lc['UserData'],
+                'instance_type': lc['InstanceType'],
+                'image_id': lc['ImageId'],
+                'ebs_optimized': lc['EbsOptimized'],
+                'instance_monitoring': lc['InstanceMonitoring'],
+                'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
+                'block_device_mappings': lc['BlockDeviceMappings'],
+                'keyname': lc['KeyName'],
+                'security_groups': lc['SecurityGroups'],
+                'kernel_id': lc['KernelId'],
+                'ram_disk_id': lc['RamdiskId'],
+                'associate_public_address': lc.get('AssociatePublicIpAddress', False),
+            }
+
+            results.append(data)
+
+    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+
+    if limit:
+        results = results[:int(limit)]
+
+    module.exit_json(changed=False, results=results)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        name_regex=dict(required=True),
+        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
+        limit=dict(required=False, type='int'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
+
+    client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
+    find_launch_configs(client, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_lc_info.py b/ec2_lc_info.py
new file mode 100644
index 00000000000..ed49b946363
--- /dev/null
+++ b/ec2_lc_info.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_lc_info
+short_description: Gather information about AWS Autoscaling Launch Configurations.
+description:
+  - Gather information about AWS Autoscaling Launch Configurations.
+  - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change.
+author: "Loïc Latreille (@psykotox)"
+requirements: [ boto3 ]
+options:
+  name:
+    description:
+      - A name or a list of names to match.
+    default: []
+    type: list
+    elements: str
+  sort:
+    description:
+      - Optional attribute with which to sort the results.
+    choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+    type: str
+  sort_order:
+    description:
+      - Order in which to sort results.
+      - Only used when the 'sort' parameter is specified.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    type: str
+  sort_start:
+    description:
+      - Which result to start with (when sorting).
+      - Corresponds to Python slice notation.
+    type: int
+  sort_end:
+    description:
+      - Which result to end with (when sorting).
+      - Corresponds to Python slice notation.
+    type: int
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all launch configurations
+- ec2_lc_info:
+
+# Gather information about launch configuration with name "example"
+- ec2_lc_info:
+    name: example
+
+# Gather information sorted by created_time from most recent to least recent
+- ec2_lc_info:
+    sort: created_time
+    sort_order: descending
+'''
+
+RETURN = '''
+block_device_mapping:
+  description: Block device mapping for the instances of launch configuration
+  type: list
+  returned: always
+  sample: "[{
+      'device_name': '/dev/xvda',
+      'ebs': {
+          'delete_on_termination': true,
+          'volume_size': 8,
+          'volume_type': 'gp2'
+      }
+  }]"
+classic_link_vpc_security_groups:
+  description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
+  type: str
+  returned: always
+  sample:
+created_time:
+  description: The creation date and time for the launch configuration
+  type: str
+  returned: always
+  sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+  description: EBS I/O optimized (true) or not (false)
+  type: bool
+  returned: always
+  sample: true
+image_id:
+  description: ID of the Amazon Machine Image (AMI)
+  type: str
+  returned: always
+  sample: "ami-12345678"
+instance_monitoring:
+  description: Launched with detailed monitoring or not
+  type: dict
+  returned: always
+  sample: "{
+      'enabled': true
+  }"
+instance_type:
+  description: Instance type
+  type: str
+  returned: always
+  sample: "t2.micro"
+kernel_id:
+  description: ID of the kernel associated with the AMI
+  type: str
+  returned: always
+  sample:
+key_name:
+  description: Name of the key pair
+  type: str
+  returned: always
+  sample: "user_app"
+launch_configuration_arn:
+  description: Amazon Resource Name (ARN) of the launch configuration
+  type: str
+  returned: always
+  sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+  description: Name of the launch configuration
+  type: str
+  returned: always
+  sample: "lc-app"
+ramdisk_id:
+  description: ID of the RAM disk associated with the AMI
+  type: str
+  returned: always
+  sample:
+security_groups:
+  description: Security groups associated with the instances
+  type: list
+  returned: always
+  sample: "[
+      'web'
+  ]"
+user_data:
+  description: User data available
+  type: str
+  returned: always
+  sample:
+'''
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
+                                                                         get_aws_connection_info)
+
+
+def list_launch_configs(connection, module):
+
+    launch_config_name = module.params.get("name")
+    sort = module.params.get('sort')
+    sort_order = module.params.get('sort_order')
+    sort_start = module.params.get('sort_start')
+    sort_end = module.params.get('sort_end')
+
+    try:
+        pg = connection.get_paginator('describe_launch_configurations')
+        launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
+    except ClientError as e:
+        module.fail_json(msg=str(e))
+
+    snaked_launch_configs = []
+    for launch_config in launch_configs['LaunchConfigurations']:
+        snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
+
+    for launch_config in snaked_launch_configs:
+        if 'created_time' in launch_config:
+            launch_config['created_time'] = str(launch_config['created_time'])
+
+    if sort:
+        snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
+
+    if sort and sort_start and sort_end:
+        snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
+    elif sort and sort_start:
+        snaked_launch_configs = snaked_launch_configs[sort_start:]
+    elif sort and sort_end:
+        snaked_launch_configs = snaked_launch_configs[:sort_end]
+
+    module.exit_json(launch_configurations=snaked_launch_configs)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=False, default=[], type='list'),
+            sort=dict(required=False, default=None,
+                      choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+            sort_order=dict(required=False, default='ascending',
+                            choices=['ascending', 'descending']),
+            sort_start=dict(required=False, type='int'),
+            sort_end=dict(required=False, type='int'),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+    if module._name == 'ec2_lc_facts':
+        module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", version='2.13')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+    if region:
+        connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params)
+    else:
+        module.fail_json(msg="region must be specified")
+
+    list_launch_configs(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py
new file mode 100644
index 00000000000..05bd69654b9
--- /dev/null
+++ b/ec2_metric_alarm.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: ec2_metric_alarm +short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" +description: + - Can create or delete AWS metric alarms. + - Metrics you wish to alarm on must already exist. +author: "Zacharie Eakin (@Zeekin)" +options: + state: + description: + - Register or deregister the alarm. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Unique name for the alarm. + required: true + type: str + metric: + description: + - Name of the monitored metric (e.g. C(CPUUtilization)). + - Metric must already exist. + required: false + type: str + namespace: + description: + - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in cloudwatch. + required: false + type: str + statistic: + description: + - Operation applied to the metric. + - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value. + required: false + choices: ['SampleCount','Average','Sum','Minimum','Maximum'] + type: str + comparison: + description: + - Determines how the threshold value is compared + - Symbolic comparison operators have been deprecated, and will be removed in 2.14 + required: false + type: str + choices: + - 'GreaterThanOrEqualToThreshold' + - 'GreaterThanThreshold' + - 'LessThanThreshold' + - 'LessThanOrEqualToThreshold' + - '<=' + - '<' + - '>=' + - '>' + threshold: + description: + - Sets the min/max bound for triggering the alarm. + required: false + type: float + period: + description: + - The time (in seconds) between metric evaluations. + required: false + type: int + evaluation_periods: + description: + - The number of times in which the metric is evaluated before final calculation. + required: false + type: int + unit: + description: + - The threshold's unit of measurement. + required: false + type: str + choices: + - 'Seconds' + - 'Microseconds' + - 'Milliseconds' + - 'Bytes' + - 'Kilobytes' + - 'Megabytes' + - 'Gigabytes' + - 'Terabytes' + - 'Bits' + - 'Kilobits' + - 'Megabits' + - 'Gigabits' + - 'Terabits' + - 'Percent' + - 'Count' + - 'Bytes/Second' + - 'Kilobytes/Second' + - 'Megabytes/Second' + - 'Gigabytes/Second' + - 'Terabytes/Second' + - 'Bits/Second' + - 'Kilobits/Second' + - 'Megabits/Second' + - 'Gigabits/Second' + - 'Terabits/Second' + - 'Count/Second' + - 'None' + description: + description: + - A longer description of the alarm. + required: false + type: str + dimensions: + description: + - A dictionary describing which metric the alarm is applied to. + - 'For more information see the AWS documentation:' + - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension) + required: false + type: dict + alarm_actions: + description: + - A list of the names action(s) taken when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s). + required: false + type: list + elements: str + insufficient_data_actions: + description: + - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status. + required: false + type: list + elements: str + ok_actions: + description: + - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s). 
+ required: false + type: list + elements: str + treat_missing_data: + description: + - Sets how the alarm handles missing data points. + required: false + type: str + choices: + - 'breaching' + - 'notBreaching' + - 'ignore' + - 'missing' + default: 'missing' +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' + - name: create alarm + ec2_metric_alarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metric: "CPUUtilization" + namespace: "AWS/EC2" + statistic: Average + comparison: "LessThanOrEqualToThreshold" + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: "Percent" + description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes " + dimensions: {'InstanceId':'i-XXX'} + alarm_actions: ["action1","action2"] + + - name: Create an alarm to recover a failed instance + ec2_metric_alarm: + state: present + region: us-west-1 + name: "recover-instance" + metric: "StatusCheckFailed_System" + namespace: "AWS/EC2" + statistic: "Minimum" + comparison: ">=" + threshold: 1.0 + period: 60 + evaluation_periods: 2 + unit: "Count" + description: "This will recover an instance when it fails" + dimensions: {"InstanceId":'i-XXX'} + alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # protected by AnsibleAWSModule + + +def create_metric_alarm(connection, module): + + name = module.params.get('name') + metric = module.params.get('metric') + namespace = module.params.get('namespace') + statistic = module.params.get('statistic') + comparison = module.params.get('comparison') + threshold = module.params.get('threshold') + period = module.params.get('period') + evaluation_periods = module.params.get('evaluation_periods') + unit = module.params.get('unit') + description = module.params.get('description') + dimensions = module.params.get('dimensions') + alarm_actions = module.params.get('alarm_actions') + insufficient_data_actions = module.params.get('insufficient_data_actions') + ok_actions = module.params.get('ok_actions') + treat_missing_data = module.params.get('treat_missing_data') + + warnings = [] + + alarms = connection.describe_alarms(AlarmNames=[name]) + + comparisons = {'<=': 'LessThanOrEqualToThreshold', + '<': 'LessThanThreshold', + '>=': 'GreaterThanOrEqualToThreshold', + '>': 'GreaterThanThreshold'} + if comparison in ('<=', '<', '>', '>='): + module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated. 
Please use LessThanOrEqualToThreshold, ' + 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', version="2.14") + comparison = comparisons[comparison] + + if not isinstance(dimensions, list): + fixed_dimensions = [] + for key, value in dimensions.items(): + fixed_dimensions.append({'Name': key, 'Value': value}) + dimensions = fixed_dimensions + + if not alarms['MetricAlarms']: + try: + connection.put_metric_alarm(AlarmName=name, + MetricName=metric, + Namespace=namespace, + Statistic=statistic, + ComparisonOperator=comparison, + Threshold=threshold, + Period=period, + EvaluationPeriods=evaluation_periods, + Unit=unit, + AlarmDescription=description, + Dimensions=dimensions, + AlarmActions=alarm_actions, + InsufficientDataActions=insufficient_data_actions, + OKActions=ok_actions, + TreatMissingData=treat_missing_data) + changed = True + alarms = connection.describe_alarms(AlarmNames=[name]) + except ClientError as e: + module.fail_json_aws(e) + + else: + changed = False + alarm = alarms['MetricAlarms'][0] + + # Workaround for alarms created before TreatMissingData was introduced + if 'TreatMissingData' not in alarm.keys(): + alarm['TreatMissingData'] = 'missing' + + for key, value in {'MetricName': metric, + 'Namespace': namespace, + 'Statistic': statistic, + 'ComparisonOperator': comparison, + 'Threshold': threshold, + 'Period': period, + 'EvaluationPeriods': evaluation_periods, + 'Unit': unit, + 'AlarmDescription': description, + 'Dimensions': dimensions, + 'TreatMissingData': treat_missing_data}.items(): + try: + if alarm[key] != value: + changed = True + except KeyError: + if value is not None: + changed = True + + alarm[key] = value + + for key, value in {'AlarmActions': alarm_actions, + 'InsufficientDataActions': insufficient_data_actions, + 'OKActions': ok_actions}.items(): + action = value or [] + if alarm[key] != action: + changed = True + alarm[key] = value + + try: + if changed: + connection.put_metric_alarm(AlarmName=alarm['AlarmName'], + MetricName=alarm['MetricName'], + Namespace=alarm['Namespace'], + Statistic=alarm['Statistic'], + ComparisonOperator=alarm['ComparisonOperator'], + Threshold=alarm['Threshold'], + Period=alarm['Period'], + EvaluationPeriods=alarm['EvaluationPeriods'], + Unit=alarm['Unit'], + AlarmDescription=alarm['AlarmDescription'], + Dimensions=alarm['Dimensions'], + AlarmActions=alarm['AlarmActions'], + InsufficientDataActions=alarm['InsufficientDataActions'], + OKActions=alarm['OKActions'], + TreatMissingData=alarm['TreatMissingData']) + except ClientError as e: + module.fail_json_aws(e) + + result = alarms['MetricAlarms'][0] + module.exit_json(changed=changed, warnings=warnings, + name=result['AlarmName'], + actions_enabled=result['ActionsEnabled'], + alarm_actions=result['AlarmActions'], + alarm_arn=result['AlarmArn'], + comparison=result['ComparisonOperator'], + description=result['AlarmDescription'], + dimensions=result['Dimensions'], + evaluation_periods=result['EvaluationPeriods'], + insufficient_data_actions=result['InsufficientDataActions'], + last_updated=result['AlarmConfigurationUpdatedTimestamp'], + metric=result['MetricName'], + namespace=result['Namespace'], + ok_actions=result['OKActions'], + period=result['Period'], + state_reason=result['StateReason'], + state_value=result['StateValue'], + statistic=result['Statistic'], + threshold=result['Threshold'], + treat_missing_data=result['TreatMissingData'], + unit=result['Unit']) + + +def delete_metric_alarm(connection, module): + name = module.params.get('name') + 
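+    # The describe/delete sequence below keeps deletion idempotent: DeleteAlarms
+    # is only called when DescribeAlarms shows the alarm actually exists, so a
+    # second run with state=absent reports changed=False instead of failing.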
alarms = connection.describe_alarms(AlarmNames=[name]) + + if alarms['MetricAlarms']: + try: + connection.delete_alarms(AlarmNames=[name]) + module.exit_json(changed=True) + except (ClientError) as e: + module.fail_json_aws(e) + else: + module.exit_json(changed=False) + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + metric=dict(type='str'), + namespace=dict(type='str'), + statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), + comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', + 'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']), + threshold=dict(type='float'), + period=dict(type='int'), + unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', + 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', + 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', + 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', + 'Terabits/Second', 'Count/Second', 'None']), + evaluation_periods=dict(type='int'), + description=dict(type='str'), + dimensions=dict(type='dict', default={}), + alarm_actions=dict(type='list', default=[]), + insufficient_data_actions=dict(type='list', default=[]), + ok_actions=dict(type='list', default=[]), + treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), + state=dict(default='present', choices=['present', 'absent']), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + state = module.params.get('state') + + connection = module.client('cloudwatch') + + if state == 'present': + create_metric_alarm(connection, module) + elif state == 'absent': + delete_metric_alarm(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_placement_group.py b/ec2_placement_group.py new file mode 100644 index 00000000000..d1d26535261 --- /dev/null +++ b/ec2_placement_group.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_placement_group +short_description: Create or delete an EC2 Placement Group +description: + - Create an EC2 Placement Group; if the placement group already exists, + nothing is done. Or, delete an existing placement group. If the placement + group is absent, do nothing. See also + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +author: "Brad Macpherson (@iiibrad)" +options: + name: + description: + - The name for the placement group. + required: true + type: str + state: + description: + - Create or delete placement group. + default: present + choices: [ 'present', 'absent' ] + type: str + strategy: + description: + - Placement group strategy. Cluster will cluster instances into a + low-latency group in a single Availability Zone, while Spread spreads + instances across underlying hardware. 
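+      - Note that AWS limits a C(spread) placement group to seven running instances
+        per Availability Zone; this restriction comes from EC2, not from this module.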
+ default: cluster + choices: [ 'cluster', 'spread' ] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide +# for details. + +# Create a placement group. +- ec2_placement_group: + name: my-cluster + state: present + +# Create a Spread placement group. +- ec2_placement_group: + name: my-cluster + state: present + strategy: spread + +# Delete a placement group. +- ec2_placement_group: + name: my-cluster + state: absent + +''' + + +RETURN = ''' +placement_group: + description: Placement group attributes + returned: when state != absent + type: complex + contains: + name: + description: PG name + type: str + sample: my-cluster + state: + description: PG state + type: str + sample: "available" + strategy: + description: PG strategy + type: str + sample: "cluster" + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +try: + from botocore.exceptions import (BotoCoreError, ClientError) +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.exponential_backoff() +def get_placement_group_details(connection, module): + name = module.params.get("name") + try: + response = connection.describe_placement_groups( + Filters=[{ + "Name": "group-name", + "Values": [name] + }]) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws( + e, + msg="Couldn't find placement group named [%s]" % name) + + if len(response['PlacementGroups']) != 1: + return None + else: + placement_group = response['PlacementGroups'][0] + return { + "name": placement_group['GroupName'], + "state": placement_group['State'], + "strategy": placement_group['Strategy'], + } + + +@AWSRetry.exponential_backoff() +def create_placement_group(connection, module): + name = module.params.get("name") + strategy = module.params.get("strategy") + + try: + connection.create_placement_group( + GroupName=name, Strategy=strategy, DryRun=module.check_mode) + except (BotoCoreError, ClientError) as e: + if e.response['Error']['Code'] == "DryRunOperation": + module.exit_json(changed=True, placement_group={ + "name": name, + "state": 'DryRun', + "strategy": strategy, + }) + module.fail_json_aws( + e, + msg="Couldn't create placement group [%s]" % name) + + module.exit_json(changed=True, + placement_group=get_placement_group_details( + connection, module + )) + + +@AWSRetry.exponential_backoff() +def delete_placement_group(connection, module): + name = module.params.get("name") + + try: + connection.delete_placement_group( + GroupName=name, DryRun=module.check_mode) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws( + e, + msg="Couldn't delete placement group [%s]" % name) + + module.exit_json(changed=True) + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent']), + strategy=dict(default='cluster', choices=['cluster', 'spread']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + connection = module.client('ec2') + + state = module.params.get("state") + + if state == 'present': + placement_group = get_placement_group_details(connection, module) + if placement_group is None: + create_placement_group(connection, module) + else: + strategy = module.params.get("strategy") + if placement_group['strategy'] == strategy: + 
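+                # The group already exists with the requested strategy, so there
+                # is nothing to do; report ok and echo the group's attributes.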
module.exit_json( + changed=False, placement_group=placement_group) + else: + name = module.params.get("name") + module.fail_json( + msg=("Placement group '{}' exists, can't change strategy" + + " from '{}' to '{}'").format( + name, + placement_group['strategy'], + strategy)) + + elif state == 'absent': + placement_group = get_placement_group_details(connection, module) + if placement_group is None: + module.exit_json(changed=False) + else: + delete_placement_group(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_placement_group_facts.py b/ec2_placement_group_facts.py new file mode 120000 index 00000000000..7d33ef0167f --- /dev/null +++ b/ec2_placement_group_facts.py @@ -0,0 +1 @@ +ec2_placement_group_info.py \ No newline at end of file diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py new file mode 100644 index 00000000000..f0e7092e43e --- /dev/null +++ b/ec2_placement_group_info.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_placement_group_info +short_description: List EC2 Placement Group(s) details +description: + - List details of EC2 Placement Group(s). + - This module was called C(ec2_placement_group_facts) before Ansible 2.9. The usage did not change. +author: "Brad Macpherson (@iiibrad)" +options: + names: + description: + - A list of names to filter on. If a listed group does not exist, there + will be no corresponding entry in the result; no error will be raised. + type: list + elements: str + required: false + default: [] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details or the AWS region, +# see the AWS Guide for details. + +# List all placement groups. +- ec2_placement_group_info: + register: all_ec2_placement_groups + +# List two placement groups. 
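+# (Groups named here that do not exist are simply omitted from the result
+# rather than causing a failure, as noted in the option documentation above.)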
+- ec2_placement_group_info: + names: + - my-cluster + - my-other-cluster + register: specific_ec2_placement_groups + +- debug: msg="{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}" + +''' + + +RETURN = ''' +placement_groups: + description: Placement group attributes + returned: always + type: complex + contains: + name: + description: PG name + type: str + sample: my-cluster + state: + description: PG state + type: str + sample: "available" + strategy: + description: PG strategy + type: str + sample: "cluster" + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +try: + from botocore.exceptions import (BotoCoreError, ClientError) +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_placement_groups_details(connection, module): + names = module.params.get("names") + try: + if len(names) > 0: + response = connection.describe_placement_groups( + Filters=[{ + "Name": "group-name", + "Values": names + }]) + else: + response = connection.describe_placement_groups() + except (BotoCoreError, ClientError) as e: + module.fail_json_aws( + e, + msg="Couldn't find placement groups named [%s]" % names) + + results = [] + for placement_group in response['PlacementGroups']: + results.append({ + "name": placement_group['GroupName'], + "state": placement_group['State'], + "strategy": placement_group['Strategy'], + }) + return results + + +def main(): + argument_spec = dict( + names=dict(type='list', default=[]) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + if module._module._name == 'ec2_placement_group_facts': + module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'", version='2.13') + + connection = module.client('ec2') + + placement_groups = get_placement_groups_details(connection, module) + module.exit_json(changed=False, placement_groups=placement_groups) + + +if __name__ == '__main__': + main() diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py new file mode 100644 index 00000000000..954a148f374 --- /dev/null +++ b/ec2_scaling_policy.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: ec2_scaling_policy +short_description: Create or delete AWS scaling policies for Autoscaling groups +description: + - Can create or delete scaling policies for autoscaling groups. + - Referenced autoscaling groups must already exist. +author: "Zacharie Eakin (@Zeekin)" +options: + state: + description: + - Register or deregister the policy. + default: present + choices: ['present', 'absent'] + type: str + name: + description: + - Unique name for the scaling policy. + required: true + type: str + asg_name: + description: + - Name of the associated autoscaling group. + required: true + type: str + adjustment_type: + description: + - The type of change in capacity of the autoscaling group. + choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity'] + type: str + scaling_adjustment: + description: + - The amount by which the autoscaling group is adjusted by the policy. 
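+      - A negative value scales the group in, for example C(-1) removes one
+        instance when I(adjustment_type=ChangeInCapacity).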
+ type: int + min_adjustment_step: + description: + - Minimum amount of adjustment when policy is triggered. + type: int + cooldown: + description: + - The minimum period of time (in seconds) between which autoscaling actions can take place. + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- ec2_scaling_policy: + state: present + region: US-XXX + name: "scaledown-policy" + adjustment_type: "ChangeInCapacity" + asg_name: "slave-pool" + scaling_adjustment: -1 + min_adjustment_step: 1 + cooldown: 300 +''' + +try: + import boto.ec2.autoscale + import boto.exception + from boto.ec2.autoscale import ScalingPolicy + from boto.exception import BotoServerError +except ImportError: + pass # Taken care of by ec2.HAS_BOTO + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, + get_aws_connection_info) + + +def create_scaling_policy(connection, module): + sp_name = module.params.get('name') + adjustment_type = module.params.get('adjustment_type') + asg_name = module.params.get('asg_name') + scaling_adjustment = module.params.get('scaling_adjustment') + min_adjustment_step = module.params.get('min_adjustment_step') + cooldown = module.params.get('cooldown') + + scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name]) + + if not scalingPolicies: + sp = ScalingPolicy( + name=sp_name, + adjustment_type=adjustment_type, + as_name=asg_name, + scaling_adjustment=scaling_adjustment, + min_adjustment_step=min_adjustment_step, + cooldown=cooldown) + + try: + connection.create_scaling_policy(sp) + policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0] + module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, + cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) + except BotoServerError as e: + module.fail_json(msg=str(e)) + else: + policy = scalingPolicies[0] + changed = False + + # min_adjustment_step attribute is only relevant if the adjustment_type + # is set to percentage change in capacity, so it is a special case + if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity': + if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'): + changed = True + + # set the min adjustment step in case the user decided to change their + # adjustment type to percentage + setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step')) + + # check the remaining attributes + for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'): + if getattr(policy, attr) != module.params.get(attr): + changed = True + setattr(policy, attr, module.params.get(attr)) + + try: + if changed: + connection.create_scaling_policy(policy) + policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0] + module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, + cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) + except BotoServerError as e: + module.fail_json(msg=str(e)) + + +def delete_scaling_policy(connection, module): + sp_name = module.params.get('name') + asg_name = module.params.get('asg_name') + + 
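+    # Look the policy up first so that removing an already-absent policy is a
+    # clean no-op: the caller exits with changed=False when nothing matches.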
scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name]) + + if scalingPolicies: + try: + connection.delete_policy(sp_name, asg_name) + module.exit_json(changed=True) + except BotoServerError as e: + module.exit_json(changed=False, msg=str(e)) + else: + module.exit_json(changed=False) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + asg_name=dict(required=True, type='str'), + scaling_adjustment=dict(type='int'), + min_adjustment_step=dict(type='int'), + cooldown=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + state = module.params.get('state') + + try: + connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + if state == 'present': + create_scaling_policy(connection, module) + elif state == 'absent': + delete_scaling_policy(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py new file mode 100644 index 00000000000..89ace145e6c --- /dev/null +++ b/ec2_snapshot_copy.py @@ -0,0 +1,200 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: ec2_snapshot_copy +short_description: Copies an EC2 snapshot and returns the new Snapshot ID. +description: + - Copies an EC2 Snapshot from a source region to a destination region. +options: + source_region: + description: + - The source region the Snapshot should be copied from. + required: true + type: str + source_snapshot_id: + description: + - The ID of the Snapshot in source region that should be copied. + required: true + type: str + description: + description: + - An optional human-readable string describing purpose of the new Snapshot. + type: str + encrypted: + description: + - Whether or not the destination Snapshot should be encrypted. + type: bool + default: 'no' + kms_key_id: + description: + - KMS key id used to encrypt snapshot. If not specified, AWS defaults to C(alias/aws/ebs). + type: str + wait: + description: + - Wait for the copied Snapshot to be in 'Available' state before returning. + type: bool + default: 'no' + wait_timeout: + description: + - How long before wait gives up, in seconds. 
+ default: 600 + type: int + tags: + description: + - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}' + type: dict +author: Deepak Kothandan (@Deepakkothandan) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 +''' + +EXAMPLES = ''' +# Basic Snapshot Copy +- ec2_snapshot_copy: + source_region: eu-central-1 + region: eu-west-1 + source_snapshot_id: snap-xxxxxxx + +# Copy Snapshot and wait until available +- ec2_snapshot_copy: + source_region: eu-central-1 + region: eu-west-1 + source_snapshot_id: snap-xxxxxxx + wait: yes + wait_timeout: 1200 # Default timeout is 600 + register: snapshot_id + +# Tagged Snapshot copy +- ec2_snapshot_copy: + source_region: eu-central-1 + region: eu-west-1 + source_snapshot_id: snap-xxxxxxx + tags: + Name: Snapshot-Name + +# Encrypted Snapshot copy +- ec2_snapshot_copy: + source_region: eu-central-1 + region: eu-west-1 + source_snapshot_id: snap-xxxxxxx + encrypted: yes + +# Encrypted Snapshot copy with specified key +- ec2_snapshot_copy: + source_region: eu-central-1 + region: eu-west-1 + source_snapshot_id: snap-xxxxxxx + encrypted: yes + kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b +''' + +RETURN = ''' +snapshot_id: + description: snapshot id of the newly created snapshot + returned: when snapshot copy is successful + type: str + sample: "snap-e9095e8c" +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict) +from ansible.module_utils._text import to_native + +try: + import boto3 + from botocore.exceptions import ClientError, WaiterError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def copy_snapshot(module, ec2): + """ + Copies an EC2 Snapshot to another region + + module : AnsibleModule object + ec2: ec2 connection object + """ + + params = { + 'SourceRegion': module.params.get('source_region'), + 'SourceSnapshotId': module.params.get('source_snapshot_id'), + 'Description': module.params.get('description') + } + + if module.params.get('encrypted'): + params['Encrypted'] = True + + if module.params.get('kms_key_id'): + params['KmsKeyId'] = module.params.get('kms_key_id') + + try: + snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] + if module.params.get('wait'): + delay = 15 + # Add one to max_attempts as wait() increment + # its counter before assessing it for time.sleep() + max_attempts = (module.params.get('wait_timeout') // delay) + 1 + ec2.get_waiter('snapshot_completed').wait( + SnapshotIds=[snapshot_id], + WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) + ) + if module.params.get('tags'): + ec2.create_tags( + Resources=[snapshot_id], + Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()] + ) + + except WaiterError as we: + module.fail_json(msg='An error occurred waiting for the snapshot to become available. 
(%s)' % str(we), exception=traceback.format_exc()) + except ClientError as ce: + module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response)) + + module.exit_json(changed=True, snapshot_id=snapshot_id) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + source_region=dict(required=True), + source_snapshot_id=dict(required=True), + description=dict(default=''), + encrypted=dict(type='bool', default=False, required=False), + kms_key_id=dict(type='str', required=False), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=600), + tags=dict(type='dict'))) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='botocore and boto3 are required.') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + + copy_snapshot(module, client) + + +if __name__ == '__main__': + main() diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py new file mode 100644 index 00000000000..8408b4369c7 --- /dev/null +++ b/ec2_transit_gateway.py @@ -0,0 +1,578 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: ec2_transit_gateway +short_description: Create and delete AWS Transit Gateways +description: + - Creates AWS Transit Gateways. + - Deletes AWS Transit Gateways. + - Updates tags on existing transit gateways. +requirements: [ 'botocore', 'boto3' ] +options: + asn: + description: + - A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs. + type: int + auto_associate: + description: + - Enable or disable automatic association with the default association route table. + default: true + type: bool + auto_attach: + description: + - Enable or disable automatic acceptance of attachment requests. + default: false + type: bool + auto_propagate: + description: + - Enable or disable automatic propagation of routes to the default propagation route table. + default: true + type: bool + description: + description: + - The description of the transit gateway. + type: str + dns_support: + description: + - Whether to enable AWS DNS support. + default: true + type: bool + purge_tags: + description: + - Whether to purge existing tags not included with tags argument. + default: true + type: bool + state: + description: + - C(present) to ensure resource is created. + - C(absent) to remove resource. + default: present + choices: [ "present", "absent"] + type: str + tags: + description: + - A dictionary of resource tags + type: dict + transit_gateway_id: + description: + - The ID of the transit gateway. + type: str + vpn_ecmp_support: + description: + - Enable or disable Equal Cost Multipath Protocol support. 
+    default: true
+    type: bool
+  wait:
+    description:
+      - Whether to wait for the transit gateway to reach the requested state before returning.
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - Number of seconds to wait for the transit gateway to reach the requested state.
+    default: 300
+    type: int
+
+author: "Bob Boldin (@BobBoldin)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Create a new transit gateway using defaults
+  ec2_transit_gateway:
+    state: present
+    region: us-east-1
+    description: personal-testing
+  register: created_tgw
+
+- name: Create a new transit gateway with options
+  ec2_transit_gateway:
+    asn: 64514
+    auto_associate: no
+    auto_propagate: no
+    dns_support: True
+    description: "nonprod transit gateway"
+    purge_tags: False
+    state: present
+    region: us-east-1
+    tags:
+      Name: nonprod transit gateway
+      status: testing
+
+- name: Remove a transit gateway by description
+  ec2_transit_gateway:
+    state: absent
+    region: us-east-1
+    description: personal-testing
+
+- name: Remove a transit gateway by id
+  ec2_transit_gateway:
+    state: absent
+    region: ap-southeast-2
+    transit_gateway_id: tgw-3a9aa123
+  register: deleted_tgw
+'''
+
+RETURN = '''
+transit_gateway:
+  description: The attributes of the transit gateway.
+  type: complex
+  returned: I(state=present)
+  contains:
+    creation_time:
+      description: The creation time of the transit gateway.
+      returned: always
+      type: str
+      sample: "2019-03-06T17:13:51+00:00"
+    description:
+      description: The description of the transit gateway.
+      returned: always
+      type: str
+      sample: my test tgw
+    options:
+      description: The options attributes of the transit gateway.
+      returned: always
+      type: complex
+      contains:
+        amazon_side_asn:
+          description:
+            - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+              The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
+          returned: always
+          type: int
+          sample: 64512
+        auto_accept_shared_attachments:
+          description: Indicates whether attachment requests are automatically accepted.
+          returned: always
+          type: str
+          sample: disable
+        default_route_table_association:
+          description:
+            - Indicates whether resource attachments are automatically
+              associated with the default association route table.
+          returned: always
+          type: str
+          sample: enable
+        association_default_route_table_id:
+          description: The ID of the default association route table.
+          returned: when exists
+          type: str
+          sample: tgw-rtb-abc123444
+        default_route_table_propagation:
+          description:
+            - Indicates whether resource attachments automatically
+              propagate routes to the default propagation route table.
+          returned: always
+          type: str
+          sample: disable
+        propagation_default_route_table_id:
+          description: The ID of the default propagation route table.
+          returned: when exists
+          type: str
+          sample: tgw-rtb-def456777
+        vpn_ecmp_support:
+          description: Indicates whether Equal Cost Multipath Protocol support is enabled.
+          returned: always
+          type: str
+          sample: enable
+        dns_support:
+          description: Indicates whether DNS support is enabled.
+          returned: always
+          type: str
+          sample: enable
+    owner_id:
+      description: The account that owns the transit gateway.
+      returned: always
+      type: str
+      sample: '123456789012'
+    state:
+      description: The state of the transit gateway.
+      returned: always
+      type: str
+      sample: pending
+    tags:
+      description: A dictionary of resource tags.
+      returned: always
+      type: dict
+      sample:
+        tags:
+          Name: nonprod_tgw
+    transit_gateway_arn:
+      description: The Amazon Resource Name (ARN) of the transit gateway.
+      returned: always
+      type: str
+      sample: "arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-3a9aa123"
+    transit_gateway_id:
+      description: The ID of the transit gateway.
+      returned: always
+      type: str
+      sample: tgw-3a9aa123
+'''
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except Exception:
+    pass
+    # handled by imported AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from time import sleep, time
+from ansible.module_utils._text import to_text
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (
+    ansible_dict_to_boto3_tag_list,
+    ansible_dict_to_boto3_filter_list,
+    AWSRetry,
+    boto3_tag_list_to_ansible_dict,
+    camel_dict_to_snake_dict,
+    compare_aws_tags
+)
+
+
+class AnsibleEc2Tgw(object):
+
+    def __init__(self, module, results):
+        self._module = module
+        self._results = results
+        self._connection = self._module.client('ec2')
+        self._check_mode = self._module.check_mode
+
+        if not hasattr(self._connection, 'describe_transit_gateways'):
+            self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52')
+
+    def process(self):
+        """ Process the request based on the state parameter.
+        state = present will search for an existing tgw and return the object data;
+        if no object is found it will be created.
+
+        state = absent will attempt to remove the tgw, but will fail if it still has
+        attachments or associations.
+        """
+        description = self._module.params.get('description')
+        state = self._module.params.get('state', 'present')
+        tgw_id = self._module.params.get('transit_gateway_id')
+
+        if state == 'present':
+            self.ensure_tgw_present(tgw_id, description)
+        elif state == 'absent':
+            self.ensure_tgw_absent(tgw_id, description)
+
+    def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True):
+        """
+        Wait for the transit gateway to reach the specified status.
+        :param wait_timeout: Number of seconds to wait before timing out.
+        :param tgw_id: The ID of the transit gateway.
+        :param status: The status to wait for,
+                e.g. status=available, status=deleted
+        :param skip_deleted: ignore deleted transit gateways
+        :return dict: transit gateway object
+        """
+        polling_increment_secs = 5
+        wait_timeout = time() + wait_timeout
+        status_achieved = False
+        transit_gateway = dict()
+
+        while wait_timeout > time():
+            try:
+                transit_gateway = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=skip_deleted)
+
+                if transit_gateway:
+                    if self._check_mode:
+                        transit_gateway['state'] = status
+
+                    if transit_gateway.get('state') == status:
+                        status_achieved = True
+                        break
+
+                    elif transit_gateway.get('state') == 'failed':
+                        break
+
+                else:
+                    sleep(polling_increment_secs)
+
+            except ClientError as e:
+                self._module.fail_json_aws(e)
+
+        if not status_achieved:
+            self._module.fail_json(
+                msg="Wait timeout reached while waiting for results")
+
+        return transit_gateway
+
+    def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True):
+        """ Search for an existing tgw by either tgw_id or description.
+        :param tgw_id: The AWS id of the transit gateway
+        :param description: The description of the transit gateway.
+        :param skip_deleted: ignore deleted transit gateways
+        :return dict: transit gateway object
+        """
+        filters = []
+        if tgw_id:
+            filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id})
+
+        try:
+            response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters)
+        except (ClientError, BotoCoreError) as e:
+            self._module.fail_json_aws(e)
+
+        tgw = None
+        tgws = []
+
+        if len(response.get('TransitGateways', [])) == 1 and tgw_id:
+            if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted:
+                tgws.extend(response['TransitGateways'])
+
+        for gateway in response.get('TransitGateways', []):
+            if description == gateway['Description'] and gateway['State'] != 'deleted':
+                tgws.append(gateway)
+
+        if len(tgws) > 1:
+            self._module.fail_json(
+                msg='EC2 returned more than one transit gateway for description {0}, aborting'.format(description))
+        elif tgws:
+            tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags'])
+            tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags'])
+
+        return tgw
+
+    @staticmethod
+    def enable_option_flag(flag):
+        disabled = "disable"
+        enabled = "enable"
+        if flag:
+            return enabled
+        return disabled
+
+    def create_tgw(self, description):
+        """
+        Create a transit gateway and optionally wait for status to become available.
+
+        :param description: The description of the transit gateway.
+        :return dict: transit gateway object
+        """
+        options = dict()
+        wait = self._module.params.get('wait')
+        wait_timeout = self._module.params.get('wait_timeout')
+
+        if self._module.params.get('asn'):
+            options['AmazonSideAsn'] = self._module.params.get('asn')
+
+        options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach'))
+        options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate'))
+        options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate'))
+        options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support'))
+        options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support'))
+
+        try:
+            response = self._connection.create_transit_gateway(Description=description, Options=options)
+        except (ClientError, BotoCoreError) as e:
+            self._module.fail_json_aws(e)
+
+        tgw_id = response['TransitGateway']['TransitGatewayId']
+
+        if wait:
+            result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available")
+        else:
+            result = self.get_matching_tgw(tgw_id=tgw_id)
+
+        self._results['msg'] = ('Transit gateway {0} created'.format(result['transit_gateway_id']))
+
+        return result
+
+    def delete_tgw(self, tgw_id):
+        """
+        Delete the transit gateway and optionally wait for status to become deleted.
+
+        :param tgw_id: The id of the transit gateway
+        :return dict: transit gateway object
+        """
+        wait = self._module.params.get('wait')
+        wait_timeout = self._module.params.get('wait_timeout')
+
+        try:
+            response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id)
+        except (ClientError, BotoCoreError) as e:
+            self._module.fail_json_aws(e)
+
+        if wait:
+            result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False)
+        else:
+            result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False)
+
+        self._results['msg'] = ('Transit gateway {0} deleted'.format(tgw_id))
+
+        return result
+
+    def ensure_tags(self, tgw_id, tags, purge_tags):
+        """
+        Ensures tags are
applied to the transit gateway. Optionally removes any
+        existing tags not in the I(tags) argument if purge_tags is set to true.
+
+        :param tgw_id: The AWS id of the transit gateway
+        :param tags: dict of tags to apply to the transit gateway.
+        :param purge_tags: when true, existing tags not in the tags parameter are removed
+        :return: true if tags were updated
+        """
+        tags_changed = False
+        filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id})
+        try:
+            cur_tags = self._connection.describe_tags(Filters=filters)
+        except (ClientError, BotoCoreError) as e:
+            self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+        to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+        if to_update:
+            try:
+                if not self._check_mode:
+                    AWSRetry.exponential_backoff()(self._connection.create_tags)(
+                        Resources=[tgw_id],
+                        Tags=ansible_dict_to_boto3_tag_list(to_update)
+                    )
+                self._results['changed'] = True
+                tags_changed = True
+            except (ClientError, BotoCoreError) as e:
+                self._module.fail_json_aws(e, msg="Couldn't create tags {0} for resource {1}".format(
+                    ansible_dict_to_boto3_tag_list(to_update), tgw_id))
+
+        if to_delete:
+            try:
+                if not self._check_mode:
+                    tags_list = []
+                    for key in to_delete:
+                        tags_list.append({'Key': key})
+
+                    AWSRetry.exponential_backoff()(self._connection.delete_tags)(
+                        Resources=[tgw_id],
+                        Tags=tags_list
+                    )
+                self._results['changed'] = True
+                tags_changed = True
+            except (ClientError, BotoCoreError) as e:
+                self._module.fail_json_aws(e, msg="Couldn't delete tags {0} for resource {1}".format(
+                    ansible_dict_to_boto3_tag_list(to_delete), tgw_id))
+
+        return tags_changed
+
+    def ensure_tgw_present(self, tgw_id=None, description=None):
+        """
+        Will create a tgw if no match for the tgw_id or description is found.
+        Will update the tgw tags if a matching one is found but the tags are not in sync.
+
+        :param tgw_id: The AWS id of the transit gateway
+        :param description: The description of the transit gateway.
+        :return dict: transit gateway object
+        """
+        tgw = self.get_matching_tgw(tgw_id, description)
+
+        if tgw is None:
+            if self._check_mode:
+                self._results['changed'] = True
+                self._results['transit_gateway_id'] = None
+                return self._results
+
+            try:
+                if not description:
+                    self._module.fail_json(msg="Failed to create Transit Gateway: description argument required")
+                tgw = self.create_tgw(description)
+                self._results['changed'] = True
+            except (BotoCoreError, ClientError) as e:
+                self._module.fail_json_aws(e, msg='Unable to create Transit Gateway')
+
+        if self._module.params.get('tags') != tgw.get('tags'):
+            stringed_tags_dict = dict((to_text(k), to_text(v)) for k, v in self._module.params.get('tags').items())
+            if self.ensure_tags(tgw['transit_gateway_id'], stringed_tags_dict, self._module.params.get('purge_tags')):
+                self._results['changed'] = True
+
+        self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'])
+
+        return self._results
+
+    def ensure_tgw_absent(self, tgw_id=None, description=None):
+        """
+        Will delete the tgw if a single tgw is found that is not yet in a deleted state.
+
+        :param tgw_id: The AWS id of the transit gateway
+        :param description: The description of the transit gateway.
+        :return dict: transit gateway object
+        """
+        self._results['transit_gateway_id'] = None
+        tgw = self.get_matching_tgw(tgw_id, description)
+
+        if tgw is not None:
+            if self._check_mode:
+                self._results['changed'] = True
+                return self._results
+
+            try:
+                tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id'])
+                self._results['changed'] = True
+                self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'],
+                                                                         skip_deleted=False)
+            except (BotoCoreError, ClientError) as e:
+                self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway')
+
+        return self._results
+
+
+def setup_module_object():
+    """
+    Merge argument spec and create Ansible module object.
+    :return: Ansible module object
+    """
+
+    argument_spec = dict(
+        asn=dict(type='int'),
+        auto_associate=dict(type='bool', default='yes'),
+        auto_attach=dict(type='bool', default='no'),
+        auto_propagate=dict(type='bool', default='yes'),
+        description=dict(type='str'),
+        dns_support=dict(type='bool', default='yes'),
+        purge_tags=dict(type='bool', default='yes'),
+        state=dict(default='present', choices=['present', 'absent']),
+        tags=dict(default=dict(), type='dict'),
+        transit_gateway_id=dict(type='str'),
+        vpn_ecmp_support=dict(type='bool', default='yes'),
+        wait=dict(type='bool', default='yes'),
+        wait_timeout=dict(type='int', default=300)
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_one_of=[('description', 'transit_gateway_id')],
+        supports_check_mode=True,
+    )
+
+    return module
+
+
+def main():
+
+    module = setup_module_object()
+
+    results = dict(
+        changed=False
+    )
+
+    tgw_manager = AnsibleEc2Tgw(module=module, results=results)
+    tgw_manager.process()
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py
new file mode 100644
index 00000000000..041e88ae638
--- /dev/null
+++ b/ec2_transit_gateway_info.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway_info
+short_description: Gather information about EC2 transit gateways in AWS
+description:
+    - Gather information about EC2 transit gateways in AWS.
+author: "Bob Boldin (@BobBoldin)"
+requirements:
+  - botocore
+  - boto3
+options:
+  transit_gateway_ids:
+    description:
+      - A list of transit gateway IDs to gather information for.
+    aliases: [transit_gateway_id]
+    type: list
+    elements: str
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters.
+    type: dict
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
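+# If none of the requested transit gateway IDs exist, the module returns an
+# empty list instead of failing (InvalidTransitGatewayID.NotFound is handled).
+
+# Illustrative only: register the result and list the IDs that were found
+- ec2_transit_gateway_info:
+  register: tgw_info
+
+- debug:
+    msg: "{{ tgw_info.transit_gateways | map(attribute='transit_gateway_id') | list }}"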
+ +# Gather info about all transit gateways +- ec2_transit_gateway_info: + +# Gather info about a particular transit gateway using filter transit gateway ID +- ec2_transit_gateway_info: + filters: + transit-gateway-id: tgw-02c42332e6b7da829 + +# Gather info about a particular transit gateway using multiple option filters +- ec2_transit_gateway_info: + filters: + options.dns-support: enable + options.vpn-ecmp-support: enable + +# Gather info about multiple transit gateways using module param +- ec2_transit_gateway_info: + transit_gateway_ids: + - tgw-02c42332e6b7da829 + - tgw-03c53443d5a8cb716 +''' + +RETURN = ''' +transit_gateways: + description: > + Transit gateways that match the provided filters. Each element consists of a dict with all the information + related to that transit gateway. + returned: on success + type: complex + contains: + creation_time: + description: The creation time. + returned: always + type: str + sample: "2019-02-05T16:19:58+00:00" + description: + description: The description of the transit gateway. + returned: always + type: str + sample: "A transit gateway" + options: + description: A dictionary of the transit gateway options. + returned: always + type: complex + contains: + amazon_side_asn: + description: + - A private Autonomous System Number (ASN) for the Amazon + side of a BGP session. The range is 64512 to 65534 for + 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs. + returned: always + type: int + sample: 64512 + auto_accept_shared_attachments: + description: + - Indicates whether attachment requests are automatically accepted. + returned: always + type: str + sample: "enable" + default_route_table_association: + description: + - Indicates whether resource attachments are automatically + associated with the default association route table. + returned: always + type: str + sample: "disable" + association_default_route_table_id: + description: + - The ID of the default association route table. + returned: when present + type: str + sample: "rtb-11223344" + default_route_table_propagation: + description: + - Indicates whether resource attachments automatically + propagate routes to the default propagation route table. + returned: always + type: str + sample: "disable" + dns_support: + description: + - Indicates whether DNS support is enabled. + returned: always + type: str + sample: "enable" + propagation_default_route_table_id: + description: + - The ID of the default propagation route table. + returned: when present + type: str + sample: "rtb-11223344" + vpn_ecmp_support: + description: + - Indicates whether Equal Cost Multipath Protocol support + is enabled. + returned: always + type: str + sample: "enable" + owner_id: + description: The AWS account number ID which owns the transit gateway. + returned: always + type: str + sample: "1234567654323" + state: + description: The state of the transit gateway. + returned: always + type: str + sample: "available" + tags: + description: A dict of tags associated with the transit gateway. + returned: always + type: dict + sample: '{ + "Name": "A sample TGW" + }' + transit_gateway_arn: + description: The Amazon Resource Name (ARN) of the transit gateway. + returned: always + type: str + sample: "arn:aws:ec2:us-west-2:1234567654323:transit-gateway/tgw-02c42332e6b7da829" + transit_gateway_id: + description: The ID of the transit gateway. 
+ returned: always + type: str + sample: "tgw-02c42332e6b7da829" +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except Exception: + pass + # handled by imported AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + AWSRetry, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list +) + + +class AnsibleEc2TgwInfo(object): + + def __init__(self, module, results): + self._module = module + self._results = results + self._connection = self._module.client('ec2') + self._check_mode = self._module.check_mode + + if not hasattr(self._connection, 'describe_transit_gateways'): + self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52') + + @AWSRetry.exponential_backoff() + def describe_transit_gateways(self): + """ + Describe transit gateways. + + module : AnsibleAWSModule object + connection : boto3 client connection object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(self._module.params['filters']) + transit_gateway_ids = self._module.params['transit_gateway_ids'] + + # init empty list for return vars + transit_gateway_info = list() + + # Get the basic transit gateway info + try: + response = self._connection.describe_transit_gateways( + TransitGatewayIds=transit_gateway_ids, Filters=filters) + except ClientError as e: + if e.response['Error']['Code'] == 'InvalidTransitGatewayID.NotFound': + self._results['transit_gateways'] = [] + return + raise + + for transit_gateway in response['TransitGateways']: + transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags'])) + # convert tag list to ansible dict + transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', [])) + + self._results['transit_gateways'] = transit_gateway_info + return + + +def setup_module_object(): + """ + merge argument spec and create Ansible module object + :return: Ansible module object + """ + + argument_spec = dict( + transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + return module + + +def main(): + + module = setup_module_object() + + results = dict( + changed=False + ) + + tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) + try: + tgwf_manager.describe_transit_gateways() + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py new file mode 100644 index 00000000000..cba7aa66a8c --- /dev/null +++ b/ec2_vpc_egress_igw.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_egress_igw +short_description: Manage an AWS VPC Egress Only Internet gateway +description: + - Manage an AWS VPC Egress Only Internet gateway +author: Daniel Shepherd (@shepdelacreme) +options: + vpc_id: + description: + - The VPC ID for the 
VPC to which this Egress Only Internet Gateway should be attached.
+    required: true
+    type: str
+  state:
+    description:
+      - Create or delete the EIGW.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Egress Only Internet Gateway.
+# The Egress Only Internet Gateway ID can be accessed via {{eigw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_egress_igw:
+  vpc_id: vpc-abcdefgh
+  state: present
+register: eigw
+
+'''
+
+RETURN = '''
+gateway_id:
+  description: The ID of the Egress Only Internet Gateway or Null.
+  returned: always
+  type: str
+  sample: eigw-0e00cf111ba5bc11e
+vpc_id:
+  description: The ID of the VPC that the gateway is attached to or detached from.
+  returned: always
+  type: str
+  sample: vpc-012345678
+'''
+
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def delete_eigw(module, conn, eigw_id):
+    """
+    Delete EIGW.
+
+    module : AnsibleModule object
+    conn : boto3 client connection object
+    eigw_id : ID of the EIGW to delete
+    """
+    changed = False
+
+    try:
+        response = conn.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id)
+    except botocore.exceptions.ClientError as e:
+        # When a boto3 method is run with DryRun=True it returns an error on success
+        # We need to catch the error and return something valid
+        if e.response.get('Error', {}).get('Code') == "DryRunOperation":
+            changed = True
+        else:
+            module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.params.get('vpc_id')))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.params.get('vpc_id')))
+
+    if not module.check_mode:
+        changed = response.get('ReturnCode', False)
+
+    return changed
+
+
+def create_eigw(module, conn, vpc_id):
+    """
+    Create EIGW.
+ + module : AnsibleModule object + conn : boto3 client connection object + vpc_id : ID of the VPC we are operating on + """ + gateway_id = None + changed = False + + try: + response = conn.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id) + except botocore.exceptions.ClientError as e: + # When boto3 method is run with DryRun=True it returns an error on success + # We need to catch the error and return something valid + if e.response.get('Error', {}).get('Code') == "DryRunOperation": + changed = True + elif e.response.get('Error', {}).get('Code') == "InvalidVpcID.NotFound": + module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) + else: + module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) + + if not module.check_mode: + gateway = response.get('EgressOnlyInternetGateway', {}) + state = gateway.get('Attachments', [{}])[0].get('State') + gateway_id = gateway.get('EgressOnlyInternetGatewayId') + + if gateway_id and state in ('attached', 'attaching'): + changed = True + else: + # EIGW gave back a bad attachment state or an invalid response so we error out + module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id), + **camel_dict_to_snake_dict(response)) + + return changed, gateway_id + + +def describe_eigws(module, conn, vpc_id): + """ + Describe EIGWs. + + module : AnsibleModule object + conn : boto3 client connection object + vpc_id : ID of the VPC we are operating on + """ + gateway_id = None + + try: + response = conn.describe_egress_only_internet_gateways() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") + + for eigw in response.get('EgressOnlyInternetGateways', []): + for attachment in eigw.get('Attachments', []): + if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'): + gateway_id = eigw.get('EgressOnlyInternetGatewayId') + + return gateway_id + + +def main(): + argument_spec = dict( + vpc_id=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client('ec2') + + vpc_id = module.params.get('vpc_id') + state = module.params.get('state') + + eigw_id = describe_eigws(module, connection, vpc_id) + + result = dict(gateway_id=eigw_id, vpc_id=vpc_id) + changed = False + + if state == 'present' and not eigw_id: + changed, result['gateway_id'] = create_eigw(module, connection, vpc_id) + elif state == 'absent' and eigw_id: + changed = delete_eigw(module, connection, eigw_id) + + module.exit_json(changed=changed, **result) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py new file mode 100644 index 00000000000..7c1f9f619ab --- /dev/null +++ b/ec2_vpc_endpoint.py @@ -0,0 +1,400 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 
'community'}
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_endpoint
+short_description: Create and delete AWS VPC Endpoints.
+description:
+  - Creates AWS VPC endpoints.
+  - Deletes AWS VPC endpoints.
+  - This module supports check mode.
+requirements: [ boto3 ]
+options:
+  vpc_id:
+    description:
+      - The VPC ID in which to create the endpoint.
+      - Required when creating a VPC endpoint.
+    required: false
+    type: str
+  service:
+    description:
+      - An AWS-supported VPC endpoint service. Use the M(ec2_vpc_endpoint_info)
+        module to describe the supported endpoint services.
+      - Required when creating an endpoint.
+    required: false
+    type: str
+  policy:
+    description:
+      - A properly formatted JSON policy as a string, see
+        U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813).
+        Cannot be used with I(policy_file).
+      - Optional when creating an endpoint. If not provided AWS will
+        utilise a default policy which provides full access to the service.
+    required: false
+    type: json
+  policy_file:
+    description:
+      - The path to a properly JSON-formatted policy file, see
+        U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813)
+        on how to use it properly. Cannot be used with I(policy).
+      - Optional when creating an endpoint. If not provided AWS will
+        utilise a default policy which provides full access to the service.
+    required: false
+    aliases: [ "policy_path" ]
+    type: path
+  state:
+    description:
+      - C(present) to ensure the resource is created.
+      - C(absent) to remove the resource.
+    required: false
+    default: present
+    choices: [ "present", "absent"]
+    type: str
+  wait:
+    description:
+      - When I(state=present), wait for the endpoint to reach the C(available) status.
+        Unfortunately this is ignored for delete actions due to a difference in
+        behaviour from AWS.
+    required: false
+    default: no
+    type: bool
+  wait_timeout:
+    description:
+      - Used in conjunction with I(wait). Number of seconds to wait for the status.
+        Unfortunately this is ignored for delete actions due to a difference in
+        behaviour from AWS.
+    required: false
+    default: 320
+    type: int
+  route_table_ids:
+    description:
+      - List of one or more route table IDs to attach to the endpoint. A route
+        is added to the route table with the destination of the endpoint if
+        provided.
+    required: false
+    type: list
+    elements: str
+  vpc_endpoint_id:
+    description:
+      - One or more VPC endpoint IDs to remove from the AWS account.
+      - Required when I(state=absent).
+    required: false
+    type: str
+  client_token:
+    description:
+      - Optional client token to ensure idempotency.
+    required: false
+    type: str
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
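+
+# A hypothetical variation (illustrative placeholder values): use the documented
+# wait/wait_timeout options to block until the endpoint reports "available".
+- name: Create new vpc endpoint and wait for it to become available
+  ec2_vpc_endpoint:
+    state: present
+    region: ap-southeast-2
+    vpc_id: vpc-12345678
+    service: com.amazonaws.ap-southeast-2.s3
+    wait: true
+    wait_timeout: 600
+  register: new_vpc_endpoint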
+ +- name: Create new vpc endpoint with a json template for policy + ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} " + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Create new vpc endpoint with the default policy + ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Create new vpc endpoint with json file + ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + policy_file: "{{ role_path }}/files/endpoint_policy.json" + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Delete newly created vpc endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" + region: ap-southeast-2 +''' + +RETURN = ''' +endpoints: + description: The resulting endpoints from the module call + returned: success + type: list + sample: [ + { + "creation_timestamp": "2017-02-20T05:04:15+00:00", + "policy_document": { + "Id": "Policy1450910922815", + "Statement": [ + { + "Action": "s3:*", + "Effect": "Allow", + "Principal": "*", + "Resource": [ + "arn:aws:s3:::*/*", + "arn:aws:s3:::*" + ], + "Sid": "Stmt1450910920641" + } + ], + "Version": "2012-10-17" + }, + "route_table_ids": [ + "rtb-abcd1234" + ], + "service_name": "com.amazonaws.ap-southeast-2.s3", + "vpc_endpoint_id": "vpce-a1b2c3d4", + "vpc_id": "vpc-abbad0d0" + } + ] +''' + +import datetime +import json +import time +import traceback + +try: + import botocore +except ImportError: + pass # will be picked up by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec, HAS_BOTO3, + camel_dict_to_snake_dict) +from ansible.module_utils.six import string_types + + +def date_handler(obj): + return obj.isoformat() if hasattr(obj, 'isoformat') else obj + + +def wait_for_status(client, module, resource_id, status): + polling_increment_secs = 15 + max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + status_achieved = False + + for x in range(0, max_retries): + try: + resource = get_endpoints(client, module, resource_id)['VpcEndpoints'][0] + if resource['State'] == status: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + return status_achieved, resource + + +def get_endpoints(client, module, resource_id=None): + params = dict() + if resource_id: + params['VpcEndpointIds'] = [resource_id] + + result = json.loads(json.dumps(client.describe_vpc_endpoints(**params), default=date_handler)) + return result + + +def setup_creation(client, module): + vpc_id = module.params.get('vpc_id') + service_name = module.params.get('service') + + if module.params.get('route_table_ids'): + route_table_ids = module.params.get('route_table_ids') + existing_endpoints = get_endpoints(client, module) + for endpoint in existing_endpoints['VpcEndpoints']: + if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: 
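+                # An endpoint for this VPC/service pair already exists; if its
+                # route table set matches the requested one exactly, report no
+                # change and return the existing endpoint.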
+                sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds'])
+                sorted_route_table_ids = sorted(route_table_ids)
+                if sorted_endpoint_rt_ids == sorted_route_table_ids:
+                    return False, camel_dict_to_snake_dict(endpoint)
+
+    changed, result = create_vpc_endpoint(client, module)
+
+    return changed, json.loads(json.dumps(result, default=date_handler))
+
+
+def create_vpc_endpoint(client, module):
+    params = dict()
+    changed = False
+    token_provided = False
+    params['VpcId'] = module.params.get('vpc_id')
+    params['ServiceName'] = module.params.get('service')
+    params['DryRun'] = module.check_mode
+
+    if module.params.get('route_table_ids'):
+        params['RouteTableIds'] = module.params.get('route_table_ids')
+
+    if module.params.get('client_token'):
+        token_provided = True
+        request_time = datetime.datetime.utcnow()
+        params['ClientToken'] = module.params.get('client_token')
+
+    policy = None
+    if module.params.get('policy'):
+        try:
+            policy = json.loads(module.params.get('policy'))
+        except ValueError as e:
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+    elif module.params.get('policy_file'):
+        try:
+            with open(module.params.get('policy_file'), 'r') as json_data:
+                policy = json.load(json_data)
+        except Exception as e:
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+    if policy:
+        params['PolicyDocument'] = json.dumps(policy)
+
+    try:
+        changed = True
+        result = camel_dict_to_snake_dict(client.create_vpc_endpoint(**params)['VpcEndpoint'])
+        if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)):
+            changed = False
+        elif module.params.get('wait') and not module.check_mode:
+            status_achieved, result = wait_for_status(client, module, result['vpc_endpoint_id'], 'available')
+            if not status_achieved:
+                module.fail_json(msg='Error waiting for vpc endpoint to become available - please check the AWS console')
+    except botocore.exceptions.ClientError as e:
+        error_code = e.response.get('Error', {}).get('Code')
+        if error_code == "DryRunOperation":
+            changed = True
+            result = 'Would have created VPC Endpoint if not in check mode'
+        elif error_code == "IdempotentParameterMismatch":
+            module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API")
+        elif error_code == "RouteAlreadyExists":
+            module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API")
+        else:
+            module.fail_json(msg=str(e), exception=traceback.format_exc(),
+                             **camel_dict_to_snake_dict(e.response))
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+    return changed, result
+
+
+def setup_removal(client, module):
+    params = dict()
+    changed = False
+    params['DryRun'] = module.check_mode
+    if isinstance(module.params.get('vpc_endpoint_id'), string_types):
+        params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')]
+    else:
+        params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id')
+    try:
+        result = client.delete_vpc_endpoints(**params)['Unsuccessful']
+        if not module.check_mode:
+            if result != []:
+                module.fail_json(msg=result)
+            changed = True
+    except botocore.exceptions.ClientError as e:
+        if e.response.get('Error', {}).get('Code') == "DryRunOperation":
+            changed = True
+            result = 'Would have deleted VPC Endpoint if not in check mode'
+        else:
+            module.fail_json(msg=str(e), exception=traceback.format_exc(),
+                             **camel_dict_to_snake_dict(e.response))
+    except Exception as e:
+        module.fail_json(msg=str(e),
exception=traceback.format_exc())
+    return changed, result
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            vpc_id=dict(),
+            service=dict(),
+            policy=dict(type='json'),
+            policy_file=dict(type='path', aliases=['policy_path']),
+            state=dict(default='present', choices=['present', 'absent']),
+            wait=dict(type='bool', default=False),
+            wait_timeout=dict(type='int', default=320, required=False),
+            route_table_ids=dict(type='list'),
+            vpc_endpoint_id=dict(),
+            client_token=dict(),
+        )
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[['policy', 'policy_file']],
+        required_if=[
+            ['state', 'present', ['vpc_id', 'service']],
+            ['state', 'absent', ['vpc_endpoint_id']],
+        ]
+    )
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='botocore and boto3 are required for this module')
+
+    state = module.params.get('state')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+    except NameError as e:
+        # Getting around the get_aws_connection_info boto reliance for region
+        if "name 'boto' is not defined" in str(e):
+            module.params['region'] = botocore.session.get_session().get_config_variable('region')
+            if not module.params['region']:
+                module.fail_json(msg="Error - no region provided")
+        else:
+            module.fail_json(msg="Can't retrieve connection information - " + str(e),
+                             exception=traceback.format_exc())
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e),
+                         exception=traceback.format_exc())
+
+    # Ensure the resource is in the requested state
+    if state == 'present':
+        (changed, results) = setup_creation(ec2, module)
+    else:
+        (changed, results) = setup_removal(ec2, module)
+
+    module.exit_json(changed=changed, result=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_vpc_endpoint_facts.py b/ec2_vpc_endpoint_facts.py
new file mode 120000
index 00000000000..d2a144a7b86
--- /dev/null
+++ b/ec2_vpc_endpoint_facts.py
@@ -0,0 +1 @@
+ec2_vpc_endpoint_info.py
\ No newline at end of file
diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py
new file mode 100644
index 00000000000..d82a3b8faf6
--- /dev/null
+++ b/ec2_vpc_endpoint_info.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: ec2_vpc_endpoint_info
+short_description: Retrieves AWS VPC endpoint details using AWS methods.
+description:
+  - Gets various details related to AWS VPC endpoints.
+  - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+  query:
+    description:
+      - Specifies the query action to take.
+      - I(query=services) returns the supported AWS services that can be
+        specified when creating an endpoint.
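+      - I(query=endpoints) returns the endpoints in the region, optionally
+        narrowed down by I(filters) or I(vpc_endpoint_ids).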
+    required: True
+    choices:
+      - services
+      - endpoints
+    type: str
+  vpc_endpoint_ids:
+    description:
+      - Get details of specific endpoint IDs.
+    type: list
+    elements: str
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
+        for possible filters.
+    type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+  ec2_vpc_endpoint_info:
+    query: services
+    region: ap-southeast-2
+  register: supported_endpoint_services
+
+- name: Get all endpoints in ap-southeast-2 region
+  ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+  register: existing_endpoints
+
+- name: Get all endpoints with specific filters
+  ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+    filters:
+      vpc-id:
+        - vpc-12345678
+        - vpc-87654321
+      vpc-endpoint-state:
+        - available
+        - pending
+  register: existing_endpoints
+
+- name: Get details on specific endpoint
+  ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+    vpc_endpoint_ids:
+      - vpce-12345678
+  register: endpoint_details
+'''
+
+RETURN = '''
+service_names:
+  description: AWS VPC endpoint service names
+  returned: I(query) is C(services)
+  type: list
+  sample:
+    service_names:
+    - com.amazonaws.ap-southeast-2.s3
+vpc_endpoints:
+  description:
+    - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
+      policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
+  returned: I(query) is C(endpoints)
+  type: list
+  sample:
+    vpc_endpoints:
+    - creation_timestamp: "2017-02-16T11:06:48+00:00"
+      policy_document: >
+        "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
+        \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
+        \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
+      route_table_ids:
+        - rtb-abcd1234
+      service_name: "com.amazonaws.ap-southeast-2.s3"
+      state: "available"
+      vpc_endpoint_id: "vpce-abbad0d0"
+      vpc_id: "vpc-1111ffff"
+'''
+
+import json
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be picked up from imported HAS_BOTO3
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, boto3_conn, get_aws_connection_info,
+                                                                         ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict, AWSRetry)
+
+
+def date_handler(obj):
+    return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+@AWSRetry.exponential_backoff()
+def get_supported_services(client, module):
+    results = list()
+    params = dict()
+    while True:
+        response = client.describe_vpc_endpoint_services(**params)
+        results.extend(response['ServiceNames'])
+        if 'NextToken' in response:
+            params['NextToken'] = response['NextToken']
+        else:
+            break
+    return dict(service_names=results)
+
+
+@AWSRetry.exponential_backoff()
+def get_endpoints(client, module):
+    results = list()
+    params = dict()
+    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    if module.params.get('vpc_endpoint_ids'):
+        params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
+    while True:
+        response = client.describe_vpc_endpoints(**params)
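+        # Accumulate this page of results and keep paginating manually while
+        # the response carries a NextToken.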
+        results.extend(response['VpcEndpoints'])
+        if 'NextToken' in response:
+            params['NextToken'] = response['NextToken']
+        else:
+            break
+    try:
+        results = json.loads(json.dumps(results, default=date_handler))
+    except Exception as e:
+        module.fail_json(msg=str(e))
+    return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            query=dict(choices=['services', 'endpoints'], required=True),
+            filters=dict(default={}, type='dict'),
+            vpc_endpoint_ids=dict(type='list'),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+    if module._name == 'ec2_vpc_endpoint_facts':
+        module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", version='2.13')
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='botocore and boto3 are required.')
+
+    try:
+        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+        if region:
+            connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+        else:
+            module.fail_json(msg="region must be specified")
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg=str(e))
+
+    invocations = {
+        'services': get_supported_services,
+        'endpoints': get_endpoints,
+    }
+    results = invocations[module.params.get('query')](connection, module)
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py
new file mode 100644
index 00000000000..76973a6e341
--- /dev/null
+++ b/ec2_vpc_igw.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+  - Manage an AWS VPC Internet gateway
+author: Robert Estelle (@erydo)
+options:
+  vpc_id:
+    description:
+      - The VPC ID for the VPC in which to manage the Internet Gateway.
+    required: true
+    type: str
+  tags:
+    description:
+      - "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed."
+    aliases: [ 'resource_tags' ]
+    type: dict
+  state:
+    description:
+      - Create or terminate the IGW.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements:
+  - botocore
+  - boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_igw:
+  vpc_id: vpc-abcdefgh
+  state: present
+register: igw
+
+'''
+
+RETURN = '''
+changed:
+  description: If any changes have been made to the Internet Gateway.
+  type: bool
+  returned: always
+  sample:
+    changed: false
+gateway_id:
+  description: The unique identifier for the Internet Gateway.
+  type: str
+  returned: I(state=present)
+  sample:
+    gateway_id: "igw-XXXXXXXX"
+tags:
+  description: The tags associated with the Internet Gateway.
+ type: dict + returned: I(state=present) + sample: + tags: + "Ansible": "Test" +vpc_id: + description: The VPC ID associated with the Internet Gateway. + type: str + returned: I(state=present) + sample: + vpc_id: "vpc-XXXXXXXX" +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + AWSRetry, + camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_filter_list, + ansible_dict_to_boto3_tag_list, + compare_aws_tags +) +from ansible.module_utils.six import string_types + + +class AnsibleEc2Igw(object): + + def __init__(self, module, results): + self._module = module + self._results = results + self._connection = self._module.client('ec2') + self._check_mode = self._module.check_mode + + def process(self): + vpc_id = self._module.params.get('vpc_id') + state = self._module.params.get('state', 'present') + tags = self._module.params.get('tags') + + if state == 'present': + self.ensure_igw_present(vpc_id, tags) + elif state == 'absent': + self.ensure_igw_absent(vpc_id) + + def get_matching_igw(self, vpc_id): + filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + igws = [] + try: + response = self._connection.describe_internet_gateways(Filters=filters) + igws = response.get('InternetGateways', []) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e) + + igw = None + if len(igws) > 1: + self._module.fail_json( + msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id)) + elif igws: + igw = camel_dict_to_snake_dict(igws[0]) + + return igw + + def check_input_tags(self, tags): + nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)] + if nonstring_tags: + self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags)) + + def ensure_tags(self, igw_id, tags, add_only): + final_tags = [] + + filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'}) + cur_tags = None + try: + cur_tags = self._connection.describe_tags(Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Couldn't describe tags") + + purge_tags = bool(not add_only) + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) + final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) + + if to_update: + try: + if self._check_mode: + # update tags + final_tags.update(to_update) + else: + AWSRetry.exponential_backoff()(self._connection.create_tags)( + Resources=[igw_id], + Tags=ansible_dict_to_boto3_tag_list(to_update) + ) + + self._results['changed'] = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Couldn't create tags") + + if to_delete: + try: + if self._check_mode: + # update tags + for key in to_delete: + del final_tags[key] + else: + tags_list = [] + for key in to_delete: + tags_list.append({'Key': key}) + + AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list) + + self._results['changed'] = True + except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Couldn't delete tags") + + if not self._check_mode and (to_update or to_delete): + try: + response = self._connection.describe_tags(Filters=filters) + final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Couldn't describe tags") + + return final_tags + + @staticmethod + def get_igw_info(igw): + return { + 'gateway_id': igw['internet_gateway_id'], + 'tags': igw['tags'], + 'vpc_id': igw['vpc_id'] + } + + def ensure_igw_absent(self, vpc_id): + igw = self.get_matching_igw(vpc_id) + if igw is None: + return self._results + + if self._check_mode: + self._results['changed'] = True + return self._results + + try: + self._results['changed'] = True + self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) + self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") + + return self._results + + def ensure_igw_present(self, vpc_id, tags): + self.check_input_tags(tags) + + igw = self.get_matching_igw(vpc_id) + + if igw is None: + if self._check_mode: + self._results['changed'] = True + self._results['gateway_id'] = None + return self._results + + try: + response = self._connection.create_internet_gateway() + + # Ensure the gateway exists before trying to attach it or add tags + waiter = get_waiter(self._connection, 'internet_gateway_exists') + waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) + + igw = camel_dict_to_snake_dict(response['InternetGateway']) + self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) + self._results['changed'] = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') + + igw['vpc_id'] = vpc_id + + igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False) + + igw_info = self.get_igw_info(igw) + self._results.update(igw_info) + + return self._results + + +def main(): + argument_spec = dict( + vpc_id=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + results = dict( + changed=False + ) + igw_manager = AnsibleEc2Igw(module=module, results=results) + igw_manager.process() + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_igw_facts.py b/ec2_vpc_igw_facts.py new file mode 120000 index 00000000000..b3eeb3fee6e --- /dev/null +++ b/ec2_vpc_igw_facts.py @@ -0,0 +1 @@ +ec2_vpc_igw_info.py \ No newline at end of file diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py new file mode 100644 index 00000000000..097072c3d5c --- /dev/null +++ b/ec2_vpc_igw_info.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_igw_info +short_description: Gather information about internet gateways in AWS +description: + - Gather information about internet gateways in AWS. + - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: "Nick Aslanidis (@naslanidis)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters. + type: dict + internet_gateway_ids: + description: + - Get details of specific Internet Gateway ID. Provide this value as a list. + type: list + elements: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# # Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all Internet Gateways for an account or profile + ec2_vpc_igw_info: + region: ap-southeast-2 + profile: production + register: igw_info + +- name: Gather information about a filtered list of Internet Gateways + ec2_vpc_igw_info: + region: ap-southeast-2 + profile: production + filters: + "tag:Name": "igw-123" + register: igw_info + +- name: Gather information about a specific internet gateway by InternetGatewayId + ec2_vpc_igw_info: + region: ap-southeast-2 + profile: production + internet_gateway_ids: igw-c1231234 + register: igw_info +''' + +RETURN = ''' +internet_gateways: + description: The internet gateways for the account. + returned: always + type: list + sample: [ + { + "attachments": [ + { + "state": "available", + "vpc_id": "vpc-02123b67" + } + ], + "internet_gateway_id": "igw-2123634d", + "tags": [ + { + "key": "Name", + "value": "test-vpc-20-igw" + } + ] + } + ] + +changed: + description: True if listing the internet gateways succeeds. 
+  type: bool
+  returned: always
+  sample: false
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be captured by imported HAS_BOTO3
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
+                                                                         camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3)
+
+
+def get_internet_gateway_info(internet_gateway):
+    internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
+                             'Attachments': internet_gateway['Attachments'],
+                             'Tags': internet_gateway['Tags']}
+    return internet_gateway_info
+
+
+def list_internet_gateways(client, module):
+    params = dict()
+
+    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+
+    if module.params.get("internet_gateway_ids"):
+        params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
+
+    try:
+        all_internet_gateways = client.describe_internet_gateways(**params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg=str(e))
+
+    return [camel_dict_to_snake_dict(get_internet_gateway_info(igw))
+            for igw in all_internet_gateways['InternetGateways']]
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            filters=dict(type='dict', default=dict()),
+            internet_gateway_ids=dict(type='list', default=None)
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+    if module._name == 'ec2_vpc_igw_facts':
+        module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", version='2.13')
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='botocore and boto3 are required.')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg="Can't authorize connection - " + str(e))
+
+    results = list_internet_gateways(connection, module)
+
+    module.exit_json(internet_gateways=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py
new file mode 100644
index 00000000000..f7c8be76b44
--- /dev/null
+++ b/ec2_vpc_nacl.py
@@ -0,0 +1,633 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_nacl
+short_description: Create and delete Network ACLs.
+description:
+  - Create and delete Network ACLs. Read the AWS documentation for Network ACLs
+    U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+options:
+  name:
+    description:
+      - Tagged name identifying a network ACL.
+      - Exactly one of I(name) or I(nacl_id) is required.
+    required: false
+    type: str
+  nacl_id:
+    description:
+      - NACL id identifying a network ACL.
+      - Exactly one of I(name) or I(nacl_id) is required.
+    required: false
+    type: str
+  vpc_id:
+    description:
+      - VPC id of the requesting VPC.
+      - Required when I(state=present).
+    required: false
+    type: str
+  subnets:
+    description:
+      - The list of subnets that should be associated with the network ACL.
+      - Must be specified as a list.
+      - Each subnet can be specified as a subnet ID or its tagged name.
+    required: false
+    type: list
+  egress:
+    description:
+      - A list of rules for outgoing traffic. Each rule must be specified as a list.
+        Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
+        the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
+        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+        See examples.
+    default: []
+    required: false
+    type: list
+  ingress:
+    description:
+      - List of rules for incoming traffic. Each rule must be specified as a list.
+        Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
+        the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
+        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
+        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
+        See examples.
+    default: []
+    required: false
+    type: list
+  tags:
+    description:
+      - Dictionary of tags to look for and apply when creating a network ACL.
+    required: false
+    type: dict
+  state:
+    description:
+      - Creates or modifies an existing NACL.
+      - Deletes a NACL and reassociates subnets to the default NACL.
+    required: false
+    type: str
+    choices: ['present', 'absent']
+    default: present
+author: Mike Mochan (@mmochan)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+
+# Complete example to create and delete a network ACL
+# that allows SSH, HTTP and ICMP in, and all traffic out.
+- name: "Create and associate production DMZ network ACL with DMZ subnets"
+  ec2_vpc_nacl:
+    vpc_id: vpc-12345678
+    name: prod-dmz-nacl
+    region: ap-southeast-2
+    subnets: ['prod-dmz-1', 'prod-dmz-2']
+    tags:
+      CostCode: CC1234
+      Project: phoenix
+      Description: production DMZ
+    ingress:
+      # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+      # port from, port to
+      - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+      - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+      - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+    egress:
+      - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+    state: 'present'
+
+- name: "Remove the ingress and egress rules - defaults to deny all"
+  ec2_vpc_nacl:
+    vpc_id: vpc-12345678
+    name: prod-dmz-nacl
+    region: ap-southeast-2
+    subnets:
+      - prod-dmz-1
+      - prod-dmz-2
+    tags:
+      CostCode: CC1234
+      Project: phoenix
+      Description: production DMZ
+    state: present
+
+- name: "Remove the NACL subnet associations and tags"
+  ec2_vpc_nacl:
+    vpc_id: 'vpc-12345678'
+    name: prod-dmz-nacl
+    region: ap-southeast-2
+    state: present
+
+- name: "Delete nacl and subnet associations"
+  ec2_vpc_nacl:
+    vpc_id: vpc-12345678
+    name: prod-dmz-nacl
+    state: absent
+
+- name: "Delete nacl by its id"
+  ec2_vpc_nacl:
+    nacl_id: acl-33b4ee5b
+    state: absent
+'''
+RETURN = '''
+task:
+  description: The result of the create or delete action.
+ returned: success + type: dict +nacl_id: + description: The id of the NACL (when creating or updating an ACL) + returned: success + type: str + sample: acl-123456789abcdef01 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry + +# VPC-supported IANA protocol numbers +# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, } + + +# Utility methods +def icmp_present(entry): + if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1: + return True + + +def load_tags(module): + tags = [] + if module.params.get('tags'): + for name, value in module.params.get('tags').items(): + tags.append({'Key': name, 'Value': str(value)}) + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + else: + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + return tags + + +def subnets_removed(nacl_id, subnets, client, module): + results = find_acl_by_id(nacl_id, client, module) + associations = results['NetworkAcls'][0]['Associations'] + subnet_ids = [assoc['SubnetId'] for assoc in associations] + return [subnet for subnet in subnet_ids if subnet not in subnets] + + +def subnets_added(nacl_id, subnets, client, module): + results = find_acl_by_id(nacl_id, client, module) + associations = results['NetworkAcls'][0]['Associations'] + subnet_ids = [assoc['SubnetId'] for assoc in associations] + return [subnet for subnet in subnets if subnet not in subnet_ids] + + +def subnets_changed(nacl, client, module): + changed = False + vpc_id = module.params.get('vpc_id') + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + subnets = subnets_to_associate(nacl, client, module) + if not subnets: + default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] + subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module) + if subnets: + replace_network_acl_association(default_nacl_id, subnets, client, module) + changed = True + return changed + changed = False + return changed + subs_added = subnets_added(nacl_id, subnets, client, module) + if subs_added: + replace_network_acl_association(nacl_id, subs_added, client, module) + changed = True + subs_removed = subnets_removed(nacl_id, subnets, client, module) + if subs_removed: + default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] + replace_network_acl_association(default_nacl_id, subs_removed, client, module) + changed = True + return changed + + +def nacls_changed(nacl, client, module): + changed = False + params = dict() + params['egress'] = module.params.get('egress') + params['ingress'] = module.params.get('ingress') + + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl = describe_network_acl(client, module) + entries = nacl['NetworkAcls'][0]['Entries'] + egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767] + ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767] + if rules_changed(egress, params['egress'], True, nacl_id, client, module): + changed = True + if rules_changed(ingress, params['ingress'], False, nacl_id, client, module): + changed = True + return changed + + +def tags_changed(nacl_id, client, module): + changed = False + tags = dict() + if module.params.get('tags'): + tags = module.params.get('tags') + if module.params.get('name') and 
not tags.get('Name'): + tags['Name'] = module.params['name'] + nacl = find_acl_by_id(nacl_id, client, module) + if nacl['NetworkAcls']: + nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']] + nacl_tags = [item for sublist in nacl_values for item in sublist] + tag_values = [[key, str(value)] for key, value in tags.items()] + tags = [item for sublist in tag_values for item in sublist] + if sorted(nacl_tags) == sorted(tags): + changed = False + return changed + else: + delete_tags(nacl_id, client, module) + create_tags(nacl_id, client, module) + changed = True + return changed + return changed + + +def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): + changed = False + rules = list() + for entry in param_rules: + rules.append(process_rule_entry(entry, Egress)) + if rules == aws_rules: + return changed + else: + removed_rules = [x for x in aws_rules if x not in rules] + if removed_rules: + params = dict() + for rule in removed_rules: + params['NetworkAclId'] = nacl_id + params['RuleNumber'] = rule['RuleNumber'] + params['Egress'] = Egress + delete_network_acl_entry(params, client, module) + changed = True + added_rules = [x for x in rules if x not in aws_rules] + if added_rules: + for rule in added_rules: + rule['NetworkAclId'] = nacl_id + create_network_acl_entry(rule, client, module) + changed = True + return changed + + +def process_rule_entry(entry, Egress): + params = dict() + params['RuleNumber'] = entry[0] + params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) + params['RuleAction'] = entry[2] + params['Egress'] = Egress + params['CidrBlock'] = entry[3] + if icmp_present(entry): + params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} + else: + if entry[6] or entry[7]: + params['PortRange'] = {"From": entry[6], 'To': entry[7]} + return params + + +def restore_default_associations(assoc_ids, default_nacl_id, client, module): + if assoc_ids: + params = dict() + params['NetworkAclId'] = default_nacl_id[0] + for assoc_id in assoc_ids: + params['AssociationId'] = assoc_id + restore_default_acl_association(params, client, module) + return True + + +def construct_acl_entries(nacl, client, module): + for entry in module.params.get('ingress'): + params = process_rule_entry(entry, Egress=False) + params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + create_network_acl_entry(params, client, module) + for rule in module.params.get('egress'): + params = process_rule_entry(rule, Egress=True) + params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + create_network_acl_entry(params, client, module) + + +# Module invocations +def setup_network_acl(client, module): + changed = False + nacl = describe_network_acl(client, module) + if not nacl['NetworkAcls']: + nacl = create_network_acl(module.params.get('vpc_id'), client, module) + nacl_id = nacl['NetworkAcl']['NetworkAclId'] + create_tags(nacl_id, client, module) + subnets = subnets_to_associate(nacl, client, module) + replace_network_acl_association(nacl_id, subnets, client, module) + construct_acl_entries(nacl, client, module) + changed = True + return(changed, nacl['NetworkAcl']['NetworkAclId']) + else: + changed = False + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + changed |= subnets_changed(nacl, client, module) + changed |= nacls_changed(nacl, client, module) + changed |= tags_changed(nacl_id, client, module) + return (changed, nacl_id) + + +def remove_network_acl(client, module): + changed = False + result = dict() + nacl = describe_network_acl(client, module) + if 
nacl['NetworkAcls']: + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + vpc_id = nacl['NetworkAcls'][0]['VpcId'] + associations = nacl['NetworkAcls'][0]['Associations'] + assoc_ids = [a['NetworkAclAssociationId'] for a in associations] + default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) + if not default_nacl_id: + result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} + return changed, result + if restore_default_associations(assoc_ids, default_nacl_id, client, module): + delete_network_acl(nacl_id, client, module) + changed = True + result[nacl_id] = "Successfully deleted" + return changed, result + if not assoc_ids: + delete_network_acl(nacl_id, client, module) + changed = True + result[nacl_id] = "Successfully deleted" + return changed, result + return changed, result + + +# Boto3 client methods +@AWSRetry.jittered_backoff() +def _create_network_acl(client, *args, **kwargs): + return client.create_network_acl(*args, **kwargs) + + +def create_network_acl(vpc_id, client, module): + try: + if module.check_mode: + nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) + else: + nacl = _create_network_acl(client, VpcId=vpc_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + return nacl + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _create_network_acl_entry(client, *args, **kwargs): + return client.create_network_acl_entry(*args, **kwargs) + + +def create_network_acl_entry(params, client, module): + try: + if not module.check_mode: + _create_network_acl_entry(client, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _create_tags(client, *args, **kwargs): + return client.create_tags(*args, **kwargs) + + +def create_tags(nacl_id, client, module): + try: + delete_tags(nacl_id, client, module) + if not module.check_mode: + _create_tags(client, Resources=[nacl_id], Tags=load_tags(module)) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff() +def _delete_network_acl(client, *args, **kwargs): + return client.delete_network_acl(*args, **kwargs) + + +def delete_network_acl(nacl_id, client, module): + try: + if not module.check_mode: + _delete_network_acl(client, NetworkAclId=nacl_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _delete_network_acl_entry(client, *args, **kwargs): + return client.delete_network_acl_entry(*args, **kwargs) + + +def delete_network_acl_entry(params, client, module): + try: + if not module.check_mode: + _delete_network_acl_entry(client, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _delete_tags(client, *args, **kwargs): + return client.delete_tags(*args, **kwargs) + + +def delete_tags(nacl_id, client, module): + try: + if not module.check_mode: + _delete_tags(client, Resources=[nacl_id]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff() +def _describe_network_acls(client, **kwargs): + return client.describe_network_acls(**kwargs) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _describe_network_acls_retry_missing(client, **kwargs): + 
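# In addition to the usual throttling retries, this variant also retries the
+    # InvalidNetworkAclID.NotFound error code, which can surface transiently
+    # for a NACL that was created moments earlier.
+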
return client.describe_network_acls(**kwargs) + + +def describe_acl_associations(subnets, client, module): + if not subnets: + return [] + try: + results = _describe_network_acls_retry_missing(client, Filters=[ + {'Name': 'association.subnet-id', 'Values': subnets} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + associations = results['NetworkAcls'][0]['Associations'] + return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets] + + +def describe_network_acl(client, module): + try: + if module.params.get('nacl_id'): + nacl = _describe_network_acls(client, Filters=[ + {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]} + ]) + else: + nacl = _describe_network_acls(client, Filters=[ + {'Name': 'tag:Name', 'Values': [module.params.get('name')]} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + return nacl + + +def find_acl_by_id(nacl_id, client, module): + try: + return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +def find_default_vpc_nacl(vpc_id, client, module): + try: + response = _describe_network_acls_retry_missing(client, Filters=[ + {'Name': 'vpc-id', 'Values': [vpc_id]}]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + nacls = response['NetworkAcls'] + return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True] + + +def find_subnet_ids_by_nacl_id(nacl_id, client, module): + try: + results = _describe_network_acls_retry_missing(client, Filters=[ + {'Name': 'association.network-acl-id', 'Values': [nacl_id]} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + if results['NetworkAcls']: + associations = results['NetworkAcls'][0]['Associations'] + return [s['SubnetId'] for s in associations if s['SubnetId']] + else: + return [] + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _replace_network_acl_association(client, *args, **kwargs): + return client.replace_network_acl_association(*args, **kwargs) + + +def replace_network_acl_association(nacl_id, subnets, client, module): + params = dict() + params['NetworkAclId'] = nacl_id + for association in describe_acl_associations(subnets, client, module): + params['AssociationId'] = association + try: + if not module.check_mode: + _replace_network_acl_association(client, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _replace_network_acl_entry(client, *args, **kwargs): + return client.replace_network_acl_entry(*args, **kwargs) + + +def replace_network_acl_entry(entries, Egress, nacl_id, client, module): + for entry in entries: + params = entry + params['NetworkAclId'] = nacl_id + try: + if not module.check_mode: + _replace_network_acl_entry(client, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +def _replace_network_acl_association(client, *args, **kwargs): + return client.replace_network_acl_association(*args, **kwargs) + + +def restore_default_acl_association(params, client, module): + try: + if not module.check_mode: + _replace_network_acl_association(client, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff() +def 
_describe_subnets(client, *args, **kwargs): + return client.describe_subnets(*args, **kwargs) + + +def subnets_to_associate(nacl, client, module): + params = list(module.params.get('subnets')) + if not params: + return [] + all_found = [] + if any(x.startswith("subnet-") for x in params): + try: + subnets = _describe_subnets(client, Filters=[ + {'Name': 'subnet-id', 'Values': params}]) + all_found.extend(subnets.get('Subnets', [])) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + if len(params) != len(all_found): + try: + subnets = _describe_subnets(client, Filters=[ + {'Name': 'tag:Name', 'Values': params}]) + all_found.extend(subnets.get('Subnets', [])) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId'))) + + +def main(): + argument_spec = dict( + vpc_id=dict(), + name=dict(), + nacl_id=dict(), + subnets=dict(required=False, type='list', default=list()), + tags=dict(required=False, type='dict'), + ingress=dict(required=False, type='list', default=list()), + egress=dict(required=False, type='list', default=list()), + state=dict(default='present', choices=['present', 'absent']), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'nacl_id']], + required_if=[['state', 'present', ['vpc_id']]]) + + state = module.params.get('state').lower() + + client = module.client('ec2') + + invocations = { + "present": setup_network_acl, + "absent": remove_network_acl + } + (changed, results) = invocations[state](client, module) + module.exit_json(changed=changed, nacl_id=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_nacl_facts.py b/ec2_vpc_nacl_facts.py new file mode 120000 index 00000000000..a88962d88f4 --- /dev/null +++ b/ec2_vpc_nacl_facts.py @@ -0,0 +1 @@ +ec2_vpc_nacl_info.py \ No newline at end of file diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py new file mode 100644 index 00000000000..9db13f104d2 --- /dev/null +++ b/ec2_vpc_nacl_info.py @@ -0,0 +1,222 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_nacl_info +short_description: Gather information about Network ACLs in an AWS VPC +description: + - Gather information about Network ACLs in an AWS VPC + - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change. +author: "Brad Davidson (@brandond)" +requirements: [ boto3 ] +options: + nacl_ids: + description: + - A list of Network ACL IDs to retrieve information about. + required: false + default: [] + aliases: [nacl_id] + type: list + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter + names and values are case sensitive. + required: false + default: {} + type: dict +notes: + - By default, the module will return all Network ACLs. + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
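+
+# Illustrative only: fetch specific NACLs by ID via the documented nacl_ids option.
+- name: Get NACLs by id
+  register: nacls_by_id
+  ec2_vpc_nacl_info:
+    region: us-west-2
+    nacl_ids:
+      - acl-123456789abcdef01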
+ +# Gather information about all Network ACLs: +- name: Get All NACLs + register: all_nacls + ec2_vpc_nacl_info: + region: us-west-2 + +# Retrieve default Network ACLs: +- name: Get Default NACLs + register: default_nacls + ec2_vpc_nacl_info: + region: us-west-2 + filters: + 'default': 'true' +''' + +RETURN = ''' +nacls: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + nacl_id: + description: The ID of the Network Access Control List. + returned: always + type: str + vpc_id: + description: The ID of the VPC that the NACL is attached to. + returned: always + type: str + is_default: + description: True if the NACL is the default for its VPC. + returned: always + type: bool + tags: + description: A dict of tags associated with the NACL. + returned: always + type: dict + subnets: + description: A list of subnet IDs that are associated with the NACL. + returned: always + type: list + elements: str + ingress: + description: + - A list of NACL ingress rules with the following format. + - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])" + returned: always + type: list + elements: list + sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]] + egress: + description: + - A list of NACL egress rules with the following format. + - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])" + returned: always + type: list + elements: list + sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]] +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list, + camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict) + + +# VPC-supported IANA protocol numbers +# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'} + + +def list_ec2_vpc_nacls(connection, module): + + nacl_ids = module.params.get("nacl_ids") + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + if nacl_ids is None: + nacl_ids = [] + + try: + nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters) + except ClientError as e: + if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound': + module.fail_json(msg='Unable to describe ACL. 
NetworkAcl does not exist') + module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) + except BotoCoreError as e: + module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_nacls = [] + for nacl in nacls['NetworkAcls']: + snaked_nacls.append(camel_dict_to_snake_dict(nacl)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for nacl in snaked_nacls: + if 'tags' in nacl: + nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value') + if 'entries' in nacl: + nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] + if entry['rule_number'] < 32767 and entry['egress']] + nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] + if entry['rule_number'] < 32767 and not entry['egress']] + del nacl['entries'] + if 'associations' in nacl: + nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']] + del nacl['associations'] + if 'network_acl_id' in nacl: + nacl['nacl_id'] = nacl['network_acl_id'] + del nacl['network_acl_id'] + + module.exit_json(nacls=snaked_nacls) + + +def nacl_entry_to_list(entry): + + # entry list format + # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to] + elist = [] + + elist.append(entry['rule_number']) + + if entry.get('protocol') in PROTOCOL_NAMES: + elist.append(PROTOCOL_NAMES[entry['protocol']]) + else: + elist.append(entry.get('protocol')) + + elist.append(entry['rule_action']) + + if entry.get('cidr_block'): + elist.append(entry['cidr_block']) + elif entry.get('ipv6_cidr_block'): + elist.append(entry['ipv6_cidr_block']) + else: + elist.append(None) + + elist = elist + [None, None, None, None] + + if entry['protocol'] in ('1', '58'): + elist[4] = entry.get('icmp_type_code', {}).get('type') + elist[5] = entry.get('icmp_type_code', {}).get('code') + + if entry['protocol'] not in ('1', '6', '17', '58'): + elist[6] = 0 + elist[7] = 65535 + elif 'port_range' in entry: + elist[6] = entry['port_range']['from'] + elist[7] = entry['port_range']['to'] + + return elist + + +def main(): + + argument_spec = dict( + nacl_ids=dict(default=[], type='list', aliases=['nacl_id']), + filters=dict(default={}, type='dict')) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vpc_nacl_facts': + module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", version='2.13') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_vpc_nacls(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py new file mode 100644 index 00000000000..2e35459d438 --- /dev/null +++ b/ec2_vpc_nat_gateway.py @@ -0,0 +1,1020 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_nat_gateway +short_description: Manage AWS VPC NAT Gateways. +description: + - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids. 
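+  - Optionally releases the associated Elastic IP when a NAT gateway is deleted (see the I(release_eip) option).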
+requirements: [boto3, botocore]
+options:
+  state:
+    description:
+      - Ensure NAT Gateway is present or absent.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+  nat_gateway_id:
+    description:
+      - The id AWS dynamically allocates to the NAT Gateway on creation.
+        Required when I(state=absent).
+    type: str
+  subnet_id:
+    description:
+      - The id of the subnet to create the NAT Gateway in.
+        Required when I(state=present).
+    type: str
+  allocation_id:
+    description:
+      - The id of the elastic IP allocation. If this is not passed and the
+        eip_address is not passed, an EIP is generated for this NAT Gateway.
+    type: str
+  eip_address:
+    description:
+      - The elastic IP address of the EIP you want attached to this NAT Gateway.
+        If this is not passed and the allocation_id is not passed,
+        an EIP is generated for this NAT Gateway.
+    type: str
+  if_exist_do_not_create:
+    description:
+      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
+    required: false
+    default: false
+    type: bool
+  release_eip:
+    description:
+      - Deallocate the EIP from the VPC.
+      - Option is only valid with the absent state.
+      - You should use this with the wait option, since you cannot release an address
+        while a delete operation is happening.
+    default: false
+    type: bool
+  wait:
+    description:
+      - Wait for operation to complete before returning.
+    default: false
+    type: bool
+  wait_timeout:
+    description:
+      - How many seconds to wait for an operation to complete before timing out.
+    default: 320
+    type: int
+  client_token:
+    description:
+      - Optional unique token to be used during create to ensure idempotency.
+        When specifying this option, ensure you specify the eip_address parameter
+        as well, otherwise any subsequent runs will fail.
+    type: str
+author:
+  - Allen Sanabria (@linuxdynasty)
+  - Jon Hadfield (@jonhadfield)
+  - Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+  ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    eip_address: 52.1.1.1
+    region: ap-southeast-2
+    client_token: abcd-12345678
+  register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id.
+  ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    allocation_id: eipalloc-12345678
+    region: ap-southeast-2
+  register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+  ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    eip_address: 52.1.1.1
+    wait: true
+    region: ap-southeast-2
+  register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.
+  ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    wait: true
+    region: ap-southeast-2
+  register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
+  ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    wait: true
+    region: ap-southeast-2
+    if_exist_do_not_create: true
+  register: new_nat_gateway
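+
+# The delete example below loops over gateways discovered with the
+# ec2_vpc_nat_gateway_info module added later in this patch. As a sketch,
+# a lookup like the following could populate gateways_to_remove; the
+# subnet id is a placeholder:
+- name: Discover the nat gateways to remove
+  ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+    filters:
+      subnet-id: subnet-12345678
+      state: ['available']
+  register: gateways_to_remove
+
+- name: Delete nat gateway using discovered nat gateways from facts module.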
+ ec2_vpc_nat_gateway: + state: absent + region: ap-southeast-2 + wait: true + nat_gateway_id: "{{ item.NatGatewayId }}" + release_eip: true + register: delete_nat_gateway_result + loop: "{{ gateways_to_remove.result }}" + +- name: Delete nat gateway and wait for deleted status. + ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + wait: true + wait_timeout: 500 + region: ap-southeast-2 + +- name: Delete nat gateway and release EIP. + ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + release_eip: true + wait: yes + wait_timeout: 300 + region: ap-southeast-2 +''' + +RETURN = ''' +create_time: + description: The ISO 8601 date time format in UTC. + returned: In all cases. + type: str + sample: "2016-03-05T05:19:20.282000+00:00'" +nat_gateway_id: + description: id of the VPC NAT Gateway + returned: In all cases. + type: str + sample: "nat-0d1e3a878585988f8" +subnet_id: + description: id of the Subnet + returned: In all cases. + type: str + sample: "subnet-12345" +state: + description: The current state of the NAT Gateway. + returned: In all cases. + type: str + sample: "available" +vpc_id: + description: id of the VPC. + returned: In all cases. + type: str + sample: "vpc-12345" +nat_gateway_addresses: + description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id. + returned: In all cases. + type: str + sample: [ + { + 'public_ip': '52.52.52.52', + 'network_interface_id': 'eni-12345', + 'private_ip': '10.0.0.100', + 'allocation_id': 'eipalloc-12345' + } + ] +''' + +import datetime +import random +import time + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, + camel_dict_to_snake_dict, HAS_BOTO3) + + +DRY_RUN_GATEWAYS = [ + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-123456789", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "available", + "create_time": "2016-03-05T05:19:20.282000+00:00", + "vpc_id": "vpc-12345678" + } +] + +DRY_RUN_ALLOCATION_UNCONVERTED = { + 'Addresses': [ + { + 'PublicIp': '55.55.55.55', + 'Domain': 'vpc', + 'AllocationId': 'eipalloc-1234567' + } + ] +} + +DRY_RUN_MSGS = 'DryRun Mode:' + + +def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, + states=None, check_mode=False): + """Retrieve a list of NAT Gateways + Args: + client (botocore.client.EC2): Boto3 client + + Kwargs: + subnet_id (str): The subnet_id the nat resides in. + nat_gateway_id (str): The Amazon nat id. 
+ states (list): States available (pending, failed, available, deleting, and deleted) + default=None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-12345678' + >>> get_nat_gateways(client, subnet_id) + [ + true, + "", + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-123456789", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-12345678" + } + + Returns: + Tuple (bool, str, list) + """ + params = dict() + err_msg = "" + gateways_retrieved = False + existing_gateways = list() + if not states: + states = ['available', 'pending'] + if nat_gateway_id: + params['NatGatewayIds'] = [nat_gateway_id] + else: + params['Filter'] = [ + { + 'Name': 'subnet-id', + 'Values': [subnet_id] + }, + { + 'Name': 'state', + 'Values': states + } + ] + + try: + if not check_mode: + gateways = client.describe_nat_gateways(**params)['NatGateways'] + if gateways: + for gw in gateways: + existing_gateways.append(camel_dict_to_snake_dict(gw)) + gateways_retrieved = True + else: + gateways_retrieved = True + if nat_gateway_id: + if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id: + existing_gateways = DRY_RUN_GATEWAYS + elif subnet_id: + if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id: + existing_gateways = DRY_RUN_GATEWAYS + err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return gateways_retrieved, err_msg, existing_gateways + + +def wait_for_status(client, wait_timeout, nat_gateway_id, status, + check_mode=False): + """Wait for the NAT Gateway to reach a status + Args: + client (botocore.client.EC2): Boto3 client + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + nat_gateway_id (str): The Amazon nat id. + status (str): The status to wait for. + examples. 
status=available, status=deleted
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> nat_gateway_id = 'nat-123456789'
+        >>> wait_for_status(client, 500, nat_gateway_id, 'deleted')
+        [
+            true,
+            "",
+            {
+                "nat_gateway_id": "nat-123456789",
+                "subnet_id": "subnet-1234567",
+                "nat_gateway_addresses": [
+                    {
+                        "public_ip": "55.55.55.55",
+                        "network_interface_id": "eni-1234567",
+                        "private_ip": "10.0.0.102",
+                        "allocation_id": "eipalloc-12345678"
+                    }
+                ],
+                "state": "deleted",
+                "create_time": "2016-03-05T00:33:21.209000+00:00",
+                "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                "vpc_id": "vpc-12345677"
+            }
+        ]
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+    polling_increment_secs = 5
+    wait_timeout = time.time() + wait_timeout
+    status_achieved = False
+    nat_gateway = dict()
+    states = ['pending', 'failed', 'available', 'deleting', 'deleted']
+    err_msg = ""
+
+    while wait_timeout > time.time():
+        try:
+            gws_retrieved, err_msg, nat_gateways = (
+                get_nat_gateways(
+                    client, nat_gateway_id=nat_gateway_id,
+                    states=states, check_mode=check_mode
+                )
+            )
+            if gws_retrieved and nat_gateways:
+                nat_gateway = nat_gateways[0]
+                # In check mode, pretend the desired status was reached so the
+                # wait loop can terminate
+                if check_mode:
+                    nat_gateway['state'] = status
+
+                if nat_gateway.get('state') == status:
+                    status_achieved = True
+                    break
+
+                elif nat_gateway.get('state') == 'failed':
+                    err_msg = nat_gateway.get('failure_message')
+                    break
+
+                elif nat_gateway.get('state') == 'pending':
+                    if 'failure_message' in nat_gateway:
+                        err_msg = nat_gateway.get('failure_message')
+                        status_achieved = False
+                        break
+
+            else:
+                time.sleep(polling_increment_secs)
+
+        except botocore.exceptions.ClientError as e:
+            err_msg = str(e)
+
+    # Only report a timeout if no more specific error was recorded above
+    if not status_achieved and not err_msg:
+        err_msg = "Wait timeout reached while waiting for results"
+
+    return status_achieved, err_msg, nat_gateway
+
+
+def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
+                             check_mode=False):
+    """Retrieve all NAT Gateways for a subnet.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        subnet_id (str): The subnet_id the nat resides in.
+
+    Kwargs:
+        allocation_id (str): The EIP Amazon identifier.
+            default = None
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> subnet_id = 'subnet-1234567'
+        >>> allocation_id = 'eipalloc-1234567'
+        >>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
+        (
+            [
+                {
+                    "nat_gateway_id": "nat-123456789",
+                    "subnet_id": "subnet-123456789",
+                    "nat_gateway_addresses": [
+                        {
+                            "public_ip": "55.55.55.55",
+                            "network_interface_id": "eni-1234567",
+                            "private_ip": "10.0.0.102",
+                            "allocation_id": "eipalloc-1234567"
+                        }
+                    ],
+                    "state": "deleted",
+                    "create_time": "2016-03-05T00:33:21.209000+00:00",
+                    "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                    "vpc_id": "vpc-1234567"
+                }
+            ],
+            False
+        )
+
+    Returns:
+        Tuple (list, bool)
+    """
+    allocation_id_exists = False
+    gateways = []
+    states = ['available', 'pending']
+    gws_retrieved, err_msg, gws = (
+        get_nat_gateways(
+            client, subnet_id, states=states, check_mode=check_mode
+        )
+    )
+    if not gws_retrieved:
+        return gateways, allocation_id_exists
+    for gw in gws:
+        for address in gw['nat_gateway_addresses']:
+            if allocation_id:
+                if address.get('allocation_id') == allocation_id:
+                    allocation_id_exists = True
+                    gateways.append(gw)
+            else:
+                gateways.append(gw)
+
+    return gateways, allocation_id_exists
+
+
+def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
+    """Find the allocation id of an Elastic IP by its public address
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        eip_address (str): The Elastic IP Address of the EIP.
+
+    Kwargs:
+        check_mode (bool): if set to true, do not run anything and
+            return simulated results.
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> eip_address = '52.87.29.36'
+        >>> get_eip_allocation_id_by_address(client, eip_address)
+        'eipalloc-36014da3'
+
+    Returns:
+        Tuple (str, str)
+    """
+    params = {
+        'PublicIps': [eip_address],
+    }
+    allocation_id = None
+    err_msg = ""
+    try:
+        if not check_mode:
+            allocations = client.describe_addresses(**params)['Addresses']
+            if len(allocations) == 1:
+                allocation = allocations[0]
+            else:
+                allocation = None
+        else:
+            dry_run_eip = (
+                DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
+            )
+            if dry_run_eip == eip_address:
+                allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
+            else:
+                allocation = None
+        if allocation:
+            if allocation.get('Domain') != 'vpc':
+                err_msg = (
+                    "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
+                    .format(eip_address)
+                )
+            else:
+                allocation_id = allocation.get('AllocationId')
+        else:
+            err_msg = (
+                "EIP {0} does not exist".format(eip_address)
+            )
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = str(e)
+
+    return allocation_id, err_msg
+
+
+def allocate_eip_address(client, check_mode=False):
+    """Allocate a new VPC-scoped Elastic IP address
+    Args:
+        client (botocore.client.EC2): Boto3 client
+
+    Kwargs:
+        check_mode (bool): if set to true, do not run anything and
+            return simulated results.
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> allocate_eip_address(client)
+        True
+
+    Returns:
+        Tuple (bool, str, str)
+    """
+    ip_allocated = False
+    new_eip = None
+    err_msg = ''
+    params = {
+        'Domain': 'vpc',
+    }
+    try:
+        if check_mode:
+            ip_allocated = True
+            random_numbers = (
+                ''.join(str(x) for x in random.sample(range(0, 9), 7))
+            )
+            new_eip = 'eipalloc-{0}'.format(random_numbers)
+        else:
+            new_eip = client.allocate_address(**params)['AllocationId']
+            ip_allocated = True
+        err_msg = 'eipalloc id {0} created'.format(new_eip)
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = str(e)
+
+    return ip_allocated, err_msg, new_eip
+
+
+def release_address(client, allocation_id, check_mode=False):
+    """Release an EIP from your EIP Pool
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        allocation_id (str): The eip Amazon identifier.
+
+    Kwargs:
+        check_mode (bool): if set to true, do not run anything and
+            return simulated results.
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> allocation_id = "eipalloc-123456"
+        >>> release_address(client, allocation_id)
+        True
+
+    Returns:
+        Tuple (bool, str)
+    """
+    err_msg = ''
+    if check_mode:
+        return True, ''
+
+    ip_released = False
+    try:
+        client.describe_addresses(AllocationIds=[allocation_id])
+    except botocore.exceptions.ClientError as e:
+        # IP address likely already released
+        # Happens with gateway in 'deleted' state that
+        # still lists associations
+        return True, str(e)
+    try:
+        client.release_address(AllocationId=allocation_id)
+        ip_released = True
+    except botocore.exceptions.ClientError as e:
+        err_msg = str(e)
+
+    return ip_released, err_msg
+
+
+def create(client, subnet_id, allocation_id, client_token=None,
+           wait=False, wait_timeout=0, if_exist_do_not_create=False,
+           check_mode=False):
+    """Create an Amazon NAT Gateway.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        subnet_id (str): The subnet_id the nat resides in.
+        allocation_id (str): The eip Amazon identifier.
+
+    Kwargs:
+        if_exist_do_not_create (bool): If a nat gateway already exists in this
+            subnet, then do not create another one.
+ default = False + wait (bool): Wait for the nat to be in the deleted state before returning. + default = False + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + default = 0 + client_token (str): + default = None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-1234567' + >>> allocation_id = 'eipalloc-1234567' + >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + [ + true, + "", + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-1234567", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-1234567" + } + ] + + Returns: + Tuple (bool, str, list) + """ + params = { + 'SubnetId': subnet_id, + 'AllocationId': allocation_id + } + request_time = datetime.datetime.utcnow() + changed = False + success = False + token_provided = False + err_msg = "" + + if client_token: + token_provided = True + params['ClientToken'] = client_token + + try: + if not check_mode: + result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"]) + else: + result = DRY_RUN_GATEWAYS[0] + result['create_time'] = datetime.datetime.utcnow() + result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id + result['subnet_id'] = subnet_id + + success = True + changed = True + create_time = result['create_time'].replace(tzinfo=None) + if token_provided and (request_time > create_time): + changed = False + elif wait: + success, err_msg, result = ( + wait_for_status( + client, wait_timeout, result['nat_gateway_id'], 'available', + check_mode=check_mode + ) + ) + if success: + err_msg = ( + 'NAT gateway {0} created'.format(result['nat_gateway_id']) + ) + + except botocore.exceptions.ClientError as e: + if "IdempotentParameterMismatch" in e.message: + err_msg = ( + 'NAT Gateway does not support update and token has already been provided: ' + str(e) + ) + else: + err_msg = str(e) + success = False + changed = False + result = None + + return success, changed, err_msg, result + + +def pre_create(client, subnet_id, allocation_id=None, eip_address=None, + if_exist_do_not_create=False, wait=False, wait_timeout=0, + client_token=None, check_mode=False): + """Create an Amazon NAT Gateway. + Args: + client (botocore.client.EC2): Boto3 client + subnet_id (str): The subnet_id the nat resides in. + + Kwargs: + allocation_id (str): The EIP Amazon identifier. + default = None + eip_address (str): The Elastic IP Address of the EIP. + default = None + if_exist_do_not_create (bool): if a nat gateway already exists in this + subnet, than do not create another one. + default = False + wait (bool): Wait for the nat to be in the deleted state before returning. + default = False + wait_timeout (int): Number of seconds to wait, until this timeout is reached. 
+ default = 0 + client_token (str): + default = None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-w4t12897' + >>> allocation_id = 'eipalloc-36014da3' + >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + [ + true, + "", + { + "nat_gateway_id": "nat-03835afb6e31df79b", + "subnet_id": "subnet-w4t12897", + "nat_gateway_addresses": [ + { + "public_ip": "52.87.29.36", + "network_interface_id": "eni-5579742d", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-36014da3" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-w68571b5" + } + ] + + Returns: + Tuple (bool, bool, str, list) + """ + success = False + changed = False + err_msg = "" + results = list() + + if not allocation_id and not eip_address: + existing_gateways, allocation_id_exists = ( + gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode) + ) + + if len(existing_gateways) > 0 and if_exist_do_not_create: + success = True + changed = False + results = existing_gateways[0] + err_msg = ( + 'NAT Gateway {0} already exists in subnet_id {1}' + .format( + existing_gateways[0]['nat_gateway_id'], subnet_id + ) + ) + return success, changed, err_msg, results + else: + success, err_msg, allocation_id = ( + allocate_eip_address(client, check_mode=check_mode) + ) + if not success: + return success, 'False', err_msg, dict() + + elif eip_address or allocation_id: + if eip_address and not allocation_id: + allocation_id, err_msg = ( + get_eip_allocation_id_by_address( + client, eip_address, check_mode=check_mode + ) + ) + if not allocation_id: + success = False + changed = False + return success, changed, err_msg, dict() + + existing_gateways, allocation_id_exists = ( + gateway_in_subnet_exists( + client, subnet_id, allocation_id, check_mode=check_mode + ) + ) + if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): + success = True + changed = False + results = existing_gateways[0] + err_msg = ( + 'NAT Gateway {0} already exists in subnet_id {1}' + .format( + existing_gateways[0]['nat_gateway_id'], subnet_id + ) + ) + return success, changed, err_msg, results + + success, changed, err_msg, results = create( + client, subnet_id, allocation_id, client_token, + wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode + ) + + return success, changed, err_msg, results + + +def remove(client, nat_gateway_id, wait=False, wait_timeout=0, + release_eip=False, check_mode=False): + """Delete an Amazon NAT Gateway. + Args: + client (botocore.client.EC2): Boto3 client + nat_gateway_id (str): The Amazon nat id. + + Kwargs: + wait (bool): Wait for the nat to be in the deleted state before returning. + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc. 
+ + Basic Usage: + >>> client = boto3.client('ec2') + >>> nat_gw_id = 'nat-03835afb6e31df79b' + >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True) + [ + true, + "", + { + "nat_gateway_id": "nat-03835afb6e31df79b", + "subnet_id": "subnet-w4t12897", + "nat_gateway_addresses": [ + { + "public_ip": "52.87.29.36", + "network_interface_id": "eni-5579742d", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-36014da3" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-w68571b5" + } + ] + + Returns: + Tuple (bool, str, list) + """ + params = { + 'NatGatewayId': nat_gateway_id + } + success = False + changed = False + err_msg = "" + results = list() + states = ['pending', 'available'] + try: + exist, err_msg, gw = ( + get_nat_gateways( + client, nat_gateway_id=nat_gateway_id, + states=states, check_mode=check_mode + ) + ) + if exist and len(gw) == 1: + results = gw[0] + if not check_mode: + client.delete_nat_gateway(**params) + + allocation_id = ( + results['nat_gateway_addresses'][0]['allocation_id'] + ) + changed = True + success = True + err_msg = ( + 'NAT gateway {0} is in a deleting state. Delete was successful' + .format(nat_gateway_id) + ) + + if wait: + status_achieved, err_msg, results = ( + wait_for_status( + client, wait_timeout, nat_gateway_id, 'deleted', + check_mode=check_mode + ) + ) + if status_achieved: + err_msg = ( + 'NAT gateway {0} was deleted successfully' + .format(nat_gateway_id) + ) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + if release_eip: + eip_released, eip_err = ( + release_address(client, allocation_id, check_mode) + ) + if not eip_released: + err_msg = ( + "{0}: Failed to release EIP {1}: {2}" + .format(err_msg, allocation_id, eip_err) + ) + success = False + + return success, changed, err_msg, results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + subnet_id=dict(type='str'), + eip_address=dict(type='str'), + allocation_id=dict(type='str'), + if_exist_do_not_create=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=320, required=False), + release_eip=dict(type='bool', default=False), + nat_gateway_id=dict(type='str'), + client_token=dict(type='str'), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['allocation_id', 'eip_address'] + ], + required_if=[['state', 'absent', ['nat_gateway_id']], + ['state', 'present', ['subnet_id']]] + ) + + # Validate Requirements + if not HAS_BOTO3: + module.fail_json(msg='botocore/boto3 is required.') + + state = module.params.get('state').lower() + check_mode = module.check_mode + subnet_id = module.params.get('subnet_id') + allocation_id = module.params.get('allocation_id') + eip_address = module.params.get('eip_address') + nat_gateway_id = module.params.get('nat_gateway_id') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + release_eip = module.params.get('release_eip') + client_token = module.params.get('client_token') + if_exist_do_not_create = module.params.get('if_exist_do_not_create') + + try: + region, ec2_url, aws_connect_kwargs = ( + get_aws_connection_info(module, boto3=True) + ) + client = ( + boto3_conn( + module, conn_type='client', resource='ec2', + region=region, endpoint=ec2_url, 
**aws_connect_kwargs
+            )
+        )
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Boto3 Client Error - " + str(e))
+
+    changed = False
+    err_msg = ''
+
+    if state == 'present':
+        success, changed, err_msg, results = (
+            pre_create(
+                client, subnet_id, allocation_id, eip_address,
+                if_exist_do_not_create, wait, wait_timeout,
+                client_token, check_mode=check_mode
+            )
+        )
+    else:
+        success, changed, err_msg, results = (
+            remove(
+                client, nat_gateway_id, wait, wait_timeout, release_eip,
+                check_mode=check_mode
+            )
+        )
+
+    if not success:
+        module.fail_json(
+            msg=err_msg, success=success, changed=changed
+        )
+    else:
+        module.exit_json(
+            msg=err_msg, success=success, changed=changed, **results
+        )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_vpc_nat_gateway_facts.py b/ec2_vpc_nat_gateway_facts.py
new file mode 120000
index 00000000000..fd969989977
--- /dev/null
+++ b/ec2_vpc_nat_gateway_facts.py
@@ -0,0 +1 @@
+ec2_vpc_nat_gateway_info.py
\ No newline at end of file
diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py
new file mode 100644
index 00000000000..b86e4bb8114
--- /dev/null
+++ b/ec2_vpc_nat_gateway_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_nat_gateway_info
+short_description: Retrieves AWS VPC Managed NAT Gateway details using AWS methods.
+description:
+  - Gets various details related to AWS VPC Managed NAT Gateways.
+  - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+  nat_gateway_ids:
+    description:
+      - List of specific nat gateway IDs to fetch details for.
+    type: list
+    elements: str
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
+        for possible filters.
+    type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Simple example of listing all nat gateways
+- name: List all managed nat gateways in ap-southeast-2
+  ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+  register: all_ngws
+
+- name: Debugging the result
+  debug:
+    msg: "{{ all_ngws.result }}"
+
+- name: Get details on specific nat gateways
+  ec2_vpc_nat_gateway_info:
+    nat_gateway_ids:
+      - nat-1234567891234567
+      - nat-7654321987654321
+    region: ap-southeast-2
+  register: specific_ngws
+
+- name: Get all nat gateways with specific filters
+  ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+    filters:
+      state: ['pending']
+  register: pending_ngws
+
+- name: Get nat gateways with specific filter
+  ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+    filters:
+      subnet-id: subnet-12345678
+      state: ['available']
+  register: existing_nat_gateways
+'''
+
+RETURN = '''
+result:
+  description: The result of the describe, converted to ansible snake case style.
+    See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
+  returned: success
+  type: list
+'''
+
+import json
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be detected by imported HAS_BOTO3
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
+                                                                         camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list,
+                                                                         boto3_tag_list_to_ansible_dict, HAS_BOTO3)
+
+
+def date_handler(obj):
+    return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_nat_gateways(client, module, nat_gateway_id=None):
+    params = dict()
+    nat_gateways = list()
+
+    params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+
+    try:
+        result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    for gateway in result['NatGateways']:
+        # Turn the boto3 result into ansible_friendly_snaked_names
+        converted_gateway = camel_dict_to_snake_dict(gateway)
+        if 'tags' in converted_gateway:
+            # Turn the boto3 result into an ansible friendly tag dictionary
+            converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+
+        nat_gateways.append(converted_gateway)
+
+    return nat_gateways
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            filters=dict(default={}, type='dict'),
+            nat_gateway_ids=dict(default=[], type='list'),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+    if module._name == 'ec2_vpc_nat_gateway_facts':
+        module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", version='2.13')
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='botocore/boto3 is required.')
+
+    try:
+        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+        if region:
+            connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+        else:
+            module.fail_json(msg="region must be specified")
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg=str(e))
+
+    results = get_nat_gateways(connection, module)
+
+    module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py
new file mode 100644
index 00000000000..9e1cdd06112
--- /dev/null
+++ b/ec2_vpc_peer.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: ec2_vpc_peer
+short_description: Create, delete, accept, and reject VPC peering connections between two VPCs.
+description:
+  - Read the AWS documentation for VPC Peering Connections
+    U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html).
+options:
+  vpc_id:
+    description:
+      - VPC id of the requesting VPC.
+    required: false
+    type: str
+  peering_id:
+    description:
+      - Peering connection id.
+    required: false
+    type: str
+  peer_region:
+    description:
+      - Region of the accepting VPC.
+ required: false + type: str + peer_vpc_id: + description: + - VPC id of the accepting VPC. + required: false + type: str + peer_owner_id: + description: + - The AWS account number for cross account peering. + required: false + type: str + tags: + description: + - Dictionary of tags to look for and apply when creating a Peering Connection. + required: false + type: dict + state: + description: + - Create, delete, accept, reject a peering connection. + required: false + default: present + choices: ['present', 'absent', 'accept', 'reject'] + type: str +author: Mike Mochan (@mmochan) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ botocore, boto3, json ] +''' + +EXAMPLES = ''' +# Complete example to create and accept a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept local VPC peering request + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: accept + register: action_peer + +# Complete example to delete a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: delete a local VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: absent + register: vpc_peer + + # Complete example to create and accept a cross account peering connection. +- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept peering connection from remote account + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: accept + register: vpc_peer + +# Complete example to create and accept an intra-region peering connection. +- name: Create intra-region VPC peering Connection + ec2_vpc_peer: + region: us-east-1 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + peer_region: us-west-2 + state: present + tags: + Name: Peering connection for us-east-1 VPC to us-west-2 VPC + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept peering connection from peer region + ec2_vpc_peer: + region: us-west-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: accept + register: vpc_peer + +# Complete example to create and reject a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Reject a local VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: reject + +# Complete example to create and accept a cross account peering connection. 
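+# When accepting from the second account a separate profile is used. As a
+# hedged sketch (reusing the profile name from the examples here), the
+# pending request can first be confirmed with ec2_vpc_peering_info, which is
+# added later in this patch:
+- name: Confirm the peering request is pending acceptance
+  ec2_vpc_peering_info:
+    region: ap-southeast-2
+    profile: bot03_profile_for_cross_account
+    filters:
+      status-code: ['pending-acceptance']
+  register: pending_vpc_peers
+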
+- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept a cross account VPC peering connection request + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: accept + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + +# Complete example to create and reject a cross account peering connection. +- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Reject a cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: reject + +''' +RETURN = ''' +task: + description: The result of the create, accept, reject or delete action. + returned: success + type: dict +''' + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +import distutils.version +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3 +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import is_boto3_error_code + + +def tags_changed(pcx_id, client, module): + changed = False + tags = dict() + if module.params.get('tags'): + tags = module.params.get('tags') + pcx = find_pcx_by_id(pcx_id, client, module) + if pcx['VpcPeeringConnections']: + pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']] + pcx_tags = [item for sublist in pcx_values for item in sublist] + tag_values = [[key, str(value)] for key, value in tags.items()] + tags = [item for sublist in tag_values for item in sublist] + if sorted(pcx_tags) == sorted(tags): + changed = False + elif tags: + delete_tags(pcx_id, client, module) + create_tags(pcx_id, client, module) + changed = True + return changed + + +def describe_peering_connections(params, client): + result = client.describe_vpc_peering_connections( + Filters=[ + {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]}, + {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]} + ] + ) + if result['VpcPeeringConnections'] == []: + result = client.describe_vpc_peering_connections( + Filters=[ + {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}, + {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]} + ] + ) + return result + + +def is_active(peering_conn): + return peering_conn['Status']['Code'] == 'active' + + +def is_pending(peering_conn): + return peering_conn['Status']['Code'] == 'pending-acceptance' + + +def create_peer_connection(client, module): + changed = False + params = dict() + params['VpcId'] = module.params.get('vpc_id') + params['PeerVpcId'] = module.params.get('peer_vpc_id') + if module.params.get('peer_region'): + if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.8.6'): + module.fail_json(msg="specifying peer_region parameter 
requires botocore >= 1.8.6") + params['PeerRegion'] = module.params.get('peer_region') + if module.params.get('peer_owner_id'): + params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + peering_conns = describe_peering_connections(params, client) + for peering_conn in peering_conns['VpcPeeringConnections']: + pcx_id = peering_conn['VpcPeeringConnectionId'] + if tags_changed(pcx_id, client, module): + changed = True + if is_active(peering_conn): + return (changed, peering_conn['VpcPeeringConnectionId']) + if is_pending(peering_conn): + return (changed, peering_conn['VpcPeeringConnectionId']) + try: + peering_conn = client.create_vpc_peering_connection(**params) + pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] + if module.params.get('tags'): + create_tags(pcx_id, client, module) + changed = True + return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def remove_peer_connection(client, module): + pcx_id = module.params.get('peering_id') + if not pcx_id: + params = dict() + params['VpcId'] = module.params.get('vpc_id') + params['PeerVpcId'] = module.params.get('peer_vpc_id') + params['PeerRegion'] = module.params.get('peer_region') + if module.params.get('peer_owner_id'): + params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + peering_conns = describe_peering_connections(params, client) + if not peering_conns: + module.exit_json(changed=False) + else: + pcx_id = peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId'] + + try: + params = dict() + params['VpcPeeringConnectionId'] = pcx_id + client.delete_vpc_peering_connection(**params) + module.exit_json(changed=True) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def peer_status(client, module): + params = dict() + params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')] + try: + vpc_peering_connection = client.describe_vpc_peering_connections(**params) + return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code'] + except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: # pylint: disable=duplicate-except + module.fail_json(msg='Malformed connection ID: {0}'.format(e), traceback=traceback.format_exc()) + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e), traceback=traceback.format_exc()) + + +def accept_reject(state, client, module): + changed = False + params = dict() + params['VpcPeeringConnectionId'] = module.params.get('peering_id') + if peer_status(client, module) != 'active': + try: + if state == 'accept': + client.accept_vpc_peering_connection(**params) + else: + client.reject_vpc_peering_connection(**params) + if module.params.get('tags'): + create_tags(params['VpcPeeringConnectionId'], client, module) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + if tags_changed(params['VpcPeeringConnectionId'], client, module): + changed = True + return changed, params['VpcPeeringConnectionId'] + + +def load_tags(module): + tags = [] + if module.params.get('tags'): + for name, value in module.params.get('tags').items(): + tags.append({'Key': name, 'Value': str(value)}) + return tags + + +def create_tags(pcx_id, client, module): + try: + delete_tags(pcx_id, client, module) + client.create_tags(Resources=[pcx_id], 
Tags=load_tags(module)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def delete_tags(pcx_id, client, module): + try: + client.delete_tags(Resources=[pcx_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def find_pcx_by_id(pcx_id, client, module): + try: + return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + vpc_id=dict(), + peer_vpc_id=dict(), + peer_region=dict(), + peering_id=dict(), + peer_owner_id=dict(), + tags=dict(required=False, type='dict'), + profile=dict(), + state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']) + ) + ) + required_if = [ + ('state', 'present', ['vpc_id', 'peer_vpc_id']), + ('state', 'accept', ['peering_id']), + ('state', 'reject', ['peering_id']) + ] + + module = AnsibleModule(argument_spec=argument_spec, required_if=required_if) + + if not HAS_BOTO3: + module.fail_json(msg='json, botocore and boto3 are required.') + state = module.params.get('state') + peering_id = module.params.get('peering_id') + vpc_id = module.params.get('vpc_id') + peer_vpc_id = module.params.get('peer_vpc_id') + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Can't authorize connection - " + str(e)) + + if state == 'present': + (changed, results) = create_peer_connection(client, module) + module.exit_json(changed=changed, peering_id=results) + elif state == 'absent': + if not peering_id and (not vpc_id or not peer_vpc_id): + module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]') + + remove_peer_connection(client, module) + else: + (changed, results) = accept_reject(state, client, module) + module.exit_json(changed=changed, peering_id=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_peering_facts.py b/ec2_vpc_peering_facts.py new file mode 120000 index 00000000000..074baf65a0f --- /dev/null +++ b/ec2_vpc_peering_facts.py @@ -0,0 +1 @@ +ec2_vpc_peering_info.py \ No newline at end of file diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py new file mode 100644 index 00000000000..4fe5a4a1bd1 --- /dev/null +++ b/ec2_vpc_peering_info.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: ec2_vpc_peering_info +short_description: Retrieves AWS VPC Peering details using AWS methods. +description: + - Gets various details related to AWS VPC Peers + - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +options: + peer_connection_ids: + description: + - List of specific VPC peer IDs to get details for. + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. 
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
+        for possible filters.
+    type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Simple example of listing all VPC Peers
+- name: List all vpc peers
+  ec2_vpc_peering_info:
+    region: ap-southeast-2
+  register: all_vpc_peers
+
+- name: Debugging the result
+  debug:
+    msg: "{{ all_vpc_peers.result }}"
+
+- name: Get details on specific VPC peer
+  ec2_vpc_peering_info:
+    peer_connection_ids:
+      - pcx-12345678
+      - pcx-87654321
+    region: ap-southeast-2
+  register: all_vpc_peers
+
+- name: Get all vpc peers with specific filters
+  ec2_vpc_peering_info:
+    region: ap-southeast-2
+    filters:
+      status-code: ['pending-acceptance']
+  register: pending_vpc_peers
+'''
+
+RETURN = '''
+result:
+  description: The result of the describe.
+  returned: success
+  type: list
+'''
+
+import json
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be picked up by imported HAS_BOTO3
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_tag_list_to_ansible_dict,
+                                                                         ec2_argument_spec, boto3_conn, get_aws_connection_info,
+                                                                         ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict)
+
+
+def date_handler(obj):
+    return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def get_vpc_peers(client, module):
+    params = dict()
+    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    if module.params.get('peer_connection_ids'):
+        params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
+    try:
+        result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler))
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    return result['VpcPeeringConnections']
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            filters=dict(default=dict(), type='dict'),
+            peer_connection_ids=dict(default=None, type='list'),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+    if module._name == 'ec2_vpc_peering_facts':
+        module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", version='2.13')
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='botocore and boto3 are required.')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+    except NameError as e:
+        # Getting around the get_aws_connection_info boto reliance for region
+        if "global name 'boto' is not defined" in str(e):
+            module.params['region'] = botocore.session.get_session().get_config_variable('region')
+            if not module.params['region']:
+                module.fail_json(msg="Error - no region provided")
+        else:
+            module.fail_json(msg="Can't retrieve connection information - " + str(e))
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg=str(e))
+
+    # Turn the boto3 result into ansible friendly_snaked_names
+    results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
+
+    # Turn the boto3 result into an ansible friendly tag dictionary
+    for peer in results:
+        peer['tags']
= boto3_tag_list_to_ansible_dict(peer.get('tags', [])) + + module.exit_json(result=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py new file mode 100644 index 00000000000..538f40b0f0b --- /dev/null +++ b/ec2_vpc_route_table.py @@ -0,0 +1,747 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_route_table +short_description: Manage route tables for AWS virtual private clouds +description: + - Manage route tables for AWS virtual private clouds +author: +- Robert Estelle (@erydo) +- Rob White (@wimnat) +- Will Thames (@willthames) +options: + lookup: + description: Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. + If no tags are specified then no lookup for an existing route table is performed and a new + route table will be created. To change tags of a route table you must look up by id. + default: tag + choices: [ 'tag', 'id' ] + type: str + propagating_vgw_ids: + description: Enable route propagation from virtual gateways specified by ID. + type: list + elements: str + purge_routes: + description: Purge existing routes that are not found in routes. + type: bool + default: 'yes' + purge_subnets: + description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied. + default: 'true' + type: bool + purge_tags: + description: Purge existing tags that are not found in route table. + type: bool + default: 'no' + route_table_id: + description: + - The ID of the route table to update or delete. + - Required when I(lookup=id). + type: str + routes: + description: List of routes in the route table. + Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', + 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'. + If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. + Routes are required for present states. + type: list + elements: dict + state: + description: Create or destroy the VPC route table. + default: present + choices: [ 'present', 'absent' ] + type: str + subnets: + description: An array of subnets to add to this route table. Subnets may be specified + by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. + type: list + elements: str + tags: + description: > + A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are + used to uniquely identify route tables within a VPC when the route_table_id is not supplied. + aliases: [ "resource_tags" ] + type: dict + vpc_id: + description: + - VPC ID of the VPC in which to create the route table. + - Required when I(state=present) or I(lookup=tag). + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
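+
+# A sketch of updating tags on an existing table. Per the option docs above,
+# changing tags requires lookup by id; the route table id and tag values here
+# are placeholders:
+- name: Add a tag to an existing route table looked up by id
+  ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    region: us-west-1
+    lookup: id
+    route_table_id: rtb-bf779ed7
+    purge_tags: false
+    tags:
+      Environment: production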
+ +# Basic creation example: +- name: Set up public subnet route table + ec2_vpc_route_table: + vpc_id: vpc-1245678 + region: us-west-1 + tags: + Name: Public + subnets: + - "{{ jumpbox_subnet.subnet.id }}" + - "{{ frontend_subnet.subnet.id }}" + - "{{ vpn_subnet.subnet_id }}" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + register: public_route_table + +- name: Set up NAT-protected route table + ec2_vpc_route_table: + vpc_id: vpc-1245678 + region: us-west-1 + tags: + Name: Internal + subnets: + - "{{ application_subnet.subnet.id }}" + - 'Database Subnet' + - '10.0.0.0/8' + routes: + - dest: 0.0.0.0/0 + instance_id: "{{ nat.instance_id }}" + register: nat_route_table + +- name: delete route table + ec2_vpc_route_table: + vpc_id: vpc-1245678 + region: us-west-1 + route_table_id: "{{ route_table.id }}" + lookup: id + state: absent +''' + +RETURN = ''' +route_table: + description: Route Table result + returned: always + type: complex + contains: + associations: + description: List of subnets associated with the route table + returned: always + type: complex + contains: + main: + description: Whether this is the main route table + returned: always + type: bool + sample: false + route_table_association_id: + description: ID of association between route table and subnet + returned: always + type: str + sample: rtbassoc-ab47cfc3 + route_table_id: + description: ID of the route table + returned: always + type: str + sample: rtb-bf779ed7 + subnet_id: + description: ID of the subnet + returned: always + type: str + sample: subnet-82055af9 + id: + description: ID of the route table (same as route_table_id for backwards compatibility) + returned: always + type: str + sample: rtb-bf779ed7 + propagating_vgws: + description: List of Virtual Private Gateways propagating routes + returned: always + type: list + sample: [] + route_table_id: + description: ID of the route table + returned: always + type: str + sample: rtb-bf779ed7 + routes: + description: List of routes in the route table + returned: always + type: complex + contains: + destination_cidr_block: + description: CIDR block of destination + returned: always + type: str + sample: 10.228.228.0/22 + gateway_id: + description: ID of the gateway + returned: when gateway is local or internet gateway + type: str + sample: local + instance_id: + description: ID of a NAT instance + returned: when the route is via an EC2 instance + type: str + sample: i-abcd123456789 + instance_owner_id: + description: AWS account owning the NAT instance + returned: when the route is via an EC2 instance + type: str + sample: 123456789012 + nat_gateway_id: + description: ID of the NAT gateway + returned: when the route is via a NAT gateway + type: str + sample: local + origin: + description: mechanism through which the route is in the table + returned: always + type: str + sample: CreateRouteTable + state: + description: state of the route + returned: always + type: str + sample: active + tags: + description: Tags applied to the route table + returned: always + type: dict + sample: + Name: Public route table + Public: 'true' + vpc_id: + description: ID for the VPC in which the route lives + returned: always + type: str + sample: vpc-6e2d2407 +''' + +import re +from time import sleep +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import 
ansible_dict_to_boto3_filter_list +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, AWSRetry + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') +SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') +ROUTE_TABLE_RE = re.compile(r'^rtb-[A-z0-9]+$') + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(connection, **params): + return connection.describe_subnets(**params)['Subnets'] + + +def find_subnets(connection, module, vpc_id, identified_subnets): + """ + Finds a list of subnets, each identified either by a raw ID, a unique + 'Name' tag, or a CIDR such as 10.0.0.0/8. + + Note that this function is duplicated in other ec2 modules, and should + potentially be moved into a shared module_utils + """ + subnet_ids = [] + subnet_names = [] + subnet_cidrs = [] + for subnet in (identified_subnets or []): + if re.match(SUBNET_RE, subnet): + subnet_ids.append(subnet) + elif re.match(CIDR_RE, subnet): + subnet_cidrs.append(subnet) + else: + subnet_names.append(subnet) + + subnets_by_id = [] + if subnet_ids: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + try: + subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) + + subnets_by_cidr = [] + if subnet_cidrs: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs}) + try: + subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) + + subnets_by_name = [] + if subnet_names: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names}) + try: + subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) + + for name in subnet_names: + matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name]) + if matching_count == 0: + module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) + elif matching_count > 1: + module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) + + return subnets_by_id + subnets_by_cidr + subnets_by_name + + +def find_igw(connection, module, vpc_id): + """ + Finds the Internet gateway for the given VPC ID. 
+ """ + filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + try: + igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) + if len(igw) == 1: + return igw[0]['InternetGatewayId'] + elif len(igw) == 0: + module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id)) + else: + module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id)) + + +@AWSRetry.exponential_backoff() +def describe_tags_with_backoff(connection, resource_id): + filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id}) + paginator = connection.get_paginator('describe_tags') + tags = paginator.paginate(Filters=filters).build_full_result()['Tags'] + return boto3_tag_list_to_ansible_dict(tags) + + +def tags_match(match_tags, candidate_tags): + return all((k in candidate_tags and candidate_tags[k] == v + for k, v in match_tags.items())) + + +def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None): + try: + cur_tags = describe_tags_with_backoff(connection, resource_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to list tags for VPC') + + to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags) + + if not to_add and not to_delete: + return {'changed': False, 'tags': cur_tags} + if check_mode: + if not purge_tags: + tags = cur_tags.update(tags) + return {'changed': True, 'tags': tags} + + if to_delete: + try: + connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete tags") + if to_add: + try: + connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create tags") + + try: + latest_tags = describe_tags_with_backoff(connection, resource_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to list tags for VPC') + return {'changed': True, 'tags': latest_tags} + + +@AWSRetry.exponential_backoff() +def describe_route_tables_with_backoff(connection, **params): + try: + return connection.describe_route_tables(**params)['RouteTables'] + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound': + return None + else: + raise + + +def get_route_table_by_id(connection, module, route_table_id): + + route_table = None + try: + route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route table") + if route_tables: + route_table = route_tables[0] + + return route_table + + +def get_route_table_by_tags(connection, module, vpc_id, tags): + count = 0 + route_table = None + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + try: + route_tables = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route table") + for table 
in route_tables: + this_tags = describe_tags_with_backoff(connection, table['RouteTableId']) + if tags_match(tags, this_tags): + route_table = table + count += 1 + + if count > 1: + module.fail_json(msg="Tags provided do not identify a unique route table") + else: + return route_table + + +def route_spec_matches_route(route_spec, route): + if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']: + route_spec['NatGatewayId'] = route_spec.pop('GatewayId') + if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']: + if route_spec.get('DestinationCidrBlock', '').startswith('pl-'): + route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock') + + return set(route_spec.items()).issubset(route.items()) + + +def route_spec_matches_route_cidr(route_spec, route): + return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock') + + +def rename_key(d, old_key, new_key): + d[new_key] = d.pop(old_key) + + +def index_of_matching_route(route_spec, routes_to_match): + for i, route in enumerate(routes_to_match): + if route_spec_matches_route(route_spec, route): + return "exact", i + elif 'Origin' in route_spec and route_spec['Origin'] != 'EnableVgwRoutePropagation': + if route_spec_matches_route_cidr(route_spec, route): + return "replace", i + + +def ensure_routes(connection=None, module=None, route_table=None, route_specs=None, + propagating_vgw_ids=None, check_mode=None, purge_routes=None): + routes_to_match = [route for route in route_table['Routes']] + route_specs_to_create = [] + route_specs_to_recreate = [] + for route_spec in route_specs: + match = index_of_matching_route(route_spec, routes_to_match) + if match is None: + if route_spec.get('DestinationCidrBlock'): + route_specs_to_create.append(route_spec) + else: + module.warn("Skipping creating {0} because it has no destination cidr block. " + "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec)) + else: + if match[0] == "replace": + if route_spec.get('DestinationCidrBlock'): + route_specs_to_recreate.append(route_spec) + else: + module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec)) + del routes_to_match[match[1]] + + routes_to_delete = [] + if purge_routes: + for r in routes_to_match: + if not r.get('DestinationCidrBlock'): + module.warn("Skipping purging route {0} because it has no destination cidr block. 
" + "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r)) + continue + if r['Origin'] == 'CreateRoute': + routes_to_delete.append(r) + + changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate) + if changed and not check_mode: + for route in routes_to_delete: + try: + connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete route") + + for route_spec in route_specs_to_recreate: + try: + connection.replace_route(RouteTableId=route_table['RouteTableId'], + **route_spec) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't recreate route") + + for route_spec in route_specs_to_create: + try: + connection.create_route(RouteTableId=route_table['RouteTableId'], + **route_spec) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create route") + + return {'changed': bool(changed)} + + +def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None, + check_mode=None): + filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id}) + try: + route_tables = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route tables") + for route_table in route_tables: + if route_table['RouteTableId'] is None: + continue + for a in route_table['Associations']: + if a['Main']: + continue + if a['SubnetId'] == subnet_id: + if route_table['RouteTableId'] == route_table_id: + return {'changed': False, 'association_id': a['RouteTableAssociationId']} + else: + if check_mode: + return {'changed': True} + try: + connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") + + try: + association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate subnet with route table") + return {'changed': True, 'association_id': association_id} + + +def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None, + check_mode=None, purge_subnets=None): + current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']] + new_association_ids = [] + changed = False + for subnet in subnets: + result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'], + route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode) + changed = changed or result['changed'] + if changed and check_mode: + return {'changed': True} + new_association_ids.append(result['association_id']) + + if purge_subnets: + to_delete = [a_id for a_id in current_association_ids + if a_id not in new_association_ids] + + for a_id in to_delete: + changed = True + if not check_mode: + try: + connection.disassociate_route_table(AssociationId=a_id) + except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") + + return {'changed': changed} + + +def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None, + check_mode=None): + changed = False + gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']] + to_add = set(propagating_vgw_ids) - set(gateways) + if to_add: + changed = True + if not check_mode: + for vgw_id in to_add: + try: + connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'], + GatewayId=vgw_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't enable route propagation") + + return {'changed': changed} + + +def ensure_route_table_absent(connection, module): + + lookup = module.params.get('lookup') + route_table_id = module.params.get('route_table_id') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + purge_subnets = module.params.get('purge_subnets') + + if lookup == 'tag': + if tags is not None: + route_table = get_route_table_by_tags(connection, module, vpc_id, tags) + else: + route_table = None + elif lookup == 'id': + route_table = get_route_table_by_id(connection, module, route_table_id) + + if route_table is None: + return {'changed': False} + + # disassociate subnets before deleting route table + if not module.check_mode: + ensure_subnet_associations(connection=connection, module=module, route_table=route_table, + subnets=[], check_mode=False, purge_subnets=purge_subnets) + try: + connection.delete_route_table(RouteTableId=route_table['RouteTableId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error deleting route table") + + return {'changed': True} + + +def get_route_table_info(connection, module, route_table): + result = get_route_table_by_id(connection, module, route_table['RouteTableId']) + try: + result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get tags for route table") + result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) + # backwards compatibility + result['id'] = result['route_table_id'] + return result + + +def create_route_spec(connection, module, vpc_id): + routes = module.params.get('routes') + + for route_spec in routes: + rename_key(route_spec, 'dest', 'destination_cidr_block') + + if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': + igw = find_igw(connection, module, vpc_id) + route_spec['gateway_id'] = igw + if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'): + rename_key(route_spec, 'gateway_id', 'nat_gateway_id') + + return snake_dict_to_camel_dict(routes, capitalize_first=True) + + +def ensure_route_table_present(connection, module): + + lookup = module.params.get('lookup') + propagating_vgw_ids = module.params.get('propagating_vgw_ids') + purge_routes = module.params.get('purge_routes') + purge_subnets = module.params.get('purge_subnets') + purge_tags = module.params.get('purge_tags') + route_table_id = module.params.get('route_table_id') + subnets = module.params.get('subnets') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + routes = create_route_spec(connection, module, vpc_id) + + changed = 
False + tags_valid = False + + if lookup == 'tag': + if tags is not None: + try: + route_table = get_route_table_by_tags(connection, module, vpc_id, tags) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'") + else: + route_table = None + elif lookup == 'id': + try: + route_table = get_route_table_by_id(connection, module, route_table_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error finding route table with lookup 'id'") + + # If no route table returned then create new route table + if route_table is None: + changed = True + if not module.check_mode: + try: + route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable'] + # try to wait for route table to be present before moving on + get_waiter( + connection, 'route_table_exists' + ).wait( + RouteTableIds=[route_table['RouteTableId']], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error creating route table") + else: + route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id} + module.exit_json(changed=changed, route_table=route_table) + + if routes is not None: + result = ensure_routes(connection=connection, module=module, route_table=route_table, + route_specs=routes, propagating_vgw_ids=propagating_vgw_ids, + check_mode=module.check_mode, purge_routes=purge_routes) + changed = changed or result['changed'] + + if propagating_vgw_ids is not None: + result = ensure_propagation(connection=connection, module=module, route_table=route_table, + propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode) + changed = changed or result['changed'] + + if not tags_valid and tags is not None: + result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags, + purge_tags=purge_tags, check_mode=module.check_mode) + route_table['Tags'] = result['tags'] + changed = changed or result['changed'] + + if subnets is not None: + associated_subnets = find_subnets(connection, module, vpc_id, subnets) + + result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table, + subnets=associated_subnets, check_mode=module.check_mode, + purge_subnets=purge_subnets) + changed = changed or result['changed'] + + if changed: + # pause to allow route table routes/subnets/associations to be updated before exiting with final state + sleep(5) + module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table)) + + +def main(): + argument_spec = dict( + lookup=dict(default='tag', choices=['tag', 'id']), + propagating_vgw_ids=dict(type='list'), + purge_routes=dict(default=True, type='bool'), + purge_subnets=dict(default=True, type='bool'), + purge_tags=dict(default=False, type='bool'), + route_table_id=dict(), + routes=dict(default=[], type='list'), + state=dict(default='present', choices=['present', 'absent']), + subnets=dict(type='list'), + tags=dict(type='dict', aliases=['resource_tags']), + vpc_id=dict() + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['lookup', 'id', ['route_table_id']], + ['lookup', 'tag', ['vpc_id']], + ['state', 'present', ['vpc_id']]], + supports_check_mode=True) + + connection = module.client('ec2') + + state = module.params.get('state') + + if state == 'present': + result = 
ensure_route_table_present(connection, module) + elif state == 'absent': + result = ensure_route_table_absent(connection, module) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_route_table_facts.py b/ec2_vpc_route_table_facts.py new file mode 120000 index 00000000000..ed0f72a1aa3 --- /dev/null +++ b/ec2_vpc_route_table_facts.py @@ -0,0 +1 @@ +ec2_vpc_route_table_info.py \ No newline at end of file diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py new file mode 100644 index 00000000000..2ad8b73f499 --- /dev/null +++ b/ec2_vpc_route_table_info.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_route_table_info +short_description: Gather information about ec2 VPC route tables in AWS +description: + - Gather information about ec2 VPC route tables in AWS + - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change. +author: "Rob White (@wimnat)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. + type: dict +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all VPC route tables +- ec2_vpc_route_table_info: + +# Gather information about a particular VPC route table using route table ID +- ec2_vpc_route_table_info: + filters: + route-table-id: rtb-00112233 + +# Gather information about any VPC route table with a tag key Name and value Example +- ec2_vpc_route_table_info: + filters: + "tag:Name": Example + +# Gather information about any VPC route table within VPC with ID vpc-abcdef00 +- ec2_vpc_route_table_info: + filters: + vpc-id: vpc-abcdef00 + +''' + +try: + import boto.vpc + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_route_table_info(route_table): + + # Add any routes to array + routes = [] + associations = [] + for route in route_table.routes: + routes.append(route.__dict__) + for association in route_table.associations: + associations.append(association.__dict__) + + route_table_info = {'id': route_table.id, + 'routes': routes, + 'associations': associations, + 'tags': route_table.tags, + 'vpc_id': route_table.vpc_id + } + + return route_table_info + + +def list_ec2_vpc_route_tables(connection, module): + + filters = module.params.get("filters") + route_table_dict_array = [] + + try: + all_route_tables = connection.get_all_route_tables(filters=filters) + except BotoServerError as e: + module.fail_json(msg=e.message) + + for route_table in all_route_tables: + route_table_dict_array.append(get_route_table_info(route_table)) + + module.exit_json(route_tables=route_table_dict_array) + + +def main(): + 
argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters=dict(default=None, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + if module._name == 'ec2_vpc_route_table_facts': + module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", version='2.13') + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_ec2_vpc_route_tables(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py new file mode 100644 index 00000000000..a8ba032058c --- /dev/null +++ b/ec2_vpc_vgw.py @@ -0,0 +1,580 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: ec2_vpc_vgw +short_description: Create and delete AWS VPN Virtual Gateways. +description: + - Creates AWS VPN Virtual Gateways + - Deletes AWS VPN Virtual Gateways + - Attaches Virtual Gateways to VPCs + - Detaches Virtual Gateways from VPCs +requirements: [ boto3 ] +options: + state: + description: + - present to ensure resource is created. + - absent to remove resource + default: present + choices: [ "present", "absent"] + type: str + name: + description: + - name of the vgw to be created or deleted + type: str + type: + description: + - type of the virtual gateway to be created + choices: [ "ipsec.1" ] + default: "ipsec.1" + type: str + vpn_gateway_id: + description: + - vpn gateway id of an existing virtual gateway + type: str + vpc_id: + description: + - the vpc-id of a vpc to attach or detach + type: str + asn: + description: + - the BGP ASN of the amazon side + type: int + wait_timeout: + description: + - number of seconds to wait for status during vpc attach and detach + default: 320 + type: int + tags: + description: + - dictionary of resource tags + aliases: [ "resource_tags" ] + type: dict +author: Nick Aslanidis (@naslanidis) +extends_documentation_fragment: +- ansible.amazon.ec2 +- ansible.amazon.aws + +''' + +EXAMPLES = ''' +- name: Create a new vgw attached to a specific VPC + ec2_vpc_vgw: + state: present + region: ap-southeast-2 + profile: personal + vpc_id: vpc-12345678 + name: personal-testing + type: ipsec.1 + register: created_vgw + +- name: Create a new unattached vgw + ec2_vpc_vgw: + state: present + region: ap-southeast-2 + profile: personal + name: personal-testing + type: ipsec.1 + tags: + environment: production + owner: ABC + register: created_vgw + +- name: Remove a new vgw using the name + ec2_vpc_vgw: + state: absent + region: ap-southeast-2 + profile: personal + name: personal-testing + type: ipsec.1 + register: deleted_vgw + +- name: Remove a new vgw using the vpn_gateway_id + ec2_vpc_vgw: + state: absent + region: ap-southeast-2 + profile: personal + vpn_gateway_id: vgw-3a9aa123 + register: deleted_vgw +''' + +RETURN = ''' +result: + description: The result of the create, or delete 
action.
+  returned: success
+  type: dict
+'''
+
+import time
+import traceback
+
+try:
+    import botocore
+except ImportError:
+    pass  # handled by the HAS_BOTO3 check below, imported from module_utils
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import is_boto3_error_code
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry
+from ansible.module_utils._text import to_native
+
+
+def get_vgw_info(vgws):
+    if not isinstance(vgws, list):
+        return
+
+    for vgw in vgws:
+        vgw_info = {
+            'id': vgw['VpnGatewayId'],
+            'type': vgw['Type'],
+            'state': vgw['State'],
+            'vpc_id': None,
+            'tags': dict()
+        }
+
+        for tag in vgw['Tags']:
+            vgw_info['tags'][tag['Key']] = tag['Value']
+
+        if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
+            vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
+
+    return vgw_info
+
+
+def wait_for_status(client, module, vpn_gateway_id, status):
+    polling_increment_secs = 15
+    max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+    status_achieved = False
+    # initialise so the function cannot reference an unbound name if the
+    # retry loop never runs (e.g. a wait_timeout shorter than one poll)
+    response = None
+
+    for x in range(0, max_retries):
+        try:
+            response = find_vgw(client, module, vpn_gateway_id)
+            if response[0]['VpcAttachments'][0]['State'] == status:
+                status_achieved = True
+                break
+            else:
+                time.sleep(polling_increment_secs)
+        except botocore.exceptions.ClientError as e:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    result = response
+    return status_achieved, result
+
+
+def attach_vgw(client, module, vpn_gateway_id):
+    params = dict()
+    params['VpcId'] = module.params.get('vpc_id')
+
+    try:
+        # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
+        # as available several seconds before actually permitting a new attachment.
+        # So we catch and retry that error.
See https://github.com/ansible/ansible/issues/53185 + response = AWSRetry.jittered_backoff(retries=5, + catch_extra_error_codes=['InvalidParameterValue'] + )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, + VpcId=params['VpcId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') + if not status_achieved: + module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console') + + result = response + return result + + +def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): + params = dict() + params['VpcId'] = module.params.get('vpc_id') + + if vpc_id: + try: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') + if not status_achieved: + module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console') + + result = response + return result + + +def create_vgw(client, module): + params = dict() + params['Type'] = module.params.get('type') + if module.params.get('asn'): + params['AmazonSideAsn'] = module.params.get('asn') + + try: + response = client.create_vpn_gateway(**params) + get_waiter( + client, 'vpn_gateway_exists' + ).wait( + VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']] + ) + except botocore.exceptions.WaiterError as e: + module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']), + exception=traceback.format_exc()) + except is_boto3_error_code('VpnGatewayLimitExceeded'): + module.fail_json(msg="Too many VPN gateways exist in this account.", exception=traceback.format_exc()) + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return result + + +def delete_vgw(client, module, vpn_gateway_id): + + try: + response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + # return the deleted VpnGatewayId as this is not included in the above response + result = vpn_gateway_id + return result + + +def create_tags(client, module, vpn_gateway_id): + params = dict() + + try: + response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return result + + +def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None): + params = dict() + + if tags_to_delete: + try: + response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + response = client.delete_tags(Resources=[vpn_gateway_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), 
exception=traceback.format_exc()) + + result = response + return result + + +def load_tags(module): + tags = [] + + if module.params.get('tags'): + for name, value in module.params.get('tags').items(): + tags.append({'Key': name, 'Value': str(value)}) + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + else: + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + return tags + + +def find_tags(client, module, resource_id=None): + + if resource_id: + try: + response = client.describe_tags(Filters=[ + {'Name': 'resource-id', 'Values': [resource_id]} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return result + + +def check_tags(client, module, existing_vgw, vpn_gateway_id): + params = dict() + params['Tags'] = module.params.get('tags') + vgw = existing_vgw + changed = False + tags_list = {} + + # format tags for comparison + for tags in existing_vgw[0]['Tags']: + if tags['Key'] != 'Name': + tags_list[tags['Key']] = tags['Value'] + + # if existing tags don't match the tags arg, delete existing and recreate with new list + if params['Tags'] is not None and tags_list != params['Tags']: + delete_tags(client, module, vpn_gateway_id) + create_tags(client, module, vpn_gateway_id) + vgw = find_vgw(client, module) + changed = True + + # if no tag args are supplied, delete any existing tags with the exception of the name tag + if params['Tags'] is None and tags_list != {}: + tags_to_delete = [] + for tags in existing_vgw[0]['Tags']: + if tags['Key'] != 'Name': + tags_to_delete.append(tags) + + delete_tags(client, module, vpn_gateway_id, tags_to_delete) + vgw = find_vgw(client, module) + changed = True + + return vgw, changed + + +def find_vpc(client, module): + params = dict() + params['vpc_id'] = module.params.get('vpc_id') + + if params['vpc_id']: + try: + response = client.describe_vpcs(VpcIds=[params['vpc_id']]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + result = response + return result + + +def find_vgw(client, module, vpn_gateway_id=None): + params = dict() + if vpn_gateway_id: + params['VpnGatewayIds'] = vpn_gateway_id + else: + params['Filters'] = [ + {'Name': 'type', 'Values': [module.params.get('type')]}, + {'Name': 'tag:Name', 'Values': [module.params.get('name')]}, + ] + if module.params.get('state') == 'present': + params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) + try: + response = client.describe_vpn_gateways(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId']) + + +def ensure_vgw_present(client, module): + + # If an existing vgw name and type matches our args, then a match is considered to have been + # found and we will not create another vgw. + + changed = False + params = dict() + result = dict() + params['Name'] = module.params.get('name') + params['VpcId'] = module.params.get('vpc_id') + params['Type'] = module.params.get('type') + params['Tags'] = module.params.get('tags') + params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + + # check that the vpc_id exists. 
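Before the attachment handling below, it may help to see the decision logic in isolation. The following standalone sketch (with hypothetical IDs) captures the idempotency rules ensure_vgw_present() applies to VPC attachments; it is an illustration, not the module's code path.

    def plan_attachment(current_attachments, desired_vpc_id):
        """Return 'attach', 'swap', 'detach', or 'none' for a gateway's VPC attachment."""
        attached = [a for a in current_attachments if a.get('State') == 'attached']
        current_vpc = attached[0]['VpcId'] if attached else None
        if desired_vpc_id is None:
            return 'detach' if current_vpc else 'none'
        if current_vpc is None:
            return 'attach'
        return 'none' if current_vpc == desired_vpc_id else 'swap'

    print(plan_attachment([], 'vpc-12345678'))                                            # attach
    print(plan_attachment([{'State': 'attached', 'VpcId': 'vpc-12345678'}], None))        # detach
    print(plan_attachment([{'State': 'attached', 'VpcId': 'vpc-0other'}], 'vpc-12345678'))  # swap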
# If the VPC does not exist, find_vpc() fails the module with an error.
+    if params['VpcId']:
+        vpc = find_vpc(client, module)
+
+    # check if a gateway matching our module args already exists
+    existing_vgw = find_vgw(client, module)
+
+    if existing_vgw != []:
+        vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
+        vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
+
+        # if a vpc_id was provided, check if it exists and if it's attached
+        if params['VpcId']:
+
+            current_vpc_attachments = existing_vgw[0]['VpcAttachments']
+
+            if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
+                # the check above already guarantees the state is 'attached',
+                # so only the attached VPC ID needs to be compared here
+                if current_vpc_attachments[0]['VpcId'] != params['VpcId']:
+                    # detach the existing vpc from the virtual gateway
+                    vpc_to_detach = current_vpc_attachments[0]['VpcId']
+                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+                    get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id])
+                    attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+                    changed = True
+            else:
+                # attach the vgw to the supplied vpc
+                attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+                changed = True
+
+        # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
+        else:
+            existing_vgw = find_vgw(client, module, [vpn_gateway_id])
+
+            if existing_vgw[0]['VpcAttachments'] != []:
+                if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+                    # detach the vpc from the vgw
+                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
+                    changed = True
+
+    else:
+        # create a new vgw
+        new_vgw = create_vgw(client, module)
+        changed = True
+        vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
+
+        # tag the new virtual gateway
+        create_tags(client, module, vpn_gateway_id)
+
+        # if a vpc-id was supplied, attempt to attach it to the vgw
+        if params['VpcId']:
+            attached_vgw = attach_vgw(client, module, vpn_gateway_id)
+            changed = True
+
+    # return current state of the vgw
+    vgw = find_vgw(client, module, [vpn_gateway_id])
+    result = get_vgw_info(vgw)
+    return changed, result
+
+
+def ensure_vgw_absent(client, module):
+
+    # If an existing vgw name and type matches our args, then a match is considered to have been
+    # found and we will take steps to delete it.
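For reference, load_tags() above flattens the module's tags dict into the list-of-dicts shape the EC2 tagging APIs expect, always appending a Name tag. A minimal standalone sketch of that transformation (sample values are made up):

    def to_boto3_tag_list(tags, name):
        """Mirror load_tags(): dict -> [{'Key': ..., 'Value': ...}], plus a Name tag."""
        tag_list = [{'Key': k, 'Value': str(v)} for k, v in (tags or {}).items()]
        tag_list.append({'Key': 'Name', 'Value': name})
        return tag_list

    print(to_boto3_tag_list({'environment': 'production', 'owner': 'ABC'}, 'personal-testing'))
    # -> [{'Key': 'environment', 'Value': 'production'},
    #     {'Key': 'owner', 'Value': 'ABC'},
    #     {'Key': 'Name', 'Value': 'personal-testing'}]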
+ + changed = False + params = dict() + result = dict() + params['Name'] = module.params.get('name') + params['VpcId'] = module.params.get('vpc_id') + params['Type'] = module.params.get('type') + params['Tags'] = module.params.get('tags') + params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + + # check if a gateway matching our module args already exists + if params['VpnGatewayIds']: + existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']]) + if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted': + existing_vgw = existing_vgw_with_id + if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if params['VpcId']: + if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: + module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + + else: + # detach the vpc from the vgw + detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId']) + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + # attempt to detach any attached vpcs + vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach) + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + # no vpc's are attached so attempt to delete the vgw + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + changed = False + deleted_vgw = "Nothing to do" + + else: + # Check that a name and type argument has been supplied if no vgw-id + if not module.params.get('name') or not module.params.get('type'): + module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied') + + existing_vgw = find_vgw(client, module) + if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': + vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] + if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if params['VpcId']: + if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: + module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + + else: + # detach the vpc from the vgw + detach_vgw(client, module, vpn_gateway_id, params['VpcId']) + + # now that the vpc has been detached, delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + changed = True + + else: + # attempt to detach any attached vpcs + vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) + changed = True + + # now that the vpc has been detached, delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + + else: + # no vpc's are attached so attempt to delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + changed = True + + else: + changed = False + deleted_vgw = None + + result = deleted_vgw + return changed, result + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(), + vpn_gateway_id=dict(), + vpc_id=dict(), + asn=dict(type='int'), + wait_timeout=dict(type='int', default=320), + type=dict(default='ipsec.1', choices=['ipsec.1']), + tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + ) + ) + module 
= AnsibleModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['name']]]) + + if not HAS_BOTO3: + module.fail_json(msg='json and boto3 is required.') + + state = module.params.get('state').lower() + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Can't authorize connection - %s" % to_native(e), exception=traceback.format_exc()) + + if state == 'present': + (changed, results) = ensure_vgw_present(client, module) + else: + (changed, results) = ensure_vgw_absent(client, module) + module.exit_json(changed=changed, vgw=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_vgw_facts.py b/ec2_vpc_vgw_facts.py new file mode 120000 index 00000000000..bbcf44bef40 --- /dev/null +++ b/ec2_vpc_vgw_facts.py @@ -0,0 +1 @@ +ec2_vpc_vgw_info.py \ No newline at end of file diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py new file mode 100644 index 00000000000..a1ac5b04c7b --- /dev/null +++ b/ec2_vpc_vgw_info.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_vgw_info +short_description: Gather information about virtual gateways in AWS +description: + - Gather information about virtual gateways in AWS. + - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters. + type: dict + vpn_gateway_ids: + description: + - Get details of a specific Virtual Gateway ID. This value should be provided as a list. + type: list + elements: str +author: "Nick Aslanidis (@naslanidis)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# # Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all virtual gateways for an account or profile + ec2_vpc_vgw_info: + region: ap-southeast-2 + profile: production + register: vgw_info + +- name: Gather information about a filtered list of Virtual Gateways + ec2_vpc_vgw_info: + region: ap-southeast-2 + profile: production + filters: + "tag:Name": "main-virt-gateway" + register: vgw_info + +- name: Gather information about a specific virtual gateway by VpnGatewayIds + ec2_vpc_vgw_info: + region: ap-southeast-2 + profile: production + vpn_gateway_ids: vgw-c432f6a7 + register: vgw_info +''' + +RETURN = ''' +virtual_gateways: + description: The virtual gateways for the account. + returned: always + type: list + sample: [ + { + "state": "available", + "tags": [ + { + "key": "Name", + "value": "TEST-VGW" + } + ], + "type": "ipsec.1", + "vpc_attachments": [ + { + "state": "attached", + "vpc_id": "vpc-22a93c74" + } + ], + "vpn_gateway_id": "vgw-23e3d64e" + } + ] + +changed: + description: True if listing the virtual gateways succeeds. 
+ returned: always + type: bool + sample: "false" +''' +import traceback + +try: + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, + camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3) + + +def get_virtual_gateway_info(virtual_gateway): + virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'], + 'State': virtual_gateway['State'], + 'Type': virtual_gateway['Type'], + 'VpcAttachments': virtual_gateway['VpcAttachments'], + 'Tags': virtual_gateway.get('Tags', [])} + return virtual_gateway_info + + +def list_virtual_gateways(client, module): + params = dict() + + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + params['DryRun'] = module.check_mode + + if module.params.get("vpn_gateway_ids"): + params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids") + + try: + all_virtual_gateways = client.describe_vpn_gateways(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw)) + for vgw in all_virtual_gateways['VpnGateways']] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters=dict(type='dict', default=dict()), + vpn_gateway_ids=dict(type='list', default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_vpc_vgw_facts': + module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", version='2.13') + + # Validate Requirements + if not HAS_BOTO3: + module.fail_json(msg='json and boto3 is required.') + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Can't authorize connection - " + str(e)) + + # call your function here + results = list_virtual_gateways(connection, module) + + module.exit_json(virtual_gateways=results) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py new file mode 100644 index 00000000000..586138d3a1b --- /dev/null +++ b/ec2_vpc_vpn.py @@ -0,0 +1,780 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_vpn +short_description: Create, modify, and delete EC2 VPN connections. +description: + - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters + option or specifying the VPN connection identifier. +extends_documentation_fragment: +- ansible.amazon.ec2 +- ansible.amazon.aws + +requirements: ['boto3', 'botocore'] +author: "Sloane Hertel (@s-hertel)" +options: + state: + description: + - The desired state of the VPN connection. 
+    choices: ['present', 'absent']
+    default: present
+    required: no
+    type: str
+  customer_gateway_id:
+    description:
+      - The ID of the customer gateway.
+    type: str
+  connection_type:
+    description:
+      - The type of VPN connection.
+      - At this time only 'ipsec.1' is supported.
+    default: ipsec.1
+    type: str
+  vpn_gateway_id:
+    description:
+      - The ID of the virtual private gateway.
+    type: str
+  vpn_connection_id:
+    description:
+      - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match.
+    type: str
+  tags:
+    description:
+      - Tags to attach to the VPN connection.
+    type: dict
+  purge_tags:
+    description:
+      - Whether or not to delete VPN connection tags that are associated with the connection but not specified in the task.
+    type: bool
+    default: false
+  static_only:
+    description:
+      - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.
+    default: False
+    type: bool
+    required: no
+  tunnel_options:
+    description:
+      - An optional list object containing no more than two dict members, each of which may contain 'TunnelInsideCidr'
+        and/or 'PreSharedKey' keys with appropriate string values. AWS defaults apply for any key that is omitted.
+    required: no
+    type: list
+    elements: dict
+    suboptions:
+      TunnelInsideCidr:
+        type: str
+        description: The range of inside IP addresses for the tunnel.
+      PreSharedKey:
+        type: str
+        description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.
+  filters:
+    description:
+      - An alternative to using vpn_connection_id. If multiple matches are found, vpn_connection_id is required.
+        If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN
+        that correlates. For example, if the filter 'cidr' is ['194.168.2.0/24', '192.168.2.0/24'] and the VPN route only has the
+        destination cidr block of '192.168.2.0/24' it will be found with this filter (assuming there are not multiple
+        VPNs that are matched). As another example, if the filter 'vpn' is equal to ['vpn-ccf7e7ad', 'vpn-cb0ae2a2'] and one
+        of the VPNs is in the deleted state (it exists but is unmodifiable) while the other exists and is not deleted,
+        the latter will be found via this filter. See examples.
+    suboptions:
+      cgw-config:
+        description:
+          - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings.
+      static-routes-only:
+        description:
+          - The type of routing; true or false.
+      cidr:
+        description:
+          - The destination cidr of the VPN's route as a string or a list of those strings.
+      bgp:
+        description:
+          - The BGP ASN number associated with a BGP device. Only works if the connection is attached.
+            This filtering option is currently not working.
+      vpn:
+        description:
+          - The VPN connection id as a string or a list of those strings.
+      vgw:
+        description:
+          - The virtual private gateway as a string or a list of those strings.
+      tag-keys:
+        description:
+          - The key of a tag as a string or a list of those strings.
+      tag-values:
+        description:
+          - The value of a tag as a string or a list of those strings.
+      tags:
+        description:
+          - A dict of key value pairs.
+      cgw:
+        description:
+          - The customer gateway id as a string or a list of those strings.
+    type: dict
+  routes:
+    description:
+      - Routes to add to the connection.
+ type: list + elements: str + purge_routes: + description: + - Whether or not to delete VPN connections routes that are not specified in the task. + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 600 + type: int + required: false + delay: + description: + - The time to wait before checking operation again. in seconds. + required: false + type: int + default: 15 +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +- name: create a VPN connection + ec2_vpc_vpn: + state: present + vpn_gateway_id: vgw-XXXXXXXX + customer_gateway_id: cgw-XXXXXXXX + +- name: modify VPN connection tags + ec2_vpc_vpn: + state: present + vpn_connection_id: vpn-XXXXXXXX + tags: + Name: ansible-tag-1 + Other: ansible-tag-2 + +- name: delete a connection + ec2_vpc_vpn: + vpn_connection_id: vpn-XXXXXXXX + state: absent + +- name: modify VPN tags (identifying VPN by filters) + ec2_vpc_vpn: + state: present + filters: + cidr: 194.168.1.0/24 + tag-keys: + - Ansible + - Other + tags: + New: Tag + purge_tags: true + static_only: true + +- name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only + ec2_vpc_vpn: + state: present + filters: + vpn: vpn-XXXXXXXX + static_only: true + tunnel_options: + - + TunnelInsideCidr: '169.254.100.1/30' + - + TunnelInsideCidr: '169.254.100.5/30' + +- name: add routes and remove any preexisting ones + ec2_vpc_vpn: + state: present + filters: + vpn: vpn-XXXXXXXX + routes: + - 195.168.2.0/24 + - 196.168.2.0/24 + purge_routes: true + +- name: remove all routes + ec2_vpc_vpn: + state: present + vpn_connection_id: vpn-XXXXXXXX + routes: [] + purge_routes: true + +- name: delete a VPN identified by filters + ec2_vpc_vpn: + state: absent + filters: + tags: + Ansible: Tag +""" + +RETURN = """ +changed: + description: If the VPN connection has changed. + type: bool + returned: always + sample: + changed: true +customer_gateway_configuration: + description: The configuration of the VPN connection. + returned: I(state=present) + type: str +customer_gateway_id: + description: The customer gateway connected via the connection. + type: str + returned: I(state=present) + sample: + customer_gateway_id: cgw-1220c87b +vpn_gateway_id: + description: The virtual private gateway connected via the connection. + type: str + returned: I(state=present) + sample: + vpn_gateway_id: vgw-cb0ae2a2 +options: + description: The VPN connection options (currently only containing static_routes_only). + type: complex + returned: I(state=present) + contains: + static_routes_only: + description: If the VPN connection only allows static routes. + returned: I(state=present) + type: str + sample: + static_routes_only: true +routes: + description: The routes of the VPN connection. + type: list + returned: I(state=present) + sample: + routes: [{ + 'destination_cidr_block': '192.168.1.0/24', + 'state': 'available' + }] +state: + description: The status of the VPN connection. + type: str + returned: I(state=present) + sample: + state: available +tags: + description: The tags associated with the connection. + type: dict + returned: I(state=present) + sample: + tags: + name: ansible-test + other: tag +type: + description: The type of VPN connection (currently only ipsec.1 is available). 
+ type: str + returned: I(state=present) + sample: + type: "ipsec.1" +vgw_telemetry: + type: list + returned: I(state=present) + description: The telemetry for the VPN tunnel. + sample: + vgw_telemetry: [{ + 'outside_ip_address': 'string', + 'status': 'up', + 'last_status_change': datetime(2015, 1, 1), + 'status_message': 'string', + 'accepted_route_count': 123 + }] +vpn_connection_id: + description: The identifier for the VPN connection. + type: str + returned: I(state=present) + sample: + vpn_connection_id: vpn-781e0e19 +""" + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils._text import to_text +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + compare_aws_tags, + ansible_dict_to_boto3_tag_list, +) + +try: + from botocore.exceptions import BotoCoreError, ClientError, WaiterError +except ImportError: + pass # Handled by AnsibleAWSModule + + +class VPNConnectionException(Exception): + def __init__(self, msg, exception=None): + self.msg = msg + self.exception = exception + + +def find_connection(connection, module_params, vpn_connection_id=None): + ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, + or raise an error if there were multiple viable connections. ''' + + filters = module_params.get('filters') + + # vpn_connection_id may be provided via module option; takes precedence over any filter values + if not vpn_connection_id and module_params.get('vpn_connection_id'): + vpn_connection_id = module_params.get('vpn_connection_id') + + if not isinstance(vpn_connection_id, list) and vpn_connection_id: + vpn_connection_id = [to_text(vpn_connection_id)] + elif isinstance(vpn_connection_id, list): + vpn_connection_id = [to_text(connection) for connection in vpn_connection_id] + + formatted_filter = [] + # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier + if not vpn_connection_id: + formatted_filter = create_filter(module_params, provided_filters=filters) + + # see if there is a unique matching connection + try: + if vpn_connection_id: + existing_conn = connection.describe_vpn_connections(VpnConnectionIds=vpn_connection_id, + Filters=formatted_filter) + else: + existing_conn = connection.describe_vpn_connections(Filters=formatted_filter) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed while describing VPN connection.", + exception=e) + + return find_connection_response(connections=existing_conn) + + +def add_routes(connection, vpn_connection_id, routes_to_add): + for route in routes_to_add: + try: + connection.create_vpn_connection_route(VpnConnectionId=vpn_connection_id, + DestinationCidrBlock=route) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), + exception=e) + + +def remove_routes(connection, vpn_connection_id, routes_to_remove): + for route in routes_to_remove: + try: + connection.delete_vpn_connection_route(VpnConnectionId=vpn_connection_id, + DestinationCidrBlock=route) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), + exception=e) + + +def create_filter(module_params, provided_filters): + """ Creates a filter using the 
user-specified parameters and unmodifiable options that may have been specified in the task """
+    boto3ify_filter = {'cgw-config': 'customer-gateway-configuration',
+                       'static-routes-only': 'option.static-routes-only',
+                       'cidr': 'route.destination-cidr-block',
+                       'bgp': 'bgp-asn',
+                       'vpn': 'vpn-connection-id',
+                       'vgw': 'vpn-gateway-id',
+                       'tag-keys': 'tag-key',
+                       'tag-values': 'tag-value',
+                       'tags': 'tag',
+                       'cgw': 'customer-gateway-id'}
+
+    # unmodifiable options and their filter name counterpart
+    param_to_filter = {"customer_gateway_id": "customer-gateway-id",
+                       "vpn_gateway_id": "vpn-gateway-id",
+                       "vpn_connection_id": "vpn-connection-id"}
+
+    flat_filter_dict = {}
+    formatted_filter = []
+
+    for raw_param in dict(provided_filters):
+
+        # fix filter names to be recognized by boto3
+        if raw_param in boto3ify_filter:
+            param = boto3ify_filter[raw_param]
+            provided_filters[param] = provided_filters.pop(raw_param)
+        elif raw_param in boto3ify_filter.values():
+            # the filter was already given under its boto3 name
+            param = raw_param
+        else:
+            raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param))
+
+        # reformat filters with special formats
+        if param == 'tag':
+            for key in provided_filters[param]:
+                formatted_key = 'tag:' + key
+                if isinstance(provided_filters[param][key], list):
+                    # boto3 expects Values to be a list of strings, not the
+                    # string representation of a list
+                    flat_filter_dict[formatted_key] = [str(v) for v in provided_filters[param][key]]
+                else:
+                    flat_filter_dict[formatted_key] = [str(provided_filters[param][key])]
+        elif param == 'option.static-routes-only':
+            flat_filter_dict[param] = [str(provided_filters[param]).lower()]
+        else:
+            if isinstance(provided_filters[param], list):
+                flat_filter_dict[param] = provided_filters[param]
+            else:
+                flat_filter_dict[param] = [str(provided_filters[param])]
+
+    # if customer_gateway, vpn_gateway, or vpn_connection was specified in the task but not the filter, add it
+    for param in param_to_filter:
+        if param_to_filter[param] not in flat_filter_dict and module_params.get(param):
+            flat_filter_dict[param_to_filter[param]] = [module_params.get(param)]
+
+    # change the flat dict into something boto3 will understand
+    formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()]
+
+    return formatted_filter
+
+
+def find_connection_response(connections=None):
+    """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
+    returns None if the connection does not exist, and raises an error if multiple matches are found. """
+
+    # Found no connections
+    if not connections or 'VpnConnections' not in connections:
+        return None
+
+    # Too many results
+    elif connections and len(connections['VpnConnections']) > 1:
+        viable = []
+        for each in connections['VpnConnections']:
+            # deleted connections are not modifiable
+            if each['State'] not in ("deleted", "deleting"):
+                viable.append(each)
+        if len(viable) == 1:
+            # Found one viable result; return unique match
+            return viable[0]
+        elif len(viable) == 0:
+            # Found a result but it was deleted already; since there was only one viable result create a new one
+            return None
+        else:
+            raise VPNConnectionException(msg="More than one matching VPN connection was found. 
" + "To modify or delete a VPN please specify vpn_connection_id or add filters.") + + # Found unique match + elif connections and len(connections['VpnConnections']) == 1: + # deleted connections are not modifiable + if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"): + return connections['VpnConnections'][0] + + +def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None): + """ Creates a VPN connection """ + + options = {'StaticRoutesOnly': static_only} + if tunnel_options and len(tunnel_options) <= 2: + t_opt = [] + for m in tunnel_options: + # See Boto3 docs regarding 'create_vpn_connection' + # tunnel options for allowed 'TunnelOptions' keys. + if not isinstance(m, dict): + raise TypeError("non-dict list member") + t_opt.append(m) + if t_opt: + options['TunnelOptions'] = t_opt + + if not (customer_gateway_id and vpn_gateway_id): + raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide " + "both vpn_gateway_id and customer_gateway_id.") + try: + vpn = connection.create_vpn_connection(Type=connection_type, + CustomerGatewayId=customer_gateway_id, + VpnGatewayId=vpn_gateway_id, + Options=options) + connection.get_waiter('vpn_connection_available').wait( + VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']], + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + ) + except WaiterError as e: + raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']), + exception=e) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed to create VPN connection", + exception=e) + + return vpn['VpnConnection'] + + +def delete_connection(connection, vpn_connection_id, delay, max_attempts): + """ Deletes a VPN connection """ + try: + connection.delete_vpn_connection(VpnConnectionId=vpn_connection_id) + connection.get_waiter('vpn_connection_deleted').wait( + VpnConnectionIds=[vpn_connection_id], + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + ) + except WaiterError as e: + raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), + exception=e) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), + exception=e) + + +def add_tags(connection, vpn_connection_id, add): + try: + connection.create_tags(Resources=[vpn_connection_id], + Tags=add) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), + exception=e) + + +def remove_tags(connection, vpn_connection_id, remove): + # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] + key_dict_list = [{'Key': tag} for tag in remove] + try: + connection.delete_tags(Resources=[vpn_connection_id], + Tags=key_dict_list) + except (BotoCoreError, ClientError) as e: + raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), + exception=e) + + +def check_for_update(connection, module_params, vpn_connection_id): + """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. 
""" + tags = module_params.get('tags') + routes = module_params.get('routes') + purge_tags = module_params.get('purge_tags') + purge_routes = module_params.get('purge_routes') + + vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id) + current_attrs = camel_dict_to_snake_dict(vpn_connection) + + # Initialize changes dict + changes = {'tags_to_add': [], + 'tags_to_remove': [], + 'routes_to_add': [], + 'routes_to_remove': []} + + # Get changes to tags + current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value') + tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) + changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) + # Get changes to routes + if 'Routes' in vpn_connection: + current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']] + if purge_routes: + changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes] + changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes] + + # Check if nonmodifiable attributes are attempted to be modified + for attribute in current_attrs: + if attribute in ("tags", "routes", "state"): + continue + elif attribute == 'options': + will_be = module_params.get('static_only', None) + is_now = bool(current_attrs[attribute]['static_routes_only']) + attribute = 'static_only' + elif attribute == 'type': + will_be = module_params.get("connection_type", None) + is_now = current_attrs[attribute] + else: + is_now = current_attrs[attribute] + will_be = module_params.get(attribute, None) + + if will_be is not None and to_text(will_be) != to_text(is_now): + raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " + "connection attributes are tags and routes. The value you tried to change it to " + "is {2}.".format(attribute, is_now, will_be)) + + return changes + + +def make_changes(connection, vpn_connection_id, changes): + """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', + the values of which are lists (generated by check_for_update()). 
+ """ + changed = False + + if changes['tags_to_add']: + changed = True + add_tags(connection, vpn_connection_id, changes['tags_to_add']) + + if changes['tags_to_remove']: + changed = True + remove_tags(connection, vpn_connection_id, changes['tags_to_remove']) + + if changes['routes_to_add']: + changed = True + add_routes(connection, vpn_connection_id, changes['routes_to_add']) + + if changes['routes_to_remove']: + changed = True + remove_routes(connection, vpn_connection_id, changes['routes_to_remove']) + + return changed + + +def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None): + """ Returns the changes that would be made to a VPN Connection """ + state = module_params.get('state') + if state == 'absent': + if vpn_connection_id: + return True, {} + else: + return False, {} + + changed = False + results = {'customer_gateway_configuration': '', + 'customer_gateway_id': module_params.get('customer_gateway_id'), + 'vpn_gateway_id': module_params.get('vpn_gateway_id'), + 'options': {'static_routes_only': module_params.get('static_only')}, + 'routes': [module_params.get('routes')]} + + # get combined current tags and tags to set + present_tags = module_params.get('tags') + if current_state and 'Tags' in current_state: + current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags']) + if module_params.get('purge_tags'): + if current_tags != present_tags: + changed = True + elif current_tags != present_tags: + if not set(present_tags.keys()) < set(current_tags.keys()): + changed = True + # add preexisting tags that new tags didn't overwrite + present_tags.update((tag, current_tags[tag]) for tag in current_tags if tag not in present_tags) + elif current_tags.keys() == present_tags.keys() and set(present_tags.values()) != set(current_tags.values()): + changed = True + elif module_params.get('tags'): + changed = True + if present_tags: + results['tags'] = present_tags + + # get combined current routes and routes to add + present_routes = module_params.get('routes') + if current_state and 'Routes' in current_state: + current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']] + if module_params.get('purge_routes'): + if set(current_routes) != set(present_routes): + changed = True + elif set(present_routes) != set(current_routes): + if not set(present_routes) < set(current_routes): + changed = True + present_routes.extend([route for route in current_routes if route not in present_routes]) + elif module_params.get('routes'): + changed = True + results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] + + # return the vpn_connection_id if it's known + if vpn_connection_id: + results['vpn_connection_id'] = vpn_connection_id + else: + changed = True + results['vpn_connection_id'] = 'vpn-XXXXXXXX' + + return changed, results + + +def ensure_present(connection, module_params, check_mode=False): + """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """ + vpn_connection = find_connection(connection, module_params) + changed = False + delay = module_params.get('delay') + max_attempts = module_params.get('wait_timeout') // delay + + # No match but vpn_connection_id was specified. + if not vpn_connection and module_params.get('vpn_connection_id'): + raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?") + + # Unique match was found. Check if attributes provided differ. 
+ elif vpn_connection: + vpn_connection_id = vpn_connection['VpnConnectionId'] + # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove + changes = check_for_update(connection, module_params, vpn_connection_id) + if check_mode: + return get_check_mode_results(connection, module_params, vpn_connection_id, current_state=vpn_connection) + changed = make_changes(connection, vpn_connection_id, changes) + + # No match was found. Create and tag a connection and add routes. + else: + changed = True + if check_mode: + return get_check_mode_results(connection, module_params) + vpn_connection = create_connection(connection, + customer_gateway_id=module_params.get('customer_gateway_id'), + static_only=module_params.get('static_only'), + vpn_gateway_id=module_params.get('vpn_gateway_id'), + connection_type=module_params.get('connection_type'), + tunnel_options=module_params.get('tunnel_options'), + max_attempts=max_attempts, + delay=delay) + changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId']) + make_changes(connection, vpn_connection['VpnConnectionId'], changes) + + # get latest version if a change has been made and make tags output nice before returning it + if vpn_connection: + vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId']) + if 'Tags' in vpn_connection: + vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags']) + + return changed, vpn_connection + + +def ensure_absent(connection, module_params, check_mode=False): + """ Deletes a VPN connection if it exists. """ + vpn_connection = find_connection(connection, module_params) + + if check_mode: + return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None) + + delay = module_params.get('delay') + max_attempts = module_params.get('wait_timeout') // delay + + if vpn_connection: + delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts) + changed = True + else: + changed = False + + return changed, {} + + +def main(): + argument_spec = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + filters=dict(type='dict', default={}), + vpn_gateway_id=dict(type='str'), + tags=dict(default={}, type='dict'), + connection_type=dict(default='ipsec.1', type='str'), + tunnel_options=dict(no_log=True, type='list', default=[]), + static_only=dict(default=False, type='bool'), + customer_gateway_id=dict(type='str'), + vpn_connection_id=dict(type='str'), + purge_tags=dict(type='bool', default=False), + routes=dict(type='list', default=[]), + purge_routes=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=600), + delay=dict(type='int', default=15), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + connection = module.client('ec2') + + state = module.params.get('state') + parameters = dict(module.params) + + try: + if state == 'present': + changed, response = ensure_present(connection, parameters, module.check_mode) + elif state == 'absent': + changed, response = ensure_absent(connection, parameters, module.check_mode) + except VPNConnectionException as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.msg) + else: + module.fail_json(msg=e.msg) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + +if __name__ == '__main__': + main() diff --git a/ec2_vpc_vpn_facts.py 
b/ec2_vpc_vpn_facts.py
new file mode 120000
index 00000000000..671a1a30341
--- /dev/null
+++ b/ec2_vpc_vpn_facts.py
@@ -0,0 +1 @@
+ec2_vpc_vpn_info.py
\ No newline at end of file
diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py
new file mode 100644
index 00000000000..fa7a1ea7973
--- /dev/null
+++ b/ec2_vpc_vpn_info.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_vpn_info
+short_description: Gather information about VPN Connections in AWS
+description:
+  - Gather information about VPN Connections in AWS.
+  - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Madhura Naniwadekar (@Madhura-CSI)
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
+    required: false
+    type: dict
+  vpn_connection_ids:
+    description:
+      - Get details of specific VPN connections by ID. This value should be provided as a list.
+    required: false
+    type: list
+    elements: str
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Gather information about all vpn connections
+  ec2_vpc_vpn_info:
+
+- name: Gather information about a filtered list of vpn connections, based on tags
+  ec2_vpc_vpn_info:
+    filters:
+      "tag:Name": test-connection
+  register: vpn_conn_info
+
+- name: Gather information about vpn connections attached to a specific virtual private gateway
+  ec2_vpc_vpn_info:
+    filters:
+      vpn-gateway-id: vgw-cbe66beb
+  register: vpn_conn_info
+'''
+
+RETURN = '''
+vpn_connections:
+  description: List of one or more VPN Connections.
+  returned: always
+  type: complex
+  contains:
+    category:
+      description: The category of the VPN connection.
+      returned: always
+      type: str
+      sample: VPN
+    customer_gateway_configuration:
+      description: The configuration information for the VPN connection's customer gateway (in the native XML format).
+      returned: always
+      type: str
+    customer_gateway_id:
+      description: The ID of the customer gateway at your end of the VPN connection.
+      returned: always
+      type: str
+      sample: cgw-17a53c37
+    options:
+      description: The VPN connection options.
+      returned: always
+      type: dict
+      sample: {
+                "static_routes_only": false
+              }
+    routes:
+      description: List of static routes associated with the VPN connection.
+      returned: always
+      type: complex
+      contains:
+        destination_cidr_block:
+          description: The CIDR block associated with the local subnet of the customer data center.
+          returned: always
+          type: str
+          sample: 10.0.0.0/16
+        state:
+          description: The current state of the static route.
+          returned: always
+          type: str
+          sample: available
+    state:
+      description: The current state of the VPN connection.
+      returned: always
+      type: str
+      sample: available
+    tags:
+      description: Any tags assigned to the VPN connection.
+      returned: always
+      type: dict
+      sample: {
+                "Name": "test-conn"
+              }
+    type:
+      description: The type of VPN connection.
+      returned: always
+      type: str
+      sample: ipsec.1
+    vgw_telemetry:
+      description: Information about the VPN tunnel.
+      returned: always
+      type: complex
+      contains:
+        accepted_route_count:
+          description: The number of accepted routes.
+          returned: always
+          type: int
+          sample: 0
+        last_status_change:
+          description: The date and time of the last change in status.
+          returned: always
+          type: str
+          sample: "2018-02-09T14:35:27+00:00"
+        outside_ip_address:
+          description: The Internet-routable IP address of the virtual private gateway's outside interface.
+          returned: always
+          type: str
+          sample: 13.127.79.191
+        status:
+          description: The status of the VPN tunnel.
+          returned: always
+          type: str
+          sample: DOWN
+        status_message:
+          description: If an error occurs, a description of the error.
+          returned: always
+          type: str
+          sample: IPSEC IS DOWN
+        certificate_arn:
+          description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
+          returned: when a private certificate is used for authentication
+          type: str
+          sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
+    vpn_connection_id:
+      description: The ID of the VPN connection.
+      returned: always
+      type: str
+      sample: vpn-f700d5c0
+    vpn_gateway_id:
+      description: The ID of the virtual private gateway at the AWS side of the VPN connection.
+      returned: always
+      type: str
+      sample: vgw-cbe56bfb
+'''
+
+import json
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+
+
+def date_handler(obj):
+    return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+
+
+def list_vpn_connections(connection, module):
+    params = dict()
+
+    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
+
+    try:
+        result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
+    except ValueError as e:
+        module.fail_json_aws(e, msg="Cannot validate JSON data")
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Could not describe VPN connections")
+    snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
+    if snaked_vpn_connections:
+        for vpn_connection in snaked_vpn_connections:
+            vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
+    module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
+
+
+def main():
+
+    argument_spec = dict(
+        vpn_connection_ids=dict(default=[], type='list', elements='str'),
+        filters=dict(default={}, type='dict')
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              mutually_exclusive=[['vpn_connection_ids', 'filters']],
+                              supports_check_mode=True)
+    if module._module._name == 'ec2_vpc_vpn_facts':
+        module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", version='2.13')
+
+    connection = module.client('ec2')
+
+    list_vpn_connections(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ec2_win_password.py b/ec2_win_password.py
new file mode 100644
index 00000000000..ccc9ea1fb90
--- /dev/null
+++ b/ec2_win_password.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_win_password
+short_description: Gets the default administrator password for EC2 Windows instances
+description:
+    - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
+    - This module has a dependency on python-boto.
+author: "Rick Mendes (@rickmendes)"
+options:
+  instance_id:
+    description:
+      - The instance id to get the password data from.
+    required: true
+    type: str
+  key_file:
+    description:
+      - Path to the file containing the key pair used on the instance.
+      - Conflicts with I(key_data).
+    required: false
+    type: path
+  key_data:
+    description:
+      - The private key (usually stored in vault).
+      - Conflicts with I(key_file).
+    required: false
+    type: str
+  key_passphrase:
+    description:
+      - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
+        convert your password protected keys if they do not use DES or 3DES, for example C(openssl rsa -in current_key -out new_key -des3).
+    type: str
+  wait:
+    description:
+      - Whether or not to wait for the password to be available before returning.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - Number of seconds to wait before giving up.
+    default: 120
+    type: int
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+
+requirements:
+    - cryptography
+
+notes:
+    - As of Ansible 2.4, this module requires the python cryptography module rather than the
+      older pycrypto module.
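+    - The password data returned by EC2 is base64 encoded and encrypted with the public key of
+      the instance's key pair; this module base64-decodes it and decrypts it locally with the
+      matching private key using PKCS#1 v1.5 padding.
+    - If the password is not yet available, or cannot be decrypted with the supplied key, an
+      empty I(win_password) is returned with C(changed=false).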
+''' + +EXAMPLES = ''' +# Example of getting a password +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_test_key.pem" + +# Example of getting a password using a variable +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_data: "{{ ec2_private_key }}" + +# Example of getting a password with a password protected key +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_protected_test_key.pem" + key_passphrase: "secret" + +# Example of waiting for a password +- name: get the Administrator password + ec2_win_password: + profile: my-boto-profile + instance_id: i-XXXXXX + region: us-east-1 + key_file: "~/aws-creds/my_test_key.pem" + wait: yes + wait_timeout: 45 +''' + +import datetime +import time +from base64 import b64decode + +try: + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 + from cryptography.hazmat.primitives.serialization import load_pem_private_key + HAS_CRYPTOGRAPHY = True +except ImportError: + HAS_CRYPTOGRAPHY = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect +from ansible.module_utils._text import to_bytes + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + instance_id=dict(required=True), + key_file=dict(required=False, default=None, type='path'), + key_passphrase=dict(no_log=True, default=None, required=False), + key_data=dict(no_log=True, default=None, required=False), + wait=dict(type='bool', default=False, required=False), + wait_timeout=dict(default=120, required=False, type='int'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='Boto required for this module.') + + if not HAS_CRYPTOGRAPHY: + module.fail_json(msg='cryptography package required for this module.') + + instance_id = module.params.get('instance_id') + key_file = module.params.get('key_file') + key_data = module.params.get('key_data') + if module.params.get('key_passphrase') is None: + b_key_passphrase = None + else: + b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + ec2 = ec2_connect(module) + + if wait: + start = datetime.datetime.now() + end = start + datetime.timedelta(seconds=wait_timeout) + + while datetime.datetime.now() < end: + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + if not decoded: + time.sleep(5) + else: + break + else: + data = ec2.get_password_data(instance_id) + decoded = b64decode(data) + + if wait and datetime.datetime.now() >= end: + module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout) + + if key_file is not None and key_data is None: + try: + with open(key_file, 'rb') as f: + key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) + except IOError as e: + # Handle bad files + module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror)) + except (ValueError, TypeError) as e: + # Handle issues loading key + module.fail_json(msg="unable to parse key file") + elif 
key_data is not None and key_file is None:
+        try:
+            # load_pem_private_key() expects bytes, so encode the vault-supplied text first
+            key = load_pem_private_key(to_bytes(key_data, errors='surrogate_or_strict'), b_key_passphrase, default_backend())
+        except (ValueError, TypeError) as e:
+            module.fail_json(msg="unable to parse key data")
+
+    try:
+        decrypted = key.decrypt(decoded, PKCS1v15())
+    except ValueError as e:
+        decrypted = None
+
+    if decrypted is None:
+        module.exit_json(win_password='', changed=False)
+    else:
+        if wait:
+            elapsed = datetime.datetime.now() - start
+            module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
+        else:
+            module.exit_json(win_password=decrypted, changed=True)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ecs_attribute.py b/ecs_attribute.py
new file mode 100644
index 00000000000..7bec7343b64
--- /dev/null
+++ b/ecs_attribute.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ecs_attribute
+short_description: Manage ECS attributes
+description:
+    - Create, update or delete ECS container instance attributes.
+author: Andrej Svenke (@anryko)
+requirements: [ botocore, boto3 ]
+options:
+    cluster:
+        description:
+            - The short name or full Amazon Resource Name (ARN) of the cluster
+              that contains the resource to apply attributes.
+        required: true
+        type: str
+    state:
+        description:
+            - The desired state of the attributes.
+        required: false
+        default: present
+        choices: ['present', 'absent']
+        type: str
+    attributes:
+        description:
+            - List of attributes.
+        required: true
+        type: list
+        elements: dict
+        suboptions:
+            name:
+                description:
+                    - The name of the attribute. Up to 128 letters (uppercase and lowercase),
+                      numbers, hyphens, underscores, and periods are allowed.
+                required: true
+                type: str
+            value:
+                description:
+                    - The value of the attribute. Up to 128 letters (uppercase and lowercase),
+                      numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons,
+                      and spaces are allowed.
+                required: false
+                type: str
+    ec2_instance_id:
+        description:
+            - EC2 instance ID of ECS cluster container instance.
+        required: true
+        type: str
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
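+# Attributes may be supplied either as single-entry dicts ({name: value}) or as bare strings
+# (an attribute with no value), as shown below.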
+ +# Set attributes +- ecs_attribute: + state: present + cluster: test-cluster + ec2_instance_id: "{{ ec2_id }}" + attributes: + - flavor: test + - migrated + delegate_to: localhost + +# Delete attributes +- ecs_attribute: + state: absent + cluster: test-cluster + ec2_instance_id: "{{ ec2_id }}" + attributes: + - flavor: test + - migrated + delegate_to: localhost +''' + +RETURN = ''' +attributes: + description: attributes + type: complex + returned: always + contains: + cluster: + description: cluster name + type: str + ec2_instance_id: + description: ec2 instance id of ecs container instance + type: str + attributes: + description: list of attributes + type: list + elements: dict + contains: + name: + description: name of the attribute + type: str + value: + description: value of the attribute + returned: if present + type: str +''' + +try: + import boto3 + from botocore.exceptions import ClientError, EndpointConnectionError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +class EcsAttributes(object): + """Handles ECS Cluster Attribute""" + + def __init__(self, module, attributes): + self.module = module + self.attributes = attributes if self._validate_attrs(attributes) else self._parse_attrs(attributes) + + def __bool__(self): + return bool(self.attributes) + + __nonzero__ = __bool__ + + def __iter__(self): + return iter(self.attributes) + + @staticmethod + def _validate_attrs(attrs): + return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs) + + def _parse_attrs(self, attrs): + attrs_parsed = [] + for attr in attrs: + if isinstance(attr, dict): + if len(attr) != 1: + self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) + name, value = list(attr.items())[0] + attrs_parsed.append({'name': name, 'value': value}) + elif isinstance(attr, str): + attrs_parsed.append({'name': attr, 'value': None}) + else: + self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) + + return attrs_parsed + + def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False): + attr_obj = {'targetType': 'container-instance', + 'targetId': ecs_arn, + 'name': name} + if not skip_value and value is not None: + attr_obj['value'] = value + + return attr_obj + + def get_for_ecs_arn(self, ecs_arn, skip_value=False): + """ + Returns list of attribute dicts ready to be passed to boto3 + attributes put/delete methods. + """ + return [self._setup_attr_obj(ecs_arn, skip_value=skip_value, **attr) for attr in self.attributes] + + def diff(self, attrs): + """ + Returns EcsAttributes Object containing attributes which are present + in self but are absent in passed attrs (EcsAttributes Object). 
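+
+        For example (hypothetical values), diffing [{'name': 'flavor', 'value': 'test'}] against
+        an EcsAttributes holding [{'name': 'flavor', 'value': 'prod'}] returns an EcsAttributes
+        containing the 'test' entry, because membership requires an exact name/value match.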
+ """ + attrs_diff = [attr for attr in self.attributes if attr not in attrs] + return EcsAttributes(self.module, attrs_diff) + + +class Ec2EcsInstance(object): + """Handle ECS Cluster Remote Operations""" + + def __init__(self, module, cluster, ec2_id): + self.module = module + self.cluster = cluster + self.ec2_id = ec2_id + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg=("Region must be specified as a parameter," + " in EC2_REGION or AWS_REGION environment" + " variables or in boto configuration file")) + self.ecs = boto3_conn(module, conn_type='client', resource='ecs', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + + self.ecs_arn = self._get_ecs_arn() + + def _get_ecs_arn(self): + try: + ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns'] + ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster, + containerInstances=ecs_instances_arns)['containerInstances'] + except (ClientError, EndpointConnectionError) as e: + self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + + try: + ecs_arn = next(inst for inst in ec2_instances + if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn'] + except StopIteration: + self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster)) + + return ecs_arn + + def attrs_put(self, attrs): + """Puts attributes on ECS container instance""" + try: + self.ecs.put_attributes(cluster=self.cluster, + attributes=attrs.get_for_ecs_arn(self.ecs_arn)) + except ClientError as e: + self.module.fail_json(msg=str(e)) + + def attrs_delete(self, attrs): + """Deletes attributes from ECS container instance.""" + try: + self.ecs.delete_attributes(cluster=self.cluster, + attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True)) + except ClientError as e: + self.module.fail_json(msg=str(e)) + + def attrs_get_by_name(self, attrs): + """ + Returns EcsAttributes object containing attributes from ECS container instance with names + matching to attrs.attributes (EcsAttributes Object). 
+ """ + attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']} + for attr in attrs] + + try: + matched_ecs_targets = [attr_found for attr_obj in attr_objs + for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']] + except ClientError as e: + self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + + matched_objs = [target for target in matched_ecs_targets + if target['targetId'] == self.ecs_arn] + + results = [{'name': match['name'], 'value': match.get('value', None)} + for match in matched_objs] + + return EcsAttributes(self.module, results) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + cluster=dict(required=True, type='str'), + ec2_instance_id=dict(required=True, type='str'), + attributes=dict(required=True, type='list'), + )) + + required_together = [['cluster', 'ec2_instance_id', 'attributes']] + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, + required_together=required_together) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + cluster = module.params['cluster'] + ec2_instance_id = module.params['ec2_instance_id'] + attributes = module.params['attributes'] + + conti = Ec2EcsInstance(module, cluster, ec2_instance_id) + attrs = EcsAttributes(module, attributes) + + results = {'changed': False, + 'attributes': [ + {'cluster': cluster, + 'ec2_instance_id': ec2_instance_id, + 'attributes': attributes} + ]} + + attrs_present = conti.attrs_get_by_name(attrs) + + if module.params['state'] == 'present': + attrs_diff = attrs.diff(attrs_present) + if not attrs_diff: + module.exit_json(**results) + + conti.attrs_put(attrs_diff) + results['changed'] = True + + elif module.params['state'] == 'absent': + if not attrs_present: + module.exit_json(**results) + + conti.attrs_delete(attrs_present) + results['changed'] = True + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ecs_cluster.py b/ecs_cluster.py new file mode 100644 index 00000000000..25937369ec9 --- /dev/null +++ b/ecs_cluster.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_cluster +short_description: Create or terminate ECS clusters. +notes: + - When deleting a cluster, the information returned is the state of the cluster prior to deletion. + - It will also wait for a cluster to have instances registered to it. +description: + - Creates or terminates ecs clusters. +author: Mark Chance (@Java1Guy) +requirements: [ boto3 ] +options: + state: + description: + - The desired state of the cluster. + required: true + choices: ['present', 'absent', 'has_instances'] + type: str + name: + description: + - The cluster name. + required: true + type: str + delay: + description: + - Number of seconds to wait. + required: false + type: int + default: 10 + repeat: + description: + - The number of times to wait for the cluster to have an instance. 
+ required: false + type: int + default: 10 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Cluster creation +- ecs_cluster: + name: default + state: present + +# Cluster deletion +- ecs_cluster: + name: default + state: absent + +- name: Wait for register + ecs_cluster: + name: "{{ new_cluster }}" + state: has_instances + delay: 10 + repeat: 10 + register: task_output + +''' +RETURN = ''' +activeServicesCount: + description: how many services are active in this cluster + returned: 0 if a new cluster + type: int +clusterArn: + description: the ARN of the cluster just created + type: str + returned: 0 if a new cluster + sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok +clusterName: + description: name of the cluster just created (should match the input argument) + type: str + returned: always + sample: test-cluster-mfshcdok +pendingTasksCount: + description: how many tasks are waiting to run in this cluster + returned: 0 if a new cluster + type: int +registeredContainerInstancesCount: + description: how many container instances are available in this cluster + returned: 0 if a new cluster + type: int +runningTasksCount: + description: how many tasks are running in this cluster + returned: 0 if a new cluster + type: int +status: + description: the status of the new cluster + returned: always + type: str + sample: ACTIVE +''' +import time + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +class EcsClusterManager: + """Handles ECS Clusters""" + + def __init__(self, module): + self.module = module + + # self.ecs = boto3.client('ecs') + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + self.ecs = boto3_conn(module, conn_type='client', resource='ecs', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + + def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'): + for c in array_of_clusters: + if c[field_name].endswith(cluster_name): + return c + return None + + def describe_cluster(self, cluster_name): + response = self.ecs.describe_clusters(clusters=[ + cluster_name + ]) + if len(response['failures']) > 0: + c = self.find_in_array(response['failures'], cluster_name, 'arn') + if c and c['reason'] == 'MISSING': + return None + # fall thru and look through found ones + if len(response['clusters']) > 0: + c = self.find_in_array(response['clusters'], cluster_name) + if c: + return c + raise Exception("Unknown problem describing cluster %s." 
% cluster_name)
+
+    def create_cluster(self, clusterName='default'):
+        response = self.ecs.create_cluster(clusterName=clusterName)
+        return response['cluster']
+
+    def delete_cluster(self, clusterName):
+        return self.ecs.delete_cluster(cluster=clusterName)
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=True, choices=['present', 'absent', 'has_instances']),
+        name=dict(required=True, type='str'),
+        delay=dict(required=False, type='int', default=10),
+        repeat=dict(required=False, type='int', default=10)
+    ))
+    required_together = [['state', 'name']]
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    cluster_mgr = EcsClusterManager(module)
+    try:
+        existing = cluster_mgr.describe_cluster(module.params['name'])
+    except Exception as e:
+        module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
+
+    results = dict(changed=False)
+    if module.params['state'] == 'present':
+        if existing and 'status' in existing and existing['status'] == "ACTIVE":
+            results['cluster'] = existing
+        else:
+            if not module.check_mode:
+                # doesn't exist. create it.
+                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
+            results['changed'] = True
+
+    # delete the cluster
+    elif module.params['state'] == 'absent':
+        if not existing:
+            pass
+        else:
+            # it exists, so we should delete it and mark changed.
+            # return info about the cluster deleted
+            results['cluster'] = existing
+            if 'status' in existing and existing['status'] == "INACTIVE":
+                results['changed'] = False
+            else:
+                if not module.check_mode:
+                    cluster_mgr.delete_cluster(module.params['name'])
+                results['changed'] = True
+    elif module.params['state'] == 'has_instances':
+        if not existing:
+            module.fail_json(msg="Cluster '" + module.params['name'] + "' not found.")
+            return
+        # Poll the cluster until at least one container instance has registered.
+        delay = module.params['delay']
+        repeat = module.params['repeat']
+        time.sleep(delay)
+        count = 0
+        for i in range(repeat):
+            existing = cluster_mgr.describe_cluster(module.params['name'])
+            count = existing['registeredContainerInstancesCount']
+            if count > 0:
+                results['changed'] = True
+                break
+            time.sleep(delay)
+        if count == 0 and i == repeat - 1:
+            module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+            return
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ecs_ecr.py b/ecs_ecr.py
new file mode 100644
index 00000000000..7989d404dd2
--- /dev/null
+++ b/ecs_ecr.py
@@ -0,0 +1,528 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ecs_ecr
+short_description: Manage Elastic Container Registry repositories
+description:
+    - Manage Elastic Container Registry repositories.
+requirements: [ boto3 ]
+options:
+    name:
+        description:
+            - The name of the repository.
+        required: true
+        type: str
+    registry_id:
+        description:
+            - AWS account id associated with the registry.
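+            - When creating a repository this must match the account id of the credentials in use;
+              the module fails rather than create a repository in another account's registry.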
+ - If not specified, the default registry is assumed. + required: false + type: str + policy: + description: + - JSON or dict that represents the new policy. + required: false + type: json + force_set_policy: + description: + - If I(force_set_policy=false), it prevents setting a policy that would prevent you from + setting another policy in the future. + required: false + default: false + type: bool + purge_policy: + description: + - If yes, remove the policy from the repository. + - Alias C(delete_policy) has been deprecated and will be removed in Ansible 2.14 + required: false + default: false + type: bool + aliases: [ delete_policy ] + image_tag_mutability: + description: + - Configure whether repository should be mutable (ie. an already existing tag can be overwritten) or not. + required: false + choices: [mutable, immutable] + default: 'mutable' + type: str + lifecycle_policy: + description: + - JSON or dict that represents the new lifecycle policy + required: false + type: json + purge_lifecycle_policy: + description: + - if yes, remove the lifecycle policy from the repository + required: false + default: false + type: bool + state: + description: + - Create or destroy the repository. + required: false + choices: [present, absent] + default: 'present' + type: str +author: + - David M. Lee (@leedm777) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# If the repository does not exist, it is created. If it does exist, would not +# affect any policies already on it. +- name: ecr-repo + ecs_ecr: name=super/cool + +- name: destroy-ecr-repo + ecs_ecr: name=old/busted state=absent + +- name: Cross account ecr-repo + ecs_ecr: registry_id=999999999999 name=cross/account + +- name: set-policy as object + ecs_ecr: + name: needs-policy-object + policy: + Version: '2008-10-17' + Statement: + - Sid: read-only + Effect: Allow + Principal: + AWS: '{{ read_only_arn }}' + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + +- name: set-policy as string + ecs_ecr: + name: needs-policy-string + policy: "{{ lookup('template', 'policy.json.j2') }}" + +- name: delete-policy + ecs_ecr: + name: needs-no-policy + purge_policy: yes + +- name: create immutable ecr-repo + ecs_ecr: + name: super/cool + image_tag_mutability: immutable + +- name: set-lifecycle-policy + ecs_ecr: + name: needs-lifecycle-policy + lifecycle_policy: + rules: + - rulePriority: 1 + description: new policy + selection: + tagStatus: untagged + countType: sinceImagePushed + countUnit: days + countNumber: 365 + action: + type: expire + +- name: purge-lifecycle-policy + ecs_ecr: + name: needs-no-lifecycle-policy + purge_lifecycle_policy: true +''' + +RETURN = ''' +state: + type: str + description: The asserted state of the repository (present, absent) + returned: always +created: + type: bool + description: If true, the repository was created + returned: always +name: + type: str + description: The name of the repository + returned: "when state == 'absent'" +repository: + type: dict + description: The created or updated repository + returned: "when state == 'present'" + sample: + createdAt: '2017-01-17T08:41:32-06:00' + registryId: '999999999999' + repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090 + repositoryName: ecr-test-1484664090 + repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 +''' + +import json +import traceback + +try: + from botocore.exceptions import ClientError +except 
ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict +from ansible.module_utils.six import string_types + + +def build_kwargs(registry_id): + """ + Builds a kwargs dict which may contain the optional registryId. + + :param registry_id: Optional string containing the registryId. + :return: kwargs dict with registryId, if given + """ + if not registry_id: + return dict() + else: + return dict(registryId=registry_id) + + +class EcsEcr: + def __init__(self, module): + self.ecr = module.client('ecr') + self.sts = module.client('sts') + self.check_mode = module.check_mode + self.changed = False + self.skipped = False + + def get_repository(self, registry_id, name): + try: + res = self.ecr.describe_repositories( + repositoryNames=[name], **build_kwargs(registry_id)) + repos = res.get('repositories') + return repos and repos[0] + except ClientError as err: + code = err.response['Error'].get('Code', 'Unknown') + if code == 'RepositoryNotFoundException': + return None + raise + + def get_repository_policy(self, registry_id, name): + try: + res = self.ecr.get_repository_policy( + repositoryName=name, **build_kwargs(registry_id)) + text = res.get('policyText') + return text and json.loads(text) + except ClientError as err: + code = err.response['Error'].get('Code', 'Unknown') + if code == 'RepositoryPolicyNotFoundException': + return None + raise + + def create_repository(self, registry_id, name, image_tag_mutability): + if registry_id: + default_registry_id = self.sts.get_caller_identity().get('Account') + if registry_id != default_registry_id: + raise Exception('Cannot create repository in registry {0}.' 
+ 'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + + if not self.check_mode: + repo = self.ecr.create_repository( + repositoryName=name, + imageTagMutability=image_tag_mutability).get('repository') + self.changed = True + return repo + else: + self.skipped = True + return dict(repositoryName=name) + + def set_repository_policy(self, registry_id, name, policy_text, force): + if not self.check_mode: + policy = self.ecr.set_repository_policy( + repositoryName=name, + policyText=policy_text, + force=force, + **build_kwargs(registry_id)) + self.changed = True + return policy + else: + self.skipped = True + if self.get_repository(registry_id, name) is None: + printable = name + if registry_id: + printable = '{0}:{1}'.format(registry_id, name) + raise Exception( + 'could not find repository {0}'.format(printable)) + return + + def delete_repository(self, registry_id, name): + if not self.check_mode: + repo = self.ecr.delete_repository( + repositoryName=name, **build_kwargs(registry_id)) + self.changed = True + return repo + else: + repo = self.get_repository(registry_id, name) + if repo: + self.skipped = True + return repo + return None + + def delete_repository_policy(self, registry_id, name): + if not self.check_mode: + policy = self.ecr.delete_repository_policy( + repositoryName=name, **build_kwargs(registry_id)) + self.changed = True + return policy + else: + policy = self.get_repository_policy(registry_id, name) + if policy: + self.skipped = True + return policy + return None + + def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration): + repo = self.get_repository(registry_id, name) + current_mutability_configuration = repo.get('imageTagMutability') + + if current_mutability_configuration != new_mutability_configuration: + if not self.check_mode: + self.ecr.put_image_tag_mutability( + repositoryName=name, + imageTagMutability=new_mutability_configuration, + **build_kwargs(registry_id)) + else: + self.skipped = True + self.changed = True + + repo['imageTagMutability'] = new_mutability_configuration + return repo + + def get_lifecycle_policy(self, registry_id, name): + try: + res = self.ecr.get_lifecycle_policy( + repositoryName=name, **build_kwargs(registry_id)) + text = res.get('lifecyclePolicyText') + return text and json.loads(text) + except ClientError as err: + code = err.response['Error'].get('Code', 'Unknown') + if code == 'LifecyclePolicyNotFoundException': + return None + raise + + def put_lifecycle_policy(self, registry_id, name, policy_text): + if not self.check_mode: + policy = self.ecr.put_lifecycle_policy( + repositoryName=name, + lifecyclePolicyText=policy_text, + **build_kwargs(registry_id)) + self.changed = True + return policy + else: + self.skipped = True + if self.get_repository(registry_id, name) is None: + printable = name + if registry_id: + printable = '{0}:{1}'.format(registry_id, name) + raise Exception( + 'could not find repository {0}'.format(printable)) + return + + def purge_lifecycle_policy(self, registry_id, name): + if not self.check_mode: + policy = self.ecr.delete_lifecycle_policy( + repositoryName=name, **build_kwargs(registry_id)) + self.changed = True + return policy + else: + policy = self.get_lifecycle_policy(registry_id, name) + if policy: + self.skipped = True + return policy + return None + + +def sort_lists_of_strings(policy): + for statement_index in range(0, len(policy.get('Statement', []))): + for key in policy['Statement'][statement_index]: + value = 
policy['Statement'][statement_index][key] + if isinstance(value, list) and all(isinstance(item, string_types) for item in value): + policy['Statement'][statement_index][key] = sorted(value) + return policy + + +def run(ecr, params): + # type: (EcsEcr, dict, int) -> Tuple[bool, dict] + result = {} + try: + name = params['name'] + state = params['state'] + policy_text = params['policy'] + purge_policy = params['purge_policy'] + registry_id = params['registry_id'] + force_set_policy = params['force_set_policy'] + image_tag_mutability = params['image_tag_mutability'].upper() + lifecycle_policy_text = params['lifecycle_policy'] + purge_lifecycle_policy = params['purge_lifecycle_policy'] + + # Parse policies, if they are given + try: + policy = policy_text and json.loads(policy_text) + except ValueError: + result['policy'] = policy_text + result['msg'] = 'Could not parse policy' + return False, result + + try: + lifecycle_policy = \ + lifecycle_policy_text and json.loads(lifecycle_policy_text) + except ValueError: + result['lifecycle_policy'] = lifecycle_policy_text + result['msg'] = 'Could not parse lifecycle_policy' + return False, result + + result['state'] = state + result['created'] = False + + repo = ecr.get_repository(registry_id, name) + + if state == 'present': + result['created'] = False + + if not repo: + repo = ecr.create_repository(registry_id, name, image_tag_mutability) + result['changed'] = True + result['created'] = True + else: + repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) + result['repository'] = repo + + if purge_lifecycle_policy: + original_lifecycle_policy = \ + ecr.get_lifecycle_policy(registry_id, name) + + result['lifecycle_policy'] = None + + if original_lifecycle_policy: + ecr.purge_lifecycle_policy(registry_id, name) + result['changed'] = True + + elif lifecycle_policy_text is not None: + try: + lifecycle_policy = sort_json_policy_dict(lifecycle_policy) + result['lifecycle_policy'] = lifecycle_policy + + original_lifecycle_policy = ecr.get_lifecycle_policy( + registry_id, name) + + if original_lifecycle_policy: + original_lifecycle_policy = sort_json_policy_dict( + original_lifecycle_policy) + + if original_lifecycle_policy != lifecycle_policy: + ecr.put_lifecycle_policy(registry_id, name, + lifecycle_policy_text) + result['changed'] = True + except Exception: + # Some failure w/ the policy. It's helpful to know what the + # policy is. + result['lifecycle_policy'] = lifecycle_policy_text + raise + + if purge_policy: + original_policy = ecr.get_repository_policy(registry_id, name) + + result['policy'] = None + + if original_policy: + ecr.delete_repository_policy(registry_id, name) + result['changed'] = True + + elif policy_text is not None: + try: + # Sort any lists containing only string types + policy = sort_lists_of_strings(policy) + + result['policy'] = policy + + original_policy = ecr.get_repository_policy( + registry_id, name) + if original_policy: + original_policy = sort_lists_of_strings(original_policy) + + if compare_policies(original_policy, policy): + ecr.set_repository_policy( + registry_id, name, policy_text, force_set_policy) + result['changed'] = True + except Exception: + # Some failure w/ the policy. It's helpful to know what the + # policy is. 
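+                # Record the raw policy text on the result before re-raising so the failure
+                # output shows the policy that could not be applied.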
+ result['policy'] = policy_text + raise + + elif state == 'absent': + result['name'] = name + if repo: + ecr.delete_repository(registry_id, name) + result['changed'] = True + + except Exception as err: + msg = str(err) + if isinstance(err, ClientError): + msg = boto_exception(err) + result['msg'] = msg + result['exception'] = traceback.format_exc() + return False, result + + if ecr.skipped: + result['skipped'] = True + + if ecr.changed: + result['changed'] = True + + return True, result + + +def main(): + argument_spec = dict( + name=dict(required=True), + registry_id=dict(required=False), + state=dict(required=False, choices=['present', 'absent'], + default='present'), + force_set_policy=dict(required=False, type='bool', default=False), + policy=dict(required=False, type='json'), + image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], + default='mutable'), + purge_policy=dict(required=False, type='bool', aliases=['delete_policy'], + deprecated_aliases=[dict(name='delete_policy', version='2.14')]), + lifecycle_policy=dict(required=False, type='json'), + purge_lifecycle_policy=dict(required=False, type='bool') + ) + mutually_exclusive = [ + ['policy', 'purge_policy'], + ['lifecycle_policy', 'purge_lifecycle_policy']] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + + ecr = EcsEcr(module) + passed, result = run(ecr, module.params) + + if passed: + module.exit_json(**result) + else: + module.fail_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ecs_service.py b/ecs_service.py new file mode 100644 index 00000000000..23b1fcfea95 --- /dev/null +++ b/ecs_service.py @@ -0,0 +1,839 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_service +short_description: Create, terminate, start or stop a service in ECS +description: + - Creates or terminates ECS. services. +notes: + - The service role specified must be assumable. (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com) + - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html). + - An IAM role must have been previously created. +author: + - "Mark Chance (@Java1Guy)" + - "Darek Kaczynski (@kaczynskid)" + - "Stephane Maarek (@simplesteph)" + - "Zac Blazic (@zacblazic)" + +requirements: [ json, botocore, boto3 ] +options: + state: + description: + - The desired state of the service. + required: true + choices: ["present", "absent", "deleting"] + type: str + name: + description: + - The name of the service. + required: true + type: str + cluster: + description: + - The name of the cluster in which the service exists. + required: false + type: str + task_definition: + description: + - The task definition the service will run. + - This parameter is required when I(state=present). + required: false + type: str + load_balancers: + description: + - The list of ELBs defined for this service. + required: false + type: list + elements: str + desired_count: + description: + - The count of how many instances of the service. + - This parameter is required when I(state=present). 
+ required: false + type: int + client_token: + description: + - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed. + required: false + type: str + role: + description: + - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer + on your behalf. + - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc). + required: false + type: str + delay: + description: + - The time to wait before checking that the service is available. + required: false + default: 10 + type: int + repeat: + description: + - The number of times to check that the service is available. + required: false + default: 10 + type: int + force_new_deployment: + description: + - Force deployment of service even if there are no changes. + required: false + type: bool + deployment_configuration: + description: + - Optional parameters that control the deployment_configuration. + - Format is '{"maximum_percent":, "minimum_healthy_percent":} + required: false + type: dict + suboptions: + maximum_percent: + type: int + description: Upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. + minimum_healthy_percent: + type: int + description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. + placement_constraints: + description: + - The placement constraints for the tasks in the service. + required: false + type: list + elements: dict + suboptions: + placement_strategy: + description: + - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service. + required: false + type: list + elements: dict + suboptions: + type: + description: The type of placement strategy. + type: str + field: + description: The field to apply the placement strategy against. + type: str + network_configuration: + description: + - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). + - I(assign_public_ip) requires botocore >= 1.8.4 + type: dict + suboptions: + subnets: + description: + - A list of subnet IDs to associate with the task + type: list + elements: str + security_groups: + description: + - A list of security group names or group IDs to associate with the task + type: list + elements: str + assign_public_ip: + description: + - Whether the task's elastic network interface receives a public IP address. + - This option requires botocore >= 1.8.4. + type: bool + launch_type: + description: + - The launch type on which to run your service. + required: false + choices: ["EC2", "FARGATE"] + type: str + health_check_grace_period_seconds: + description: + - Seconds to wait before health checking the freshly added/updated services. + - This option requires botocore >= 1.8.20. + required: false + type: int + service_registries: + description: + - Describes service discovery registries this service will register with. 
+ type: list + elements: dict + required: false + suboptions: + container_name: + description: + - container name for service discovery registration + type: str + container_port: + description: + - container port for service discovery registration + type: int + arn: + description: + - Service discovery registry ARN + type: str + scheduling_strategy: + description: + - The scheduling strategy, defaults to "REPLICA" if not given to preserve previous behavior + required: false + choices: ["DAEMON", "REPLICA"] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Basic provisioning example +- ecs_service: + state: present + name: console-test-service + cluster: new_cluster + task_definition: 'new_cluster-task:1' + desired_count: 0 + +- name: create ECS service on VPC network + ecs_service: + state: present + name: console-test-service + cluster: new_cluster + task_definition: 'new_cluster-task:1' + desired_count: 0 + network_configuration: + subnets: + - subnet-abcd1234 + security_groups: + - sg-aaaa1111 + - my_security_group + +# Simple example to delete +- ecs_service: + name: default + state: absent + cluster: new_cluster + +# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4) +- ecs_service: + state: present + name: test-service + cluster: test-cluster + task_definition: test-task-definition + desired_count: 3 + deployment_configuration: + minimum_healthy_percent: 75 + maximum_percent: 150 + placement_constraints: + - type: memberOf + expression: 'attribute:flavor==test' + placement_strategy: + - type: binpack + field: memory +''' + +RETURN = ''' +service: + description: Details of created service. + returned: when creating a service + type: complex + contains: + clusterArn: + description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + returned: always + type: str + desiredCount: + description: The desired number of instantiations of the task definition to keep running on the service. + returned: always + type: int + loadBalancers: + description: A list of load balancer objects + returned: always + type: complex + contains: + loadBalancerName: + description: the name + returned: always + type: str + containerName: + description: The name of the container to associate with the load balancer. + returned: always + type: str + containerPort: + description: The port on the container to associate with the load balancer. + returned: always + type: int + pendingCount: + description: The number of tasks in the cluster that are in the PENDING state. + returned: always + type: int + runningCount: + description: The number of tasks in the cluster that are in the RUNNING state. + returned: always + type: int + serviceArn: + description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region + of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, + arn:aws:ecs:region :012345678910 :service/my-service . + returned: always + type: str + serviceName: + description: A user-generated string used to identify the service + returned: always + type: str + status: + description: The valid values are ACTIVE, DRAINING, or INACTIVE. 
+ returned: always + type: str + taskDefinition: + description: The ARN of a task definition to use for tasks in the service. + returned: always + type: str + deployments: + description: list of service deployments + returned: always + type: list + elements: dict + deploymentConfiguration: + description: dictionary of deploymentConfiguration + returned: always + type: complex + contains: + maximumPercent: + description: maximumPercent param + returned: always + type: int + minimumHealthyPercent: + description: minimumHealthyPercent param + returned: always + type: int + events: + description: list of service events + returned: always + type: list + elements: dict + placementConstraints: + description: List of placement constraints objects + returned: always + type: list + elements: dict + contains: + type: + description: The type of constraint. Valid values are distinctInstance and memberOf. + returned: always + type: str + expression: + description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is + distinctInstance. + returned: always + type: str + placementStrategy: + description: List of placement strategy objects + returned: always + type: list + elements: dict + contains: + type: + description: The type of placement strategy. Valid values are random, spread and binpack. + returned: always + type: str + field: + description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId + (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, + such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY. + returned: always + type: str + +ansible_facts: + description: Facts about deleted service. + returned: when deleting a service + type: complex + contains: + service: + description: Details of deleted service. + returned: when service existed and was deleted + type: complex + contains: + clusterArn: + description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + returned: always + type: str + desiredCount: + description: The desired number of instantiations of the task definition to keep running on the service. + returned: always + type: int + loadBalancers: + description: A list of load balancer objects + returned: always + type: complex + contains: + loadBalancerName: + description: the name + returned: always + type: str + containerName: + description: The name of the container to associate with the load balancer. + returned: always + type: str + containerPort: + description: The port on the container to associate with the load balancer. + returned: always + type: int + pendingCount: + description: The number of tasks in the cluster that are in the PENDING state. + returned: always + type: int + runningCount: + description: The number of tasks in the cluster that are in the RUNNING state. + returned: always + type: int + serviceArn: + description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region + of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, + arn:aws:ecs:region :012345678910 :service/my-service . 
+ returned: always + type: str + serviceName: + description: A user-generated string used to identify the service + returned: always + type: str + status: + description: The valid values are ACTIVE, DRAINING, or INACTIVE. + returned: always + type: str + taskDefinition: + description: The ARN of a task definition to use for tasks in the service. + returned: always + type: str + deployments: + description: list of service deployments + returned: always + type: list + elements: dict + deploymentConfiguration: + description: dictionary of deploymentConfiguration + returned: always + type: complex + contains: + maximumPercent: + description: maximumPercent param + returned: always + type: int + minimumHealthyPercent: + description: minimumHealthyPercent param + returned: always + type: int + events: + description: list of service events + returned: always + type: list + elements: dict + placementConstraints: + description: List of placement constraints objects + returned: always + type: list + elements: dict + contains: + type: + description: The type of constraint. Valid values are distinctInstance and memberOf. + returned: always + type: str + expression: + description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if + the constraint type is distinctInstance. + returned: always + type: str + placementStrategy: + description: List of placement strategy objects + returned: always + type: list + elements: dict + contains: + type: + description: The type of placement strategy. Valid values are random, spread and binpack. + returned: always + type: str + field: + description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId + (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, + such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY. 
+ returned: always + type: str +''' +import time + +DEPLOYMENT_CONFIGURATION_TYPE_MAP = { + 'maximum_percent': 'int', + 'minimum_healthy_percent': 'int' +} + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +class EcsServiceManager: + """Handles ECS Services""" + + def __init__(self, module): + self.module = module + self.ecs = module.client('ecs') + self.ec2 = module.client('ec2') + + def format_network_configuration(self, network_config): + result = dict() + if network_config['subnets'] is not None: + result['subnets'] = network_config['subnets'] + else: + self.module.fail_json(msg="Network configuration must include subnets") + if network_config['security_groups'] is not None: + groups = network_config['security_groups'] + if any(not sg.startswith('sg-') for sg in groups): + try: + vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't look up security groups") + result['securityGroups'] = groups + if network_config['assign_public_ip'] is not None: + if self.module.botocore_at_least('1.8.4'): + if network_config['assign_public_ip'] is True: + result['assignPublicIp'] = "ENABLED" + else: + result['assignPublicIp'] = "DISABLED" + else: + self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration') + return dict(awsvpcConfiguration=result) + + def find_in_array(self, array_of_services, service_name, field_name='serviceArn'): + for c in array_of_services: + if c[field_name].endswith(service_name): + return c + return None + + def describe_service(self, cluster_name, service_name): + response = self.ecs.describe_services( + cluster=cluster_name, + services=[service_name]) + msg = '' + if len(response['failures']) > 0: + c = self.find_in_array(response['failures'], service_name, 'arn') + msg += ", failure reason is " + c['reason'] + if c and c['reason'] == 'MISSING': + return None + # fall thru and look through found ones + if len(response['services']) > 0: + c = self.find_in_array(response['services'], service_name) + if c: + return c + raise Exception("Unknown problem describing service %s." % service_name) + + def is_matching_service(self, expected, existing): + if expected['task_definition'] != existing['taskDefinition']: + return False + + if (expected['load_balancers'] or []) != existing['loadBalancers']: + return False + + # expected is params. 
DAEMON scheduling strategy returns desired count equal to + # number of instances running; don't check desired count if scheduling strat is daemon + if (expected['scheduling_strategy'] != 'DAEMON'): + if (expected['desired_count'] or 0) != existing['desiredCount']: + return False + + return True + + def create_service(self, service_name, cluster_name, task_definition, load_balancers, + desired_count, client_token, role, deployment_configuration, + placement_constraints, placement_strategy, health_check_grace_period_seconds, + network_configuration, service_registries, launch_type, scheduling_strategy): + + params = dict( + cluster=cluster_name, + serviceName=service_name, + taskDefinition=task_definition, + loadBalancers=load_balancers, + clientToken=client_token, + role=role, + deploymentConfiguration=deployment_configuration, + placementConstraints=placement_constraints, + placementStrategy=placement_strategy + ) + if network_configuration: + params['networkConfiguration'] = network_configuration + if launch_type: + params['launchType'] = launch_type + if self.health_check_setable(params) and health_check_grace_period_seconds is not None: + params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + if service_registries: + params['serviceRegistries'] = service_registries + # desired count is not required if scheduling strategy is daemon + if desired_count is not None: + params['desiredCount'] = desired_count + + if scheduling_strategy: + params['schedulingStrategy'] = scheduling_strategy + response = self.ecs.create_service(**params) + return self.jsonize(response['service']) + + def update_service(self, service_name, cluster_name, task_definition, + desired_count, deployment_configuration, network_configuration, + health_check_grace_period_seconds, force_new_deployment): + params = dict( + cluster=cluster_name, + service=service_name, + taskDefinition=task_definition, + deploymentConfiguration=deployment_configuration) + if network_configuration: + params['networkConfiguration'] = network_configuration + if force_new_deployment: + params['forceNewDeployment'] = force_new_deployment + if health_check_grace_period_seconds is not None: + params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds + # desired count is not required if scheduling strategy is daemon + if desired_count is not None: + params['desiredCount'] = desired_count + + response = self.ecs.update_service(**params) + return self.jsonize(response['service']) + + def jsonize(self, service): + # some fields are datetime which is not JSON serializable + # make them strings + if 'createdAt' in service: + service['createdAt'] = str(service['createdAt']) + if 'deployments' in service: + for d in service['deployments']: + if 'createdAt' in d: + d['createdAt'] = str(d['createdAt']) + if 'updatedAt' in d: + d['updatedAt'] = str(d['updatedAt']) + if 'events' in service: + for e in service['events']: + if 'createdAt' in e: + e['createdAt'] = str(e['createdAt']) + return service + + def delete_service(self, service, cluster=None): + return self.ecs.delete_service(cluster=cluster, service=service) + + def ecs_api_handles_network_configuration(self): + # There doesn't seem to be a nice way to inspect botocore to look + # for attributes (and networkConfiguration is not an explicit argument + # to e.g. 
ecs.run_task, it's just passed as a keyword argument) + return self.module.botocore_at_least('1.7.44') + + def health_check_setable(self, params): + load_balancers = params.get('loadBalancers', []) + # check if botocore (and thus boto3) is new enough for using the healthCheckGracePeriodSeconds parameter + return len(load_balancers) > 0 and self.module.botocore_at_least('1.8.20') + + +def main(): + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent', 'deleting']), + name=dict(required=True, type='str'), + cluster=dict(required=False, type='str'), + task_definition=dict(required=False, type='str'), + load_balancers=dict(required=False, default=[], type='list'), + desired_count=dict(required=False, type='int'), + client_token=dict(required=False, default='', type='str'), + role=dict(required=False, default='', type='str'), + delay=dict(required=False, type='int', default=10), + repeat=dict(required=False, type='int', default=10), + force_new_deployment=dict(required=False, default=False, type='bool'), + deployment_configuration=dict(required=False, default={}, type='dict'), + placement_constraints=dict(required=False, default=[], type='list'), + placement_strategy=dict(required=False, default=[], type='list'), + health_check_grace_period_seconds=dict(required=False, type='int'), + network_configuration=dict(required=False, type='dict', options=dict( + subnets=dict(type='list'), + security_groups=dict(type='list'), + assign_public_ip=dict(type='bool') + )), + launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + service_registries=dict(required=False, type='list', default=[]), + scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True, + required_if=[('state', 'present', ['task_definition']), + ('launch_type', 'FARGATE', ['network_configuration'])], + required_together=[['load_balancers', 'role']]) + + if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': + if module.params['desired_count'] is None: + module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + + service_mgr = EcsServiceManager(module) + if module.params['network_configuration']: + if not service_mgr.ecs_api_handles_network_configuration(): + module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') + network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) + else: + network_configuration = None + + deployment_configuration = map_complex_type(module.params['deployment_configuration'], + DEPLOYMENT_CONFIGURATION_TYPE_MAP) + + deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) + serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries'])) + + try: + existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) + except Exception as e: + module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e)) + + results = dict(changed=False) + + if module.params['launch_type']: + if not module.botocore_at_least('1.8.4'): + module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type') + if module.params['force_new_deployment']: + if not module.botocore_at_least('1.8.4'): + module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use 
force_new_deployment') + if module.params['health_check_grace_period_seconds']: + if not module.botocore_at_least('1.8.20'): + module.fail_json(msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds') + + if module.params['state'] == 'present': + + matching = False + update = False + + if existing and 'status' in existing and existing['status'] == "ACTIVE": + if module.params['force_new_deployment']: + update = True + elif service_mgr.is_matching_service(module.params, existing): + matching = True + results['service'] = existing + else: + update = True + + if not matching: + if not module.check_mode: + + role = module.params['role'] + clientToken = module.params['client_token'] + + loadBalancers = [] + for loadBalancer in module.params['load_balancers']: + if 'containerPort' in loadBalancer: + loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + loadBalancers.append(loadBalancer) + + for loadBalancer in loadBalancers: + if 'containerPort' in loadBalancer: + loadBalancer['containerPort'] = int(loadBalancer['containerPort']) + + if update: + # check various parameters and boto versions and give a helpful error in boto is not new enough for feature + + if module.params['scheduling_strategy']: + if not module.botocore_at_least('1.10.37'): + module.fail_json(msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy') + elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']: + module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service") + + if module.params['service_registries']: + if not module.botocore_at_least('1.9.15'): + module.fail_json(msg='botocore needs to be version 1.9.15 or higher to use service_registries') + elif (existing['serviceRegistries'] or []) != serviceRegistries: + module.fail_json(msg="It is not possible to update the service registries of an existing service") + + if (existing['loadBalancers'] or []) != loadBalancers: + module.fail_json(msg="It is not possible to update the load balancers of an existing service") + + # update required + response = service_mgr.update_service(module.params['name'], + module.params['cluster'], + module.params['task_definition'], + module.params['desired_count'], + deploymentConfiguration, + network_configuration, + module.params['health_check_grace_period_seconds'], + module.params['force_new_deployment']) + + else: + try: + response = service_mgr.create_service(module.params['name'], + module.params['cluster'], + module.params['task_definition'], + loadBalancers, + module.params['desired_count'], + clientToken, + role, + deploymentConfiguration, + module.params['placement_constraints'], + module.params['placement_strategy'], + module.params['health_check_grace_period_seconds'], + network_configuration, + serviceRegistries, + module.params['launch_type'], + module.params['scheduling_strategy'] + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e, msg="Couldn't create service") + + results['service'] = response + + results['changed'] = True + + elif module.params['state'] == 'absent': + if not existing: + pass + else: + # it exists, so we should delete it and mark changed. 
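+            # (Editor's note) the deployments/events lists hold datetime objects
+            # that are not JSON serialisable (see EcsServiceManager.jsonize above),
+            # which is presumably why both keys are stripped before the service
+            # dict is returned as ansible_facts.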
+ # return info about the cluster deleted + del existing['deployments'] + del existing['events'] + results['ansible_facts'] = existing + if 'status' in existing and existing['status'] == "INACTIVE": + results['changed'] = False + else: + if not module.check_mode: + try: + service_mgr.delete_service( + module.params['name'], + module.params['cluster'] + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e, msg="Couldn't delete service") + results['changed'] = True + + elif module.params['state'] == 'deleting': + if not existing: + module.fail_json(msg="Service '" + module.params['name'] + " not found.") + return + # it exists, so we should delete it and mark changed. + # return info about the cluster deleted + delay = module.params['delay'] + repeat = module.params['repeat'] + time.sleep(delay) + for i in range(repeat): + existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) + status = existing['status'] + if status == "INACTIVE": + results['changed'] = True + break + time.sleep(delay) + if i is repeat - 1: + module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.") + return + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ecs_service_facts.py b/ecs_service_facts.py new file mode 120000 index 00000000000..fead2dab76b --- /dev/null +++ b/ecs_service_facts.py @@ -0,0 +1 @@ +ecs_service_info.py \ No newline at end of file diff --git a/ecs_service_info.py b/ecs_service_info.py new file mode 100644 index 00000000000..974b4fba867 --- /dev/null +++ b/ecs_service_info.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_service_info +short_description: List or describe services in ECS +description: + - Lists or describes services in ECS. + - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ecs_service_info) module no longer returns C(ansible_facts)! +author: + - "Mark Chance (@Java1Guy)" + - "Darek Kaczynski (@kaczynskid)" +requirements: [ json, botocore, boto3 ] +options: + details: + description: + - Set this to true if you want detailed information about the services. + required: false + default: false + type: bool + events: + description: + - Whether to return ECS service events. Only has an effect if I(details=true). + required: false + default: true + type: bool + cluster: + description: + - The cluster ARNS in which to list the services. + required: false + type: str + service: + description: + - One or more services to get details for + required: false + type: list + elements: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
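+
+# (Editor's addition, hypothetical cluster name) Detailed listing that skips
+# the potentially verbose per-service event log
+- ecs_service_info:
+    cluster: test-cluster
+    details: true
+    events: false
+  register: output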
+ +# Basic listing example +- ecs_service_info: + cluster: test-cluster + service: console-test-service + details: true + register: output + +# Basic listing example +- ecs_service_info: + cluster: test-cluster + register: output +''' + +RETURN = ''' +services: + description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below. + returned: success + type: complex + contains: + clusterArn: + description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + returned: always + type: str + desiredCount: + description: The desired number of instantiations of the task definition to keep running on the service. + returned: always + type: int + loadBalancers: + description: A list of load balancer objects + returned: always + type: complex + contains: + loadBalancerName: + description: the name + returned: always + type: str + containerName: + description: The name of the container to associate with the load balancer. + returned: always + type: str + containerPort: + description: The port on the container to associate with the load balancer. + returned: always + type: int + pendingCount: + description: The number of tasks in the cluster that are in the PENDING state. + returned: always + type: int + runningCount: + description: The number of tasks in the cluster that are in the RUNNING state. + returned: always + type: int + serviceArn: + description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service . + returned: always + type: str + serviceName: + description: A user-generated string used to identify the service + returned: always + type: str + status: + description: The valid values are ACTIVE, DRAINING, or INACTIVE. + returned: always + type: str + taskDefinition: + description: The ARN of a task definition to use for tasks in the service. 
+ returned: always + type: str + deployments: + description: list of service deployments + returned: always + type: list + elements: dict + events: + description: list of service events + returned: when events is true + type: list + elements: dict +''' # NOQA + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry + + +class EcsServiceManager: + """Handles ECS Services""" + + def __init__(self, module): + self.module = module + self.ecs = module.client('ecs') + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def list_services_with_backoff(self, **kwargs): + paginator = self.ecs.get_paginator('list_services') + try: + return paginator.paginate(**kwargs).build_full_result() + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'ClusterNotFoundException': + self.module.fail_json_aws(e, "Could not find cluster to list services") + else: + raise + + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + def describe_services_with_backoff(self, **kwargs): + return self.ecs.describe_services(**kwargs) + + def list_services(self, cluster): + fn_args = dict() + if cluster and cluster is not None: + fn_args['cluster'] = cluster + try: + response = self.list_services_with_backoff(**fn_args) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't list ECS services") + relevant_response = dict(services=response['serviceArns']) + return relevant_response + + def describe_services(self, cluster, services): + fn_args = dict() + if cluster and cluster is not None: + fn_args['cluster'] = cluster + fn_args['services'] = services + try: + response = self.describe_services_with_backoff(**fn_args) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't describe ECS services") + running_services = [self.extract_service_from(service) for service in response.get('services', [])] + services_not_running = response.get('failures', []) + return running_services, services_not_running + + def extract_service_from(self, service): + # some fields are datetime which is not JSON serializable + # make them strings + if 'deployments' in service: + for d in service['deployments']: + if 'createdAt' in d: + d['createdAt'] = str(d['createdAt']) + if 'updatedAt' in d: + d['updatedAt'] = str(d['updatedAt']) + if 'events' in service: + if not self.module.params['events']: + del service['events'] + else: + for e in service['events']: + if 'createdAt' in e: + e['createdAt'] = str(e['createdAt']) + return service + + +def chunks(l, n): + """Yield successive n-sized chunks from l.""" + """ https://stackoverflow.com/a/312464 """ + for i in range(0, len(l), n): + yield l[i:i + n] + + +def main(): + + argument_spec = dict( + details=dict(type='bool', default=False), + events=dict(type='bool', default=True), + cluster=dict(), + service=dict(type='list') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + is_old_facts = module._name == 'ecs_service_facts' + if is_old_facts: + module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + show_details = module.params.get('details') + + task_mgr = EcsServiceManager(module) + if 
show_details: + if module.params['service']: + services = module.params['service'] + else: + services = task_mgr.list_services(module.params['cluster'])['services'] + ecs_info = dict(services=[], services_not_running=[]) + for chunk in chunks(services, 10): + running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk) + ecs_info['services'].extend(running_services) + ecs_info['services_not_running'].extend(services_not_running) + else: + ecs_info = task_mgr.list_services(module.params['cluster']) + + if is_old_facts: + module.exit_json(changed=False, ansible_facts=ecs_info, **ecs_info) + else: + module.exit_json(changed=False, **ecs_info) + + +if __name__ == '__main__': + main() diff --git a/ecs_tag.py b/ecs_tag.py new file mode 100644 index 00000000000..1a093e9fc5e --- /dev/null +++ b/ecs_tag.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Michael Pechner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ecs_tag +short_description: create and remove tags on Amazon ECS resources +notes: + - none +description: + - Creates and removes tags for Amazon ECS resources. + - Resources are referenced by their cluster name. +author: + - Michael Pechner (@mpechner) +requirements: [ boto3, botocore ] +options: + cluster_name: + description: + - The name of the cluster whose resources we are tagging. + required: true + type: str + resource: + description: + - The ECS resource name. + - Required unless I(resource_type=cluster). + type: str + resource_type: + description: + - The type of resource. + default: cluster + choices: ['cluster', 'task', 'service', 'task_definition', 'container'] + type: str + state: + description: + - Whether the tags should be present or absent on the resource. + default: present + choices: ['present', 'absent'] + type: str + tags: + description: + - A dictionary of tags to add or remove from the resource. + - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. + type: dict + purge_tags: + description: + - Whether unspecified tags should be removed from the resource. + - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. 
+ type: bool + default: false +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = r''' +- name: Ensure tags are present on a resource + ecs_tag: + cluster_name: mycluster + resource_type: cluster + state: present + tags: + Name: ubervol + env: prod + +- name: Remove the Env tag + ecs_tag: + cluster_name: mycluster + resource_type: cluster + tags: + Env: + state: absent + +- name: Remove the Env tag if it's currently 'development' + ecs_tag: + cluster_name: mycluster + resource_type: cluster + tags: + Env: development + state: absent + +- name: Remove all tags except for Name from a cluster + ecs_tag: + cluster_name: mycluster + resource_type: cluster + tags: + Name: foo + state: absent + purge_tags: true +''' + +RETURN = r''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: If tags were added + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: If tags were removed + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule +__metaclass__ = type + + +def get_tags(ecs, module, resource): + try: + return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource)) + + +def get_arn(ecs, module, cluster_name, resource_type, resource): + + try: + if resource_type == 'cluster': + description = ecs.describe_clusters(clusters=[resource]) + resource_arn = description['clusters'][0]['clusterArn'] + elif resource_type == 'task': + description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource]) + resource_arn = description['tasks'][0]['taskArn'] + elif resource_type == 'service': + description = ecs.describe_services(cluster=cluster_name, services=[resource]) + resource_arn = description['services'][0]['serviceArn'] + elif resource_type == 'task_definition': + description = ecs.describe_task_definition(taskDefinition=resource) + resource_arn = description['taskDefinition']['taskDefinitionArn'] + elif resource_type == 'container': + description = ecs.describe_container_instances(clusters=[resource]) + resource_arn = description['containerInstances'][0]['containerInstanceArn'] + except (IndexError, KeyError): + module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource)) + + return resource_arn + + +def main(): + argument_spec = dict( + cluster_name=dict(required=True), + resource=dict(required=False), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container']) + ) + required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, 
supports_check_mode=True) + + resource_type = module.params['resource_type'] + cluster_name = module.params['cluster_name'] + if resource_type == 'cluster': + resource = cluster_name + else: + resource = module.params['resource'] + tags = module.params['tags'] + state = module.params['state'] + purge_tags = module.params['purge_tags'] + + result = {'changed': False} + + ecs = module.client('ecs') + + resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource) + + current_tags = get_tags(ecs, module, resource_arn) + + add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + remove_tags = {} + if state == 'absent': + for key in tags: + if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + remove_tags[key] = current_tags[key] + + for key in remove: + remove_tags[key] = current_tags[key] + + if remove_tags: + result['changed'] = True + result['removed_tags'] = remove_tags + if not module.check_mode: + try: + ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + + if state == 'present' and add_tags: + result['changed'] = True + result['added_tags'] = add_tags + current_tags.update(add_tags) + if not module.check_mode: + try: + tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value') + ecs.tag_resource(resourceArn=resource_arn, tags=tags) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + + result['tags'] = get_tags(ecs, module, resource_arn) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ecs_task.py b/ecs_task.py new file mode 100644 index 00000000000..717b0d8236d --- /dev/null +++ b/ecs_task.py @@ -0,0 +1,447 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_task +short_description: Run, start or stop a task in ecs +description: + - Creates or deletes instances of task definitions. +author: Mark Chance (@Java1Guy) +requirements: [ json, botocore, boto3 ] +options: + operation: + description: + - Which task operation to execute. + required: True + choices: ['run', 'start', 'stop'] + type: str + cluster: + description: + - The name of the cluster to run the task on. + required: False + type: str + task_definition: + description: + - The task definition to start or run. + required: False + type: str + overrides: + description: + - A dictionary of values to pass to the new instances. + required: False + type: dict + count: + description: + - How many new instances to start. + required: False + type: int + task: + description: + - The task to stop. + required: False + type: str + container_instances: + description: + - The list of container instances on which to deploy the task. + required: False + type: list + elements: str + started_by: + description: + - A value showing who or what started the task (for informational purposes). + required: False + type: str + network_configuration: + description: + - Network configuration of the service. 
Only applicable for task definitions created with I(network_mode=awsvpc). + type: dict + suboptions: + subnets: + description: A list of subnet IDs to which the task is attached. + type: list + elements: str + security_groups: + description: A list of group names or group IDs for the task. + type: list + elements: str + launch_type: + description: + - The launch type on which to run your service. + required: false + choices: ["EC2", "FARGATE"] + type: str + tags: + type: dict + description: + - Tags that will be added to ecs tasks on start and run + required: false +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Simple example of run task +- name: Run task + ecs_task: + operation: run + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + count: 1 + started_by: ansible_user + register: task_output + +# Simple example of start task + +- name: Start a task + ecs_task: + operation: start + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + tags: + resourceName: a_task_for_ansible_to_run + type: long_running_task + network: internal + version: 1.4 + container_instances: + - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 + started_by: ansible_user + network_configuration: + subnets: + - subnet-abcd1234 + security_groups: + - sg-aaaa1111 + - my_security_group + register: task_output + +- name: RUN a task on Fargate + ecs_task: + operation: run + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + started_by: ansible_user + launch_type: FARGATE + network_configuration: + subnets: + - subnet-abcd1234 + security_groups: + - sg-aaaa1111 + - my_security_group + register: task_output + +- name: Stop a task + ecs_task: + operation: stop + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" +''' +RETURN = ''' +task: + description: details about the task that was started + returned: success + type: complex + contains: + taskArn: + description: The Amazon Resource Name (ARN) that identifies the task. + returned: always + type: str + clusterArn: + description: The Amazon Resource Name (ARN) of the of the cluster that hosts the task. + returned: only when details is true + type: str + taskDefinitionArn: + description: The Amazon Resource Name (ARN) of the task definition. + returned: only when details is true + type: str + containerInstanceArn: + description: The Amazon Resource Name (ARN) of the container running the task. + returned: only when details is true + type: str + overrides: + description: The container overrides set for this task. + returned: only when details is true + type: list + elements: dict + lastStatus: + description: The last recorded status of the task. + returned: only when details is true + type: str + desiredStatus: + description: The desired status of the task. + returned: only when details is true + type: str + containers: + description: The container details. + returned: only when details is true + type: list + elements: dict + startedBy: + description: The used who started the task. 
+ returned: only when details is true + type: str + stoppedReason: + description: The reason why the task was stopped. + returned: only when details is true + type: str + createdAt: + description: The timestamp of when the task was created. + returned: only when details is true + type: str + startedAt: + description: The timestamp of when the task was started. + returned: only when details is true + type: str + stoppedAt: + description: The timestamp of when the task was stopped. + returned: only when details is true + type: str + launchType: + description: The launch type on which to run your task. + returned: always + type: str +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.basic import missing_required_lib +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +class EcsExecManager: + """Handles ECS Tasks""" + + def __init__(self, module): + self.module = module + self.ecs = module.client('ecs') + self.ec2 = module.client('ec2') + + def format_network_configuration(self, network_config): + result = dict() + if 'subnets' in network_config: + result['subnets'] = network_config['subnets'] + else: + self.module.fail_json(msg="Network configuration must include subnets") + if 'security_groups' in network_config: + groups = network_config['security_groups'] + if any(not sg.startswith('sg-') for sg in groups): + try: + vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId'] + groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't look up security groups") + result['securityGroups'] = groups + return dict(awsvpcConfiguration=result) + + def list_tasks(self, cluster_name, service_name, status): + response = self.ecs.list_tasks( + cluster=cluster_name, + family=service_name, + desiredStatus=status + ) + if len(response['taskArns']) > 0: + for c in response['taskArns']: + if c.endswith(service_name): + return c + return None + + def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags): + if overrides is None: + overrides = dict() + params = dict(cluster=cluster, taskDefinition=task_definition, + overrides=overrides, count=count, startedBy=startedBy) + if self.module.params['network_configuration']: + params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + if launch_type: + params['launchType'] = launch_type + if tags: + params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + + # TODO: need to check if long arn format enabled. 
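+        # (Editor's note) main() already gates the tags parameter on
+        # ecs_task_long_format_enabled(), which queries the account's
+        # taskLongArnFormat setting via list_account_settings, so the TODO
+        # above concerns moving that check closer to this call rather than
+        # adding a missing safeguard.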
+ try: + response = self.ecs.run_task(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't run task") + # include tasks and failures + return response['tasks'] + + def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags): + args = dict() + if cluster: + args['cluster'] = cluster + if task_definition: + args['taskDefinition'] = task_definition + if overrides: + args['overrides'] = overrides + if container_instances: + args['containerInstances'] = container_instances + if startedBy: + args['startedBy'] = startedBy + if self.module.params['network_configuration']: + args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration']) + if tags: + args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + try: + response = self.ecs.start_task(**args) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't start task") + # include tasks and failures + return response['tasks'] + + def stop_task(self, cluster, task): + response = self.ecs.stop_task(cluster=cluster, task=task) + return response['task'] + + def ecs_api_handles_launch_type(self): + from distutils.version import LooseVersion + # There doesn't seem to be a nice way to inspect botocore to look + # for attributes (and networkConfiguration is not an explicit argument + # to e.g. ecs.run_task, it's just passed as a keyword argument) + return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4') + + def ecs_task_long_format_enabled(self): + account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True) + return account_support['settings'][0]['value'] == 'enabled' + + def ecs_api_handles_tags(self): + from distutils.version import LooseVersion + # There doesn't seem to be a nice way to inspect botocore to look + # for attributes (and networkConfiguration is not an explicit argument + # to e.g. ecs.run_task, it's just passed as a keyword argument) + return LooseVersion(botocore.__version__) >= LooseVersion('1.12.46') + + def ecs_api_handles_network_configuration(self): + from distutils.version import LooseVersion + # There doesn't seem to be a nice way to inspect botocore to look + # for attributes (and networkConfiguration is not an explicit argument + # to e.g. 
ecs.run_task, it's just passed as a keyword argument) + return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44') + + +def main(): + argument_spec = dict( + operation=dict(required=True, choices=['run', 'start', 'stop']), + cluster=dict(required=False, type='str'), # R S P + task_definition=dict(required=False, type='str'), # R* S* + overrides=dict(required=False, type='dict'), # R S + count=dict(required=False, type='int'), # R + task=dict(required=False, type='str'), # P* + container_instances=dict(required=False, type='list'), # S* + started_by=dict(required=False, type='str'), # R S + network_configuration=dict(required=False, type='dict'), + launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + tags=dict(required=False, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, + required_if=[('launch_type', 'FARGATE', ['network_configuration'])]) + + # Validate Inputs + if module.params['operation'] == 'run': + if 'task_definition' not in module.params and module.params['task_definition'] is None: + module.fail_json(msg="To run a task, a task_definition must be specified") + task_to_list = module.params['task_definition'] + status_type = "RUNNING" + + if module.params['operation'] == 'start': + if 'task_definition' not in module.params and module.params['task_definition'] is None: + module.fail_json(msg="To start a task, a task_definition must be specified") + if 'container_instances' not in module.params and module.params['container_instances'] is None: + module.fail_json(msg="To start a task, container instances must be specified") + task_to_list = module.params['task'] + status_type = "RUNNING" + + if module.params['operation'] == 'stop': + if 'task' not in module.params and module.params['task'] is None: + module.fail_json(msg="To stop a task, a task must be specified") + if 'task_definition' not in module.params and module.params['task_definition'] is None: + module.fail_json(msg="To stop a task, a task definition must be specified") + task_to_list = module.params['task_definition'] + status_type = "STOPPED" + + service_mgr = EcsExecManager(module) + + if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration(): + module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') + + if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type(): + module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type') + + if module.params['tags']: + if not service_mgr.ecs_api_handles_tags(): + module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags")) + if not service_mgr.ecs_task_long_format_enabled(): + module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags") + + existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type) + + results = dict(changed=False) + if module.params['operation'] == 'run': + if existing: + # TBD - validate the rest of the details + results['task'] = existing + else: + if not module.check_mode: + results['task'] = service_mgr.run_task( + module.params['cluster'], + module.params['task_definition'], + module.params['overrides'], + module.params['count'], + module.params['started_by'], + module.params['launch_type'], + module.params['tags'], + ) + results['changed'] = True + + elif module.params['operation'] == 'start': + if existing: + # TBD - validate the rest of the details + results['task'] = 
existing + else: + if not module.check_mode: + results['task'] = service_mgr.start_task( + module.params['cluster'], + module.params['task_definition'], + module.params['overrides'], + module.params['container_instances'], + module.params['started_by'], + module.params['tags'], + ) + results['changed'] = True + + elif module.params['operation'] == 'stop': + if existing: + results['task'] = existing + else: + if not module.check_mode: + # it exists, so we should delete it and mark changed. + # return info about the cluster deleted + results['task'] = service_mgr.stop_task( + module.params['cluster'], + module.params['task'] + ) + results['changed'] = True + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py new file mode 100644 index 00000000000..88fe6b47353 --- /dev/null +++ b/ecs_taskdefinition.py @@ -0,0 +1,521 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_taskdefinition +short_description: register a task definition in ecs +description: + - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS). +author: Mark Chance (@Java1Guy) +requirements: [ json, botocore, boto3 ] +options: + state: + description: + - State whether the task definition should exist or be deleted. + required: true + choices: ['present', 'absent'] + type: str + arn: + description: + - The ARN of the task description to delete. + required: false + type: str + family: + description: + - A Name that would be given to the task definition. + required: false + type: str + revision: + description: + - A revision number for the task definition. + required: False + type: int + force_create: + description: + - Always create new task definition. + required: False + type: bool + containers: + description: + - A list of containers definitions. + required: False + type: list + elements: str + network_mode: + description: + - The Docker networking mode to use for the containers in the task. + - C(awsvpc) mode was added in Ansible 2.5 + - Windows containers must use I(network_mode=default), which will utilize docker NAT networking. + - Setting I(network_mode=default) for a Linux container will use bridge mode. + required: false + default: bridge + choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ] + type: str + task_role_arn: + description: + - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted + the permissions that are specified in this role. + required: false + type: str + execution_role_arn: + description: + - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. + required: false + type: str + volumes: + description: + - A list of names of volumes to be attached. + required: False + type: list + elements: dict + suboptions: + name: + type: str + description: The name of the volume. + required: true + launch_type: + description: + - The launch type on which to run your task. + required: false + type: str + choices: ["EC2", "FARGATE"] + cpu: + description: + - The number of cpu units used by the task. 
If using the EC2 launch type, this field is optional and any value can be used. + - If using the Fargate launch type, this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096). + required: false + type: str + memory: + description: + - The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. + - If using the Fargate launch type, this field is required and is limited by the cpu. + required: false + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Create task definition + ecs_taskdefinition: + containers: + - name: simple-app + cpu: 10 + essential: true + image: "httpd:2.4" + memory: 300 + mountPoints: + - containerPath: /usr/local/apache2/htdocs + sourceVolume: my-vol + portMappings: + - containerPort: 80 + hostPort: 80 + logConfiguration: + logDriver: awslogs + options: + awslogs-group: /ecs/test-cluster-taskdef + awslogs-region: us-west-2 + awslogs-stream-prefix: ecs + - name: busybox + command: + - > + /bin/sh -c "while true; do echo 'Amazon ECS Sample App

- Congratulations! + Your application is now running on a container in Amazon ECS.' > top; + /bin/date > date ; echo '
' > bottom; + cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done" + cpu: 10 + entryPoint: + - sh + - "-c" + essential: false + image: busybox + memory: 200 + volumesFrom: + - sourceContainer: simple-app + volumes: + - name: my-vol + family: test-cluster-taskdef + state: present + register: task_output + +- name: Create task definition + ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + state: present + +- name: Create task definition + ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + launch_type: FARGATE + cpu: 512 + memory: 1024 + state: present + network_mode: awsvpc + +# Create Task Definition with Environment Variables and Secrets +- name: Create task definition + ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + environment: + - name: "PORT" + value: "8080" + secrets: + # For variables stored in Secrets Manager + - name: "NGINX_HOST" + valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST" + # For variables stored in Parameter Store + - name: "API_KEY" + valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY" + launch_type: FARGATE + cpu: 512 + memory: 1GB + state: present + network_mode: awsvpc +''' +RETURN = ''' +taskdefinition: + description: a reflection of the input parameters + type: dict + returned: always +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils._text import to_text + + +class EcsTaskManager: + """Handles ECS Tasks""" + + def __init__(self, module): + self.module = module + + self.ecs = module.client('ecs') + + def describe_task(self, task_name): + try: + response = self.ecs.describe_task_definition(taskDefinition=task_name) + return response['taskDefinition'] + except botocore.exceptions.ClientError: + return None + + def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory): + validated_containers = [] + + # Ensures the number parameters are int as required by boto + for container in container_definitions: + for param in ('memory', 'cpu', 'memoryReservation'): + if param in container: + container[param] = int(container[param]) + + if 'portMappings' in container: + for port_mapping in container['portMappings']: + for port in ('hostPort', 'containerPort'): + if port in port_mapping: + port_mapping[port] = int(port_mapping[port]) + if network_mode == 'awsvpc' and 'hostPort' in port_mapping: + if port_mapping['hostPort'] != port_mapping.get('containerPort'): + self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as " + "container port or not be set") + + validated_containers.append(container) + + params = dict( + family=family, + taskRoleArn=task_role_arn, + containerDefinitions=container_definitions, + volumes=volumes + ) + if network_mode != 'default': + params['networkMode'] = network_mode + if cpu: + params['cpu'] = cpu + if memory: + params['memory'] = memory + if launch_type: + params['requiresCompatibilities'] = [launch_type] + if 
execution_role_arn: + params['executionRoleArn'] = execution_role_arn + + try: + response = self.ecs.register_task_definition(**params) + except botocore.exceptions.ClientError as e: + # ClientError has no 'message' attribute on Python 3; let fail_json_aws format it. + self.module.fail_json_aws(e, msg="Failed to register task definition") + + return response['taskDefinition'] + + def describe_task_definitions(self, family): + data = { + "taskDefinitionArns": [], + "nextToken": None + } + + def fetch(): + # Boto3 is weird about params passed, so only pass nextToken if we have a value + params = { + 'familyPrefix': family + } + + if data['nextToken']: + params['nextToken'] = data['nextToken'] + + result = self.ecs.list_task_definitions(**params) + data['taskDefinitionArns'] += result['taskDefinitionArns'] + data['nextToken'] = result.get('nextToken', None) + return data['nextToken'] is not None + + # Fetch all the arns, possibly across multiple pages + while fetch(): + pass + + # Return the full descriptions of the task definitions, sorted ascending by revision + return list( + sorted( + [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], + key=lambda td: td['revision'] + ) + ) + + def deregister_task(self, taskArn): + response = self.ecs.deregister_task_definition(taskDefinition=taskArn) + return response['taskDefinition'] + + +def main(): + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent']), + arn=dict(required=False, type='str'), + family=dict(required=False, type='str'), + revision=dict(required=False, type='int'), + force_create=dict(required=False, default=False, type='bool'), + containers=dict(required=False, type='list'), + network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), + task_role_arn=dict(required=False, default='', type='str'), + execution_role_arn=dict(required=False, default='', type='str'), + volumes=dict(required=False, type='list'), + launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + cpu=dict(), + memory=dict(required=False, type='str') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True, + required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])] + ) + + task_to_describe = None + task_mgr = EcsTaskManager(module) + results = dict(changed=False) + + if module.params['launch_type']: + if not module.botocore_at_least('1.8.4'): + module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type') + + if module.params['execution_role_arn']: + if not module.botocore_at_least('1.10.44'): + module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn') + + if module.params['containers']: + for container in module.params['containers']: + for environment in container.get('environment', []): + environment['value'] = to_text(environment['value']) + + if module.params['state'] == 'present': + if not module.params['containers']: + module.fail_json(msg="To use task definitions, a list of containers must be specified") + + if not module.params['family']: + module.fail_json(msg="To use task definitions, a family must be specified") + + network_mode = module.params['network_mode'] + launch_type = module.params['launch_type'] + if launch_type == 'FARGATE' and network_mode != 'awsvpc': + module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") + + family = module.params['family'] + existing_definitions_in_family 
= task_mgr.describe_task_definitions(module.params['family']) + + if 'revision' in module.params and module.params['revision']: + # The definition specifies revision. We must guarantee that an active revision of that number will result from this. + revision = int(module.params['revision']) + + # A revision has been explicitly specified. Attempt to locate a matching revision + tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision] + existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None + + if existing and existing['status'] != "ACTIVE": + # We cannot reactivate an inactive revision + module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision)) + elif not existing: + if not existing_definitions_in_family and revision != 1: + module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision) + elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision: + module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % + (revision, existing_definitions_in_family[-1]['revision'] + 1)) + else: + existing = None + + def _right_has_values_of_left(left, right): + # Make sure the values are equivalent for everything left has + for k, v in left.items(): + if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])): + # We don't care about list ordering because ECS can change things + if isinstance(v, list) and k in right: + left_list = v + right_list = right[k] or [] + + if len(left_list) != len(right_list): + return False + + for list_val in left_list: + if list_val not in right_list: + return False + else: + return False + + # Make sure right doesn't have anything that left doesn't + for k, v in right.items(): + if v and k not in left: + return False + + return True + + def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition): + if td['status'] != "ACTIVE": + return None + + if requested_task_role_arn != td.get('taskRoleArn', ""): + return None + + existing_volumes = td.get('volumes', []) or [] + + if len(requested_volumes) != len(existing_volumes): + # Nope. + return None + + if len(requested_volumes) > 0: + for requested_vol in requested_volumes: + found = False + + for actual_vol in existing_volumes: + if _right_has_values_of_left(requested_vol, actual_vol): + found = True + break + + if not found: + return None + + existing_containers = td.get('containerDefinitions', []) or [] + + if len(requested_containers) != len(existing_containers): + # Nope. + return None + + for requested_container in requested_containers: + found = False + + for actual_container in existing_containers: + if _right_has_values_of_left(requested_container, actual_container): + found = True + break + + if not found: + return None + + return existing_task_definition + + # No revision explicitly specified. 
Attempt to find an active, matching revision that has all the properties requested + for td in existing_definitions_in_family: + requested_volumes = module.params['volumes'] or [] + requested_containers = module.params['containers'] or [] + requested_task_role_arn = module.params['task_role_arn'] + existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td) + + if existing: + break + + if existing and not module.params.get('force_create'): + # Awesome. Have an existing one. Nothing to do. + results['taskdefinition'] = existing + else: + if not module.check_mode: + # Doesn't exist; create it. + volumes = module.params.get('volumes', []) or [] + results['taskdefinition'] = task_mgr.register_task(module.params['family'], + module.params['task_role_arn'], + module.params['execution_role_arn'], + module.params['network_mode'], + module.params['containers'], + volumes, + module.params['launch_type'], + module.params['cpu'], + module.params['memory']) + results['changed'] = True + + elif module.params['state'] == 'absent': + # When deregistering a task definition, we can specify the ARN OR the family and revision. + if module.params['arn'] is not None: + task_to_describe = module.params['arn'] + elif module.params['family'] is not None and module.params['revision'] is not None: + task_to_describe = module.params['family'] + ":" + str(module.params['revision']) + else: + module.fail_json(msg="To deregister a task definition, an arn or family and revision must be specified") + + existing = task_mgr.describe_task(task_to_describe) + + if existing: + # It exists, so we should delete it and mark changed. Return info about the task definition deleted + results['taskdefinition'] = existing + if 'status' in existing and existing['status'] == "INACTIVE": + results['changed'] = False + else: + if not module.check_mode: + task_mgr.deregister_task(task_to_describe) + results['changed'] = True + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ecs_taskdefinition_facts.py b/ecs_taskdefinition_facts.py new file mode 120000 index 00000000000..0eb6f10b8ff --- /dev/null +++ b/ecs_taskdefinition_facts.py @@ -0,0 +1 @@ +ecs_taskdefinition_info.py \ No newline at end of file diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py new file mode 100644 index 00000000000..c2a9f15008e --- /dev/null +++ b/ecs_taskdefinition_info.py @@ -0,0 +1,334 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ecs_taskdefinition_info +short_description: Describe a task definition in ECS +notes: + - For details of the parameters and returns see + U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition) + - This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change. +description: + - Describes a task definition in ECS. 
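+ - If I(task_definition) contains only a family name, the latest C(ACTIVE) revision in that family is described; a specific revision can be requested as C(family:revision) or by full ARN.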
+author: + - Gustavo Maia (@gurumaia) + - Mark Chance (@Java1Guy) + - Darek Kaczynski (@kaczynskid) +requirements: [ json, botocore, boto3 ] +options: + task_definition: + description: + - The name of the task definition to get details for + required: true + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- ecs_taskdefinition_info: + task_definition: test-td +''' + +RETURN = ''' +container_definitions: + description: Returns a list of complex objects representing the containers + returned: success + type: complex + contains: + name: + description: The name of a container. + returned: always + type: str + image: + description: The image used to start a container. + returned: always + type: str + cpu: + description: The number of cpu units reserved for the container. + returned: always + type: int + memoryReservation: + description: The soft limit (in MiB) of memory to reserve for the container. + returned: when present + type: int + links: + description: Links to other containers. + returned: when present + type: str + portMappings: + description: The list of port mappings for the container. + returned: always + type: complex + contains: + containerPort: + description: The port number on the container. + returned: when present + type: int + hostPort: + description: The port number on the container instance to reserve for your container. + returned: when present + type: int + protocol: + description: The protocol used for the port mapping. + returned: when present + type: str + essential: + description: Whether this is an essential container or not. + returned: always + type: bool + entryPoint: + description: The entry point that is passed to the container. + returned: when present + type: str + command: + description: The command that is passed to the container. + returned: when present + type: str + environment: + description: The environment variables to pass to a container. + returned: always + type: complex + contains: + name: + description: The name of the environment variable. + returned: when present + type: str + value: + description: The value of the environment variable. + returned: when present + type: str + mountPoints: + description: The mount points for data volumes in your container. + returned: always + type: complex + contains: + sourceVolume: + description: The name of the volume to mount. + returned: when present + type: str + containerPath: + description: The path on the container to mount the host volume at. + returned: when present + type: str + readOnly: + description: If this value is true , the container has read-only access to the volume. + If this value is false , then the container can write to the volume. + returned: when present + type: bool + volumesFrom: + description: Data volumes to mount from another container. + returned: always + type: complex + contains: + sourceContainer: + description: The name of another container within the same task definition to mount volumes from. + returned: when present + type: str + readOnly: + description: If this value is true , the container has read-only access to the volume. + If this value is false , then the container can write to the volume. + returned: when present + type: bool + hostname: + description: The hostname to use for your container. + returned: when present + type: str + user: + description: The user name to use inside the container. 
+ returned: when present + type: str + workingDirectory: + description: The working directory in which to run commands inside the container. + returned: when present + type: str + disableNetworking: + description: When this parameter is true, networking is disabled within the container. + returned: when present + type: bool + privileged: + description: When this parameter is true, the container is given elevated + privileges on the host container instance (similar to the root user). + returned: when present + type: bool + readonlyRootFilesystem: + description: When this parameter is true, the container is given read-only access to its root file system. + returned: when present + type: bool + dnsServers: + description: A list of DNS servers that are presented to the container. + returned: when present + type: str + dnsSearchDomains: + description: A list of DNS search domains that are presented to the container. + returned: when present + type: str + extraHosts: + description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. + returned: when present + type: complex + contains: + hostname: + description: The hostname to use in the /etc/hosts entry. + returned: when present + type: str + ipAddress: + description: The IP address to use in the /etc/hosts entry. + returned: when present + type: str + dockerSecurityOptions: + description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. + returned: when present + type: str + dockerLabels: + description: A key/value map of labels to add to the container. + returned: when present + type: str + ulimits: + description: A list of ulimits to set in the container. + returned: when present + type: complex + contains: + name: + description: The type of the ulimit . + returned: when present + type: str + softLimit: + description: The soft limit for the ulimit type. + returned: when present + type: int + hardLimit: + description: The hard limit for the ulimit type. + returned: when present + type: int + logConfiguration: + description: The log configuration specification for the container. + returned: when present + type: str + options: + description: The configuration options to send to the log driver. + returned: when present + type: str + +family: + description: The family of your task definition, used as the definition name + returned: always + type: str +task_definition_arn: + description: ARN of the task definition + returned: always + type: str +task_role_arn: + description: The ARN of the IAM role that containers in this task can assume + returned: when role is set + type: str +network_mode: + description: Network mode for the containers + returned: always + type: str +revision: + description: Revision number that was queried + returned: always + type: int +volumes: + description: The list of volumes in a task + returned: always + type: complex + contains: + name: + description: The name of the volume. + returned: when present + type: str + host: + description: The contents of the host parameter determine whether your data volume + persists on the host container instance and where it is stored. + returned: when present + type: bool + source_path: + description: The path on the host container instance that is presented to the container. 
+ returned: when present + type: str +status: + description: The status of the task definition + returned: always + type: str +requires_attributes: + description: The container instance attributes required by your task + returned: when present + type: complex + contains: + name: + description: The name of the attribute. + returned: when present + type: str + value: + description: The value of the attribute. + returned: when present + type: str + targetType: + description: The type of the target with which to attach the attribute. + returned: when present + type: str + targetId: + description: The ID of the target. + returned: when present + type: str +placement_constraints: + description: A list of placement constraint objects to use for tasks + returned: always + type: complex + contains: + type: + description: The type of constraint. + returned: when present + type: str + expression: + description: A cluster query language expression to apply to the constraint. + returned: when present + type: str +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +def main(): + argument_spec = dict( + task_definition=dict(required=True, type='str') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ecs_taskdefinition_facts': + module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", version='2.13') + + ecs = module.client('ecs') + + try: + ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition'] + except botocore.exceptions.ClientError: + ecs_td = {} + + module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td)) + + +if __name__ == '__main__': + main() diff --git a/efs.py b/efs.py new file mode 100644 index 00000000000..26671a62850 --- /dev/null +++ b/efs.py @@ -0,0 +1,753 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: efs +short_description: create and maintain EFS file systems +description: + - Module allows create, search and destroy Amazon EFS file systems. +requirements: [ boto3 ] +author: + - "Ryan Sydnor (@ryansydnor)" + - "Artem Kazakov (@akazakov)" +options: + encrypt: + description: + - If I(encrypt=true) creates an encrypted file system. This can not be modified after the file system is created. + type: bool + default: false + kms_key_id: + description: + - The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only + required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for + Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN. + type: str + purge_tags: + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter + is not set then tags will not be modified. 
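+ - For example, with I(purge_tags=true) and I(tags) containing only a C(Name) key, any other tags currently on the file system are removed.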
+ type: bool + default: true + state: + description: + - Allows to create, search and destroy Amazon EFS file system. + default: 'present' + choices: ['present', 'absent'] + type: str + name: + description: + - Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete. + type: str + id: + description: + - ID of Amazon EFS. Either name or ID required for delete. + type: str + performance_mode: + description: + - File system's performance mode to use. Only takes effect during creation. + default: 'general_purpose' + choices: ['general_purpose', 'max_io'] + type: str + tags: + description: + - "List of tags of Amazon EFS. Should be defined as dictionary + In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data." + type: dict + targets: + description: + - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes: + This data may be modified for existing EFS using state 'present' and new list of mount targets." + type: list + elements: dict + suboptions: + subnet_id: + required: true + description: The ID of the subnet to add the mount target in. + ip_address: + type: str + description: A valid IPv4 address within the address range of the specified subnet. + security_groups: + type: list + elements: str + description: List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified + throughput_mode: + description: + - The throughput_mode for the file system to be created. + - Requires botocore >= 1.10.57 + choices: ['bursting', 'provisioned'] + type: str + provisioned_throughput_in_mibps: + description: + - If the throughput_mode is provisioned, select the amount of throughput to provisioned in Mibps. + - Requires botocore >= 1.10.57 + type: float + wait: + description: + - "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted') + In case of 'absent' state should wait for EFS 'deleted' life cycle state" + type: bool + default: false + wait_timeout: + description: + - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary. 
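+ - For example, I(wait_timeout=300) fails the task if the desired state has not been reached within five minutes.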
+ default: 0 + type: int + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# EFS provisioning +- efs: + state: present + name: myTestEFS + tags: + Name: myTestNameTag + purpose: file-storage + targets: + - subnet_id: subnet-748c5d03 + security_groups: [ "sg-1a2b3c4d" ] + +# Modifying EFS data +- efs: + state: present + name: myTestEFS + tags: + name: myAnotherTestTag + targets: + - subnet_id: subnet-7654fdca + security_groups: [ "sg-4c5d6f7a" ] + +# Deleting EFS +- efs: + state: absent + name: myTestEFS +''' + +RETURN = ''' +creation_time: + description: timestamp of creation date + returned: always + type: str + sample: "2015-11-16 07:30:57-05:00" +creation_token: + description: EFS creation token + returned: always + type: str + sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961" +file_system_id: + description: ID of the file system + returned: always + type: str + sample: "fs-xxxxxxxx" +life_cycle_state: + description: state of the EFS file system + returned: always + type: str + sample: "creating, available, deleting, deleted" +mount_point: + description: url of file system with leading dot from the time when AWS EFS required to add a region suffix to the address + returned: always + type: str + sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/" +filesystem_address: + description: url of file system valid for use with mount + returned: always + type: str + sample: "fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/" +mount_targets: + description: list of mount targets + returned: always + type: list + sample: + [ + { + "file_system_id": "fs-a7ad440e", + "ip_address": "172.31.17.173", + "life_cycle_state": "available", + "mount_target_id": "fsmt-d8907871", + "network_interface_id": "eni-6e387e26", + "owner_id": "740748460359", + "security_groups": [ + "sg-a30b22c6" + ], + "subnet_id": "subnet-e265c895" + }, + ... 
+ ] +name: + description: name of the file system + returned: always + type: str + sample: "my-efs" +number_of_mount_targets: + description: the number of targets mounted + returned: always + type: int + sample: 3 +owner_id: + description: AWS account ID of EFS owner + returned: always + type: str + sample: "XXXXXXXXXXXX" +size_in_bytes: + description: size of the file system in bytes as of a timestamp + returned: always + type: dict + sample: + { + "timestamp": "2015-12-21 13:59:59-05:00", + "value": 12288 + } +performance_mode: + description: performance mode of the file system + returned: always + type: str + sample: "generalPurpose" +tags: + description: tags on the efs instance + returned: always + type: dict + sample: + { + "name": "my-efs", + "key": "Value" + } + +''' + +from time import sleep +from time import time as timestamp + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError as e: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (compare_aws_tags, camel_dict_to_snake_dict, + ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict) + + +def _index_by_key(key, items): + return dict((item[key], item) for item in items) + + +class EFSConnection(object): + + DEFAULT_WAIT_TIMEOUT_SECONDS = 0 + + STATE_CREATING = 'creating' + STATE_AVAILABLE = 'available' + STATE_DELETING = 'deleting' + STATE_DELETED = 'deleted' + + def __init__(self, module): + self.connection = module.client('efs') + region = module.region + + self.module = module + self.region = region + self.wait = module.params.get('wait') + self.wait_timeout = module.params.get('wait_timeout') + + def get_file_systems(self, **kwargs): + """ + Returns generator of file systems including all attributes of FS + """ + items = iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + **kwargs + ) + for item in items: + item['Name'] = item['CreationToken'] + item['CreationTime'] = str(item['CreationTime']) + """ + In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it + AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose + And new FilesystemAddress variable is introduced for direct use with other modules (e.g. 
mount) + AWS documentation is available here: + https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html + """ + item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + if 'Timestamp' in item['SizeInBytes']: + item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + if item['LifeCycleState'] == self.STATE_AVAILABLE: + item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId']) + item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId'])) + else: + item['Tags'] = {} + item['MountTargets'] = [] + yield item + + def get_tags(self, **kwargs): + """ + Returns tag list for selected instance of EFS + """ + tags = self.connection.describe_tags(**kwargs)['Tags'] + return tags + + def get_mount_targets(self, **kwargs): + """ + Returns mount targets for selected instance of EFS + """ + targets = iterate_all( + 'MountTargets', + self.connection.describe_mount_targets, + **kwargs + ) + for target in targets: + if target['LifeCycleState'] == self.STATE_AVAILABLE: + target['SecurityGroups'] = list(self.get_security_groups( + MountTargetId=target['MountTargetId'] + )) + else: + target['SecurityGroups'] = [] + yield target + + def get_security_groups(self, **kwargs): + """ + Returns security groups for selected instance of EFS + """ + return iterate_all( + 'SecurityGroups', + self.connection.describe_mount_target_security_groups, + **kwargs + ) + + def get_file_system_id(self, name): + """ + Returns ID of instance by instance name + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + CreationToken=name + )) + return info and info['FileSystemId'] or None + + def get_file_system_state(self, name, file_system_id=None): + """ + Returns state of filesystem by EFS id/name + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + CreationToken=name, + FileSystemId=file_system_id + )) + return info and info['LifeCycleState'] or self.STATE_DELETED + + def get_mount_targets_in_state(self, file_system_id, states=None): + """ + Returns states of mount targets of selected EFS with selected state(s) (optional) + """ + targets = iterate_all( + 'MountTargets', + self.connection.describe_mount_targets, + FileSystemId=file_system_id + ) + + if states: + if not isinstance(states, list): + states = [states] + targets = filter(lambda target: target['LifeCycleState'] in states, targets) + + return list(targets) + + def supports_provisioned_mode(self): + """ + Ensure boto3 includes provisioned throughput mode feature + """ + return hasattr(self.connection, 'update_file_system') + + def get_throughput_mode(self, **kwargs): + """ + Returns throughput mode for selected EFS instance + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + **kwargs + )) + + return info and info['ThroughputMode'] or None + + def get_provisioned_throughput_in_mibps(self, **kwargs): + """ + Returns throughput mode for selected EFS instance + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + **kwargs + )) + return info.get('ProvisionedThroughputInMibps', None) + + def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps): + """ + Creates new filesystem with selected name + """ + changed = 
False + state = self.get_file_system_state(name) + params = {} + params['CreationToken'] = name + params['PerformanceMode'] = performance_mode + if encrypt: + params['Encrypted'] = encrypt + if kms_key_id is not None: + params['KmsKeyId'] = kms_key_id + if throughput_mode: + if self.supports_provisioned_mode(): + params['ThroughputMode'] = throughput_mode + else: + self.module.fail_json(msg="throughput_mode parameter requires botocore >= 1.10.57") + if provisioned_throughput_in_mibps: + if self.supports_provisioned_mode(): + params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps + else: + self.module.fail_json(msg="provisioned_throughput_in_mibps parameter requires botocore >= 1.10.57") + + if state in [self.STATE_DELETING, self.STATE_DELETED]: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_DELETED + ) + try: + self.connection.create_file_system(**params) + changed = True + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Unable to create file system.") + + # we always wait for the state to be available when creating. + # if we try to take any actions on the file system before it's available + # we'll throw errors + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_AVAILABLE, + self.wait_timeout + ) + + return changed + + def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mibps): + """ + Update filesystem with new throughput settings + """ + changed = False + state = self.get_file_system_state(name) + if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: + fs_id = self.get_file_system_id(name) + current_mode = self.get_throughput_mode(FileSystemId=fs_id) + current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id) + params = dict() + if throughput_mode and throughput_mode != current_mode: + params['ThroughputMode'] = throughput_mode + if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput: + params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps + if len(params) > 0: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_AVAILABLE, + self.wait_timeout + ) + try: + self.connection.update_file_system(FileSystemId=fs_id, **params) + changed = True + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Unable to update file system.") + return changed + + def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps): + """ + Change attributes (mount targets and tags) of filesystem by name + """ + result = False + fs_id = self.get_file_system_id(name) + + if tags is not None: + tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags) + + if tags_to_delete: + try: + self.connection.delete_tags( + FileSystemId=fs_id, + TagKeys=tags_to_delete + ) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Unable to delete tags.") + + result = True + + if tags_need_modify: + try: + self.connection.create_tags( + FileSystemId=fs_id, + Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) + ) + except (ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Unable to create tags.") + + result = True + + if targets is not None: + incomplete_states = [self.STATE_CREATING, self.STATE_DELETING] + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0 + ) + current_targets = 
_index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id)) + targets = _index_by_key('SubnetId', targets) + + targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, + targets, True) + + # To modify a mount target, it must be deleted and created again + changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'], + current_targets[sid], targets[sid])] + targets_to_delete = list(targets_to_delete) + changed + targets_to_create = list(targets_to_create) + changed + + if targets_to_delete: + for sid in targets_to_delete: + self.connection.delete_mount_target( + MountTargetId=current_targets[sid]['MountTargetId'] + ) + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0 + ) + result = True + + if targets_to_create: + for sid in targets_to_create: + self.connection.create_mount_target( + FileSystemId=fs_id, + **targets[sid] + ) + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0, + self.wait_timeout + ) + result = True + + # If no security groups were passed into the module, then do not change it. + security_groups_to_update = [sid for sid in intersection if + 'SecurityGroups' in targets[sid] and + current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']] + + if security_groups_to_update: + for sid in security_groups_to_update: + self.connection.modify_mount_target_security_groups( + MountTargetId=current_targets[sid]['MountTargetId'], + SecurityGroups=targets[sid].get('SecurityGroups', None) + ) + result = True + + return result + + def delete_file_system(self, name, file_system_id=None): + """ + Removes EFS instance by id/name + """ + result = False + state = self.get_file_system_state(name, file_system_id) + if state in [self.STATE_CREATING, self.STATE_AVAILABLE]: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_AVAILABLE + ) + if not file_system_id: + file_system_id = self.get_file_system_id(name) + self.delete_mount_targets(file_system_id) + self.connection.delete_file_system(FileSystemId=file_system_id) + result = True + + if self.wait: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_DELETED, + self.wait_timeout + ) + + return result + + def delete_mount_targets(self, file_system_id): + """ + Removes mount targets by EFS id + """ + wait_for( + lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)), + 0 + ) + + targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE) + for target in targets: + self.connection.delete_mount_target(MountTargetId=target['MountTargetId']) + + wait_for( + lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)), + 0 + ) + + return len(targets) > 0 + + +def iterate_all(attr, map_method, **kwargs): + """ + Creates an iterator over a paginated result set, retrying on throttling + """ + args = dict((key, value) for (key, value) in kwargs.items() if value is not None) + wait = 1 + while True: + try: + data = map_method(**args) + for elm in data[attr]: + yield elm + if 'NextMarker' in data: + args['Marker'] = data['NextMarker'] + continue + break + except ClientError as e: + if e.response['Error']['Code'] == "ThrottlingException" and wait < 600: + sleep(wait) + wait = wait * 2 + continue + else: + raise + + +def targets_equal(keys, a, b): + """ + Compares two mount targets by the specified attributes + """ + for key in keys: + if key in b and a[key] != b[key]: + return False + + return True
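 + + +# Note: with by_key=True, dict_diff returns (keys only in dict2, keys in both, keys only in dict1); +# converge_file_system relies on this ordering to derive (targets_to_create, intersection, targets_to_delete). +def 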
dict_diff(dict1, dict2, by_key=False): + """ + Helper method to calculate difference of two dictionaries + """ + keys1 = set(dict1.keys() if by_key else dict1.items()) + keys2 = set(dict2.keys() if by_key else dict2.items()) + + intersection = keys1 & keys2 + + return keys2 ^ intersection, intersection, keys1 ^ intersection + + +def first_or_default(items, default=None): + """ + Helper method to fetch first element of list (if exists) + """ + for item in items: + return item + return default + + +def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS): + """ + Helper method to wait for desired value returned by callback method + """ + wait_start = timestamp() + while True: + if callback() != value: + if timeout != 0 and (timestamp() - wait_start > timeout): + raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)') + else: + sleep(5) + continue + break + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + encrypt=dict(required=False, type="bool", default=False), + state=dict(required=False, type='str', choices=["present", "absent"], default="present"), + kms_key_id=dict(required=False, type='str', default=None), + purge_tags=dict(default=True, type='bool'), + id=dict(required=False, type='str', default=None), + name=dict(required=False, type='str', default=None), + tags=dict(required=False, type="dict", default={}), + targets=dict(required=False, type="list", default=[]), + performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), + throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None), + provisioned_throughput_in_mibps=dict(required=False, type='float'), + wait=dict(required=False, type="bool", default=False), + wait_timeout=dict(required=False, type="int", default=0) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + connection = EFSConnection(module) + + name = module.params.get('name') + fs_id = module.params.get('id') + tags = module.params.get('tags') + target_translations = { + 'ip_address': 'IpAddress', + 'security_groups': 'SecurityGroups', + 'subnet_id': 'SubnetId' + } + targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')] + performance_mode_translations = { + 'general_purpose': 'generalPurpose', + 'max_io': 'maxIO' + } + encrypt = module.params.get('encrypt') + kms_key_id = module.params.get('kms_key_id') + performance_mode = performance_mode_translations[module.params.get('performance_mode')] + purge_tags = module.params.get('purge_tags') + throughput_mode = module.params.get('throughput_mode') + provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps') + state = str(module.params.get('state')).lower() + changed = False + + if state == 'present': + if not name: + module.fail_json(msg='Name parameter is required for create') + + changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps) + if connection.supports_provisioned_mode(): + changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed + changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets, + throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed + result = first_or_default(connection.get_file_systems(CreationToken=name)) + 
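+ # NOTE: 'result' still holds the raw CamelCase description from describe_file_systems here; + # it is converted to snake_case just before module.exit_json below.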
+ elif state == 'absent': + if not name and not fs_id: + module.fail_json(msg='Either name or id parameter is required for delete') + + changed = connection.delete_file_system(name, fs_id) + result = None + if result: + result = camel_dict_to_snake_dict(result) + module.exit_json(changed=changed, efs=result) + + +if __name__ == '__main__': + main() diff --git a/efs_facts.py b/efs_facts.py new file mode 120000 index 00000000000..781c362da4b --- /dev/null +++ b/efs_facts.py @@ -0,0 +1 @@ +efs_info.py \ No newline at end of file diff --git a/efs_info.py b/efs_info.py new file mode 100644 index 00000000000..ca59d179c12 --- /dev/null +++ b/efs_info.py @@ -0,0 +1,401 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: efs_info +short_description: Get information about Amazon EFS file systems +description: + - This module can be used to search Amazon EFS file systems. + - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(efs_info) module no longer returns C(ansible_facts)! +requirements: [ boto3 ] +author: + - "Ryan Sydnor (@ryansydnor)" +options: + name: + description: + - Creation Token of Amazon EFS file system. + aliases: [ creation_token ] + type: str + id: + description: + - ID of Amazon EFS. + type: str + tags: + description: + - List of tags of Amazon EFS. Should be defined as dictionary. + type: dict + targets: + description: + - List of targets on which to filter the returned results. + - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address. 
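+ - For example, C(targets=['subnet-1a2b3c4d', 'sg-4d3c2b1a']) matches only file systems that have a mount target in that subnet using that security group (the same illustrative IDs as in the examples below).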
+ type: list + elements: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Find all existing efs + efs_info: + register: result + +- name: Find efs using id + efs_info: + id: fs-1234abcd + register: result + +- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' + efs_info: + tags: + Name: myTestNameTag + targets: + - subnet-1a2b3c4d + - sg-4d3c2b1a + register: result + +- debug: + msg: "{{ result['efs'] }}" +''' + +RETURN = ''' +creation_time: + description: timestamp of creation date + returned: always + type: str + sample: "2015-11-16 07:30:57-05:00" +creation_token: + description: EFS creation token + returned: always + type: str + sample: console-88609e04-9a0e-4a2e-912c-feaa99509961 +file_system_id: + description: ID of the file system + returned: always + type: str + sample: fs-xxxxxxxx +life_cycle_state: + description: state of the EFS file system + returned: always + type: str + sample: creating, available, deleting, deleted +mount_point: + description: url of file system with leading dot from the time AWS EFS required to add network suffix to EFS address + returned: always + type: str + sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ +filesystem_address: + description: url of file system + returned: always + type: str + sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ +mount_targets: + description: list of mount targets + returned: always + type: list + sample: + [ + { + "file_system_id": "fs-a7ad440e", + "ip_address": "172.31.17.173", + "life_cycle_state": "available", + "mount_target_id": "fsmt-d8907871", + "network_interface_id": "eni-6e387e26", + "owner_id": "740748460359", + "security_groups": [ + "sg-a30b22c6" + ], + "subnet_id": "subnet-e265c895" + }, + ... 
+ ] +name: + description: name of the file system + returned: always + type: str + sample: my-efs +number_of_mount_targets: + description: the number of targets mounted + returned: always + type: int + sample: 3 +owner_id: + description: AWS account ID of EFS owner + returned: always + type: str + sample: XXXXXXXXXXXX +size_in_bytes: + description: size of the file system in bytes as of a timestamp + returned: always + type: dict + sample: + { + "timestamp": "2015-12-21 13:59:59-05:00", + "value": 12288 + } +performance_mode: + description: performance mode of the file system + returned: always + type: str + sample: "generalPurpose" +throughput_mode: + description: mode of throughput for the file system + returned: when botocore >= 1.10.57 + type: str + sample: "bursting" +provisioned_throughput_in_mibps: + description: throughput provisioned in Mibps + returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned" + type: float + sample: 15.0 +tags: + description: tags on the efs instance + returned: always + type: dict + sample: + { + "name": "my-efs", + "key": "Value" + } + +''' + + +from collections import defaultdict + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, AWSRetry +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict +from ansible.module_utils._text import to_native + + +class EFSConnection(object): + STATE_CREATING = 'creating' + STATE_AVAILABLE = 'available' + STATE_DELETING = 'deleting' + STATE_DELETED = 'deleted' + + def __init__(self, module): + try: + self.connection = module.client('efs') + self.module = module + except Exception as e: + module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) + + self.region = get_aws_connection_info(module, boto3=True)[0] + + @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + def list_file_systems(self, **kwargs): + """ + Returns generator of file systems including all attributes of FS + """ + paginator = self.connection.get_paginator('describe_file_systems') + return paginator.paginate(**kwargs).build_full_result()['FileSystems'] + + @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + def get_tags(self, file_system_id): + """ + Returns tag list for selected instance of EFS + """ + paginator = self.connection.get_paginator('describe_tags') + return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags']) + + @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) + def get_mount_targets(self, file_system_id): + """ + Returns mount targets for selected instance of EFS + """ + paginator = self.connection.get_paginator('describe_mount_targets') + return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets'] + + @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException']) + def get_security_groups(self, mount_target_id): + """ + Returns security groups for selected instance of EFS + """ + return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups'] + + def get_mount_targets_data(self, file_systems): + for item in file_systems: + if item['life_cycle_state'] == self.STATE_AVAILABLE: + try: + 
mount_targets = self.get_mount_targets(item['file_system_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get EFS targets") + for mt in mount_targets: + item['mount_targets'].append(camel_dict_to_snake_dict(mt)) + return file_systems + + def get_security_groups_data(self, file_systems): + for item in file_systems: + if item['life_cycle_state'] == self.STATE_AVAILABLE: + for target in item['mount_targets']: + if target['life_cycle_state'] == self.STATE_AVAILABLE: + try: + target['security_groups'] = self.get_security_groups(target['mount_target_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get EFS security groups") + else: + target['security_groups'] = [] + else: + item['tags'] = {} + item['mount_targets'] = [] + return file_systems + + def get_file_systems(self, file_system_id=None, creation_token=None): + kwargs = dict() + if file_system_id: + kwargs['FileSystemId'] = file_system_id + if creation_token: + kwargs['CreationToken'] = creation_token + try: + file_systems = self.list_file_systems(**kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get EFS file systems") + + results = list() + for item in file_systems: + item['CreationTime'] = str(item['CreationTime']) + """ + In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it + AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose + And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount) + AWS documentation is available here: + U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) + """ + item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + + if 'Timestamp' in item['SizeInBytes']: + item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + result = camel_dict_to_snake_dict(item) + result['tags'] = {} + result['mount_targets'] = [] + # Set tags *after* doing camel to snake + if result['life_cycle_state'] == self.STATE_AVAILABLE: + try: + result['tags'] = self.get_tags(result['file_system_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get EFS tags") + results.append(result) + return results + + +def prefix_to_attr(attr_id): + """ + Helper method to convert ID prefix to mount target attribute + """ + attr_by_prefix = { + 'fsmt-': 'mount_target_id', + 'subnet-': 'subnet_id', + 'eni-': 'network_interface_id', + 'sg-': 'security_groups' + } + return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items() + if str(attr_id).startswith(prefix)], 'ip_address') + + +def first_or_default(items, default=None): + """ + Helper method to fetch first element of list (if exists) + """ + for item in items: + return item + return default + + +def has_tags(available, required): + """ + Helper method to determine if tag requested already exists + """ + for key, value in required.items(): + if key not in available or value != available[key]: + return False + return True + + +def has_targets(available, required): + """ + Helper method to determine if mount target 
requested already exists + """ + grouped = group_list_of_dict(available) + for (value, field) in required: + if field not in grouped or value not in grouped[field]: + return False + return True + + +def group_list_of_dict(array): + """ + Helper method to group list of dict to dict with all possible values + """ + result = defaultdict(list) + for item in array: + for key, value in item.items(): + result[key] += value if isinstance(value, list) else [value] + return result + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + id=dict(), + name=dict(aliases=['creation_token']), + tags=dict(type="dict", default={}), + targets=dict(type="list", default=[]) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + is_old_facts = module._name == 'efs_facts' + if is_old_facts: + module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + connection = EFSConnection(module) + + name = module.params.get('name') + fs_id = module.params.get('id') + tags = module.params.get('tags') + targets = module.params.get('targets') + + file_systems_info = connection.get_file_systems(fs_id, name) + + if tags: + file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)] + + file_systems_info = connection.get_mount_targets_data(file_systems_info) + file_systems_info = connection.get_security_groups_data(file_systems_info) + + if targets: + targets = [(item, prefix_to_attr(item)) for item in targets] + file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)] + + if is_old_facts: + module.exit_json(changed=False, ansible_facts={'efs': file_systems_info}) + else: + module.exit_json(changed=False, efs=file_systems_info) + + +if __name__ == '__main__': + main() diff --git a/elasticache.py b/elasticache.py new file mode 100644 index 00000000000..e930ab40393 --- /dev/null +++ b/elasticache.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: elasticache +short_description: Manage cache clusters in Amazon ElastiCache +description: + - Manage cache clusters in Amazon ElastiCache. + - Returns information about the specified cache cluster. +requirements: [ boto3 ] +author: "Jim Dalton (@jsdalton)" +options: + state: + description: + - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. + - C(rebooted) will reboot the cluster, resulting in a momentary outage. + choices: ['present', 'absent', 'rebooted'] + required: true + type: str + name: + description: + - The cache cluster identifier. + required: true + type: str + engine: + description: + - Name of the cache engine to be used. + - Supported values are C(redis) and C(memcached). + default: memcached + type: str + cache_engine_version: + description: + - The version number of the cache engine. + type: str + node_type: + description: + - The compute and memory capacity of the nodes in the cache cluster. + default: cache.t2.small + type: str + num_nodes: + description: + - The initial number of cache nodes that the cache cluster will have. 
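+ - For example, I(num_nodes=2) with the default C(memcached) engine creates a two-node cluster.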
+ - Required when I(state=present). + type: int + default: 1 + cache_port: + description: + - The port number on which each of the cache nodes will accept + connections. + type: int + cache_parameter_group: + description: + - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group + for the specified engine will be used. + aliases: [ 'parameter_group' ] + type: str + cache_subnet_group: + description: + - The subnet group name to associate with. Only use if inside a vpc. + - Required if inside a vpc + type: str + security_group_ids: + description: + - A list of vpc security group IDs to associate with this cache cluster. Only use if inside a vpc. + type: list + elements: str + cache_security_groups: + description: + - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc. + type: list + elements: str + zone: + description: + - The EC2 Availability Zone in which the cache cluster will be created. + type: str + wait: + description: + - Wait for cache cluster result before returning. + type: bool + default: true + hard_modify: + description: + - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state. + type: bool + default: false +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Basic example +- elasticache: + name: "test-please-delete" + state: present + engine: memcached + cache_engine_version: 1.4.14 + node_type: cache.m1.small + num_nodes: 1 + cache_port: 11211 + cache_security_groups: + - default + zone: us-east-1d + + +# Ensure cache cluster is gone +- elasticache: + name: "test-please-delete" + state: absent + +# Reboot cache cluster +- elasticache: + name: "test-please-delete" + state: rebooted + +""" +from time import sleep +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict + +try: + import boto3 + import botocore +except ImportError: + pass # will be detected by imported HAS_BOTO3 + + +class ElastiCacheManager(object): + + """Handles elasticache creation and destruction""" + + EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] + + def __init__(self, module, name, engine, cache_engine_version, node_type, + num_nodes, cache_port, cache_parameter_group, cache_subnet_group, + cache_security_groups, security_group_ids, zone, wait, + hard_modify, region, **aws_connect_kwargs): + self.module = module + self.name = name + self.engine = engine.lower() + self.cache_engine_version = cache_engine_version + self.node_type = node_type + self.num_nodes = num_nodes + self.cache_port = cache_port + self.cache_parameter_group = cache_parameter_group + self.cache_subnet_group = cache_subnet_group + self.cache_security_groups = cache_security_groups + self.security_group_ids = security_group_ids + self.zone = zone + self.wait = wait + self.hard_modify = hard_modify + + self.region = region + self.aws_connect_kwargs = aws_connect_kwargs + + self.changed = False + self.data = None + self.status = 'gone' + self.conn = self._get_elasticache_connection() + self._refresh_data() + + def ensure_present(self): + 
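+        # Illustrative dispatch only, mirroring how main() below drives this
+        # class (an informal sketch, not additional behaviour):
+        #   manager.ensure_present()   # -> create() or sync(), based on status
+        #   manager.ensure_absent()    # -> delete() when the cluster exists
+        #   manager.ensure_rebooted()  # -> reboot() once the cluster is available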
"""Ensure cache cluster exists or create it if not""" + if self.exists(): + self.sync() + else: + self.create() + + def ensure_absent(self): + """Ensure cache cluster is gone or delete it if not""" + self.delete() + + def ensure_rebooted(self): + """Ensure cache cluster is gone or delete it if not""" + self.reboot() + + def exists(self): + """Check if cache cluster exists""" + return self.status in self.EXIST_STATUSES + + def create(self): + """Create an ElastiCache cluster""" + if self.status == 'available': + return + if self.status in ['creating', 'rebooting', 'modifying']: + if self.wait: + self._wait_for_status('available') + return + if self.status == 'deleting': + if self.wait: + self._wait_for_status('gone') + else: + msg = "'%s' is currently deleting. Cannot create." + self.module.fail_json(msg=msg % self.name) + + kwargs = dict(CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeType=self.node_type, + Engine=self.engine, + EngineVersion=self.cache_engine_version, + CacheSecurityGroupNames=self.cache_security_groups, + SecurityGroupIds=self.security_group_ids, + CacheParameterGroupName=self.cache_parameter_group, + CacheSubnetGroupName=self.cache_subnet_group) + if self.cache_port is not None: + kwargs['Port'] = self.cache_port + if self.zone is not None: + kwargs['PreferredAvailabilityZone'] = self.zone + + try: + self.conn.create_cache_cluster(**kwargs) + + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg=e.message, exception=format_exc(), + **camel_dict_to_snake_dict(e.response)) + + self._refresh_data() + + self.changed = True + if self.wait: + self._wait_for_status('available') + return True + + def delete(self): + """Destroy an ElastiCache cluster""" + if self.status == 'gone': + return + if self.status == 'deleting': + if self.wait: + self._wait_for_status('gone') + return + if self.status in ['creating', 'rebooting', 'modifying']: + if self.wait: + self._wait_for_status('available') + else: + msg = "'%s' is currently %s. Cannot delete." + self.module.fail_json(msg=msg % (self.name, self.status)) + + try: + response = self.conn.delete_cache_cluster(CacheClusterId=self.name) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg=e.message, exception=format_exc(), + **camel_dict_to_snake_dict(e.response)) + + cache_cluster_data = response['CacheCluster'] + self._refresh_data(cache_cluster_data) + + self.changed = True + if self.wait: + self._wait_for_status('gone') + + def sync(self): + """Sync settings to cluster if required""" + if not self.exists(): + msg = "'%s' is %s. Cannot sync." + self.module.fail_json(msg=msg % (self.name, self.status)) + + if self.status in ['creating', 'rebooting', 'modifying']: + if self.wait: + self._wait_for_status('available') + else: + # Cluster can only be synced if available. If we can't wait + # for this, then just be done. + return + + if self._requires_destroy_and_create(): + if not self.hard_modify: + msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." + self.module.fail_json(msg=msg % self.name) + if not self.wait: + msg = "'%s' requires destructive modification. 'wait' must be set to true." + self.module.fail_json(msg=msg % self.name) + self.delete() + self.create() + return + + if self._requires_modification(): + self.modify() + + def modify(self): + """Modify the cache cluster. 
Note it's only possible to modify a few select options.""" + nodes_to_remove = self._get_nodes_to_remove() + try: + self.conn.modify_cache_cluster(CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeIdsToRemove=nodes_to_remove, + CacheSecurityGroupNames=self.cache_security_groups, + CacheParameterGroupName=self.cache_parameter_group, + SecurityGroupIds=self.security_group_ids, + ApplyImmediately=True, + EngineVersion=self.cache_engine_version) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg=e.message, exception=format_exc(), + **camel_dict_to_snake_dict(e.response)) + + self._refresh_data() + + self.changed = True + if self.wait: + self._wait_for_status('available') + + def reboot(self): + """Reboot the cache cluster""" + if not self.exists(): + msg = "'%s' is %s. Cannot reboot." + self.module.fail_json(msg=msg % (self.name, self.status)) + if self.status == 'rebooting': + return + if self.status in ['creating', 'modifying']: + if self.wait: + self._wait_for_status('available') + else: + msg = "'%s' is currently %s. Cannot reboot." + self.module.fail_json(msg=msg % (self.name, self.status)) + + # Collect ALL nodes for reboot + cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + try: + self.conn.reboot_cache_cluster(CacheClusterId=self.name, + CacheNodeIdsToReboot=cache_node_ids) + except botocore.exceptions.ClientError as e: + self.module.fail_json(msg=e.message, exception=format_exc(), + **camel_dict_to_snake_dict(e.response)) + + self._refresh_data() + + self.changed = True + if self.wait: + self._wait_for_status('available') + + def get_info(self): + """Return basic info about the cache cluster""" + info = { + 'name': self.name, + 'status': self.status + } + if self.data: + info['data'] = self.data + return info + + def _wait_for_status(self, awaited_status): + """Wait for status to change from present status to awaited_status""" + status_map = { + 'creating': 'available', + 'rebooting': 'available', + 'modifying': 'available', + 'deleting': 'gone' + } + if self.status == awaited_status: + # No need to wait, we're already done + return + if status_map[self.status] != awaited_status: + msg = "Invalid awaited status. '%s' cannot transition to '%s'" + self.module.fail_json(msg=msg % (self.status, awaited_status)) + + if awaited_status not in set(status_map.values()): + msg = "'%s' is not a valid awaited status." 
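+            # For example, awaited_status='rebooting' is rejected here: only
+            # the terminal values in status_map ('available' and 'gone') can
+            # be awaited.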
+ self.module.fail_json(msg=msg % awaited_status) + + while True: + sleep(1) + self._refresh_data() + if self.status == awaited_status: + break + + def _requires_modification(self): + """Check if cluster requires (nondestructive) modification""" + # Check modifiable data attributes + modifiable_data = { + 'NumCacheNodes': self.num_nodes, + 'EngineVersion': self.cache_engine_version + } + for key, value in modifiable_data.items(): + if value is not None and value and self.data[key] != value: + return True + + # Check cache security groups + cache_security_groups = [] + for sg in self.data['CacheSecurityGroups']: + cache_security_groups.append(sg['CacheSecurityGroupName']) + if set(cache_security_groups) != set(self.cache_security_groups): + return True + + # check vpc security groups + if self.security_group_ids: + vpc_security_groups = [] + security_groups = self.data['SecurityGroups'] or [] + for sg in security_groups: + vpc_security_groups.append(sg['SecurityGroupId']) + if set(vpc_security_groups) != set(self.security_group_ids): + return True + + return False + + def _requires_destroy_and_create(self): + """ + Check whether a destroy and create is required to synchronize cluster. + """ + unmodifiable_data = { + 'node_type': self.data['CacheNodeType'], + 'engine': self.data['Engine'], + 'cache_port': self._get_port() + } + # Only check for modifications if zone is specified + if self.zone is not None: + unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] + for key, value in unmodifiable_data.items(): + if getattr(self, key) is not None and getattr(self, key) != value: + return True + return False + + def _get_elasticache_connection(self): + """Get an elasticache connection""" + region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True) + if region: + return boto3_conn(self.module, conn_type='client', resource='elasticache', + region=region, endpoint=ec2_url, **aws_connect_params) + else: + self.module.fail_json(msg="region must be specified") + + def _get_port(self): + """Get the port. Where this information is retrieved from is engine dependent.""" + if self.data['Engine'] == 'memcached': + return self.data['ConfigurationEndpoint']['Port'] + elif self.data['Engine'] == 'redis': + # Redis only supports a single node (presently) so just use + # the first and only + return self.data['CacheNodes'][0]['Endpoint']['Port'] + + def _refresh_data(self, cache_cluster_data=None): + """Refresh data about this cache cluster""" + + if cache_cluster_data is None: + try: + response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'CacheClusterNotFound': + self.data = None + self.status = 'gone' + return + else: + self.module.fail_json(msg=e.message, exception=format_exc(), + **camel_dict_to_snake_dict(e.response)) + cache_cluster_data = response['CacheClusters'][0] + self.data = cache_cluster_data + self.status = self.data['CacheClusterStatus'] + + # The documentation for elasticache lies -- status on rebooting is set + # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it + # here to make status checks etc. more sane. 
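+        # e.g. describe_cache_clusters() can report
+        #     CacheClusterStatus: 'rebooting cache cluster nodes'
+        # which is collapsed to plain 'rebooting' so that EXIST_STATUSES and
+        # the status_map in _wait_for_status() keep working.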
+ if self.status == 'rebooting cache cluster nodes': + self.status = 'rebooting' + + def _get_nodes_to_remove(self): + """If there are nodes to remove, it figures out which need to be removed""" + num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes + if num_nodes_to_remove <= 0: + return [] + + if not self.hard_modify: + msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." + self.module.fail_json(msg=msg % self.name) + + cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + return cache_node_ids[-num_nodes_to_remove:] + + +def main(): + """ elasticache ansible module """ + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent', 'rebooted']), + name=dict(required=True), + engine=dict(default='memcached'), + cache_engine_version=dict(default=""), + node_type=dict(default='cache.t2.small'), + num_nodes=dict(default=1, type='int'), + # alias for compat with the original PR 1950 + cache_parameter_group=dict(default="", aliases=['parameter_group']), + cache_port=dict(type='int'), + cache_subnet_group=dict(default=""), + cache_security_groups=dict(default=[], type='list'), + security_group_ids=dict(default=[], type='list'), + zone=dict(), + wait=dict(default=True, type='bool'), + hard_modify=dict(type='bool') + )) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + name = module.params['name'] + state = module.params['state'] + engine = module.params['engine'] + cache_engine_version = module.params['cache_engine_version'] + node_type = module.params['node_type'] + num_nodes = module.params['num_nodes'] + cache_port = module.params['cache_port'] + cache_subnet_group = module.params['cache_subnet_group'] + cache_security_groups = module.params['cache_security_groups'] + security_group_ids = module.params['security_group_ids'] + zone = module.params['zone'] + wait = module.params['wait'] + hard_modify = module.params['hard_modify'] + cache_parameter_group = module.params['cache_parameter_group'] + + if cache_subnet_group and cache_security_groups: + module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups") + + if state == 'present' and not num_nodes: + module.fail_json(msg="'num_nodes' is a required parameter. 
Please specify num_nodes > 0") + + elasticache_manager = ElastiCacheManager(module, name, engine, + cache_engine_version, node_type, + num_nodes, cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, zone, wait, + hard_modify, region, **aws_connect_kwargs) + + if state == 'present': + elasticache_manager.ensure_present() + elif state == 'absent': + elasticache_manager.ensure_absent() + elif state == 'rebooted': + elasticache_manager.ensure_rebooted() + + facts_result = dict(changed=elasticache_manager.changed, + elasticache=elasticache_manager.get_info()) + + module.exit_json(**facts_result) + + +if __name__ == '__main__': + main() diff --git a/elasticache_facts.py b/elasticache_facts.py new file mode 120000 index 00000000000..d6cd32eb0c5 --- /dev/null +++ b/elasticache_facts.py @@ -0,0 +1 @@ +elasticache_info.py \ No newline at end of file diff --git a/elasticache_info.py b/elasticache_info.py new file mode 100644 index 00000000000..359cca82206 --- /dev/null +++ b/elasticache_info.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: elasticache_info +short_description: Retrieve information for AWS ElastiCache clusters +description: + - Retrieve information from AWS ElastiCache clusters + - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change. +options: + name: + description: + - The name of an ElastiCache cluster. + type: str + +author: + - Will Thames (@willthames) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: obtain all ElastiCache information + elasticache_info: + +- name: obtain all information for a single ElastiCache cluster + elasticache_info: + name: test_elasticache +''' + +RETURN = ''' +elasticache_clusters: + description: List of ElastiCache clusters + returned: always + type: complex + contains: + auto_minor_version_upgrade: + description: Whether to automatically upgrade to minor versions + returned: always + type: bool + sample: true + cache_cluster_create_time: + description: Date and time cluster was created + returned: always + type: str + sample: '2017-09-15T05:43:46.038000+00:00' + cache_cluster_id: + description: ID of the cache cluster + returned: always + type: str + sample: abcd-1234-001 + cache_cluster_status: + description: Status of ElastiCache cluster + returned: always + type: str + sample: available + cache_node_type: + description: Instance type of ElastiCache nodes + returned: always + type: str + sample: cache.t2.micro + cache_nodes: + description: List of ElastiCache nodes in the cluster + returned: always + type: complex + contains: + cache_node_create_time: + description: Date and time node was created + returned: always + type: str + sample: '2017-09-15T05:43:46.038000+00:00' + cache_node_id: + description: ID of the cache node + returned: always + type: str + sample: '0001' + cache_node_status: + description: Status of the cache node + returned: always + type: str + sample: available + customer_availability_zone: + description: Availability Zone in which the cache node was created + returned: always + type: str + sample: ap-southeast-2b + endpoint: + 
description: Connection details for the cache node + returned: always + type: complex + contains: + address: + description: URL of the cache node endpoint + returned: always + type: str + sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com + port: + description: Port of the cache node endpoint + returned: always + type: int + sample: 6379 + parameter_group_status: + description: Status of the Cache Parameter Group + returned: always + type: str + sample: in-sync + cache_parameter_group: + description: Contents of the Cache Parameter Group + returned: always + type: complex + contains: + cache_node_ids_to_reboot: + description: Cache nodes which need to be rebooted for parameter changes to be applied + returned: always + type: list + sample: [] + cache_parameter_group_name: + description: Name of the cache parameter group + returned: always + type: str + sample: default.redis3.2 + parameter_apply_status: + description: Status of parameter updates + returned: always + type: str + sample: in-sync + cache_security_groups: + description: Security Groups used by the cache + returned: always + type: list + sample: + - 'sg-abcd1234' + cache_subnet_group_name: + description: ElastiCache Subnet Group used by the cache + returned: always + type: str + sample: abcd-subnet-group + client_download_landing_page: + description: URL of client download web page + returned: always + type: str + sample: 'https://console.aws.amazon.com/elasticache/home#client-download:' + engine: + description: Engine used by ElastiCache + returned: always + type: str + sample: redis + engine_version: + description: Version of ElastiCache engine + returned: always + type: str + sample: 3.2.4 + notification_configuration: + description: Configuration of notifications + returned: if notifications are enabled + type: complex + contains: + topic_arn: + description: ARN of notification destination topic + returned: if notifications are enabled + type: str + sample: arn:aws:sns:*:123456789012:my_topic + topic_name: + description: Name of notification destination topic + returned: if notifications are enabled + type: str + sample: MyTopic + num_cache_nodes: + description: Number of Cache Nodes + returned: always + type: int + sample: 1 + pending_modified_values: + description: Values that are pending modification + returned: always + type: complex + contains: {} + preferred_availability_zone: + description: Preferred Availability Zone + returned: always + type: str + sample: ap-southeast-2b + preferred_maintenance_window: + description: Time slot for preferred maintenance window + returned: always + type: str + sample: sat:12:00-sat:13:00 + replication_group_id: + description: Replication Group Id + returned: always + type: str + sample: replication-001 + security_groups: + description: List of Security Groups associated with ElastiCache + returned: always + type: complex + contains: + security_group_id: + description: Security Group ID + returned: always + type: str + sample: sg-abcd1234 + status: + description: Status of Security Group + returned: always + type: str + sample: active + tags: + description: Tags applied to the ElastiCache cluster + returned: always + type: complex + contains: {} + sample: + Application: web + Environment: test +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict + + +try: + import 
botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def describe_cache_clusters_with_backoff(client, cluster_id=None):
+    paginator = client.get_paginator('describe_cache_clusters')
+    params = dict(ShowCacheNodeInfo=True)
+    if cluster_id:
+        params['CacheClusterId'] = cluster_id
+    try:
+        response = paginator.paginate(**params).build_full_result()
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'CacheClusterNotFound':
+            return []
+        raise
+    except botocore.exceptions.BotoCoreError:
+        raise
+    return response['CacheClusters']
+
+
+@AWSRetry.exponential_backoff()
+def get_elasticache_tags_with_backoff(client, cluster_id):
+    return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
+
+
+def get_aws_account_id(module):
+    try:
+        client = module.client('sts')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Can't authorize connection")
+
+    try:
+        return client.get_caller_identity()['Account']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+
+
+def get_elasticache_clusters(client, module):
+    region = get_aws_connection_info(module, boto3=True)[0]
+    try:
+        clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
+
+    account_id = get_aws_account_id(module)
+    results = []
+    for cluster in clusters:
+
+        cluster = camel_dict_to_snake_dict(cluster)
+        arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+        try:
+            tags = get_elasticache_tags_with_backoff(client, arn)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get tags for cluster %s" % cluster['cache_cluster_id'])
+
+        cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+        results.append(cluster)
+    return results
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=False),
+    )
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+    if module._name == 'elasticache_facts':
+        module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", version='2.13')
+
+    client = module.client('elasticache')
+
+    module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py
new file mode 100644
index 00000000000..d9d8d19788f
--- /dev/null
+++ b/elasticache_parameter_group.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_parameter_group
+short_description: Manage cache parameter groups in Amazon ElastiCache.
+description:
+  - Manage cache parameter groups in Amazon ElastiCache.
+  - Returns information about the specified cache parameter group.
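+# Informal state summary: state=present creates the group, or modifies its
+# parameters when 'values' is supplied; state=reset reverts parameters to
+# their default values; state=absent deletes the group. See EXAMPLES below.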
+author: "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ boto3, botocore ] +options: + group_family: + description: + - The name of the cache parameter group family that the cache parameter group can be used with. + Required when creating a cache parameter group. + choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0'] + type: str + name: + description: + - A user-specified name for the cache parameter group. + required: yes + type: str + description: + description: + - A user-specified description for the cache parameter group. + type: str + state: + description: + - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed. + choices: ['present', 'absent', 'reset'] + required: true + type: str + values: + description: + - A user-specified dictionary of parameters to reset or modify for the cache parameter group. + type: dict +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. +--- +- hosts: localhost + connection: local + tasks: + - name: 'Create a test parameter group' + elasticache_parameter_group: + name: 'test-param-group' + group_family: 'redis3.2' + description: 'This is a cache parameter group' + state: 'present' + - name: 'Modify a test parameter group' + elasticache_parameter_group: + name: 'test-param-group' + values: + activerehashing: yes + client-output-buffer-limit-normal-hard-limit: 4 + state: 'present' + - name: 'Reset all modifiable parameters for the test parameter group' + elasticache_parameter_group: + name: 'test-param-group' + state: reset + - name: 'Delete a test parameter group' + elasticache_parameter_group: + name: 'test-param-group' + state: 'absent' +""" + +RETURN = """ +elasticache: + description: cache parameter group information and response metadata + returned: always + type: dict + sample: + cache_parameter_group: + cache_parameter_group_family: redis3.2 + cache_parameter_group_name: test-please-delete + description: "initial description" + response_metadata: + http_headers: + content-length: "562" + content-type: text/xml + date: "Mon, 06 Feb 2017 22:14:08 GMT" + x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1 + http_status_code: 200 + request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1 + retry_attempts: 0 +changed: + description: if the cache parameter group has changed + returned: always + type: bool + sample: + changed: true +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict +from ansible.module_utils._text import to_text +from ansible.module_utils.six import string_types +import traceback + +try: + import boto3 + import botocore + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def create(module, conn, name, group_family, description): + """ Create ElastiCache parameter group. 
""" + try: + response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + return response, changed + + +def delete(module, conn, name): + """ Delete ElastiCache parameter group. """ + try: + conn.delete_cache_parameter_group(CacheParameterGroupName=name) + response = {} + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + return response, changed + + +def make_current_modifiable_param_dict(module, conn, name): + """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" + current_info = get_info(conn, name) + if current_info is False: + module.fail_json(msg="Could not connect to the cache parameter group %s." % name) + + parameters = current_info["Parameters"] + modifiable_params = {} + + for param in parameters: + if param["IsModifiable"]: + modifiable_params[param["ParameterName"]] = [param.get("AllowedValues")] + modifiable_params[param["ParameterName"]].append(param["DataType"]) + modifiable_params[param["ParameterName"]].append(param.get("ParameterValue")) + return modifiable_params + + +def check_valid_modification(module, values, modifiable_params): + """ Check if the parameters and values in values are valid. """ + changed_with_update = False + + for parameter in values: + new_value = values[parameter] + + # check valid modifiable parameters + if parameter not in modifiable_params: + module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys())) + + # check allowed datatype for modified parameters + str_to_type = {"integer": int, "string": string_types} + expected_type = str_to_type[modifiable_params[parameter][1]] + if not isinstance(new_value, expected_type): + if expected_type == str: + if isinstance(new_value, bool): + values[parameter] = "yes" if new_value else "no" + else: + values[parameter] = to_text(new_value) + elif expected_type == int: + if isinstance(new_value, bool): + values[parameter] = 1 if new_value else 0 + else: + module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % + (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + else: + module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % + (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + + # check allowed values for modifiable parameters + choices = modifiable_params[parameter][0] + if choices: + if not (to_text(new_value) in choices or isinstance(new_value, int)): + module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." % + (new_value, parameter, choices)) + + # check if a new value is different from current value + if to_text(values[parameter]) != modifiable_params[parameter][2]: + changed_with_update = True + + return changed_with_update, values + + +def check_changed_parameter_values(values, old_parameters, new_parameters): + """ Checking if the new values are different than the old values. 
""" + changed_with_update = False + + # if the user specified parameters to reset, only check those for change + if values: + for parameter in values: + if old_parameters[parameter] != new_parameters[parameter]: + changed_with_update = True + break + # otherwise check all to find a change + else: + for parameter in old_parameters: + if old_parameters[parameter] != new_parameters[parameter]: + changed_with_update = True + break + + return changed_with_update + + +def modify(module, conn, name, values): + """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """ + # compares current group parameters with the parameters we've specified to to a value to see if this will change the group + format_parameters = [] + for key in values: + value = to_text(values[key]) + format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + try: + response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + return response + + +def reset(module, conn, name, values): + """ Reset ElastiCache parameter group if the current information is different from the new information. """ + # used to compare with the reset parameters' dict to see if there have been changes + old_parameters_dict = make_current_modifiable_param_dict(module, conn, name) + + format_parameters = [] + + # determine whether to reset all or specific parameters + if values: + all_parameters = False + format_parameters = [] + for key in values: + value = to_text(values[key]) + format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + else: + all_parameters = True + + try: + response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + # determine changed + new_parameters_dict = make_current_modifiable_param_dict(module, conn, name) + changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict) + + return response, changed + + +def get_info(conn, name): + """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. 
""" + try: + data = conn.describe_cache_parameters(CacheParameterGroupName=name) + return data + except botocore.exceptions.ClientError as e: + return False + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']), + name=dict(required=True, type='str'), + description=dict(default='', type='str'), + state=dict(required=True, choices=['present', 'absent', 'reset']), + values=dict(type='dict'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto required for this module') + + parameter_group_family = module.params.get('group_family') + parameter_group_name = module.params.get('name') + group_description = module.params.get('description') + state = module.params.get('state') + values = module.params.get('values') + + # Retrieve any AWS settings from the environment. + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.") + + connection = boto3_conn(module, conn_type='client', + resource='elasticache', region=region, + endpoint=ec2_url, **aws_connect_kwargs) + + exists = get_info(connection, parameter_group_name) + + # check that the needed requirements are available + if state == 'present' and not (exists or parameter_group_family): + module.fail_json(msg="Creating a group requires a family group.") + elif state == 'reset' and not exists: + module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name) + + # Taking action + changed = False + if state == 'present': + if exists: + # confirm that the group exists without any actions + if not values: + response = exists + changed = False + # modify existing group + else: + modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) + changed, values = check_valid_modification(module, values, modifiable_params) + response = modify(module, connection, parameter_group_name, values) + # create group + else: + response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description) + if values: + modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) + changed, values = check_valid_modification(module, values, modifiable_params) + response = modify(module, connection, parameter_group_name, values) + elif state == 'absent': + if exists: + # delete group + response, changed = delete(module, connection, parameter_group_name) + else: + response = {} + changed = False + elif state == 'reset': + response, changed = reset(module, connection, parameter_group_name, values) + + facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response)) + + module.exit_json(**facts_result) + + +if __name__ == '__main__': + main() diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py new file mode 100644 index 00000000000..675e3297801 --- /dev/null +++ b/elasticache_snapshot.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: elasticache_snapshot +short_description: Manage cache snapshots in Amazon ElastiCache +description: + - Manage cache snapshots in Amazon ElastiCache. + - Returns information about the specified snapshot. +author: "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ boto3, botocore ] +options: + name: + description: + - The name of the snapshot we want to create, copy, delete. + required: true + type: str + state: + description: + - Actions that will create, destroy, or copy a snapshot. + required: true + choices: ['present', 'absent', 'copy'] + type: str + replication_id: + description: + - The name of the existing replication group to make the snapshot. + type: str + cluster_id: + description: + - The name of an existing cache cluster in the replication group to make the snapshot. + type: str + target: + description: + - The name of a snapshot copy. + type: str + bucket: + description: + - The s3 bucket to which the snapshot is exported. + type: str +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. +--- +- hosts: localhost + connection: local + tasks: + - name: 'Create a snapshot' + elasticache_snapshot: + name: 'test-snapshot' + state: 'present' + cluster_id: '{{ cluster }}' + replication_id: '{{ replication }}' +""" + +RETURN = """ +response_metadata: + description: response metadata about the snapshot + returned: always + type: dict + sample: + http_headers: + content-length: 1490 + content-type: text/xml + date: Tue, 07 Feb 2017 16:43:04 GMT + x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d + http_status_code: 200 + request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d + retry_attempts: 0 +snapshot: + description: snapshot data + returned: always + type: dict + sample: + auto_minor_version_upgrade: true + cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00 + cache_cluster_id: test-please-delete + cache_node_type: cache.m1.small + cache_parameter_group_name: default.redis3.2 + cache_subnet_group_name: default + engine: redis + engine_version: 3.2.4 + node_snapshots: + cache_node_create_time: 2017-02-01T17:43:58.261000+00:00 + cache_node_id: 0001 + cache_size: + num_cache_nodes: 1 + port: 11211 + preferred_availability_zone: us-east-1d + preferred_maintenance_window: wed:03:00-wed:04:00 + snapshot_name: deletesnapshot + snapshot_retention_limit: 0 + snapshot_source: manual + snapshot_status: creating + snapshot_window: 10:00-11:00 + vpc_id: vpc-c248fda4 +changed: + description: if a snapshot has been created, deleted, or copied + returned: always + type: bool + sample: + changed: true +""" + +import traceback + +try: + import boto3 + import botocore + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict + + +def create(module, connection, replication_id, cluster_id, name): + """ Create an ElastiCache backup. 
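+    Returns ({}, False) when the snapshot already exists
+    (SnapshotAlreadyExistsFault); otherwise (response, True).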
""" + try: + response = connection.create_snapshot(ReplicationGroupId=replication_id, + CacheClusterId=cluster_id, + SnapshotName=name) + changed = True + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault": + response = {} + changed = False + else: + module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc()) + return response, changed + + +def copy(module, connection, name, target, bucket): + """ Copy an ElastiCache backup. """ + try: + response = connection.copy_snapshot(SourceSnapshotName=name, + TargetSnapshotName=target, + TargetBucket=bucket) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc()) + return response, changed + + +def delete(module, connection, name): + """ Delete an ElastiCache backup. """ + try: + response = connection.delete_snapshot(SnapshotName=name) + changed = True + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == "SnapshotNotFoundFault": + response = {} + changed = False + elif e.response['Error']['Code'] == "InvalidSnapshotState": + module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." + "You may need to wait a few minutes.") + else: + module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc()) + return response, changed + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), + replication_id=dict(type='str'), + cluster_id=dict(type='str'), + target=dict(type='str'), + bucket=dict(type='str'), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto required for this module') + + name = module.params.get('name') + state = module.params.get('state') + replication_id = module.params.get('replication_id') + cluster_id = module.params.get('cluster_id') + target = module.params.get('target') + bucket = module.params.get('bucket') + + # Retrieve any AWS settings from the environment. 
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+    if not region:
+        module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+
+    connection = boto3_conn(module, conn_type='client',
+                            resource='elasticache', region=region,
+                            endpoint=ec2_url, **aws_connect_kwargs)
+
+    changed = False
+    response = {}
+
+    if state == 'present':
+        if not all((replication_id, cluster_id)):
+            module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
+        response, changed = create(module, connection, replication_id, cluster_id, name)
+    elif state == 'absent':
+        response, changed = delete(module, connection, name)
+    elif state == 'copy':
+        if not all((target, bucket)):
+            module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
+        response, changed = copy(module, connection, name, target, bucket)
+
+    facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
+
+    module.exit_json(**facts_result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py
new file mode 100644
index 00000000000..3441439adb3
--- /dev/null
+++ b/elasticache_subnet_group.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: elasticache_subnet_group
+short_description: manage ElastiCache subnet groups
+description:
+  - Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5.
+options:
+  state:
+    description:
+      - Specifies whether the subnet group should be present or absent.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  name:
+    description:
+      - Cache subnet group identifier.
+    required: true
+    type: str
+  description:
+    description:
+      - ElastiCache subnet group description. Only set when a new group is added.
+    type: str
+  subnets:
+    description:
+      - List of subnet IDs that make up the ElastiCache subnet group.
+    type: list
+    elements: str
+author: "Tim Mahoney (@timmahoney)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Add or change a subnet group
+- elasticache_subnet_group:
+    state: present
+    name: norwegian-blue
+    description: My Fancy Ex Parrot Subnet Group
+    subnets:
+      - subnet-aaaaaaaa
+      - subnet-bbbbbbbb
+
+# Remove a subnet group
+- elasticache_subnet_group:
+    state: absent
+    name: norwegian-blue
+'''
+
+try:
+    import boto
+    from boto.elasticache import connect_to_region
+    from boto.exception import BotoServerError
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        name=dict(required=True),
+        description=dict(required=False),
+        subnets=dict(required=False, type='list'),
+    )
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    state = module.params.get('state')
+    group_name = module.params.get('name').lower()
+    group_description = module.params.get('description')
+    group_subnets = module.params.get('subnets') or []
+
+    if state == 'present':
+        for required in ['name', 'description', 'subnets']:
+            if not module.params.get(required):
+                module.fail_json(msg=str("Parameter %s required for state='present'" % required))
+    else:
+        for not_allowed in ['description', 'subnets']:
+            if module.params.get(not_allowed):
+                module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
+
+    # Retrieve any AWS settings from the environment.
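+    # Unlike the boto3-based modules elsewhere in this collection, this
+    # module talks to ElastiCache through boto2's connect_to_region()
+    # (imported above).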
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + if not region: + module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) + + """Get an elasticache connection""" + try: + conn = connect_to_region(region_name=region, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=e.message) + + try: + changed = False + exists = False + + try: + matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) + exists = len(matching_groups) > 0 + except BotoServerError as e: + if e.error_code != 'CacheSubnetGroupNotFoundFault': + module.fail_json(msg=e.error_message) + + if state == 'absent': + if exists: + conn.delete_cache_subnet_group(group_name) + changed = True + else: + if not exists: + new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + else: + changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + + except BotoServerError as e: + if e.error_message != 'No modifications were requested.': + module.fail_json(msg=e.error_message) + else: + changed = False + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/elb_application_lb.py b/elb_application_lb.py new file mode 100644 index 00000000000..5536f3de6f1 --- /dev/null +++ b/elb_application_lb.py @@ -0,0 +1,655 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: elb_application_lb +short_description: Manage an Application load balancer +description: + - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + access_logs_enabled: + description: + - Whether or not to enable access logs. + - When set, I(access_logs_s3_bucket) must also be set. + type: bool + access_logs_s3_bucket: + description: + - The name of the S3 bucket for the access logs. + - The bucket must exist in the same + region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket. + - Required if access logs in Amazon S3 are enabled. + - When set, I(access_logs_enabled) must also be set. + type: str + access_logs_s3_prefix: + description: + - The prefix for the log location in the S3 bucket. + - If you don't specify a prefix, the access logs are stored in the root of the bucket. + - Cannot begin or end with a slash. 
+ type: str + deletion_protection: + description: + - Indicates whether deletion protection for the ELB is enabled. + default: no + type: bool + http2: + description: + - Indicates whether to enable HTTP2 routing. + default: no + type: bool + idle_timeout: + description: + - The number of seconds to wait before an idle connection is closed. + type: int + listeners: + description: + - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys + are CamelCased. + type: list + suboptions: + Port: + description: The port on which the load balancer is listening. + required: true + type: int + Protocol: + description: The protocol for connections from clients to the load balancer. + required: true + type: str + Certificates: + description: The SSL server certificate. + type: list + suboptions: + CertificateArn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + SslPolicy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + DefaultActions: + description: The default actions for the listener. + required: true + type: list + suboptions: + Type: + description: The type of action. + type: str + TargetGroupArn: + description: The Amazon Resource Name (ARN) of the target group. + type: str + Rules: + type: list + description: + - A list of ALB Listener Rules. + - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:' + - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule' + suboptions: + Conditions: + type: list + description: Conditions which must be met for the actions to be applied. + Priority: + type: int + description: The rule priority. + Actions: + type: list + description: Actions to apply if all of the rule's conditions are met. + name: + description: + - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric + characters or hyphens, and must not begin or end with a hyphen. + required: true + type: str + purge_listeners: + description: + - If yes, existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. If the I(listeners) parameter is + not set then listeners will not be modified + default: yes + type: bool + purge_tags: + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + default: yes + type: bool + subnets: + description: + - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from + at least two Availability Zones. + - Required if I(state=present). + type: list + security_groups: + description: + - A list of the names or IDs of the security groups to assign to the load balancer. + - Required if I(state=present). + default: [] + type: list + scheme: + description: + - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. + default: internet-facing + choices: [ 'internet-facing', 'internal' ] + type: str + state: + description: + - Create or destroy the load balancer. 
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  tags:
+    description:
+      - A dictionary of one or more tags to assign to the load balancer.
+    type: dict
+  wait:
+    description:
+      - Wait for the load balancer to have a state of 'active' before completing. A status check is
+        performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
+    default: no
+    type: bool
+  wait_timeout:
+    description:
+      - The time in seconds to use in conjunction with I(wait).
+    type: int
+  purge_rules:
+    description:
+      - When set to no, keep the existing load balancer rules in place. Will modify and add, but will not delete.
+    default: yes
+    type: bool
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+notes:
+  - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
+  - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
+'''

+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ELB and attach a listener
+- elb_application_lb:
+    name: myelb
+    security_groups:
+      - sg-12345678
+      - my-sec-group
+    subnets:
+      - subnet-012345678
+      - subnet-abcdef000
+    listeners:
+      - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+        Port: 80 # Required. The port on which the load balancer is listening.
+        # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+        SslPolicy: ELBSecurityPolicy-2015-05
+        Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+          - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
+        DefaultActions:
+          - Type: forward # Required.
+            TargetGroupName: # Required. The name of the target group
+    state: present
+
+# Create an ELB and attach a listener with logging enabled
+- elb_application_lb:
+    access_logs_enabled: yes
+    access_logs_s3_bucket: mybucket
+    access_logs_s3_prefix: "logs"
+    name: myelb
+    security_groups:
+      - sg-12345678
+      - my-sec-group
+    subnets:
+      - subnet-012345678
+      - subnet-abcdef000
+    listeners:
+      - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+        Port: 80 # Required. The port on which the load balancer is listening.
+        # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+        SslPolicy: ELBSecurityPolicy-2015-05
+        Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+          - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
+        DefaultActions:
+          - Type: forward # Required.
+            TargetGroupName: # Required.
The name of the target group + state: present + +# Create an ALB with listeners and rules +- elb_application_lb: + name: test-alb + subnets: + - subnet-12345678 + - subnet-87654321 + security_groups: + - sg-12345678 + scheme: internal + listeners: + - Protocol: HTTPS + Port: 443 + DefaultActions: + - Type: forward + TargetGroupName: test-target-group + Certificates: + - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com + SslPolicy: ELBSecurityPolicy-2015-05 + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: test-target-group + Type: forward + - Conditions: + - Field: path-pattern + Values: + - "/redirect-path/*" + Priority: '2' + Actions: + - Type: redirect + RedirectConfig: + Host: "#{host}" + Path: "/example/redir" # or /#{path} + Port: "#{port}" + Protocol: "#{protocol}" + Query: "#{query}" + StatusCode: "HTTP_302" # or HTTP_301 + - Conditions: + - Field: path-pattern + Values: + - "/fixed-response-path/" + Priority: '3' + Actions: + - Type: fixed-response + FixedResponseConfig: + ContentType: "text/plain" + MessageBody: "This is the page you're looking for" + StatusCode: "200" + - Conditions: + - Field: host-header + Values: + - "hostname.domain.com" + - "alternate.domain.com" + Priority: '4' + Actions: + - TargetGroupName: test-target-group + Type: forward + state: present + +# Remove an ELB +- elb_application_lb: + name: myelb + state: absent + +''' + +RETURN = ''' +access_logs_s3_bucket: + description: The name of the S3 bucket for the access logs. + returned: when state is present + type: str + sample: mys3bucket +access_logs_s3_enabled: + description: Indicates whether access logs stored in Amazon S3 are enabled. + returned: when state is present + type: str + sample: true +access_logs_s3_prefix: + description: The prefix for the location in the S3 bucket. + returned: when state is present + type: str + sample: my/logs +availability_zones: + description: The Availability Zones for the load balancer. + returned: when state is present + type: list + sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" +canonical_hosted_zone_id: + description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. + returned: when state is present + type: str + sample: ABCDEF12345678 +created_time: + description: The date and time the load balancer was created. + returned: when state is present + type: str + sample: "2015-02-12T02:14:02+00:00" +deletion_protection_enabled: + description: Indicates whether deletion protection is enabled. + returned: when state is present + type: str + sample: true +dns_name: + description: The public DNS name of the load balancer. + returned: when state is present + type: str + sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com +idle_timeout_timeout_seconds: + description: The idle timeout value, in seconds. + returned: when state is present + type: int + sample: 60 +ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + returned: when state is present + type: str + sample: ipv4 +listeners: + description: Information about the listeners. + returned: when state is present + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + returned: when state is present + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. 
+            returned: when state is present
+            type: str
+            sample: ""
+        port:
+            description: The port on which the load balancer is listening.
+            returned: when state is present
+            type: int
+            sample: 80
+        protocol:
+            description: The protocol for connections from clients to the load balancer.
+            returned: when state is present
+            type: str
+            sample: HTTPS
+        certificates:
+            description: The SSL server certificate.
+            returned: when state is present
+            type: complex
+            contains:
+                certificate_arn:
+                    description: The Amazon Resource Name (ARN) of the certificate.
+                    returned: when state is present
+                    type: str
+                    sample: ""
+        ssl_policy:
+            description: The security policy that defines which ciphers and protocols are supported.
+            returned: when state is present
+            type: str
+            sample: ""
+        default_actions:
+            description: The default actions for the listener.
+            returned: when state is present
+            type: complex
+            contains:
+                type:
+                    description: The type of action.
+                    returned: when state is present
+                    type: str
+                    sample: ""
+                target_group_arn:
+                    description: The Amazon Resource Name (ARN) of the target group.
+                    returned: when state is present
+                    type: str
+                    sample: ""
+load_balancer_arn:
+    description: The Amazon Resource Name (ARN) of the load balancer.
+    returned: when state is present
+    type: str
+    sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+load_balancer_name:
+    description: The name of the load balancer.
+    returned: when state is present
+    type: str
+    sample: my-elb
+routing_http2_enabled:
+    description: Indicates whether HTTP/2 is enabled.
+    returned: when state is present
+    type: str
+    sample: true
+scheme:
+    description: Internet-facing or internal load balancer.
+    returned: when state is present
+    type: str
+    sample: internal
+security_groups:
+    description: The IDs of the security groups for the load balancer.
+    returned: when state is present
+    type: list
+    sample: ['sg-0011223344']
+state:
+    description: The state of the load balancer.
+    returned: when state is present
+    type: dict
+    sample: "{'code': 'active'}"
+tags:
+    description: The tags attached to the load balancer.
+    returned: when state is present
+    type: dict
+    sample: "{
+        'Tag': 'Example'
+    }"
+type:
+    description: The type of load balancer.
+    returned: when state is present
+    type: str
+    sample: application
+vpc_id:
+    description: The ID of the VPC for the load balancer.
+    returned: when state is present
+    type: str
+    sample: vpc-0011223344
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import ApplicationLoadBalancer, ELBListeners, ELBListener, ELBListenerRules, ELBListenerRule
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.elb_utils import get_elb_listener_rules
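+
+# A minimal, illustrative sketch (not used by the module) of the tag-diff
+# semantics that create_or_update_elb below relies on: compare_aws_tags
+# returns the tags that need to be set and the tag keys that need to be
+# removed. The example values are hypothetical.
+def _example_tag_diff():
+    current = {'Env': 'dev', 'Owner': 'ops'}   # tags currently on the ELB
+    desired = {'Env': 'prod'}                  # tags requested by the task
+    to_set, to_delete = compare_aws_tags(current, desired, purge_tags=True)
+    # to_set == {'Env': 'prod'}; to_delete == ['Owner']
+    return to_set, to_delete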
+def create_or_update_elb(elb_obj):
+    """Create ELB or modify main attributes; exits the module via exit_json."""
+
+    if elb_obj.elb:
+        # ELB exists so check subnets, security groups and tags match what has been passed
+
+        # Subnets
+        if not elb_obj.compare_subnets():
+            elb_obj.modify_subnets()
+
+        # Security Groups
+        if not elb_obj.compare_security_groups():
+            elb_obj.modify_security_groups()
+
+        # Tags - only need to play with tags if the tags parameter has been set to something
+        if elb_obj.tags is not None:
+
+            # Delete necessary tags
+            tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
+                                                                boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
+            if tags_to_delete:
+                elb_obj.delete_tags(tags_to_delete)
+
+            # Add/update tags
+            if tags_need_modify:
+                elb_obj.modify_tags()
+
+    else:
+        # Create load balancer
+        elb_obj.create_elb()
+
+    # ELB attributes
+    elb_obj.update_elb_attributes()
+    elb_obj.modify_elb_attributes()
+
+    # Listeners
+    listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
+
+    listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
+
+    # Delete listeners
+    for listener_to_delete in listeners_to_delete:
+        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
+        listener_obj.delete()
+        listeners_obj.changed = True
+
+    # Add listeners
+    for listener_to_add in listeners_to_add:
+        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
+        listener_obj.add()
+        listeners_obj.changed = True
+
+    # Modify listeners
+    for listener_to_modify in listeners_to_modify:
+        listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
+        listener_obj.modify()
+        listeners_obj.changed = True
+
+    # If listeners changed, mark ELB as changed
+    if listeners_obj.changed:
+        elb_obj.changed = True
+
+    # Rules of each listener
+    for listener in listeners_obj.listeners:
+        if 'Rules' in listener:
+            rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port'])
+
+            rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules()
+
+            # Delete rules
+            if elb_obj.module.params['purge_rules']:
+                for rule in rules_to_delete:
+                    rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn)
+                    rule_obj.delete()
+                    elb_obj.changed = True
+
+            # Add rules
+            for rule in rules_to_add:
+                rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
+                rule_obj.create()
+                elb_obj.changed = True
+
+            # Modify rules
+            for rule in rules_to_modify:
+                rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
+                rule_obj.modify()
+                elb_obj.changed = True
+
+    # Get the ELB again
+    elb_obj.update()
+
+    # Get the ELB listeners again
+    listeners_obj.update()
+
+    # Update the ELB attributes
+    elb_obj.update_elb_attributes()
+
+    # Convert to snake_case and merge in everything we want to return to the user
+    snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
+    snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
+    snaked_elb['listeners'] = []
+    for listener in listeners_obj.current_listeners:
+        # For each listener, get listener rules
+        listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn'])
+        snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
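+
+    # boto3 returns tags as a list of {'Key': ..., 'Value': ...} dicts;
+    # boto3_tag_list_to_ansible_dict flattens that into a plain {key: value}
+    # mapping (e.g. [{'Key': 'Env', 'Value': 'prod'}] -> {'Env': 'prod'})
+    # so the returned tags match the shape of the module's I(tags) input.
+    # Change tags to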
ansible friendly dict + snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + + elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) + + +def delete_elb(elb_obj): + + if elb_obj.elb: + elb_obj.delete() + + elb_obj.module.exit_json(changed=elb_obj.changed) + + +def main(): + + argument_spec = dict( + access_logs_enabled=dict(type='bool'), + access_logs_s3_bucket=dict(type='str'), + access_logs_s3_prefix=dict(type='str'), + deletion_protection=dict(type='bool'), + http2=dict(type='bool'), + idle_timeout=dict(type='int'), + listeners=dict(type='list', + elements='dict', + options=dict( + Protocol=dict(type='str', required=True), + Port=dict(type='int', required=True), + SslPolicy=dict(type='str'), + Certificates=dict(type='list'), + DefaultActions=dict(type='list', required=True), + Rules=dict(type='list') + ) + ), + name=dict(required=True, type='str'), + purge_listeners=dict(default=True, type='bool'), + purge_tags=dict(default=True, type='bool'), + subnets=dict(type='list'), + security_groups=dict(type='list'), + scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), + state=dict(choices=['present', 'absent'], default='present'), + tags=dict(type='dict'), + wait_timeout=dict(type='int'), + wait=dict(default=False, type='bool'), + purge_rules=dict(default=True, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['subnets', 'security_groups']) + ], + required_together=[ + ['access_logs_enabled', 'access_logs_s3_bucket'] + ] + ) + + # Quick check of listeners parameters + listeners = module.params.get("listeners") + if listeners is not None: + for listener in listeners: + for key in listener.keys(): + if key == 'Protocol' and listener[key] == 'HTTPS': + if listener.get('SslPolicy') is None: + module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS") + + if listener.get('Certificates') is None: + module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS") + + connection = module.client('elbv2') + connection_ec2 = module.client('ec2') + + state = module.params.get("state") + + elb = ApplicationLoadBalancer(connection, connection_ec2, module) + + if state == 'present': + create_or_update_elb(elb) + else: + delete_elb(elb) + + +if __name__ == '__main__': + main() diff --git a/elb_application_lb_facts.py b/elb_application_lb_facts.py new file mode 120000 index 00000000000..c5ee0eaca83 --- /dev/null +++ b/elb_application_lb_facts.py @@ -0,0 +1 @@ +elb_application_lb_info.py \ No newline at end of file diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py new file mode 100644 index 00000000000..d115d029f36 --- /dev/null +++ b/elb_application_lb_info.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: elb_application_lb_info +short_description: Gather information about application ELBs in AWS +description: + - Gather information about application ELBs in AWS + - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change. 
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+  load_balancer_arns:
+    description:
+      - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
+    required: false
+    type: list
+  names:
+    description:
+      - The names of the load balancers.
+    required: false
+    type: list
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all application load balancers
+- elb_application_lb_info:
+
+# Gather information about a particular ALB given its ARN
+- elb_application_lb_info:
+    load_balancer_arns:
+      - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+# Gather information about ALBs named 'elb1' and 'elb2'
+- elb_application_lb_info:
+    names:
+      - elb1
+      - elb2
+
+# Gather information about a specific ALB
+- elb_application_lb_info:
+    names: "alb-name"
+    region: "aws-region"
+  register: alb_info
+- debug:
+    var: alb_info
+'''
+
+RETURN = '''
+load_balancers:
+    description: A list of load balancers.
+    returned: always
+    type: complex
+    contains:
+        access_logs_s3_bucket:
+            description: The name of the S3 bucket for the access logs.
+            returned: when status is present
+            type: str
+            sample: mys3bucket
+        access_logs_s3_enabled:
+            description: Indicates whether access logs stored in Amazon S3 are enabled.
+            returned: when status is present
+            type: str
+            sample: true
+        access_logs_s3_prefix:
+            description: The prefix for the location in the S3 bucket.
+            returned: when status is present
+            type: str
+            sample: /my/logs
+        availability_zones:
+            description: The Availability Zones for the load balancer.
+            returned: when status is present
+            type: list
+            sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
+        canonical_hosted_zone_id:
+            description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+            returned: when status is present
+            type: str
+            sample: ABCDEF12345678
+        created_time:
+            description: The date and time the load balancer was created.
+            returned: when status is present
+            type: str
+            sample: "2015-02-12T02:14:02+00:00"
+        deletion_protection_enabled:
+            description: Indicates whether deletion protection is enabled.
+            returned: when status is present
+            type: str
+            sample: true
+        dns_name:
+            description: The public DNS name of the load balancer.
+            returned: when status is present
+            type: str
+            sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
+        idle_timeout_timeout_seconds:
+            description: The idle timeout value, in seconds.
+            returned: when status is present
+            type: str
+            sample: 60
+        ip_address_type:
+            description: The type of IP addresses used by the subnets for the load balancer.
+            returned: when status is present
+            type: str
+            sample: ipv4
+        load_balancer_arn:
+            description: The Amazon Resource Name (ARN) of the load balancer.
+            returned: when status is present
+            type: str
+            sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
+        load_balancer_name:
+            description: The name of the load balancer.
+            returned: when status is present
+            type: str
+            sample: my-elb
+        scheme:
+            description: Internet-facing or internal load balancer.
+            returned: when status is present
+            type: str
+            sample: internal
+        security_groups:
+            description: The IDs of the security groups for the load balancer.
+ returned: when status is present + type: list + sample: ['sg-0011223344'] + state: + description: The state of the load balancer. + returned: when status is present + type: dict + sample: "{'code': 'active'}" + tags: + description: The tags attached to the load balancer. + returned: when status is present + type: dict + sample: "{ + 'Tag': 'Example' + }" + type: + description: The type of load balancer. + returned: when status is present + type: str + sample: application + vpc_id: + description: The ID of the VPC for the load balancer. + returned: when status is present + type: str + sample: vpc-0011223344 +''' + +import traceback + +try: + import boto3 + from botocore.exceptions import ClientError, NoCredentialsError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, + ec2_argument_spec, get_aws_connection_info) + + +def get_elb_listeners(connection, module, elb_arn): + + try: + return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners'] + except ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + +def get_listener_rules(connection, module, listener_arn): + + try: + return connection.describe_rules(ListenerArn=listener_arn)['Rules'] + except ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + +def get_load_balancer_attributes(connection, module, load_balancer_arn): + + try: + load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) + except ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + # Replace '.' with '_' in attribute key names to make it more Ansibley + for k, v in list(load_balancer_attributes.items()): + load_balancer_attributes[k.replace('.', '_')] = v + del load_balancer_attributes[k] + + return load_balancer_attributes + + +def get_load_balancer_tags(connection, module, load_balancer_arn): + + try: + return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) + except ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + +def list_load_balancers(connection, module): + + load_balancer_arns = module.params.get("load_balancer_arns") + names = module.params.get("names") + + try: + load_balancer_paginator = connection.get_paginator('describe_load_balancers') + if not load_balancer_arns and not names: + load_balancers = load_balancer_paginator.paginate().build_full_result() + if load_balancer_arns: + load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result() + if names: + load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result() + except ClientError as e: + if e.response['Error']['Code'] == 'LoadBalancerNotFound': + module.exit_json(load_balancers=[]) + else: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except NoCredentialsError as e: + module.fail_json(msg="AWS authentication problem. 
" + e.message, exception=traceback.format_exc()) + + for load_balancer in load_balancers['LoadBalancers']: + # Get the attributes for each elb + load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn'])) + + # Get the listeners for each elb + load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn']) + + # For each listener, get listener rules + for listener in load_balancer['listeners']: + listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] + + # Get tags for each load balancer + for snaked_load_balancer in snaked_load_balancers: + snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn']) + + module.exit_json(load_balancers=snaked_load_balancers) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + load_balancer_arns=dict(type='list'), + names=dict(type='list') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['load_balancer_arns', 'names']], + supports_check_mode=True + ) + if module._name == 'elb_application_lb_facts': + module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", version='2.13') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + list_load_balancers(connection, module) + + +if __name__ == '__main__': + main() diff --git a/elb_classic_lb.py b/elb_classic_lb.py new file mode 100644 index 00000000000..b8b08dfb4bd --- /dev/null +++ b/elb_classic_lb.py @@ -0,0 +1,1349 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: elb_classic_lb +description: + - Returns information about the load balancer. + - Will be marked changed when called only if state is changed. +short_description: Creates or destroys Amazon ELB. 
+author: + - "Jim Dalton (@jsdalton)" +options: + state: + description: + - Create or destroy the ELB + choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name of the ELB + required: true + type: str + listeners: + description: + - List of ports/protocols for this ELB to listen on (see example) + type: list + purge_listeners: + description: + - Purge existing listeners on ELB that are not found in listeners + type: bool + default: 'yes' + instance_ids: + description: + - List of instance ids to attach to this ELB + type: list + purge_instance_ids: + description: + - Purge existing instance ids on ELB that are not found in instance_ids + type: bool + default: 'no' + zones: + description: + - List of availability zones to enable on this ELB + type: list + purge_zones: + description: + - Purge existing availability zones on ELB that are not found in zones + type: bool + default: 'no' + security_group_ids: + description: + - A list of security groups to apply to the elb + type: list + security_group_names: + description: + - A list of security group names to apply to the elb + type: list + health_check: + description: + - An associative array of health check configuration settings (see example) + type: dict + access_logs: + description: + - An associative array of access logs configuration settings (see example) + type: dict + subnets: + description: + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + type: list + purge_subnets: + description: + - Purge existing subnet on ELB that are not found in subnets + type: bool + default: 'no' + scheme: + description: + - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'. + If you choose to update your scheme with a different value the ELB will be destroyed and + recreated. To update scheme you must use the option wait. + choices: ["internal", "internet-facing"] + default: 'internet-facing' + type: str + validate_certs: + description: + - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0. + type: bool + default: 'yes' + connection_draining_timeout: + description: + - Wait a specified timeout allowing connections to drain before terminating an instance + type: int + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time + type: int + cross_az_load_balancing: + description: + - Distribute load across all configured Availability Zones + type: bool + default: 'no' + stickiness: + description: + - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example ) + type: dict + wait: + description: + - When specified, Ansible will check the status of the load balancer to ensure it has been successfully + removed from AWS. + type: bool + default: 'no' + wait_timeout: + description: + - Used in conjunction with wait. Number of seconds to wait for the elb to be terminated. + A maximum of 600 seconds (10 minutes) is allowed. + default: 60 + type: int + tags: + description: + - An associative array of tags. To delete all tags, supply an empty dict. + type: dict + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. 
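+
+# A minimal sketch of passing credentials inline instead (these parameters
+# come from the shared aws/ec2 documentation fragments; the key values below
+# are placeholders only):
+# - elb_classic_lb:
+#     name: "test-please-delete"
+#     state: absent
+#     aws_access_key: AKIA...EXAMPLE
+#     aws_secret_key: example-secret-key
+#     region: us-east-1
+#   delegate_to: localhost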
+ +# Basic provisioning example (non-VPC) + +- elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + proxy_protocol: True + - protocol: https + load_balancer_port: 443 + instance_protocol: http # optional, defaults to value of protocol setting + instance_port: 80 + # ssl certificate required for https or ssl + ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" + delegate_to: localhost + +# Internal ELB example + +- elb_classic_lb: + name: "test-vpc" + scheme: internal + state: present + instance_ids: + - i-abcd1234 + purge_instance_ids: true + subnets: + - subnet-abcd1234 + - subnet-1a2b3c4d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + delegate_to: localhost + +# Configure a health check and the access logs +- elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 + access_logs: + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" + delegate_to: localhost + +# Ensure ELB is gone +- elb_classic_lb: + name: "test-please-delete" + state: absent + delegate_to: localhost + +# Ensure ELB is gone and wait for check (for default timeout) +- elb_classic_lb: + name: "test-please-delete" + state: absent + wait: yes + delegate_to: localhost + +# Ensure ELB is gone and wait for check with timeout value +- elb_classic_lb: + name: "test-please-delete" + state: absent + wait: yes + wait_timeout: 600 + delegate_to: localhost + +# Normally, this module will purge any listeners that exist on the ELB +# but aren't specified in the listeners parameter. If purge_listeners is +# false it leaves them alone +- elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_listeners: no + delegate_to: localhost + +# Normally, this module will leave availability zones that are enabled +# on the ELB alone. If purge_zones is true, then any extraneous zones +# will be removed +- elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_zones: yes + delegate_to: localhost + +# Creates a ELB and assigns a list of subnets to it. 
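+# (As this example shows, list-type parameters such as security_group_ids
+# and subnets also accept comma-separated strings.)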
+- elb_classic_lb: + state: present + name: 'New ELB' + security_group_ids: 'sg-123456, sg-67890' + region: us-west-2 + subnets: 'subnet-123456,subnet-67890' + purge_subnets: yes + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + delegate_to: localhost + +# Create an ELB with connection draining, increased idle timeout and cross availability +# zone load balancing +- elb_classic_lb: + name: "New ELB" + state: present + connection_draining_timeout: 60 + idle_timeout: 300 + cross_az_load_balancing: "yes" + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + delegate_to: localhost + +# Create an ELB with load balancer stickiness enabled +- elb_classic_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: loadbalancer + enabled: yes + expiration: 300 + delegate_to: localhost + +# Create an ELB with application stickiness enabled +- elb_classic_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: application + enabled: yes + cookie: SESSIONID + delegate_to: localhost + +# Create an ELB and add tags +- elb_classic_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: + Name: "New ELB" + stack: "production" + client: "Bob" + delegate_to: localhost + +# Delete all tags from an ELB +- elb_classic_lb: + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: {} + delegate_to: localhost +""" + +import random +import time +import traceback + +try: + import boto + import boto.ec2.elb + import boto.ec2.elb.attributes + import boto.vpc + from boto.ec2.elb.healthcheck import HealthCheck + from boto.ec2.tag import Tag + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native + + +def _throttleable_operation(max_retries): + def _operation_wrapper(op): + def _do_op(*args, **kwargs): + retry = 0 + while True: + try: + return op(*args, **kwargs) + except boto.exception.BotoServerError as e: + if retry < max_retries and e.code in \ + ("Throttling", "RequestLimitExceeded"): + retry = retry + 1 + time.sleep(min(random.random() * (2 ** retry), 300)) + continue + else: + raise + return _do_op + return _operation_wrapper + + +def _get_vpc_connection(module, region, aws_connect_params): + try: + return connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + +_THROTTLING_RETRIES = 5 + + +class ElbManager(object): + """Handles ELB creation and destruction""" + + def __init__(self, module, name, listeners=None, purge_listeners=None, + zones=None, purge_zones=None, security_group_ids=None, + health_check=None, subnets=None, purge_subnets=None, + scheme="internet-facing", 
connection_draining_timeout=None, + idle_timeout=None, + cross_az_load_balancing=None, access_logs=None, + stickiness=None, wait=None, wait_timeout=None, tags=None, + region=None, + instance_ids=None, purge_instance_ids=None, **aws_connect_params): + + self.module = module + self.name = name + self.listeners = listeners + self.purge_listeners = purge_listeners + self.instance_ids = instance_ids + self.purge_instance_ids = purge_instance_ids + self.zones = zones + self.purge_zones = purge_zones + self.security_group_ids = security_group_ids + self.health_check = health_check + self.subnets = subnets + self.purge_subnets = purge_subnets + self.scheme = scheme + self.connection_draining_timeout = connection_draining_timeout + self.idle_timeout = idle_timeout + self.cross_az_load_balancing = cross_az_load_balancing + self.access_logs = access_logs + self.stickiness = stickiness + self.wait = wait + self.wait_timeout = wait_timeout + self.tags = tags + + self.aws_connect_params = aws_connect_params + self.region = region + + self.changed = False + self.status = 'gone' + self.elb_conn = self._get_elb_connection() + + try: + self.elb = self._get_elb() + except boto.exception.BotoServerError as e: + module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc()) + + self.ec2_conn = self._get_ec2_connection() + + @_throttleable_operation(_THROTTLING_RETRIES) + def ensure_ok(self): + """Create the ELB""" + if not self.elb: + # Zones and listeners will be added at creation + self._create_elb() + else: + if self._get_scheme(): + # the only way to change the scheme is by recreating the resource + self.ensure_gone() + self._create_elb() + else: + self._set_zones() + self._set_security_groups() + self._set_elb_listeners() + self._set_subnets() + self._set_health_check() + # boto has introduced support for some ELB attributes in + # different versions, so we check first before trying to + # set them to avoid errors + if self._check_attribute_support('connection_draining'): + self._set_connection_draining_timeout() + if self._check_attribute_support('connecting_settings'): + self._set_idle_timeout() + if self._check_attribute_support('cross_zone_load_balancing'): + self._set_cross_az_load_balancing() + if self._check_attribute_support('access_log'): + self._set_access_log() + # add sticky options + self.select_stickiness_policy() + + # ensure backend server policies are correct + self._set_backend_policies() + # set/remove instance ids + self._set_instance_ids() + + self._set_tags() + + def ensure_gone(self): + """Destroy the ELB""" + if self.elb: + self._delete_elb() + if self.wait: + elb_removed = self._wait_for_elb_removed() + # Unfortunately even though the ELB itself is removed quickly + # the interfaces take longer so reliant security groups cannot + # be deleted until the interface has registered as removed. 
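+                # (_wait_for_elb_interface_removed below polls the ELB-owned
+                # network interfaces every 15 seconds until they disappear or
+                # wait_timeout is exhausted.)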
+ elb_interface_removed = self._wait_for_elb_interface_removed() + if not (elb_removed and elb_interface_removed): + self.module.fail_json(msg='Timed out waiting for removal of load balancer.') + + def get_info(self): + try: + check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] + except Exception: + check_elb = None + + if not check_elb: + info = { + 'name': self.name, + 'status': self.status, + 'region': self.region + } + else: + try: + lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name'] + except Exception: + lb_cookie_policy = None + try: + app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name'] + except Exception: + app_cookie_policy = None + + info = { + 'name': check_elb.name, + 'dns_name': check_elb.dns_name, + 'zones': check_elb.availability_zones, + 'security_group_ids': check_elb.security_groups, + 'status': self.status, + 'subnets': self.subnets, + 'scheme': check_elb.scheme, + 'hosted_zone_name': check_elb.canonical_hosted_zone_name, + 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, + 'lb_cookie_policy': lb_cookie_policy, + 'app_cookie_policy': app_cookie_policy, + 'proxy_policy': self._get_proxy_protocol_policy(), + 'backends': self._get_backend_policies(), + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0, + 'region': self.region + } + + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [dict( + instance_id=instance_state.instance_id, + reason_code=instance_state.reason_code, + state=instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] + + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] += 1 + + if check_elb.health_check: + info['health_check'] = { + 'target': check_elb.health_check.target, + 'interval': check_elb.health_check.interval, + 'timeout': check_elb.health_check.timeout, + 'healthy_threshold': check_elb.health_check.healthy_threshold, + 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, + } + + if check_elb.listeners: + info['listeners'] = [self._api_listener_as_tuple(l) + for l in check_elb.listeners] + elif self.status == 'created': + # When creating a new ELB, listeners don't show in the + # immediately returned result, so just include the + # ones that were added + info['listeners'] = [self._listener_as_tuple(l) + for l in self.listeners] + else: + info['listeners'] = [] + + if self._check_attribute_support('connection_draining'): + info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout) + + if self._check_attribute_support('connecting_settings'): + info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout + + if self._check_attribute_support('cross_zone_load_balancing'): + is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') + if is_cross_az_lb_enabled: + info['cross_az_load_balancing'] = 'yes' + else: + info['cross_az_load_balancing'] = 'no' + + # return stickiness info? 
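+            # (The active stickiness policy names are already surfaced above
+            # through lb_cookie_policy and app_cookie_policy.)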
+ + info['tags'] = self.tags + + return info + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + for x in range(0, max_retries): + try: + self.elb_conn.get_all_lb_attributes(self.name) + except (boto.exception.BotoServerError, Exception) as e: + if "LoadBalancerNotFound" in e.code: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _wait_for_elb_interface_removed(self): + polling_increment_secs = 15 + max_retries = (self.wait_timeout // polling_increment_secs) + status_achieved = False + + elb_interfaces = self.ec2_conn.get_all_network_interfaces( + filters={'attachment.instance-owner-id': 'amazon-elb', + 'description': 'ELB {0}'.format(self.name)}) + + for x in range(0, max_retries): + for interface in elb_interfaces: + try: + result = self.ec2_conn.get_all_network_interfaces(interface.id) + if result == []: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + except (boto.exception.BotoServerError, Exception) as e: + if 'InvalidNetworkInterfaceID' in e.code: + status_achieved = True + break + else: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + return status_achieved + + @_throttleable_operation(_THROTTLING_RETRIES) + def _get_elb(self): + elbs = self.elb_conn.get_all_load_balancers() + for elb in elbs: + if self.name == elb.name: + self.status = 'ok' + return elb + + def _get_elb_connection(self): + try: + return connect_to_aws(boto.ec2.elb, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json(msg=str(e)) + + def _get_ec2_connection(self): + try: + return connect_to_aws(boto.ec2, self.region, + **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, Exception) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + @_throttleable_operation(_THROTTLING_RETRIES) + def _delete_elb(self): + # True if succeeds, exception raised if not + result = self.elb_conn.delete_load_balancer(name=self.name) + if result: + self.changed = True + self.status = 'deleted' + + def _create_elb(self): + listeners = [self._listener_as_tuple(l) for l in self.listeners] + self.elb = self.elb_conn.create_load_balancer(name=self.name, + zones=self.zones, + security_groups=self.security_group_ids, + complex_listeners=listeners, + subnets=self.subnets, + scheme=self.scheme) + if self.elb: + # HACK: Work around a boto bug in which the listeners attribute is + # always set to the listeners argument to create_load_balancer, and + # not the complex_listeners + # We're not doing a self.elb = self._get_elb here because there + # might be eventual consistency issues and it doesn't necessarily + # make sense to wait until the ELB gets returned from the EC2 API. 
+ # This is necessary in the event we hit the throttling errors and + # need to retry ensure_ok + # See https://github.com/boto/boto/issues/3526 + self.elb.listeners = self.listeners + self.changed = True + self.status = 'created' + + def _create_elb_listeners(self, listeners): + """Takes a list of listener tuples and creates them""" + # True if succeeds, exception raised if not + self.changed = self.elb_conn.create_load_balancer_listeners(self.name, + complex_listeners=listeners) + + def _delete_elb_listeners(self, listeners): + """Takes a list of listener tuples and deletes them from the elb""" + ports = [l[0] for l in listeners] + + # True if succeeds, exception raised if not + self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, + ports) + + def _set_elb_listeners(self): + """ + Creates listeners specified by self.listeners; overwrites existing + listeners on these ports; removes extraneous listeners + """ + listeners_to_add = [] + listeners_to_remove = [] + listeners_to_keep = [] + + # Check for any listeners we need to create or overwrite + for listener in self.listeners: + listener_as_tuple = self._listener_as_tuple(listener) + + # First we loop through existing listeners to see if one is + # already specified for this port + existing_listener_found = None + for existing_listener in self.elb.listeners: + # Since ELB allows only one listener on each incoming port, a + # single match on the incoming port is all we're looking for + if existing_listener[0] == int(listener['load_balancer_port']): + existing_listener_found = self._api_listener_as_tuple(existing_listener) + break + + if existing_listener_found: + # Does it match exactly? + if listener_as_tuple != existing_listener_found: + # The ports are the same but something else is different, + # so we'll remove the existing one and add the new one + listeners_to_remove.append(existing_listener_found) + listeners_to_add.append(listener_as_tuple) + else: + # We already have this listener, so we're going to keep it + listeners_to_keep.append(existing_listener_found) + else: + # We didn't find an existing listener, so just add the new one + listeners_to_add.append(listener_as_tuple) + + # Check for any extraneous listeners we need to remove, if desired + if self.purge_listeners: + for existing_listener in self.elb.listeners: + existing_listener_tuple = self._api_listener_as_tuple(existing_listener) + if existing_listener_tuple in listeners_to_remove: + # Already queued for removal + continue + if existing_listener_tuple in listeners_to_keep: + # Keep this one around + continue + # Since we're not already removing it and we don't need to keep + # it, let's get rid of it + listeners_to_remove.append(existing_listener_tuple) + + if listeners_to_remove: + self._delete_elb_listeners(listeners_to_remove) + + if listeners_to_add: + self._create_elb_listeners(listeners_to_add) + + def _api_listener_as_tuple(self, listener): + """Adds ssl_certificate_id to ELB API tuple if present""" + base_tuple = listener.get_complex_tuple() + if listener.ssl_certificate_id and len(base_tuple) < 5: + return base_tuple + (listener.ssl_certificate_id,) + return base_tuple + + def _listener_as_tuple(self, listener): + """Formats listener as a 4- or 5-tuples, in the order specified by the + ELB API""" + # N.B. 
string manipulations on protocols below (str(), upper()) is to + # ensure format matches output from ELB API + listener_list = [ + int(listener['load_balancer_port']), + int(listener['instance_port']), + str(listener['protocol'].upper()), + ] + + # Instance protocol is not required by ELB API; it defaults to match + # load balancer protocol. We'll mimic that behavior here + if 'instance_protocol' in listener: + listener_list.append(str(listener['instance_protocol'].upper())) + else: + listener_list.append(str(listener['protocol'].upper())) + + if 'ssl_certificate_id' in listener: + listener_list.append(str(listener['ssl_certificate_id'])) + + return tuple(listener_list) + + def _enable_zones(self, zones): + try: + self.elb.enable_zones(zones) + except boto.exception.BotoServerError as e: + self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc()) + + self.changed = True + + def _disable_zones(self, zones): + try: + self.elb.disable_zones(zones) + except boto.exception.BotoServerError as e: + self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc()) + self.changed = True + + def _attach_subnets(self, subnets): + self.elb_conn.attach_lb_to_subnets(self.name, subnets) + self.changed = True + + def _detach_subnets(self, subnets): + self.elb_conn.detach_lb_from_subnets(self.name, subnets) + self.changed = True + + def _set_subnets(self): + """Determine which subnets need to be attached or detached on the ELB""" + if self.subnets: + if self.purge_subnets: + subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) + subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) + else: + subnets_to_detach = None + subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) + + if subnets_to_attach: + self._attach_subnets(subnets_to_attach) + if subnets_to_detach: + self._detach_subnets(subnets_to_detach) + + def _get_scheme(self): + """Determine if the current scheme is different than the scheme of the ELB""" + if self.scheme: + if self.elb.scheme != self.scheme: + if not self.wait: + self.module.fail_json(msg="Unable to modify scheme without using the wait option") + return True + return False + + def _set_zones(self): + """Determine which zones need to be enabled or disabled on the ELB""" + if self.zones: + if self.purge_zones: + zones_to_disable = list(set(self.elb.availability_zones) - + set(self.zones)) + zones_to_enable = list(set(self.zones) - + set(self.elb.availability_zones)) + else: + zones_to_disable = None + zones_to_enable = list(set(self.zones) - + set(self.elb.availability_zones)) + if zones_to_enable: + self._enable_zones(zones_to_enable) + # N.B. This must come second, in case it would have removed all zones + if zones_to_disable: + self._disable_zones(zones_to_disable) + + def _set_security_groups(self): + if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids): + self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) + self.changed = True + + def _set_health_check(self): + """Set health check values on ELB as needed""" + if self.health_check: + # This just makes it easier to compare each of the attributes + # and look for changes. 
Keys are attributes of the current + # health_check; values are desired values of new health_check + health_check_config = { + "target": self._get_health_check_target(), + "timeout": self.health_check['response_timeout'], + "interval": self.health_check['interval'], + "unhealthy_threshold": self.health_check['unhealthy_threshold'], + "healthy_threshold": self.health_check['healthy_threshold'], + } + + update_health_check = False + + # The health_check attribute is *not* set on newly created + # ELBs! So we have to create our own. + if not self.elb.health_check: + self.elb.health_check = HealthCheck() + + for attr, desired_value in health_check_config.items(): + if getattr(self.elb.health_check, attr) != desired_value: + setattr(self.elb.health_check, attr, desired_value) + update_health_check = True + + if update_health_check: + self.elb.configure_health_check(self.elb.health_check) + self.changed = True + + def _check_attribute_support(self, attr): + return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) + + def _set_cross_az_load_balancing(self): + attributes = self.elb.get_attributes() + if self.cross_az_load_balancing: + if not attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = True + else: + if attributes.cross_zone_load_balancing.enabled: + self.changed = True + attributes.cross_zone_load_balancing.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', + attributes.cross_zone_load_balancing.enabled) + + def _set_access_log(self): + attributes = self.elb.get_attributes() + if self.access_logs: + if 's3_location' not in self.access_logs: + self.module.fail_json(msg='s3_location information required') + + access_logs_config = { + "enabled": True, + "s3_bucket_name": self.access_logs['s3_location'], + "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''), + "emit_interval": self.access_logs.get('interval', 60), + } + + update_access_logs_config = False + for attr, desired_value in access_logs_config.items(): + if getattr(attributes.access_log, attr) != desired_value: + setattr(attributes.access_log, attr, desired_value) + update_access_logs_config = True + if update_access_logs_config: + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + self.changed = True + elif attributes.access_log.enabled: + attributes.access_log.enabled = False + self.changed = True + self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) + + def _set_connection_draining_timeout(self): + attributes = self.elb.get_attributes() + if self.connection_draining_timeout is not None: + if not attributes.connection_draining.enabled or \ + attributes.connection_draining.timeout != self.connection_draining_timeout: + self.changed = True + attributes.connection_draining.enabled = True + attributes.connection_draining.timeout = self.connection_draining_timeout + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + else: + if attributes.connection_draining.enabled: + self.changed = True + attributes.connection_draining.enabled = False + self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + + def _set_idle_timeout(self): + attributes = self.elb.get_attributes() + if self.idle_timeout is not None: + if attributes.connecting_settings.idle_timeout != self.idle_timeout: + self.changed = True + attributes.connecting_settings.idle_timeout = self.idle_timeout + 
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) + + def _policy_name(self, policy_type): + return 'elb-classic-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict')) + + def _create_policy(self, policy_param, policy_meth, policy): + getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy) + + def _delete_policy(self, elb_name, policy): + self.elb_conn.delete_lb_policy(elb_name, policy) + + def _update_policy(self, policy_param, policy_meth, policy_attr, policy): + self._delete_policy(self.elb.name, policy) + self._create_policy(policy_param, policy_meth, policy) + + def _set_listener_policy(self, listeners_dict, policy=None): + policy = [] if policy is None else policy + + for listener_port in listeners_dict: + if listeners_dict[listener_port].startswith('HTTP'): + self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy) + + def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs): + for p in getattr(elb_info.policies, policy_attrs['attr']): + if str(p.__dict__['policy_name']) == str(policy[0]): + if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0): + self._set_listener_policy(listeners_dict) + self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0]) + self.changed = True + break + else: + self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) + self.changed = True + + self._set_listener_policy(listeners_dict, policy) + + def select_stickiness_policy(self): + if self.stickiness: + + if 'cookie' in self.stickiness and 'expiration' in self.stickiness: + self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time') + + elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0] + d = {} + for listener in elb_info.listeners: + d[listener[0]] = listener[2] + listeners_dict = d + + if self.stickiness['type'] == 'loadbalancer': + policy = [] + policy_type = 'LBCookieStickinessPolicyType' + + if self.module.boolean(self.stickiness['enabled']): + + if 'expiration' not in self.stickiness: + self.module.fail_json(msg='expiration must be set when type is loadbalancer') + + try: + expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None + except ValueError: + self.module.fail_json(msg='expiration must be set to an integer') + + policy_attrs = { + 'type': policy_type, + 'attr': 'lb_cookie_stickiness_policies', + 'method': 'create_lb_cookie_stickiness_policy', + 'dict_key': 'cookie_expiration_period', + 'param_value': expiration + } + policy.append(self._policy_name(policy_attrs['type'])) + + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) + elif not self.module.boolean(self.stickiness['enabled']): + if len(elb_info.policies.lb_cookie_stickiness_policies): + if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): + self.changed = True + else: + self.changed = False + self._set_listener_policy(listeners_dict) + self._delete_policy(self.elb.name, self._policy_name(policy_type)) + + elif self.stickiness['type'] == 'application': + policy = [] + policy_type = 'AppCookieStickinessPolicyType' + if self.module.boolean(self.stickiness['enabled']): + + if 'cookie' not in self.stickiness: + self.module.fail_json(msg='cookie must be set when type is application') + + policy_attrs = { + 'type': policy_type, + 'attr': 
'app_cookie_stickiness_policies', + 'method': 'create_app_cookie_stickiness_policy', + 'dict_key': 'cookie_name', + 'param_value': self.stickiness['cookie'] + } + policy.append(self._policy_name(policy_attrs['type'])) + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) + elif not self.module.boolean(self.stickiness['enabled']): + if len(elb_info.policies.app_cookie_stickiness_policies): + if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): + self.changed = True + self._set_listener_policy(listeners_dict) + self._delete_policy(self.elb.name, self._policy_name(policy_type)) + + else: + self._set_listener_policy(listeners_dict) + + def _get_backend_policies(self): + """Get a list of backend policies""" + policies = [] + if self.elb.backends is not None: + for backend in self.elb.backends: + if backend.policies is not None: + for policy in backend.policies: + policies.append(str(backend.instance_port) + ':' + policy.policy_name) + + return policies + + def _set_backend_policies(self): + """Sets policies for all backends""" + ensure_proxy_protocol = False + replace = [] + backend_policies = self._get_backend_policies() + + # Find out what needs to be changed + for listener in self.listeners: + want = False + + if 'proxy_protocol' in listener and listener['proxy_protocol']: + ensure_proxy_protocol = True + want = True + + if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies: + if not want: + replace.append({'port': listener['instance_port'], 'policies': []}) + elif want: + replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']}) + + # enable or disable proxy protocol + if ensure_proxy_protocol: + self._set_proxy_protocol_policy() + + # Make the backend policies so + for item in replace: + self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies']) + self.changed = True + + def _get_proxy_protocol_policy(self): + """Find out if the elb has a proxy protocol enabled""" + if self.elb.policies is not None and self.elb.policies.other_policies is not None: + for policy in self.elb.policies.other_policies: + if policy.policy_name == 'ProxyProtocol-policy': + return policy.policy_name + + return None + + def _set_proxy_protocol_policy(self): + """Install a proxy protocol policy if needed""" + proxy_policy = self._get_proxy_protocol_policy() + + if proxy_policy is None: + self.elb_conn.create_lb_policy( + self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True} + ) + self.changed = True + + # TODO: remove proxy protocol policy if not needed anymore? 
There is no side effect to leaving it there + + def _diff_list(self, a, b): + """Find the entries in list a that are not in list b""" + b = set(b) + return [aa for aa in a if aa not in b] + + def _get_instance_ids(self): + """Get the current list of instance ids installed in the elb""" + instances = [] + if self.elb.instances is not None: + for instance in self.elb.instances: + instances.append(instance.id) + + return instances + + def _set_instance_ids(self): + """Register or deregister instances from an lb instance""" + assert_instances = self.instance_ids or [] + + has_instances = self._get_instance_ids() + + add_instances = self._diff_list(assert_instances, has_instances) + if add_instances: + self.elb_conn.register_instances(self.elb.name, add_instances) + self.changed = True + + if self.purge_instance_ids: + remove_instances = self._diff_list(has_instances, assert_instances) + if remove_instances: + self.elb_conn.deregister_instances(self.elb.name, remove_instances) + self.changed = True + + def _set_tags(self): + """Add/Delete tags""" + if self.tags is None: + return + + params = {'LoadBalancerNames.member.1': self.name} + + tagdict = dict() + + # get the current list of tags from the ELB, if ELB exists + if self.elb: + current_tags = self.elb_conn.get_list('DescribeTags', params, + [('member', Tag)]) + tagdict = dict((tag.Key, tag.Value) for tag in current_tags + if hasattr(tag, 'Key')) + + # Add missing tags + dictact = dict(set(self.tags.items()) - set(tagdict.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + params['Tags.member.%d.Value' % (i + 1)] = dictact[key] + + self.elb_conn.make_request('AddTags', params) + self.changed = True + + # Remove extra tags + dictact = dict(set(tagdict.items()) - set(self.tags.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + + self.elb_conn.make_request('RemoveTags', params) + self.changed = True + + def _get_health_check_target(self): + """Compose target string from healthcheck parameters""" + protocol = self.health_check['ping_protocol'].upper() + path = "" + + if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: + path = self.health_check['ping_path'] + + return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state={'required': True, 'choices': ['present', 'absent']}, + name={'required': True}, + listeners={'default': None, 'required': False, 'type': 'list'}, + purge_listeners={'default': True, 'required': False, 'type': 'bool'}, + instance_ids={'default': None, 'required': False, 'type': 'list'}, + purge_instance_ids={'default': False, 'required': False, 'type': 'bool'}, + zones={'default': None, 'required': False, 'type': 'list'}, + purge_zones={'default': False, 'required': False, 'type': 'bool'}, + security_group_ids={'default': None, 'required': False, 'type': 'list'}, + security_group_names={'default': None, 'required': False, 'type': 'list'}, + health_check={'default': None, 'required': False, 'type': 'dict'}, + subnets={'default': None, 'required': False, 'type': 'list'}, + purge_subnets={'default': False, 'required': False, 'type': 'bool'}, + scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']}, + connection_draining_timeout={'default': None, 'required': False, 'type': 'int'}, + idle_timeout={'default': None, 'type': 'int', 'required': False}, + 
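+        # Illustrative aside: _get_health_check_target() above folds the health_check
+        # parameters into the single target string boto expects, e.g. (assumed values)
+        #     {'ping_protocol': 'http', 'ping_port': 80, 'ping_path': '/index.html'}
+        # yields 'HTTP:80/index.html', while a plain TCP check reduces to 'TCP:80'.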
cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False}, + stickiness={'default': None, 'required': False, 'type': 'dict'}, + access_logs={'default': None, 'required': False, 'type': 'dict'}, + wait={'default': False, 'type': 'bool', 'required': False}, + wait_timeout={'default': 60, 'type': 'int', 'required': False}, + tags={'default': None, 'required': False, 'type': 'dict'} + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['security_group_ids', 'security_group_names']] + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + name = module.params['name'] + state = module.params['state'] + listeners = module.params['listeners'] + purge_listeners = module.params['purge_listeners'] + instance_ids = module.params['instance_ids'] + purge_instance_ids = module.params['purge_instance_ids'] + zones = module.params['zones'] + purge_zones = module.params['purge_zones'] + security_group_ids = module.params['security_group_ids'] + security_group_names = module.params['security_group_names'] + health_check = module.params['health_check'] + access_logs = module.params['access_logs'] + subnets = module.params['subnets'] + purge_subnets = module.params['purge_subnets'] + scheme = module.params['scheme'] + connection_draining_timeout = module.params['connection_draining_timeout'] + idle_timeout = module.params['idle_timeout'] + cross_az_load_balancing = module.params['cross_az_load_balancing'] + stickiness = module.params['stickiness'] + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] + tags = module.params['tags'] + + if state == 'present' and not listeners: + module.fail_json(msg="At least one listener is required for ELB creation") + + if state == 'present' and not (zones or subnets): + module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") + + if wait_timeout > 600: + module.fail_json(msg='wait_timeout maximum is 600 seconds') + + if security_group_names: + security_group_ids = [] + try: + ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) + if subnets: # We have at least one subnet, ergo this is a VPC + vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params) + vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id + filters = {'vpc_id': vpc_id} + else: + filters = None + grp_details = ec2.get_all_security_groups(filters=filters) + + for group_name in security_group_names: + if isinstance(group_name, string_types): + group_name = [group_name] + + group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name] + security_group_ids.extend(group_id) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + elb_man = ElbManager(module, name, listeners, purge_listeners, zones, + purge_zones, security_group_ids, health_check, + subnets, purge_subnets, scheme, + connection_draining_timeout, idle_timeout, + cross_az_load_balancing, + access_logs, stickiness, wait, wait_timeout, tags, + region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids, + **aws_connect_params) + + # check for unsupported attributes for this version of boto + if cross_az_load_balancing and not 
elb_man._check_attribute_support('cross_zone_load_balancing'): + module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") + + if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): + module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") + + if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): + module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") + + if state == 'present': + elb_man.ensure_ok() + elif state == 'absent': + elb_man.ensure_gone() + + ansible_facts = {'ec2_elb': 'info'} + ec2_facts_result = dict(changed=elb_man.changed, + elb=elb_man.get_info(), + ansible_facts=ansible_facts) + + module.exit_json(**ec2_facts_result) + + +if __name__ == '__main__': + main() diff --git a/elb_classic_lb_facts.py b/elb_classic_lb_facts.py new file mode 120000 index 00000000000..d182d5e1441 --- /dev/null +++ b/elb_classic_lb_facts.py @@ -0,0 +1 @@ +elb_classic_lb_info.py \ No newline at end of file diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py new file mode 100644 index 00000000000..f8c7a12f6e8 --- /dev/null +++ b/elb_classic_lb_info.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: elb_classic_lb_info +short_description: Gather information about EC2 Elastic Load Balancers in AWS +description: + - Gather information about EC2 Elastic Load Balancers in AWS + - This module was called C(elb_classic_lb_facts) before Ansible 2.9. The usage did not change. +author: + - "Michael Schultz (@mjschultz)" + - "Fernando Jose Pando (@nand0p)" +options: + names: + description: + - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. + type: list +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - botocore + - boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
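+
+# Illustrative only: select load balancers by a tag from the gathered list
+# (assumes some ELBs carry an "Env" tag)
+- elb_classic_lb_info:
+  register: elb_info
+
+- debug:
+    msg: "{{ elb_info.elbs | selectattr('tags.Env', 'defined') | map(attribute='load_balancer_name') | list }}"
+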
+# Output format tries to match ec2_elb_lb module input parameters + +# Gather information about all ELBs +- elb_classic_lb_info: + register: elb_info + +- debug: + msg: "{{ item.dns_name }}" + loop: "{{ elb_info.elbs }}" + +# Gather information about a particular ELB +- elb_classic_lb_info: + names: frontend-prod-elb + register: elb_info + +- debug: + msg: "{{ elb_info.elbs.0.dns_name }}" + +# Gather information about a set of ELBs +- elb_classic_lb_info: + names: + - frontend-prod-elb + - backend-prod-elb + register: elb_info + +- debug: + msg: "{{ item.dns_name }}" + loop: "{{ elb_info.elbs }}" + +''' + +RETURN = ''' +elbs: + description: a list of load balancers + returned: always + type: list + sample: + elbs: + - attributes: + access_log: + enabled: false + connection_draining: + enabled: true + timeout: 300 + connection_settings: + idle_timeout: 60 + cross_zone_load_balancing: + enabled: true + availability_zones: + - "us-east-1a" + - "us-east-1b" + - "us-east-1c" + - "us-east-1d" + - "us-east-1e" + backend_server_description: [] + canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com + canonical_hosted_zone_name_id: XXXXXXXXXXXXXX + created_time: 2017-08-23T18:25:03.280000+00:00 + dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com + health_check: + healthy_threshold: 10 + interval: 30 + target: HTTP:80/index.html + timeout: 5 + unhealthy_threshold: 2 + instances: [] + instances_inservice: [] + instances_inservice_count: 0 + instances_outofservice: [] + instances_outofservice_count: 0 + instances_unknownservice: [] + instances_unknownservice_count: 0 + listener_descriptions: + - listener: + instance_port: 80 + instance_protocol: HTTP + load_balancer_port: 80 + protocol: HTTP + policy_names: [] + load_balancer_name: test-lb + policies: + app_cookie_stickiness_policies: [] + lb_cookie_stickiness_policies: [] + other_policies: [] + scheme: internet-facing + security_groups: + - sg-29d13055 + source_security_group: + group_name: default + owner_alias: XXXXXXXXXXXX + subnets: + - subnet-XXXXXXXX + - subnet-XXXXXXXX + tags: {} + vpc_id: vpc-c248fda4 +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( + AWSRetry, + camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict +) + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def list_elbs(connection, names): + paginator = connection.get_paginator('describe_load_balancers') + load_balancers = paginator.paginate(LoadBalancerNames=names).build_full_result().get('LoadBalancerDescriptions', []) + results = [] + + for lb in load_balancers: + description = camel_dict_to_snake_dict(lb) + name = lb['LoadBalancerName'] + instances = lb.get('Instances', []) + description['tags'] = get_tags(connection, name) + description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') + description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') + description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') + description['attributes'] = get_lb_attributes(connection, name) + results.append(description) + return results + + +def get_lb_attributes(connection, name): + attributes 
= connection.describe_load_balancer_attributes(LoadBalancerName=name).get('LoadBalancerAttributes', {}) + return camel_dict_to_snake_dict(attributes) + + +def get_tags(connection, load_balancer_name): + tags = connection.describe_tags(LoadBalancerNames=[load_balancer_name])['TagDescriptions'] + if not tags: + return {} + return boto3_tag_list_to_ansible_dict(tags[0]['Tags']) + + +def lb_instance_health(connection, load_balancer_name, instances, state): + instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', []) + instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state] + return instate, len(instate) + + +def main(): + argument_spec = dict( + names={'default': [], 'type': 'list'} + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + if module._name == 'elb_classic_lb_facts': + module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", version='2.13') + + connection = module.client('elb') + + try: + elbs = list_elbs(connection, module.params.get('names')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get load balancer information.") + + module.exit_json(elbs=elbs) + + +if __name__ == '__main__': + main() diff --git a/elb_instance.py b/elb_instance.py new file mode 100644 index 00000000000..13971573b33 --- /dev/null +++ b/elb_instance.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: elb_instance +short_description: De-registers or registers instances from EC2 ELBs +description: + - This module de-registers or registers an AWS EC2 instance from the ELBs + that it belongs to. + - Returns fact "ec2_elbs" which is a list of elbs attached to the instance + if state=absent is passed as an argument. + - Will be marked changed when called only if there are ELBs found to operate on. +author: "John Jarvis (@jarv)" +options: + state: + description: + - register or deregister the instance + required: true + choices: ['present', 'absent'] + type: str + instance_id: + description: + - EC2 Instance ID + required: true + type: str + ec2_elbs: + description: + - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. + type: list + enable_availability_zone: + description: + - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already + been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. + type: bool + default: 'yes' + wait: + description: + - Wait for instance registration or deregistration to complete successfully before returning. + type: bool + default: 'yes' + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. + type: bool + default: 'yes' + wait_timeout: + description: + - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. 
+ If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. + default: 0 + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = """ +# basic pre_task and post_task example +pre_tasks: + - name: Gathering ec2 facts + action: ec2_facts + - name: Instance De-register + elb_instance: + instance_id: "{{ ansible_ec2_instance_id }}" + state: absent + delegate_to: localhost +roles: + - myrole +post_tasks: + - name: Instance Register + elb_instance: + instance_id: "{{ ansible_ec2_instance_id }}" + ec2_elbs: "{{ item }}" + state: present + delegate_to: localhost + loop: "{{ ec2_elbs }}" +""" + +import time + +try: + import boto + import boto.ec2 + import boto.ec2.autoscale + import boto.ec2.elb + from boto.regioninfo import RegionInfo + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, + get_aws_connection_info) + + +class ElbManager: + """Handles EC2 instance ELB registration and de-registration""" + + def __init__(self, module, instance_id=None, ec2_elbs=None, + region=None, **aws_connect_params): + self.module = module + self.instance_id = instance_id + self.region = region + self.aws_connect_params = aws_connect_params + self.lbs = self._get_instance_lbs(ec2_elbs) + self.changed = False + + def deregister(self, wait, timeout): + """De-register the instance from all ELBs and wait for the ELB + to report it out-of-service""" + + for lb in self.lbs: + initial_state = self._get_instance_health(lb) + if initial_state is None: + # Instance isn't registered with this load + # balancer. Ignore it and try the next one. + continue + + lb.deregister_instances([self.instance_id]) + + # The ELB is changing state in some way. Either an instance that's + # InService is moving to OutOfService, or an instance that's + # already OutOfService is being deregistered. + self.changed = True + + if wait: + self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) + + def register(self, wait, enable_availability_zone, timeout): + """Register the instance for all ELBs and wait for the ELB + to report the instance in-service""" + for lb in self.lbs: + initial_state = self._get_instance_health(lb) + + if enable_availability_zone: + self._enable_availailability_zone(lb) + + lb.register_instances([self.instance_id]) + + if wait: + self._await_elb_instance_state(lb, 'InService', initial_state, timeout) + else: + # We cannot assume no change was made if we don't wait + # to find out + self.changed = True + + def exists(self, lbtest): + """ Verify that the named ELB actually exists """ + + found = False + for lb in self.lbs: + if lb.name == lbtest: + found = True + break + return found + + def _enable_availailability_zone(self, lb): + """Enable the current instance's availability zone in the provided lb. + Returns True if the zone was enabled or False if no change was made. 
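+        The underlying call is boto's LoadBalancer.enable_zones(); e.g.
+        lb.enable_zones(zones=instance.placement) adds the single zone the
+        instance lives in (an illustrative restatement of the code below).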
+        lb: load balancer"""
+        instance = self._get_instance()
+        if instance.placement in lb.availability_zones:
+            return False
+
+        lb.enable_zones(zones=instance.placement)
+
+        # If successful, the new zone will have been added to
+        # lb.availability_zones
+        return instance.placement in lb.availability_zones
+
+    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
+        """Wait for an ELB to change state
+        lb: load balancer
+        awaited_state: state to poll for (string)"""
+
+        wait_timeout = time.time() + timeout
+        while True:
+            instance_state = self._get_instance_health(lb)
+
+            if not instance_state:
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: Invalid Instance")
+                self.module.fail_json(msg=msg % (self.instance_id, lb))
+
+            if instance_state.state == awaited_state:
+                # Check the current state against the initial state, and only set
+                # changed if they are different.
+                if (initial_state is None) or (instance_state.state != initial_state.state):
+                    self.changed = True
+                break
+            elif self._is_instance_state_pending(instance_state):
+                # If it's pending, we'll skip further checks and continue waiting
+                pass
+            elif (awaited_state == 'InService'
+                  and instance_state.reason_code == "Instance"
+                  and time.time() >= wait_timeout):
+                # If the reason_code for the instance being out of service is
+                # "Instance" this indicates a failure state, e.g. the instance
+                # has failed a health check or the ELB does not have the
+                # instance's availability zone enabled. The exact reason why is
+                # described in InstanceState.description.
+                msg = ("The instance %s could not be put in service on %s."
+                       " Reason: %s")
+                self.module.fail_json(msg=msg % (self.instance_id,
+                                                 lb,
+                                                 instance_state.description))
+            if time.time() >= wait_timeout:
+                # Fail rather than polling forever when the awaited state is never
+                # reached; the branch above gives a richer message where a specific
+                # failure reason is available.
+                self.module.fail_json(msg="Timed out waiting for instance %s to reach %s on %s" %
+                                          (self.instance_id, awaited_state, lb))
+            time.sleep(1)
+
+    def _is_instance_state_pending(self, instance_state):
+        """
+        Determines whether the instance_state is "pending", meaning there is
+        an operation under way to bring it in service.
+        """
+        # This is messy, because AWS provides no way to distinguish between
+        # an instance that is OutOfService because it's pending vs. OutOfService
+        # because it's failing health checks. So we're forced to analyze the
+        # description, which is likely to be brittle.
+        return (instance_state and 'pending' in instance_state.description)
+
+    def _get_instance_health(self, lb):
+        """
+        Check instance health, should return status object or None under
+        certain error conditions.
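+        Illustrative contract: a registered instance yields an InstanceState
+        whose .state is e.g. 'InService' or 'OutOfService'; an instance that
+        was never registered raises InvalidInstance, which is mapped to None.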
+ """ + try: + status = lb.get_instance_health([self.instance_id])[0] + except boto.exception.BotoServerError as e: + if e.error_code == 'InvalidInstance': + return None + else: + raise + return status + + def _get_instance_lbs(self, ec2_elbs=None): + """Returns a list of ELBs attached to self.instance_id + ec2_elbs: an optional list of elb names that will be used + for elb lookup instead of returning what elbs + are attached to self.instance_id""" + + if not ec2_elbs: + ec2_elbs = self._get_auto_scaling_group_lbs() + + try: + elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json(msg=str(e)) + + elbs = [] + marker = None + while True: + try: + newelbs = elb.get_all_load_balancers(marker=marker) + marker = newelbs.next_marker + elbs.extend(newelbs) + if not marker: + break + except TypeError: + # Older version of boto do not allow for params + elbs = elb.get_all_load_balancers() + break + + if ec2_elbs: + lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) + else: + lbs = [] + for lb in elbs: + for info in lb.instances: + if self.instance_id == info.id: + lbs.append(lb) + return lbs + + def _get_auto_scaling_group_lbs(self): + """Returns a list of ELBs associated with self.instance_id + indirectly through its auto scaling group membership""" + + try: + asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json(msg=str(e)) + + asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) + if len(asg_instances) > 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") + + if not asg_instances: + asg_elbs = [] + else: + asg_name = asg_instances[0].group_name + + asgs = asg.get_all_groups([asg_name]) + if len(asg_instances) != 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group.") + + asg_elbs = asgs[0].load_balancers + + return asg_elbs + + def _get_instance(self): + """Returns a boto.ec2.InstanceObject for self.instance_id""" + try: + ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + self.module.fail_json(msg=str(e)) + return ec2.get_only_instances(instance_ids=[self.instance_id])[0] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state={'required': True, 'choices': ['present', 'absent']}, + instance_id={'required': True}, + ec2_elbs={'default': None, 'required': False, 'type': 'list'}, + enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, + wait={'required': False, 'default': True, 'type': 'bool'}, + wait_timeout={'required': False, 'default': 0, 'type': 'int'} + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + ec2_elbs = module.params['ec2_elbs'] + wait = module.params['wait'] + enable_availability_zone = module.params['enable_availability_zone'] + timeout = module.params['wait_timeout'] + + if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: + 
module.fail_json(msg="ELBs are required for registration") + + instance_id = module.params['instance_id'] + elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) + + if ec2_elbs is not None: + for elb in ec2_elbs: + if not elb_man.exists(elb): + msg = "ELB %s does not exist" % elb + module.fail_json(msg=msg) + + if not module.check_mode: + if module.params['state'] == 'present': + elb_man.register(wait, enable_availability_zone, timeout) + elif module.params['state'] == 'absent': + elb_man.deregister(wait, timeout) + + ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} + ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) + + module.exit_json(**ec2_facts_result) + + +if __name__ == '__main__': + main() diff --git a/elb_network_lb.py b/elb_network_lb.py new file mode 100644 index 00000000000..59252419670 --- /dev/null +++ b/elb_network_lb.py @@ -0,0 +1,469 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Rob White (@wimnat) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: elb_network_lb +short_description: Manage a Network Load Balancer +description: + - Manage an AWS Network Elastic Load Balancer. See + U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + cross_zone_load_balancing: + description: + - Indicates whether cross-zone load balancing is enabled. + default: false + type: bool + deletion_protection: + description: + - Indicates whether deletion protection for the ELB is enabled. + default: false + type: bool + listeners: + description: + - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys + are CamelCased. + type: list + elements: dict + suboptions: + Port: + description: The port on which the load balancer is listening. + type: int + required: true + Protocol: + description: The protocol for connections from clients to the load balancer. + type: str + required: true + Certificates: + description: The SSL server certificate. + type: list + elements: dict + suboptions: + CertificateArn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + SslPolicy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + DefaultActions: + description: The default actions for the listener. + required: true + type: list + elements: dict + suboptions: + Type: + description: The type of action. + type: str + TargetGroupArn: + description: The Amazon Resource Name (ARN) of the target group. + type: str + name: + description: + - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric + characters or hyphens, and must not begin or end with a hyphen. + required: true + type: str + purge_listeners: + description: + - If I(purge_listeners=true), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. + - If the I(listeners) parameter is not set then listeners will not be modified. 
+ default: true + type: bool + purge_tags: + description: + - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + - If the I(tags) parameter is not set then tags will not be modified. + default: true + type: bool + subnet_mappings: + description: + - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP + to attach to the load balancer. You can specify one Elastic IP address per subnet. + - This parameter is mutually exclusive with I(subnets). + type: list + elements: dict + subnets: + description: + - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from + at least two Availability Zones. + - Required when I(state=present). + - This parameter is mutually exclusive with I(subnet_mappings). + type: list + scheme: + description: + - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. + default: internet-facing + choices: [ 'internet-facing', 'internal' ] + type: str + state: + description: + - Create or destroy the load balancer. + - The current default is C(absent). However, this behavior is inconsistent with other modules + and as such the default will change to C(present) in 2.14. + To maintain the existing behavior explicitly set I(state=absent). + choices: [ 'present', 'absent' ] + type: str + tags: + description: + - A dictionary of one or more tags to assign to the load balancer. + type: dict + wait: + description: + - Whether or not to wait for the network load balancer to reach the desired state. + type: bool + wait_timeout: + description: + - The duration in seconds to wait, used in conjunction with I(wait). + type: int +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +notes: + - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. + - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ELB and attach a listener +- elb_network_lb: + name: myelb + subnets: + - subnet-012345678 + - subnet-abcdef000 + listeners: + - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive). + Port: 80 # Required. The port on which the load balancer is listening. + DefaultActions: + - Type: forward # Required. Only 'forward' is accepted at this time + TargetGroupName: mytargetgroup # Required. The name of the target group + state: present + +# Create an ELB with an attached Elastic IP address +- elb_network_lb: + name: myelb + subnet_mappings: + - SubnetId: subnet-012345678 + AllocationId: eipalloc-aabbccdd + listeners: + - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive). + Port: 80 # Required. The port on which the load balancer is listening. + DefaultActions: + - Type: forward # Required. Only 'forward' is accepted at this time + TargetGroupName: mytargetgroup # Required. The name of the target group + state: present + +# Remove an ELB +- elb_network_lb: + name: myelb + state: absent + +''' + +RETURN = ''' +availability_zones: + description: The Availability Zones for the load balancer. 
+ returned: when state is present + type: list + sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]" +canonical_hosted_zone_id: + description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. + returned: when state is present + type: str + sample: ABCDEF12345678 +created_time: + description: The date and time the load balancer was created. + returned: when state is present + type: str + sample: "2015-02-12T02:14:02+00:00" +deletion_protection_enabled: + description: Indicates whether deletion protection is enabled. + returned: when state is present + type: str + sample: true +dns_name: + description: The public DNS name of the load balancer. + returned: when state is present + type: str + sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com +idle_timeout_timeout_seconds: + description: The idle timeout value, in seconds. + returned: when state is present + type: str + sample: 60 +ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + returned: when state is present + type: str + sample: ipv4 +listeners: + description: Information about the listeners. + returned: when state is present + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + returned: when state is present + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + returned: when state is present + type: str + sample: "" + port: + description: The port on which the load balancer is listening. + returned: when state is present + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + returned: when state is present + type: str + sample: HTTPS + certificates: + description: The SSL server certificate. + returned: when state is present + type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + returned: when state is present + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. + returned: when state is present + type: str + sample: "" + default_actions: + description: The default actions for the listener. + returned: when state is present + type: str + contains: + type: + description: The type of action. + returned: when state is present + type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + returned: when state is present + type: str + sample: "" +load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + returned: when state is present + type: str + sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 +load_balancer_name: + description: The name of the load balancer. + returned: when state is present + type: str + sample: my-elb +load_balancing_cross_zone_enabled: + description: Indicates whether cross-zone load balancing is enabled. + returned: when state is present + type: str + sample: true +scheme: + description: Internet-facing or internal load balancer. + returned: when state is present + type: str + sample: internal +state: + description: The state of the load balancer. + returned: when state is present + type: dict + sample: "{'code': 'active'}" +tags: + description: The tags attached to the load balancer. 
+ returned: when state is present + type: dict + sample: "{ + 'Tag': 'Example' + }" +type: + description: The type of load balancer. + returned: when state is present + type: str + sample: network +vpc_id: + description: The ID of the VPC for the load balancer. + returned: when state is present + type: str + sample: vpc-0011223344 +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags +from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener + + +def create_or_update_elb(elb_obj): + """Create ELB or modify main attributes. json_exit here""" + + if elb_obj.elb: + # ELB exists so check subnets, security groups and tags match what has been passed + + # Subnets + if not elb_obj.compare_subnets(): + elb_obj.modify_subnets() + + # Tags - only need to play with tags if tags parameter has been set to something + if elb_obj.tags is not None: + + # Delete necessary tags + tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), + boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) + if tags_to_delete: + elb_obj.delete_tags(tags_to_delete) + + # Add/update tags + if tags_need_modify: + elb_obj.modify_tags() + + else: + # Create load balancer + elb_obj.create_elb() + + # ELB attributes + elb_obj.update_elb_attributes() + elb_obj.modify_elb_attributes() + + # Listeners + listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + + listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() + + # Delete listeners + for listener_to_delete in listeners_to_delete: + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj.delete() + listeners_obj.changed = True + + # Add listeners + for listener_to_add in listeners_to_add: + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj.add() + listeners_obj.changed = True + + # Modify listeners + for listener_to_modify in listeners_to_modify: + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj.modify() + listeners_obj.changed = True + + # If listeners changed, mark ELB as changed + if listeners_obj.changed: + elb_obj.changed = True + + # Get the ELB again + elb_obj.update() + + # Get the ELB listeners again + listeners_obj.update() + + # Update the ELB attributes + elb_obj.update_elb_attributes() + + # Convert to snake_case and merge in everything we want to return to the user + snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) + snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) + snaked_elb['listeners'] = [] + for listener in listeners_obj.current_listeners: + snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + + # Change tags to ansible friendly dict + snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + + elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) + + +def delete_elb(elb_obj): + + if elb_obj.elb: + elb_obj.delete() + + elb_obj.module.exit_json(changed=elb_obj.changed) + + +def main(): + + argument_spec = ( + dict( + cross_zone_load_balancing=dict(type='bool'), + 
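+            # Illustrative aside: compare_aws_tags() used in create_or_update_elb()
+            # returns (tags_to_set, tag_keys_to_delete); e.g. current {'a': '1', 'b': '2'}
+            # against desired {'a': '2'} with purge_tags=true yields ({'a': '2'}, ['b']).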
deletion_protection=dict(type='bool'), + listeners=dict(type='list', + elements='dict', + options=dict( + Protocol=dict(type='str', required=True), + Port=dict(type='int', required=True), + SslPolicy=dict(type='str'), + Certificates=dict(type='list'), + DefaultActions=dict(type='list', required=True) + ) + ), + name=dict(required=True, type='str'), + purge_listeners=dict(default=True, type='bool'), + purge_tags=dict(default=True, type='bool'), + subnets=dict(type='list'), + subnet_mappings=dict(type='list'), + scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), + state=dict(choices=['present', 'absent'], type='str'), + tags=dict(type='dict'), + wait_timeout=dict(type='int'), + wait=dict(type='bool') + ) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + mutually_exclusive=[['subnets', 'subnet_mappings']]) + + # Check for subnets or subnet_mappings if state is present + state = module.params.get("state") + if state == 'present': + if module.params.get("subnets") is None and module.params.get("subnet_mappings") is None: + module.fail_json(msg="'subnets' or 'subnet_mappings' is required when state=present") + + if state is None: + # See below, unless state==present we delete. Ouch. + module.deprecate('State currently defaults to absent. This is inconsistent with other modules' + ' and the default will be changed to `present` in Ansible 2.14', + version='2.14') + + # Quick check of listeners parameters + listeners = module.params.get("listeners") + if listeners is not None: + for listener in listeners: + for key in listener.keys(): + protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP'] + if key == 'Protocol' and listener[key] not in protocols_list: + module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list)) + + connection = module.client('elbv2') + connection_ec2 = module.client('ec2') + + elb = NetworkLoadBalancer(connection, connection_ec2, module) + + if state == 'present': + create_or_update_elb(elb) + else: + delete_elb(elb) + + +if __name__ == '__main__': + main() diff --git a/elb_target.py b/elb_target.py new file mode 100644 index 00000000000..9c3ff4eea36 --- /dev/null +++ b/elb_target.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: elb_target +short_description: Manage a target in a target group +description: + - Used to register or deregister a target in a target group +author: "Rob White (@wimnat)" +options: + deregister_unused: + description: + - The default behaviour for targets that are unused is to leave them registered. If instead you would like to remove them + set I(deregister_unused) to yes. + type: bool + target_az: + description: + - An Availability Zone or all. This determines whether the target receives traffic from the load balancer nodes in the specified + Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target + type of the target group is instance. + type: str + target_group_arn: + description: + - The Amazon Resource Name (ARN) of the target group. Mutually exclusive of I(target_group_name). + type: str + target_group_name: + description: + - The name of the target group. 
Mutually exclusive of I(target_group_arn). + type: str + target_id: + description: + - The ID of the target. + required: true + type: str + target_port: + description: + - The port on which the target is listening. You can specify a port override. If a target is already registered, + you can register it again using a different port. + - The default port for a target is the port for the target group. + required: false + type: int + target_status: + description: + - Blocks and waits for the target status to equal given value. For more detail on target status see + U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states) + required: false + choices: [ 'initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable' ] + type: str + target_status_timeout: + description: + - Maximum time in seconds to wait for target_status change + required: false + default: 60 + type: int + state: + description: + - Register or deregister the target. + required: true + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +notes: + - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it. +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Register an IP address target to a target group +- elb_target: + target_group_name: myiptargetgroup + target_id: i-1234567 + state: present + +# Register an instance target to a target group +- elb_target: + target_group_name: mytargetgroup + target_id: i-1234567 + state: present + +# Deregister a target from a target group +- elb_target: + target_group_name: mytargetgroup + target_id: i-1234567 + state: absent + +# Modify a target to use a different port +# Register a target to a target group +- elb_target: + target_group_name: mytargetgroup + target_id: i-1234567 + target_port: 8080 + state: present + +''' + +RETURN = ''' + +''' + +import traceback +from time import time, sleep +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict, + ec2_argument_spec, get_aws_connection_info, + AWSRetry) + +try: + import boto3 + from botocore.exceptions import ClientError, BotoCoreError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +def describe_target_groups_with_backoff(connection, tg_name): + return connection.describe_target_groups(Names=[tg_name]) + + +def convert_tg_name_to_arn(connection, module, tg_name): + + try: + response = describe_target_groups_with_backoff(connection, tg_name) + except ClientError as e: + module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except BotoCoreError as e: + module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)), + exception=traceback.format_exc()) + + tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + + return tg_arn + + +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +def describe_targets_with_backoff(connection, tg_arn, target): + if target is None: + tg = [] + else: + tg = 
[target]
+
+    return connection.describe_target_health(TargetGroupArn=tg_arn, Targets=tg)
+
+
+def describe_targets(connection, module, tg_arn, target=None):
+
+    """
+    Describe targets in a target group
+
+    :param module: ansible module object
+    :param connection: boto3 connection
+    :param tg_arn: target group arn
+    :param target: dictionary containing target id and port
+    :return:
+    """
+
+    try:
+        targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions']
+        if not targets:
+            return {}
+        return targets[0]
+    except ClientError as e:
+        module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except BotoCoreError as e:
+        module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
+                         exception=traceback.format_exc())
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def register_target_with_backoff(connection, target_group_arn, target):
+    connection.register_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def register_target(connection, module):
+
+    """
+    Registers a target to a target group
+
+    :param module: ansible module object
+    :param connection: boto3 connection
+    :return:
+    """
+
+    target_az = module.params.get("target_az")
+    target_group_arn = module.params.get("target_group_arn")
+    target_id = module.params.get("target_id")
+    target_port = module.params.get("target_port")
+    target_status = module.params.get("target_status")
+    target_status_timeout = module.params.get("target_status_timeout")
+    changed = False
+
+    if not target_group_arn:
+        target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
+
+    target = dict(Id=target_id)
+    if target_az:
+        target['AvailabilityZone'] = target_az
+    if target_port:
+        target['Port'] = target_port
+
+    target_description = describe_targets(connection, module, target_group_arn, target)
+
+    if 'Reason' in target_description['TargetHealth']:
+        if target_description['TargetHealth']['Reason'] == "Target.NotRegistered":
+            try:
+                register_target_with_backoff(connection, target_group_arn, target)
+                changed = True
+                if target_status:
+                    target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+            except ClientError as e:
+                module.fail_json(msg="Unable to register target {0}: {1}".format(target, to_native(e)),
+                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+            except BotoCoreError as e:
+                module.fail_json(msg="Unable to register target {0}: {1}".format(target, to_native(e)),
+                                 exception=traceback.format_exc())
+
+    # Get all targets for the target group
+    target_descriptions = describe_targets(connection, module, target_group_arn)
+
+    module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def deregister_target_with_backoff(connection, target_group_arn, target):
+    connection.deregister_targets(TargetGroupArn=target_group_arn, Targets=[target])
+
+
+def deregister_target(connection, module):
+
+    """
+    Deregisters a target from a target group
+
+    :param module: ansible module object
+    :param connection: boto3 connection
+    :return:
+    """
+
+    deregister_unused = module.params.get("deregister_unused")
+    target_group_arn = module.params.get("target_group_arn")
+    target_id =
module.params.get("target_id") + target_port = module.params.get("target_port") + target_status = module.params.get("target_status") + target_status_timeout = module.params.get("target_status_timeout") + changed = False + + if not target_group_arn: + target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name")) + + target = dict(Id=target_id) + if target_port: + target['Port'] = target_port + + target_description = describe_targets(connection, module, target_group_arn, target) + current_target_state = target_description['TargetHealth']['State'] + current_target_reason = target_description['TargetHealth'].get('Reason') + + needs_deregister = False + + if deregister_unused and current_target_state == 'unused': + if current_target_reason != 'Target.NotRegistered': + needs_deregister = True + elif current_target_state not in ['unused', 'draining']: + needs_deregister = True + + if needs_deregister: + try: + deregister_target_with_backoff(connection, target_group_arn, target) + changed = True + except ClientError as e: + module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except BotoCoreError as e: + module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), + exception=traceback.format_exc()) + else: + if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining': + module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " + + "To force deregistration use the 'deregister_unused' option.") + + if target_status: + target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) + + # Get all targets for the target group + target_descriptions = describe_targets(connection, module, target_group_arn) + + module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + + +def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout): + reached_state = False + timeout = target_status_timeout + time() + while time() < timeout: + health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State'] + if health_state == target_status: + reached_state = True + break + sleep(1) + if not reached_state: + module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}: '.format(target_status_timeout, health_state)) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + deregister_unused=dict(type='bool', default=False), + target_az=dict(type='str'), + target_group_arn=dict(type='str'), + target_group_name=dict(type='str'), + target_id=dict(type='str', required=True), + target_port=dict(type='int'), + target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), + target_status_timeout=dict(type='int', default=60), + state=dict(required=True, choices=['present', 'absent'], type='str'), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['target_group_arn', 'target_group_name']] + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + connection = boto3_conn(module, conn_type='client', resource='elbv2', 
region=region, endpoint=ec2_url, **aws_connect_params) + + state = module.params.get("state") + + if state == 'present': + register_target(connection, module) + else: + deregister_target(connection, module) + + +if __name__ == '__main__': + main() diff --git a/elb_target_facts.py b/elb_target_facts.py new file mode 120000 index 00000000000..897c23897de --- /dev/null +++ b/elb_target_facts.py @@ -0,0 +1 @@ +elb_target_info.py \ No newline at end of file diff --git a/elb_target_group.py b/elb_target_group.py new file mode 100644 index 00000000000..f6194b07ec1 --- /dev/null +++ b/elb_target_group.py @@ -0,0 +1,857 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: elb_target_group +short_description: Manage a target group for an Application or Network load balancer +description: + - Manage an AWS Elastic Load Balancer target group. See + U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or + U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details. +requirements: [ boto3 ] +author: "Rob White (@wimnat)" +options: + deregistration_delay_timeout: + description: + - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. + The range is 0-3600 seconds. + type: int + health_check_protocol: + description: + - The protocol the load balancer uses when performing health checks on targets. + required: false + choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] + type: str + health_check_port: + description: + - The port the load balancer uses when performing health checks on targets. + Can be set to 'traffic-port' to match target port. + - When not defined will default to the port on which each target receives traffic from the load balancer. + required: false + type: str + health_check_path: + description: + - The ping path that is the destination on the targets for health checks. The path must be defined in order to set a health check. + - Requires the I(health_check_protocol) parameter to be set. + required: false + type: str + health_check_interval: + description: + - The approximate amount of time, in seconds, between health checks of an individual target. + required: false + type: int + health_check_timeout: + description: + - The amount of time, in seconds, during which no response from a target means a failed health check. + required: false + type: int + healthy_threshold_count: + description: + - The number of consecutive health checks successes required before considering an unhealthy target healthy. + required: false + type: int + modify_targets: + description: + - Whether or not to alter existing targets in the group to match what is passed with the module + required: false + default: yes + type: bool + name: + description: + - The name of the target group. + required: true + type: str + port: + description: + - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. Required if + I(state) is C(present). 
+  protocol:
+    description:
+      - The protocol to use for routing traffic to the targets. Required when I(state) is C(present).
+    required: false
+    choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+    type: str
+  purge_tags:
+    description:
+      - If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter. If the tag parameter is not set,
+        then tags will not be modified.
+    required: false
+    default: yes
+    type: bool
+  state:
+    description:
+      - Create or destroy the target group.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  stickiness_enabled:
+    description:
+      - Indicates whether sticky sessions are enabled.
+    type: bool
+  stickiness_lb_cookie_duration:
+    description:
+      - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load
+        balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds).
+    type: int
+  stickiness_type:
+    description:
+      - The type of sticky sessions. The possible value is C(lb_cookie).
+    default: lb_cookie
+    type: str
+  successful_response_codes:
+    description:
+      - The HTTP codes to use when checking for a successful response from a target.
+      - Accepts multiple values (for example, "200,202") or a range of values (for example, "200-299").
+      - Requires the I(health_check_protocol) parameter to be set.
+    required: false
+    type: str
+  tags:
+    description:
+      - A dictionary of one or more tags to assign to the target group.
+    required: false
+    type: dict
+  target_type:
+    description:
+      - The type of target to specify when registering targets with this target group. The possible values are
+        C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address) or C(lambda) (target is specified by ARN).
+        Note that you can't specify targets for a target group using more than one type. Target type lambda only accepts one target; when more than
+        one target is specified, only the first one is used and all additional targets are ignored.
+        If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target
+        group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), or the RFC 6598 range (100.64.0.0/10).
+        You can't specify publicly routable IP addresses.
+      - The default behavior is C(instance).
+    required: false
+    choices: ['instance', 'ip', 'lambda']
+    type: str
+  targets:
+    description:
+      - A list of targets to assign to the target group. This parameter defaults to an empty list. If I(modify_targets=yes) (the default),
+        any existing targets not in this list will be removed from the group. Each target should be a dict with C(Id) and C(Port) keys.
+        See the examples for detail.
+    required: false
+    type: list
+  unhealthy_threshold_count:
+    description:
+      - The number of consecutive health check failures required before considering a target unhealthy.
+    required: false
+    type: int
+  vpc_id:
+    description:
+      - The identifier of the virtual private cloud (VPC). Required when I(state) is C(present).
+    required: false
+    type: str
+  wait:
+    description:
+      - Whether or not to wait for newly registered targets to become healthy, and for removed targets to become unused, before returning.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - The maximum time, in seconds, to wait for the targets in the target group.
+    default: 200
+    type: int
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+notes:
+  - Once a target group has been created, only its health check can be modified using subsequent calls.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a target group with a default health check
+- elb_target_group:
+    name: mytargetgroup
+    protocol: http
+    port: 80
+    vpc_id: vpc-01234567
+    state: present
+
+# Modify the target group with a custom health check
+- elb_target_group:
+    name: mytargetgroup
+    protocol: http
+    port: 80
+    vpc_id: vpc-01234567
+    health_check_protocol: http
+    health_check_path: /health_check
+    health_check_port: 80
+    successful_response_codes: 200
+    health_check_interval: 15
+    health_check_timeout: 3
+    healthy_threshold_count: 4
+    unhealthy_threshold_count: 3
+    state: present
+
+# Delete a target group
+- elb_target_group:
+    name: mytargetgroup
+    state: absent
+
+# Create a target group with instance targets
+- elb_target_group:
+    name: mytargetgroup
+    protocol: http
+    port: 81
+    vpc_id: vpc-01234567
+    health_check_protocol: http
+    health_check_path: /
+    successful_response_codes: "200,250-260"
+    targets:
+      - Id: i-01234567
+        Port: 80
+      - Id: i-98765432
+        Port: 80
+    state: present
+    wait_timeout: 200
+    wait: True
+
+# Create a target group with IP address targets
+- elb_target_group:
+    name: mytargetgroup
+    protocol: http
+    port: 81
+    vpc_id: vpc-01234567
+    health_check_protocol: http
+    health_check_path: /
+    successful_response_codes: "200,250-260"
+    target_type: ip
+    targets:
+      - Id: 10.0.0.10
+        Port: 80
+        AvailabilityZone: all
+      - Id: 10.0.0.20
+        Port: 80
+    state: present
+    wait_timeout: 200
+    wait: True
+
+# Using lambda as targets requires that the target group itself is allowed
+# to invoke the lambda function. Therefore you first need to create an empty
+# target group to obtain its ARN, second, allow the target group to invoke
+# the lambda function, and third, add the target to the target group.
+- name: first, create empty target group
+  elb_target_group:
+    name: my-lambda-targetgroup
+    target_type: lambda
+    state: present
+    modify_targets: False
+  register: out
+
+- name: second, allow invoke of the lambda
+  lambda_policy:
+    state: "{{ state | default('present') }}"
+    function_name: my-lambda-function
+    statement_id: someID
+    action: lambda:InvokeFunction
+    principal: elasticloadbalancing.amazonaws.com
+    source_arn: "{{ out.target_group_arn }}"
+
+- name: third, add target
+  elb_target_group:
+    name: my-lambda-targetgroup
+    target_type: lambda
+    state: present
+    targets:
+      - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
+
+'''
+
+RETURN = '''
+deregistration_delay_timeout_seconds:
+    description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+    returned: when state present
+    type: int
+    sample: 300
+health_check_interval_seconds:
+    description: The approximate amount of time, in seconds, between health checks of an individual target.
+    returned: when state present
+    type: int
+    sample: 30
+health_check_path:
+    description: The destination for the health check request.
+    returned: when state present
+    type: str
+    sample: /index.html
+health_check_port:
+    description: The port to use to connect with the target.
+    returned: when state present
+    type: str
+    sample: traffic-port
+health_check_protocol:
+    description: The protocol to use to connect with the target.
+    returned: when state present
+    type: str
+    sample: HTTP
+health_check_timeout_seconds:
+    description: The amount of time, in seconds, during which no response means a failed health check.
+    returned: when state present
+    type: int
+    sample: 5
+healthy_threshold_count:
+    description: The number of consecutive health check successes required before considering an unhealthy target healthy.
+    returned: when state present
+    type: int
+    sample: 5
+load_balancer_arns:
+    description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+    returned: when state present
+    type: list
+    sample: []
+matcher:
+    description: The HTTP codes to use when checking for a successful response from a target.
+    returned: when state present
+    type: dict
+    sample: {
+        "http_code": "200"
+    }
+port:
+    description: The port on which the targets are listening.
+    returned: when state present
+    type: int
+    sample: 80
+protocol:
+    description: The protocol to use for routing traffic to the targets.
+    returned: when state present
+    type: str
+    sample: HTTP
+stickiness_enabled:
+    description: Indicates whether sticky sessions are enabled.
+    returned: when state present
+    type: bool
+    sample: true
+stickiness_lb_cookie_duration_seconds:
+    description: The time period, in seconds, during which requests from a client should be routed to the same target.
+    returned: when state present
+    type: int
+    sample: 86400
+stickiness_type:
+    description: The type of sticky sessions.
+    returned: when state present
+    type: str
+    sample: lb_cookie
+tags:
+    description: The tags attached to the target group.
+    returned: when state present
+    type: dict
+    sample: "{
+        'Tag': 'Example'
+    }"
+target_group_arn:
+    description: The Amazon Resource Name (ARN) of the target group.
+    returned: when state present
+    type: str
+    sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
+target_group_name:
+    description: The name of the target group.
+    returned: when state present
+    type: str
+    sample: mytargetgroup
+unhealthy_threshold_count:
+    description: The number of consecutive health check failures required before considering the target unhealthy.
+    returned: when state present
+    type: int
+    sample: 2
+vpc_id:
+    description: The ID of the VPC for the targets.
+    returned: when state present
+    type: str
+    sample: vpc-0123456
+'''
+
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict,
+                                                                         compare_aws_tags, ansible_dict_to_boto3_tag_list)
+from distutils.version import LooseVersion
+
+
+def get_tg_attributes(connection, module, tg_arn):
+    try:
+        tg_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=tg_arn)['Attributes'])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't get target group attributes")
+
+    # Replace '.' with '_' in attribute key names to make it more Ansibley
+    return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items())
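As a worked illustration of the key rewrite in get_tg_attributes() above, with assumed sample values, the Key/Value attribute list returned by the API becomes a flat dict with underscores in place of dots:

    # Hypothetical attribute list, as describe_target_group_attributes might return it
    raw_attributes = [
        {'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'},
        {'Key': 'stickiness.enabled', 'Value': 'false'},
    ]
    as_dict = dict((item['Key'], item['Value']) for item in raw_attributes)
    munged = dict((k.replace('.', '_'), v) for k, v in as_dict.items())
    # {'deregistration_delay_timeout_seconds': '300', 'stickiness_enabled': 'false'}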
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+    try:
+        return connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't get target group tags")
+
+
+def get_target_group(connection, module):
+    try:
+        target_group_paginator = connection.get_paginator('describe_target_groups')
+        return (target_group_paginator.paginate(Names=[module.params.get("name")]).build_full_result())['TargetGroups'][0]
+    except botocore.exceptions.ClientError as e:
+        # Only ClientError carries a response dict; a missing group is not an error here
+        if e.response['Error']['Code'] == 'TargetGroupNotFound':
+            return None
+        module.fail_json_aws(e, msg="Couldn't get target group")
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="Couldn't get target group")
+
+
+def wait_for_status(connection, module, target_group_arn, targets, status):
+    polling_increment_secs = 5
+    max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+    status_achieved = False
+
+    for x in range(0, max_retries):
+        try:
+            response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets)
+            if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status:
+                status_achieved = True
+                break
+            else:
+                time.sleep(polling_increment_secs)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't describe target health")
+
+    result = response
+    return status_achieved, result
+
+
+def fail_if_ip_target_type_not_supported(module):
+    if LooseVersion(botocore.__version__) < LooseVersion('1.7.2'):
+        module.fail_json(msg="target_type ip requires botocore version 1.7.2 or later. 
Version %s is installed" % + botocore.__version__) + + +def create_or_update_target_group(connection, module): + + changed = False + new_target_group = False + params = dict() + target_type = module.params.get("target_type") + params['Name'] = module.params.get("name") + params['TargetType'] = target_type + if target_type != "lambda": + params['Protocol'] = module.params.get("protocol").upper() + params['Port'] = module.params.get("port") + params['VpcId'] = module.params.get("vpc_id") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") + stickiness_enabled = module.params.get("stickiness_enabled") + stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") + stickiness_type = module.params.get("stickiness_type") + + health_option_keys = [ + "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", + "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes" + ] + health_options = any([module.params[health_option_key] is not None for health_option_key in health_option_keys]) + + # Set health check if anything set + if health_options: + + if module.params.get("health_check_protocol") is not None: + params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper() + + if module.params.get("health_check_port") is not None: + params['HealthCheckPort'] = module.params.get("health_check_port") + + if module.params.get("health_check_interval") is not None: + params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval") + + if module.params.get("health_check_timeout") is not None: + params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout") + + if module.params.get("healthy_threshold_count") is not None: + params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count") + + if module.params.get("unhealthy_threshold_count") is not None: + params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count") + + # Only need to check response code and path for http(s) health checks + protocol = module.params.get("health_check_protocol") + if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']: + + if module.params.get("health_check_path") is not None: + params['HealthCheckPath'] = module.params.get("health_check_path") + + if module.params.get("successful_response_codes") is not None: + params['Matcher'] = {} + params['Matcher']['HttpCode'] = module.params.get("successful_response_codes") + + # Get target type + if target_type == 'ip': + fail_if_ip_target_type_not_supported(module) + + # Get target group + tg = get_target_group(connection, module) + + if tg: + diffs = [param for param in ('Port', 'Protocol', 'VpcId') + if tg.get(param) != params.get(param)] + if diffs: + module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % + ", ".join(diffs)) + # Target group exists so check health check parameters match what has been passed + health_check_params = dict() + + # Modify health check if anything set + if health_options: + + # Health check protocol + if 'HealthCheckProtocol' in params and tg['HealthCheckProtocol'] != params['HealthCheckProtocol']: + health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol'] + + # Health check port + if 'HealthCheckPort' in params and tg['HealthCheckPort'] != params['HealthCheckPort']: + health_check_params['HealthCheckPort'] = 
params['HealthCheckPort']
+
+            # Health check interval
+            if 'HealthCheckIntervalSeconds' in params and tg['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']:
+                health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds']
+
+            # Health check timeout
+            if 'HealthCheckTimeoutSeconds' in params and tg['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']:
+                health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds']
+
+            # Healthy threshold
+            if 'HealthyThresholdCount' in params and tg['HealthyThresholdCount'] != params['HealthyThresholdCount']:
+                health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount']
+
+            # Unhealthy threshold
+            if 'UnhealthyThresholdCount' in params and tg['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']:
+                health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount']
+
+            # Only need to check response code and path for http(s) health checks
+            if tg['HealthCheckProtocol'] in ['HTTP', 'HTTPS']:
+                # Health check path
+                if 'HealthCheckPath' in params and tg['HealthCheckPath'] != params['HealthCheckPath']:
+                    health_check_params['HealthCheckPath'] = params['HealthCheckPath']
+
+                # Matcher (successful response codes)
+                # TODO: required and here?
+                if 'Matcher' in params:
+                    current_matcher_list = tg['Matcher']['HttpCode'].split(',')
+                    requested_matcher_list = params['Matcher']['HttpCode'].split(',')
+                    if set(current_matcher_list) != set(requested_matcher_list):
+                        health_check_params['Matcher'] = {}
+                        health_check_params['Matcher']['HttpCode'] = ','.join(requested_matcher_list)
+
+        try:
+            if health_check_params:
+                connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], **health_check_params)
+                changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't update target group")
+
+        # Do we need to modify targets?
+        if module.params.get("modify_targets"):
+            # get list of current target instances.
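The target reconciliation that follows boils down to set arithmetic on target IDs; a small worked example with assumed IDs:

    current_ids = {'i-01234567', 'i-98765432'}      # already registered
    requested_ids = {'i-98765432', 'i-aabbccdd'}    # desired state
    to_add = requested_ids - current_ids            # {'i-aabbccdd'}
    to_remove = current_ids - requested_ids         # {'i-01234567'}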
I can't see anything like a describe targets in the doco so + # describe_target_health seems to be the only way to get them + try: + current_targets = connection.describe_target_health( + TargetGroupArn=tg['TargetGroupArn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get target group health") + + if module.params.get("targets"): + + if target_type != "lambda": + params['Targets'] = module.params.get("targets") + + # Correct type of target ports + for target in params['Targets']: + target['Port'] = int(target.get('Port', module.params.get('port'))) + + current_instance_ids = [] + + for instance in current_targets['TargetHealthDescriptions']: + current_instance_ids.append(instance['Target']['Id']) + + new_instance_ids = [] + for instance in params['Targets']: + new_instance_ids.append(instance['Id']) + + add_instances = set(new_instance_ids) - set(current_instance_ids) + + if add_instances: + instances_to_add = [] + for target in params['Targets']: + if target['Id'] in add_instances: + instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + + changed = True + try: + connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't register targets") + + if module.params.get("wait"): + status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_add, 'healthy') + if not status_achieved: + module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + + remove_instances = set(current_instance_ids) - set(new_instance_ids) + + if remove_instances: + instances_to_remove = [] + for target in current_targets['TargetHealthDescriptions']: + if target['Target']['Id'] in remove_instances: + instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + + changed = True + try: + connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove targets") + + if module.params.get("wait"): + status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused') + if not status_achieved: + module.fail_json(msg='Error waiting for target deregistration - please check the AWS console') + + # register lambda target + else: + try: + changed = False + target = module.params.get("targets")[0] + if len(current_targets["TargetHealthDescriptions"]) == 0: + changed = True + else: + for item in current_targets["TargetHealthDescriptions"]: + if target["Id"] != item["Target"]["Id"]: + changed = True + break # only one target is possible with lambda + + if changed: + if target.get("Id"): + response = connection.register_targets( + TargetGroupArn=tg['TargetGroupArn'], + Targets=[ + { + "Id": target['Id'] + } + ] + ) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg="Couldn't register targets") + else: + if target_type != "lambda": + + current_instances = current_targets['TargetHealthDescriptions'] + + if current_instances: + instances_to_remove = [] + for target in current_targets['TargetHealthDescriptions']: + instances_to_remove.append({'Id': target['Target']['Id'], 
'Port': target['Target']['Port']}) + + changed = True + try: + connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove targets") + + if module.params.get("wait"): + status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused') + if not status_achieved: + module.fail_json(msg='Error waiting for target deregistration - please check the AWS console') + + # remove lambda targets + else: + changed = False + if current_targets["TargetHealthDescriptions"]: + changed = True + # only one target is possible with lambda + target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] + if changed: + connection.deregister_targets( + TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}]) + else: + try: + connection.create_target_group(**params) + changed = True + new_target_group = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create target group") + + tg = get_target_group(connection, module) + + if module.params.get("targets"): + if target_type != "lambda": + params['Targets'] = module.params.get("targets") + try: + connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't register targets") + + if module.params.get("wait"): + status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], params['Targets'], 'healthy') + if not status_achieved: + module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + + else: + try: + target = module.params.get("targets")[0] + response = connection.register_targets( + TargetGroupArn=tg['TargetGroupArn'], + Targets=[ + { + "Id": target["Id"] + } + ] + ) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg="Couldn't register targets") + + # Now set target group attributes + update_attributes = [] + + # Get current attributes + current_tg_attributes = get_tg_attributes(connection, module, tg['TargetGroupArn']) + + if deregistration_delay_timeout is not None: + if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: + update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if stickiness_enabled is not None: + if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": + update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) + if stickiness_lb_cookie_duration is not None: + if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: + update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) + if stickiness_type is not None and "stickiness_type" in current_tg_attributes: + if stickiness_type != current_tg_attributes['stickiness_type']: + update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + + if update_attributes: + try: + connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], 
Attributes=update_attributes)
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state
+            if new_target_group:
+                connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'])
+            module.fail_json_aws(e, msg="Couldn't set target group attributes")
+
+    # Tags - only need to play with tags if tags parameter has been set to something
+    if tags:
+        # Get tags
+        current_tags = get_target_group_tags(connection, module, tg['TargetGroupArn'])
+
+        # Delete necessary tags
+        tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags)
+        if tags_to_delete:
+            try:
+                connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Couldn't delete tags from target group")
+            changed = True
+
+        # Add/update tags
+        if tags_need_modify:
+            try:
+                connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Couldn't add tags to target group")
+            changed = True
+
+    # Get the target group again
+    tg = get_target_group(connection, module)
+
+    # Get the target group attributes again
+    tg.update(get_tg_attributes(connection, module, tg['TargetGroupArn']))
+
+    # Convert tg to snake_case
+    snaked_tg = camel_dict_to_snake_dict(tg)
+
+    snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, tg['TargetGroupArn']))
+
+    module.exit_json(changed=changed, **snaked_tg)
+
+
+def delete_target_group(connection, module):
+    changed = False
+    tg = get_target_group(connection, module)
+
+    if tg:
+        try:
+            connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'])
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't delete target group")
+
+    module.exit_json(changed=changed)
+
+
+def main():
+    protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP',
+                      'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+    argument_spec = dict(
+        deregistration_delay_timeout=dict(type='int'),
+        health_check_protocol=dict(choices=protocols_list),
+        health_check_port=dict(),
+        health_check_path=dict(),
+        health_check_interval=dict(type='int'),
+        health_check_timeout=dict(type='int'),
+        healthy_threshold_count=dict(type='int'),
+        modify_targets=dict(default=True, type='bool'),
+        name=dict(required=True),
+        port=dict(type='int'),
+        protocol=dict(choices=protocols_list),
+        purge_tags=dict(default=True, type='bool'),
+        stickiness_enabled=dict(type='bool'),
+        stickiness_type=dict(default='lb_cookie'),
+        stickiness_lb_cookie_duration=dict(type='int'),
+        state=dict(required=True, choices=['present', 'absent']),
+        successful_response_codes=dict(),
+        tags=dict(default={}, type='dict'),
+        target_type=dict(choices=['instance', 'ip', 'lambda']),
+        targets=dict(type='list'),
+        unhealthy_threshold_count=dict(type='int'),
+        vpc_id=dict(),
+        wait_timeout=dict(type='int', default=200),
+        wait=dict(type='bool', default=False)
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              required_if=[
+                                  ['target_type', 'instance', ['protocol', 'port', 'vpc_id']],
+                                  ['target_type', 'ip', ['protocol', 'port', 
'vpc_id']],
+                              ]
+                              )
+
+    if module.params.get('target_type') is None:
+        module.params['target_type'] = 'instance'
+
+    connection = module.client('elbv2')
+
+    if module.params.get('state') == 'present':
+        create_or_update_target_group(connection, module)
+    else:
+        delete_target_group(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/elb_target_group_facts.py b/elb_target_group_facts.py
new file mode 120000
index 00000000000..3abd2ee5a65
--- /dev/null
+++ b/elb_target_group_facts.py
@@ -0,0 +1 @@
+elb_target_group_info.py
\ No newline at end of file
diff --git a/elb_target_group_info.py b/elb_target_group_info.py
new file mode 100644
index 00000000000..65cdc0e707e
--- /dev/null
+++ b/elb_target_group_info.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: elb_target_group_info
+short_description: Gather information about ELB target groups in AWS
+description:
+  - Gather information about ELB target groups in AWS.
+  - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: Rob White (@wimnat)
+options:
+  load_balancer_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the load balancer.
+    required: false
+    type: str
+  target_group_arns:
+    description:
+      - The Amazon Resource Names (ARN) of the target groups.
+    required: false
+    type: list
+  names:
+    description:
+      - The names of the target groups.
+    required: false
+    type: list
+  collect_targets_health:
+    description:
+      - When set to C(yes), the output will contain a health description for each target.
+    required: false
+    default: no
+    type: bool
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all target groups
+- elb_target_group_info:
+
+# Gather information about the target group attached to a particular ELB
+- elb_target_group_info:
+    load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
+
+# Gather information about the target groups named 'tg1' and 'tg2'
+- elb_target_group_info:
+    names:
+      - tg1
+      - tg2
+
+'''
+
+RETURN = '''
+target_groups:
+    description: a list of target groups
+    returned: always
+    type: complex
+    contains:
+        deregistration_delay_timeout_seconds:
+            description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
+            returned: always
+            type: int
+            sample: 300
+        health_check_interval_seconds:
+            description: The approximate amount of time, in seconds, between health checks of an individual target.
+            returned: always
+            type: int
+            sample: 30
+        health_check_path:
+            description: The destination for the health check request.
+            returned: always
+            type: str
+            sample: /index.html
+        health_check_port:
+            description: The port to use to connect with the target.
+            returned: always
+            type: str
+            sample: traffic-port
+        health_check_protocol:
+            description: The protocol to use to connect with the target.
+            returned: always
+            type: str
+            sample: HTTP
+        health_check_timeout_seconds:
+            description: The amount of time, in seconds, during which no response means a failed health check.
+            returned: always
+            type: int
+            sample: 5
+        healthy_threshold_count:
+            description: The number of consecutive health check successes required before considering an unhealthy target healthy.
+            returned: always
+            type: int
+            sample: 5
+        load_balancer_arns:
+            description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
+            returned: always
+            type: list
+            sample: []
+        matcher:
+            description: The HTTP codes to use when checking for a successful response from a target.
+            returned: always
+            type: dict
+            sample: {
+                "http_code": "200"
+            }
+        port:
+            description: The port on which the targets are listening.
+            returned: always
+            type: int
+            sample: 80
+        protocol:
+            description: The protocol to use for routing traffic to the targets.
+            returned: always
+            type: str
+            sample: HTTP
+        stickiness_enabled:
+            description: Indicates whether sticky sessions are enabled.
+            returned: always
+            type: bool
+            sample: true
+        stickiness_lb_cookie_duration_seconds:
+            description: The time period, in seconds, during which requests from a client should be routed to the same target.
+            returned: always
+            type: int
+            sample: 86400
+        stickiness_type:
+            description: The type of sticky sessions.
+            returned: always
+            type: str
+            sample: lb_cookie
+        tags:
+            description: The tags attached to the target group.
+            returned: always
+            type: dict
+            sample: "{
+                'Tag': 'Example'
+            }"
+        target_group_arn:
+            description: The Amazon Resource Name (ARN) of the target group.
+            returned: always
+            type: str
+            sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
+        targets_health_description:
+            description: Targets health description.
+            returned: when collect_targets_health is enabled
+            type: complex
+            contains:
+                health_check_port:
+                    description: The port to check target health.
+                    returned: always
+                    type: str
+                    sample: '80'
+                target:
+                    description: The target metadata.
+                    returned: always
+                    type: complex
+                    contains:
+                        id:
+                            description: The ID of the target.
+                            returned: always
+                            type: str
+                            sample: i-0123456789
+                        port:
+                            description: The port to use to connect with the target.
+                            returned: always
+                            type: int
+                            sample: 80
+                target_health:
+                    description: The target health status.
+                    returned: always
+                    type: complex
+                    contains:
+                        state:
+                            description: The state of the target health.
+                            returned: always
+                            type: str
+                            sample: healthy
+        target_group_name:
+            description: The name of the target group.
+            returned: always
+            type: str
+            sample: mytargetgroup
+        unhealthy_threshold_count:
+            description: The number of consecutive health check failures required before considering the target unhealthy.
+            returned: always
+            type: int
+            sample: 2
+        vpc_id:
+            description: The ID of the VPC for the targets.
+            returned: always
+            type: str
+            sample: vpc-0123456
+'''
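The listing logic below is driven by a boto3 paginator; reduced to a standalone sketch (client configuration and the target group names are assumed):

    import boto3

    client = boto3.client('elbv2')
    paginator = client.get_paginator('describe_target_groups')
    result = paginator.paginate(Names=['tg1', 'tg2']).build_full_result()
    for tg in result['TargetGroups']:
        print(tg['TargetGroupName'], tg['TargetGroupArn'])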
+
+import traceback
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError, NoCredentialsError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+                                                                         ec2_argument_spec, get_aws_connection_info)
+
+
+def get_target_group_attributes(connection, module, target_group_arn):
+
+    try:
+        target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
+    except ClientError as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+    # Replace '.' with '_' in attribute key names to make it more Ansibley
+    return dict((k.replace('.', '_'), v)
+                for (k, v) in target_group_attributes.items())
+
+
+def get_target_group_tags(connection, module, target_group_arn):
+
+    try:
+        return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
+    except ClientError as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def get_target_group_targets_health(connection, module, target_group_arn):
+
+    try:
+        return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
+    except ClientError as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def list_target_groups(connection, module):
+
+    load_balancer_arn = module.params.get("load_balancer_arn")
+    target_group_arns = module.params.get("target_group_arns")
+    names = module.params.get("names")
+    collect_targets_health = module.params.get("collect_targets_health")
+
+    try:
+        target_group_paginator = connection.get_paginator('describe_target_groups')
+        if not load_balancer_arn and not target_group_arns and not names:
+            target_groups = target_group_paginator.paginate().build_full_result()
+        if load_balancer_arn:
+            target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
+        if target_group_arns:
+            target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
+        if names:
+            target_groups = target_group_paginator.paginate(Names=names).build_full_result()
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'TargetGroupNotFound':
+            module.exit_json(target_groups=[])
+        else:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except NoCredentialsError as e:
+        module.fail_json(msg="AWS authentication problem. 
" + e.message, exception=traceback.format_exc()) + + # Get the attributes and tags for each target group + for target_group in target_groups['TargetGroups']: + target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn'])) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']] + + # Get tags for each target group + for snaked_target_group in snaked_target_groups: + snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn']) + if collect_targets_health: + snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict( + target) for target in get_target_group_targets_health(connection, module, snaked_target_group['target_group_arn'])] + + module.exit_json(target_groups=snaked_target_groups) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + load_balancer_arn=dict(type='str'), + target_group_arns=dict(type='list'), + names=dict(type='list'), + collect_targets_health=dict(default=False, type='bool', required=False) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], + supports_check_mode=True + ) + if module._name == 'elb_target_group_facts': + module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", version='2.13') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + list_target_groups(connection, module) + + +if __name__ == '__main__': + main() diff --git a/elb_target_info.py b/elb_target_info.py new file mode 100644 index 00000000000..aa2ec02e7db --- /dev/null +++ b/elb_target_info.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Yaakov Kuperman +# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community"} + + +DOCUMENTATION = ''' +--- +module: elb_target_info +short_description: Gathers which target groups a target is associated with. +description: + - This module will search through every target group in a region to find + which ones have registered a given instance ID or IP. + - This module was called C(elb_target_facts) before Ansible 2.9. The usage did not change. + +author: "Yaakov Kuperman (@yaakov-github)" +options: + instance_id: + description: + - What instance ID to get information for. + type: str + required: true + get_unused_target_groups: + description: + - Whether or not to get target groups not used by any load balancers. 
+ type: bool + default: true + +requirements: + - boto3 + - botocore +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = """ +# practical use case - dynamically deregistering and reregistering nodes + + - name: Get EC2 Metadata + action: ec2_metadata_facts + + - name: Get initial list of target groups + delegate_to: localhost + elb_target_info: + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + + - name: save fact for later + set_fact: + original_tgs: "{{ target_info.instance_target_groups }}" + + - name: Deregister instance from all target groups + delegate_to: localhost + elb_target: + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: absent + target_status: "draining" + region: "{{ ansible_ec2_placement_region }}" + with_subelements: + - "{{ original_tgs }}" + - "targets" + + # This avoids having to wait for 'elb_target' to serially deregister each + # target group. An alternative would be to run all of the 'elb_target' + # tasks async and wait for them to finish. + + - name: wait for all targets to deregister simultaneously + delegate_to: localhost + elb_target_info: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + until: (target_info.instance_target_groups | length) == 0 + retries: 60 + delay: 10 + + - name: reregister in elbv2s + elb_target: + region: "{{ ansible_ec2_placement_region }}" + target_group_arn: "{{ item.0.target_group_arn }}" + target_port: "{{ item.1.target_port }}" + target_az: "{{ item.1.target_az }}" + target_id: "{{ item.1.target_id }}" + state: present + target_status: "initial" + with_subelements: + - "{{ original_tgs }}" + - "targets" + + # wait until all groups associated with this instance are 'healthy' or + # 'unused' + - name: wait for registration + elb_target_info: + get_unused_target_groups: false + instance_id: "{{ ansible_ec2_instance_id }}" + region: "{{ ansible_ec2_placement_region }}" + register: target_info + until: (target_info.instance_target_groups | + map(attribute='targets') | + flatten | + map(attribute='target_health') | + rejectattr('state', 'equalto', 'healthy') | + rejectattr('state', 'equalto', 'unused') | + list | + length) == 0 + retries: 61 + delay: 10 + +# using the target groups to generate AWS CLI commands to reregister the +# instance - useful in case the playbook fails mid-run and manual +# rollback is required + - name: "reregistration commands: ELBv2s" + debug: + msg: > + aws --region {{ansible_ec2_placement_region}} elbv2 + register-targets --target-group-arn {{item.target_group_arn}} + --targets{%for target in item.targets%} + Id={{target.target_id}}, + Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}} + {%endif%} + {%endfor%} + loop: "{{target_info.instance_target_groups}}" + +""" + +RETURN = """ +instance_target_groups: + description: a list of target groups to which the instance is registered to + returned: always + type: complex + contains: + target_group_arn: + description: The ARN of the target group + type: str + returned: always + sample: + - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef" + target_group_type: + description: Which target type is used for this group + returned: always + type: 
str
+        sample:
+            - ip
+            - instance
+    targets:
+        description: A list of targets that point to this instance ID
+        returned: always
+        type: complex
+        contains:
+            target_id:
+                description: the target ID referring to this instance
+                type: str
+                returned: always
+                sample:
+                    - i-deadbeef
+                    - 1.2.3.4
+            target_port:
+                description: which port this target is listening on
+                type: str
+                returned: always
+                sample:
+                    - 80
+            target_az:
+                description: which availability zone is explicitly
+                             associated with this target
+                type: str
+                returned: when an AZ is associated with this instance
+                sample:
+                    - us-west-2a
+            target_health:
+                description:
+                    - The target health description.
+                    - See following link for all the possible values
+                      U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health)
+                returned: always
+                type: complex
+                contains:
+                    description:
+                        description: description of target health
+                        returned: if I(state!=present)
+                        sample:
+                            - "Target deregistration is in progress"
+                        type: str
+                    reason:
+                        description: reason code for target health
+                        returned: if I(state!=healthy)
+                        sample:
+                            - "Target.Deregistration in progress"
+                        type: str
+                    state:
+                        description: health state
+                        returned: always
+                        sample:
+                            - "healthy"
+                            - "draining"
+                            - "initial"
+                            - "unhealthy"
+                            - "unused"
+                            - "unavailable"
+                        type: str
+"""
+
+__metaclass__ = type
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    # we can handle the lack of boto3 based on the ec2 module
+    pass
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+
+class Target(object):
+    """Models a target in a target group"""
+    def __init__(self, target_id, port, az, raw_target_health):
+        self.target_port = port
+        self.target_id = target_id
+        self.target_az = az
+        self.target_health = self.convert_target_health(raw_target_health)
+
+    def convert_target_health(self, raw_target_health):
+        return camel_dict_to_snake_dict(raw_target_health)
+
+
+class TargetGroup(object):
+    """Models an elbv2 target group"""
+
+    def __init__(self, **kwargs):
+        self.target_group_type = kwargs["target_group_type"]
+        self.target_group_arn = kwargs["target_group_arn"]
+        # the relevant targets associated with this group
+        self.targets = []
+
+    def add_target(self, target_id, target_port, target_az, raw_target_health):
+        self.targets.append(Target(target_id,
+                                   target_port,
+                                   target_az,
+                                   raw_target_health))
+
+    def to_dict(self):
+        object_dict = vars(self)
+        object_dict["targets"] = [vars(each) for each in self.get_targets()]
+        return object_dict
+
+    def get_targets(self):
+        return list(self.targets)
+
+
+class TargetInfoGatherer(object):
+
+    def __init__(self, module, instance_id, get_unused_target_groups):
+        self.module = module
+        try:
+            self.ec2 = self.module.client(
+                "ec2",
+                retry_decorator=AWSRetry.jittered_backoff(retries=10)
+            )
+        except (ClientError, BotoCoreError) as e:
+            self.module.fail_json_aws(e,
+                                      msg="Couldn't connect to ec2"
+                                      )
+
+        try:
+            self.elbv2 = self.module.client(
+                "elbv2",
+                retry_decorator=AWSRetry.jittered_backoff(retries=10)
+            )
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e,
+                                      msg="Could not connect to elbv2"
+                                      )
+
+        self.instance_id = instance_id
+        self.get_unused_target_groups = get_unused_target_groups
+        self.tgs = self._get_target_groups()
+
+    def _get_instance_ips(self):
+        """Fetch 
all IPs associated with this instance so that we can determine + whether or not an instance is in an IP-based target group""" + try: + # get ahold of the instance in the API + reservations = self.ec2.describe_instances( + InstanceIds=[self.instance_id], + aws_retry=True + )["Reservations"] + except (BotoCoreError, ClientError) as e: + # typically this will happen if the instance doesn't exist + self.module.fail_json_aws(e, + msg="Could not get instance info" + + " for instance '%s'" % + (self.instance_id) + ) + + if len(reservations) < 1: + self.module.fail_json( + msg="Instance ID %s could not be found" % self.instance_id + ) + + instance = reservations[0]["Instances"][0] + + # IPs are represented in a few places in the API, this should + # account for all of them + ips = set() + ips.add(instance["PrivateIpAddress"]) + for nic in instance["NetworkInterfaces"]: + ips.add(nic["PrivateIpAddress"]) + for ip in nic["PrivateIpAddresses"]: + ips.add(ip["PrivateIpAddress"]) + + return list(ips) + + def _get_target_group_objects(self): + """helper function to build a list of TargetGroup objects based on + the AWS API""" + try: + paginator = self.elbv2.get_paginator( + "describe_target_groups" + ) + tg_response = paginator.paginate().build_full_result() + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, + msg="Could not describe target" + + " groups" + ) + + # build list of TargetGroup objects representing every target group in + # the system + target_groups = [] + for each_tg in tg_response["TargetGroups"]: + if not self.get_unused_target_groups and \ + len(each_tg["LoadBalancerArns"]) < 1: + # only collect target groups that actually are connected + # to LBs + continue + + target_groups.append( + TargetGroup(target_group_arn=each_tg["TargetGroupArn"], + target_group_type=each_tg["TargetType"], + ) + ) + return target_groups + + def _get_target_descriptions(self, target_groups): + """Helper function to build a list of all the target descriptions + for this target in a target group""" + # Build a list of all the target groups pointing to this instance + # based on the previous list + tgs = set() + # Loop through all the target groups + for tg in target_groups: + try: + # Get the list of targets for that target group + response = self.elbv2.describe_target_health( + TargetGroupArn=tg.target_group_arn, + aws_retry=True + ) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, + msg="Could not describe target " + + "health for target group %s" % + tg.target_group_arn + ) + + for t in response["TargetHealthDescriptions"]: + # If the target group has this instance as a target, add to + # list. This logic also accounts for the possibility of a + # target being in the target group multiple times with + # overridden ports + if t["Target"]["Id"] == self.instance_id or \ + t["Target"]["Id"] in self.instance_ips: + + # The 'AvailabilityZone' parameter is a weird one, see the + # API docs for more. 
Basically it's only supposed to be + # there under very specific circumstances, so we need + # to account for that + az = t["Target"]["AvailabilityZone"] \ + if "AvailabilityZone" in t["Target"] \ + else None + + tg.add_target(t["Target"]["Id"], + t["Target"]["Port"], + az, + t["TargetHealth"]) + # since tgs is a set, each target group will be added only + # once, even though we call add on each successful match + tgs.add(tg) + return list(tgs) + + def _get_target_groups(self): + # do this first since we need the IPs later on in this function + self.instance_ips = self._get_instance_ips() + + # build list of target groups + target_groups = self._get_target_group_objects() + return self._get_target_descriptions(target_groups) + + +def main(): + argument_spec = dict( + instance_id={"required": True, "type": "str"}, + get_unused_target_groups={"required": False, + "default": True, "type": "bool"} + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'elb_target_facts': + module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", version='2.13') + + instance_id = module.params["instance_id"] + get_unused_target_groups = module.params["get_unused_target_groups"] + + tg_gatherer = TargetInfoGatherer(module, + instance_id, + get_unused_target_groups + ) + + instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] + + module.exit_json(instance_target_groups=instance_target_groups) + + +if __name__ == "__main__": + main() diff --git a/execute_lambda.py b/execute_lambda.py new file mode 100644 index 00000000000..6c3ff264ae1 --- /dev/null +++ b/execute_lambda.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: execute_lambda +short_description: Execute an AWS Lambda function +description: + - This module executes AWS Lambda functions, allowing synchronous and asynchronous + invocation. +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +author: "Ryan Scott Brown (@ryansb) " +requirements: + - python >= 2.6 + - boto3 +notes: + - Async invocation will always return an empty C(output) key. + - Synchronous invocation may result in a function timeout, resulting in an + empty C(output) key. +options: + name: + description: + - The name of the function to be invoked. This can only be used for + invocations within the calling account. To invoke a function in another + account, use I(function_arn) to specify the full ARN. + type: str + function_arn: + description: + - The name of the function to be invoked + type: str + tail_log: + description: + - If I(tail_log=yes), the result of the task will include the last 4 KB + of the CloudWatch log for the function execution. Log tailing only + works if you use synchronous invocation I(wait=yes). This is usually + used for development or testing Lambdas. + type: bool + default: false + wait: + description: + - Whether to wait for the function results or not. If I(wait=no) + the task will not return any results. To wait for the Lambda function + to complete, set I(wait=yes) and the result will be available in the + I(output) key. 
+ type: bool + default: true + dry_run: + description: + - Do not *actually* invoke the function. A C(DryRun) call will check that + the caller has permissions to call the function, especially for + checking cross-account permissions. + type: bool + default: false + version_qualifier: + description: + - Which version/alias of the function to run. This defaults to the + C(LATEST) revision, but can be set to any existing version or alias. + See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html) + for details. + type: str + payload: + description: + - A dictionary in any form to be provided as input to the Lambda function. + default: {} + type: dict +''' + +EXAMPLES = ''' +- execute_lambda: + name: test-function + # the payload is automatically serialized and sent to the function + payload: + foo: bar + value: 8 + register: response + +# Test that you have sufficient permissions to execute a Lambda function in +# another account +- execute_lambda: + function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function + dry_run: true + +- execute_lambda: + name: test-function + payload: + foo: bar + value: 8 + wait: true + tail_log: true + register: response + # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda + +# Pass the Lambda event payload as a json file. +- execute_lambda: + name: test-function + payload: "{{ lookup('file','lambda_event.json') }}" + register: response + +- execute_lambda: + name: test-function + version_qualifier: PRODUCTION +''' + +RETURN = ''' +output: + description: Function output if wait=true and the function returns a value + returned: success + type: dict + sample: "{ 'output': 'something' }" +logs: + description: The last 4KB of the function logs. 
Only provided if I(tail_log) is true + type: str + returned: if I(tail_log) == true +status: + description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async) + type: int + sample: 200 + returned: always +''' + +import base64 +import json +import traceback + +try: + import botocore + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible.module_utils._text import to_native + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(), + function_arn=dict(), + wait=dict(default=True, type='bool'), + tail_log=dict(default=False, type='bool'), + dry_run=dict(default=False, type='bool'), + version_qualifier=dict(), + payload=dict(default={}, type='dict'), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['name', 'function_arn'], + ] + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + name = module.params.get('name') + function_arn = module.params.get('function_arn') + await_return = module.params.get('wait') + dry_run = module.params.get('dry_run') + tail_log = module.params.get('tail_log') + version_qualifier = module.params.get('version_qualifier') + payload = module.params.get('payload') + + if not HAS_BOTO3: + module.fail_json(msg='Python module "boto3" is missing, please install it') + + if not (name or function_arn): + module.fail_json(msg="Must provide either a function_arn or a name to invoke.") + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3) + if not region: + module.fail_json(msg="The AWS region must be specified as an " + "environment variable or in the AWS credentials " + "profile.") + + try: + client = boto3_conn(module, conn_type='client', resource='lambda', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: + module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e), exception=traceback.format_exc()) + + invoke_params = {} + + if await_return: + # await response + invoke_params['InvocationType'] = 'RequestResponse' + else: + # fire and forget + invoke_params['InvocationType'] = 'Event' + if dry_run or module.check_mode: + # dry_run overrides invocation type + invoke_params['InvocationType'] = 'DryRun' + + if tail_log and await_return: + invoke_params['LogType'] = 'Tail' + elif tail_log and not await_return: + module.fail_json(msg="The `tail_log` parameter is only available if " + "the invocation waits for the function to complete. " + "Set `wait` to true or turn off `tail_log`.") + else: + invoke_params['LogType'] = 'None' + + if version_qualifier: + invoke_params['Qualifier'] = version_qualifier + + if payload: + invoke_params['Payload'] = json.dumps(payload) + + if function_arn: + invoke_params['FunctionName'] = function_arn + elif name: + invoke_params['FunctionName'] = name + + try: + response = client.invoke(**invoke_params) + except botocore.exceptions.ClientError as ce: + if ce.response['Error']['Code'] == 'ResourceNotFoundException': + module.fail_json(msg="Could not find Lambda to execute. 
+                                 "the ARN is correct and your profile has "
+                                 "permissions to execute this function.",
+                             exception=traceback.format_exc())
+        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and the specific error",
+                         exception=traceback.format_exc())
+    except botocore.exceptions.ParamValidationError:
+        module.fail_json(msg="Parameters to `invoke` failed to validate",
+                         exception=traceback.format_exc())
+    except Exception:
+        module.fail_json(msg="Unexpected failure while invoking Lambda function",
+                         exception=traceback.format_exc())
+
+    results = {
+        'logs': '',
+        'status': response['StatusCode'],
+        'output': '',
+    }
+
+    if response.get('LogResult'):
+        try:
+            # logs are base64 encoded in the API response; decode them to text
+            # so the documented str return type holds on Python 3
+            results['logs'] = base64.b64decode(response.get('LogResult', '')).decode('utf-8', errors='replace')
+        except Exception:
+            module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())
+
+    if invoke_params['InvocationType'] == 'RequestResponse':
+        try:
+            results['output'] = json.loads(response['Payload'].read().decode('utf8'))
+        except Exception:
+            module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
+
+    if isinstance(results.get('output'), dict) and any(
+            [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
+        # AWS sends back stack traces and error messages when a function failed
+        # in a RequestResponse (synchronous) context.
+        template = ("Function executed, but there was an error in the Lambda function. "
+                    "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
+        error_data = {
+            # format the stacktrace sent back as an array into a multiline string
+            'trace': '\n'.join(
+                [' '.join([
+                    str(x) for x in line  # cast line numbers to strings
+                ]) for line in results.get('output', {}).get('stackTrace', [])]
+            ),
+            'errmsg': results['output'].get('errorMessage'),
+            'type': results['output'].get('errorType')
+        }
+        module.fail_json(msg=template.format(**error_data), result=results)
+
+    module.exit_json(changed=True, result=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/iam.py b/iam.py
new file mode 100644
index 00000000000..b77bb219a27
--- /dev/null
+++ b/iam.py
@@ -0,0 +1,871 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: iam
+short_description: Manage IAM users, groups, roles and keys
+description:
+  - Allows for the management of IAM users, user API keys, groups, and roles.
+options:
+  iam_type:
+    description:
+      - Type of IAM resource.
+    choices: ["user", "group", "role"]
+    type: str
+    required: true
+  name:
+    description:
+      - Name of IAM resource to create or identify.
+    required: true
+    type: str
+  new_name:
+    description:
+      - When I(state=update), will replace I(name) with I(new_name) on the IAM resource.
+    type: str
+  new_path:
+    description:
+      - When I(state=update), will replace the path with I(new_path) on the IAM resource.
+    type: str
+  state:
+    description:
+      - Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
+    required: true
+    choices: [ "present", "absent", "update" ]
+    type: str
+  path:
+    description:
+      - When creating or updating, specify the desired path of the resource.
+      - If I(state=present), the resource's current path will be updated to this value when they differ.
+    default: "/"
+    type: str
+  trust_policy:
+    description:
+      - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role.
+      - Mutually exclusive with I(trust_policy_filepath).
+    type: dict
+  trust_policy_filepath:
+    description:
+      - The path to the trust policy document that grants an entity permission to assume the role.
+      - Mutually exclusive with I(trust_policy).
+    type: str
+  access_key_state:
+    description:
+      - When I(iam_type=user), it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
+    choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"]
+    type: str
+  key_count:
+    description:
+      - When I(access_key_state=create) it will ensure this quantity of keys are present.
+    default: 1
+    type: int
+  access_key_ids:
+    description:
+      - A list of the access key IDs that you want affected by the I(access_key_state) parameter.
+    type: list
+  groups:
+    description:
+      - A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed.
+    type: list
+  password:
+    description:
+      - When I(iam_type=user) and either I(state=present) or I(state=update), define the user's login password.
+      - Note that this will always return 'changed'.
+    type: str
+  update_password:
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - When to update user passwords.
+      - I(update_password=always) will ensure the password is set to I(password).
+      - I(update_password=on_create) will only set the password for newly created users.
+    type: str
+notes:
+  - 'Currently boto does not support the removal of Managed Policies; the module will error out if your
+    user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
+author:
+  - "Jonathan I. Davila (@defionscode)"
+  - "Paul Seiffert (@seiffert)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Basic user creation example
+tasks:
+- name: Create two new IAM users with API keys
+  iam:
+    iam_type: user
+    name: "{{ item }}"
+    state: present
+    password: "{{ temp_pass }}"
+    access_key_state: create
+  loop:
+    - jcleese
+    - mpython
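+
+# Example of deleting an existing user; a minimal sketch that assumes the
+# jcleese user created above
+- name: Delete the user jcleese
+  iam:
+    iam_type: user
+    name: jcleese
+    state: absent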
+
+# Advanced example, create two new groups and add the pre-existing user
+# jdavila to both groups.
+tasks:
+- name: Create Two Groups, Mario and Luigi
+  iam:
+    iam_type: group
+    name: "{{ item }}"
+    state: present
+  loop:
+    - Mario
+    - Luigi
+  register: new_groups
+
+- name: Add the user jdavila to both new groups
+  iam:
+    iam_type: user
+    name: jdavila
+    state: update
+    groups: "{{ item.created_group.group_name }}"
+  loop: "{{ new_groups.results }}"
+
+# Example of role with custom trust policy for Lambda service
+- name: Create IAM role with custom trust relationship
+  iam:
+    iam_type: role
+    name: AAALambdaTestRole
+    state: present
+    trust_policy:
+      Version: '2012-10-17'
+      Statement:
+      - Action: sts:AssumeRole
+        Effect: Allow
+        Principal:
+          Service: lambda.amazonaws.com
+
+'''
+RETURN = '''
+role_result:
+    description: the IAM.role dict returned by Boto
+    type: str
+    returned: if iam_type=role and state=present
+    sample: {
+        "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
+        "assume_role_policy_document": "...truncated...",
+        "create_date": "2017-09-02T14:32:23Z",
+        "path": "/",
+        "role_id": "AROAA1B2C3D4E5F6G7H8I",
+        "role_name": "my-new-role"
+    }
+roles:
+    description: a list containing the name of the currently defined roles
+    type: list
+    returned: if iam_type=role and state=present
+    sample: [
+        "my-new-role",
+        "my-existing-role-1",
+        "my-existing-role-2",
+        "my-existing-role-3",
+        "my-existing-role-...",
+    ]
+'''
+
+import json
+import traceback
+
+try:
+    import boto.exception
+    import boto.iam
+    import boto.iam.connection
+except ImportError:
+    pass  # Taken care of by ec2.HAS_BOTO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec,
+                                                                         get_aws_connection_info)
+
+
+def _paginate(func, attr):
+    '''
+    paginates the results from func by continuously passing in
+    the returned marker if the results were truncated. this returns
+    an iterator over the items in the returned response. `attr` is
+    the name of the attribute to iterate over in the response.
+    '''
+    finished, marker = False, None
+    while not finished:
+        res = func(marker=marker)
+        for item in getattr(res, attr):
+            yield item
+
+        finished = res.is_truncated == 'false'
+        if not finished:
+            marker = res.marker
+
+
+def list_all_groups(iam):
+    return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
+
+
+def list_all_users(iam):
+    return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
+
+
+def list_all_roles(iam):
+    return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
+
+
+def list_all_instance_profiles(iam):
+    return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
+
+
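+# Each list_all_* helper above drains every page of a truncated boto response
+# via _paginate. A minimal usage sketch (assuming an authenticated boto IAM
+# connection named `iam`):
+#
+#     for role in _paginate(iam.list_roles, 'roles'):
+#         print(role['role_name'])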
+def create_user(module, iam, name, pwd, path, key_state, key_count):
+    key_qty = 0
+    keys = []
+    try:
+        user_meta = iam.create_user(
+            name, path).create_user_response.create_user_result.user
+        changed = True
+        if pwd is not None:
+            pwd = iam.create_login_profile(name, pwd)
+        if key_state in ['create']:
+            if key_count:
+                while key_count > key_qty:
+                    keys.append(iam.create_access_key(
+                        user_name=name).create_access_key_response.
+                        create_access_key_result.
+                        access_key)
+                    key_qty += 1
+            else:
+                keys = None
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=False, msg=str(err))
+    else:
+        user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
+        return (user_info, changed)
+
+
+def delete_dependencies_first(module, iam, name):
+    changed = False
+    # try to delete any keys
+    try:
+        current_keys = [ck['access_key_id'] for ck in
+                        iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+        for key in current_keys:
+            iam.delete_access_key(key, name)
+        changed = True
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
+
+    # try to delete login profiles
+    try:
+        login_profile = iam.get_login_profiles(name).get_login_profile_response
+        iam.delete_login_profile(name)
+        changed = True
+    except boto.exception.BotoServerError as err:
+        error_msg = boto_exception(err)
+        if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
+            module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
+
+    # try to detach policies
+    try:
+        for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
+            iam.delete_user_policy(name, policy)
+        changed = True
+    except boto.exception.BotoServerError as err:
+        error_msg = boto_exception(err)
+        if 'must detach all policies first' in error_msg:
+            module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
+                                                  "that %s has Managed Policies. This is not "
+                                                  "currently supported by boto. Please detach the policies "
+                                                  "through the console and try again." % name)
+        module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
+
+    # try to deactivate associated MFA devices
+    try:
+        mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
+        for device in mfa_devices:
+            iam.deactivate_mfa_device(name, device['serial_number'])
+        changed = True
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
+
+    return changed
+
+
+def delete_user(module, iam, name):
+    changed = delete_dependencies_first(module, iam, name)
+    try:
+        iam.delete_user(name)
+    except boto.exception.BotoServerError as ex:
+        module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
+    else:
+        changed = True
+    return name, changed
+
+
+def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
+    changed = False
+    name_change = False
+    if updated and new_name:
+        name = new_name
+    try:
+        current_keys = [ck['access_key_id'] for ck in
+                        iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+        status = [ck['status'] for ck in
+                  iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+        key_qty = len(current_keys)
+    except boto.exception.BotoServerError as err:
+        error_msg = boto_exception(err)
+        if 'cannot be found' in error_msg and updated:
+            current_keys = [ck['access_key_id'] for ck in
+                            iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+            status = [ck['status'] for ck in
+                      iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
+            name = new_name
+        
else: + module.fail_json(changed=False, msg=str(err)) + + updated_key_list = {} + + if new_name or new_path: + c_path = iam.get_user(name).get_user_result.user['path'] + if (name != new_name) or (c_path != new_path): + changed = True + try: + if not updated: + user = iam.update_user( + name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata + else: + user = iam.update_user( + name, new_path=new_path).update_user_response.response_metadata + user['updates'] = dict( + old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) + except boto.exception.BotoServerError as err: + error_msg = boto_exception(err) + module.fail_json(changed=False, msg=str(err)) + else: + if not updated: + name_change = True + + if pwd: + try: + iam.update_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError: + try: + iam.create_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError as err: + error_msg = boto_exception(str(err)) + if 'Password does not conform to the account password policy' in error_msg: + module.fail_json(changed=False, msg="Password doesn't conform to policy") + else: + module.fail_json(msg=error_msg) + + try: + current_keys = [ck['access_key_id'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + status = [ck['status'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + key_qty = len(current_keys) + except boto.exception.BotoServerError as err: + error_msg = boto_exception(err) + if 'cannot be found' in error_msg and updated: + current_keys = [ck['access_key_id'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] + status = [ck['status'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] + name = new_name + else: + module.fail_json(changed=False, msg=str(err)) + + new_keys = [] + if key_state == 'create': + try: + while key_count > key_qty: + new_keys.append(iam.create_access_key( + user_name=name).create_access_key_response.create_access_key_result.access_key) + key_qty += 1 + changed = True + + except boto.exception.BotoServerError as err: + module.fail_json(changed=False, msg=str(err)) + + if keys and key_state: + for access_key in keys: + if key_state in ('active', 'inactive'): + if access_key in current_keys: + for current_key, current_key_state in zip(current_keys, status): + if key_state != current_key_state.lower(): + try: + iam.update_access_key(access_key, key_state.capitalize(), user_name=name) + changed = True + except boto.exception.BotoServerError as err: + module.fail_json(changed=False, msg=str(err)) + else: + module.fail_json(msg="Supplied keys not found for %s. " + "Current keys: %s. " + "Supplied key(s): %s" % + (name, current_keys, keys) + ) + + if key_state == 'remove': + if access_key in current_keys: + try: + iam.delete_access_key(access_key, user_name=name) + except boto.exception.BotoServerError as err: + module.fail_json(changed=False, msg=str(err)) + else: + changed = True + + try: + final_keys, final_key_status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. + access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. 
+             access_key_metadata]
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+
+    for fk, fks in zip(final_keys, final_key_status):
+        updated_key_list.update({fk: fks})
+
+    return name_change, updated_key_list, changed, new_keys
+
+
+def set_users_groups(module, iam, name, groups, updated=None,
+                     new_name=None):
+    """ Sets groups for a user, will purge groups not explicitly passed, while
+        retaining pre-existing groups that also are in the new list.
+    """
+    changed = False
+
+    if updated:
+        name = new_name
+
+    try:
+        orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
+            name).list_groups_for_user_result.groups]
+        remove_groups = [
+            rg for rg in frozenset(orig_users_groups).difference(groups)]
+        new_groups = [
+            ng for ng in frozenset(groups).difference(orig_users_groups)]
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+    else:
+        if len(orig_users_groups) > 0:
+            for new in new_groups:
+                iam.add_user_to_group(new, name)
+            for rm in remove_groups:
+                iam.remove_user_from_group(rm, name)
+        else:
+            for group in groups:
+                try:
+                    iam.add_user_to_group(group, name)
+                except boto.exception.BotoServerError as err:
+                    error_msg = boto_exception(err)
+                    if ('The group with name %s cannot be found.' % group) in error_msg:
+                        module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
+
+    if len(remove_groups) > 0 or len(new_groups) > 0:
+        changed = True
+
+    return (groups, changed)
+
+
+def create_group(module=None, iam=None, name=None, path=None):
+    changed = False
+    try:
+        iam.create_group(
+            name, path).create_group_response.create_group_result.group
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+    else:
+        changed = True
+    return name, changed
+
+
+def delete_group(module=None, iam=None, name=None):
+    changed = False
+    try:
+        iam.delete_group(name)
+    except boto.exception.BotoServerError as err:
+        error_msg = boto_exception(err)
+        if ('must delete policies first') in error_msg:
+            for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
+                iam.delete_group_policy(name, policy)
+            try:
+                iam.delete_group(name)
+            except boto.exception.BotoServerError as err:
+                error_msg = boto_exception(err)
+                if ('must delete policies first') in error_msg:
+                    module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
+                                                          "that %s has Managed Policies. This is not "
+                                                          "currently supported by boto. Please detach the policies "
+                                                          "through the console and try again." % name)
+                else:
+                    module.fail_json(changed=changed, msg=str(error_msg))
+            else:
+                changed = True
+        else:
+            module.fail_json(changed=changed, msg=str(error_msg))
+    else:
+        changed = True
+    return changed, name
+
+
+def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
+    changed = False
+    try:
+        current_group_path = iam.get_group(
+            name).get_group_response.get_group_result.group['path']
+        if new_path:
+            if current_group_path != new_path:
+                iam.update_group(name, new_path=new_path)
+                changed = True
+        if new_name:
+            if name != new_name:
+                iam.update_group(name, new_group_name=new_name, new_path=new_path)
+                changed = True
+                name = new_name
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+
+    return changed, name, new_path, current_group_path
+
+
+def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
+    changed = False
+    iam_role_result = None
+    instance_profile_result = None
+    try:
+        if name not in role_list:
+            changed = True
+            iam_role_result = iam.create_role(name,
+                                              assume_role_policy_document=trust_policy_doc,
+                                              path=path).create_role_response.create_role_result.role
+
+            if name not in prof_list:
+                instance_profile_result = iam.create_instance_profile(name, path=path) \
+                    .create_instance_profile_response.create_instance_profile_result.instance_profile
+                iam.add_role_to_instance_profile(name, name)
+            else:
+                instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+    else:
+        updated_role_list = list_all_roles(iam)
+        iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
+        return changed, updated_role_list, iam_role_result, instance_profile_result
+
+
+def delete_role(module, iam, name, role_list, prof_list):
+    changed = False
+    iam_role_result = None
+    instance_profile_result = None
+    try:
+        if name in role_list:
+            cur_ins_prof = [rp['instance_profile_name'] for rp in
+                            iam.list_instance_profiles_for_role(name).
+                            list_instance_profiles_for_role_result.
+                            instance_profiles]
+            for profile in cur_ins_prof:
+                iam.remove_role_from_instance_profile(profile, name)
+            try:
+                iam.delete_role(name)
+            except boto.exception.BotoServerError as err:
+                error_msg = boto_exception(err)
+                if ('must detach all policies first') in error_msg:
+                    for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
+                        iam.delete_role_policy(name, policy)
+                    try:
+                        iam_role_result = iam.delete_role(name)
+                    except boto.exception.BotoServerError as err:
+                        error_msg = boto_exception(err)
+                        if ('must detach all policies first') in error_msg:
+                            module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
+                                                                  "that %s has Managed Policies. This is not "
+                                                                  "currently supported by boto. Please detach the policies "
+                                                                  "through the console and try again." % name)
+                        else:
+                            module.fail_json(changed=changed, msg=str(err))
+                    else:
+                        changed = True
+
+        else:
+            changed = True
+
+        for prof in prof_list:
+            if name == prof:
+                instance_profile_result = iam.delete_instance_profile(name)
+    except boto.exception.BotoServerError as err:
+        module.fail_json(changed=changed, msg=str(err))
+    else:
+        updated_role_list = list_all_roles(iam)
+        return changed, updated_role_list, iam_role_result, instance_profile_result
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        iam_type=dict(required=True, choices=['user', 'group', 'role']),
+        groups=dict(type='list', default=None, required=False),
+        state=dict(required=True, choices=['present', 'absent', 'update']),
+        password=dict(default=None, required=False, no_log=True),
+        update_password=dict(default='always', required=False, choices=['always', 'on_create']),
+        access_key_state=dict(default=None, required=False, choices=[
+            'active', 'inactive', 'create', 'remove',
+            'Active', 'Inactive', 'Create', 'Remove']),
+        access_key_ids=dict(type='list', default=None, required=False),
+        key_count=dict(type='int', default=1, required=False),
+        name=dict(required=True),
+        trust_policy_filepath=dict(default=None, required=False),
+        trust_policy=dict(type='dict', default=None, required=False),
+        new_name=dict(default=None, required=False),
+        path=dict(default='/', required=False),
+        new_path=dict(default=None, required=False)
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='This module requires boto, please install it')
+
+    state = module.params.get('state').lower()
+    iam_type = module.params.get('iam_type').lower()
+    groups = module.params.get('groups')
+    name = module.params.get('name')
+    new_name = module.params.get('new_name')
+    password = module.params.get('password')
+    update_pw = module.params.get('update_password')
+    path = module.params.get('path')
+    new_path = module.params.get('new_path')
+    key_count = module.params.get('key_count')
+    key_state = module.params.get('access_key_state')
+    trust_policy = module.params.get('trust_policy')
+    trust_policy_filepath = module.params.get('trust_policy_filepath')
+    key_ids = module.params.get('access_key_ids')
+
+    if key_state:
+        key_state = key_state.lower()
+        if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
+            module.fail_json(changed=False, msg="At least one access key has to be defined in order"
+                                                " to use 'active' or 'inactive'")
+
+    if iam_type == 'user' and module.params.get('password') is not None:
+        pwd = module.params.get('password')
+    elif iam_type != 'user' and module.params.get('password') is not None:
+        module.fail_json(msg="A password is being specified when the iam_type "
+                             "is not user. Check parameters")
+    else:
+        pwd = None
+
+    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
+                               module.params.get('access_key_ids') is not None):
+        module.fail_json(msg="The IAM type must be user when IAM access keys "
+                             "are being modified. Check parameters")
+
+    if iam_type == 'role' and state == 'update':
+        module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
+                                            "please specify present or absent")
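+
+    # A hedged sketch of the JSON document the block below produces from
+    # I(trust_policy) or I(trust_policy_filepath); the service principal is
+    # purely illustrative, any valid IAM trust policy works:
+    #
+    #   {"Version": "2012-10-17",
+    #    "Statement": [{"Action": "sts:AssumeRole",
+    #                   "Effect": "Allow",
+    #                   "Principal": {"Service": "ec2.amazonaws.com"}}]}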
+
+    # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
+    if trust_policy_filepath:
+        try:
+            with open(trust_policy_filepath, 'r') as json_data:
+                trust_policy_doc = json.dumps(json.load(json_data))
+        except Exception as e:
+            module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
+    elif trust_policy:
+        try:
+            trust_policy_doc = json.dumps(trust_policy)
+        except Exception as e:
+            module.fail_json(msg=str(e) + ': ' + str(trust_policy))
+    else:
+        trust_policy_doc = None
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+    try:
+        if region:
+            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
+        else:
+            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+    except boto.exception.NoAuthHandlerFound as e:
+        module.fail_json(msg=str(e))
+
+    result = {}
+    changed = False
+
+    try:
+        orig_group_list = list_all_groups(iam)
+
+        orig_user_list = list_all_users(iam)
+
+        orig_role_list = list_all_roles(iam)
+
+        orig_prof_list = list_all_instance_profiles(iam)
+    except boto.exception.BotoServerError as err:
+        module.fail_json(msg=err.message)
+
+    if iam_type == 'user':
+        been_updated = False
+        user_groups = None
+        user_exists = any([n in [name, new_name] for n in orig_user_list])
+        if user_exists:
+            current_path = iam.get_user(name).get_user_result.user['path']
+            if not new_path and current_path != path:
+                new_path = path
+                path = current_path
+
+        if state == 'present' and not user_exists and not new_name:
+            (meta, changed) = create_user(
+                module, iam, name, password, path, key_state, key_count)
+            keys = iam.get_all_access_keys(name).list_access_keys_result.\
+                access_key_metadata
+            if groups:
+                (user_groups, changed) = set_users_groups(
+                    module, iam, name, groups, been_updated, new_name)
+            module.exit_json(
+                user_meta=meta, groups=user_groups, keys=keys, changed=changed)
+
+        elif state in ['present', 'update'] and user_exists:
+            if update_pw == 'on_create':
+                password = None
+            if name not in orig_user_list and new_name in orig_user_list:
+                been_updated = True
+            name_change, key_list, user_changed, new_key = update_user(
+                module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
+            if new_key:
+                user_meta = {'access_keys': list(new_key)}
+                user_meta['access_keys'].extend(
+                    [{'access_key_id': key, 'status': value} for key, value in key_list.items() if
+                     key not in [it['access_key_id'] for it in new_key]])
+            else:
+                user_meta = {
+                    'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
+
+            if name_change and new_name:
+                orig_name = name
+                name = new_name
+            if isinstance(groups, list):
+                user_groups, groups_changed = set_users_groups(
+                    module, iam, name, groups, been_updated, new_name)
+                if groups_changed == user_changed:
+                    changed = groups_changed
+                else:
+                    changed = True
+            else:
+                changed = user_changed
+            if new_name and new_path:
+                module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
+                                 new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
+                                 created_keys=new_key, user_meta=user_meta)
+            elif new_name and not new_path and not been_updated:
+                module.exit_json(
+                    changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
+                    created_keys=new_key, user_meta=user_meta)
+            elif
new_name and not new_path and been_updated: + module.exit_json( + changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state, + created_keys=new_key, user_meta=user_meta) + elif not new_name and new_path: + module.exit_json( + changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, + keys=key_list, created_keys=new_key, user_meta=user_meta) + else: + module.exit_json( + changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key, + user_meta=user_meta) + + elif state == 'update' and not user_exists: + module.fail_json( + msg="The user %s does not exist. No update made." % name) + + elif state == 'absent': + if user_exists: + try: + set_users_groups(module, iam, name, '') + name, changed = delete_user(module, iam, name) + module.exit_json(deleted_user=name, changed=changed) + + except Exception as ex: + module.fail_json(changed=changed, msg=str(ex)) + else: + module.exit_json( + changed=False, msg="User %s is already absent from your AWS IAM users" % name) + + elif iam_type == 'group': + group_exists = name in orig_group_list + + if state == 'present' and not group_exists: + new_group, changed = create_group(module=module, iam=iam, name=name, path=path) + module.exit_json(changed=changed, group_name=new_group) + elif state in ['present', 'update'] and group_exists: + changed, updated_name, updated_path, cur_path = update_group( + module=module, iam=iam, name=name, new_name=new_name, + new_path=new_path) + + if new_path and new_name: + module.exit_json(changed=changed, old_group_name=name, + new_group_name=updated_name, old_path=cur_path, + new_group_path=updated_path) + + if new_path and not new_name: + module.exit_json(changed=changed, group_name=name, + old_path=cur_path, + new_group_path=updated_path) + + if not new_path and new_name: + module.exit_json(changed=changed, old_group_name=name, + new_group_name=updated_name, group_path=cur_path) + + if not new_path and not new_name: + module.exit_json( + changed=changed, group_name=name, group_path=cur_path) + + elif state == 'update' and not group_exists: + module.fail_json( + changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" 
% name)
+
+        elif state == 'absent':
+            if name in orig_group_list:
+                removed_group, changed = delete_group(module=module, iam=iam, name=name)
+                module.exit_json(changed=changed, delete_group=removed_group)
+            else:
+                module.exit_json(changed=changed, msg="Group already absent")
+
+    elif iam_type == 'role':
+        role_list = []
+        if state == 'present':
+            changed, role_list, role_result, instance_profile_result = create_role(
+                module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
+        elif state == 'absent':
+            changed, role_list, role_result, instance_profile_result = delete_role(
+                module, iam, name, orig_role_list, orig_prof_list)
+        elif state == 'update':
+            module.fail_json(
+                changed=False, msg='Role update not currently supported by boto.')
+        module.exit_json(changed=changed, roles=role_list, role_result=role_result,
+                         instance_profile_result=instance_profile_result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/iam_cert.py b/iam_cert.py
new file mode 100644
index 00000000000..38a979e9672
--- /dev/null
+++ b/iam_cert.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: iam_cert
+short_description: Manage server certificates for use on ELBs and CloudFront
+description:
+  - Allows for the management of server certificates.
+options:
+  name:
+    description:
+      - Name of certificate to add, update or remove.
+    required: true
+    type: str
+  new_name:
+    description:
+      - When I(state=present), this will update the name of the cert.
+      - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+    type: str
+  new_path:
+    description:
+      - When I(state=present), this will update the path of the cert.
+      - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
+    type: str
+  state:
+    description:
+      - Whether to create (or update) or delete the certificate.
+      - If I(new_path) or I(new_name) is defined, specifying present will attempt to update these.
+    required: true
+    choices: [ "present", "absent" ]
+    type: str
+  path:
+    description:
+      - When creating or updating, specify the desired path of the certificate.
+    default: "/"
+    type: str
+  cert_chain:
+    description:
+      - The path to, or content of, the CA certificate chain in PEM encoded format.
+        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+    type: str
+  cert:
+    description:
+      - The path to, or content of, the certificate body in PEM encoded format.
+        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+    type: str
+  key:
+    description:
+      - The path to, or content of, the private key in PEM encoded format.
+        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+    type: str
+  dup_ok:
+    description:
+      - By default the module will not upload a certificate that is already uploaded into AWS.
+      - If I(dup_ok=True), it will upload the certificate as long as the name is unique.
+    default: False
+    type: bool
+
+requirements: [ "boto" ]
+author: Jonathan I. Davila (@defionscode)
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Basic server certificate upload from local file
+- iam_cert:
+    name: very_ssl
+    state: present
+    cert: "{{ lookup('file', 'path/to/cert') }}"
+    key: "{{ lookup('file', 'path/to/key') }}"
+    cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
+
+# Basic server certificate upload
+- iam_cert:
+    name: very_ssl
+    state: present
+    cert: path/to/cert
+    key: path/to/key
+    cert_chain: path/to/certchain
+
+# Server certificate upload using key string
+- iam_cert:
+    name: very_ssl
+    state: present
+    path: "/a/cert/path/"
+    cert: body_of_somecert
+    key: vault_body_of_privcertkey
+    cert_chain: body_of_myverytrustedchain
+
+# Basic rename of existing certificate
+- iam_cert:
+    name: very_ssl
+    new_name: new_very_ssl
+    state: present
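+
+# Delete a server certificate; a minimal sketch that matches the upload
+# examples above
+- iam_cert:
+    name: very_ssl
+    state: absent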
+
+'''
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws
+import os
+
+try:
+    import boto
+    import boto.iam
+    import boto.ec2
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+
+def cert_meta(iam, name):
+    certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
+    ocert = certificate.certificate_body
+    opath = certificate.server_certificate_metadata.path
+    ocert_id = certificate.server_certificate_metadata.server_certificate_id
+    upload_date = certificate.server_certificate_metadata.upload_date
+    exp = certificate.server_certificate_metadata.expiration
+    arn = certificate.server_certificate_metadata.arn
+    return opath, ocert, ocert_id, upload_date, exp, arn
+
+
+def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
+    update = False
+
+    # IAM cert names are case insensitive
+    names_lower = [n.lower() for n in [name, new_name] if n is not None]
+    orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]
+
+    if any(ct in orig_cert_names_lower for ct in names_lower):
+        for i_name in names_lower:
+            if cert is not None:
+                try:
+                    c_index = orig_cert_names_lower.index(i_name)
+                except ValueError:
+                    continue
+                else:
+                    # NOTE: remove the carriage return to strictly compare the cert bodies.
+                    slug_cert = cert.replace('\r', '')
+                    slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
+                    if slug_orig_cert_bodies == slug_cert:
+                        update = True
+                        break
+                    elif slug_cert.startswith(slug_orig_cert_bodies):
+                        update = True
+                        break
+                    else:
+                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
+                                                            ' has a different certificate body associated'
+                                                            ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
+            else:
+                update = True
+                break
+    elif cert in orig_cert_bodies and not dup_ok:
+        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
+            if crt_body == cert:
+                module.fail_json(changed=False, msg='This certificate already'
+                                                    ' exists under the name %s' % crt_name)
+
+    return update
+
+
+def cert_action(module, iam, name, cpath, new_name, new_path, state,
+                cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
+    if state == 'present':
+        update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
+                           orig_cert_bodies, dup_ok)
+        if update:
+            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+            changed = True
+            if new_name and new_path:
+                iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
+                module.exit_json(changed=changed, original_name=name, new_name=new_name,
+                                 original_path=opath, new_path=new_path, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp, arn=arn)
+            elif new_name and not new_path:
+                iam.update_server_cert(name, new_cert_name=new_name)
+                module.exit_json(changed=changed, original_name=name, new_name=new_name,
+                                 cert_path=opath, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp, arn=arn)
+            elif not new_name and new_path:
+                iam.update_server_cert(name, new_path=new_path)
+                module.exit_json(changed=changed, name=name,
+                                 original_path=opath, new_path=new_path, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp, arn=arn)
+            else:
+                changed = False
+                module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp, arn=arn,
+                                 msg='No new path or name specified. No changes made')
+        else:
+            changed = True
+            iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
+            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+            module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+                             upload_date=upload_date, expiration_date=exp, arn=arn)
+    elif state == 'absent':
+        if name in orig_cert_names:
+            changed = True
+            iam.delete_server_cert(name)
+            module.exit_json(changed=changed, deleted_cert=name)
+        else:
+            changed = False
+            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
+
+
+def load_data(cert, key, cert_chain):
+    # if paths are provided rather than lookups read the files and return the contents
+    if cert and os.path.isfile(cert):
+        with open(cert, 'r') as cert_fh:
+            cert = cert_fh.read().rstrip()
+    if key and os.path.isfile(key):
+        with open(key, 'r') as key_fh:
+            key = key_fh.read().rstrip()
+    if cert_chain and os.path.isfile(cert_chain):
+        with open(cert_chain, 'r') as cert_chain_fh:
+            cert_chain = cert_chain_fh.read()
+    return cert, key, cert_chain
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        name=dict(required=True),
+        cert=dict(),
+        key=dict(no_log=True),
+        cert_chain=dict(),
+        new_name=dict(),
+        path=dict(default='/'),
+        new_path=dict(),
+        dup_ok=dict(type='bool')
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['new_path', 'key'],
+            ['new_path', 'cert'],
+            ['new_path', 'cert_chain'],
+            ['new_name', 'key'],
+            ['new_name', 'cert'],
+            ['new_name', 'cert_chain'],
+        ],
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg="Boto is required for this module")
+
+    region, ec2_url, aws_connect_kwargs =
get_aws_connection_info(module) + + try: + if region: + iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) + else: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + name = module.params.get('name') + path = module.params.get('path') + new_name = module.params.get('new_name') + new_path = module.params.get('new_path') + dup_ok = module.params.get('dup_ok') + if state == 'present' and not new_name and not new_path: + cert, key, cert_chain = load_data(cert=module.params.get('cert'), + key=module.params.get('key'), + cert_chain=module.params.get('cert_chain')) + else: + cert = key = cert_chain = None + + orig_cert_names = [ctb['server_certificate_name'] for ctb in + iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list] + orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body + for thing in orig_cert_names] + if new_name == name: + new_name = None + if new_path == path: + new_path = None + + changed = False + try: + cert_action(module, iam, name, path, new_name, new_path, state, + cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok) + except boto.exception.BotoServerError as err: + module.fail_json(changed=changed, msg=str(err), debug=[cert, key]) + + +if __name__ == '__main__': + main() diff --git a/iam_cert_facts.py b/iam_cert_facts.py new file mode 120000 index 00000000000..63244caa58d --- /dev/null +++ b/iam_cert_facts.py @@ -0,0 +1 @@ +iam_server_certificate_info.py \ No newline at end of file diff --git a/iam_group.py b/iam_group.py new file mode 100644 index 00000000000..7ce18593e0a --- /dev/null +++ b/iam_group.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: iam_group +short_description: Manage AWS IAM groups +description: + - Manage AWS IAM groups. +author: +- Nick Aslanidis (@naslanidis) +- Maksym Postument (@infectsoldier) +options: + name: + description: + - The name of the group to create. + required: true + type: str + managed_policies: + description: + - A list of managed policy ARNs or friendly names to attach to the role. + - To embed an inline policy, use M(iam_policy). + required: false + type: list + elements: str + aliases: ['managed_policy'] + users: + description: + - A list of existing users to add as members of the group. + required: false + type: list + elements: str + state: + description: + - Create or remove the IAM group. 
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  purge_policies:
+    description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+    required: false
+    default: false
+    type: bool
+    aliases: ['purge_policy', 'purge_managed_policies']
+  purge_users:
+    description:
+      - When I(purge_users=true) users which are not included in I(users) will be removed from the group.
+    required: false
+    default: false
+    type: bool
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a group
+- iam_group:
+    name: testgroup1
+    state: present
+
+# Create a group and attach a managed policy using its ARN
+- iam_group:
+    name: testgroup1
+    managed_policies:
+      - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+    state: present
+
+# Create a group with users as members and attach a managed policy using its ARN
+- iam_group:
+    name: testgroup1
+    managed_policies:
+      - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+    users:
+      - test_user1
+      - test_user2
+    state: present
+
+# Remove all managed policies from an existing group with an empty list
+- iam_group:
+    name: testgroup1
+    state: present
+    purge_policies: true
+
+# Remove all group members from an existing group
+- iam_group:
+    name: testgroup1
+    managed_policies:
+      - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+    purge_users: true
+    state: present
+
+
+# Delete the group
+- iam_group:
+    name: testgroup1
+    state: absent
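+
+# Attach a customer managed policy by its friendly name; the module resolves
+# names to ARNs via IAM list_policies (the policy name below is hypothetical)
+- iam_group:
+    name: testgroup1
+    managed_policies:
+      - my-deploy-policy
+    state: present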
+
+'''
+RETURN = '''
+iam_group:
+    description: dictionary containing all the group information including group membership
+    returned: success
+    type: complex
+    contains:
+        group:
+            description: dictionary containing all the group information
+            returned: success
+            type: complex
+            contains:
+                arn:
+                    description: the Amazon Resource Name (ARN) specifying the group
+                    type: str
+                    sample: "arn:aws:iam::1234567890:group/testgroup1"
+                create_date:
+                    description: the date and time, in ISO 8601 date-time format, when the group was created
+                    type: str
+                    sample: "2017-02-08T04:36:28+00:00"
+                group_id:
+                    description: the stable and unique string identifying the group
+                    type: str
+                    sample: AGPAIDBWE12NSFINE55TM
+                group_name:
+                    description: the friendly name that identifies the group
+                    type: str
+                    sample: testgroup1
+                path:
+                    description: the path to the group
+                    type: str
+                    sample: /
+        users:
+            description: list containing all the group members
+            returned: success
+            type: complex
+            contains:
+                arn:
+                    description: the Amazon Resource Name (ARN) specifying the user
+                    type: str
+                    sample: "arn:aws:iam::1234567890:user/test_user1"
+                create_date:
+                    description: the date and time, in ISO 8601 date-time format, when the user was created
+                    type: str
+                    sample: "2017-02-08T04:36:28+00:00"
+                user_id:
+                    description: the stable and unique string identifying the user
+                    type: str
+                    sample: AIDAIZTPY123YQRS22YU2
+                user_name:
+                    description: the friendly name that identifies the user
+                    type: str
+                    sample: test_user1
+                path:
+                    description: the path to the user
+                    type: str
+                    sample: /
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def compare_attached_group_policies(current_attached_policies, new_attached_policies):
+
+    # If new_attached_policies is None it means we want to remove all policies
+    if len(current_attached_policies) > 0 and new_attached_policies is None:
+        return False
+
+    current_attached_policies_arn_list = []
+    for policy in current_attached_policies:
+        current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+    if set(current_attached_policies_arn_list) == set(new_attached_policies):
+        return True
+    else:
+        return False
+
+
+def compare_group_members(current_group_members, new_group_members):
+
+    # If new_group_members is None it means we want to remove all users
+    if len(current_group_members) > 0 and new_group_members is None:
+        return False
+    if set(current_group_members) == set(new_group_members):
+        return True
+    else:
+        return False
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
+    if all(policy.startswith('arn:') for policy in policy_names if policy is not None):
+        return policy_names
+    allpolicies = {}
+    paginator = connection.get_paginator('list_policies')
+    policies = paginator.paginate().build_full_result()['Policies']
+
+    for policy in policies:
+        allpolicies[policy['PolicyName']] = policy['Arn']
+        allpolicies[policy['Arn']] = policy['Arn']
+    try:
+        return [allpolicies[policy] for policy in policy_names]
+    except KeyError as e:
+        module.fail_json(msg="Couldn't find policy: " + str(e))
+
+
+def create_or_update_group(connection, module):
+
+    params = dict()
+    params['GroupName'] = module.params.get('name')
+    managed_policies = module.params.get('managed_policies')
+    users = module.params.get('users')
+    purge_users = module.params.get('purge_users')
+    purge_policies = module.params.get('purge_policies')
+    changed = False
+    if managed_policies:
+        managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+    # Get group
+    try:
+        group = get_group(connection, module, params['GroupName'])
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't get group")
+
+    # If group is None, create it
+    if group is None:
+        # Check mode means we would create the group
+        if module.check_mode:
+            module.exit_json(changed=True)
+
+        try:
+            group = connection.create_group(**params)
+            changed = True
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Couldn't create group")
+
+    # Manage managed policies
+    current_attached_policies = get_attached_policy_list(connection, module, params['GroupName'])
+    if not compare_attached_group_policies(current_attached_policies, managed_policies):
+        current_attached_policies_arn_list = []
+        for policy in current_attached_policies:
+            current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+        # If managed_policies has a single empty element we want to remove all attached policies
+        if purge_policies:
+            # Detach policies not present
+            for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
+                changed = True
+                if not module.check_mode:
+                    try:
+                        connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
+                    except (BotoCoreError, ClientError) as e:
+                        module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName'])
+        # If there are policies to adjust that aren't in the current list, then things have changed
+        # Otherwise the only changes were in purging above
+        if set(managed_policies) - set(current_attached_policies_arn_list):
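+            # e.g. with current policies {A} and managed_policies {A, B}, B still
+            # has to be attached below, so this counts as a change even when
+            # nothing was purged (the policy sets here are purely illustrative)
+            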
changed = True + # If there are policies in managed_policies attach each policy + if managed_policies != [None] and not module.check_mode: + for policy_arn in managed_policies: + try: + connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName']) + + # Manage group memberships + try: + current_group_members = get_group(connection, module, params['GroupName'])['Users'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + + current_group_members_list = [] + for member in current_group_members: + current_group_members_list.append(member['UserName']) + + if not compare_group_members(current_group_members_list, users): + + if purge_users: + for user in list(set(current_group_members_list) - set(users)): + # Ensure we mark things have changed if any user gets purged + changed = True + # Skip actions for check mode + if not module.check_mode: + try: + connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName'])) + # If there are users to adjust that aren't in the current list, then things have changed + # Otherwise the only changes were in purging above + if set(users) - set(current_group_members_list): + changed = True + # Skip actions for check mode + if users != [None] and not module.check_mode: + for user in users: + try: + connection.add_user_to_group(GroupName=params['GroupName'], UserName=user) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName'])) + if module.check_mode: + module.exit_json(changed=changed) + + # Get the group again + try: + group = get_group(connection, module, params['GroupName']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + + module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) + + +def destroy_group(connection, module): + + params = dict() + params['GroupName'] = module.params.get('name') + + try: + group = get_group(connection, module, params['GroupName']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + if group: + # Check mode means we would remove this group + if module.check_mode: + module.exit_json(changed=True) + + # Remove any attached policies otherwise deletion fails + try: + for policy in get_attached_policy_list(connection, module, params['GroupName']): + connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName']) + + # Remove any users in the group otherwise deletion fails + current_group_members_list = [] + try: + current_group_members = get_group(connection, module, params['GroupName'])['Users'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + for member in current_group_members: + current_group_members_list.append(member['UserName']) + for user in current_group_members_list: + try: + connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + except (BotoCoreError, 
ClientError) as e: + module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName'])) + + try: + connection.delete_group(**params) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName']) + + else: + module.exit_json(changed=False) + + module.exit_json(changed=True) + + +@AWSRetry.exponential_backoff() +def get_group(connection, module, name): + try: + paginator = connection.get_paginator('get_group') + return paginator.paginate(GroupName=name).build_full_result() + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + return None + else: + raise + + +@AWSRetry.exponential_backoff() +def get_attached_policy_list(connection, module, name): + + try: + paginator = connection.get_paginator('list_attached_group_policies') + return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies'] + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + return None + else: + raise + + +def main(): + + argument_spec = dict( + name=dict(required=True), + managed_policies=dict(default=[], type='list', aliases=['managed_policy']), + users=dict(default=[], type='list'), + state=dict(choices=['present', 'absent'], required=True), + purge_users=dict(default=False, type='bool'), + purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + connection = module.client('iam') + + state = module.params.get("state") + + if state == 'present': + create_or_update_group(connection, module) + else: + destroy_group(connection, module) + + +if __name__ == '__main__': + main() diff --git a/iam_managed_policy.py b/iam_managed_policy.py new file mode 100644 index 00000000000..3b8d4736aef --- /dev/null +++ b/iam_managed_policy.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: iam_managed_policy +short_description: Manage User Managed IAM policies +description: + - Allows creating and removing managed IAM policies +options: + policy_name: + description: + - The name of the managed policy. + required: True + type: str + policy_description: + description: + - A helpful description of this policy, this value is immutable and only set when creating a new policy. + default: '' + type: str + policy: + description: + - A properly json formatted policy + type: json + make_default: + description: + - Make this revision the default revision. + default: True + type: bool + only_version: + description: + - Remove all other non default revisions, if this is used with C(make_default) it will result in all other versions of this policy being deleted. + type: bool + default: false + state: + description: + - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found. + default: present + choices: [ "present", "absent" ] + type: str + fail_on_delete: + description: + - The I(fail_on_delete) option does nothing and will be removed in Ansible 2.14. 
+ type: bool + +author: "Dan Kozlowski (@dkhenry)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 + - botocore +''' + +EXAMPLES = ''' +# Create Policy ex nihilo +- name: Create IAM Managed Policy + iam_managed_policy: + policy_name: "ManagedPolicy" + policy_description: "A Helpful managed policy" + policy: "{{ lookup('template', 'managed_policy.json.j2') }}" + state: present + +# Update a policy with a new default version +- name: Create IAM Managed Policy + iam_managed_policy: + policy_name: "ManagedPolicy" + policy: "{{ lookup('file', 'managed_policy_update.json') }}" + state: present + +# Update a policy with a new non default version +- name: Create IAM Managed Policy + iam_managed_policy: + policy_name: "ManagedPolicy" + policy: "{{ lookup('file', 'managed_policy_update.json') }}" + make_default: false + state: present + +# Update a policy and make it the only version and the default version +- name: Create IAM Managed Policy + iam_managed_policy: + policy_name: "ManagedPolicy" + policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}" + only_version: true + state: present + +# Remove a policy +- name: Create IAM Managed Policy + iam_managed_policy: + policy_name: "ManagedPolicy" + state: absent +''' + +RETURN = ''' +policy: + description: Returns the policy json structure, when state == absent this will return the value of the removed policy. + returned: success + type: str + sample: '{ + "arn": "arn:aws:iam::aws:policy/AdministratorAccess " + "attachment_count": 0, + "create_date": "2017-03-01T15:42:55.981000+00:00", + "default_version_id": "v1", + "is_attachable": true, + "path": "/", + "policy_id": "ANPALM4KLDMTFXGOOJIHL", + "policy_name": "AdministratorAccess", + "update_date": "2017-03-01T15:42:55.981000+00:00" + }' +''' + +import json +import traceback + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry, + camel_dict_to_snake_dict, HAS_BOTO3, compare_policies) +from ansible.module_utils._text import to_native + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def list_policies_with_backoff(iam): + paginator = iam.get_paginator('list_policies') + return paginator.paginate(Scope='Local').build_full_result() + + +def get_policy_by_name(module, iam, name): + try: + response = list_policies_with_backoff(iam) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't list policies: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for policy in response['Policies']: + if policy['PolicyName'] == name: + return policy + return None + + +def delete_oldest_non_default_version(module, iam, policy): + try: + versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + if not v['IsDefaultVersion']] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't list policy versions: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + versions.sort(key=lambda v: v['CreateDate'], reverse=True) + for v in versions[-1:]: + try: + iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't delete policy version: %s" % str(e), 
+ exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + +# This needs to return policy_version, changed +def get_or_create_policy_version(module, iam, policy, policy_document): + try: + versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't list policy versions: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for v in versions: + try: + document = iam.get_policy_version(PolicyArn=policy['Arn'], + VersionId=v['VersionId'])['PolicyVersion']['Document'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + # If the current policy matches the existing one + if not compare_policies(document, json.loads(to_native(policy_document))): + return v, False + + # No existing version so create one + # There is a service limit (typically 5) of policy versions. + # + # Rather than assume that it is 5, we'll try to create the policy + # and if that doesn't work, delete the oldest non default policy version + # and try again. + try: + version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + return version, True + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'LimitExceeded': + delete_oldest_non_default_version(module, iam, policy) + try: + version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + return version, True + except botocore.exceptions.ClientError as second_e: + e = second_e + # Handle both when the exception isn't LimitExceeded or + # the second attempt still failed + module.fail_json(msg="Couldn't create policy version: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + +def set_if_default(module, iam, policy, policy_version, is_default): + if is_default and not policy_version['IsDefaultVersion']: + try: + iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't set default policy version: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + return True + return False + + +def set_if_only(module, iam, policy, policy_version, is_only): + if is_only: + try: + versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[ + 'Versions'] if not v['IsDefaultVersion']] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't list policy versions: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for v in versions: + try: + iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't delete policy version: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + return len(versions) > 0 + return False + + +def detach_all_entities(module, iam, policy, **kwargs): + try: + entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)), + exception=traceback.format_exc(), 
+ **camel_dict_to_snake_dict(e.response)) + + for g in entities['PolicyGroups']: + try: + iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for u in entities['PolicyUsers']: + try: + iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for r in entities['PolicyRoles']: + try: + iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + if entities['IsTruncated']: + detach_all_entities(module, iam, policy, marker=entities['Marker']) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + policy_name=dict(required=True), + policy_description=dict(default=''), + policy=dict(type='json'), + make_default=dict(type='bool', default=True), + only_version=dict(type='bool', default=False), + fail_on_delete=dict(type='bool', removed_in_version='2.14'), + state=dict(default='present', choices=['present', 'absent']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['policy']]] + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module') + + name = module.params.get('policy_name') + description = module.params.get('policy_description') + state = module.params.get('state') + default = module.params.get('make_default') + only = module.params.get('only_version') + + policy = None + + if module.params.get('policy') is not None: + policy = json.dumps(json.loads(module.params.get('policy'))) + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + iam = boto3_conn(module, conn_type='client', resource='iam', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e: + module.fail_json(msg="Can't authorize connection. 
Check your credentials and profile.", + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + + p = get_policy_by_name(module, iam, name) + if state == 'present': + if p is None: + # No Policy so just create one + try: + rvalue = iam.create_policy(PolicyName=name, Path='/', + PolicyDocument=policy, Description=description) + except Exception as e: + module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) + else: + policy_version, changed = get_or_create_policy_version(module, iam, p, policy) + changed = set_if_default(module, iam, p, policy_version, default) or changed + changed = set_if_only(module, iam, p, policy_version, only) or changed + # If anything has changed we need to refresh the policy + if changed: + try: + p = iam.get_policy(PolicyArn=p['Arn'])['Policy'] + except Exception as e: + module.fail_json(msg="Couldn't get policy: %s" % to_native(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p)) + else: + # Check for existing policy + if p: + # Detach policy + detach_all_entities(module, iam, p) + # Delete Versions + try: + versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + for v in versions: + if not v['IsDefaultVersion']: + try: + iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't delete policy version %s: %s" % + (v['VersionId'], to_native(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + # Delete policy + try: + iam.delete_policy(PolicyArn=p['Arn']) + except Exception as e: + module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + # This is the one case where we will return the old policy + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p)) + else: + module.exit_json(changed=False, policy=None) +# end main + + +if __name__ == '__main__': + main() diff --git a/iam_mfa_device_facts.py b/iam_mfa_device_facts.py new file mode 120000 index 00000000000..63be2b059fd --- /dev/null +++ b/iam_mfa_device_facts.py @@ -0,0 +1 @@ +iam_mfa_device_info.py \ No newline at end of file diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py new file mode 100644 index 00000000000..712a951a5d9 --- /dev/null +++ b/iam_mfa_device_info.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iam_mfa_device_info +short_description: List the MFA (Multi-Factor Authentication) devices registered for a user +description: + - List the MFA (Multi-Factor Authentication) devices registered for a user + - This module was called C(iam_mfa_device_facts) before Ansible 2.9.
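A note on the version handling above: IAM caps how many versions a managed policy may keep (typically five), so get_or_create_policy_version() responds to LimitExceeded by deleting the oldest non-default version and retrying instead of hard-coding the limit. A simplified sketch of that create-then-prune strategy, assuming default credentials and an existing policy ARN:

import boto3
from botocore.exceptions import ClientError

def create_version_with_pruning(iam, policy_arn, document):
    """Create a policy version; on LimitExceeded, drop the oldest non-default version and retry once."""
    try:
        return iam.create_policy_version(PolicyArn=policy_arn, PolicyDocument=document)
    except ClientError as e:
        if e.response['Error']['Code'] != 'LimitExceeded':
            raise
        versions = iam.list_policy_versions(PolicyArn=policy_arn)['Versions']
        # At the version limit at most one version is the default, so this list is never empty.
        non_default = [v for v in versions if not v['IsDefaultVersion']]
        oldest = min(non_default, key=lambda v: v['CreateDate'])
        iam.delete_policy_version(PolicyArn=policy_arn, VersionId=oldest['VersionId'])
        return iam.create_policy_version(PolicyArn=policy_arn, PolicyDocument=document)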
The usage did not change. +author: Victor Costan (@pwnall) +options: + user_name: + description: + - The name of the user whose MFA devices will be listed + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: + - boto3 + - botocore +''' + +RETURN = """ +mfa_devices: + description: The MFA devices registered for the given user + returned: always + type: list + sample: + - enable_date: "2016-03-11T23:25:36+00:00" + serial_number: arn:aws:iam::085120003701:mfa/pwnall + user_name: pwnall + - enable_date: "2016-03-11T23:25:37+00:00" + serial_number: arn:aws:iam::085120003702:mfa/pwnall + user_name: pwnall +""" + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# List MFA devices (more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html) +- iam_mfa_device_info: + register: mfa_devices + +# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +- sts_assume_role: + mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}" + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" + register: assumed_role +''' + +try: + import boto3 + from botocore.exceptions import ClientError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, + get_aws_connection_info) + + +def list_mfa_devices(connection, module): + user_name = module.params.get('user_name') + changed = False + + args = {} + if user_name is not None: + args['UserName'] = user_name + try: + response = connection.list_mfa_devices(**args) + except ClientError as e: + module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + user_name=dict(required=False, default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'iam_mfa_device_facts': + module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", version='2.13') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if region: + connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) + else: + module.fail_json(msg="region must be specified") + + list_mfa_devices(connection, module) + + +if __name__ == '__main__': + main() diff --git a/iam_password_policy.py b/iam_password_policy.py new file mode 100644 index 00000000000..8eb03b96f78 --- /dev/null +++ b/iam_password_policy.py @@ -0,0 +1,216 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iam_password_policy +short_description: Update an IAM Password Policy +description: + - Module updates an IAM Password Policy on a given AWS account
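For context on the call the module wraps: ListMFADevices takes an optional UserName and falls back to the calling identity when it is omitted. A minimal boto3 sketch of the same lookup outside Ansible, assuming default credentials; unlike the module's single call it also follows Marker pagination:

import boto3

def list_mfa_serials(user_name=None):
    """Return MFA device serial numbers for a named user, or for the caller when user_name is None."""
    iam = boto3.client('iam')
    kwargs = {'UserName': user_name} if user_name else {}
    serials = []
    while True:
        resp = iam.list_mfa_devices(**kwargs)
        serials.extend(d['SerialNumber'] for d in resp['MFADevices'])
        if not resp.get('IsTruncated'):
            return serials
        kwargs['Marker'] = resp['Marker']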
+requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + state: + description: + - Specifies the overall state of the password policy. + required: true + choices: ['present', 'absent'] + type: str + min_pw_length: + description: + - Minimum password length. + default: 6 + aliases: [minimum_password_length] + type: int + require_symbols: + description: + - Require symbols in password. + default: false + type: bool + require_numbers: + description: + - Require numbers in password. + default: false + type: bool + require_uppercase: + description: + - Require uppercase letters in password. + default: false + type: bool + require_lowercase: + description: + - Require lowercase letters in password. + default: false + type: bool + allow_pw_change: + description: + - Allow users to change their password. + default: false + type: bool + aliases: [allow_password_change] + pw_max_age: + description: + - Maximum age for a password in days. When this option is 0 then passwords + do not expire automatically. + default: 0 + aliases: [password_max_age] + type: int + pw_reuse_prevent: + description: + - Prevent re-use of passwords. + default: 0 + aliases: [password_reuse_prevent, prevent_reuse] + type: int + pw_expire: + description: + - Prevents users from changing an expired password. + default: false + type: bool + aliases: [password_expire, expire] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: Password policy for AWS account + iam_password_policy: + state: present + min_pw_length: 8 + require_symbols: false + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 60 + pw_reuse_prevent: 5 + pw_expire: false +''' + +RETURN = ''' # ''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +class IAMConnection(object): + def __init__(self, module): + try: + self.connection = module.resource('iam') + self.module = module + except Exception as e: + module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) + + def policy_to_dict(self, policy): + policy_attributes = [ + 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry', + 'max_password_age', 'minimum_password_length', 'password_reuse_prevention', + 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters' + ] + ret = {} + for attr in policy_attributes: + ret[attr] = getattr(policy, attr) + return ret + + def update_password_policy(self, module, policy): + min_pw_length = module.params.get('min_pw_length') + require_symbols = module.params.get('require_symbols') + require_numbers = module.params.get('require_numbers') + require_uppercase = module.params.get('require_uppercase') + require_lowercase = module.params.get('require_lowercase') + allow_pw_change = module.params.get('allow_pw_change') + pw_max_age = module.params.get('pw_max_age') + pw_reuse_prevent = module.params.get('pw_reuse_prevent') + pw_expire = module.params.get('pw_expire') + + update_parameters = dict( + MinimumPasswordLength=min_pw_length, + RequireSymbols=require_symbols, + RequireNumbers=require_numbers, + RequireUppercaseCharacters=require_uppercase, + RequireLowercaseCharacters=require_lowercase, + AllowUsersToChangePassword=allow_pw_change, +
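# HardExpiry (pw_expire): when true, users cannot choose a new password themselves once theirs has expired.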
HardExpiry=pw_expire + ) + if pw_reuse_prevent: + update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) + if pw_max_age: + update_parameters.update(MaxPasswordAge=pw_max_age) + + try: + original_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + original_policy = {} + + try: + results = policy.update(**update_parameters) + policy.reload() + updated_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") + + changed = (original_policy != updated_policy) + return (changed, updated_policy, camel_dict_to_snake_dict(results)) + + def delete_password_policy(self, policy): + try: + results = policy.delete() + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) + else: + self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") + return camel_dict_to_snake_dict(results) + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'state': dict(choices=['present', 'absent'], required=True), + 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6), + 'require_symbols': dict(type='bool', default=False), + 'require_numbers': dict(type='bool', default=False), + 'require_uppercase': dict(type='bool', default=False), + 'require_lowercase': dict(type='bool', default=False), + 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False), + 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0), + 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0), + 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False), + }, + supports_check_mode=True, + ) + + resource = IAMConnection(module) + policy = resource.connection.AccountPasswordPolicy() + + state = module.params.get('state') + + if state == 'present': + (changed, new_policy, update_result) = resource.update_password_policy(module, policy) + module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy) + + if state == 'absent': + delete_result = resource.delete_password_policy(policy) + module.exit_json(changed=True, task_status={'IAM': delete_result}) + + +if __name__ == '__main__': + main() diff --git a/iam_policy.py b/iam_policy.py new file mode 100644 index 00000000000..97209071845 --- /dev/null +++ b/iam_policy.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: iam_policy +short_description: Manage inline IAM policies for users, groups, and roles +description: + - Allows uploading or removing inline IAM policies for IAM users, groups or roles. + - To administer managed policies please see M(iam_user), M(iam_role), + M(iam_group) and M(iam_managed_policy) +options: + iam_type: + description: + - Type of IAM resource. 
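# (The iam_type value chooses which API family the module drives: the user, group, or role variants of the put/get/delete inline-policy calls.)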
+ required: true + choices: [ "user", "group", "role"] + type: str + iam_name: + description: + - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name. + required: true + type: str + policy_name: + description: + - The name label for the policy to create or remove. + required: true + type: str + policy_document: + description: + - The path to the properly json formatted policy file. + - Mutually exclusive with I(policy_json). + - This option has been deprecated and will be removed in 2.14. The existing behavior can be + reproduced by using the I(policy_json) option and reading the file using the lookup plugin. + type: str + policy_json: + description: + - A properly json formatted policy as string. + - Mutually exclusive with I(policy_document). + - See U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) on how to use it properly. + type: json + state: + description: + - Whether to create or delete the IAM policy. + choices: [ "present", "absent"] + default: present + type: str + skip_duplicates: + description: + - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. If there is a match it will not make + a new policy object with the same rules. + - The current default is C(true). However, this behavior can be confusing and as such the default will change to C(false) in 2.14. To maintain + the existing behavior explicitly set I(skip_duplicates=true). + type: bool + +author: + - "Jonathan I. Davila (@defionscode)" + - "Dennis Podkovyrin (@sbj-ss)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Create a policy with the name of 'Admin' to the group 'administrators' +- name: Assign a policy called Admin to the administrators group + iam_policy: + iam_type: group + iam_name: administrators + policy_name: Admin + state: present + policy_document: admin_policy.json + +# Advanced example, create two new groups and add a READ-ONLY policy to both +# groups. 
+- name: Create Two Groups, Mario and Luigi + iam: + iam_type: group + name: "{{ item }}" + state: present + loop: + - Mario + - Luigi + register: new_groups + +- name: Apply READ-ONLY policy to new groups that have been recently created + iam_policy: + iam_type: group + iam_name: "{{ item.created_group.group_name }}" + policy_name: "READ-ONLY" + policy_document: readonlypolicy.json + state: present + loop: "{{ new_groups.results }}" + +# Create a new S3 policy with prefix per user +- name: Create S3 policy from template + iam_policy: + iam_type: user + iam_name: "{{ item.user }}" + policy_name: "s3_limited_access_{{ item.prefix }}" + state: present + policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " + loop: + - user: s3_user + prefix: s3_user_prefix + +''' +import json + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies +from ansible.module_utils.six import string_types + + +class PolicyError(Exception): + pass + + +class Policy: + + def __init__(self, client, name, policy_name, policy_document, policy_json, skip_duplicates, state, check_mode): + self.client = client + self.name = name + self.policy_name = policy_name + self.policy_document = policy_document + self.policy_json = policy_json + self.skip_duplicates = skip_duplicates + self.state = state + self.check_mode = check_mode + self.changed = False + + @staticmethod + def _iam_type(): + return '' + + def _list(self, name): + return {} + + def list(self): + return self._list(self.name).get('PolicyNames', []) + + def _get(self, name, policy_name): + return '{}' + + def get(self, policy_name): + return self._get(self.name, policy_name)['PolicyDocument'] + + def _put(self, name, policy_name, policy_doc): + pass + + def put(self, policy_doc): + if not self.check_mode: + self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True)) + self.changed = True + + def _delete(self, name, policy_name): + pass + + def delete(self): + if self.policy_name not in self.list(): + self.changed = False + return + + self.changed = True + if not self.check_mode: + self._delete(self.name, self.policy_name) + + def get_policy_text(self): + try: + if self.policy_document is not None: + return self.get_policy_from_document() + if self.policy_json is not None: + return self.get_policy_from_json() + except json.JSONDecodeError as e: + raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) + return None + + def get_policy_from_document(self): + try: + with open(self.policy_document, 'r') as json_data: + pdoc = json.load(json_data) + json_data.close() + except IOError as e: + if e.errno == 2: + raise PolicyError('policy_document {0:!r} does not exist'.format(self.policy_document)) + raise + return pdoc + + def get_policy_from_json(self): + if isinstance(self.policy_json, string_types): + pdoc = json.loads(self.policy_json) + else: + pdoc = self.policy_json + return pdoc + + def create(self): + matching_policies = [] + policy_doc = self.get_policy_text() + policy_match = False + for pol in self.list(): + if not compare_policies(self.get(pol), policy_doc): + matching_policies.append(pol) + policy_match = True + + if (self.policy_name not in matching_policies) and not (self.skip_duplicates and policy_match): + self.put(policy_doc) + + def run(self): + if self.state == 'present': + 
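# put() is skipped when the named policy already matches the document, or when skip_duplicates is set and any existing inline policy matches it.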
self.create() + elif self.state == 'absent': + self.delete() + return { + 'changed': self.changed, + self._iam_type() + '_name': self.name, + 'policies': self.list() + } + + +class UserPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'user' + + def _list(self, name): + return self.client.list_user_policies(UserName=name) + + def _get(self, name, policy_name): + return self.client.get_user_policy(UserName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_user_policy(UserName=name, PolicyName=policy_name) + + +class RolePolicy(Policy): + + @staticmethod + def _iam_type(): + return 'role' + + def _list(self, name): + return self.client.list_role_policies(RoleName=name) + + def _get(self, name, policy_name): + return self.client.get_role_policy(RoleName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_role_policy(RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_role_policy(RoleName=name, PolicyName=policy_name) + + +class GroupPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'group' + + def _list(self, name): + return self.client.list_group_policies(GroupName=name) + + def _get(self, name, policy_name): + return self.client.get_group_policy(GroupName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_group_policy(GroupName=name, PolicyName=policy_name) + + +def main(): + argument_spec = dict( + iam_type=dict(required=True, choices=['user', 'group', 'role']), + state=dict(default='present', choices=['present', 'absent']), + iam_name=dict(required=True), + policy_name=dict(required=True), + policy_document=dict(default=None, required=False), + policy_json=dict(type='json', default=None, required=False), + skip_duplicates=dict(type='bool', default=None, required=False) + ) + mutually_exclusive = [['policy_document', 'policy_json']] + + module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) + + skip_duplicates = module.params.get('skip_duplicates') + + if (skip_duplicates is None): + module.deprecate('The skip_duplicates behaviour has caused confusion and' + ' will be disabled by default in Ansible 2.14', + version='2.14') + skip_duplicates = True + + if module.params.get('policy_document'): + module.deprecate('The policy_document option has been deprecated and' + ' will be removed in Ansible 2.14', + version='2.14') + + args = dict( + client=module.client('iam'), + name=module.params.get('iam_name'), + policy_name=module.params.get('policy_name'), + policy_document=module.params.get('policy_document'), + policy_json=module.params.get('policy_json'), + skip_duplicates=skip_duplicates, + state=module.params.get('state'), + check_mode=module.check_mode, + ) + iam_type = module.params.get('iam_type') + + try: + if iam_type == 'user': + policy = UserPolicy(**args) + elif iam_type == 'role': + policy = RolePolicy(**args) + elif iam_type == 'group': + policy = GroupPolicy(**args) + + module.exit_json(**(policy.run())) + except (BotoCoreError, ClientError) as e: + 
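# fail_json_aws() renders the botocore error together with its traceback and the AWS response metadata.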
module.fail_json_aws(e) + except PolicyError as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/iam_policy_info.py b/iam_policy_info.py new file mode 100644 index 00000000000..5e272784d18 --- /dev/null +++ b/iam_policy_info.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: iam_policy_info +short_description: Retrieve inline IAM policies for users, groups, and roles +description: + - Supports fetching of inline IAM policies for IAM users, groups and roles. +options: + iam_type: + description: + - Type of IAM resource you wish to retrieve inline policies for. + required: yes + choices: [ "user", "group", "role"] + type: str + iam_name: + description: + - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name. + required: yes + type: str + policy_name: + description: + - Name of a specific IAM inline policy you wish to retrieve. + required: no + type: str + +author: + - Mark Chappell (@tremble) + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Describe all inline IAM policies on an IAM User +- iam_policy_info: + iam_type: user + iam_name: example_user + +# Describe a specific inline policy on an IAM Role +- iam_policy_info: + iam_type: role + iam_name: example_role + policy_name: example_policy + +''' +RETURN = ''' +policies: + description: A list containing the matching IAM inline policy names and their data + returned: success + type: complex + contains: + policy_name: + description: The name of the inline policy + returned: success + type: str + policy_document: + description: The JSON document representing the inline IAM policy + returned: success + type: list +policy_names: + description: A list of matching names of the IAM inline policies on the queried object + returned: success + type: list +all_policy_names: + description: A list of names of all of the IAM inline policies on the queried object + returned: success + type: list +''' + +import json + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.six import string_types + + +class PolicyError(Exception): + pass + + +class Policy: + + def __init__(self, client, name, policy_name): + self.client = client + self.name = name + self.policy_name = policy_name + self.changed = False + + @staticmethod + def _iam_type(): + return '' + + def _list(self, name): + return {} + + def list(self): + return self._list(self.name).get('PolicyNames', []) + + def _get(self, name, policy_name): + return '{}' + + def get(self, policy_name): + return self._get(self.name, policy_name)['PolicyDocument'] + + def get_all(self): + policies = list() + for policy in self.list(): + policies.append({"policy_name": policy, "policy_document": self.get(policy)}) + return policies + + def run(self): + policy_list = self.list() + ret_val = { + 'changed': False, + self._iam_type() + '_name': self.name, + 'all_policy_names': policy_list + } + if self.policy_name is None:
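# No policy_name was requested, so return every inline policy attached to the entity.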
ret_val.update(policies=self.get_all()) + ret_val.update(policy_names=policy_list) + elif self.policy_name in policy_list: + ret_val.update(policies=[{ + "policy_name": self.policy_name, + "policy_document": self.get(self.policy_name)}]) + ret_val.update(policy_names=[self.policy_name]) + return ret_val + + +class UserPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'user' + + def _list(self, name): + return self.client.list_user_policies(UserName=name) + + def _get(self, name, policy_name): + return self.client.get_user_policy(UserName=name, PolicyName=policy_name) + + +class RolePolicy(Policy): + + @staticmethod + def _iam_type(): + return 'role' + + def _list(self, name): + return self.client.list_role_policies(RoleName=name) + + def _get(self, name, policy_name): + return self.client.get_role_policy(RoleName=name, PolicyName=policy_name) + + +class GroupPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'group' + + def _list(self, name): + return self.client.list_group_policies(GroupName=name) + + def _get(self, name, policy_name): + return self.client.get_group_policy(GroupName=name, PolicyName=policy_name) + + +def main(): + argument_spec = dict( + iam_type=dict(required=True, choices=['user', 'group', 'role']), + iam_name=dict(required=True), + policy_name=dict(default=None, required=False), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + args = dict( + client=module.client('iam'), + name=module.params.get('iam_name'), + policy_name=module.params.get('policy_name'), + ) + iam_type = module.params.get('iam_type') + + try: + if iam_type == 'user': + policy = UserPolicy(**args) + elif iam_type == 'role': + policy = RolePolicy(**args) + elif iam_type == 'group': + policy = GroupPolicy(**args) + + module.exit_json(**(policy.run())) + except (BotoCoreError, ClientError) as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + module.exit_json(changed=False, msg=e.response['Error']['Message']) + module.fail_json_aws(e) + except PolicyError as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/iam_role.py b/iam_role.py new file mode 100644 index 00000000000..7b865efe896 --- /dev/null +++ b/iam_role.py @@ -0,0 +1,665 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iam_role +short_description: Manage AWS IAM roles +description: + - Manage AWS IAM roles. +author: "Rob White (@wimnat)" +options: + path: + description: + - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + default: "/" + type: str + name: + description: + - The name of the role to create. + required: true + type: str + description: + description: + - Provides a description of the role. + type: str + boundary: + description: + - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates. + - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false). + - This is intended for roles/users that have permissions to create new IAM objects. 
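To make the boundary option concrete, here is a minimal sketch of creating a role with a permissions boundary in plain boto3; the role name and boundary ARN are hypothetical and credentials are assumed to come from boto3's default chain:

import json

import boto3

trust_policy = {
    'Version': '2012-10-17',
    'Statement': [{
        'Effect': 'Allow',
        'Principal': {'Service': 'ec2.amazonaws.com'},
        'Action': 'sts:AssumeRole',
    }],
}

iam = boto3.client('iam')
iam.create_role(
    RoleName='bounded-role',  # hypothetical role name
    AssumeRolePolicyDocument=json.dumps(trust_policy),
    # The boundary caps what any policy attached to this role can actually grant.
    PermissionsBoundary='arn:aws:iam::123456789012:policy/DevBoundary',  # hypothetical ARN
)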
+ - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html). + - Requires botocore 1.10.57 or above. + aliases: [boundary_policy_arn] + type: str + assume_role_policy_document: + description: + - The trust relationship policy document that grants an entity permission to assume the role. + - This parameter is required when I(state=present). + type: json + managed_policies: + description: + - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names. + - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]). + - To embed an inline policy, use M(iam_policy). + aliases: ['managed_policy'] + type: list + max_session_duration: + description: + - The maximum duration (in seconds) of a session when assuming the role. + - Valid values are between 1 and 12 hours (3600 and 43200 seconds). + type: int + purge_policies: + description: + - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. + - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false). + type: bool + aliases: ['purge_policy', 'purge_managed_policies'] + state: + description: + - Create or remove the IAM role. + default: present + choices: [ present, absent ] + type: str + create_instance_profile: + description: + - Creates an IAM instance profile along with the role. + default: true + type: bool + delete_instance_profile: + description: + - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance + profile created with the same I(name) as the role. + - Only applies when I(state=absent). + default: false + type: bool + tags: + description: + - Tag dict to apply to the role. + - Requires botocore 1.12.46 or above. + type: dict + purge_tags: + description: + - Remove tags not listed in I(tags) when tags is specified. + default: true + type: bool +requirements: [ botocore, boto3 ] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details.
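# The trust policy fed to assume_role_policy_document is plain JSON; the file lookup used below is one way to produce it, and a template lookup or an inline dict serialised with to_json works identically.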
+ +- name: Create a role with description and tags + iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + description: This is My New Role + tags: + env: dev + +- name: "Create a role and attach a managed policy called 'PowerUserAccess'" + iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: + - arn:aws:iam::aws:policy/PowerUserAccess + +- name: Keep the role created above but remove all managed policies + iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: [] + +- name: Delete the role + iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" + state: absent + +''' +RETURN = ''' +iam_role: + description: dictionary containing the IAM Role data + returned: success + type: complex + contains: + path: + description: the path to the role + type: str + returned: always + sample: / + role_name: + description: the friendly name that identifies the role + type: str + returned: always + sample: myrole + role_id: + description: the stable and unique string identifying the role + type: str + returned: always + sample: ABCDEFF4EZ4ABCDEFV4ZC + arn: + description: the Amazon Resource Name (ARN) specifying the role + type: str + returned: always + sample: "arn:aws:iam::1234567890:role/mynewrole" + create_date: + description: the date and time, in ISO 8601 date-time format, when the role was created + type: str + returned: always + sample: "2016-08-14T04:36:28+00:00" + assume_role_policy_document: + description: the policy that grants an entity permission to assume the role + type: str + returned: always + sample: { + 'statement': [ + { + 'action': 'sts:AssumeRole', + 'effect': 'Allow', + 'principal': { + 'service': 'ec2.amazonaws.com' + }, + 'sid': '' + } + ], + 'version': '2012-10-17' + } + attached_policies: + description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role + type: list + returned: always + sample: [ + { + 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess', + 'policy_name': 'PowerUserAccess' + } + ] + tags: + description: role tags + type: dict + returned: always + sample: '{"Env": "Prod"}' +''' + +import json + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + + +def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): + if not compare_policies(current_policy_doc, json.loads(new_policy_doc)): + return True + else: + return False + + +@AWSRetry.jittered_backoff() +def _list_policies(connection): + paginator = connection.get_paginator('list_policies') + return paginator.paginate().build_full_result()['Policies'] + + +def convert_friendly_names_to_arns(connection, module, policy_names): + if not any([not policy.startswith('arn:') for policy in policy_names]): + return policy_names + allpolicies = {} + policies = _list_policies(connection) + + for policy in policies: + allpolicies[policy['PolicyName']] = policy['Arn'] + allpolicies[policy['Arn']] = policy['Arn'] + try: + 
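# A friendly name that matches no policy in the account raises KeyError, converted to fail_json below.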
return [allpolicies[policy] for policy in policy_names] + except KeyError as e: + module.fail_json_aws(e, msg="Couldn't find policy") + + +def attach_policies(connection, module, policies_to_attach, params): + changed = False + for policy_arn in policies_to_attach: + try: + if not module.check_mode: + connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName'])) + changed = True + return changed + + +def remove_policies(connection, module, policies_to_remove, params): + changed = False + for policy in policies_to_remove: + try: + if not module.check_mode: + connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName'])) + changed = True + return changed + + +def generate_create_params(module): + params = dict() + params['Path'] = module.params.get('path') + params['RoleName'] = module.params.get('name') + params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document') + if module.params.get('description') is not None: + params['Description'] = module.params.get('description') + if module.params.get('max_session_duration') is not None: + params['MaxSessionDuration'] = module.params.get('max_session_duration') + if module.params.get('boundary') is not None: + params['PermissionsBoundary'] = module.params.get('boundary') + if module.params.get('tags') is not None: + params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + + return params + + +def create_basic_role(connection, module, params): + """ + Perform the Role creation. + Assumes tests for the role existing have already been performed. + """ + + try: + if not module.check_mode: + role = connection.create_role(aws_retry=True, **params) + # 'Description' is documented as key of the role returned by create_role + # but appears to be an AWS bug (the value is not returned using the AWS CLI either). + # Get the role after creating it. 
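# get_role_with_backoff() retries on NoSuchEntity because IAM is eventually consistent; the new role may not be readable for a short window after create_role returns.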
+ role = get_role_with_backoff(connection, module, params['RoleName']) + else: + role = {'MadeInCheckMode': True} + role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to create role") + + return role + + +def update_role_assumed_policy(connection, module, params, role): + # Check Assumed Policy document + if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']): + return False + + if module.check_mode: + return True + + try: + connection.update_assume_role_policy( + RoleName=params['RoleName'], + PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])), + aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName'])) + return True + + +def update_role_description(connection, module, params, role): + # Check Description update + if params.get('Description') is None: + return False + if role.get('Description') == params['Description']: + return False + + if module.check_mode: + return True + + try: + connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName'])) + return True + + +def update_role_max_session_duration(connection, module, params, role): + # Check MaxSessionDuration update + if params.get('MaxSessionDuration') is None: + return False + if role.get('MaxSessionDuration') == params['MaxSessionDuration']: + return False + + if module.check_mode: + return True + + try: + connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName'])) + return True + + +def update_role_permissions_boundary(connection, module, params, role): + # Check PermissionsBoundary + if params.get('PermissionsBoundary') is None: + return False + if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''): + return False + + if module.check_mode: + return True + + if params.get('PermissionsBoundary') == '': + try: + connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName'])) + else: + try: + connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName'])) + return True + + +def update_managed_policies(connection, module, params, role, managed_policies, purge_policies): + # Check Managed Policies + if managed_policies is None: + return False + + # If we're manipulating a fake role + if role.get('MadeInCheckMode', False): + role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies)) + return True + + # Get list of current attached managed policies + current_attached_policies = get_attached_policy_list(connection, module, 
params['RoleName']) + current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] + + if len(managed_policies) == 1 and managed_policies[0] is None: + managed_policies = [] + + policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies) + policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list) + + changed = False + + if purge_policies: + changed |= remove_policies(connection, module, policies_to_remove, params) + + changed |= attach_policies(connection, module, policies_to_attach, params) + + return changed + + +def create_or_update_role(connection, module): + + params = generate_create_params(module) + role_name = params['RoleName'] + create_instance_profile = module.params.get('create_instance_profile') + purge_policies = module.params.get('purge_policies') + if purge_policies is None: + purge_policies = True + managed_policies = module.params.get('managed_policies') + if managed_policies: + # Attempt to list the policies early so we don't leave things behind if we can't find them. + managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + + changed = False + + # Get role + role = get_role(connection, module, role_name) + + # If role is None, create it + if role is None: + role = create_basic_role(connection, module, params) + changed = True + else: + changed |= update_role_tags(connection, module, params, role) + changed |= update_role_assumed_policy(connection, module, params, role) + changed |= update_role_description(connection, module, params, role) + changed |= update_role_max_session_duration(connection, module, params, role) + changed |= update_role_permissions_boundary(connection, module, params, role) + + if create_instance_profile: + changed |= create_instance_profiles(connection, module, params, role) + + changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies) + + # Get the role again + if not role.get('MadeInCheckMode', False): + role = get_role(connection, module, params['RoleName']) + role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName']) + role['tags'] = get_role_tags(connection, module) + + module.exit_json( + changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']), + **camel_dict_to_snake_dict(role, ignore_list=['tags'])) + + +def create_instance_profiles(connection, module, params, role): + + if role.get('MadeInCheckMode', False): + return False + + # Fetch existing Profiles + try: + instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName'])) + + # Profile already exists + if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles): + return False + + if module.check_mode: + return True + + # Make sure an instance profile is created + try: + connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True) + except ClientError as e: + # If the profile already exists, no problem, move on. + # Implies someone's changing things at the same time... 
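# EntityAlreadyExists here signals a benign race: another actor created the profile between our list and create calls, so report no change rather than failing.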
+ if e.response['Error']['Code'] == 'EntityAlreadyExists': + return False + else: + module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName'])) + except BotoCoreError as e: + module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName'])) + + # And attach the role to the profile + try: + connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName'])) + + return True + + +def remove_instance_profiles(connection, module, role_params, role): + role_name = module.params.get('name') + delete_profiles = module.params.get("delete_instance_profile") + + try: + instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) + + # Remove the role from the instance profile(s) + for profile in instance_profiles: + profile_name = profile['InstanceProfileName'] + try: + if not module.check_mode: + connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params) + if profile_name == role_name: + if delete_profiles: + try: + connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) + + +def destroy_role(connection, module): + + role_name = module.params.get('name') + role = get_role(connection, module, role_name) + role_params = dict() + role_params['RoleName'] = role_name + boundary_params = dict(role_params) + boundary_params['PermissionsBoundary'] = '' + + if role is None: + module.exit_json(changed=False) + + # Before we try to delete the role we need to remove any + # - attached instance profiles + # - attached managed policies + # - permissions boundary + remove_instance_profiles(connection, module, role_params, role) + update_managed_policies(connection, module, role_params, role, [], True) + update_role_permissions_boundary(connection, module, boundary_params, role) + + try: + if not module.check_mode: + connection.delete_role(aws_retry=True, **role_params) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete role") + + module.exit_json(changed=True) + + +def get_role_with_backoff(connection, module, name): + try: + return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + + +def get_role(connection, module, name): + try: + return connection.get_role(RoleName=name, aws_retry=True)['Role'] + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + return None + else: + module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + except BotoCoreError as e: + module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + + +def get_attached_policy_list(connection, module, name): + try: + return 
connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
+
+
+def get_role_tags(connection, module):
+    role_name = module.params.get('name')
+    if not hasattr(connection, 'list_role_tags'):
+        return {}
+    try:
+        return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
+
+
+def update_role_tags(connection, module, params, role):
+    new_tags = params.get('Tags')
+    if new_tags is None:
+        return False
+    new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+
+    role_name = module.params.get('name')
+    purge_tags = module.params.get('purge_tags')
+
+    try:
+        existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+    except (ClientError, KeyError):
+        existing_tags = {}
+
+    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+    if not module.check_mode:
+        try:
+            if tags_to_remove:
+                connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
+            if tags_to_add:
+                connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
+
+    changed = bool(tags_to_add) or bool(tags_to_remove)
+    return changed
+
+
+def main():
+
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+        path=dict(type='str', default="/"),
+        assume_role_policy_document=dict(type='json'),
+        managed_policies=dict(type='list', aliases=['managed_policy']),
+        max_session_duration=dict(type='int'),
+        state=dict(type='str', choices=['present', 'absent'], default='present'),
+        description=dict(type='str'),
+        boundary=dict(type='str', aliases=['boundary_policy_arn']),
+        create_instance_profile=dict(type='bool', default=True),
+        delete_instance_profile=dict(type='bool', default=False),
+        purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+        tags=dict(type='dict'),
+        purge_tags=dict(type='bool', default=True),
+    )
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              required_if=[('state', 'present', ['assume_role_policy_document'])],
+                              supports_check_mode=True)
+
+    if module.params.get('purge_policies') is None:
+        module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
+                         ' To maintain the existing behaviour explicitly set purge_policies=true', version='2.14')
+
+    if module.params.get('boundary'):
+        if module.params.get('create_instance_profile'):
+            module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
+        if not module.params.get('boundary').startswith('arn:aws:iam'):
+            module.fail_json(msg="Boundary policy must be an ARN")
+    if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
+        module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
+                             "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
+    if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
+        module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
" + "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions())) + if module.params.get('max_session_duration'): + max_session_duration = module.params.get('max_session_duration') + if max_session_duration < 3600 or max_session_duration > 43200: + module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)") + if module.params.get('path'): + path = module.params.get('path') + if not path.endswith('/') or not path.startswith('/'): + module.fail_json(msg="path must begin and end with /") + + connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + + if state == 'present': + create_or_update_role(connection, module) + else: + destroy_role(connection, module) + + +if __name__ == '__main__': + main() diff --git a/iam_role_facts.py b/iam_role_facts.py new file mode 120000 index 00000000000..e15c454b71c --- /dev/null +++ b/iam_role_facts.py @@ -0,0 +1 @@ +iam_role_info.py \ No newline at end of file diff --git a/iam_role_info.py b/iam_role_info.py new file mode 100644 index 00000000000..5a3753fd524 --- /dev/null +++ b/iam_role_info.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iam_role_info +short_description: Gather information on IAM roles +description: + - Gathers information about IAM roles. + - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change. +requirements: [ boto3 ] +author: + - "Will Thames (@willthames)" +options: + name: + description: + - Name of a role to search for. + - Mutually exclusive with I(path_prefix). + aliases: + - role_name + type: str + path_prefix: + description: + - Prefix of role to restrict IAM role search for. + - Mutually exclusive with I(name). + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# find all existing IAM roles +- iam_role_info: + register: result + +# describe a single role +- iam_role_info: + name: MyIAMRole + +# describe all roles matching a path prefix +- iam_role_info: + path_prefix: /application/path +''' + +RETURN = ''' +iam_roles: + description: List of IAM roles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for IAM role. + returned: always + type: str + sample: arn:aws:iam::123456789012:role/AnsibleTestRole + assume_role_policy_document: + description: Policy Document describing what can assume the role. + returned: always + type: str + create_date: + description: Date IAM role was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + inline_policies: + description: List of names of inline policies. + returned: always + type: list + sample: [] + managed_policies: + description: List of attached managed policies. + returned: always + type: complex + contains: + policy_arn: + description: Amazon Resource Name for the policy. + returned: always + type: str + sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy + policy_name: + description: Name of managed policy. 
+ returned: always + type: str + sample: AnsibleTestEC2Policy + instance_profiles: + description: List of attached instance profiles. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + path: + description: Path of role. + returned: always + type: str + sample: / + role_id: + description: Amazon Identifier for the role. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + role_name: + description: Name of the role. + returned: always + type: str + sample: AnsibleTestRole + tags: + description: Role tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry + + +@AWSRetry.exponential_backoff() +def list_iam_roles_with_backoff(client, **kwargs): + paginator = client.get_paginator('list_roles') + return paginator.paginate(**kwargs).build_full_result() + + +@AWSRetry.exponential_backoff() +def list_iam_role_policies_with_backoff(client, role_name): + paginator = client.get_paginator('list_role_policies') + return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] + + +@AWSRetry.exponential_backoff() +def list_iam_attached_role_policies_with_backoff(client, role_name): + paginator = client.get_paginator('list_attached_role_policies') + return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] + + +@AWSRetry.exponential_backoff() +def list_iam_instance_profiles_for_role_with_backoff(client, role_name): + paginator = client.get_paginator('list_instance_profiles_for_role') + return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] + + +def describe_iam_role(module, client, role): + name = role['RoleName'] + try: + role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) + try: + role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) + try: + role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) + try: + role['tags'] = 
boto3_tag_list_to_ansible_dict(role['Tags'])
+        del role['Tags']
+    except KeyError:
+        role['tags'] = {}
+    return role
+
+
+def describe_iam_roles(module, client):
+    name = module.params['name']
+    path_prefix = module.params['path_prefix']
+    if name:
+        try:
+            roles = [client.get_role(RoleName=name)['Role']]
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'NoSuchEntity':
+                return []
+            else:
+                module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+        except botocore.exceptions.BotoCoreError as e:
+            module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
+    else:
+        params = dict()
+        if path_prefix:
+            if not path_prefix.startswith('/'):
+                path_prefix = '/' + path_prefix
+            if not path_prefix.endswith('/'):
+                path_prefix = path_prefix + '/'
+            params['PathPrefix'] = path_prefix
+        try:
+            roles = list_iam_roles_with_backoff(client, **params)['Roles']
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't list IAM roles")
+    return [camel_dict_to_snake_dict(describe_iam_role(module, client, role), ignore_list=['tags']) for role in roles]
+
+
+def main():
+    """
+    Module action handler
+    """
+    argument_spec = dict(
+        name=dict(aliases=['role_name']),
+        path_prefix=dict(),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              supports_check_mode=True,
+                              mutually_exclusive=[['name', 'path_prefix']])
+    if module._name == 'iam_role_facts':
+        module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", version='2.13')
+
+    client = module.client('iam')
+
+    module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/iam_saml_federation.py b/iam_saml_federation.py
new file mode 100644
index 00000000000..34f0db647a9
--- /dev/null
+++ b/iam_saml_federation.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: iam_saml_federation
+short_description: Maintain IAM SAML federation configuration
+requirements:
+  - boto3
+description:
+  - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata).
+options:
+  name:
+    description:
+      - The name of the provider to create.
+    required: true
+    type: str
+  saml_metadata_document:
+    description:
+      - The XML document generated by an identity provider (IdP) that supports SAML 2.0.
+    type: str
+  state:
+    description:
+      - Whether to create or delete the identity provider. If 'present' is specified it will attempt to update the identity provider matching the name field.
+    default: present
+    choices: [ "present", "absent" ]
+    type: str
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+author:
+  - Tony (@axc450)
+  - Aidan Rowe (@aidan-)
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# It is assumed that their matching environment variables are set.
+# Creates a new iam saml identity provider if not present
+- name: saml provider
+  iam_saml_federation:
+    name: example1
+    # the > below opens an indented block, so no escaping/quoting is needed when in the indentation level under this key
+    saml_metadata_document: >
+        ...
+    >>> import boto3
+    >>> iam = boto3.client('iam')
+    >>> name = "server-cert-name"
+    >>> results = get_server_certs(iam, name)
+    {
+        "upload_date": "2015-04-25T00:36:40+00:00",
+        "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
+        "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
+        "server_certificate_name": "server-cert-name",
+        "expiration": "2017-06-15T12:00:00+00:00",
+        "path": "/",
+        "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+    }
+    """
+    results = dict()
+    try:
+        if name:
+            server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+        else:
+            server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+
+        for server_cert in server_certs:
+            if not name:
+                server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
+            cert_md = server_cert['ServerCertificateMetadata']
+            results[cert_md['ServerCertificateName']] = {
+                'certificate_body': server_cert['CertificateBody'],
+                'server_certificate_id': cert_md['ServerCertificateId'],
+                'server_certificate_name': cert_md['ServerCertificateName'],
+                'arn': cert_md['Arn'],
+                'path': cert_md['Path'],
+                'expiration': cert_md['Expiration'].isoformat(),
+                'upload_date': cert_md['UploadDate'].isoformat(),
+            }
+
+    except botocore.exceptions.ClientError:
+        pass
+
+    return results
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        name=dict(type='str'),
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec,)
+    if module._name == 'iam_server_certificate_facts':
+        module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", version='2.13')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
+
+    cert_name = module.params.get('name')
+    results = get_server_certs(iam, cert_name)
+    module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/iam_user.py b/iam_user.py
new file mode 100644
index 00000000000..f66738022bb
--- /dev/null
+++ b/iam_user.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: iam_user
+short_description: Manage AWS IAM users
+description:
+  - Manage AWS IAM users.
+author: Josh Souza (@joshsouza)
+options:
+  name:
+    description:
+      - The name of the user to create.
+    required: true
+    type: str
+  managed_policies:
+    description:
+      - A list of managed policy ARNs or friendly names to attach to the user.
+      - To embed an inline policy, use M(iam_policy).
+    required: false
+    type: list
+    aliases: ['managed_policy']
+  state:
+    description:
+      - Create or remove the IAM user.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  purge_policies:
+    description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+    required: false
+    default: false
+    type: bool
+    aliases: ['purge_policy', 'purge_managed_policies']
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Note: This module does not allow management of groups that users belong to.
+# Groups should manage their membership directly using `iam_group`,
+# as users belong to them.
+
+# Create a user
+- iam_user:
+    name: testuser1
+    state: present
+
+# Create a user and attach a managed policy using its ARN
+- iam_user:
+    name: testuser1
+    managed_policies:
+      - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+    state: present
+
+# Remove all managed policies from an existing user with an empty list
+- iam_user:
+    name: testuser1
+    state: present
+    purge_policies: true
+
+# Delete the user
+- iam_user:
+    name: testuser1
+    state: absent
+
+'''
+RETURN = '''
+user:
+    description: dictionary containing all the user information
+    returned: success
+    type: complex
+    contains:
+        arn:
+            description: the Amazon Resource Name (ARN) specifying the user
+            type: str
+            sample: "arn:aws:iam::1234567890:user/testuser1"
+        create_date:
+            description: the date and time, in ISO 8601 date-time format, when the user was created
+            type: str
+            sample: "2017-02-08T04:36:28+00:00"
+        user_id:
+            description: the stable and unique string identifying the user
+            type: str
+            sample: AGPAIDBWE12NSFINE55TM
+        user_name:
+            description: the friendly name that identifies the user
+            type: str
+            sample: testuser1
+        path:
+            description: the path to the user
+            type: str
+            sample: /
+'''
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+import traceback
+
+try:
+    from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+def compare_attached_policies(current_attached_policies, new_attached_policies):
+
+    # If new_attached_policies is None it means we want to remove all policies
+    if len(current_attached_policies) > 0 and new_attached_policies is None:
+        return False
+
+    current_attached_policies_arn_list = []
+    for policy in current_attached_policies:
+        current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+    if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
+        return True
+    else:
+        return False
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
+    # List comprehension that looks for any policy in the 'policy_names' list
+    # that does not begin with 'arn'. If there aren't any, short circuit.
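+    # (e.g. 'AmazonS3ReadOnlyAccess' must be looked up, while 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess' can pass through as-is.)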
+ # If there are, translate friendly name to the full arn + if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]): + return policy_names + allpolicies = {} + paginator = connection.get_paginator('list_policies') + policies = paginator.paginate().build_full_result()['Policies'] + + for policy in policies: + allpolicies[policy['PolicyName']] = policy['Arn'] + allpolicies[policy['Arn']] = policy['Arn'] + try: + return [allpolicies[policy] for policy in policy_names] + except KeyError as e: + module.fail_json(msg="Couldn't find policy: " + str(e)) + + +def create_or_update_user(connection, module): + + params = dict() + params['UserName'] = module.params.get('name') + managed_policies = module.params.get('managed_policies') + purge_policies = module.params.get('purge_policies') + changed = False + if managed_policies: + managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + + # Get user + user = get_user(connection, module, params['UserName']) + + # If user is None, create it + if user is None: + # Check mode means we would create the user + if module.check_mode: + module.exit_json(changed=True) + + try: + connection.create_user(**params) + changed = True + except ClientError as e: + module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + except ParamValidationError as e: + module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc()) + + # Manage managed policies + current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) + if not compare_attached_policies(current_attached_policies, managed_policies): + current_attached_policies_arn_list = [] + for policy in current_attached_policies: + current_attached_policies_arn_list.append(policy['PolicyArn']) + + # If managed_policies has a single empty element we want to remove all attached policies + if purge_policies: + # Detach policies not present + for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): + changed = True + if not module.check_mode: + try: + connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) + except ClientError as e: + module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format( + policy_arn, params['UserName'], to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except ParamValidationError as e: + module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format( + policy_arn, params['UserName'], to_native(e)), + exception=traceback.format_exc()) + + # If there are policies to adjust that aren't in the current list, then things have changed + # Otherwise the only changes were in purging above + if set(managed_policies).difference(set(current_attached_policies_arn_list)): + changed = True + # If there are policies in managed_policies attach each policy + if managed_policies != [None] and not module.check_mode: + for policy_arn in managed_policies: + try: + connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) + except ClientError as e: + module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format( + policy_arn, params['UserName'], to_native(e)), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except ParamValidationError as e: + module.fail_json(msg="Unable to attach policy {0} to user {1}: 
{2}".format( + policy_arn, params['UserName'], to_native(e)), + exception=traceback.format_exc()) + if module.check_mode: + module.exit_json(changed=changed) + + # Get the user again + user = get_user(connection, module, params['UserName']) + + module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user)) + + +def destroy_user(connection, module): + + user_name = module.params.get('name') + + user = get_user(connection, module, user_name) + # User is not present + if not user: + module.exit_json(changed=False) + + # Check mode means we would remove this user + if module.check_mode: + module.exit_json(changed=True) + + # Remove any attached policies otherwise deletion fails + try: + for policy in get_attached_policy_list(connection, module, user_name): + connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn']) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + + try: + # Remove user's access keys + access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"] + for access_key in access_keys: + connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) + + # Remove user's login profile (console password) + delete_user_login_profile(connection, module, user_name) + + # Remove user's ssh public keys + ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] + for ssh_public_key in ssh_public_keys: + connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"]) + + # Remove user's service specific credentials + service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"] + for service_specific_credential in service_credentials: + connection.delete_service_specific_credential( + UserName=user_name, + ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"] + ) + + # Remove user's signing certificates + signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"] + for signing_certificate in signing_certificates: + connection.delete_signing_certificate( + UserName=user_name, + CertificateId=signing_certificate["CertificateId"] + ) + + # Remove user's MFA devices + mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"] + for mfa_device in mfa_devices: + connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"]) + + # Remove user's inline policies + inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"] + for policy_name in inline_policies: + connection.delete_user_policy(UserName=user_name, PolicyName=policy_name) + + # Remove user's group membership + user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"] + for group in user_groups: + connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) + + connection.delete_user(UserName=user_name) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + + module.exit_json(changed=True) + + +def get_user(connection, module, name): + + params = dict() + params['UserName'] = name + + try: + return connection.get_user(**params) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchEntity': + return None + else: + module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)), + 
**camel_dict_to_snake_dict(e.response))
+
+
+def get_attached_policy_list(connection, module, name):
+
+    try:
+        return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchEntity':
+            return None
+        else:
+            module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
+
+
+def delete_user_login_profile(connection, module, user_name):
+
+    try:
+        return connection.delete_login_profile(UserName=user_name)
+    except ClientError as e:
+        if e.response["Error"]["Code"] == "NoSuchEntity":
+            return None
+        else:
+            module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name))
+
+
+def main():
+
+    argument_spec = dict(
+        name=dict(required=True, type='str'),
+        managed_policies=dict(default=[], type='list', aliases=['managed_policy']),
+        state=dict(choices=['present', 'absent'], required=True),
+        purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    connection = module.client('iam')
+
+    state = module.params.get("state")
+
+    if state == 'present':
+        create_or_update_user(connection, module)
+    else:
+        destroy_user(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/iam_user_info.py b/iam_user_info.py
new file mode 100644
index 00000000000..d478f3306d3
--- /dev/null
+++ b/iam_user_info.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: iam_user_info
+short_description: Gather IAM user(s) facts in AWS
+description:
+  - This module can be used to gather IAM user(s) facts in AWS.
+author:
+  - Constantin Bugneac (@Constantin07)
+  - Abhijeet Kasurde (@Akasurde)
+options:
+  name:
+    description:
+      - The name of the IAM user to look for.
+    required: false
+    type: str
+  group:
+    description:
+      - The group name of the IAM user to look for. Mutually exclusive with C(path).
+    required: false
+    type: str
+  path:
+    description:
+      - The path to the IAM user. Mutually exclusive with C(group).
+      - If specified, all user names whose path starts with the provided value are returned.
+    required: false
+    default: '/'
+    type: str
+requirements:
+  - botocore
+  - boto3
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Gather facts about "test" user.
+- name: Get IAM user facts
+  iam_user_info:
+    name: "test"
+
+# Gather facts about all users in the "dev" group.
+- name: Get IAM user facts
+  iam_user_info:
+    group: "dev"
+
+# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
+- name: Get IAM user facts
+  iam_user_info:
+    path: "/division_abc/subdivision_xyz/"
'''
+
+RETURN = r'''
+iam_users:
+    description: list of matching IAM users
+    returned: success
+    type: complex
+    contains:
+        arn:
+            description: the ARN of the user
+            returned: if user exists
+            type: str
+            sample: "arn:aws:iam::156360693172:user/dev/test_user"
+        create_date:
+            description: the datetime user was created
+            returned: if user exists
+            type: str
+            sample: "2016-05-24T12:24:59+00:00"
+        password_last_used:
+            description: the last datetime the password was used by user
+            returned: if password was used at least once
+            type: str
+            sample: "2016-05-25T13:39:11+00:00"
+        path:
+            description: the path to user
+            returned: if user exists
+            type: str
+            sample: "/dev/"
+        user_id:
+            description: the unique user id
+            returned: if user exists
+            type: str
+            sample: "AIDUIOOCQKTUGI6QJLGH2"
+        user_name:
+            description: the user name
+            returned: if user exists
+            type: str
+            sample: "test_user"
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+
+try:
+    import botocore
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_users_with_backoff(client, operation, **kwargs):
+    paginator = client.get_paginator(operation)
+    return paginator.paginate(**kwargs).build_full_result()
+
+
+def list_iam_users(connection, module):
+
+    name = module.params.get('name')
+    group = module.params.get('group')
+    path = module.params.get('path')
+
+    params = dict()
+    iam_users = []
+
+    if not group and not path:
+        if name:
+            params['UserName'] = name
+        try:
+            iam_users.append(connection.get_user(**params)['User'])
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
+
+    if group:
+        params['GroupName'] = group
+        try:
+            iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
+        if name:
+            iam_users = [user for user in iam_users if user['UserName'] == name]
+
+    if path and not group:
+        params['PathPrefix'] = path
+        try:
+            iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
+        if name:
+            iam_users = [user for user in iam_users if user['UserName'] == name]
+
+    module.exit_json(iam_users=[camel_dict_to_snake_dict(user) for user in iam_users])
+
+
+def main():
+    argument_spec = dict(
+        name=dict(),
+        group=dict(),
+        path=dict(default='/')
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['group', 'path']
+        ],
+        supports_check_mode=True
+    )
+
+    connection = module.client('iam')
+
+    list_iam_users(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/kinesis_stream.py b/kinesis_stream.py
new file mode 100644
index 00000000000..33db98eca1b
--- /dev/null
+++ b/kinesis_stream.py
@@ -0,0 +1,1425 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: kinesis_stream +short_description: Manage a Kinesis Stream. +description: + - Create or Delete a Kinesis Stream. + - Update the retention period of a Kinesis Stream. + - Update Tags on a Kinesis Stream. + - Enable/disable server side encryption on a Kinesis Stream. +requirements: [ boto3 ] +author: Allen Sanabria (@linuxdynasty) +options: + name: + description: + - The name of the Kinesis Stream you are managing. + required: true + type: str + shards: + description: + - The number of shards you want to have with this stream. + - This is required when I(state=present) + type: int + retention_period: + description: + - The length of time (in hours) data records are accessible after they are added to + the stream. + - The default retention period is 24 hours and can not be less than 24 hours. + - The maximum retention period is 168 hours. + - The retention period can be modified during any point in time. + type: int + state: + description: + - Create or Delete the Kinesis Stream. + default: present + choices: [ 'present', 'absent' ] + type: str + wait: + description: + - Wait for operation to complete before returning. + default: true + type: bool + wait_timeout: + description: + - How many seconds to wait for an operation to complete before timing out. + default: 300 + type: int + tags: + description: + - "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })." + aliases: [ "resource_tags" ] + type: dict + encryption_state: + description: + - Enable or Disable encryption on the Kinesis Stream. + choices: [ 'enabled', 'disabled' ] + type: str + encryption_type: + description: + - The type of encryption. + - Defaults to C(KMS) + choices: ['KMS', 'NONE'] + type: str + key_id: + description: + - The GUID or alias for the KMS key. + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Basic creation example: +- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + shards: 10 + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic creation example with tags: +- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + shards: 10 + tags: + Env: development + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours: +- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + retention_period: 48 + shards: 10 + tags: + Env: development + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic delete example: +- name: Delete Kinesis Stream test-stream and wait for it to finish deleting. + kinesis_stream: + name: test-stream + state: absent + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic enable encryption example: +- name: Encrypt Kinesis Stream test-stream. 
+  kinesis_stream:
+    name: test-stream
+    state: present
+    encryption_state: enabled
+    encryption_type: KMS
+    key_id: alias/aws/kinesis
+    wait: yes
+    wait_timeout: 600
+  register: test_stream
+
+# Basic disable encryption example:
+- name: Disable encryption on Kinesis Stream test-stream.
+  kinesis_stream:
+    name: test-stream
+    state: present
+    encryption_state: disabled
+    encryption_type: KMS
+    key_id: alias/aws/kinesis
+    wait: yes
+    wait_timeout: 600
+  register: test_stream
+'''
+
+RETURN = '''
+stream_name:
+    description: The name of the Kinesis Stream.
+    returned: when state == present.
+    type: str
+    sample: "test-stream"
+stream_arn:
+    description: The Amazon resource identifier.
+    returned: when state == present.
+    type: str
+    sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
+stream_status:
+    description: The current state of the Kinesis Stream.
+    returned: when state == present.
+    type: str
+    sample: "ACTIVE"
+retention_period_hours:
+    description: Number of hours messages will be kept for a Kinesis Stream.
+    returned: when state == present.
+    type: int
+    sample: 24
+tags:
+    description: Dictionary containing all the tags associated with the Kinesis stream.
+    returned: when state == present.
+    type: dict
+    sample: {
+        "Name": "Splunk",
+        "Env": "development"
+    }
+'''
+
+import re
+import datetime
+import time
+from functools import reduce
+
+try:
+    import botocore.exceptions
+except ImportError:
+    pass  # Taken care of by ec2.HAS_BOTO3
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils._text import to_native
+
+
+def convert_to_lower(data):
+    """Convert all CamelCase keys in a dict to lowercase snake_case keys.
+    Args:
+        data (dict): Dictionary with keys that have upper cases in them
+            Example.. FooBar == foo_bar
+            if a val is of type datetime.datetime, it will be converted to
+            the ISO 8601
+
+    Basic Usage:
+        >>> test = {'FooBar': []}
+        >>> test = convert_to_lower(test)
+        {
+            'foo_bar': []
+        }
+
+    Returns:
+        Dictionary
+    """
+    results = dict()
+    if isinstance(data, dict):
+        for key, val in data.items():
+            key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
+            if key[0] == '_':
+                key = key[1:]
+            if isinstance(val, datetime.datetime):
+                results[key] = val.isoformat()
+            elif isinstance(val, dict):
+                results[key] = convert_to_lower(val)
+            elif isinstance(val, list):
+                converted = list()
+                for item in val:
+                    converted.append(convert_to_lower(item))
+                results[key] = converted
+            else:
+                results[key] = val
+    return results
+
+
+def make_tags_in_proper_format(tags):
+    """Take a list of tags in the AWS Tags format and convert them into a dictionary.
+    Args:
+        tags (list): The AWS-format tags to convert.
+
+    Basic Usage:
+        >>> tags = [{'Key': 'env', 'Value': 'development'}]
+        >>> make_tags_in_proper_format(tags)
+        {
+            "env": "development",
+        }
+
+    Returns:
+        Dict
+    """
+    formatted_tags = dict()
+    for tag in tags:
+        formatted_tags[tag.get('Key')] = tag.get('Value')
+
+    return formatted_tags
+
+
+def make_tags_in_aws_format(tags):
+    """Take a dictionary of tags and convert them into the AWS Tags format.
+    Args:
+        tags (dict): The tags you want applied.
+
+    Basic Usage:
+        >>> tags = {'env': 'development', 'service': 'web'}
+        >>> make_tags_in_aws_format(tags)
+        [
+            {
+                "Value": "web",
+                "Key": "service"
+            },
+            {
+                "Value": "development",
+                "Key": "env"
+            }
+        ]
+
+    Returns:
+        List
+    """
+    formatted_tags = list()
+    for key, val in tags.items():
+        formatted_tags.append({
+            'Key': key,
+            'Value': val
+        })
+
+    return formatted_tags
+
+
+def get_tags(client, stream_name, check_mode=False):
+    """Retrieve the tags for a Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): Name of the Kinesis stream.
+
+    Kwargs:
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> get_tags(client, stream_name)
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+    err_msg = ''
+    success = False
+    params = {
+        'StreamName': stream_name,
+    }
+    results = dict()
+    try:
+        if not check_mode:
+            results = (
+                client.list_tags_for_stream(**params)['Tags']
+            )
+        else:
+            results = [
+                {
+                    'Key': 'DryRunMode',
+                    'Value': 'true'
+                },
+            ]
+        success = True
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg, results
+
+
+def find_stream(client, stream_name, check_mode=False):
+    """Retrieve a Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): Name of the Kinesis stream.
+
+    Kwargs:
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+    err_msg = ''
+    success = False
+    params = {
+        'StreamName': stream_name,
+    }
+    results = dict()
+    has_more_shards = True
+    shards = list()
+    try:
+        if not check_mode:
+            while has_more_shards:
+                results = (
+                    client.describe_stream(**params)['StreamDescription']
+                )
+                shards.extend(results.pop('Shards'))
+                has_more_shards = results['HasMoreShards']
+            results['Shards'] = shards
+            num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
+            results['OpenShardsCount'] = len(shards) - num_closed_shards
+            results['ClosedShardsCount'] = num_closed_shards
+            results['ShardsCount'] = len(shards)
+        else:
+            results = {
+                'OpenShardsCount': 5,
+                'ClosedShardsCount': 0,
+                'ShardsCount': 5,
+                'HasMoreShards': True,
+                'RetentionPeriodHours': 24,
+                'StreamName': stream_name,
+                'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
+                'StreamStatus': 'ACTIVE',
+                'EncryptionType': 'NONE'
+            }
+        success = True
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg, results
+
+
+def wait_for_status(client, stream_name, status, wait_timeout=300,
+                    check_mode=False):
+    """Wait for the status to change for a Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        stream_name (str): The name of the kinesis stream.
+        status (str): The status to wait for.
+            examples. status=available, status=deleted
+
+    Kwargs:
+        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> wait_for_status(client, stream_name, 'ACTIVE', 300)
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+    polling_increment_secs = 5
+    wait_timeout = time.time() + wait_timeout
+    status_achieved = False
+    stream = dict()
+    err_msg = ""
+
+    while wait_timeout > time.time():
+        try:
+            find_success, find_msg, stream = (
+                find_stream(client, stream_name, check_mode=check_mode)
+            )
+            if check_mode:
+                status_achieved = True
+                break
+
+            elif status != 'DELETING':
+                if find_success and stream:
+                    if stream.get('StreamStatus') == status:
+                        status_achieved = True
+                        break
+
+            else:
+                if not find_success:
+                    status_achieved = True
+                    break
+
+        except botocore.exceptions.ClientError as e:
+            err_msg = to_native(e)
+
+        time.sleep(polling_increment_secs)
+
+    if not status_achieved:
+        err_msg = "Wait timeout reached while waiting for results"
+    else:
+        err_msg = "Status {0} achieved successfully".format(status)
+
+    return status_achieved, err_msg, stream
+
+
+def tags_action(client, stream_name, tags, action='create', check_mode=False):
+    """Create or delete tags on a Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the Kinesis stream.
+        tags (dict): The tags to add, or the tag keys to remove.
+
+    Kwargs:
+        action (str): The action to perform.
+            valid actions == create and delete
+            default=create
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> tags = {'env': 'development'}
+        >>> tags_action(client, stream_name, tags, action='create')
+        [True, '']
+
+    Returns:
+        List (bool, str)
+    """
+    success = False
+    err_msg = ""
+    params = {'StreamName': stream_name}
+    try:
+        if not check_mode:
+            if action == 'create':
+                params['Tags'] = tags
+                client.add_tags_to_stream(**params)
+                success = True
+            elif action == 'delete':
+                params['TagKeys'] = list(tags)
+                client.remove_tags_from_stream(**params)
+                success = True
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+        else:
+            if action == 'create':
+                success = True
+            elif action == 'delete':
+                success = True
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg
+
+
+def recreate_tags_from_list(list_of_tags):
+    """Recreate tags from a list of tuples into the Amazon Tag format.
+    Args:
+        list_of_tags (list): List of tuples.
+
+    Basic Usage:
+        >>> list_of_tags = [('Env', 'Development')]
+        >>> recreate_tags_from_list(list_of_tags)
+        [
+            {
+                "Value": "Development",
+                "Key": "Env"
+            }
+        ]
+
+    Returns:
+        List
+    """
+    tags = list()
+    i = 0
+    for i in range(len(list_of_tags)):
+        key_name = list_of_tags[i][0]
+        key_val = list_of_tags[i][1]
+        tags.append(
+            {
+                'Key': key_name,
+                'Value': key_val
+            }
+        )
+    return tags
+
+
+def update_tags(client, stream_name, tags, check_mode=False):
+    """Update tags for an Amazon Kinesis stream.
+    Args:
+        stream_name (str): The name of the Kinesis stream.
+        tags (dict): Dictionary of tags you want applied to the Kinesis stream.
+
+    Kwargs:
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> tags = {'env': 'development'}
+        >>> update_tags(client, stream_name, tags)
+        [True, '']
+
+    Return:
+        Tuple (bool, bool, str)
+    """
+    success = False
+    changed = False
+    err_msg = ''
+    tag_success, tag_msg, current_tags = (
+        get_tags(client, stream_name, check_mode=check_mode)
+    )
+    if current_tags:
+        tags = make_tags_in_aws_format(tags)
+        current_tags_set = (
+            set(
+                reduce(
+                    lambda x, y: x + y,
+                    [make_tags_in_proper_format(current_tags).items()]
+                )
+            )
+        )
+
+        new_tags_set = (
+            set(
+                reduce(
+                    lambda x, y: x + y,
+                    [make_tags_in_proper_format(tags).items()]
+                )
+            )
+        )
+        tags_to_delete = list(current_tags_set.difference(new_tags_set))
+        tags_to_update = list(new_tags_set.difference(current_tags_set))
+        if tags_to_delete:
+            tags_to_delete = make_tags_in_proper_format(
+                recreate_tags_from_list(tags_to_delete)
+            )
+            delete_success, delete_msg = (
+                tags_action(
+                    client, stream_name, tags_to_delete, action='delete',
+                    check_mode=check_mode
+                )
+            )
+            if not delete_success:
+                return delete_success, changed, delete_msg
+        if tags_to_update:
+            tags = make_tags_in_proper_format(
+                recreate_tags_from_list(tags_to_update)
+            )
+        else:
+            return True, changed, 'Tags do not need to be updated'
+
+    if tags:
+        create_success, create_msg = (
+            tags_action(
+                client, stream_name, tags, action='create',
+                check_mode=check_mode
+            )
+        )
+        if create_success:
+            changed = True
+        return create_success, changed, create_msg
+
+    return success, changed, err_msg
+
+
+def stream_action(client, stream_name, shard_count=1, action='create',
+                  timeout=300, check_mode=False):
+    """Create or Delete an Amazon Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the kinesis stream.
+
+    Kwargs:
+        shard_count (int): Number of shards this stream will use.
+        action (str): The action to perform.
+            valid actions == create and delete
+            default=create
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> shard_count = 20
+        >>> stream_action(client, stream_name, shard_count, action='create')
+
+    Returns:
+        List (bool, str)
+    """
+    success = False
+    err_msg = ''
+    params = {
+        'StreamName': stream_name
+    }
+    try:
+        if not check_mode:
+            if action == 'create':
+                params['ShardCount'] = shard_count
+                client.create_stream(**params)
+                success = True
+            elif action == 'delete':
+                client.delete_stream(**params)
+                success = True
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+        else:
+            if action == 'create':
+                success = True
+            elif action == 'delete':
+                success = True
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg
+
+
+def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
+                             timeout=300, check_mode=False):
+    """Start or stop server-side encryption on an Amazon Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the kinesis stream.
+
+    Kwargs:
+        action (str): The action to perform.
+            valid actions == start_encryption and stop_encryption
+            default=start_encryption
+        encryption_type (str): NONE or KMS
+        key_id (str): The GUID or alias for the KMS key
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='KMS', key_id='alias/aws')
+
+    Returns:
+        List (bool, str)
+    """
+    success = False
+    err_msg = ''
+    params = {
+        'StreamName': stream_name
+    }
+    try:
+        if not check_mode:
+            if action == 'start_encryption':
+                params['EncryptionType'] = encryption_type
+                params['KeyId'] = key_id
+                client.start_stream_encryption(**params)
+                success = True
+            elif action == 'stop_encryption':
+                params['EncryptionType'] = encryption_type
+                params['KeyId'] = key_id
+                client.stop_stream_encryption(**params)
+                success = True
+            else:
+                err_msg = 'Invalid encryption action {0}'.format(action)
+        else:
+            if action == 'start_encryption':
+                success = True
+            elif action == 'stop_encryption':
+                success = True
+            else:
+                err_msg = 'Invalid encryption action {0}'.format(action)
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg
+
+
+def retention_action(client, stream_name, retention_period=24,
+                     action='increase', check_mode=False):
+    """Increase or Decrease the retention of messages in the Kinesis stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the kinesis stream.
+
+    Kwargs:
+        retention_period (int): This is how long messages will be kept before
+            they are discarded. This can not be less than 24 hours.
+        action (str): The action to perform.
+            valid actions == increase and decrease
+            default=increase
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> retention_period = 48
+        >>> retention_action(client, stream_name, retention_period, action='increase')
+
+    Returns:
+        Tuple (bool, str)
+    """
+    success = False
+    err_msg = ''
+    params = {
+        'StreamName': stream_name
+    }
+    try:
+        if not check_mode:
+            if action == 'increase':
+                params['RetentionPeriodHours'] = retention_period
+                client.increase_stream_retention_period(**params)
+                success = True
+                err_msg = (
+                    'Retention Period increased successfully to {0}'.format(retention_period)
+                )
+            elif action == 'decrease':
+                params['RetentionPeriodHours'] = retention_period
+                client.decrease_stream_retention_period(**params)
+                success = True
+                err_msg = (
+                    'Retention Period decreased successfully to {0}'.format(retention_period)
+                )
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+        else:
+            if action == 'increase':
+                success = True
+            elif action == 'decrease':
+                success = True
+            else:
+                err_msg = 'Invalid action {0}'.format(action)
+
+    except botocore.exceptions.ClientError as e:
+        err_msg = to_native(e)
+
+    return success, err_msg
+
+
+def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False):
+    """Increase or Decrease the number of shards in the Kinesis stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the kinesis stream.
+
+    Kwargs:
+        number_of_shards (int): Number of shards this stream will use.
+            default=1
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> stream_name = 'test-stream'
+        >>> number_of_shards = 3
+        >>> update_shard_count(client, stream_name, number_of_shards)
+
+    Returns:
+        Tuple (bool, str)
+    """
+    success = True
+    err_msg = ''
+    params = {
+        'StreamName': stream_name,
+        'ScalingType': 'UNIFORM_SCALING'
+    }
+    if not check_mode:
+        params['TargetShardCount'] = number_of_shards
+        try:
+            client.update_shard_count(**params)
+        except botocore.exceptions.ClientError as e:
+            return False, str(e)
+
+    return success, err_msg
+
+
+def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
+           tags=None, wait=False, wait_timeout=300, check_mode=False):
+    """Update an Amazon Kinesis Stream.
+    Args:
+        client (botocore.client.EC2): Boto3 client.
+        stream_name (str): The name of the kinesis stream.
+
+    Kwargs:
+        number_of_shards (int): Number of shards this stream will use.
+            default=1
+        retention_period (int): This is how long messages will be kept before
+            they are discarded. This can not be less than 24 hours.
+        tags (dict): The tags you want applied.
+        wait (bool): Wait until Stream is ACTIVE.
+            default=False
+        wait_timeout (int): How long to wait until this operation is considered failed.
+            default=300
+        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
+            default=False
+
+    Basic Usage:
+        >>> client = boto3.client('kinesis')
+        >>> current_stream = {
+            'ShardCount': 3,
+            'HasMoreShards': True,
+            'RetentionPeriodHours': 24,
+            'StreamName': 'test-stream',
+            'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
+            'StreamStatus': 'ACTIVE'
+        }
+        >>> stream_name = 'test-stream'
+        >>> retention_period = 48
+        >>> number_of_shards = 10
+        >>> update(client, current_stream, stream_name,
+                   number_of_shards, retention_period)
+
+    Returns:
+        Tuple (bool, bool, str)
+    """
+    success = True
+    changed = False
+    err_msg = ''
+    if retention_period:
+        if wait:
+            wait_success, wait_msg, current_stream = (
+                wait_for_status(
+                    client, stream_name, 'ACTIVE', wait_timeout,
+                    check_mode=check_mode
+                )
+            )
+            if not wait_success:
+                return wait_success, False, wait_msg
+
+        if current_stream.get('StreamStatus') == 'ACTIVE':
+            retention_changed = False
+            if retention_period > current_stream['RetentionPeriodHours']:
+                retention_changed, retention_msg = (
+                    retention_action(
+                        client, stream_name, retention_period, action='increase',
+                        check_mode=check_mode
+                    )
+                )
+
+            elif retention_period < current_stream['RetentionPeriodHours']:
+                retention_changed, retention_msg = (
+                    retention_action(
+                        client, stream_name, retention_period, action='decrease',
+                        check_mode=check_mode
+                    )
+                )
+
+            elif retention_period == current_stream['RetentionPeriodHours']:
+                retention_msg = (
+                    'Retention {0} is the same as {1}'
+                    .format(
+                        retention_period,
+                        current_stream['RetentionPeriodHours']
+                    )
+                )
+                success = True
+
+            if retention_changed:
+                success = True
+                changed = True
+
+            err_msg = retention_msg
+            if changed and wait:
+                wait_success, wait_msg, current_stream = (
+                    wait_for_status(
+                        client, stream_name, 'ACTIVE', wait_timeout,
+                        check_mode=check_mode
+                    )
+                )
+                if not wait_success:
+                    return wait_success, False, wait_msg
+            elif changed and not wait:
+                stream_found, stream_msg, current_stream = (
+                    find_stream(client, stream_name, check_mode=check_mode)
+                )
+                if stream_found:
+                    if current_stream['StreamStatus'] != 'ACTIVE':
+                        err_msg = (
+                            'Retention Period for {0} is in the process of updating'
+                            .format(stream_name)
+                        )
+                        return 
success, changed, err_msg + else: + err_msg = ( + 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' + .format(current_stream.get('StreamStatus', 'UNKNOWN')) + ) + return success, changed, err_msg + + if current_stream['OpenShardsCount'] != number_of_shards: + success, err_msg = ( + update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) + ) + + if not success: + return success, changed, err_msg + + changed = True + + if wait: + wait_success, wait_msg, current_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if not wait_success: + return wait_success, changed, wait_msg + else: + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found and current_stream['StreamStatus'] != 'ACTIVE': + err_msg = ( + 'Number of shards for {0} is in the process of updating' + .format(stream_name) + ) + return success, changed, err_msg + + if tags: + tag_success, tag_changed, err_msg = ( + update_tags(client, stream_name, tags, check_mode=check_mode) + ) + if wait: + success, err_msg, status_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if success and changed: + err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name) + elif success and not changed: + err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name) + + return success, changed, err_msg + + +def create_stream(client, stream_name, number_of_shards=1, retention_period=None, + tags=None, wait=False, wait_timeout=300, check_mode=False): + """Create an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + number_of_shards (int): Number of shards this stream will use. + default=1 + retention_period (int): Can not be less than 24 hours + default=None + tags (dict): The tags you want applied. + default=None + wait (bool): Wait until Stream is ACTIVE. + default=False + wait_timeout (int): How long to wait until this operation is considered failed. + default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
+ default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> number_of_shards = 10 + >>> tags = {'env': 'test'} + >>> create_stream(client, stream_name, number_of_shards, tags=tags) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + results = dict() + + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + + if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait: + wait_success, wait_msg, current_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + + if stream_found and current_stream.get('StreamStatus') != 'DELETING': + success, changed, err_msg = update( + client, current_stream, stream_name, number_of_shards, + retention_period, tags, wait, wait_timeout, check_mode=check_mode + ) + else: + create_success, create_msg = ( + stream_action( + client, stream_name, number_of_shards, action='create', + check_mode=check_mode + ) + ) + if not create_success: + changed = True + err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg) + return False, True, err_msg, {} + else: + changed = True + if wait: + wait_success, wait_msg, results = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = ( + 'Kinesis Stream {0} is in the process of being created' + .format(stream_name) + ) + if not wait_success: + return wait_success, True, wait_msg, results + else: + err_msg = ( + 'Kinesis Stream {0} created successfully' + .format(stream_name) + ) + + if tags: + changed, err_msg = ( + tags_action( + client, stream_name, tags, action='create', + check_mode=check_mode + ) + ) + if changed: + success = True + if not success: + return success, changed, err_msg, results + + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if retention_period and current_stream.get('StreamStatus') == 'ACTIVE': + changed, err_msg = ( + retention_action( + client, stream_name, retention_period, action='increase', + check_mode=check_mode + ) + ) + if changed: + success = True + if not success: + return success, changed, err_msg, results + else: + err_msg = ( + 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' + .format(current_stream.get('StreamStatus', 'UNKNOWN')) + ) + success = create_success + changed = True + + if success: + stream_found, stream_msg, results = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + tag_success, tag_msg, current_tags = ( + get_tags(client, stream_name, check_mode=check_mode) + ) + if current_tags and not check_mode: + current_tags = make_tags_in_proper_format(current_tags) + results['Tags'] = current_tags + elif check_mode and tags: + results['Tags'] = tags + else: + results['Tags'] = dict() + results = convert_to_lower(results) + + return success, changed, err_msg, results + + +def delete_stream(client, stream_name, wait=False, wait_timeout=300, + check_mode=False): + """Delete an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + wait (bool): Wait until Stream is ACTIVE. + default=False + wait_timeout (int): How long to wait until this operation is considered failed. + default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
+ default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> delete_stream(client, stream_name) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + results = dict() + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found: + success, err_msg = ( + stream_action( + client, stream_name, action='delete', check_mode=check_mode + ) + ) + if success: + changed = True + if wait: + success, err_msg, results = ( + wait_for_status( + client, stream_name, 'DELETING', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = 'Stream {0} deleted successfully'.format(stream_name) + if not success: + return success, True, err_msg, results + else: + err_msg = ( + 'Stream {0} is in the process of being deleted' + .format(stream_name) + ) + else: + success = True + changed = False + err_msg = 'Stream {0} does not exist'.format(stream_name) + + return success, changed, err_msg, results + + +def start_stream_encryption(client, stream_name, encryption_type='', key_id='', + wait=False, wait_timeout=300, check_mode=False): + """Start encryption on an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + encryption_type (str): KMS or NONE + key_id (str): KMS key GUID or alias + wait (bool): Wait until Stream is ACTIVE. + default=False + wait_timeout (int): How long to wait until this operation is considered failed. + default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> key_id = 'alias/aws' + >>> encryption_type = 'KMS' + >>> start_stream_encryption(client, stream_name,encryption_type,key_id) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + params = { + 'StreamName': stream_name + } + + results = dict() + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found: + success, err_msg = ( + stream_encryption_action( + client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode + ) + ) + if success: + changed = True + if wait: + success, err_msg, results = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) + if not success: + return success, True, err_msg, results + else: + err_msg = ( + 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) + ) + else: + success = True + changed = False + err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name) + + return success, changed, err_msg, results + + +def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', + wait=True, wait_timeout=300, check_mode=False): + """Stop encryption on an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + encryption_type (str): KMS or NONE + key_id (str): KMS key GUID or alias + wait (bool): Wait until Stream is ACTIVE. + default=False + wait_timeout (int): How long to wait until this operation is considered failed. 
+ default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> start_stream_encryption(client, stream_name,encryption_type, key_id) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + params = { + 'StreamName': stream_name + } + + results = dict() + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found: + if current_stream.get('EncryptionType') == 'KMS': + success, err_msg = ( + stream_encryption_action( + client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode + ) + ) + elif current_stream.get('EncryptionType') == 'NONE': + success = True + + if success: + changed = True + if wait: + success, err_msg, results = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) + if not success: + return success, True, err_msg, results + else: + err_msg = ( + 'Stream {0} is in the process of stopping encryption.'.format(stream_name) + ) + else: + success = True + changed = False + err_msg = 'Stream {0} does not exist.'.format(stream_name) + + return success, changed, err_msg, results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + shards=dict(default=None, required=False, type='int'), + retention_period=dict(default=None, required=False, type='int'), + tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + wait=dict(default=True, required=False, type='bool'), + wait_timeout=dict(default=300, required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + encryption_type=dict(required=False, choices=['NONE', 'KMS']), + key_id=dict(required=False, type='str'), + encryption_state=dict(required=False, choices=['enabled', 'disabled']), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + retention_period = module.params.get('retention_period') + stream_name = module.params.get('name') + shards = module.params.get('shards') + state = module.params.get('state') + tags = module.params.get('tags') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + encryption_type = module.params.get('encryption_type') + key_id = module.params.get('key_id') + encryption_state = module.params.get('encryption_state') + + if state == 'present' and not shards: + module.fail_json(msg='Shards is required when state == present.') + + if retention_period: + if retention_period < 24: + module.fail_json(msg='Retention period can not be less than 24 hours.') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + check_mode = module.check_mode + try: + region, ec2_url, aws_connect_kwargs = ( + get_aws_connection_info(module, boto3=True) + ) + client = ( + boto3_conn( + module, conn_type='client', resource='kinesis', + region=region, endpoint=ec2_url, **aws_connect_kwargs + ) + ) + except botocore.exceptions.ClientError as e: + err_msg = 'Boto3 Client Error - {0}'.format(to_native(e.msg)) + module.fail_json( + success=False, changed=False, result={}, msg=err_msg + ) + + if state == 'present': + success, changed, err_msg, results = ( + create_stream( + client, 
stream_name, shards, retention_period, tags, + wait, wait_timeout, check_mode + ) + ) + if encryption_state == 'enabled': + success, changed, err_msg, results = ( + start_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode + ) + ) + elif encryption_state == 'disabled': + success, changed, err_msg, results = ( + stop_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode + ) + ) + elif state == 'absent': + success, changed, err_msg, results = ( + delete_stream(client, stream_name, wait, wait_timeout, check_mode) + ) + + if success: + module.exit_json( + success=success, changed=changed, msg=err_msg, **results + ) + else: + module.fail_json( + success=success, changed=changed, msg=err_msg, result=results + ) + + +if __name__ == '__main__': + main() diff --git a/lambda.py b/lambda.py new file mode 100644 index 00000000000..bf79bba0967 --- /dev/null +++ b/lambda.py @@ -0,0 +1,624 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lambda +short_description: Manage AWS Lambda functions +description: + - Allows for the management of Lambda functions. +requirements: [ boto3 ] +options: + name: + description: + - The name you want to assign to the function you are uploading. Cannot be changed. + required: true + type: str + state: + description: + - Create or delete Lambda function. + default: present + choices: [ 'present', 'absent' ] + type: str + runtime: + description: + - The runtime environment for the Lambda function you are uploading. + - Required when creating a function. Uses parameters as described in boto3 docs. + - Required when I(state=present). + - For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). + type: str + role: + description: + - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) + resources. You may use the bare ARN if the role belongs to the same AWS account. + - Required when I(state=present). + type: str + handler: + description: + - The function within your code that Lambda calls to begin execution. + type: str + zip_file: + description: + - A .zip file containing your deployment package + - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. + aliases: [ 'src' ] + type: str + s3_bucket: + description: + - Amazon S3 bucket name where the .zip file containing your deployment package is stored. + - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. + - I(s3_bucket) and I(s3_key) are required together. + type: str + s3_key: + description: + - The Amazon S3 object (the deployment package) key name you want to upload. + - I(s3_bucket) and I(s3_key) are required together. + type: str + s3_object_version: + description: + - The Amazon S3 object (the deployment package) version you want to upload. + type: str + description: + description: + - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit. 
+ type: str + timeout: + description: + - The function maximum execution time in seconds after which Lambda should terminate the function. + default: 3 + type: int + memory_size: + description: + - The amount of memory, in MB, your Lambda function is given. + default: 128 + type: int + vpc_subnet_ids: + description: + - List of subnet IDs to run Lambda function in. + - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC. + - If set, I(vpc_security_group_ids) must also be set. + type: list + elements: str + vpc_security_group_ids: + description: + - List of VPC security group IDs to associate with the Lambda function. + - Required when I(vpc_subnet_ids) is used. + type: list + elements: str + environment_variables: + description: + - A dictionary of environment variables the Lambda function is given. + type: dict + dead_letter_arn: + description: + - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. + type: str + tracing_mode: + description: + - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default. + choices: ['Active', 'PassThrough'] + type: str + tags: + description: + - tag dict to apply to the function (requires botocore 1.5.40 or above). + type: dict +author: + - 'Steyn Huizinga (@steynovich)' +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Create Lambda functions +- name: looped creation + lambda: + name: '{{ item.name }}' + state: present + zip_file: '{{ item.zip_file }}' + runtime: 'python2.7' + role: 'arn:aws:iam::987654321012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + vpc_subnet_ids: + - subnet-123abcde + - subnet-edcba321 + vpc_security_group_ids: + - sg-123abcde + - sg-edcba321 + environment_variables: '{{ item.env_vars }}' + tags: + key1: 'value1' + loop: + - name: HelloWorld + zip_file: hello-code.zip + env_vars: + key1: "first" + key2: "second" + - name: ByeBye + zip_file: bye-code.zip + env_vars: + key1: "1" + key2: "2" + +# To remove previously added tags pass an empty dict +- name: remove tags + lambda: + name: 'Lambda function' + state: present + zip_file: 'code.zip' + runtime: 'python2.7' + role: 'arn:aws:iam::987654321012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + tags: {} + +# Basic Lambda function deletion +- name: Delete Lambda functions HelloWorld and ByeBye + lambda: + name: '{{ item }}' + state: absent + loop: + - HelloWorld + - ByeBye +''' + +RETURN = ''' +code: + description: the lambda function location returned by get_function in boto3 + returned: success + type: dict + sample: + { + 'location': 'a presigned S3 URL', + 'repository_type': 'S3', + } +configuration: + description: the lambda function metadata returned by get_function in boto3 + returned: success + type: dict + sample: + { + 'code_sha256': 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=', + 'code_size': 123, + 'description': 'My function', + 'environment': { + 'variables': { + 'key': 'value' + } + }, + 'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1', + 'function_name': 'myFunction', + 'handler': 'index.handler', + 'last_modified': '2017-08-01T00:00:00.000+0000', + 'memory_size': 128, + 'revision_id': 'a2x9886d-d48a-4a0c-ab64-82abc005x80c', + 'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution', + 'runtime': 'nodejs6.10', + 'tracing_config': { 'mode': 'Active' 
},
+        'timeout': 3,
+        'version': '1',
+        'vpc_config': {
+            'security_group_ids': [],
+            'subnet_ids': [],
+            'vpc_id': '123'
+        }
+    }
+'''
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags
+import base64
+import hashlib
+import traceback
+import re
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
+except ImportError:
+    pass  # protected by AnsibleAWSModule
+
+
+def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs):
+    """return the account information (account id and partition) we are currently working on
+
+    get_account_info tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy so we try in
+    several different ways. Giving either IAM or STS privileges to
+    the account should be enough to permit this.
+    """
+    account_id = None
+    partition = None
+    try:
+        sts_client = boto3_conn(module, conn_type='client', resource='sts',
+                                region=region, endpoint=endpoint, **aws_connect_kwargs)
+        caller_id = sts_client.get_caller_identity()
+        account_id = caller_id.get('Account')
+        partition = caller_id.get('Arn').split(':')[1]
+    except ClientError:
+        try:
+            iam_client = boto3_conn(module, conn_type='client', resource='iam',
+                                    region=region, endpoint=endpoint, **aws_connect_kwargs)
+            arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':')
+        except ClientError as e:
+            if (e.response['Error']['Code'] == 'AccessDenied'):
+                except_msg = to_native(e)
+                m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
+                if m:
+                    account_id = m.group(4)
+                    partition = m.group(1)
+            if account_id is None:
+                module.fail_json_aws(e, msg="getting account information")
+            if partition is None:
+                module.fail_json_aws(e, msg="getting account information: partition")
+        except Exception as e:
+            module.fail_json_aws(e, msg="getting account information")
+
+    return account_id, partition
+
+
+def get_current_function(connection, function_name, qualifier=None):
+    try:
+        if qualifier is not None:
+            return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
+        return connection.get_function(FunctionName=function_name)
+    except ClientError as e:
+        try:
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                return None
+        except (KeyError, AttributeError):
+            pass
+        raise e
+
+
+def sha256sum(filename):
+    hasher = hashlib.sha256()
+    with open(filename, 'rb') as f:
+        hasher.update(f.read())
+
+    code_hash = hasher.digest()
+    code_b64 = base64.b64encode(code_hash)
+    hex_digest = code_b64.decode('utf-8')
+
+    return hex_digest
+
+
+def set_tag(client, module, tags, function):
+    if not hasattr(client, "list_tags"):
+        module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
+
+    changed = False
+    arn = function['Configuration']['FunctionArn']
+
+    try:
+        current_tags = client.list_tags(Resource=arn).get('Tags', {})
+    except ClientError as e:
+        module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
+                         exception=traceback.format_exc())
+
+    tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
+
+    try:
+        if tags_to_remove:
+            client.untag_resource(
+                Resource=arn,
TagKeys=tags_to_remove + ) + changed = True + + if tags_to_add: + client.tag_resource( + Resource=arn, + Tags=tags_to_add + ) + changed = True + + except ClientError as e: + module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn, + to_native(e)), exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + except BotoCoreError as e: + module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn, + to_native(e)), exception=traceback.format_exc()) + + return changed + + +def main(): + argument_spec = dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + runtime=dict(), + role=dict(), + handler=dict(), + zip_file=dict(aliases=['src']), + s3_bucket=dict(), + s3_key=dict(), + s3_object_version=dict(), + description=dict(default=''), + timeout=dict(type='int', default=3), + memory_size=dict(type='int', default=128), + vpc_subnet_ids=dict(type='list'), + vpc_security_group_ids=dict(type='list'), + environment_variables=dict(type='dict'), + dead_letter_arn=dict(), + tracing_mode=dict(choices=['Active', 'PassThrough']), + tags=dict(type='dict'), + ) + + mutually_exclusive = [['zip_file', 's3_key'], + ['zip_file', 's3_bucket'], + ['zip_file', 's3_object_version']] + + required_together = [['s3_key', 's3_bucket'], + ['vpc_subnet_ids', 'vpc_security_group_ids']] + + required_if = [['state', 'present', ['runtime', 'handler', 'role']]] + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_if=required_if) + + name = module.params.get('name') + state = module.params.get('state').lower() + runtime = module.params.get('runtime') + role = module.params.get('role') + handler = module.params.get('handler') + s3_bucket = module.params.get('s3_bucket') + s3_key = module.params.get('s3_key') + s3_object_version = module.params.get('s3_object_version') + zip_file = module.params.get('zip_file') + description = module.params.get('description') + timeout = module.params.get('timeout') + memory_size = module.params.get('memory_size') + vpc_subnet_ids = module.params.get('vpc_subnet_ids') + vpc_security_group_ids = module.params.get('vpc_security_group_ids') + environment_variables = module.params.get('environment_variables') + dead_letter_arn = module.params.get('dead_letter_arn') + tracing_mode = module.params.get('tracing_mode') + tags = module.params.get('tags') + + check_mode = module.check_mode + changed = False + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg='region must be specified') + + try: + client = boto3_conn(module, conn_type='client', resource='lambda', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except (ClientError, ValidationError) as e: + module.fail_json_aws(e, msg="Trying to connect to AWS") + + if state == 'present': + if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): + role_arn = role + else: + # get account ID and assemble ARN + account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs) + role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) + + # Get function configuration if present, False otherwise + current_function = get_current_function(client, name) + + # Update existing Lambda function + if state == 'present' and current_function: + + # Get current state + current_config = current_function['Configuration'] + current_version = None 
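+
+        # The update path below computes a delta: func_kwargs starts with only
+        # FunctionName, each setting that differs from current_config is added,
+        # and update_function_configuration is called only once func_kwargs has
+        # grown beyond that single key. A minimal sketch of the same pattern
+        # (names here are illustrative, not part of this module):
+        #
+        #     desired = {'Timeout': timeout, 'MemorySize': memory_size}
+        #     delta = {k: v for k, v in desired.items()
+        #              if v and current_config.get(k) != v}
+        #     if delta:
+        #         client.update_function_configuration(FunctionName=name, **delta)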
+ + # Update function configuration + func_kwargs = {'FunctionName': name} + + # Update configuration if needed + if role_arn and current_config['Role'] != role_arn: + func_kwargs.update({'Role': role_arn}) + if handler and current_config['Handler'] != handler: + func_kwargs.update({'Handler': handler}) + if description and current_config['Description'] != description: + func_kwargs.update({'Description': description}) + if timeout and current_config['Timeout'] != timeout: + func_kwargs.update({'Timeout': timeout}) + if memory_size and current_config['MemorySize'] != memory_size: + func_kwargs.update({'MemorySize': memory_size}) + if runtime and current_config['Runtime'] != runtime: + func_kwargs.update({'Runtime': runtime}) + if (environment_variables is not None) and (current_config.get( + 'Environment', {}).get('Variables', {}) != environment_variables): + func_kwargs.update({'Environment': {'Variables': environment_variables}}) + if dead_letter_arn is not None: + if current_config.get('DeadLetterConfig'): + if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn: + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + else: + if dead_letter_arn != "": + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode): + func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + + # If VPC configuration is desired + if vpc_subnet_ids or vpc_security_group_ids: + if not vpc_subnet_ids or not vpc_security_group_ids: + module.fail_json(msg='vpc connectivity requires at least one security group and one subnet') + + if 'VpcConfig' in current_config: + # Compare VPC config with current config + current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] + current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] + + subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) + vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) + + if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: + new_vpc_config = {'SubnetIds': vpc_subnet_ids, + 'SecurityGroupIds': vpc_security_group_ids} + func_kwargs.update({'VpcConfig': new_vpc_config}) + else: + # No VPC configuration is desired, assure VPC config is empty when present in current config + if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'): + func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}}) + + # Upload new configuration if configuration has changed + if len(func_kwargs) > 1: + try: + if not check_mode: + response = client.update_function_configuration(**func_kwargs) + current_version = response['Version'] + changed = True + except (ParamValidationError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to update lambda configuration") + + # Update code configuration + code_kwargs = {'FunctionName': name, 'Publish': True} + + # Update S3 location + if s3_bucket and s3_key: + # If function is stored on S3 always update + code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key}) + + # If S3 Object Version is given + if s3_object_version: + code_kwargs.update({'S3ObjectVersion': s3_object_version}) + + # Compare local checksum, update remote code when different + elif zip_file: + local_checksum = sha256sum(zip_file) + remote_checksum = current_config['CodeSha256'] + + # Only upload new code 
when local code is different compared to the remote code + if local_checksum != remote_checksum: + try: + with open(zip_file, 'rb') as f: + encoded_zip = f.read() + code_kwargs.update({'ZipFile': encoded_zip}) + except IOError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + # Tag Function + if tags is not None: + if set_tag(client, module, tags, current_function): + changed = True + + # Upload new code if needed (e.g. code checksum has changed) + if len(code_kwargs) > 2: + try: + if not check_mode: + response = client.update_function_code(**code_kwargs) + current_version = response['Version'] + changed = True + except (ParamValidationError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to upload new code") + + # Describe function code and configuration + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after updating') + + # We're done + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + # Function doesn't exists, create new Lambda function + elif state == 'present': + if s3_bucket and s3_key: + # If function is stored on S3 + code = {'S3Bucket': s3_bucket, + 'S3Key': s3_key} + if s3_object_version: + code.update({'S3ObjectVersion': s3_object_version}) + elif zip_file: + # If function is stored in local zipfile + try: + with open(zip_file, 'rb') as f: + zip_content = f.read() + + code = {'ZipFile': zip_content} + except IOError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + else: + module.fail_json(msg='Either S3 object or path to zipfile required') + + func_kwargs = {'FunctionName': name, + 'Publish': True, + 'Runtime': runtime, + 'Role': role_arn, + 'Code': code, + 'Timeout': timeout, + 'MemorySize': memory_size, + } + + if description is not None: + func_kwargs.update({'Description': description}) + + if handler is not None: + func_kwargs.update({'Handler': handler}) + + if environment_variables: + func_kwargs.update({'Environment': {'Variables': environment_variables}}) + + if dead_letter_arn: + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + + if tracing_mode: + func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + + # If VPC configuration is given + if vpc_subnet_ids or vpc_security_group_ids: + if not vpc_subnet_ids or not vpc_security_group_ids: + module.fail_json(msg='vpc connectivity requires at least one security group and one subnet') + + func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, + 'SecurityGroupIds': vpc_security_group_ids}}) + + # Finally try to create function + current_version = None + try: + if not check_mode: + response = client.create_function(**func_kwargs) + current_version = response['Version'] + changed = True + except (ParamValidationError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to create function") + + # Tag Function + if tags is not None: + if set_tag(client, module, tags, get_current_function(client, name)): + changed = True + + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after creating') + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + # Delete existing Lambda function + if state == 'absent' and current_function: + try: + if not check_mode: + client.delete_function(FunctionName=name) + changed = True + except (ParamValidationError, ClientError) as e: + 
module.fail_json_aws(e, msg="Trying to delete Lambda function") + + module.exit_json(changed=changed) + + # Function already absent, do nothing + elif state == 'absent': + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/lambda_alias.py b/lambda_alias.py new file mode 100644 index 00000000000..0e28d2a147c --- /dev/null +++ b/lambda_alias.py @@ -0,0 +1,389 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lambda_alias +short_description: Creates, updates or deletes AWS Lambda function aliases +description: + - This module allows the management of AWS Lambda functions aliases via the Ansible + framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function + itself and M(lambda_event) to manage event source mappings. + + +author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb) +options: + function_name: + description: + - The name of the function alias. + required: true + type: str + state: + description: + - Describes the desired state. + default: "present" + choices: ["present", "absent"] + type: str + name: + description: + - Name of the function alias. + required: true + aliases: ['alias_name'] + type: str + description: + description: + - A short, user-defined function alias description. + type: str + function_version: + description: + - Version associated with the Lambda function alias. + A value of 0 (or omitted parameter) sets the alias to the $LATEST version. + aliases: ['version'] + type: int +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +# Simple example to create a lambda function and publish a version +- hosts: localhost + gather_facts: no + vars: + state: present + project_folder: /path/to/deployment/package + deployment_package: lambda.zip + account: 123456789012 + production_version: 5 + tasks: + - name: AWS Lambda Function + lambda: + state: "{{ state | default('present') }}" + name: myLambdaFunction + publish: True + description: lambda function description + code_s3_bucket: package-bucket + code_s3_key: "lambda/{{ deployment_package }}" + local_path: "{{ project_folder }}/{{ deployment_package }}" + runtime: python2.7 + timeout: 5 + handler: lambda.handler + memory_size: 128 + role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" + + - name: Get information + lambda_info: + name: myLambdaFunction + register: lambda_info + - name: show results + debug: + msg: "{{ lambda_info['lambda_facts'] }}" + +# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) + - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " + lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Dev + description: Development is $LATEST version + +# The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST')
+  - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
+    lambda_alias:
+      state: "{{ state | default('present') }}"
+      function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+      name: QA
+      version: "{{ lambda_info.lambda_facts.Version }}"
+      description: "QA is version {{ lambda_info.lambda_facts.Version }}"
+    when: lambda_info.lambda_facts.Version != "$LATEST"
+
+# The Prod alias will have a fixed version based on a variable
+  - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
+    lambda_alias:
+      state: "{{ state | default('present') }}"
+      function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+      name: Prod
+      version: "{{ production_version }}"
+      description: "Production is version {{ production_version }}"
+'''
+
+RETURN = '''
+---
+alias_arn:
+    description: Full ARN of the function, including the alias
+    returned: success
+    type: str
+    sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
+description:
+    description: A short description of the alias
+    returned: success
+    type: str
+    sample: The development stage for my hot new app
+function_version:
+    description: The qualifier that the alias refers to
+    returned: success
+    type: str
+    sample: $LATEST
+name:
+    description: The name of the alias assigned
+    returned: success
+    type: str
+    sample: dev
+'''
+
+import re
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
+                                                                         get_aws_connection_info)
+
+
+class AWSConnection:
+    """
+    Create the connection object and client objects as required.
+    """
+
+    def __init__(self, ansible_obj, resources, boto3_=True):
+
+        try:
+            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3_)
+
+            self.resource_client = dict()
+            if not resources:
+                resources = ['lambda']
+
+            resources.append('iam')
+
+            for resource in resources:
+                aws_connect_kwargs.update(dict(region=self.region,
+                                               endpoint=self.endpoint,
+                                               conn_type='client',
+                                               resource=resource
+                                               ))
+                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+            # if region is not provided, then get default profile/session region
+            if not self.region:
+                self.region = self.resource_client['lambda'].meta.region_name
+
+        except (ClientError, ParamValidationError, MissingParametersError) as e:
+            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+        try:
+            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+        except (ClientError, ValueError, KeyError, IndexError):
+            self.account_id = ''
+
+    def client(self, resource='lambda'):
+        return self.resource_client[resource]
+
+
+def pc(key):
+    """
+    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+    :param key:
+    :return:
+    """
+
+    return "".join([token.capitalize() for token in key.split('_')])
+
+
+def set_api_params(module, module_params):
+    """
+    Sets module parameters to those expected by the boto3 API.
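+    For example, with module_params=('function_name', 'name') and values
+    {'function_name': 'myFunction', 'name': 'Dev'}, this returns
+    {'FunctionName': 'myFunction', 'Name': 'Dev'}; parameters that are unset
+    (falsy) are skipped.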
+ + :param module: + :param module_params: + :return: + """ + + api_params = dict() + + for param in module_params: + module_param = module.params.get(param, None) + if module_param: + api_params[pc(param)] = module_param + + return api_params + + +def validate_params(module, aws): + """ + Performs basic parameter validation. + + :param module: Ansible module reference + :param aws: AWS client connection + :return: + """ + + function_name = module.params['function_name'] + + # validate function name + if not re.search(r'^[\w\-:]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string + if module.params['function_version'] == 0: + module.params['function_version'] = '$LATEST' + else: + module.params['function_version'] = str(module.params['function_version']) + + return + + +def get_lambda_alias(module, aws): + """ + Returns the lambda function alias if it exists. + + :param module: Ansible module reference + :param aws: AWS client connection + :return: + """ + + client = aws.client('lambda') + + # set API parameters + api_params = set_api_params(module, ('function_name', 'name')) + + # check if alias exists and get facts + try: + results = client.get_alias(**api_params) + + except (ClientError, ParamValidationError, MissingParametersError) as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + results = None + else: + module.fail_json(msg='Error retrieving function alias: {0}'.format(e)) + + return results + + +def lambda_alias(module, aws): + """ + Adds, updates or deletes lambda function aliases. 
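+    In outline: the alias is looked up with get_lambda_alias(); for
+    state=present it is created with create_alias() or, when
+    function_version/description differ, updated with update_alias(); for
+    state=absent it is removed with delete_alias(). A dict of the changed
+    flag plus the resulting alias facts is returned.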
+ + :param module: Ansible module reference + :param aws: AWS client connection + :return dict: + """ + client = aws.client('lambda') + results = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + facts = get_lambda_alias(module, aws) + if facts: + current_state = 'present' + + if state == 'present': + if current_state == 'present': + + # check if alias has changed -- only version and description can change + alias_params = ('function_version', 'description') + for param in alias_params: + if module.params.get(param) != facts.get(pc(param)): + changed = True + break + + if changed: + api_params = set_api_params(module, ('function_name', 'name')) + api_params.update(set_api_params(module, alias_params)) + + if not module.check_mode: + try: + results = client.update_alias(**api_params) + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error updating function alias: {0}'.format(e)) + + else: + # create new function alias + api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) + + try: + if not module.check_mode: + results = client.create_alias(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error creating function alias: {0}'.format(e)) + + else: # state = 'absent' + if current_state == 'present': + # delete the function + api_params = set_api_params(module, ('function_name', 'name')) + + try: + if not module.check_mode: + results = client.delete_alias(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error deleting function alias: {0}'.format(e)) + + return dict(changed=changed, **dict(results or facts)) + + +def main(): + """ + Main entry point. + + :return dict: ansible facts + """ + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + function_name=dict(required=True), + name=dict(required=True, aliases=['alias_name']), + function_version=dict(type='int', required=False, default=0, aliases=['version']), + description=dict(required=False, default=None), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[] + ) + + # validate dependencies + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module.') + + aws = AWSConnection(module, ['lambda']) + + validate_params(module, aws) + + results = lambda_alias(module, aws) + + module.exit_json(**camel_dict_to_snake_dict(results)) + + +if __name__ == '__main__': + main() diff --git a/lambda_event.py b/lambda_event.py new file mode 100644 index 00000000000..aea5c8ad4da --- /dev/null +++ b/lambda_event.py @@ -0,0 +1,448 @@ +#!/usr/bin/python +# (c) 2016, Pierre Jodouin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lambda_event +short_description: Creates, updates or deletes AWS Lambda function event mappings +description: + - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream + events via the Ansible framework. 
These event source mappings are relevant only in the AWS Lambda pull model, where
+    AWS Lambda invokes the function.
+    It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
+    function itself and M(lambda_alias) to manage function aliases.
+
+
+author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
+options:
+  lambda_function_arn:
+    description:
+      - The name or ARN of the lambda function.
+    required: true
+    aliases: ['function_name', 'function_arn']
+    type: str
+  state:
+    description:
+      - Describes the desired state.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+  alias:
+    description:
+      - Name of the function alias.
+      - Mutually exclusive with I(version).
+    type: str
+  version:
+    description:
+      - Version of the Lambda function.
+      - Mutually exclusive with I(alias).
+    type: int
+  event_source:
+    description:
+      - Source of the event that triggers the lambda function.
+      - For DynamoDB and Kinesis events, select C(stream)
+      - For SQS queues, select C(sqs)
+    default: stream
+    choices: ['stream', 'sqs']
+    type: str
+  source_params:
+    description:
+      - Sub-parameters required for event source.
+    suboptions:
+      source_arn:
+        description:
+          - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
+        type: str
+        required: true
+      enabled:
+        description:
+          - Indicates whether AWS Lambda should begin polling or reading from the event source.
+        default: true
+        type: bool
+      batch_size:
+        description:
+          - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
+        default: 100
+        type: int
+      starting_position:
+        description:
+          - The position in the stream where AWS Lambda should start reading.
+          - Required when I(event_source=stream).
+        choices: [TRIM_HORIZON, LATEST]
+        type: str
+    required: true
+    type: dict
+requirements:
+    - boto3
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+---
+# Example that creates a lambda event notification for a DynamoDB stream
+- hosts: localhost
+  gather_facts: no
+  vars:
+    state: present
+  tasks:
+  - name: DynamoDB stream event mapping
+    lambda_event:
+      state: "{{ state | default('present') }}"
+      event_source: stream
+      function_name: "{{ function_name }}"
+      alias: Dev
+      source_params:
+        source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
+        enabled: True
+        batch_size: 100
+        starting_position: TRIM_HORIZON
+
+  - name: Show source event
+    debug:
+      var: lambda_stream_events
+'''
+
+RETURN = '''
+---
+lambda_stream_events:
+    description: list of dictionaries returned by the API describing stream event mappings
+    returned: success
+    type: list
+'''
+
+import re
+import sys
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
+                                                                         get_aws_connection_info)
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+#   Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+class AWSConnection:
+    """
+    Create the connection object and client objects as required.
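+
+    Illustrative usage (assuming `module` is an AnsibleModule instance):
+
+        aws = AWSConnection(module, ['lambda'])
+        lambda_client = aws.client('lambda')
+        account_id = aws.account_id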
+    """
+
+    def __init__(self, ansible_obj, resources, use_boto3=True):
+
+        try:
+            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+            self.resource_client = dict()
+            if not resources:
+                resources = ['lambda']
+
+            resources.append('iam')
+
+            for resource in resources:
+                aws_connect_kwargs.update(dict(region=self.region,
+                                               endpoint=self.endpoint,
+                                               conn_type='client',
+                                               resource=resource
+                                               ))
+                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+            # if region is not provided, then get default profile/session region
+            if not self.region:
+                self.region = self.resource_client['lambda'].meta.region_name
+
+        except (ClientError, ParamValidationError, MissingParametersError) as e:
+            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+        # set account ID
+        try:
+            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+        except (ClientError, ValueError, KeyError, IndexError):
+            self.account_id = ''
+
+    def client(self, resource='lambda'):
+        return self.resource_client[resource]
+
+
+def pc(key):
+    """
+    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+    :param key:
+    :return:
+    """
+
+    return "".join([token.capitalize() for token in key.split('_')])
+
+
+def ordered_obj(obj):
+    """
+    Order object for comparison purposes
+
+    :param obj:
+    :return:
+    """
+
+    if isinstance(obj, dict):
+        return sorted((k, ordered_obj(v)) for k, v in obj.items())
+    if isinstance(obj, list):
+        return sorted(ordered_obj(x) for x in obj)
+    else:
+        return obj
+
+
+def set_api_sub_params(params):
+    """
+    Sets module sub-parameters to those expected by the boto3 API.
+
+    :param params:
+    :return:
+    """
+
+    api_params = dict()
+
+    for param in params.keys():
+        param_value = params.get(param, None)
+        if param_value:
+            api_params[pc(param)] = param_value
+
+    return api_params
+
+
+def validate_params(module, aws):
+    """
+    Performs basic parameter validation.
+
+    :param module:
+    :param aws:
+    :return:
+    """
+
+    function_name = module.params['lambda_function_arn']
+
+    # validate function name
+    if not re.search(r'^[\w\-:]+$', function_name):
+        module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
+        )
+    if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
+        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+    elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
+        module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
+
+    # check if 'function_name' needs to be expanded in full ARN format
+    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
+        function_name = module.params['lambda_function_arn']
+        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
+
+    qualifier = get_qualifier(module)
+    if qualifier:
+        function_arn = module.params['lambda_function_arn']
+        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+    return
+
+
+def get_qualifier(module):
+    """
+    Returns the function qualifier as a version or alias or None.
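+    For example, version=5 yields '5', version=0 with alias='Dev' yields 'Dev',
+    and version=0 with no alias yields None.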
+ + :param module: + :return: + """ + + qualifier = None + if module.params['version'] > 0: + qualifier = str(module.params['version']) + elif module.params['alias']: + qualifier = str(module.params['alias']) + + return qualifier + + +# --------------------------------------------------------------------------------------------------- +# +# Lambda Event Handlers +# +# This section defines a lambda_event_X function where X is an AWS service capable of initiating +# the execution of a Lambda function (pull only). +# +# --------------------------------------------------------------------------------------------------- + +def lambda_event_stream(module, aws): + """ + Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications. + :param module: + :param aws: + :return: + """ + + client = aws.client('lambda') + facts = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + api_params = dict(FunctionName=module.params['lambda_function_arn']) + + # check if required sub-parameters are present and valid + source_params = module.params['source_params'] + + source_arn = source_params.get('source_arn') + if source_arn: + api_params.update(EventSourceArn=source_arn) + else: + module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") + + # check if optional sub-parameters are valid, if present + batch_size = source_params.get('batch_size') + if batch_size: + try: + source_params['batch_size'] = int(batch_size) + except ValueError: + module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) + + # optional boolean value needs special treatment as not present does not imply False + source_param_enabled = module.boolean(source_params.get('enabled', 'True')) + + # check if event mapping exist + try: + facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] + if facts: + current_state = 'present' + except ClientError as e: + module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) + + if state == 'present': + if current_state == 'absent': + + starting_position = source_params.get('starting_position') + if starting_position: + api_params.update(StartingPosition=starting_position) + elif module.params.get('event_source') == 'sqs': + # starting position is not required for SQS + pass + else: + module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.") + + if source_arn: + api_params.update(Enabled=source_param_enabled) + if source_params.get('batch_size'): + api_params.update(BatchSize=source_params.get('batch_size')) + + try: + if not module.check_mode: + facts = client.create_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) + + else: + # current_state is 'present' + api_params = dict(FunctionName=module.params['lambda_function_arn']) + current_mapping = facts[0] + api_params.update(UUID=current_mapping['UUID']) + mapping_changed = False + + # check if anything changed + if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: + api_params.update(BatchSize=source_params['batch_size']) + mapping_changed = True + + if source_param_enabled is not None: + if source_param_enabled: + if current_mapping['State'] not in ('Enabled', 'Enabling'): + 
api_params.update(Enabled=True) + mapping_changed = True + else: + if current_mapping['State'] not in ('Disabled', 'Disabling'): + api_params.update(Enabled=False) + mapping_changed = True + + if mapping_changed: + try: + if not module.check_mode: + facts = client.update_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) + + else: + if current_state == 'present': + # remove the stream event mapping + api_params = dict(UUID=facts[0]['UUID']) + + try: + if not module.check_mode: + facts = client.delete_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e)) + + return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) + + +def main(): + """Produce a list of function suffixes which handle lambda events.""" + source_choices = ["stream", "sqs"] + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), + event_source=dict(required=False, default="stream", choices=source_choices), + source_params=dict(type='dict', required=True), + alias=dict(required=False, default=None), + version=dict(type='int', required=False, default=0), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['alias', 'version']], + required_together=[] + ) + + # validate dependencies + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module.') + + aws = AWSConnection(module, ['lambda']) + + validate_params(module, aws) + + if module.params['event_source'].lower() in ('stream', 'sqs'): + results = lambda_event_stream(module, aws) + else: + module.fail_json(msg='Please select `stream` or `sqs` as the event type') + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/lambda_facts.py b/lambda_facts.py new file mode 100644 index 00000000000..aa93d32ebb9 --- /dev/null +++ b/lambda_facts.py @@ -0,0 +1,389 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lambda_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(lambda_info) instead. +short_description: Gathers AWS Lambda function details as Ansible facts +description: + - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. + Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and + M(lambda_event) to manage lambda event source mappings. + + +options: + query: + description: + - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts. + choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] + default: "all" + type: str + function_name: + description: + - The name of the lambda function for which facts are requested. 
+ aliases: [ "function", "name"] + type: str + event_source_arn: + description: + - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. + type: str +author: Pierre Jodouin (@pjodouin) +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +# Simple example of listing all info for a function +- name: List all for a specific function + lambda_facts: + query: all + function_name: myFunction + register: my_function_details +# List all versions of a function +- name: List function versions + lambda_facts: + query: versions + function_name: myFunction + register: my_function_versions +# List all lambda function versions +- name: List all function + lambda_facts: + query: all + max_items: 20 +- name: show Lambda facts + debug: + var: lambda_facts +''' + +RETURN = ''' +--- +lambda_facts: + description: lambda facts + returned: success + type: dict +lambda_facts.function: + description: lambda function list + returned: success + type: dict +lambda_facts.function.TheName: + description: lambda function information, including event, mapping, and version information + returned: success + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +import json +import datetime +import sys +import re + + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +def fix_return(node): + """ + fixup returned dictionary + + :param node: + :return: + """ + + if isinstance(node, datetime.datetime): + node_value = str(node) + + elif isinstance(node, list): + node_value = [fix_return(item) for item in node] + + elif isinstance(node, dict): + node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) + + else: + node_value = node + + return node_value + + +def alias_details(client, module): + """ + Returns list of aliases for a specified function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + try: + lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(aliases=[]) + else: + module.fail_json_aws(e, msg="Trying to get aliases") + else: + module.fail_json(msg='Parameter function_name required for query=aliases.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def all_details(client, module): + """ + Returns all lambda related facts. 
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + lambda_facts[function_name] = {} + lambda_facts[function_name].update(config_details(client, module)[function_name]) + lambda_facts[function_name].update(alias_details(client, module)[function_name]) + lambda_facts[function_name].update(policy_details(client, module)[function_name]) + lambda_facts[function_name].update(version_details(client, module)[function_name]) + lambda_facts[function_name].update(mapping_details(client, module)[function_name]) + else: + lambda_facts.update(config_details(client, module)) + + return lambda_facts + + +def config_details(client, module): + """ + Returns configuration details for one or all lambda functions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + lambda_facts.update(client.get_function_configuration(FunctionName=function_name)) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(function={}) + else: + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + else: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(function_list=client.list_functions(**params)['Functions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(function_list=[]) + else: + module.fail_json_aws(e, msg="Trying to get function list") + + functions = dict() + for func in lambda_facts.pop('function_list', []): + functions[func['FunctionName']] = camel_dict_to_snake_dict(func) + return functions + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def mapping_details(client, module): + """ + Returns all lambda event source mappings. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + params = dict() + function_name = module.params.get('function_name') + + if function_name: + params['FunctionName'] = module.params.get('function_name') + + if module.params.get('event_source_arn'): + params['EventSourceArn'] = module.params.get('event_source_arn') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(mappings=[]) + else: + module.fail_json_aws(e, msg="Trying to get source event mappings") + + if function_name: + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + return camel_dict_to_snake_dict(lambda_facts) + + +def policy_details(client, module): + """ + Returns policy attached to a lambda function. 
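+
+    The 'policy' value is parsed from the JSON string returned by the API, so
+    the result looks like (illustrative):
+
+        {'myFunction': {'policy': {'Version': '2012-10-17', 'Statement': [...]}}}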
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(policy={}) + else: + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + else: + module.fail_json(msg='Parameter function_name required for query=policy.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def version_details(client, module): + """ + Returns all lambda function versions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(versions=[]) + else: + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + else: + module.fail_json(msg='Parameter function_name required for query=versions.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def main(): + """ + Main entry point. + + :return dict: ansible facts + """ + argument_spec = dict( + function_name=dict(required=False, default=None, aliases=['function', 'name']), + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), + event_source_arn=dict(required=False, default=None) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[] + ) + + # validate function_name if present + function_name = module.params['function_name'] + if function_name: + if not re.search(r"^[\w\-:]+$", function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + client = module.client('lambda') + + this_module = sys.modules[__name__] + + invocations = dict( + aliases='alias_details', + all='all_details', + config='config_details', + mappings='mapping_details', + policy='policy_details', + versions='version_details', + ) + + this_module_function = getattr(this_module, invocations[module.params['query']]) + all_facts = fix_return(this_module_function(client, module)) + + results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False) + + if module.check_mode: + results['msg'] = 'Check mode set but ignored for fact gathering only.' 
+ + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/lambda_info.py b/lambda_info.py new file mode 100644 index 00000000000..425c47f1ea5 --- /dev/null +++ b/lambda_info.py @@ -0,0 +1,380 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lambda_info +short_description: Gathers AWS Lambda function details +description: + - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. + - Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and + M(lambda_event) to manage lambda event source mappings. + + +options: + query: + description: + - Specifies the resource type for which to gather information. Leave blank to retrieve all information. + choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] + default: "all" + type: str + function_name: + description: + - The name of the lambda function for which information is requested. + aliases: [ "function", "name"] + type: str + event_source_arn: + description: + - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. + type: str +author: Pierre Jodouin (@pjodouin) +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +# Simple example of listing all info for a function +- name: List all for a specific function + lambda_info: + query: all + function_name: myFunction + register: my_function_details +# List all versions of a function +- name: List function versions + lambda_info: + query: versions + function_name: myFunction + register: my_function_versions +# List all lambda function versions +- name: List all function + lambda_info: + query: all + max_items: 20 + register: output +- name: show Lambda information + debug: + msg: "{{ output['function'] }}" +''' + +RETURN = ''' +--- +function: + description: lambda function list + returned: success + type: dict +function.TheName: + description: lambda function information, including event, mapping, and version information + returned: success + type: dict +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +import json +import datetime +import re + + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +def fix_return(node): + """ + fixup returned dictionary + + :param node: + :return: + """ + + if isinstance(node, datetime.datetime): + node_value = str(node) + + elif isinstance(node, list): + node_value = [fix_return(item) for item in node] + + elif isinstance(node, dict): + node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) + + else: + node_value = node + + return node_value + + +def alias_details(client, module): + """ + Returns list of aliases for a specified function. 
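+
+    Illustrative result shape (snake_cased from the API response):
+
+        {'myFunction': {'aliases': [{'name': 'Dev', 'function_version': '1', ...}]}}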
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + try: + lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(aliases=[]) + else: + module.fail_json_aws(e, msg="Trying to get aliases") + else: + module.fail_json(msg='Parameter function_name required for query=aliases.') + + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + +def all_details(client, module): + """ + Returns all lambda related facts. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + lambda_info[function_name] = {} + lambda_info[function_name].update(config_details(client, module)[function_name]) + lambda_info[function_name].update(alias_details(client, module)[function_name]) + lambda_info[function_name].update(policy_details(client, module)[function_name]) + lambda_info[function_name].update(version_details(client, module)[function_name]) + lambda_info[function_name].update(mapping_details(client, module)[function_name]) + else: + lambda_info.update(config_details(client, module)) + + return lambda_info + + +def config_details(client, module): + """ + Returns configuration details for one or all lambda functions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + lambda_info.update(client.get_function_configuration(FunctionName=function_name)) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(function={}) + else: + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + else: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_info.update(function_list=client.list_functions(**params)['Functions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(function_list=[]) + else: + module.fail_json_aws(e, msg="Trying to get function list") + + functions = dict() + for func in lambda_info.pop('function_list', []): + functions[func['FunctionName']] = camel_dict_to_snake_dict(func) + return functions + + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + +def mapping_details(client, module): + """ + Returns all lambda event source mappings. 
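+
+    When function_name is supplied the mappings are nested under that name,
+    e.g. {'myFunction': {'mappings': [...]}}; otherwise a flat
+    {'mappings': [...]} dictionary is returned.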
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_info = dict() + params = dict() + function_name = module.params.get('function_name') + + if function_name: + params['FunctionName'] = module.params.get('function_name') + + if module.params.get('event_source_arn'): + params['EventSourceArn'] = module.params.get('event_source_arn') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(mappings=[]) + else: + module.fail_json_aws(e, msg="Trying to get source event mappings") + + if function_name: + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + return camel_dict_to_snake_dict(lambda_info) + + +def policy_details(client, module): + """ + Returns policy attached to a lambda function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(policy={}) + else: + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + else: + module.fail_json(msg='Parameter function_name required for query=policy.') + + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + +def version_details(client, module): + """ + Returns all lambda function versions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_info.update(versions=[]) + else: + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + else: + module.fail_json(msg='Parameter function_name required for query=versions.') + + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + +def main(): + """ + Main entry point. 
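+
+    Maps the query parameter to one of the *_details() helpers via the
+    invocations table below and returns the (snake_cased) result under the
+    'function' key.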
+
+    :return dict: ansible facts
+    """
+    argument_spec = dict(
+        function_name=dict(required=False, default=None, aliases=['function', 'name']),
+        query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
+        event_source_arn=dict(required=False, default=None)
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[],
+        required_together=[]
+    )
+
+    # validate function_name if present
+    function_name = module.params['function_name']
+    if function_name:
+        if not re.search(r"^[\w\-:]+$", function_name):
+            module.fail_json(
+                msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
+            )
+        if len(function_name) > 64:
+            module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+    client = module.client('lambda')
+
+    invocations = dict(
+        aliases='alias_details',
+        all='all_details',
+        config='config_details',
+        mappings='mapping_details',
+        policy='policy_details',
+        versions='version_details',
+    )
+
+    this_module_function = globals()[invocations[module.params['query']]]
+    all_facts = fix_return(this_module_function(client, module))
+
+    results = dict(function=all_facts, changed=False)
+
+    if module.check_mode:
+        results['msg'] = 'Check mode set but ignored for fact gathering only.'
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lambda_policy.py b/lambda_policy.py
new file mode 100644
index 00000000000..35e7a273e02
--- /dev/null
+++ b/lambda_policy.py
@@ -0,0 +1,439 @@
+#!/usr/bin/python
+# Copyright (c) 2016, Pierre Jodouin
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: lambda_policy
+short_description: Creates, updates or deletes AWS Lambda policy statements
+description:
+  - This module allows the management of AWS Lambda policy statements.
+  - It is idempotent and supports "Check" mode.
+  - Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases,
+    M(lambda_event) to manage event source mappings such as Kinesis streams, M(execute_lambda) to execute a
+    lambda function and M(lambda_info) to gather information relating to one or more lambda functions.
+
+
+author:
+  - Pierre Jodouin (@pjodouin)
+  - Michael De La Rue (@mikedlr)
+options:
+  function_name:
+    description:
+      - "Name of the Lambda function whose resource policy you are updating by adding a new permission."
+      - "You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the"
+      - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail)). AWS Lambda also allows you to"
+      - "specify partial ARN (for example, C(account-id:Thumbnail)). Note that the length constraint applies only to the"
+      - "ARN. If you specify only the function name, it is limited to 64 characters in length."
+    required: true
+    aliases: ['lambda_function_arn', 'function_arn']
+    type: str
+
+  state:
+    description:
+      - Describes the desired state.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+
+  alias:
+    description:
+      - Name of the function alias.
Mutually exclusive with I(version). + type: str + + version: + description: + - Version of the Lambda function. Mutually exclusive with I(alias). + type: int + + statement_id: + description: + - A unique statement identifier. + required: true + aliases: ['sid'] + type: str + + action: + description: + - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with + lambda: followed by the API name (see Operations ). For example, C(lambda:CreateFunction) . You can use wildcard + (C(lambda:*)) to grant permission for all AWS Lambda actions." + required: true + type: str + + principal: + description: + - "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if + you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or + any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom + application in another AWS account to push events to AWS Lambda by invoking your function." + required: true + type: str + + source_arn: + description: + - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this + field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from + the specified bucket can invoke the function. + type: str + + source_account: + description: + - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket, + then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you + specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS + account created the bucket). You can also use this condition to specify all sources (that is, you don't + specify the I(source_arn) ) owned by a specific account. + type: str + + event_source_token: + description: + - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account). + type: str + +requirements: + - boto3 +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: no + vars: + state: present + tasks: + - name: Lambda S3 event notification + lambda_policy: + state: "{{ state | default('present') }}" + function_name: functionName + alias: Dev + statement_id: lambda-s3-myBucket-create-data-log + action: lambda:InvokeFunction + principal: s3.amazonaws.com + source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName + source_account: 123456789012 + register: lambda_policy_action + + - name: show results + debug: + var: lambda_policy_action + +''' + +RETURN = ''' +--- +lambda_policy_action: + description: describes what action was taken + returned: success + type: str +''' + +import json +import re +from ansible.module_utils._text import to_native +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + +try: + from botocore.exceptions import ClientError +except Exception: + pass # caught by AnsibleAWSModule + + +def pc(key): + """ + Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. 
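+
+    Doctest-style illustration:
+
+        >>> pc('source_arn')
+        'SourceArn'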
+ + :param key: + :return: + """ + + return "".join([token.capitalize() for token in key.split('_')]) + + +def policy_equal(module, current_statement): + for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'): + if module.params.get(param) != current_statement.get(param): + return False + + return True + + +def set_api_params(module, module_params): + """ + Sets module parameters to those expected by the boto3 API. + + :param module: + :param module_params: + :return: + """ + + api_params = dict() + + for param in module_params: + module_param = module.params.get(param) + if module_param is not None: + api_params[pc(param)] = module_param + + return api_params + + +def validate_params(module): + """ + Performs parameter validation beyond the module framework's validation. + + :param module: + :return: + """ + + function_name = module.params['function_name'] + + # validate function name + if function_name.startswith('arn:'): + if not re.search(r'^[\w\-:]+$', function_name): + module.fail_json( + msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name) + ) + if len(function_name) > 140: + module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) + else: + if not re.search(r'^[\w\-]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format( + function_name) + ) + if len(function_name) > 64: + module.fail_json( + msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + +def get_qualifier(module): + """ + Returns the function qualifier as a version or alias or None. + + :param module: + :return: + """ + + if module.params.get('version') is not None: + return to_native(module.params['version']) + elif module.params['alias']: + return to_native(module.params['alias']) + + return None + + +def extract_statement(policy, sid): + """return flattened single policy statement from a policy + + If a policy statement is present in the policy extract it and + return it in a flattened form. Otherwise return an empty + dictionary. + """ + if 'Statement' not in policy: + return {} + policy_statement = {} + # Now that we have the policy, check if required permission statement is present and flatten to + # simple dictionary if found. + for statement in policy['Statement']: + if statement['Sid'] == sid: + policy_statement['action'] = statement['Action'] + try: + policy_statement['principal'] = statement['Principal']['Service'] + except KeyError: + pass + try: + policy_statement['principal'] = statement['Principal']['AWS'] + except KeyError: + pass + try: + policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn'] + except KeyError: + pass + try: + policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount'] + except KeyError: + pass + try: + policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken'] + except KeyError: + pass + break + + return policy_statement + + +def get_policy_statement(module, client): + """Checks that policy exists and if so, that statement ID is present or absent. 
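+
+    A found statement is returned in the flattened form produced by
+    extract_statement(), for example (illustrative):
+
+        {'action': 'lambda:InvokeFunction', 'principal': 's3.amazonaws.com',
+         'source_arn': 'arn:aws:s3:::my-bucket', 'source_account': '123456789012'}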
+
+    :param module:
+    :param client:
+    :return:
+    """
+    sid = module.params['statement_id']
+
+    # set API parameters
+    api_params = set_api_params(module, ('function_name', ))
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    policy_results = None
+    # check if function policy exists
+    try:
+        policy_results = client.get_policy(**api_params)
+    except ClientError as e:
+        try:
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                return {}
+        except AttributeError:  # catches ClientErrors without response, e.g. fail before connect
+            pass
+        module.fail_json_aws(e, msg="retrieving function policy")
+    except Exception as e:
+        module.fail_json_aws(e, msg="retrieving function policy")
+
+    # get_policy returns a JSON string so must convert to dict before reassigning to its key
+    policy = json.loads(policy_results.get('Policy', '{}'))
+    return extract_statement(policy, sid)
+
+
+def add_policy_permission(module, client):
+    """
+    Adds a permission statement to the policy.
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    params = (
+        'function_name',
+        'statement_id',
+        'action',
+        'principal',
+        'source_arn',
+        'source_account',
+        'event_source_token')
+    api_params = set_api_params(module, params)
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    try:
+        if not module.check_mode:
+            client.add_permission(**api_params)
+        changed = True
+    except Exception as e:
+        module.fail_json_aws(e, msg="adding permission to policy")
+
+    return changed
+
+
+def remove_policy_permission(module, client):
+    """
+    Removes a permission statement from the policy.
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    api_params = set_api_params(module, ('function_name', 'statement_id'))
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    try:
+        if not module.check_mode:
+            client.remove_permission(**api_params)
+        changed = True
+    except Exception as e:
+        module.fail_json_aws(e, msg="removing permission from policy")
+
+    return changed
+
+
+def manage_state(module, lambda_client):
+    changed = False
+    current_state = 'absent'
+    state = module.params['state']
+    action_taken = 'none'
+
+    # check if the policy exists
+    current_policy_statement = get_policy_statement(module, lambda_client)
+    if current_policy_statement:
+        current_state = 'present'
+
+    if state == 'present':
+        if current_state == 'present' and not policy_equal(module, current_policy_statement):
+            remove_policy_permission(module, lambda_client)
+            changed = add_policy_permission(module, lambda_client)
+            action_taken = 'updated'
+        if current_state != 'present':
+            changed = add_policy_permission(module, lambda_client)
+            action_taken = 'added'
+    elif current_state == 'present':
+        # remove the policy statement
+        changed = remove_policy_permission(module, lambda_client)
+        action_taken = 'deleted'
+
+    return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
+
+
+def setup_module_object():
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
+        statement_id=dict(required=True, aliases=['sid']),
+        alias=dict(),
+        version=dict(type='int'),
+        action=dict(required=True, ),
+        principal=dict(required=True, ),
+        source_arn=dict(),
+        source_account=dict(),
+        event_source_token=dict(),
+    )
+
+    return
AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['alias', 'version'], + ['event_source_token', 'source_arn'], + ['event_source_token', 'source_account']], + ) + + +def main(): + """ + Main entry point. + + :return dict: ansible facts + """ + + module = setup_module_object() + client = module.client('lambda') + validate_params(module) + results = manage_state(module, client) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/lightsail.py b/lightsail.py new file mode 100644 index 00000000000..c09e63283d9 --- /dev/null +++ b/lightsail.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: lightsail +short_description: Manage instances in AWS Lightsail +description: + - Manage instances in AWS Lightsail. + - Instance tagging is not yet supported in this module. +author: + - "Nick Ball (@nickball)" + - "Prasad Katti (@prasadkatti)" +options: + state: + description: + - Indicate desired state of the target. + - I(rebooted) and I(restarted) are aliases. + default: present + choices: ['present', 'absent', 'running', 'restarted', 'rebooted', 'stopped'] + type: str + name: + description: Name of the instance. + required: true + type: str + zone: + description: + - AWS availability zone in which to launch the instance. + - Required when I(state=present) + type: str + blueprint_id: + description: + - ID of the instance blueprint image. + - Required when I(state=present) + type: str + bundle_id: + description: + - Bundle of specification info for the instance. + - Required when I(state=present). + type: str + user_data: + description: + - Launch script that can configure the instance with additional data. + type: str + key_pair_name: + description: + - Name of the key pair to use with the instance. + - If I(state=present) and a key_pair_name is not provided, the default keypair from the region will be used. + type: str + wait: + description: + - Wait for the instance to be in state 'running' before returning. + - If I(wait=false) an ip_address may not be returned. + - Has no effect when I(state=rebooted) or I(state=absent). + type: bool + default: true + wait_timeout: + description: + - How long before I(wait) gives up, in seconds. 
+    default: 300
+    type: int
+
+requirements:
+  - boto3
+
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+
+EXAMPLES = '''
+# Create a new Lightsail instance
+- lightsail:
+    state: present
+    name: my_instance
+    region: us-east-1
+    zone: us-east-1a
+    blueprint_id: ubuntu_16_04
+    bundle_id: nano_1_0
+    key_pair_name: id_rsa
+    user_data: " echo 'hello world' > /home/ubuntu/test.txt"
+  register: my_instance
+
+# Delete an instance
+- lightsail:
+    state: absent
+    region: us-east-1
+    name: my_instance
+
+'''
+
+RETURN = '''
+changed:
+  description: if the instance has been modified/created
+  returned: always
+  type: bool
+  sample:
+    changed: true
+instance:
+  description: instance data
+  returned: always
+  type: dict
+  sample:
+    arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
+    blueprint_id: "ubuntu_16_04"
+    blueprint_name: "Ubuntu"
+    bundle_id: "nano_1_0"
+    created_at: "2017-03-27T08:38:59.714000-04:00"
+    hardware:
+      cpu_count: 1
+      ram_size_in_gb: 0.5
+    is_static_ip: false
+    location:
+      availability_zone: "us-east-1a"
+      region_name: "us-east-1"
+    name: "my_instance"
+    networking:
+      monthly_transfer:
+        gb_per_month_allocated: 1024
+      ports:
+        - access_direction: "inbound"
+          access_from: "Anywhere (0.0.0.0/0)"
+          access_type: "public"
+          common_name: ""
+          from_port: 80
+          protocol: tcp
+          to_port: 80
+        - access_direction: "inbound"
+          access_from: "Anywhere (0.0.0.0/0)"
+          access_type: "public"
+          common_name: ""
+          from_port: 22
+          protocol: tcp
+          to_port: 22
+    private_ip_address: "172.26.8.14"
+    public_ip_address: "34.207.152.202"
+    resource_type: "Instance"
+    ssh_key_name: "keypair"
+    state:
+      code: 16
+      name: running
+    support_code: "588307843083/i-0997c97831ee21e33"
+    username: "ubuntu"
+'''
+
+import time
+
+try:
+    import botocore
+except ImportError:
+    # will be caught by AnsibleAWSModule
+    pass
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def find_instance_info(module, client, instance_name, fail_if_not_found=False):
+
+    try:
+        res = client.get_instance(instanceName=instance_name)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'NotFoundException' and not fail_if_not_found:
+            return None
+        module.fail_json_aws(e)
+    return res['instance']
+
+
+def wait_for_instance_state(module, client, instance_name, states):
+    """
+    `states` is a list of instance states that we are waiting for.
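+
+    For example, wait_for_instance_state(module, client, 'my_instance',
+    ['running', 'stopped']) polls every five seconds until the instance reaches
+    one of those states or wait_timeout elapses, failing the module on timeout.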
+ """ + + wait_timeout = module.params.get('wait_timeout') + wait_max = time.time() + wait_timeout + while wait_max > time.time(): + try: + instance = find_instance_info(module, client, instance_name) + if instance['state']['name'] in states: + break + time.sleep(5) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + else: + module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -' + ' {1}'.format(instance_name, states)) + + +def create_instance(module, client, instance_name): + + inst = find_instance_info(module, client, instance_name) + if inst: + module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst)) + else: + create_params = {'instanceNames': [instance_name], + 'availabilityZone': module.params.get('zone'), + 'blueprintId': module.params.get('blueprint_id'), + 'bundleId': module.params.get('bundle_id'), + 'userData': module.params.get('user_data')} + + key_pair_name = module.params.get('key_pair_name') + if key_pair_name: + create_params['keyPairName'] = key_pair_name + + try: + client.create_instances(**create_params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + wait = module.params.get('wait') + if wait: + desired_states = ['running'] + wait_for_instance_state(module, client, instance_name, desired_states) + inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + + module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst)) + + +def delete_instance(module, client, instance_name): + + changed = False + + inst = find_instance_info(module, client, instance_name) + if inst is None: + module.exit_json(changed=changed, instance={}) + + # Wait for instance to exit transition state before deleting + desired_states = ['running', 'stopped'] + wait_for_instance_state(module, client, instance_name, desired_states) + + try: + client.delete_instance(instanceName=instance_name) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) + + +def restart_instance(module, client, instance_name): + """ + Reboot an existing instance + Wait will not apply here as this is an OS-level operation + """ + + changed = False + + inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + + try: + client.reboot_instance(instanceName=instance_name) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) + + +def start_or_stop_instance(module, client, instance_name, state): + """ + Start or stop an existing instance + """ + + changed = False + + inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + + # Wait for instance to exit transition state before state change + desired_states = ['running', 'stopped'] + wait_for_instance_state(module, client, instance_name, desired_states) + + # Try state change + if inst and inst['state']['name'] != state: + try: + if state == 'running': + client.start_instance(instanceName=instance_name) + else: + client.stop_instance(instanceName=instance_name) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + changed = True + # Grab current instance info + inst = find_instance_info(module, client, instance_name) + + wait = module.params.get('wait') + if wait: + desired_states = [state] + wait_for_instance_state(module, client, instance_name, 
desired_states) + inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + + module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) + + +def main(): + + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted', + 'rebooted']), + zone=dict(type='str'), + blueprint_id=dict(type='str'), + bundle_id=dict(type='str'), + key_pair_name=dict(type='str'), + user_data=dict(type='str', default=''), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=300, type='int'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]]) + + client = module.client('lightsail') + + name = module.params.get('name') + state = module.params.get('state') + + if state == 'present': + create_instance(module, client, name) + elif state == 'absent': + delete_instance(module, client, name) + elif state in ('running', 'stopped'): + start_or_stop_instance(module, client, name, state) + elif state in ('restarted', 'rebooted'): + restart_instance(module, client, name) + + +if __name__ == '__main__': + main() diff --git a/rds.py b/rds.py new file mode 100644 index 00000000000..f3eb28cdf47 --- /dev/null +++ b/rds.py @@ -0,0 +1,1400 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rds +short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts +description: + - Creates, deletes, or modifies rds resources. + - When creating an instance it can be either a new instance or a read-only replica of an existing instance. + - This module has a dependency on python-boto >= 2.5 and will soon be deprecated. + - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0). + - Please use boto3 based M(rds_instance) instead. +options: + command: + description: + - Specifies the action to take. The 'reboot' option is available starting at version 2.0. + required: true + choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] + type: str + instance_name: + description: + - Database instance identifier. + - Required except when using I(command=facts) or I(command=delete) on just a snapshot. + type: str + source_instance: + description: + - Name of the database to replicate. + - Used only when I(command=replicate). + type: str + db_engine: + description: + - The type of database. + - Used only when I(command=create). + - mariadb was added in version 2.2. + choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', + 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'] + type: str + size: + description: + - Size in gigabytes of the initial storage for the DB instance. + - Used only when I(command=create) or I(command=modify). + type: str + instance_type: + description: + - The instance type of the database. + - If not specified then the replica inherits the same instance type as the source instance. + - Required when I(command=create). 
+ - Optional when I(command=replicate), I(command=modify) or I(command=restore). + aliases: ['type'] + type: str + username: + description: + - Master database username. + - Used only when I(command=create). + type: str + password: + description: + - Password for the master database username. + - Used only when I(command=create) or I(command=modify). + type: str + db_name: + description: + - Name of a database to create within the instance. + - If not specified then no database is created. + - Used only when I(command=create). + type: str + engine_version: + description: + - Version number of the database engine to use. + - If not specified then the current Amazon RDS default engine version is used + - Used only when I(command=create). + type: str + parameter_group: + description: + - Name of the DB parameter group to associate with this instance. + - If omitted then the RDS default DBParameterGroup will be used. + - Used only when I(command=create) or I(command=modify). + type: str + license_model: + description: + - The license model for this DB instance. + - Used only when I(command=create) or I(command=restore). + choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] + type: str + multi_zone: + description: + - Specifies if this is a Multi-availability-zone deployment. + - Can not be used in conjunction with I(zone) parameter. + - Used only when I(command=create) or I(command=modify). + type: bool + iops: + description: + - Specifies the number of IOPS for the instance. + - Used only when I(command=create) or I(command=modify). + - Must be an integer greater than 1000. + type: str + security_groups: + description: + - Comma separated list of one or more security groups. + - Used only when I(command=create) or I(command=modify). + type: str + vpc_security_groups: + description: + - Comma separated list of one or more vpc security group ids. + - Also requires I(subnet) to be specified. + - Used only when I(command=create) or I(command=modify). + type: list + elements: str + port: + description: + - Port number that the DB instance uses for connections. + - Used only when I(command=create) or I(command=replicate). + - 'Defaults to the standard ports for each I(db_engine): C(3306) for MySQL and MariaDB, C(1521) for Oracle + C(1433) for SQL Server, C(5432) for PostgreSQL.' + type: int + upgrade: + description: + - Indicates that minor version upgrades should be applied automatically. + - Used only when I(command=create) or I(command=modify) or I(command=restore) or I(command=replicate). + type: bool + default: false + option_group: + description: + - The name of the option group to use. + - If not specified then the default option group is used. + - Used only when I(command=create). + type: str + maint_window: + description: + - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))' + - Times are specified in UTC. + - If not specified then a random maintenance window is assigned. + - Used only when I(command=create) or I(command=modify). + type: str + backup_window: + description: + - 'Backup window in format of C(hh24:mi-hh24:mi). (Example: C(18:00-20:30))' + - Times are specified in UTC. + - If not specified then a random backup window is assigned. + - Used only when command=create or command=modify. + type: str + backup_retention: + description: + - Number of days backups are retained. + - Set to 0 to disable backups. + - Default is 1 day. + - 'Valid range: 0-35.' 
+      - Used only when I(command=create) or I(command=modify).
+    type: str
+  zone:
+    description:
+      - Availability zone in which to launch the instance.
+      - Used only when I(command=create), I(command=replicate) or I(command=restore).
+      - Cannot be used in conjunction with the I(multi_zone) parameter.
+    aliases: ['aws_zone', 'ec2_zone']
+    type: str
+  subnet:
+    description:
+      - VPC subnet group.
+      - If specified then a VPC instance is created.
+      - Used only when I(command=create).
+    type: str
+  snapshot:
+    description:
+      - Name of snapshot to take.
+      - When I(command=delete), if no I(snapshot) name is provided then no snapshot is taken.
+      - When I(command=delete), if no I(instance_name) is provided the snapshot is deleted.
+      - Used with I(command=facts), I(command=delete) or I(command=snapshot).
+    type: str
+  wait:
+    description:
+      - When I(command=create), I(command=replicate), I(command=modify) or I(command=restore), wait for the database to enter the 'available' state.
+      - When I(command=delete), wait for the database to be terminated.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+      - Used when I(wait=true).
+    default: 300
+    type: int
+  apply_immediately:
+    description:
+      - When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the
+        next preferred maintenance window.
+      - Used only when I(command=modify).
+    type: bool
+    default: false
+  force_failover:
+    description:
+      - If enabled, the reboot is done using a MultiAZ failover.
+      - Used only when I(command=reboot).
+    type: bool
+    default: false
+  new_instance_name:
+    description:
+      - Name to rename an instance to.
+      - Used only when I(command=modify).
+    type: str
+  character_set_name:
+    description:
+      - Associate the DB instance with a specified character set.
+      - Used with I(command=create).
+    type: str
+  publicly_accessible:
+    description:
+      - Explicitly set whether the resource should be publicly accessible or not.
+      - Used with I(command=create), I(command=replicate).
+      - Requires boto >= 2.26.0
+    type: str
+  tags:
+    description:
+      - tags dict to apply to a resource.
+      - Used with I(command=create), I(command=replicate), I(command=restore).
+ - Requires boto >= 2.26.0 + type: dict +requirements: + - "python >= 2.6" + - "boto" +author: + - "Bruce Pennypacker (@bpennypacker)" + - "Will Thames (@willthames)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD + +EXAMPLES = ''' +# Basic mysql provisioning example +- rds: + command: create + instance_name: new-database + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: mysql_admin + password: 1nsecure + tags: + Environment: testing + Application: cms + +# Create a read-only replica and wait for it to become available +- rds: + command: replicate + instance_name: new-database-replica + source_instance: new_database + wait: yes + wait_timeout: 600 + +# Delete an instance, but create a snapshot before doing so +- rds: + command: delete + instance_name: new-database + snapshot: new_database_snapshot + +# Get facts about an instance +- rds: + command: facts + instance_name: new-database + register: new_database_facts + +# Rename an instance and wait for the change to take effect +- rds: + command: modify + instance_name: new-database + new_instance_name: renamed-database + wait: yes + +# Reboot an instance and wait for it to become available again +- rds: + command: reboot + instance_name: database + wait: yes + +# Restore a Postgres db instance from a snapshot, wait for it to become available again, and +# then modify it to add your security group. Also, display the new endpoint. +# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI +- local_action: + module: rds + command: restore + snapshot: mypostgres-snapshot + instance_name: MyNewInstanceName + region: us-west-2 + zone: us-west-2b + subnet: default-vpc-xx441xxx + publicly_accessible: yes + wait: yes + wait_timeout: 600 + tags: + Name: pg1_test_name_tag + register: rds + +- local_action: + module: rds + command: modify + instance_name: MyNewInstanceName + region: us-west-2 + vpc_security_groups: sg-xxx945xx + +- debug: + msg: "The new db endpoint is {{ rds.instance.endpoint }}" +''' + +RETURN = ''' +instance: + description: the rds instance + returned: always + type: complex + contains: + engine: + description: the name of the database engine + returned: when RDS instance exists + type: str + sample: "oracle-se" + engine_version: + description: the version of the database engine + returned: when RDS instance exists + type: str + sample: "11.2.0.4.v6" + license_model: + description: the license model information + returned: when RDS instance exists + type: str + sample: "bring-your-own-license" + character_set_name: + description: the name of the character set that this instance is associated with + returned: when RDS instance exists + type: str + sample: "AL32UTF8" + allocated_storage: + description: the allocated storage size in gigabytes (GB) + returned: when RDS instance exists + type: str + sample: "100" + publicly_accessible: + description: the accessibility options for the DB instance + returned: when RDS instance exists + type: bool + sample: "true" + latest_restorable_time: + description: the latest time to which a database can be restored with point-in-time restore + returned: when RDS instance exists + type: str + sample: "1489707802.0" + secondary_availability_zone: + description: the name of the secondary AZ for a DB instance with multi-AZ support + returned: when RDS instance exists and is multy-AZ + type: str + sample: "eu-west-1b" + 
backup_window: + description: the daily time range during which automated backups are created if automated backups are enabled + returned: when RDS instance exists and automated backups are enabled + type: str + sample: "03:00-03:30" + auto_minor_version_upgrade: + description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window + returned: when RDS instance exists + type: bool + sample: "true" + read_replica_source_dbinstance_identifier: + description: the identifier of the source DB instance if this RDS instance is a read replica + returned: when read replica RDS instance exists + type: str + sample: "null" + db_name: + description: the name of the database to create when the DB instance is created + returned: when RDS instance exists + type: str + sample: "ASERTG" + endpoint: + description: the endpoint uri of the database instance + returned: when RDS instance exists + type: str + sample: "my-ansible-database.asdfaosdgih.us-east-1.rds.amazonaws.com" + port: + description: the listening port of the database instance + returned: when RDS instance exists + type: int + sample: 3306 + parameter_groups: + description: the list of DB parameter groups applied to this RDS instance + returned: when RDS instance exists and parameter groups are defined + type: complex + contains: + parameter_apply_status: + description: the status of parameter updates + returned: when RDS instance exists + type: str + sample: "in-sync" + parameter_group_name: + description: the name of the DP parameter group + returned: when RDS instance exists + type: str + sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz" + option_groups: + description: the list of option group memberships for this RDS instance + returned: when RDS instance exists + type: complex + contains: + option_group_name: + description: the option group name for this RDS instance + returned: when RDS instance exists + type: str + sample: "default:oracle-se-11-2" + status: + description: the status of the RDS instance's option group membership + returned: when RDS instance exists + type: str + sample: "in-sync" + pending_modified_values: + description: a dictionary of changes to the RDS instance that are pending + returned: when RDS instance exists + type: complex + contains: + db_instance_class: + description: the new DB instance class for this RDS instance that will be applied or is in progress + returned: when RDS instance exists + type: str + sample: "null" + db_instance_identifier: + description: the new DB instance identifier this RDS instance that will be applied or is in progress + returned: when RDS instance exists + type: str + sample: "null" + allocated_storage: + description: the new allocated storage size for this RDS instance that will be applied or is in progress + returned: when RDS instance exists + type: str + sample: "null" + backup_retention_period: + description: the pending number of days for which automated backups are retained + returned: when RDS instance exists + type: str + sample: "null" + engine_version: + description: indicates the database engine version + returned: when RDS instance exists + type: str + sample: "null" + iops: + description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied + returned: when RDS instance exists + type: str + sample: "null" + master_user_password: + description: the pending or in-progress change of the master credentials for this RDS instance + returned: when RDS instance exists + type: str 
+      sample: "null"
+    multi_az:
+      description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
+      returned: when RDS instance exists
+      type: str
+      sample: "null"
+    port:
+      description: specifies the pending port for this RDS instance
+      returned: when RDS instance exists
+      type: str
+      sample: "null"
+db_subnet_groups:
+  description: information on the subnet group associated with this RDS instance
+  returned: when RDS instance exists
+  type: complex
+  contains:
+    description:
+      description: the description of the subnet group associated with the DB instance
+      returned: when RDS instance exists
+      type: str
+      sample: "Subnets for the UAT RDS SQL DB Instance"
+    name:
+      description: the name of the DB subnet group
+      returned: when RDS instance exists
+      type: str
+      sample: "samplesubnetgrouprds-j6paiqkxqp4z"
+    status:
+      description: the status of the DB subnet group
+      returned: when RDS instance exists
+      type: str
+      sample: "complete"
+    subnets:
+      description: the list of subnets in the DB subnet group
+      returned: when RDS instance exists
+      type: complex
+      contains:
+        availability_zone:
+          description: subnet availability zone information
+          returned: when RDS instance exists
+          type: complex
+          contains:
+            name:
+              description: availability zone
+              returned: when RDS instance exists
+              type: str
+              sample: "eu-west-1b"
+            provisioned_iops_capable:
+              description: whether provisioned IOPS are available in the subnet's availability zone
+              returned: when RDS instance exists
+              type: bool
+              sample: "false"
+        identifier:
+          description: the identifier of the subnet
+          returned: when RDS instance exists
+          type: str
+          sample: "subnet-3fdba63e"
+        status:
+          description: the status of the subnet
+          returned: when RDS instance exists
+          type: str
+          sample: "active"
+'''
+
+import time
+
+try:
+    import boto.rds
+    import boto.exception
+except ImportError:
+    pass  # Taken care of by ec2.HAS_BOTO
+
+try:
+    import boto.rds2
+    import boto.rds2.exceptions
+    HAS_RDS2 = True
+except ImportError:
+    HAS_RDS2 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
+DEFAULT_PORTS = {
+    'aurora': 3306,
+    'mariadb': 3306,
+    'mysql': 3306,
+    'oracle': 1521,
+    'sqlserver': 1433,
+    'postgres': 5432,
+}
+
+
+class RDSException(Exception):
+    def __init__(self, exc):
+        if hasattr(exc, 'error_message') and exc.error_message:
+            self.message = exc.error_message
+            self.code = exc.error_code
+        elif hasattr(exc, 'body') and 'Error' in exc.body:
+            self.message = exc.body['Error']['Message']
+            self.code = exc.body['Error']['Code']
+        else:
+            self.message = str(exc)
+            self.code = 'Unknown Error'
+
+
+class RDSConnection:
+    def __init__(self, module, region, **aws_connect_params):
+        try:
+            self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
+        except boto.exception.BotoServerError as e:
+            module.fail_json(msg=e.error_message)
+
+    def get_db_instance(self, instancename):
+        try:
+            return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
+        except boto.exception.BotoServerError:
+            return None
+
+    def get_db_snapshot(self, snapshotid):
+        try:
+            return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
+        except boto.exception.BotoServerError:
+            return None
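+
+    # Usage sketch for these thin wrappers (a sketch only, with hypothetical
+    # identifiers; not part of the module flow):
+    #   conn = RDSConnection(module, 'us-east-1', **aws_connect_params)
+    #   instance = conn.get_db_instance('mydb')         # RDSDBInstance or None
+    #   snapshot = conn.get_db_snapshot('mydb-snap-1')  # RDSSnapshot or None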
+
+    def create_db_instance(self, instance_name, size, instance_class, db_engine,
+                           username, password, **params):
+        params['engine'] = db_engine
+        try:
+            result = self.connection.create_dbinstance(instance_name, size, instance_class,
+                                                       username, password, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
+        try:
+            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def delete_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.delete_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def delete_db_snapshot(self, snapshot):
+        try:
+            result = self.connection.delete_dbsnapshot(snapshot)
+            return RDSSnapshot(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def modify_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.modify_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def reboot_db_instance(self, instance_name, **params):
+        try:
+            # boto.rds' reboot_dbinstance takes no optional arguments
+            result = self.connection.reboot_dbinstance(instance_name)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
+        try:
+            result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def create_db_snapshot(self, snapshot, instance_name, **params):
+        try:
+            result = self.connection.create_dbsnapshot(snapshot, instance_name)
+            return RDSSnapshot(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+    def promote_read_replica(self, instance_name, **params):
+        try:
+            result = self.connection.promote_read_replica(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError as e:
+            raise RDSException(e)
+
+
+class RDS2Connection:
+    def __init__(self, module, region, **aws_connect_params):
+        try:
+            self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
+        except boto.exception.BotoServerError as e:
+            module.fail_json(msg=e.error_message)
+
+    def get_db_instance(self, instancename):
+        try:
+            dbinstances = self.connection.describe_db_instances(
+                db_instance_identifier=instancename
+            )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
+            result = RDS2DBInstance(dbinstances[0])
+            return result
+        except boto.rds2.exceptions.DBInstanceNotFound as e:
+            return None
+        except Exception as e:
+            raise e
+
+    def get_db_snapshot(self, snapshotid):
+        try:
+            snapshots = self.connection.describe_db_snapshots(
+                db_snapshot_identifier=snapshotid,
+                snapshot_type='manual'
+            )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
+            result = RDS2Snapshot(snapshots[0])
+            return result
+        except boto.rds2.exceptions.DBSnapshotNotFound:
+            return None
+
+    def create_db_instance(self, instance_name, size, instance_class, db_engine,
+                           username, password, **params):
+        try:
+            result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def create_db_instance_read_replica(self, instance_name, source_instance, **params): + try: + result = self.connection.create_db_instance_read_replica( + instance_name, + source_instance, + **params + )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def delete_db_instance(self, instance_name, **params): + try: + result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def delete_db_snapshot(self, snapshot): + try: + result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def modify_db_instance(self, instance_name, **params): + try: + result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): + try: + result = self.connection.restore_db_instance_from_db_snapshot( + instance_name, + snapshot, + **params + )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def create_db_snapshot(self, snapshot, instance_name, **params): + try: + result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + def promote_read_replica(self, instance_name, **params): + try: + result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError as e: + raise RDSException(e) + + +class RDSDBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + self.name = dbinstance.id + self.status = dbinstance.status + + def get_data(self): + d = { + 'id': self.name, + 'create_time': self.instance.create_time, + 'status': self.status, + 'availability_zone': self.instance.availability_zone, + 'backup_retention': self.instance.backup_retention_period, + 'backup_window': self.instance.preferred_backup_window, + 'maintenance_window': self.instance.preferred_maintenance_window, + 'multi_zone': self.instance.multi_az, + 'instance_type': self.instance.instance_class, + 'username': self.instance.master_username, + 'iops': self.instance.iops + } + + # Only assign an Endpoint if one is 
available + if hasattr(self.instance, 'endpoint'): + d["endpoint"] = self.instance.endpoint[0] + d["port"] = self.instance.endpoint[1] + if self.instance.vpc_security_groups is not None: + d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) + else: + d["vpc_security_groups"] = None + else: + d["endpoint"] = None + d["port"] = None + d["vpc_security_groups"] = None + d['DBName'] = self.instance.DBName if hasattr(self.instance, 'DBName') else None + # ReadReplicaSourceDBInstanceIdentifier may or may not exist + try: + d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier + except Exception: + d["replication_source"] = None + return d + + +class RDS2DBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + if 'DBInstanceIdentifier' not in dbinstance: + self.name = None + else: + self.name = self.instance.get('DBInstanceIdentifier') + self.status = self.instance.get('DBInstanceStatus') + + def get_data(self): + d = { + 'id': self.name, + 'create_time': self.instance['InstanceCreateTime'], + 'engine': self.instance['Engine'], + 'engine_version': self.instance['EngineVersion'], + 'license_model': self.instance['LicenseModel'], + 'character_set_name': self.instance['CharacterSetName'], + 'allocated_storage': self.instance['AllocatedStorage'], + 'publicly_accessible': self.instance['PubliclyAccessible'], + 'latest_restorable_time': self.instance['LatestRestorableTime'], + 'status': self.status, + 'availability_zone': self.instance['AvailabilityZone'], + 'secondary_availability_zone': self.instance['SecondaryAvailabilityZone'], + 'backup_retention': self.instance['BackupRetentionPeriod'], + 'backup_window': self.instance['PreferredBackupWindow'], + 'maintenance_window': self.instance['PreferredMaintenanceWindow'], + 'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'], + 'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'], + 'multi_zone': self.instance['MultiAZ'], + 'instance_type': self.instance['DBInstanceClass'], + 'username': self.instance['MasterUsername'], + 'db_name': self.instance['DBName'], + 'iops': self.instance['Iops'], + 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] + } + if self.instance['DBParameterGroups'] is not None: + parameter_groups = [] + for x in self.instance['DBParameterGroups']: + parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']}) + d['parameter_groups'] = parameter_groups + if self.instance['OptionGroupMemberships'] is not None: + option_groups = [] + for x in self.instance['OptionGroupMemberships']: + option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']}) + d['option_groups'] = option_groups + if self.instance['PendingModifiedValues'] is not None: + pdv = self.instance['PendingModifiedValues'] + d['pending_modified_values'] = { + 'multi_az': pdv['MultiAZ'], + 'master_user_password': pdv['MasterUserPassword'], + 'port': pdv['Port'], + 'iops': pdv['Iops'], + 'allocated_storage': pdv['AllocatedStorage'], + 'engine_version': pdv['EngineVersion'], + 'backup_retention_period': pdv['BackupRetentionPeriod'], + 'db_instance_class': pdv['DBInstanceClass'], + 'db_instance_identifier': pdv['DBInstanceIdentifier'] + } + if self.instance["DBSubnetGroup"] is not None: + dsg = self.instance["DBSubnetGroup"] + db_subnet_groups = {} + db_subnet_groups['vpc_id'] = dsg['VpcId'] + 
db_subnet_groups['name'] = dsg['DBSubnetGroupName']
+            db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
+            db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
+            db_subnet_groups['subnets'] = []
+            for x in dsg["Subnets"]:
+                db_subnet_groups['subnets'].append({
+                    'status': x['SubnetStatus'].lower(),
+                    'identifier': x['SubnetIdentifier'],
+                    'availability_zone': {
+                        'name': x['SubnetAvailabilityZone']['Name'],
+                        'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
+                    }
+                })
+            d['db_subnet_groups'] = db_subnet_groups
+        if self.instance["VpcSecurityGroups"] is not None:
+            d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
+        if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
+            d['endpoint'] = self.instance["Endpoint"].get('Address', None)
+            d['port'] = self.instance["Endpoint"].get('Port', None)
+        else:
+            d['endpoint'] = None
+            d['port'] = None
+        # self.instance is a dict here, so check for the key rather than using hasattr
+        d['DBName'] = self.instance['DBName'] if 'DBName' in self.instance else None
+        return d
+
+
+class RDSSnapshot:
+    def __init__(self, snapshot):
+        self.snapshot = snapshot
+        self.name = snapshot.id
+        self.status = snapshot.status
+
+    def get_data(self):
+        d = {
+            'id': self.name,
+            'create_time': self.snapshot.snapshot_create_time,
+            'status': self.status,
+            'availability_zone': self.snapshot.availability_zone,
+            'instance_id': self.snapshot.instance_id,
+            'instance_created': self.snapshot.instance_create_time,
+        }
+        # needs boto >= 2.21.0
+        if hasattr(self.snapshot, 'snapshot_type'):
+            d["snapshot_type"] = self.snapshot.snapshot_type
+        if hasattr(self.snapshot, 'iops'):
+            d["iops"] = self.snapshot.iops
+        return d
+
+
+class RDS2Snapshot:
+    def __init__(self, snapshot):
+        if 'DeleteDBSnapshotResponse' in snapshot:
+            self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
+        else:
+            self.snapshot = snapshot
+        self.name = self.snapshot.get('DBSnapshotIdentifier')
+        self.status = self.snapshot.get('Status')
+
+    def get_data(self):
+        d = {
+            'id': self.name,
+            'create_time': self.snapshot['SnapshotCreateTime'],
+            'status': self.status,
+            'availability_zone': self.snapshot['AvailabilityZone'],
+            'instance_id': self.snapshot['DBInstanceIdentifier'],
+            'instance_created': self.snapshot['InstanceCreateTime'],
+            'snapshot_type': self.snapshot['SnapshotType'],
+            'iops': self.snapshot['Iops'],
+        }
+        return d
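+
+
+# Waiting pattern used by the commands below, sketched with a hypothetical
+# identifier (a sketch only, not executed):
+#   inst = conn.get_db_instance('mydb')
+#   inst = await_resource(conn, inst, 'available', module)
+# await_resource polls every few seconds until the status matches or
+# module.params['wait_timeout'] elapses.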
+def await_resource(conn, resource, status, module):
+    start_time = time.time()
+    wait_timeout = module.params.get('wait_timeout') + start_time
+    check_interval = 5
+    while wait_timeout > time.time() and resource.status != status:
+        time.sleep(check_interval)
+        if wait_timeout <= time.time():
+            module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
+        if module.params.get('command') == 'snapshot':
+            # Temporary until all the rds2 commands have their responses parsed
+            if resource.name is None:
+                module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
+            # Back off if we're getting throttled, since we're just waiting anyway
+            resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
+        else:
+            # Temporary until all the rds2 commands have their responses parsed
+            if resource.name is None:
+                module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
+            # Back off if we're getting throttled, since we're just waiting anyway
+            resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
+            if resource is None:
+                break
+        # Some RDS resources take much longer than others to be ready. Check
+        # less aggressively for slow ones to avoid throttling.
+        if time.time() > start_time + 90:
+            check_interval = 20
+    return resource
+
+
+def create_db_instance(module, conn):
+    required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
+    valid_vars = ['backup_retention', 'backup_window',
+                  'character_set_name', 'db_name', 'engine_version',
+                  'instance_type', 'iops', 'license_model', 'maint_window',
+                  'multi_zone', 'option_group', 'parameter_group', 'port',
+                  'subnet', 'upgrade', 'zone']
+    if module.params.get('subnet'):
+        valid_vars.append('vpc_security_groups')
+    else:
+        valid_vars.append('security_groups')
+    if HAS_RDS2:
+        valid_vars.extend(['publicly_accessible', 'tags'])
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+
+    result = conn.get_db_instance(instance_name)
+    if result:
+        changed = False
+    else:
+        try:
+            result = conn.create_db_instance(instance_name, module.params.get('size'),
+                                             module.params.get('instance_type'), module.params.get('db_engine'),
+                                             module.params.get('username'), module.params.get('password'), **params)
+            changed = True
+        except RDSException as e:
+            module.fail_json(msg="Failed to create instance: %s" % e.message)
+
+    if module.params.get('wait'):
+        resource = await_resource(conn, result, 'available', module)
+    else:
+        resource = conn.get_db_instance(instance_name)
+
+    module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def replicate_db_instance(module, conn):
+    required_vars = ['instance_name', 'source_instance']
+    valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
+    if HAS_RDS2:
+        valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+    source_instance = module.params.get('source_instance')
+
+    result = conn.get_db_instance(instance_name)
+    if result:
+        changed = False
+    else:
+        try:
+            result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
+            changed = True
+        except RDSException as e:
+            module.fail_json(msg="Failed to create replica instance: %s" % e.message)
+
+    if module.params.get('wait'):
+        resource = await_resource(conn, result, 'available', module)
+    else:
+        resource = conn.get_db_instance(instance_name)
+
+    module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def delete_db_instance_or_snapshot(module, conn):
+    required_vars = []
+    valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+    snapshot = module.params.get('snapshot')
+
+    if not instance_name:
+        result = conn.get_db_snapshot(snapshot)
+    else:
+        result = conn.get_db_instance(instance_name)
+    if not result:
+        module.exit_json(changed=False)
+    if result.status == 'deleting':
+        module.exit_json(changed=False)
+    try:
+        if instance_name:
+            if snapshot:
+                params["skip_final_snapshot"] = False
+                if HAS_RDS2:
+                    params["final_db_snapshot_identifier"] = snapshot
+                else:
+                    params["final_snapshot_id"] = snapshot
+            else:
+                params["skip_final_snapshot"] = True
+            result = conn.delete_db_instance(instance_name, **params)
+        else:
+            result = conn.delete_db_snapshot(snapshot)
+    except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message) + + # If we're not waiting for a delete to complete then we're all done + # so just return + if not module.params.get('wait'): + module.exit_json(changed=True) + try: + await_resource(conn, result, 'deleted', module) + module.exit_json(changed=True) + except RDSException as e: + if e.code == 'DBInstanceNotFound': + module.exit_json(changed=True) + else: + module.fail_json(msg=e.message) + except Exception as e: + module.fail_json(msg=str(e)) + + +def facts_db_instance_or_snapshot(module, conn): + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + + if instance_name and snapshot: + module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") + if instance_name: + resource = conn.get_db_instance(instance_name) + if not resource: + module.fail_json(msg="DB instance %s does not exist" % instance_name) + if snapshot: + resource = conn.get_db_snapshot(snapshot) + if not resource: + module.fail_json(msg="DB snapshot %s does not exist" % snapshot) + + module.exit_json(changed=False, instance=resource.get_data()) + + +def modify_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', + 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', + 'maint_window', 'multi_zone', 'new_instance_name', + 'option_group', 'parameter_group', 'password', 'size', 'upgrade'] + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + new_instance_name = module.params.get('new_instance_name') + + try: + result = conn.modify_db_instance(instance_name, **params) + except RDSException as e: + module.fail_json(msg=e.message) + if params.get('apply_immediately'): + if new_instance_name: + # Wait until the new instance name is valid + new_instance = None + while not new_instance: + new_instance = conn.get_db_instance(new_instance_name) + time.sleep(5) + + # Found instance but it briefly flicks to available + # before rebooting so let's wait until we see it rebooting + # before we check whether to 'wait' + result = await_resource(conn, new_instance, 'rebooting', module) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + # guess that this changed the DB, need a way to check + module.exit_json(changed=True, instance=resource.get_data()) + + +def promote_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['backup_retention', 'backup_window'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if not result: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + + if result.get_data().get('replication_source'): + try: + result = conn.promote_read_replica(instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + else: + changed = False + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def snapshot_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['tags'] + params = validate_parameters(required_vars, 
valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + changed = False + result = conn.get_db_snapshot(snapshot) + if not result: + try: + result = conn.create_db_snapshot(snapshot, instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_snapshot(snapshot) + + module.exit_json(changed=changed, snapshot=resource.get_data()) + + +def reboot_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = [] + + if HAS_RDS2: + valid_vars.append('force_failover') + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + result = conn.get_db_instance(instance_name) + changed = False + try: + result = conn.reboot_db_instance(instance_name, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def restore_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', + 'option_group', 'port', 'publicly_accessible', + 'subnet', 'tags', 'upgrade', 'zone'] + if HAS_RDS2: + valid_vars.append('instance_type') + else: + required_vars.append('instance_type') + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + instance_type = module.params.get('instance_type') + snapshot = module.params.get('snapshot') + + changed = False + result = conn.get_db_instance(instance_name) + if not result: + try: + result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) + changed = True + except RDSException as e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def validate_parameters(required_vars, valid_vars, module): + command = module.params.get('command') + for v in required_vars: + if not module.params.get(v): + module.fail_json(msg="Parameter %s required for %s command" % (v, command)) + + # map to convert rds module options to boto rds and rds2 options + optional_params = { + 'port': 'port', + 'db_name': 'db_name', + 'zone': 'availability_zone', + 'maint_window': 'preferred_maintenance_window', + 'backup_window': 'preferred_backup_window', + 'backup_retention': 'backup_retention_period', + 'multi_zone': 'multi_az', + 'engine_version': 'engine_version', + 'upgrade': 'auto_minor_version_upgrade', + 'subnet': 'db_subnet_group_name', + 'license_model': 'license_model', + 'option_group': 'option_group_name', + 'size': 'allocated_storage', + 'iops': 'iops', + 'new_instance_name': 'new_instance_id', + 'apply_immediately': 'apply_immediately', + } + # map to convert rds module options to boto rds options + optional_params_rds = { + 'db_engine': 'engine', + 'password': 'master_password', + 'parameter_group': 'param_group', + 'instance_type': 'instance_class', + } + # map to convert rds module options to boto rds2 options + optional_params_rds2 = { + 'tags': 
'tags',
+        'publicly_accessible': 'publicly_accessible',
+        'parameter_group': 'db_parameter_group_name',
+        'character_set_name': 'character_set_name',
+        'instance_type': 'db_instance_class',
+        'password': 'master_user_password',
+        'new_instance_name': 'new_db_instance_identifier',
+        'force_failover': 'force_failover',
+    }
+    if HAS_RDS2:
+        optional_params.update(optional_params_rds2)
+        sec_group = 'db_security_groups'
+    else:
+        optional_params.update(optional_params_rds)
+        sec_group = 'security_groups'
+        # Check for options only supported with rds2
+        for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
+            if module.params.get(k):
+                module.fail_json(msg="Parameter %s requires boto.rds2 (boto >= 2.26.0)" % k)
+
+    params = {}
+    for (k, v) in optional_params.items():
+        if module.params.get(k) is not None and k not in required_vars:
+            if k in valid_vars:
+                params[v] = module.params[k]
+            else:
+                if module.params.get(k) is False:
+                    pass
+                else:
+                    module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
+
+    if module.params.get('security_groups'):
+        params[sec_group] = module.params.get('security_groups').split(',')
+
+    vpc_groups = module.params.get('vpc_security_groups')
+    if vpc_groups:
+        if HAS_RDS2:
+            params['vpc_security_group_ids'] = vpc_groups
+        else:
+            groups_list = []
+            for x in vpc_groups:
+                groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
+            params['vpc_security_groups'] = groups_list
+
+    # Convert tags dict to list of tuples that rds2 expects
+    if 'tags' in params:
+        params['tags'] = module.params['tags'].items()
+    return params
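+
+
+# A worked example of the mapping above (hypothetical values): for command=create,
+# module params {'backup_retention': '7', 'multi_zone': True} come back as
+# {'backup_retention_period': '7', 'multi_az': True}, ready to be passed as
+# **params to the connection wrappers.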
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
+        instance_name=dict(required=False),
+        source_instance=dict(required=False),
+        db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
+                                'sqlserver-web', 'postgres', 'aurora'], required=False),
+        size=dict(required=False),
+        instance_type=dict(aliases=['type'], required=False),
+        username=dict(required=False),
+        password=dict(no_log=True, required=False),
+        db_name=dict(required=False),
+        engine_version=dict(required=False),
+        parameter_group=dict(required=False),
+        license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
+        multi_zone=dict(type='bool', required=False),
+        iops=dict(required=False),
+        security_groups=dict(required=False),
+        vpc_security_groups=dict(type='list', required=False),
+        port=dict(required=False, type='int'),
+        upgrade=dict(type='bool', default=False),
+        option_group=dict(required=False),
+        maint_window=dict(required=False),
+        backup_window=dict(required=False),
+        backup_retention=dict(required=False),
+        zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
+        subnet=dict(required=False),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=300),
+        snapshot=dict(required=False),
+        apply_immediately=dict(type='bool', default=False),
+        new_instance_name=dict(required=False),
+        tags=dict(type='dict', required=False),
+        publicly_accessible=dict(required=False),
+        character_set_name=dict(required=False),
+        force_failover=dict(type='bool', required=False, default=False)
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    invocations = {
+        'create': create_db_instance,
+        'replicate': replicate_db_instance,
+        'delete': delete_db_instance_or_snapshot,
+        'facts': facts_db_instance_or_snapshot,
+        'modify': modify_db_instance,
+        'promote': promote_db_instance,
+        'snapshot': snapshot_db_instance,
+        'reboot': reboot_db_instance,
+        'restore': restore_db_instance,
+    }
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    if not region:
+        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+    # set port to per db defaults if not specified
+    if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
+        if '-' in module.params['db_engine']:
+            engine = module.params['db_engine'].split('-')[0]
+        else:
+            engine = module.params['db_engine']
+        module.params['port'] = DEFAULT_PORTS[engine.lower()]
+
+    # connect to the rds endpoint
+    if HAS_RDS2:
+        conn = RDS2Connection(module, region, **aws_connect_params)
+    else:
+        conn = RDSConnection(module, region, **aws_connect_params)
+
+    invocations[module.params.get('command')](module, conn)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/rds_instance.py b/rds_instance.py
new file mode 100644
index 00000000000..7fa4b8f171d
--- /dev/null
+++ b/rds_instance.py
@@ -0,0 +1,1225 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: rds_instance
+short_description: Manage RDS instances
+description:
+  - Create, modify, and delete RDS instances.
+
+requirements:
+  - botocore
+  - boto3 >= 1.5.0
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+author:
+  - Sloane Hertel (@s-hertel)
+
+options:
+  # General module options
+  state:
+    description:
+      - Whether the DB instance should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state
+        and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state,
+        (running if creating the DB instance).
+      - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted).
+    choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted']
+    default: 'present'
+    type: str
+  creation_source:
+    description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot).
+    choices: ['snapshot', 's3', 'instance']
+    type: str
+  force_update_password:
+    description:
+      - Set to True to update the instance password with I(master_user_password). Because comparing passwords to determine
+        whether an update is needed is not possible, this is set to False by default to allow idempotence.
+    type: bool
+    default: False
+  purge_cloudwatch_logs_exports:
+    description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+      See the example below.
+    type: bool
+    default: True
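+  # For example, if an instance already exports the "audit" log type and a task
+  # sets enable_cloudwatch_logs_exports: ["error"], leaving this option True
+  # disables "audit", while setting it to False keeps "audit" alongside "error"
+  # (the log types here are illustrative).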
+  purge_tags:
+    description: Set to False to retain any tags that aren't specified in the task and are associated with the instance.
+    type: bool
+    default: True
+  read_replica:
+    description:
+      - Set to False to promote a read replica instance or True to create one. When creating a read replica C(creation_source) should
+        be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option.
+    type: bool
+  wait:
+    description:
+      - Whether to wait for the instance to be available, stopped, or deleted. At a later time a wait_timeout option may be added.
+        Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches
+        the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the
+        instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting).
+        If this is set to False, task retries and delays may allow your playbook to better handle timeouts for major modifications.
+    type: bool
+    default: True
+
+  # Options that have a corresponding boto3 parameter
+  allocated_storage:
+    description:
+      - The amount of storage (in gibibytes) to allocate for the DB instance.
+    type: int
+  allow_major_version_upgrade:
+    description:
+      - Whether to allow major version upgrades.
+    type: bool
+  apply_immediately:
+    description:
+      - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password)
+        should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes
+        are applied during the next maintenance window.
+    type: bool
+    default: False
+  auto_minor_version_upgrade:
+    description:
+      - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window.
+    type: bool
+  availability_zone:
+    description:
+      - The Availability Zone the DB instance is created in.
+        May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
+    aliases:
+      - az
+      - zone
+    type: str
+  backup_retention_period:
+    description:
+      - The number of days for which automated backups are retained (must be greater or equal to 1).
+        May be used when creating a new instance, when restoring from S3, or when modifying an instance.
+    type: int
+  ca_certificate_identifier:
+    description:
+      - The identifier of the CA certificate for the DB instance.
+    type: str
+  character_set_name:
+    description:
+      - The character set to associate with the DB instance.
+    type: str
+  copy_tags_to_snapshot:
+    description:
+      - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating
+        a DB instance the RDS API defaults this to false if unspecified.
+    type: bool
+  db_cluster_identifier:
+    description:
+      - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to
+        63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
+        contain consecutive hyphens.
+    aliases:
+      - cluster_id
+    type: str
+  db_instance_class:
+    description:
+      - The compute and memory capacity of the DB instance, for example db.t2.micro.
+    aliases:
+      - class
+      - instance_type
+    type: str
+  db_instance_identifier:
+    description:
+      - The DB instance (lowercase) identifier.
+        The identifier must contain from 1 to 63 letters, numbers, or
+        hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
+    aliases:
+      - instance_id
+      - id
+    required: True
+    type: str
+  db_name:
+    description:
+      - The name for your database. If a name is not provided Amazon RDS will not create a database.
+    type: str
+  db_parameter_group_name:
+    description:
+      - The name of the DB parameter group to associate with this DB instance. When creating the DB instance if this
+        argument is omitted the default DBParameterGroup for the specified engine is used.
+    type: str
+  db_security_groups:
+    description:
+      - (EC2-Classic platform) A list of DB security groups to associate with this DB instance.
+    type: list
+  db_snapshot_identifier:
+    description:
+      - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot).
+    type: str
+  db_subnet_group_name:
+    description:
+      - The DB subnet group name to use for the DB instance.
+    aliases:
+      - subnet_group
+    type: str
+  domain:
+    description:
+      - The Active Directory Domain to restore the instance in.
+    type: str
+  domain_iam_role_name:
+    description:
+      - The name of the IAM role to be used when making API calls to the Directory Service.
+    type: str
+  enable_cloudwatch_logs_exports:
+    description:
+      - A list of log types that need to be enabled for exporting to CloudWatch Logs.
+    aliases:
+      - cloudwatch_log_exports
+    type: list
+  enable_iam_database_authentication:
+    description:
+      - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
+        If this option is omitted when creating the instance, Amazon RDS sets this to False.
+    type: bool
+  enable_performance_insights:
+    description:
+      - Whether to enable Performance Insights for the DB instance.
+    type: bool
+  engine:
+    description:
+      - The name of the database engine to be used for this DB instance. This is required to create an instance.
+        Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se |
+        oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web
+    type: str
+  engine_version:
+    description:
+      - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a or 5.7.12;
+        an Aurora PostgreSQL example is 9.6.3.
+    type: str
+  final_db_snapshot_identifier:
+    description:
+      - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false.
+    aliases:
+      - final_snapshot_identifier
+    type: str
+  force_failover:
+    description:
+      - Set to True to conduct the reboot through a Multi-AZ failover.
+    type: bool
+  iops:
+    description:
+      - The Provisioned IOPS (I/O operations per second) value. Only set when I(storage_type) is set to io1.
+    type: int
+  kms_key_id:
+    description:
+      - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the
+        same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
+        alias instead of the ARN for the KMS encryption key.
+      - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used.
+    type: str
+  license_model:
+    description:
+      - The license model for the DB instance.
+      - Several options are license-included, bring-your-own-license, and general-public-license.
+      - This option can also be omitted to default to an accepted value.
+    type: str
+  master_user_password:
+    description:
+      - An 8-41 character password for the master database user. The password can contain any printable ASCII character
+        except "/", '"', or "@". To modify the password use I(force_update_password). Use I(apply_immediately) to change
+        the password immediately, otherwise it is updated during the next maintenance window.
+    aliases:
+      - password
+    type: str
+  master_username:
+    description:
+      - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter.
+    aliases:
+      - username
+    type: str
+  max_allocated_storage:
+    description:
+      - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+    type: int
+  monitoring_interval:
+    description:
+      - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting
+        metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
+    type: int
+  monitoring_role_arn:
+    description:
+      - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
+    type: str
+  multi_az:
+    description:
+      - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
+    type: bool
+  new_db_instance_identifier:
+    description:
+      - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain
+        from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
+        contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the
+        next maintenance window.
+    aliases:
+      - new_instance_id
+      - new_id
+    type: str
+  option_group_name:
+    description:
+      - The option group to associate with the DB instance.
+    type: str
+  performance_insights_kms_key_id:
+    description:
+      - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
+    type: str
+  performance_insights_retention_period:
+    description:
+      - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
+    type: int
+  port:
+    description:
+      - The port number on which the instances accept connections.
+    type: int
+  preferred_backup_window:
+    description:
+      - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+        enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+        I(preferred_maintenance_window).
+    aliases:
+      - backup_window
+    type: str
+  preferred_maintenance_window:
+    description:
+      - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+        be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+    aliases:
+      - maintenance_window
+    type: str
+  processor_features:
+    description:
+      - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
+        DB instance class of the DB instance. Names are threadsPerCore and coreCount.
+        Set this option to an empty dictionary to use the default processor features. See the sketch below.
+    suboptions:
+      threadsPerCore:
+        description: The number of threads per core
+      coreCount:
+        description: The number of CPU cores
+    type: dict
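+  # An illustrative value for the option above (the numbers are hypothetical):
+  #   processor_features:
+  #     coreCount: 2
+  #     threadsPerCore: 2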
+  promotion_tier:
+    description:
+      - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of
+        the existing primary instance.
+    type: str
+  publicly_accessible:
+    description:
+      - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with
+        a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal
+        instance with a DNS name that resolves to a private IP address.
+    type: bool
+  restore_time:
+    description:
+      - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance.
+        For example, "2009-09-07T23:45:00Z".
+      - May alternatively set I(use_latest_restorable_time=True).
+      - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+    type: str
+  s3_bucket_name:
+    description:
+      - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance.
+    type: str
+  s3_ingestion_role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
+        the Amazon S3 bucket on your behalf.
+    type: str
+  s3_prefix:
+    description:
+      - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not
+        specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket.
+    type: str
+  skip_final_snapshot:
+    description:
+      - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier)
+        must be provided.
+    type: bool
+    default: false
+  snapshot_identifier:
+    description:
+      - The ARN of the DB snapshot to restore from when using I(creation_source=snapshot).
+    type: str
+  source_db_instance_identifier:
+    description:
+      - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time
+        DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN.
+    type: str
+  source_engine:
+    description:
+      - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
+    choices:
+      - mysql
+    type: str
+  source_engine_version:
+    description:
+      - The version of the database that the backup files were created from.
+    type: str
+  source_region:
+    description:
+      - The region of the DB instance from which the replica is created.
+    type: str
+  storage_encrypted:
+    description:
+      - Whether the DB instance is encrypted.
+    type: bool
+  storage_type:
+    description:
+      - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances.
+    choices:
+      - standard
+      - gp2
+      - io1
+    type: str
+  tags:
+    description:
+      - A dictionary of key value pairs to assign to the DB instance.
+    type: dict
+  tde_credential_arn:
+    description:
+      - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is
+        supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted)
+        though it might slightly affect the performance of your database.
+    aliases:
+      - transparent_data_encryption_arn
+    type: str
+  tde_credential_password:
+    description:
+      - The password for the given ARN from the key store in order to access the device.
+    aliases:
+      - transparent_data_encryption_password
+    type: str
+  timezone:
+    description:
+      - The time zone of the DB instance.
+    type: str
+  use_latest_restorable_time:
+    description:
+      - Whether to restore the DB instance to the latest restorable backup time.
+      - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+    type: bool
+    aliases:
+      - restore_from_latest
+  vpc_security_group_ids:
+    description:
+      - A list of EC2 VPC security groups to associate with the DB instance.
+    type: list
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: create minimal aurora instance in default VPC and default subnet group
+  rds_instance:
+    engine: aurora
+    db_instance_identifier: ansible-test-aurora-db-instance
+    instance_type: db.t2.small
+    password: "{{ password }}"
+    username: "{{ username }}"
+    cluster_id: ansible-test-cluster  # This cluster must exist - see rds_cluster to manage it
+
+- name: Create a DB instance using the default AWS KMS encryption key
+  rds_instance:
+    id: test-encrypted-db
+    state: present
+    engine: mariadb
+    storage_encrypted: True
+    db_instance_class: db.t2.medium
+    username: "{{ username }}"
+    password: "{{ password }}"
+    allocated_storage: "{{ allocated_storage }}"
+
+- name: remove the DB instance without a final snapshot
+  rds_instance:
+    id: "{{ instance_id }}"
+    state: absent
+    skip_final_snapshot: True
+
+- name: remove the DB instance with a final snapshot
+  rds_instance:
+    id: "{{ instance_id }}"
+    state: absent
+    final_snapshot_identifier: "{{ snapshot_id }}"
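+
+# The two sketches below use illustrative identifiers that are not part of this
+# repository's tests; adjust them to your account.
+- name: create a read replica of an existing source instance
+  rds_instance:
+    id: ansible-test-replica
+    read_replica: True
+    source_db_instance_identifier: ansible-test-source
+
+- name: restore an instance to the latest restorable time of a source instance
+  rds_instance:
+    id: ansible-test-restored
+    creation_source: instance
+    source_db_instance_identifier: ansible-test-source
+    use_latest_restorable_time: True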
+'''
+
+RETURN = '''
+allocated_storage:
+  description: The allocated storage size in gibibytes. This is always 1 for aurora database engines.
+  returned: always
+  type: int
+  sample: 20
+auto_minor_version_upgrade:
+  description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
+  returned: always
+  type: bool
+  sample: true
+availability_zone:
+  description: The availability zone for the DB instance.
+  returned: always
+  type: str
+  sample: us-east-1f
+backup_retention_period:
+  description: The number of days for which automated backups are retained.
+  returned: always
+  type: int
+  sample: 1
+ca_certificate_identifier:
+  description: The identifier of the CA certificate for the DB instance.
+  returned: always
+  type: str
+  sample: rds-ca-2015
+copy_tags_to_snapshot:
+  description: Whether tags are copied from the DB instance to snapshots of the DB instance.
+  returned: always
+  type: bool
+  sample: false
+db_instance_arn:
+  description: The Amazon Resource Name (ARN) for the DB instance.
+  returned: always
+  type: str
+  sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
+db_instance_class:
+  description: The name of the compute and memory capacity class of the DB instance.
+  returned: always
+  type: str
+  sample: db.m4.large
+db_instance_identifier:
+  description: The identifier of the DB instance
+  returned: always
+  type: str
+  sample: ansible-test
+db_instance_port:
+  description: The port that the DB instance listens on.
+  returned: always
+  type: int
+  sample: 0
+db_instance_status:
+  description: The current state of this database.
+  returned: always
+  type: str
+  sample: stopped
+db_parameter_groups:
+  description: The list of DB parameter groups applied to this DB instance.
+  returned: always
+  type: complex
+  contains:
+    db_parameter_group_name:
+      description: The name of the DB parameter group.
+      returned: always
+      type: str
+      sample: default.mariadb10.0
+    parameter_apply_status:
+      description: The status of parameter updates.
+      returned: always
+      type: str
+      sample: in-sync
+db_security_groups:
+  description: A list of DB security groups associated with this DB instance.
+  returned: always
+  type: list
+  sample: []
+db_subnet_group:
+  description: The subnet group associated with the DB instance.
+  returned: always
+  type: complex
+  contains:
+    db_subnet_group_description:
+      description: The description of the DB subnet group.
+      returned: always
+      type: str
+      sample: default
+    db_subnet_group_name:
+      description: The name of the DB subnet group.
+      returned: always
+      type: str
+      sample: default
+    subnet_group_status:
+      description: The status of the DB subnet group.
+      returned: always
+      type: str
+      sample: Complete
+    subnets:
+      description: A list of Subnet elements.
+      returned: always
+      type: complex
+      contains:
+        subnet_availability_zone:
+          description: The availability zone of the subnet.
+          returned: always
+          type: complex
+          contains:
+            name:
+              description: The name of the Availability Zone.
+              returned: always
+              type: str
+              sample: us-east-1c
+        subnet_identifier:
+          description: The ID of the subnet.
+          returned: always
+          type: str
+          sample: subnet-12345678
+        subnet_status:
+          description: The status of the subnet.
+          returned: always
+          type: str
+          sample: Active
+    vpc_id:
+      description: The VpcId of the DB subnet group.
+      returned: always
+      type: str
+      sample: vpc-12345678
+dbi_resource_id:
+  description: The AWS Region-unique, immutable identifier for the DB instance.
+  returned: always
+  type: str
+  sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
+domain_memberships:
+  description: The Active Directory Domain membership records associated with the DB instance.
+  returned: always
+  type: list
+  sample: []
+endpoint:
+  description: The connection endpoint.
+  returned: always
+  type: complex
+  contains:
+    address:
+      description: The DNS address of the DB instance.
+      returned: always
+      type: str
+      sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
+    hosted_zone_id:
+      description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+      returned: always
+      type: str
+      sample: ZTR2ITUGPA61AM
+    port:
+      description: The port that the database engine is listening on.
+      returned: always
+      type: int
+      sample: 3306
+engine:
+  description: The database engine.
+  returned: always
+  type: str
+  sample: mariadb
+engine_version:
+  description: The database engine version.
+  returned: always
+  type: str
+  sample: 10.0.35
+iam_database_authentication_enabled:
+  description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
+  returned: always
+  type: bool
+  sample: false
+instance_create_time:
+  description: The date and time the DB instance was created.
+  returned: always
+  type: str
+  sample: '2018-07-04T16:48:35.332000+00:00'
+kms_key_id:
+  description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
+  returned: When storage_encrypted is true
+  type: str
+  sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
+latest_restorable_time:
+  description: The latest time to which a database can be restored with point-in-time restore.
+  returned: always
+  type: str
+  sample: '2018-07-04T16:50:50.642000+00:00'
+license_model:
+  description: The License model information for this DB instance.
+  returned: always
+  type: str
+  sample: general-public-license
+master_username:
+  description: The master username for the DB instance.
+  returned: always
+  type: str
+  sample: test
+max_allocated_storage:
+  description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+  returned: When max allocated storage is present.
+  type: int
+  sample: 100
+monitoring_interval:
+  description:
+    - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
+      0 means collecting Enhanced Monitoring metrics is disabled.
+  returned: always
+  type: int
+  sample: 0
+multi_az:
+  description: Whether the DB instance is a Multi-AZ deployment.
+  returned: always
+  type: bool
+  sample: false
+option_group_memberships:
+  description: The list of option group memberships for this DB instance.
+  returned: always
+  type: complex
+  contains:
+    option_group_name:
+      description: The name of the option group that the instance belongs to.
+      returned: always
+      type: str
+      sample: default:mariadb-10-0
+    status:
+      description: The status of the DB instance's option group membership.
+      returned: always
+      type: str
+      sample: in-sync
+pending_modified_values:
+  description: The changes to the DB instance that are pending.
+  returned: always
+  type: complex
+  contains: {}
+performance_insights_enabled:
+  description: Whether Performance Insights is enabled for the DB instance.
+  returned: always
+  type: bool
+  sample: false
+preferred_backup_window:
+  description: The daily time range during which automated backups are created if automated backups are enabled.
+  returned: always
+  type: str
+  sample: 07:01-07:31
+preferred_maintenance_window:
+  description: The weekly time range (in UTC) during which system maintenance can occur.
+  returned: always
+  type: str
+  sample: sun:09:31-sun:10:01
+publicly_accessible:
+  description:
+    - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
+      internal instance with a DNS name that resolves to a private IP address.
+  returned: always
+  type: bool
+  sample: true
+read_replica_db_instance_identifiers:
+  description: Identifiers of the Read Replicas associated with this DB instance.
+  returned: always
+  type: list
+  sample: []
+storage_encrypted:
+  description: Whether the DB instance is encrypted.
+  returned: always
+  type: bool
+  sample: false
+storage_type:
+  description: The storage type to be associated with the DB instance.
+  returned: always
+  type: str
+  sample: standard
+tags:
+  description: A dictionary of tags associated with the DB instance.
+  returned: always
+  type: complex
+  contains: {}
+vpc_security_groups:
+  description: A list of VPC security group elements that the DB instance belongs to.
+  returned: always
+  type: complex
+  contains:
+    status:
+      description: The status of the VPC security group.
+      returned: always
+      type: str
+      sample: active
+    vpc_security_group_id:
+      description: The ID of the VPC security group.
+ returned: always + type: str + sample: sg-12345678 +''' + +from ansible.module_utils._text import to_text +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.ansible.amazon.plugins.module_utils.aws.rds import ensure_tags, arg_spec_to_rds_params, call_method, get_rds_method_attribute, get_tags, get_final_identifier +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry +from ansible.module_utils.six import string_types + +from time import sleep + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_rds_method_attribute_name(instance, state, creation_source, read_replica): + method_name = None + if state == 'absent' or state == 'terminated': + if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']: + method_name = 'delete_db_instance' + else: + if instance: + method_name = 'modify_db_instance' + elif read_replica is True: + method_name = 'create_db_instance_read_replica' + elif creation_source == 'snapshot': + method_name = 'restore_db_instance_from_db_snapshot' + elif creation_source == 's3': + method_name = 'restore_db_instance_from_s3' + elif creation_source == 'instance': + method_name = 'restore_db_instance_to_point_in_time' + else: + method_name = 'create_db_instance' + return method_name + + +def get_instance(client, module, db_instance_id): + try: + for i in range(3): + try: + instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0] + instance['Tags'] = get_tags(client, module, instance['DBInstanceArn']) + if instance.get('ProcessorFeatures'): + instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures']) + if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): + instance['PendingModifiedValues']['ProcessorFeatures'] = dict( + (feature['Name'], feature['Value']) + for feature in instance['PendingModifiedValues']['ProcessorFeatures'] + ) + break + except is_boto3_error_code('DBInstanceNotFound'): + sleep(3) + else: + instance = {} + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe DB instances') + return instance + + +def get_final_snapshot(client, module, snapshot_identifier): + try: + snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier) + if len(snapshots.get('DBSnapshots', [])) == 1: + return snapshots['DBSnapshots'][0] + return {} + except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True + return {} + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') + + +def get_parameters(client, module, parameters, method_name): + if method_name == 'restore_db_instance_to_point_in_time': + parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] + + required_options = get_boto3_client_method_parameters(client, method_name, required=True) + if any([parameters.get(k) is None for k in required_options]): + 
module.fail_json(msg='To {0}, the following parameters are required: {1}'.format( + get_rds_method_attribute(method_name, module).operation_description, required_options)) + options = get_boto3_client_method_parameters(client, method_name) + parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) + + if parameters.get('ProcessorFeatures') is not None: + parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()] + + # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures) + if parameters.get('ProcessorFeatures') == [] and method_name != 'modify_db_instance': + parameters.pop('ProcessorFeatures') + + if method_name == 'create_db_instance' and parameters.get('Tags'): + parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name == 'modify_db_instance': + parameters = get_options_with_changing_values(client, module, parameters) + + return parameters + + +def get_options_with_changing_values(client, module, parameters): + instance_id = module.params['db_instance_identifier'] + purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports'] + force_update_password = module.params['force_update_password'] + port = module.params['port'] + apply_immediately = parameters.pop('ApplyImmediately', None) + cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] + + if port: + parameters['DBPortNumber'] = port + if not force_update_password: + parameters.pop('MasterUserPassword', None) + if cloudwatch_logs_enabled: + parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled + if not module.params['storage_type']: + parameters.pop('Iops', None) + + instance = get_instance(client, module, instance_id) + updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs) + updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) + parameters = updated_parameters + + if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): + if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: + parameters.pop('NewDBInstanceIdentifier') + + if parameters: + parameters['DBInstanceIdentifier'] = instance_id + if apply_immediately is not None: + parameters['ApplyImmediately'] = apply_immediately + + return parameters + + +def get_current_attributes_with_inconsistent_keys(instance): + options = {} + if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []): + current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable'] + current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable'] + options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled} + else: + options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []} + if instance.get('PendingModifiedValues', {}).get('Port'): + options['DBPortNumber'] = instance['PendingModifiedValues']['Port'] + else: + options['DBPortNumber'] = instance['Endpoint']['Port'] + if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'): + options['DBSubnetGroupName'] =
instance['PendingModifiedValues']['DBSubnetGroupName'] + else: + options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName'] + if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): + options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures'] + else: + options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {}) + options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']] + options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] + options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] + options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] + options['AllowMajorVersionUpgrade'] = None + options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] + # PerformanceInsightsEnabled is not returned on older RDS instances it seems + options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False) + options['MasterUserPassword'] = None + options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] + + return options + + +def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs): + changing_params = {} + current_options = get_current_attributes_with_inconsistent_keys(instance) + + if current_options.get("MaxAllocatedStorage") is None: + current_options["MaxAllocatedStorage"] = None + + for option in current_options: + current_option = current_options[option] + desired_option = modify_params.pop(option, None) + if desired_option is None: + continue + + # TODO: allow other purge_option module parameters rather than just checking for things to add + if isinstance(current_option, list): + if isinstance(desired_option, list): + if set(desired_option) <= set(current_option): + continue + elif isinstance(desired_option, string_types): + if desired_option in current_option: + continue + + if current_option == desired_option: + continue + + if option == 'ProcessorFeatures' and desired_option == []: + changing_params['UseDefaultProcessorFeatures'] = True + elif option == 'CloudwatchLogsExportConfiguration': + current_option = set(current_option.get('LogTypesToEnable', [])) + desired_option = set(desired_option) + format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} + format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) + if purge_cloudwatch_logs: + format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) + if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: + changing_params[option] = format_option + else: + changing_params[option] = desired_option + + return changing_params + + +def get_changing_options_with_consistent_keys(modify_params, instance): + inconsistent_parameters = list(modify_params.keys()) + changing_params = {} + + for param in modify_params: + current_option = instance.get('PendingModifiedValues', {}).get(param) + if current_option is None: + current_option = instance[param] + if modify_params[param] != current_option: + changing_params[param] = modify_params[param] + + return changing_params + + +def validate_options(client, module, instance): + state = module.params['state'] + skip_final_snapshot = module.params['skip_final_snapshot'] + snapshot_id = 
module.params['final_db_snapshot_identifier'] + modified_id = module.params['new_db_instance_identifier'] + engine = module.params['engine'] + tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) + read_replica = module.params['read_replica'] + creation_source = module.params['creation_source'] + source_instance = module.params['source_db_instance_identifier'] + if module.params['source_region'] is not None: + same_region = bool(module.params['source_region'] == module.params['region']) + else: + same_region = True + + if modified_id: + modified_instance = get_instance(client, module, modified_id) + else: + modified_instance = {} + + if modified_id and instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id)) + if modified_id and not instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id)) + if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: + module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier') + if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: + module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') + if read_replica is True and not instance and creation_source not in [None, 'instance']: + module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) + if read_replica is True and not instance and not source_instance: + module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') + + +def update_instance(client, module, instance, instance_id): + changed = False + + # Get newly created DB instance + if not instance: + instance = get_instance(client, module, instance_id) + + # Check tagging/promoting/rebooting/starting/stopping instance + changed |= ensure_tags( + client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] + ) + changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) + changed |= update_instance_state(client, module, instance, module.params['state']) + + return changed + + +def promote_replication_instance(client, module, instance, read_replica): + changed = False + if read_replica is False: + changed = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos')) + if changed: + try: + call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) + changed = True + except is_boto3_error_code('InvalidDBInstanceState') as e: + if 'DB Instance is not a read replica' in e.response['Error']['Message']: + pass + else: + raise e + return changed + + +def update_instance_state(client, module, instance, state): + changed = False + if state in ['rebooted', 'restarted']: + changed |= reboot_running_db_instance(client, module, instance) + if state in ['started', 'running', 'stopped']: + changed |= start_or_stop_instance(client, module, instance, state) + return changed + + +def reboot_running_db_instance(client, module, instance): + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if 
instance['DBInstanceStatus'] in ['stopped', 'stopping']: + call_method(client, module, 'start_db_instance', parameters) + if module.params.get('force_failover') is not None: + parameters['ForceFailover'] = module.params['force_failover'] + results, changed = call_method(client, module, 'reboot_db_instance', parameters) + return changed + + +def start_or_stop_instance(client, module, instance, state): + changed = False + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']: + if module.params['db_snapshot_identifier']: + parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] + result, changed = call_method(client, module, 'stop_db_instance', parameters) + elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: + result, changed = call_method(client, module, 'start_db_instance', parameters) + return changed + + +def main(): + arg_spec = dict( + state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), + creation_source=dict(choices=['snapshot', 's3', 'instance']), + force_update_password=dict(type='bool', default=False), + purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=True), + read_replica=dict(type='bool'), + wait=dict(type='bool', default=True), + ) + + parameter_options = dict( + allocated_storage=dict(type='int'), + allow_major_version_upgrade=dict(type='bool'), + apply_immediately=dict(type='bool', default=False), + auto_minor_version_upgrade=dict(type='bool'), + availability_zone=dict(aliases=['az', 'zone']), + backup_retention_period=dict(type='int'), + ca_certificate_identifier=dict(), + character_set_name=dict(), + copy_tags_to_snapshot=dict(type='bool'), + db_cluster_identifier=dict(aliases=['cluster_id']), + db_instance_class=dict(aliases=['class', 'instance_type']), + db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), + db_name=dict(), + db_parameter_group_name=dict(), + db_security_groups=dict(type='list'), + db_snapshot_identifier=dict(), + db_subnet_group_name=dict(aliases=['subnet_group']), + domain=dict(), + domain_iam_role_name=dict(), + enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']), + enable_iam_database_authentication=dict(type='bool'), + enable_performance_insights=dict(type='bool'), + engine=dict(), + engine_version=dict(), + final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), + force_failover=dict(type='bool'), + iops=dict(type='int'), + kms_key_id=dict(), + license_model=dict(), + master_user_password=dict(aliases=['password'], no_log=True), + master_username=dict(aliases=['username']), + max_allocated_storage=dict(type='int'), + monitoring_interval=dict(type='int'), + monitoring_role_arn=dict(), + multi_az=dict(type='bool'), + new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']), + option_group_name=dict(), + performance_insights_kms_key_id=dict(), + performance_insights_retention_period=dict(type='int'), + port=dict(type='int'), + preferred_backup_window=dict(aliases=['backup_window']), + preferred_maintenance_window=dict(aliases=['maintenance_window']), + processor_features=dict(type='dict'), + promotion_tier=dict(), + publicly_accessible=dict(type='bool'), + restore_time=dict(), + s3_bucket_name=dict(), + s3_ingestion_role_arn=dict(), + s3_prefix=dict(), + 
skip_final_snapshot=dict(type='bool', default=False), + snapshot_identifier=dict(), + source_db_instance_identifier=dict(), + source_engine=dict(choices=['mysql']), + source_engine_version=dict(), + source_region=dict(), + storage_encrypted=dict(type='bool'), + storage_type=dict(choices=['standard', 'gp2', 'io1']), + tags=dict(type='dict'), + tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']), + tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), + timezone=dict(), + use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']), + vpc_security_group_ids=dict(type='list') + ) + arg_spec.update(parameter_options) + + required_if = [ + ('engine', 'aurora', ('db_cluster_identifier',)), + ('engine', 'aurora-mysql', ('db_cluster_identifier',)), + ('engine', 'aurora-postgresql', ('db_cluster_identifier',)), + ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), + ('creation_source', 's3', ( + 's3_bucket_name', 'engine', 'master_username', 'master_user_password', + 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ] + mutually_exclusive = [ + ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'), + ('use_latest_restorable_time', 'restore_time'), + ('availability_zone', 'multi_az'), + ] + + module = AnsibleAWSModule( + argument_spec=arg_spec, + required_if=required_if, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True + ) + + if not module.boto3_at_least('1.5.0'): + module.fail_json(msg="rds_instance requires boto3 >= 1.5.0") + + # Sanitize instance identifiers + module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower() + if module.params['new_db_instance_identifier']: + module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower() + + # Sanitize processor features + if module.params['processor_features'] is not None: + module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items()) + + client = module.client('rds') + changed = False + state = module.params['state'] + instance_id = module.params['db_instance_identifier'] + instance = get_instance(client, module, instance_id) + validate_options(client, module, instance) + method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica']) + + if method_name: + raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + parameters = get_parameters(client, module, raw_parameters, method_name) + + if parameters: + result, changed = call_method(client, module, method_name, parameters) + + instance_id = get_final_identifier(method_name, module) + + # Check tagging/promoting/rebooting/starting/stopping instance + if state != 'absent' and (not module.check_mode or instance): + changed |= update_instance(client, module, instance, instance_id) + + if changed: + instance = get_instance(client, module, instance_id) + if state != 'absent' and (instance or not module.check_mode): + for attempt_to_wait in range(0, 10): + instance = get_instance(client, module, instance_id) + if instance: + break + else: + sleep(5) + + if state == 'absent' and changed and not module.params['skip_final_snapshot']: + instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier'])) +
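+    # The ProcessorFeatures dicts are keyed by AWS feature names such as coreCount and
+    # threadsPerCore. The ignore_list passed to camel_dict_to_snake_dict below protects
+    # the top-level copy from snake_case conversion; the copy nested under
+    # PendingModifiedValues is popped here and re-attached afterwards so those keys
+    # are not mangled either.
+ pending_processor_features = None + if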
instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): + pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures') + instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']) + if pending_processor_features is not None: + instance['pending_modified_values']['processor_features'] = pending_processor_features + + module.exit_json(changed=changed, **instance) + + +if __name__ == '__main__': + main() diff --git a/rds_instance_facts.py b/rds_instance_facts.py new file mode 120000 index 00000000000..f3dda867271 --- /dev/null +++ b/rds_instance_facts.py @@ -0,0 +1 @@ +rds_instance_info.py \ No newline at end of file diff --git a/rds_instance_info.py b/rds_instance_info.py new file mode 100644 index 00000000000..bbb8d8a08d0 --- /dev/null +++ b/rds_instance_info.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# Copyright (c) 2017, 2018 Michael De La Rue +# Copyright (c) 2017, 2018 Will Thames +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: rds_instance_info +short_description: obtain information about one or more RDS instances +description: + - Obtain information about one or more RDS instances. + - This module was called C(rds_instance_facts) before Ansible 2.9. The usage did not change. +options: + db_instance_identifier: + description: + - The RDS instance's unique identifier. + required: false + aliases: + - id + type: str + filters: + description: + - A filter that specifies one or more DB instances to describe. 
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) + type: dict +requirements: + - "python >= 2.7" + - "boto3" +author: + - "Will Thames (@willthames)" + - "Michael De La Rue (@mikedlr)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Get information about an instance +- rds_instance_info: + db_instance_identifier: new-database + register: new_database_info + +# Get all RDS instances +- rds_instance_info: +''' + +RETURN = ''' +instances: + description: List of RDS instances + returned: always + type: complex + contains: + allocated_storage: + description: Gigabytes of storage allocated to the database + returned: always + type: int + sample: 10 + auto_minor_version_upgrade: + description: Whether minor version upgrades happen automatically + returned: always + type: bool + sample: true + availability_zone: + description: Availability Zone in which the database resides + returned: always + type: str + sample: us-west-2b + backup_retention_period: + description: Days for which backups are retained + returned: always + type: int + sample: 7 + ca_certificate_identifier: + description: ID for the CA certificate + returned: always + type: str + sample: rds-ca-2015 + copy_tags_to_snapshot: + description: Whether DB tags should be copied to the snapshot + returned: always + type: bool + sample: false + db_instance_arn: + description: ARN of the database instance + returned: always + type: str + sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds + db_instance_class: + description: Instance class of the database instance + returned: always + type: str + sample: db.t2.small + db_instance_identifier: + description: Database instance identifier + returned: always + type: str + sample: helloworld-rds + db_instance_port: + description: Port used by the database instance + returned: always + type: int + sample: 0 + db_instance_status: + description: Status of the database instance + returned: always + type: str + sample: available + db_name: + description: Name of the database + returned: always + type: str + sample: management + db_parameter_groups: + description: List of database parameter groups + returned: always + type: complex + contains: + db_parameter_group_name: + description: Name of the database parameter group + returned: always + type: str + sample: psql-pg-helloworld + parameter_apply_status: + description: Whether the parameter group has been applied + returned: always + type: str + sample: in-sync + db_security_groups: + description: List of security groups used by the database instance + returned: always + type: list + sample: [] + db_subnet_group: + description: list of subnet groups + returned: always + type: complex + contains: + db_subnet_group_description: + description: Description of the DB subnet group + returned: always + type: str + sample: My database subnet group + db_subnet_group_name: + description: Name of the database subnet group + returned: always + type: str + sample: my-subnet-group + subnet_group_status: + description: Subnet group status + returned: always + type: str + sample: Complete + subnets: + description: List of subnets in the subnet group + returned: always + type: complex + contains: + subnet_availability_zone: + description: Availability zone of the subnet + returned: always + type: complex + contains: + name: + description: Name of the availability zone + returned: always + type: str + sample: us-west-2c + subnet_identifier: + description: 
Subnet ID + returned: always + type: str + sample: subnet-abcd1234 + subnet_status: + description: Subnet status + returned: always + type: str + sample: Active + vpc_id: + description: VPC id of the subnet group + returned: always + type: str + sample: vpc-abcd1234 + dbi_resource_id: + description: AWS Region-unique, immutable identifier for the DB instance + returned: always + type: str + sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA + domain_memberships: + description: List of domain memberships + returned: always + type: list + sample: [] + endpoint: + description: Database endpoint + returned: always + type: complex + contains: + address: + description: Database endpoint address + returned: always + type: str + sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com + hosted_zone_id: + description: Route53 hosted zone ID + returned: always + type: str + sample: Z1PABCD0000000 + port: + description: Database endpoint port + returned: always + type: int + sample: 5432 + engine: + description: Database engine + returned: always + type: str + sample: postgres + engine_version: + description: Database engine version + returned: always + type: str + sample: 9.5.10 + iam_database_authentication_enabled: + description: Whether database authentication through IAM is enabled + returned: always + type: bool + sample: false + instance_create_time: + description: Date and time the instance was created + returned: always + type: str + sample: '2017-10-10T04:00:07.434000+00:00' + kms_key_id: + description: KMS Key ID + returned: always + type: str + sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab + latest_restorable_time: + description: Latest time to which a database can be restored with point-in-time restore + returned: always + type: str + sample: '2018-05-17T00:03:56+00:00' + license_model: + description: License model + returned: always + type: str + sample: postgresql-license + master_username: + description: Database master username + returned: always + type: str + sample: dbadmin + monitoring_interval: + description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance + returned: always + type: int + sample: 0 + multi_az: + description: Whether Multi-AZ is on + returned: always + type: bool + sample: false + option_group_memberships: + description: List of option groups + returned: always + type: complex + contains: + option_group_name: + description: Option group name + returned: always + type: str + sample: default:postgres-9-5 + status: + description: Status of option group + returned: always + type: str + sample: in-sync + pending_modified_values: + description: Modified values pending application + returned: always + type: complex + contains: {} + performance_insights_enabled: + description: Whether performance insights are enabled + returned: always + type: bool + sample: false + preferred_backup_window: + description: Preferred backup window + returned: always + type: str + sample: 04:00-05:00 + preferred_maintenance_window: + description: Preferred maintenance window + returned: always + type: str + sample: mon:05:00-mon:05:30 + publicly_accessible: + description: Whether the DB is publicly accessible + returned: always + type: bool + sample: false + read_replica_db_instance_identifiers: + description: List of database instance read replicas + returned: always + type: list + sample: [] + storage_encrypted: + description: Whether the storage is encrypted + returned: always + type: bool + sample: 
true + storage_type: + description: Storage type of the Database instance + returned: always + type: str + sample: gp2 + tags: + description: Tags used by the database instance + returned: always + type: complex + contains: {} + vpc_security_groups: + description: List of VPC security groups + returned: always + type: complex + contains: + status: + description: Status of the VPC security group + returned: always + type: str + sample: active + vpc_security_group_id: + description: VPC Security Group ID + returned: always + type: str + sample: sg-abcd1234 +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, camel_dict_to_snake_dict + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +def instance_info(module, conn): + instance_name = module.params.get('db_instance_identifier') + filters = module.params.get('filters') + + params = dict() + if instance_name: + params['DBInstanceIdentifier'] = instance_name + if filters: + params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + + paginator = conn.get_paginator('describe_db_instances') + try: + results = paginator.paginate(**params).build_full_result()['DBInstances'] + except is_boto3_error_code('DBInstanceNotFound'): + results = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Couldn't get instance information") + + for instance in results: + try: + instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], + aws_retry=True)['TagList']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier']) + + return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results]) + + +def main(): + argument_spec = dict( + db_instance_identifier=dict(aliases=['id']), + filters=dict(type='dict') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'rds_instance_facts': + module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", version='2.13') + + conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + module.exit_json(**instance_info(module, conn)) + + +if __name__ == '__main__': + main() diff --git a/rds_param_group.py b/rds_param_group.py new file mode 100644 index 00000000000..6f9f6e19f2a --- /dev/null +++ b/rds_param_group.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rds_param_group +short_description: manage RDS parameter groups +description: + - Creates, modifies, and deletes RDS parameter groups. +requirements: [ boto3 ] +options: + state: + description: + - Specifies whether the group should be present or absent. 
+ required: true + choices: [ 'present', 'absent' ] + type: str + name: + description: + - Database parameter group identifier. + required: true + type: str + description: + description: + - Database parameter group description. Only set when a new group is added. + type: str + engine: + description: + - The type of database for this group. + - Please use the following command to get a list of all supported db engines and their respective versions. + - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"' + - Required for I(state=present). + type: str + immediate: + description: + - Whether to apply the changes immediately, or after the next reboot of any associated instances. + aliases: + - apply_immediately + type: bool + params: + description: + - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), + or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. + aliases: [parameters] + type: dict + tags: + description: + - Dictionary of tags to attach to the parameter group. + type: dict + purge_tags: + description: + - Whether or not to remove tags that do not appear in the I(tags) parameter. + type: bool + default: False +author: + - "Scott Anderson (@tastychutney)" + - "Will Thames (@willthames)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 +- rds_param_group: + state: present + name: norwegian-blue + description: 'My Fancy Ex Parrot Group' + engine: 'mysql5.6' + params: + auto_increment_increment: "42K" + tags: + Environment: production + Application: parrot + +# Remove a parameter group +- rds_param_group: + state: absent + name: norwegian-blue
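+
+# Apply a parameter change immediately rather than waiting for the next reboot
+# (an illustrative variation of the example above; max_connections and its value
+# are hypothetical, and any parameter valid for the engine family works the same way)
+- rds_param_group:
+    state: present
+    name: norwegian-blue
+    description: 'My Fancy Ex Parrot Group'
+    engine: 'mysql5.6'
+    params:
+      max_connections: 500
+    immediate: yes
+''' + +RETURN = ''' +db_parameter_group_name: + description: Name of DB parameter group + type: str + returned: when state is present +db_parameter_group_family: + description: DB parameter group family that this DB parameter group is compatible with.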
+ type: str + returned: when state is present +db_parameter_group_arn: + description: ARN of the DB parameter group + type: str + returned: when state is present +description: + description: description of the DB parameter group + type: str + returned: when state is present +errors: + description: list of errors from attempting to modify parameters that are not modifiable + type: list + returned: when state is present +tags: + description: dictionary of tags + type: dict + returned: when state is present +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, HAS_BOTO3, compare_aws_tags +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native + +import traceback + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +INT_MODIFIERS = { + 'K': 1024, + 'M': pow(1024, 2), + 'G': pow(1024, 3), + 'T': pow(1024, 4), +} + + +def convert_parameter(param, value): + """ + Allows setting parameters with 10M = 10* 1024 * 1024 and so on. + """ + converted_value = value + + if param['DataType'] == 'integer': + if isinstance(value, string_types): + try: + for modifier in INT_MODIFIERS.keys(): + if value.endswith(modifier): + converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] + except ValueError: + # may be based on a variable (ie. {foo*3/4}) so + # just pass it on through to boto + pass + elif isinstance(value, bool): + converted_value = 1 if value else 0 + + elif param['DataType'] == 'boolean': + if isinstance(value, string_types): + converted_value = to_native(value) in BOOLEANS_TRUE + # convert True/False to 1/0 + converted_value = 1 if converted_value else 0 + return str(converted_value) + + +def update_parameters(module, connection): + groupname = module.params['name'] + desired = module.params['params'] + apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot' + errors = [] + modify_list = [] + parameters_paginator = connection.get_paginator('describe_db_parameters') + existing = parameters_paginator.paginate(DBParameterGroupName=groupname).build_full_result()['Parameters'] + lookup = dict((param['ParameterName'], param) for param in existing) + for param_key, param_value in desired.items(): + if param_key not in lookup: + errors.append("Parameter %s is not an available parameter for the %s engine" % + (param_key, module.params.get('engine'))) + else: + converted_value = convert_parameter(lookup[param_key], param_value) + # engine-default parameters do not have a ParameterValue, so we'll always override those. 
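+            # For example (illustrative values): for an integer parameter a desired value of
+            # "10M" has already been expanded by convert_parameter() to str(10 * 1024 * 1024),
+            # i.e. "10485760", so the comparison below is always string to string.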
+ if converted_value != lookup[param_key].get('ParameterValue'): + if lookup[param_key]['IsModifiable']: + modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method)) + else: + errors.append("Parameter %s is not modifiable" % param_key) + + # modify_db_parameter_group accepts at most 20 parameters per call, so apply the changes + # in slices of 20; zip_longest pads the final slice with None entries, which are filtered out below + if modify_list: + try: + from itertools import izip_longest as zip_longest # python 2 + except ImportError: + from itertools import zip_longest # python 3 + for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None): + non_empty_slice = [item for item in modify_slice if item] + try: + connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't update parameters: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + return True, errors + return False, errors + + +def update_tags(module, connection, group, tags): + changed = False + existing_tags = connection.list_tags_for_resource(ResourceName=group['DBParameterGroupArn'])['TagList'] + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), + tags, module.params['purge_tags']) + if to_update: + try: + connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'], + Tags=ansible_dict_to_boto3_tag_list(to_update)) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't add tags to parameter group: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + except botocore.exceptions.ParamValidationError as e: + # Usually a tag value has been passed as an int or bool, needs to be a string + # The AWS exception message is reasonably ok for this purpose + module.fail_json(msg="Couldn't add tags to parameter group: %s."
% str(e), + exception=traceback.format_exc()) + if to_delete: + try: + connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'], + TagKeys=to_delete) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't remove tags from parameter group: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + return changed + + +def ensure_present(module, connection): + groupname = module.params['name'] + tags = module.params.get('tags') + changed = False + errors = [] + try: + response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'DBParameterGroupNotFound': + response = None + else: + module.fail_json(msg="Couldn't access parameter group information: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + if not response: + params = dict(DBParameterGroupName=groupname, + DBParameterGroupFamily=module.params['engine'], + Description=module.params['description']) + if tags: + params['Tags'] = ansible_dict_to_boto3_tag_list(tags) + try: + response = connection.create_db_parameter_group(**params) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't create parameter group: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + else: + group = response['DBParameterGroups'][0] + if tags: + changed = update_tags(module, connection, group, tags) + + if module.params.get('params'): + params_changed, errors = update_parameters(module, connection) + changed = changed or params_changed + + try: + response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) + group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't obtain parameter group information: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + try: + tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList'] + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't obtain parameter group tags: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + group['tags'] = boto3_tag_list_to_ansible_dict(tags) + + module.exit_json(changed=changed, errors=errors, **group) + + +def ensure_absent(module, connection): + group = module.params['name'] + try: + response = connection.describe_db_parameter_groups(DBParameterGroupName=group) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'DBParameterGroupNotFound': + module.exit_json(changed=False) + else: + module.fail_json(msg="Couldn't access parameter group information: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + try: + response = connection.delete_db_parameter_group(DBParameterGroupName=group) + module.exit_json(changed=True) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Couldn't delete parameter group: %s" % str(e), + exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True), + engine=dict(), + description=dict(), + 
params=dict(aliases=['parameters'], type='dict'), + immediate=dict(type='bool', aliases=['apply_immediately']), + tags=dict(type='dict', default={}), + purge_tags=dict(type='bool', default=False) + ) + ) + module = AnsibleModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['description', 'engine']]]) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required for this module') + + # Retrieve any AWS settings from the environment. + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if not region: + module.fail_json(msg="Region must be present") + + try: + conn = boto3_conn(module, conn_type='client', resource='rds', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Couldn't connect to AWS: %s" % str(e)) + + state = module.params.get('state') + if state == 'present': + ensure_present(module, conn) + if state == 'absent': + ensure_absent(module, conn) + + +if __name__ == '__main__': + main() diff --git a/rds_snapshot.py b/rds_snapshot.py new file mode 100644 index 00000000000..939948678af --- /dev/null +++ b/rds_snapshot.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# Copyright (c) 2014 Ansible Project +# Copyright (c) 2017, 2018, 2019 Will Thames +# Copyright (c) 2017, 2018 Michael De La Rue +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: rds_snapshot +short_description: manage Amazon RDS snapshots +description: + - Creates or deletes RDS snapshots. +options: + state: + description: + - Specify the desired state of the snapshot. + default: present + choices: [ 'present', 'absent'] + type: str + db_snapshot_identifier: + description: + - The snapshot to manage. + required: true + aliases: + - id + - snapshot_id + type: str + db_instance_identifier: + description: + - Database instance identifier. Required when state is present. + aliases: + - instance_id + type: str + wait: + description: + - Whether or not to wait for snapshot creation or deletion. + type: bool + default: 'no' + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + tags: + description: + - Dictionary of tags to apply to the snapshot. + type: dict + purge_tags: + description: + - Whether to remove tags not present in the C(tags) parameter. + default: True + type: bool +requirements: + - "python >= 2.6" + - "boto3" +author: + - "Will Thames (@willthames)" + - "Michael De La Rue (@mikedlr)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Create snapshot +- rds_snapshot: + db_instance_identifier: new-database + db_snapshot_identifier: new-database-snapshot + +# Delete snapshot +- rds_snapshot: + db_snapshot_identifier: new-database-snapshot + state: absent
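+
+# Create a snapshot and block until it is available
+# (an illustrative variation of the example above; wait_timeout is in seconds
+# and only applies when wait is enabled)
+- rds_snapshot:
+    db_instance_identifier: new-database
+    db_snapshot_identifier: new-database-snapshot
+    wait: yes
+    wait_timeout: 600
+''' + +RETURN = ''' +allocated_storage: + description: How much storage is allocated in GB. + returned: always + type: int + sample: 20 +availability_zone: + description: Availability zone of the database from which the snapshot was created. + returned: always + type: str + sample: us-west-2a +db_instance_identifier: + description: Database from which the snapshot was created.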
+ returned: always + type: str + sample: ansible-test-16638696 +db_snapshot_arn: + description: Amazon Resource Name for the snapshot. + returned: always + type: str + sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot +db_snapshot_identifier: + description: Name of the snapshot. + returned: always + type: str + sample: ansible-test-16638696-test-snapshot +dbi_resource_id: + description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region. + returned: always + type: str + sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U +encrypted: + description: Whether the snapshot is encrypted. + returned: always + type: bool + sample: false +engine: + description: Engine of the database from which the snapshot was created. + returned: always + type: str + sample: mariadb +engine_version: + description: Version of the database from which the snapshot was created. + returned: always + type: str + sample: 10.2.21 +iam_database_authentication_enabled: + description: Whether IAM database authentication is enabled. + returned: always + type: bool + sample: false +instance_create_time: + description: Creation time of the instance from which the snapshot was created. + returned: always + type: str + sample: '2019-06-15T10:15:56.221000+00:00' +license_model: + description: License model of the database. + returned: always + type: str + sample: general-public-license +master_username: + description: Master username of the database. + returned: always + type: str + sample: test +option_group_name: + description: Option group of the database. + returned: always + type: str + sample: default:mariadb-10-2 +percent_progress: + description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot. + returned: always + type: int + sample: 100 +port: + description: Port on which the database is listening. + returned: always + type: int + sample: 3306 +processor_features: + description: List of processor features of the database. + returned: always + type: list + sample: [] +snapshot_create_time: + description: Creation time of the snapshot. + returned: always + type: str + sample: '2019-06-15T10:46:23.776000+00:00' +snapshot_type: + description: How the snapshot was created (always manual for this module!). + returned: always + type: str + sample: manual +status: + description: Status of the snapshot. + returned: always + type: str + sample: available +storage_type: + description: Storage type of the database. + returned: always + type: str + sample: gp2 +tags: + description: Tags applied to the snapshot. + returned: always + type: complex + contains: {} +vpc_id: + description: ID of the VPC in which the DB lives. 
+ returned: always + type: str + sample: vpc-09ff232e222710ae0 +''' + +try: + import botocore +except ImportError: + pass # protected by AnsibleAWSModule + +# import module snippets +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list + + +def get_snapshot(client, module, snapshot_id): + try: + response = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id) + except client.exceptions.DBSnapshotNotFoundFault: + return None + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + return response['DBSnapshots'][0] + + +def snapshot_to_facts(client, module, snapshot): + try: + snapshot['Tags'] = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'], + aws_retry=True)['TagList']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['DBSnapshotIdentifier']) + except KeyError: + module.fail_json(msg=str(snapshot)) + + return camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) + + +def wait_for_snapshot_status(client, module, db_snapshot_id, waiter_name): + if not module.params['wait']: + return + timeout = module.params['wait_timeout'] + try: + # Poll every 5 seconds; MaxAttempts is scaled so the waiter runs for roughly wait_timeout seconds + client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id, + WaiterConfig=dict( + Delay=5, + MaxAttempts=int((timeout + 2.5) / 5) + )) + except botocore.exceptions.WaiterError as e: + if waiter_name == 'db_snapshot_deleted': + msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) + else: + msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) + module.fail_json_aws(e, msg=msg) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id)) + + +def ensure_snapshot_absent(client, module): + snapshot_name = module.params.get('db_snapshot_identifier') + changed = False + + snapshot = get_snapshot(client, module, snapshot_name) + if snapshot and snapshot['Status'] != 'deleting': + try: + client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="trying to delete snapshot") + + # If we're not waiting for a delete to complete then we're all done + # so just return + if not snapshot or not module.params.get('wait'): + return dict(changed=changed) + try: + wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_deleted') + return dict(changed=changed) + except client.exceptions.DBSnapshotNotFoundFault: + return dict(changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "awaiting snapshot deletion") + +
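+# compare_aws_tags() returns the tags that need setting and the tag keys that need
+# removing. As a hypothetical illustration: with existing tags {'Name': 'db1', 'Env': 'dev'},
+# desired tags {'Env': 'prod'} and purge_tags=True it returns ({'Env': 'prod'}, ['Name']);
+# with purge_tags=False the second element is empty and 'Name' is left in place.
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): + if tags is None: + return False + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags) + changed = bool(tags_to_add or tags_to_remove) + if tags_to_add: + try: +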
client.add_tags_to_resource(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't add tags to snapshot {0}".format(resource_arn)) + if tags_to_remove: + try: + client.remove_tags_from_resource(ResourceName=resource_arn, TagKeys=tags_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't remove tags from snapshot {0}".format(resource_arn)) + return changed + + +def ensure_snapshot_present(client, module): + db_instance_identifier = module.params.get('db_instance_identifier') + snapshot_name = module.params.get('db_snapshot_identifier') + changed = False + snapshot = get_snapshot(client, module, snapshot_name) + if not snapshot: + try: + snapshot = client.create_db_snapshot(DBSnapshotIdentifier=snapshot_name, + DBInstanceIdentifier=db_instance_identifier)['DBSnapshot'] + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="trying to create db snapshot") + + if module.params.get('wait'): + wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_available') + + existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'], + aws_retry=True)['TagList']) + desired_tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], existing_tags, desired_tags, purge_tags) + + snapshot = get_snapshot(client, module, snapshot_name) + + return dict(changed=changed, **snapshot_to_facts(client, module, snapshot)) + + +def main(): + + module = AnsibleAWSModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), + db_instance_identifier=dict(aliases=['instance_id']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), + ), + required_if=[['state', 'present', ['db_instance_identifier']]] + ) + + client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['DBSnapshotNotFound'])) + + if module.params['state'] == 'absent': + ret_dict = ensure_snapshot_absent(client, module) + else: + ret_dict = ensure_snapshot_present(client, module) + + module.exit_json(**ret_dict) + + +if __name__ == '__main__': + main() diff --git a/rds_snapshot_facts.py b/rds_snapshot_facts.py new file mode 120000 index 00000000000..7281d3b696f --- /dev/null +++ b/rds_snapshot_facts.py @@ -0,0 +1 @@ +rds_snapshot_info.py \ No newline at end of file diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py new file mode 100644 index 00000000000..a4335195e4c --- /dev/null +++ b/rds_snapshot_info.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# Copyright (c) 2014-2017 Ansible Project +# Copyright (c) 2017, 2018 Will Thames +# Copyright (c) 2017, 2018 Michael De La Rue +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: rds_snapshot_info +short_description: obtain information about one 
or more RDS snapshots
+description:
+  - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
+  - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
+  - This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
+options:
+  db_snapshot_identifier:
+    description:
+      - Name of an RDS (unclustered) snapshot.
+      - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    aliases:
+      - snapshot_name
+    type: str
+  db_instance_identifier:
+    description:
+      - RDS instance name for which to find snapshots.
+      - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    type: str
+  db_cluster_identifier:
+    description:
+      - RDS cluster name for which to find snapshots.
+      - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    type: str
+  db_cluster_snapshot_identifier:
+    description:
+      - Name of an RDS cluster snapshot.
+      - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
+    required: false
+    type: str
+  snapshot_type:
+    description:
+      - Type of snapshot to find.
+      - By default both automated and manual snapshots will be returned.
+    required: false
+    choices: ['automated', 'manual', 'shared', 'public']
+    type: str
+requirements:
+  - "python >= 2.6"
+  - "boto3"
+author:
+  - "Will Thames (@willthames)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Get information about a snapshot
+- rds_snapshot_info:
+    db_snapshot_identifier: snapshot_name
+  register: new_database_info
+
+# Get all RDS snapshots for an RDS instance
+- rds_snapshot_info:
+    db_instance_identifier: helloworld-rds-master
+'''
+
+RETURN = '''
+snapshots:
+  description: List of non-clustered snapshots
+  returned: When cluster parameters are not passed
+  type: complex
+  contains:
+    allocated_storage:
+      description: How many gigabytes of storage are allocated
+      returned: always
+      type: int
+      sample: 10
+    availability_zone:
+      description: The availability zone of the database from which the snapshot was taken
+      returned: always
+      type: str
+      sample: us-west-2b
+    db_instance_identifier:
+      description: Database instance identifier
+      returned: always
+      type: str
+      sample: hello-world-rds
+    db_snapshot_arn:
+      description: Snapshot ARN
+      returned: always
+      type: str
+      sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
+    db_snapshot_identifier:
+      description: Snapshot name
+      returned: always
+      type: str
+      sample: rds:hello-world-rds-us1-2018-05-16-04-03
+    encrypted:
+      description: Whether the snapshot was encrypted
+      returned: always
+      type: bool
+      sample: true
+    engine:
+      description: Database engine
+      returned: always
+      type: str
+      sample: postgres
+    engine_version:
+      description: Database engine version
+      returned: always
+      type: str
+      sample: 9.5.10
+    iam_database_authentication_enabled:
+      description: Whether database authentication through IAM is enabled
+      returned: always
+      type: bool
+      sample: false
+    instance_create_time:
+      description: Time the instance was created
+      returned: always
+      type: str
+      sample: '2017-10-10T04:00:07.434000+00:00'
+    kms_key_id:
+      description: ID of the KMS Key
encrypting the snapshot + returned: always + type: str + sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab + license_model: + description: License model + returned: always + type: str + sample: postgresql-license + master_username: + description: Database master username + returned: always + type: str + sample: dbadmin + option_group_name: + description: Database option group name + returned: always + type: str + sample: default:postgres-9-5 + percent_progress: + description: Percent progress of snapshot + returned: always + type: int + sample: 100 + snapshot_create_time: + description: Time snapshot was created + returned: always + type: str + sample: '2018-05-16T04:03:33.871000+00:00' + snapshot_type: + description: Type of snapshot + returned: always + type: str + sample: automated + status: + description: Status of snapshot + returned: always + type: str + sample: available + storage_type: + description: Storage type of underlying DB + returned: always + type: str + sample: gp2 + tags: + description: Snapshot tags + returned: when snapshot is not shared + type: complex + contains: {} + vpc_id: + description: ID of VPC containing the DB + returned: always + type: str + sample: vpc-abcd1234 +cluster_snapshots: + description: List of cluster snapshots + returned: always + type: complex + contains: + allocated_storage: + description: How many gigabytes of storage are allocated + returned: always + type: int + sample: 1 + availability_zones: + description: The availability zones of the database from which the snapshot was taken + returned: always + type: list + sample: + - ca-central-1a + - ca-central-1b + cluster_create_time: + description: Date and time the cluster was created + returned: always + type: str + sample: '2018-05-17T00:13:40.223000+00:00' + db_cluster_identifier: + description: Database cluster identifier + returned: always + type: str + sample: test-aurora-cluster + db_cluster_snapshot_arn: + description: ARN of the database snapshot + returned: always + type: str + sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot + db_cluster_snapshot_identifier: + description: Snapshot identifier + returned: always + type: str + sample: test-aurora-snapshot + engine: + description: Database engine + returned: always + type: str + sample: aurora + engine_version: + description: Database engine version + returned: always + type: str + sample: 5.6.10a + iam_database_authentication_enabled: + description: Whether database authentication through IAM is enabled + returned: always + type: bool + sample: false + kms_key_id: + description: ID of the KMS Key encrypting the snapshot + returned: always + type: str + sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab + license_model: + description: License model + returned: always + type: str + sample: aurora + master_username: + description: Database master username + returned: always + type: str + sample: shertel + percent_progress: + description: Percent progress of snapshot + returned: always + type: int + sample: 0 + port: + description: Database port + returned: always + type: int + sample: 0 + snapshot_create_time: + description: Date and time when the snapshot was created + returned: always + type: str + sample: '2018-05-17T00:23:23.731000+00:00' + snapshot_type: + description: Type of snapshot + returned: always + type: str + sample: manual + status: + description: Status of snapshot + returned: always + type: str + sample: creating + 
storage_encrypted: + description: Whether the snapshot is encrypted + returned: always + type: bool + sample: true + tags: + description: Tags of the snapshot + returned: when snapshot is not shared + type: complex + contains: {} + vpc_id: + description: VPC of the database + returned: always + type: str + sample: vpc-abcd1234 +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict + +try: + import botocore +except Exception: + pass # caught by AnsibleAWSModule + + +def common_snapshot_info(module, conn, method, prefix, params): + paginator = conn.get_paginator(method) + try: + results = paginator.paginate(**params).build_full_result()['%ss' % prefix] + except is_boto3_error_code('%sNotFound' % prefix): + results = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "trying to get snapshot information") + + for snapshot in results: + try: + if snapshot['SnapshotType'] != 'shared': + snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], + aws_retry=True)['TagList']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix]) + + return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results] + + +def cluster_snapshot_info(module, conn): + snapshot_name = module.params.get('db_cluster_snapshot_identifier') + snapshot_type = module.params.get('snapshot_type') + instance_name = module.params.get('db_cluster_identifier') + + params = dict() + if snapshot_name: + params['DBClusterSnapshotIdentifier'] = snapshot_name + if instance_name: + params['DBClusterIdentifier'] = instance_name + if snapshot_type: + params['SnapshotType'] = snapshot_type + if snapshot_type == 'public': + params['IncludePublic'] = True + elif snapshot_type == 'shared': + params['IncludeShared'] = True + + return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params) + + +def standalone_snapshot_info(module, conn): + snapshot_name = module.params.get('db_snapshot_identifier') + snapshot_type = module.params.get('snapshot_type') + instance_name = module.params.get('db_instance_identifier') + + params = dict() + if snapshot_name: + params['DBSnapshotIdentifier'] = snapshot_name + if instance_name: + params['DBInstanceIdentifier'] = instance_name + if snapshot_type: + params['SnapshotType'] = snapshot_type + if snapshot_type == 'public': + params['IncludePublic'] = True + elif snapshot_type == 'shared': + params['IncludeShared'] = True + + return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params) + + +def main(): + argument_spec = dict( + db_snapshot_identifier=dict(aliases=['snapshot_name']), + db_instance_identifier=dict(), + db_cluster_identifier=dict(), + db_cluster_snapshot_identifier=dict(), + snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] + ) + if module._name == 'rds_snapshot_facts': + 
module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13') + + conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + results = dict() + if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']: + results['snapshots'] = standalone_snapshot_info(module, conn) + if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']: + results['cluster_snapshots'] = cluster_snapshot_info(module, conn) + + module.exit_json(changed=False, **results) + + +if __name__ == '__main__': + main() diff --git a/rds_subnet_group.py b/rds_subnet_group.py new file mode 100644 index 00000000000..1bba28dfbce --- /dev/null +++ b/rds_subnet_group.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rds_subnet_group +short_description: manage RDS database subnet groups +description: + - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. +options: + state: + description: + - Specifies whether the subnet should be present or absent. + required: true + choices: [ 'present' , 'absent' ] + type: str + name: + description: + - Database subnet group identifier. + required: true + type: str + description: + description: + - Database subnet group description. + - Required when I(state=present). + type: str + subnets: + description: + - List of subnet IDs that make up the database subnet group. + - Required when I(state=present). 
+ type: list +author: "Scott Anderson (@tastychutney)" +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Add or change a subnet group +- rds_subnet_group: + state: present + name: norwegian-blue + description: My Fancy Ex Parrot Subnet Group + subnets: + - subnet-aaaaaaaa + - subnet-bbbbbbbb + +# Remove a subnet group +- rds_subnet_group: + state: absent + name: norwegian-blue +''' + +RETURN = ''' +subnet_group: + description: Dictionary of DB subnet group values + returned: I(state=present) + type: complex + contains: + name: + description: The name of the DB subnet group + returned: I(state=present) + type: str + description: + description: The description of the DB subnet group + returned: I(state=present) + type: str + vpc_id: + description: The VpcId of the DB subnet group + returned: I(state=present) + type: str + subnet_ids: + description: Contains a list of Subnet IDs + returned: I(state=present) + type: list + status: + description: The status of the DB subnet group + returned: I(state=present) + type: str +''' + +try: + import boto.rds + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_subnet_group_info(subnet_group): + return dict( + name=subnet_group.name, + description=subnet_group.description, + vpc_id=subnet_group.vpc_id, + subnet_ids=subnet_group.subnet_ids, + status=subnet_group.status + ) + + +def create_result(changed, subnet_group=None): + if subnet_group is None: + return dict( + changed=changed + ) + else: + return dict( + changed=changed, + subnet_group=get_subnet_group_info(subnet_group) + ) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True), + description=dict(required=False), + subnets=dict(required=False, type='list'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + state = module.params.get('state') + group_name = module.params.get('name').lower() + group_description = module.params.get('description') + group_subnets = module.params.get('subnets') or {} + + if state == 'present': + for required in ['description', 'subnets']: + if not module.params.get(required): + module.fail_json(msg=str("Parameter %s required for state='present'" % required)) + else: + for not_allowed in ['description', 'subnets']: + if module.params.get(not_allowed): + module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed)) + + # Retrieve any AWS settings from the environment. 
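+    # (get_aws_connection_info() below merges module parameters, AWS_*/EC2_* environment
+    # variables and boto configuration, returning the region plus the credential kwargs
+    # that connect_to_aws() needs to build the boto2 RDS connection.)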
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+    if not region:
+        module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+
+    try:
+        conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
+    except BotoServerError as e:
+        module.fail_json(msg=e.error_message)
+
+    try:
+        exists = False
+        result = create_result(False)
+
+        try:
+            matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
+            exists = len(matching_groups) > 0
+        except BotoServerError as e:
+            if e.error_code != 'DBSubnetGroupNotFoundFault':
+                module.fail_json(msg=e.error_message)
+
+        if state == 'absent':
+            if exists:
+                conn.delete_db_subnet_group(group_name)
+                result = create_result(True)
+        else:
+            if not exists:
+                new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
+                result = create_result(True, new_group)
+            else:
+                # Sort the subnet IDs before comparing them
+                matching_groups[0].subnet_ids.sort()
+                group_subnets.sort()
+                if (matching_groups[0].name != group_name or
+                        matching_groups[0].description != group_description or
+                        matching_groups[0].subnet_ids != group_subnets):
+                    changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
+                    result = create_result(True, changed_group)
+                else:
+                    result = create_result(False, matching_groups[0])
+    except BotoServerError as e:
+        module.fail_json(msg=e.error_message)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/redshift.py b/redshift.py
new file mode 100644
index 00000000000..993e98286eb
--- /dev/null
+++ b/redshift.py
@@ -0,0 +1,623 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+author:
+  - "Jens Carl (@j-carl), Hothead Games Inc."
+  - "Rafael Driutti (@rafaeldriutti)"
+module: redshift
+short_description: create, delete, or modify an Amazon Redshift cluster
+description:
+  - Creates, deletes, or modifies Amazon Redshift clusters.
+options:
+  command:
+    description:
+      - Specifies the action to take.
+    required: true
+    choices: [ 'create', 'facts', 'delete', 'modify' ]
+    type: str
+  identifier:
+    description:
+      - Redshift cluster identifier.
+    required: true
+    type: str
+  node_type:
+    description:
+      - The node type of the cluster.
+      - Required when I(command=create).
+    choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc2.large',
+              'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
+    type: str
+  username:
+    description:
+      - Master database username.
+      - Used only when I(command=create).
+    type: str
+  password:
+    description:
+      - Master database password.
+      - Used only when I(command=create).
+    type: str
+  cluster_type:
+    description:
+      - The type of cluster.
+    choices: ['multi-node', 'single-node']
+    default: 'single-node'
+    type: str
+  db_name:
+    description:
+      - Name of the database.
+    type: str
+  availability_zone:
+    description:
+      - Availability zone in which to launch the cluster.
+    aliases: ['zone', 'aws_zone']
+    type: str
+  number_of_nodes:
+    description:
+      - Number of nodes.
+      - Only used when I(cluster_type=multi-node).
+    type: int
+  cluster_subnet_group_name:
+    description:
+      - Which subnet group to place the cluster in.
+    aliases: ['subnet']
+    type: str
+  cluster_security_groups:
+    description:
+      - Which security groups the cluster belongs to.
+    type: list
+    elements: str
+    aliases: ['security_groups']
+  vpc_security_group_ids:
+    description:
+      - VPC security group IDs.
+    aliases: ['vpc_security_groups']
+    type: list
+    elements: str
+  skip_final_cluster_snapshot:
+    description:
+      - Skip a final snapshot before deleting the cluster.
+      - Used only when I(command=delete).
+    aliases: ['skip_final_snapshot']
+    default: false
+    type: bool
+  final_cluster_snapshot_identifier:
+    description:
+      - Identifier of the final snapshot to be created before deleting the cluster.
+      - If this parameter is provided, I(skip_final_cluster_snapshot) must be C(false).
+      - Used only when I(command=delete).
+    aliases: ['final_snapshot_id']
+    type: str
+  preferred_maintenance_window:
+    description:
+      - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi).  (Example: C(Mon:22:00-Mon:23:15))'
+      - Times are specified in UTC.
+      - If not specified then a random 30 minute maintenance window is assigned.
+    aliases: ['maintance_window', 'maint_window']
+    type: str
+  cluster_parameter_group_name:
+    description:
+      - Name of the cluster parameter group.
+    aliases: ['param_group_name']
+    type: str
+  automated_snapshot_retention_period:
+    description:
+      - The number of days that automated snapshots are retained.
+    aliases: ['retention_period']
+    type: int
+  port:
+    description:
+      - Which port the cluster is listening on.
+    type: int
+  cluster_version:
+    description:
+      - Which version the cluster should have.
+    aliases: ['version']
+    choices: ['1.0']
+    type: str
+  allow_version_upgrade:
+    description:
+      - When I(allow_version_upgrade=true) the cluster may be automatically
+        upgraded during the maintenance window.
+    aliases: ['version_upgrade']
+    default: true
+    type: bool
+  publicly_accessible:
+    description:
+      - Whether the cluster is publicly accessible.
+    default: false
+    type: bool
+  encrypted:
+    description:
+      - Whether the cluster is encrypted.
+    default: false
+    type: bool
+  elastic_ip:
+    description:
+      - An Elastic IP to use for the cluster.
+    type: str
+  new_cluster_identifier:
+    description:
+      - Only used when I(command=modify).
+    aliases: ['new_identifier']
+    type: str
+  wait:
+    description:
+      - When I(command=create) or I(command=modify), wait for the cluster to enter the 'available' state.
+      - When I(command=delete), wait for the cluster to be terminated.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - When I(wait=true) defines how long in seconds before giving up.
+    default: 300
+    type: int
+  enhanced_vpc_routing:
+    description:
+      - Whether the cluster should have enhanced VPC routing enabled.
+    default: false
+    type: bool
+requirements: [ 'boto3' ]
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Basic cluster provisioning example
+- redshift: >
+    command=create
+    node_type=ds1.xlarge
+    identifier=new_cluster
+    username=cluster_admin
+    password=1nsecure
+
+# Cluster delete example
+- redshift:
+    command: delete
+    identifier: new_cluster
+    skip_final_cluster_snapshot: true
+    wait: true
+'''
+
+RETURN = '''
+cluster:
+  description: dictionary containing all the cluster information
+  returned: success
+  type: complex
+  contains:
+    identifier:
+      description: Id of the cluster.
+ returned: success + type: str + sample: "new_redshift_cluster" + create_time: + description: Time of the cluster creation as timestamp. + returned: success + type: float + sample: 1430158536.308 + status: + description: Status of the cluster. + returned: success + type: str + sample: "available" + db_name: + description: Name of the database. + returned: success + type: str + sample: "new_db_name" + availability_zone: + description: Amazon availability zone where the cluster is located. "None" until cluster is available. + returned: success + type: str + sample: "us-east-1b" + maintenance_window: + description: Time frame when maintenance/upgrade are done. + returned: success + type: str + sample: "sun:09:30-sun:10:00" + private_ip_address: + description: Private IP address of the main node. + returned: success + type: str + sample: "10.10.10.10" + public_ip_address: + description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled. + returned: success + type: str + sample: "0.0.0.0" + port: + description: Port of the cluster. "None" until cluster is available. + returned: success + type: int + sample: 5439 + url: + description: FQDN of the main cluster node. "None" until cluster is available. + returned: success + type: str + sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com" + enhanced_vpc_routing: + description: status of the enhanced vpc routing feature. + returned: success + type: bool +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code + + +def _collect_facts(resource): + """Transform cluster information to dict.""" + facts = { + 'identifier': resource['ClusterIdentifier'], + 'status': resource['ClusterStatus'], + 'username': resource['MasterUsername'], + 'db_name': resource['DBName'], + 'maintenance_window': resource['PreferredMaintenanceWindow'], + 'enhanced_vpc_routing': resource['EnhancedVpcRouting'] + + } + + for node in resource['ClusterNodes']: + if node['NodeRole'] in ('SHARED', 'LEADER'): + facts['private_ip_address'] = node['PrivateIPAddress'] + if facts['enhanced_vpc_routing'] is False: + facts['public_ip_address'] = node['PublicIPAddress'] + else: + facts['public_ip_address'] = None + break + + # Some parameters are not ready instantly if you don't wait for available + # cluster status + facts['create_time'] = None + facts['url'] = None + facts['port'] = None + facts['availability_zone'] = None + + if resource['ClusterStatus'] != "creating": + facts['create_time'] = resource['ClusterCreateTime'] + facts['url'] = resource['Endpoint']['Address'] + facts['port'] = resource['Endpoint']['Port'] + facts['availability_zone'] = resource['AvailabilityZone'] + + return facts + + +@AWSRetry.jittered_backoff() +def _describe_cluster(redshift, identifier): + ''' + Basic wrapper around describe_clusters with a retry applied + ''' + return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + + +@AWSRetry.jittered_backoff() +def _create_cluster(redshift, **kwargs): + ''' + Basic wrapper around create_cluster with a retry applied + ''' + return redshift.create_cluster(**kwargs) + + +# Simple wrapper around delete, try to avoid throwing an error if some other +# operation is in progress 
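+# (AWSRetry.jittered_backoff() retries throttling-style API errors by default;
+# catch_extra_error_codes extends that list, so the decorated calls below are also
+# retried while the cluster briefly reports 'InvalidClusterState' instead of failing
+# on the first attempt.)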
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +def _delete_cluster(redshift, **kwargs): + ''' + Basic wrapper around delete_cluster with a retry applied. + Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that + we can still delete a cluster if some kind of change operation was in + progress. + ''' + return redshift.delete_cluster(**kwargs) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +def _modify_cluster(redshift, **kwargs): + ''' + Basic wrapper around modify_cluster with a retry applied. + Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases + where another modification is still in progress + ''' + return redshift.modify_cluster(**kwargs) + + +def create_cluster(module, redshift): + """ + Create a new cluster + + module: AnsibleModule object + redshift: authenticated redshift connection object + + Returns: + """ + + identifier = module.params.get('identifier') + node_type = module.params.get('node_type') + username = module.params.get('username') + password = module.params.get('password') + d_b_name = module.params.get('db_name') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + changed = True + # Package up the optional parameters + params = {} + for p in ('cluster_type', 'cluster_security_groups', + 'vpc_security_group_ids', 'cluster_subnet_group_name', + 'availability_zone', 'preferred_maintenance_window', + 'cluster_parameter_group_name', + 'automated_snapshot_retention_period', 'port', + 'cluster_version', 'allow_version_upgrade', + 'number_of_nodes', 'publicly_accessible', + 'encrypted', 'elastic_ip', 'enhanced_vpc_routing'): + # https://github.com/boto/boto3/issues/400 + if module.params.get(p) is not None: + params[p] = module.params.get(p) + + if d_b_name: + params['d_b_name'] = d_b_name + + try: + _describe_cluster(redshift, identifier) + changed = False + except is_boto3_error_code('ClusterNotFound'): + try: + _create_cluster(redshift, + ClusterIdentifier=identifier, + NodeType=node_type, + MasterUsername=username, + MasterUserPassword=password, + **snake_dict_to_camel_dict(params, capitalize_first=True)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to create cluster") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe cluster") + if wait: + attempts = wait_timeout // 60 + waiter = redshift.get_waiter('cluster_available') + try: + waiter.wait( + ClusterIdentifier=identifier, + WaiterConfig=dict(MaxAttempts=attempts) + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Timeout waiting for the cluster creation") + try: + resource = _describe_cluster(redshift, identifier) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to describe cluster") + + return(changed, _collect_facts(resource)) + + +def describe_cluster(module, redshift): + """ + Collect data about the cluster. 
+ + module: Ansible module object + redshift: authenticated redshift connection object + """ + identifier = module.params.get('identifier') + + try: + resource = _describe_cluster(redshift, identifier) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error describing cluster") + + return(True, _collect_facts(resource)) + + +def delete_cluster(module, redshift): + """ + Delete a cluster. + + module: Ansible module object + redshift: authenticated redshift connection object + """ + + identifier = module.params.get('identifier') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + params = {} + for p in ('skip_final_cluster_snapshot', + 'final_cluster_snapshot_identifier'): + if p in module.params: + # https://github.com/boto/boto3/issues/400 + if module.params.get(p) is not None: + params[p] = module.params.get(p) + + try: + _delete_cluster( + redshift, + ClusterIdentifier=identifier, + **snake_dict_to_camel_dict(params, capitalize_first=True)) + except is_boto3_error_code('ClusterNotFound'): + return(False, {}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete cluster") + + if wait: + attempts = wait_timeout // 60 + waiter = redshift.get_waiter('cluster_deleted') + try: + waiter.wait( + ClusterIdentifier=identifier, + WaiterConfig=dict(MaxAttempts=attempts) + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Timeout deleting the cluster") + + return(True, {}) + + +def modify_cluster(module, redshift): + """ + Modify an existing cluster. + + module: Ansible module object + redshift: authenticated redshift connection object + """ + + identifier = module.params.get('identifier') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + # Package up the optional parameters + params = {} + for p in ('cluster_type', 'cluster_security_groups', + 'vpc_security_group_ids', 'cluster_subnet_group_name', + 'availability_zone', 'preferred_maintenance_window', + 'cluster_parameter_group_name', + 'automated_snapshot_retention_period', 'port', 'cluster_version', + 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'): + # https://github.com/boto/boto3/issues/400 + if module.params.get(p) is not None: + params[p] = module.params.get(p) + + # enhanced_vpc_routing parameter change needs an exclusive request + if module.params.get('enhanced_vpc_routing') is not None: + try: + _modify_cluster( + redshift, + ClusterIdentifier=identifier, + EnhancedVpcRouting=module.params.get('enhanced_vpc_routing')) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + if wait: + attempts = wait_timeout // 60 + waiter = redshift.get_waiter('cluster_available') + try: + waiter.wait( + ClusterIdentifier=identifier, + WaiterConfig=dict(MaxAttempts=attempts) + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, + msg="Timeout waiting for cluster enhanced vpc routing modification" + ) + + # change the rest + try: + _modify_cluster( + redshift, + ClusterIdentifier=identifier, + **snake_dict_to_camel_dict(params, capitalize_first=True)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + 
        module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s" % identifier)
+
+    if module.params.get('new_cluster_identifier'):
+        identifier = module.params.get('new_cluster_identifier')
+
+    if wait:
+        attempts = wait_timeout // 60
+        waiter2 = redshift.get_waiter('cluster_available')
+        try:
+            waiter2.wait(
+                ClusterIdentifier=identifier,
+                WaiterConfig=dict(MaxAttempts=attempts)
+            )
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
+    try:
+        resource = _describe_cluster(redshift, identifier)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't describe redshift cluster %s" % identifier)
+
+    return(True, _collect_facts(resource))
+
+
+def main():
+    argument_spec = dict(
+        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+        identifier=dict(required=True),
+        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
+                                'ds2.8xlarge', 'dc1.large', 'dc2.large',
+                                'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
+                                'dw2.large', 'dw2.8xlarge'], required=False),
+        username=dict(required=False),
+        password=dict(no_log=True, required=False),
+        db_name=dict(required=False),
+        cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
+        cluster_security_groups=dict(aliases=['security_groups'], type='list'),
+        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
+        skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
+                                         type='bool', default=False),
+        final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
+        cluster_subnet_group_name=dict(aliases=['subnet']),
+        availability_zone=dict(aliases=['aws_zone', 'zone']),
+        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
+        cluster_parameter_group_name=dict(aliases=['param_group_name']),
+        automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
+        port=dict(type='int'),
+        cluster_version=dict(aliases=['version'], choices=['1.0']),
+        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
+        number_of_nodes=dict(type='int'),
+        publicly_accessible=dict(type='bool', default=False),
+        encrypted=dict(type='bool', default=False),
+        elastic_ip=dict(required=False),
+        new_cluster_identifier=dict(aliases=['new_identifier']),
+        enhanced_vpc_routing=dict(type='bool', default=False),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=300),
+    )
+
+    required_if = [
+        ('command', 'delete', ['skip_final_cluster_snapshot']),
+        ('command', 'create', ['node_type',
+                               'username',
+                               'password'])
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=required_if
+    )
+
+    command = module.params.get('command')
+    skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
+    final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
+    # can't use module basic required_if check for this case
+    if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
+        module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
+
+    conn = module.client('redshift')
+
+    changed = True
+    if command == 'create':
+        (changed, cluster) = create_cluster(module, conn)
+
+    elif command == 'facts':
+        (changed, cluster) =
describe_cluster(module, conn) + + elif command == 'delete': + (changed, cluster) = delete_cluster(module, conn) + + elif command == 'modify': + (changed, cluster) = modify_cluster(module, conn) + + module.exit_json(changed=changed, cluster=cluster) + + +if __name__ == '__main__': + main() diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py new file mode 100644 index 00000000000..76de6b2c89e --- /dev/null +++ b/redshift_cross_region_snapshots.py @@ -0,0 +1,205 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, JR Kerkstra +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: redshift_cross_region_snapshots +short_description: Manage Redshift Cross Region Snapshots +description: + - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots. + - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy) +author: JR Kerkstra (@captainkerk) +options: + cluster_name: + description: + - The name of the cluster to configure cross-region snapshots for. + required: true + aliases: [ "cluster" ] + type: str + state: + description: + - Create or remove the cross-region snapshot configuration. + choices: [ "present", "absent" ] + default: present + type: str + region: + description: + - "The cluster's region." + required: true + aliases: [ "source" ] + type: str + destination_region: + description: + - The region to copy snapshots to. + required: true + aliases: [ "destination" ] + type: str + snapshot_copy_grant: + description: + - A grant for Amazon Redshift to use a master key in the I(destination_region). + - See U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.create_snapshot_copy_grant) + aliases: [ "copy_grant" ] + type: str + snapshot_retention_period: + description: + - The number of days to keep cross-region snapshots for. 
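+# Reviewer note (illustrative only, not part of the module): this option becomes the
+# RetentionPeriod argument of the enable_snapshot_copy()/modify_snapshot_copy_retention_period()
+# calls wrapped further down. A minimal direct boto3 sketch, with placeholder names, would be:
+#   import boto3
+#   client = boto3.client('redshift', region_name='us-east-1')
+#   client.enable_snapshot_copy(ClusterIdentifier='my-cluster',
+#                               DestinationRegion='us-west-2', RetentionPeriod=7)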
+ required: true + aliases: [ "retention_period" ] + type: int +requirements: [ "botocore", "boto3" ] +extends_documentation_fragment: +- ansible.amazon.ec2 +- ansible.amazon.aws + +''' + +EXAMPLES = ''' +- name: configure cross-region snapshot on cluster `johniscool` + redshift_cross_region_snapshots: + cluster_name: johniscool + state: present + region: us-east-1 + destination_region: us-west-2 + retention_period: 1 + +- name: configure cross-region snapshot on kms-encrypted cluster + redshift_cross_region_snapshots: + cluster_name: whatever + state: present + region: us-east-1 + destination: us-west-2 + copy_grant: 'my-grant-in-destination' + retention_period: 10 + +- name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize) + redshift_cross_region_snapshots: + cluster_name: whatever + state: absent + region: us-east-1 + destination_region: us-west-2 +''' + +RETURN = ''' # ''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + + +class SnapshotController(object): + + def __init__(self, client, cluster_name): + self.client = client + self.cluster_name = cluster_name + + def get_cluster_snapshot_copy_status(self): + response = self.client.describe_clusters( + ClusterIdentifier=self.cluster_name + ) + return response['Clusters'][0].get('ClusterSnapshotCopyStatus') + + def enable_snapshot_copy(self, destination_region, grant_name, retention_period): + if grant_name: + self.client.enable_snapshot_copy( + ClusterIdentifier=self.cluster_name, + DestinationRegion=destination_region, + RetentionPeriod=retention_period, + SnapshotCopyGrantName=grant_name, + ) + else: + self.client.enable_snapshot_copy( + ClusterIdentifier=self.cluster_name, + DestinationRegion=destination_region, + RetentionPeriod=retention_period, + ) + + def disable_snapshot_copy(self): + self.client.disable_snapshot_copy( + ClusterIdentifier=self.cluster_name + ) + + def modify_snapshot_copy_retention_period(self, retention_period): + self.client.modify_snapshot_copy_retention_period( + ClusterIdentifier=self.cluster_name, + RetentionPeriod=retention_period + ) + + +def requesting_unsupported_modifications(actual, requested): + if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or + actual['DestinationRegion'] != requested['destination_region']): + return True + return False + + +def needs_update(actual, requested): + if actual['RetentionPeriod'] != requested['snapshot_retention_period']: + return True + return False + + +def run_module(): + argument_spec = dict( + cluster_name=dict(type='str', required=True, aliases=['cluster']), + state=dict(type='str', choices=['present', 'absent'], default='present'), + region=dict(type='str', required=True, aliases=['source']), + destination_region=dict(type='str', required=True, aliases=['destination']), + snapshot_copy_grant=dict(type='str', aliases=['copy_grant']), + snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + result = dict( + changed=False, + message='' + ) + connection = module.client('redshift') + + snapshot_controller = SnapshotController(client=connection, + cluster_name=module.params.get('cluster_name')) + + current_config = snapshot_controller.get_cluster_snapshot_copy_status() + if current_config is not None: + if module.params.get('state') == 'present': + if requesting_unsupported_modifications(current_config, module.params): 
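+                # The Redshift API cannot change DestinationRegion or SnapshotCopyGrantName on an
+                # existing copy configuration; it must be disabled and re-enabled with the new
+                # values, which this module deliberately leaves to the operator.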
+                message = 'Cannot modify destination_region or grant_name. ' \
+                          'Please disable cross-region snapshots, and re-run.'
+                module.fail_json(msg=message, **result)
+            if needs_update(current_config, module.params):
+                result['changed'] = True
+                if not module.check_mode:
+                    snapshot_controller.modify_snapshot_copy_retention_period(
+                        module.params.get('snapshot_retention_period')
+                    )
+        else:
+            result['changed'] = True
+            if not module.check_mode:
+                snapshot_controller.disable_snapshot_copy()
+    else:
+        if module.params.get('state') == 'present':
+            result['changed'] = True
+            if not module.check_mode:
+                snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
+                                                         module.params.get('snapshot_copy_grant'),
+                                                         module.params.get('snapshot_retention_period'))
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/redshift_facts.py b/redshift_facts.py
new file mode 120000
index 00000000000..40a774faad8
--- /dev/null
+++ b/redshift_facts.py
@@ -0,0 +1 @@
+redshift_info.py
\ No newline at end of file
diff --git a/redshift_info.py b/redshift_info.py
new file mode 100644
index 00000000000..b0355906456
--- /dev/null
+++ b/redshift_info.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: redshift_info
+author: "Jens Carl (@j-carl)"
+short_description: Gather information about Redshift cluster(s)
+description:
+  - Gather information about Redshift cluster(s).
+  - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+options:
+  cluster_identifier:
+    description:
+      - The prefix of the cluster identifier of the Redshift cluster(s) you are searching for.
+      - "This is a regular expression match with implicit '^'. Append '$' for a complete match."
+    required: false
+    aliases: ['name', 'identifier']
+    type: str
+  tags:
+    description:
+      - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
+         to match against the cluster(s) you are searching for."
+    required: false
+    type: dict
+extends_documentation_fragment:
+- ansible.amazon.ec2
+- ansible.amazon.aws
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS guide for details.
+
+# Find all clusters
+- redshift_info:
+  register: redshift
+
+# Find cluster(s) with matching tags
+- redshift_info:
+    tags:
+      env: prd
+      stack: monitoring
+  register: redshift_tags
+
+# Find cluster(s) with matching name/prefix and tags
+- redshift_info:
+    tags:
+      env: dev
+      stack: web
+    name: user-
+  register: redshift_web
+
+# Fail if no cluster(s) is/are found
+- redshift_info:
+    tags:
+      env: stg
+      stack: db
+  register: redshift_user
+  failed_when: "{{ redshift_user.results | length == 0 }}"
+'''
+
+RETURN = '''
+# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
+---
+cluster_identifier:
+  description: Unique key to identify the cluster.
+  returned: success
+  type: str
+  sample: "redshift-identifier"
+node_type:
+  description: The node type for nodes in the cluster.
+  returned: success
+  type: str
+  sample: "ds2.xlarge"
+cluster_status:
+  description: Current state of the cluster.
+  returned: success
+  type: str
+  sample: "available"
+modify_status:
+  description: The status of a modify operation.
+  returned: optional
+  type: str
+  sample: ""
+master_username:
+  description: The master user name for the cluster.
+  returned: success
+  type: str
+  sample: "admin"
+db_name:
+  description: The name of the initial database that was created when the cluster was created.
+  returned: success
+  type: str
+  sample: "dev"
+endpoint:
+  description: The connection endpoint.
+  returned: success
+  type: str
+  sample: {
+    "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
+    "port": 5439
+  }
+cluster_create_time:
+  description: The date and time that the cluster was created.
+  returned: success
+  type: str
+  sample: "2016-05-10T08:33:16.629000+00:00"
+automated_snapshot_retention_period:
+  description: The number of days that automatic cluster snapshots are retained.
+  returned: success
+  type: int
+  sample: 1
+cluster_security_groups:
+  description: A list of cluster security groups that are associated with the cluster.
+  returned: success
+  type: list
+  sample: []
+vpc_security_groups:
+  description: A list of VPC security groups that are associated with the cluster.
+  returned: success
+  type: list
+  sample: [
+    {
+      "status": "active",
+      "vpc_security_group_id": "sg-12cghhg"
+    }
+  ]
+cluster_parameter_groups:
+  description: The list of cluster parameters that are associated with this cluster.
+  returned: success
+  type: list
+  sample: [
+    {
+      "cluster_parameter_status_list": [
+        {
+          "parameter_apply_status": "in-sync",
+          "parameter_name": "statement_timeout"
+        },
+        {
+          "parameter_apply_status": "in-sync",
+          "parameter_name": "require_ssl"
+        }
+      ],
+      "parameter_apply_status": "in-sync",
+      "parameter_group_name": "tuba"
+    }
+  ]
+cluster_subnet_group_name:
+  description: The name of the subnet group that is associated with the cluster.
+  returned: success
+  type: str
+  sample: "redshift-subnet"
+vpc_id:
+  description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
+  returned: success
+  type: str
+  sample: "vpc-1234567"
+availability_zone:
+  description: The name of the Availability Zone in which the cluster is located.
+  returned: success
+  type: str
+  sample: "us-east-1b"
+preferred_maintenance_window:
+  description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
+  returned: success
+  type: str
+  sample: "tue:07:30-tue:08:00"
+pending_modified_values:
+  description: A value that, if present, indicates that changes to the cluster are pending.
+  returned: success
+  type: dict
+  sample: {}
+cluster_version:
+  description: The version ID of the Amazon Redshift engine that is running on the cluster.
+  returned: success
+  type: str
+  sample: "1.0"
+allow_version_upgrade:
+  description: >
+    A Boolean value that, if true, indicates that major version upgrades will be applied
+    automatically to the cluster during the maintenance window.
+  returned: success
+  type: bool
+  sample: true|false
+number_of_nodes:
+  description: The number of compute nodes in the cluster.
+  returned: success
+  type: int
+  sample: 12
+publicly_accessible:
+  description: A Boolean value that, if true, indicates that the cluster can be accessed from a public network.
+  returned: success
+  type: bool
+  sample: true|false
+encrypted:
+  description: Boolean value that, if true, indicates that data in the cluster is encrypted at rest.
+  returned: success
+  type: bool
+  sample: true|false
+restore_status:
+  description: A value that describes the status of a cluster restore action.
+  returned: success
+  type: dict
+  sample: {}
+hsm_status:
+  description: >
+    A value that reports whether the Amazon Redshift cluster has finished applying any hardware
+    security module (HSM) settings changes specified in a modify cluster command.
+  returned: success
+  type: dict
+  sample: {}
+cluster_snapshot_copy_status:
+  description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
+  returned: success
+  type: dict
+  sample: {}
+cluster_public_keys:
+  description: The public key for the cluster.
+  returned: success
+  type: str
+  sample: "ssh-rsa anjigfam Amazon-Redshift\n"
+cluster_nodes:
+  description: The nodes in the cluster.
+  returned: success
+  type: list
+  sample: [
+    {
+      "node_role": "LEADER",
+      "private_ip_address": "10.0.0.1",
+      "public_ip_address": "x.x.x.x"
+    },
+    {
+      "node_role": "COMPUTE-1",
+      "private_ip_address": "10.0.0.3",
+      "public_ip_address": "x.x.x.x"
+    }
+  ]
+elastic_ip_status:
+  description: The status of the elastic IP (EIP) address.
+  returned: success
+  type: dict
+  sample: {}
+cluster_revision_number:
+  description: The specific revision number of the database in the cluster.
+  returned: success
+  type: str
+  sample: "1231"
+tags:
+  description: The list of tags for the cluster.
+  returned: success
+  type: list
+  sample: []
+kms_key_id:
+  description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
+  returned: success
+  type: str
+  sample: ""
+enhanced_vpc_routing:
+  description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
+  returned: success
+  type: bool
+  sample: true|false
+iam_roles:
+  description: List of IAM roles attached to the cluster.
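+# Reviewer note (illustrative only, not part of the module): cluster_identifier is treated
+# as an anchored-prefix regular expression by find_clusters() below, e.g.:
+#   import re
+#   identifier_prog = re.compile('^' + 'prod-')
+#   bool(identifier_prog.search('prod-warehouse'))  # True
+#   bool(identifier_prog.search('dev-prod-copy'))   # False - '^' anchors at the start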
+  returned: success
+  type: list
+  sample: []
+'''
+
+import re
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def match_tags(tags_to_match, cluster):
+    for key, value in tags_to_match.items():
+        for tag in cluster['Tags']:
+            if key == tag['Key'] and value == tag['Value']:
+                return True
+
+    return False
+
+
+def find_clusters(conn, module, identifier=None, tags=None):
+
+    try:
+        cluster_paginator = conn.get_paginator('describe_clusters')
+        clusters = cluster_paginator.paginate().build_full_result()
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to fetch clusters.')
+
+    matched_clusters = []
+
+    if identifier is not None:
+        identifier_prog = re.compile('^' + identifier)
+
+    for cluster in clusters['Clusters']:
+
+        matched_identifier = True
+        if identifier:
+            matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
+
+        matched_tags = True
+        if tags:
+            matched_tags = match_tags(tags, cluster)
+
+        if matched_identifier and matched_tags:
+            matched_clusters.append(camel_dict_to_snake_dict(cluster))
+
+    return matched_clusters
+
+
+def main():
+
+    argument_spec = dict(
+        cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
+        tags=dict(type='dict')
+    )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+    if module._name == 'redshift_facts':
+        module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", version='2.13')
+
+    cluster_identifier = module.params.get('cluster_identifier')
+    cluster_tags = module.params.get('tags')
+
+    redshift = module.client('redshift')
+
+    results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
+    module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py
new file mode 100644
index 00000000000..7090dcfa314
--- /dev/null
+++ b/redshift_subnet_group.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+author:
+  - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift_subnet_group
+short_description: manage Redshift cluster subnet groups
+description:
+  - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+  state:
+    description:
+      - Specifies whether the subnet group should be present or absent.
+    required: true
+    choices: ['present', 'absent']
+    type: str
+  group_name:
+    description:
+      - Cluster subnet group name.
+    required: true
+    aliases: ['name']
+    type: str
+  group_description:
+    description:
+      - Cluster subnet group description.
+    aliases: ['description']
+    type: str
+  group_subnets:
+    description:
+      - List of subnet IDs that make up the cluster subnet group.
+ aliases: ['subnets'] + type: list + elements: str +requirements: [ 'boto' ] +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Create a Redshift subnet group +- local_action: + module: redshift_subnet_group + state: present + group_name: redshift-subnet + group_description: Redshift subnet + group_subnets: + - 'subnet-aaaaa' + - 'subnet-bbbbb' + +# Remove subnet group +- redshift_subnet_group: + state: absent + group_name: redshift-subnet +''' + +RETURN = ''' +group: + description: dictionary containing all Redshift subnet group information + returned: success + type: complex + contains: + name: + description: name of the Redshift subnet group + returned: success + type: str + sample: "redshift_subnet_group_name" + vpc_id: + description: Id of the VPC where the subnet is located + returned: success + type: str + sample: "vpc-aabb1122" +''' + +try: + import boto + import boto.redshift + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + group_name=dict(required=True, aliases=['name']), + group_description=dict(required=False, aliases=['description']), + group_subnets=dict(required=False, aliases=['subnets'], type='list'), + )) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto v2.9.0+ required for this module') + + state = module.params.get('state') + group_name = module.params.get('group_name') + group_description = module.params.get('group_description') + group_subnets = module.params.get('group_subnets') + + if state == 'present': + for required in ('group_name', 'group_description', 'group_subnets'): + if not module.params.get(required): + module.fail_json(msg=str("parameter %s required for state='present'" % required)) + else: + for not_allowed in ('group_description', 'group_subnets'): + if module.params.get(not_allowed): + module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed)) + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg=str("Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")) + + # Connect to the Redshift endpoint. 
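+    # (connect_to_aws() resolves the regional endpoint and returns a boto2 redshift
+    # connection; a JSONResponseError here usually means an invalid region or
+    # unusable credentials.)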
+    try:
+        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    try:
+        changed = False
+        exists = False
+        group = None
+
+        try:
+            matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
+            exists = len(matching_groups) > 0
+        except boto.exception.JSONResponseError as e:
+            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
+                module.fail_json(msg=str(e))
+
+        if state == 'absent':
+            if exists:
+                conn.delete_cluster_subnet_group(group_name)
+                changed = True
+
+        else:
+            if not exists:
+                new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
+                group = {
+                    'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+                                     ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+                    'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+                                       ['ClusterSubnetGroup']['VpcId'],
+                }
+            else:
+                changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
+                group = {
+                    'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+                                         ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+                    'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+                                           ['ClusterSubnetGroup']['VpcId'],
+                }
+
+            changed = True
+
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    module.exit_json(changed=changed, group=group)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/route53.py b/route53.py
new file mode 100644
index 00000000000..dda106e3f9b
--- /dev/null
+++ b/route53.py
@@ -0,0 +1,709 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: route53
+short_description: add or delete entries in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS records in Amazon's Route 53 service.
+options:
+  state:
+    description:
+      - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option has been changed
+        to I(state) as default and the choices C(present) and C(absent) have been added, but I(command) still works as well.
+    required: true
+    aliases: [ 'command' ]
+    choices: [ 'present', 'absent', 'get', 'create', 'delete' ]
+    type: str
+  zone:
+    description:
+      - The DNS zone to modify.
+      - This is a required parameter if I(hosted_zone_id) is not supplied.
+    type: str
+  hosted_zone_id:
+    description:
+      - The Hosted Zone ID of the DNS zone to modify.
+      - This is a required parameter if I(zone) is not supplied.
+    type: str
+  record:
+    description:
+      - The full DNS record to create or delete.
+    required: true
+    type: str
+  ttl:
+    description:
+      - The TTL, in seconds, to give the new record.
+    default: 3600
+    type: int
+  type:
+    description:
+      - The type of DNS record to create.
+    required: true
+    choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ]
+    type: str
+  alias:
+    description:
+      - Indicates if this is an alias record.
+    type: bool
+    default: false
+  alias_hosted_zone_id:
+    description:
+      - The hosted zone identifier.
+    type: str
+  alias_evaluate_target_health:
+    description:
+      - Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
+    type: bool
+    default: false
+  value:
+    description:
+      - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records.
+      - When deleting a record all values for the record must be specified or Route 53 will not delete it.
+    type: list
+  overwrite:
+    description:
+      - Whether an existing record should be overwritten on create if values do not match.
+    type: bool
+  retry_interval:
+    description:
+      - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds.
+        If you have many domain names, the default of 500 seconds may be too long.
+    default: 500
+    type: int
+  private_zone:
+    description:
+      - If set to C(yes), the private zone matching the requested name within the domain will be used if there are both public and private zones.
+        The default is to use the public zone.
+    type: bool
+    default: false
+  identifier:
+    description:
+      - Must be specified for weighted, latency-based, and failover resource record sets only.
+        An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+    type: str
+  weight:
+    description:
+      - Weighted resource record sets only. Among resource record sets that
+        have the same combination of DNS name and type, a value that
+        determines what portion of traffic for the current resource record set
+        is routed to the associated location.
+    type: int
+  region:
+    description:
+      - Latency-based resource record sets only. Among resource record sets
+        that have the same combination of DNS name and type, a value that
+        determines which region this should be associated with for
+        latency-based routing.
+    type: str
+  health_check:
+    description:
+      - Health check to associate with this record.
+    type: str
+  failover:
+    description:
+      - Failover resource record sets only. Whether this is the primary or
+        secondary resource record set. Allowed values are PRIMARY and SECONDARY.
+    type: str
+    choices: ['SECONDARY', 'PRIMARY']
+  vpc_id:
+    description:
+      - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
+      - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
+    type: str
+  wait:
+    description:
+      - Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - How long to wait for the changes to be replicated, in seconds.
+    default: 300
+    type: int
+author:
+- Bruce Pennypacker (@bpennypacker)
+- Mike Buzzetti (@jimbydamonk)
+extends_documentation_fragment:
+- ansible.amazon.aws
+
+'''
+
+RETURN = '''
+nameservers:
+  description: Nameservers associated with the zone.
+  returned: when state is 'get'
+  type: list
+  sample:
+  - ns-1036.awsdns-00.org.
+  - ns-516.awsdns-00.net.
+  - ns-1504.awsdns-00.co.uk.
+  - ns-1.awsdns-00.com.
+set:
+  description: Info specific to the resource record.
+  returned: when state is 'get'
+  type: complex
+  contains:
+    alias:
+      description: Whether this is an alias.
+      returned: always
+      type: bool
+      sample: false
+    failover:
+      description: Whether this is the primary or secondary resource record set.
+ returned: always + type: str + sample: PRIMARY + health_check: + description: health_check associated with this record. + returned: always + type: str + identifier: + description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. + returned: always + type: str + record: + description: Domain name for the record set. + returned: always + type: str + sample: new.foo.com. + region: + description: Which region this should be associated with for latency-based routing. + returned: always + type: str + sample: us-west-2 + ttl: + description: Resource record cache TTL. + returned: always + type: str + sample: '3600' + type: + description: Resource record set type. + returned: always + type: str + sample: A + value: + description: Record value. + returned: always + type: str + sample: 52.43.18.27 + values: + description: Record Values. + returned: always + type: list + sample: + - 52.43.18.27 + weight: + description: Weight of the record. + returned: always + type: str + sample: '3' + zone: + description: Zone this record set belongs to. + returned: always + type: str + sample: foo.bar.com. +''' + +EXAMPLES = ''' +# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated +- route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: 1.1.1.1,2.2.2.2,3.3.3.3 + wait: yes + +# Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated +- route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: + - 1.1.1.1 + - 2.2.2.2 + - 3.3.3.3 + wait: yes + +# Retrieve the details for new.foo.com +- route53: + state: get + zone: foo.com + record: new.foo.com + type: A + register: rec + +# Delete new.foo.com A record using the results from the get command +- route53: + state: absent + zone: foo.com + record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" + type: "{{ rec.set.type }}" + value: "{{ rec.set.value }}" + +# Add an AAAA record. Note that because there are colons in the value +# that the IPv6 address must be quoted. Also shows using the old form command=create. +- route53: + command: create + zone: foo.com + record: localhost.foo.com + type: AAAA + ttl: 7200 + value: "::1" + +# Add a SRV record with multiple fields for a service on port 22222 +# For more information on SRV records see: +# https://en.wikipedia.org/wiki/SRV_record +- route53: + state: present + zone: foo.com + record: "_example-service._tcp.foo.com" + type: SRV + value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com" + +# Add a TXT record. 
Note that TXT and SPF records must be surrounded
+# by quotes when sent to Route 53:
+- route53:
+    state: present
+    zone: foo.com
+    record: localhost.foo.com
+    type: TXT
+    ttl: 7200
+    value: '"bar"'
+
+# Add an alias record that points to an Amazon ELB:
+- route53:
+    state: present
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+    value: "{{ elb_dns_name }}"
+    alias: True
+    alias_hosted_zone_id: "{{ elb_zone_id }}"
+
+# Retrieve the details for elb.foo.com
+- route53:
+    state: get
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+  register: rec
+
+# Delete an alias record using the results from the get command
+- route53:
+    state: absent
+    zone: foo.com
+    record: "{{ rec.set.record }}"
+    ttl: "{{ rec.set.ttl }}"
+    type: "{{ rec.set.type }}"
+    value: "{{ rec.set.value }}"
+    alias: True
+    alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
+
+# Add an alias record that points to an Amazon ELB and evaluates its health:
+- route53:
+    state: present
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+    value: "{{ elb_dns_name }}"
+    alias: True
+    alias_hosted_zone_id: "{{ elb_zone_id }}"
+    alias_evaluate_target_health: True
+
+# Add an AAAA record with Hosted Zone ID.
+- route53:
+    state: present
+    zone: foo.com
+    hosted_zone_id: Z2AABBCCDDEEFF
+    record: localhost.foo.com
+    type: AAAA
+    ttl: 7200
+    value: "::1"
+
+# Use a routing policy to distribute traffic:
+- route53:
+    state: present
+    zone: foo.com
+    record: www.foo.com
+    type: CNAME
+    value: host1.foo.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+
+# Add a CAA record (RFC 6844):
+- route53:
+    state: present
+    zone: example.com
+    record: example.com
+    type: CAA
+    value:
+      - 0 issue "ca.example.net"
+      - 0 issuewild ";"
+      - 0 iodef "mailto:security@example.com"
+
+'''
+
+import time
+import distutils.version
+
+try:
+    import boto
+    import boto.ec2
+    from boto.route53 import Route53Connection
+    from boto.route53.record import Record, ResourceRecordSets
+    from boto.route53.status import Status
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+MINIMUM_BOTO_VERSION = '2.28.0'
+WAIT_RETRY_SLEEP = 5  # how many seconds to wait between propagation status polls
+
+
+class TimeoutError(Exception):
+    pass
+
+
+def get_zone_id_by_name(conn, module, zone_name, want_private, want_vpc_id):
+    """Finds a zone by name and returns its id, honouring the private/VPC settings"""
+    for zone in invoke_with_throttling_retries(conn.get_zones):
+        # only save this zone id if the private status of the zone matches
+        # the private_zone_in boolean specified in the params
+        private_zone = module.boolean(zone.config.get('PrivateZone', False))
+        if private_zone == want_private and zone.name == zone_name:
+            if want_vpc_id:
+                # NOTE: These details aren't available in other boto methods, hence the necessary
+                # extra API call
+                hosted_zone = invoke_with_throttling_retries(conn.get_hosted_zone, zone.id)
+                zone_details = hosted_zone['GetHostedZoneResponse']
+                # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
+                if isinstance(zone_details['VPCs'], dict):
+                    if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
+                        return zone.id
+                else:  # Forward compatibility for when boto fixes that bug
+                    if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
+                        return zone.id
+            else:
+                return zone.id
+    return None
+
+
+def commit(changes, retry_interval, wait, wait_timeout):
+    """Commit changes, but retry PriorRequestNotComplete errors."""
+    result = None
+    retry = 10
+    while True:
+        try:
+            retry -= 1
+            result = changes.commit()
+            break
+        except boto.route53.exception.DNSServerError as e:
+            code = e.body.split("<Code>")[1]
+            code = code.split("</Code>")[0]
+            if code != 'PriorRequestNotComplete' or retry < 0:
+                raise e
+            time.sleep(float(retry_interval))
+
+    if wait:
+        timeout_time = time.time() + wait_timeout
+        connection = changes.connection
+        change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
+        status = Status(connection, change)
+        while status.status != 'INSYNC' and time.time() < timeout_time:
+            time.sleep(WAIT_RETRY_SLEEP)
+            status.update()
+        if time.time() >= timeout_time:
+            raise TimeoutError()
+    return result
+
+
+# Shamelessly copied over from https://git.io/vgmDG
+IGNORE_CODE = 'Throttling'
+MAX_RETRIES = 5
+
+
+def invoke_with_throttling_retries(function_ref, *argv, **kwargs):
+    retries = 0
+    while True:
+        try:
+            retval = function_ref(*argv, **kwargs)
+            return retval
+        except boto.exception.BotoServerError as e:
+            if e.code != IGNORE_CODE or retries == MAX_RETRIES:
+                raise e
+            time.sleep(5 * (2**retries))
+            retries += 1
+
+
+def decode_name(name):
+    # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
+    # tripping of things like * and @.
+    return name.encode().decode('unicode_escape')
+
+
+def to_dict(rset, zone_in, zone_id):
+    record = dict()
+    record['zone'] = zone_in
+    record['type'] = rset.type
+    record['record'] = decode_name(rset.name)
+    record['ttl'] = str(rset.ttl)
+    record['identifier'] = rset.identifier
+    record['weight'] = rset.weight
+    record['region'] = rset.region
+    record['failover'] = rset.failover
+    record['health_check'] = rset.health_check
+    record['hosted_zone_id'] = zone_id
+    if rset.alias_dns_name:
+        record['alias'] = True
+        record['value'] = rset.alias_dns_name
+        record['values'] = [rset.alias_dns_name]
+        record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
+        record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
+    else:
+        record['alias'] = False
+        record['value'] = ','.join(sorted(rset.resource_records))
+        record['values'] = sorted(rset.resource_records)
+    return record
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
+        zone=dict(type='str'),
+        hosted_zone_id=dict(type='str'),
+        record=dict(type='str', required=True),
+        ttl=dict(type='int', default=3600),
+        type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
+        alias=dict(type='bool'),
+        alias_hosted_zone_id=dict(type='str'),
+        alias_evaluate_target_health=dict(type='bool', default=False),
+        value=dict(type='list'),
+        overwrite=dict(type='bool'),
+        retry_interval=dict(type='int', default=500),
+        private_zone=dict(type='bool', default=False),
+        identifier=dict(type='str'),
+        weight=dict(type='int'),
+        region=dict(type='str'),
+        health_check=dict(type='str'),
+        failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
+        vpc_id=dict(type='str'),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=300),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=[['zone', 'hosted_zone_id']],
+        # If alias is True then you must specify alias_hosted_zone as well
+        required_together=[['alias', 'alias_hosted_zone_id']],
+        # state=present, absent, create, delete THEN value is required
+        required_if=(
+            ('state', 'present', ['value']),
+            ('state', 'create', ['value']),
+            ('state', 'absent', ['value']),
+            ('state', 'delete', ['value']),
+        ),
+        # failover, region and weight are mutually exclusive
+        mutually_exclusive=[('failover', 'region', 'weight')],
+        # failover, region and weight require identifier
+        required_by=dict(
+            failover=('identifier',),
+            region=('identifier',),
+            weight=('identifier',),
+        ),
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
+        module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))
+
+    if module.params['state'] in ('present', 'create'):
+        command_in = 'create'
+    elif module.params['state'] in ('absent', 'delete'):
+        command_in = 'delete'
+    elif module.params['state'] == 'get':
+        command_in = 'get'
+
+    zone_in = (module.params.get('zone') or '').lower()
+    hosted_zone_id_in = module.params.get('hosted_zone_id')
+    ttl_in = module.params.get('ttl')
+    record_in = module.params.get('record').lower()
+    type_in = module.params.get('type')
+    value_in = module.params.get('value') or []
+    alias_in = module.params.get('alias')
+    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
+    alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
+    retry_interval_in = module.params.get('retry_interval')
+
+    if module.params['vpc_id'] is not None:
+        private_zone_in = True
+    else:
+        private_zone_in = module.params.get('private_zone')
+
+    identifier_in = module.params.get('identifier')
+    weight_in = module.params.get('weight')
+    region_in = module.params.get('region')
+    health_check_in = module.params.get('health_check')
+    failover_in = module.params.get('failover')
+    vpc_id_in = module.params.get('vpc_id')
+    wait_in = module.params.get('wait')
+    wait_timeout_in = module.params.get('wait_timeout')
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+    if zone_in[-1:] != '.':
+        zone_in += "."
+
+    if record_in[-1:] != '.':
+        record_in += "."
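+    # Route 53 stores record and zone names fully qualified (with a trailing
+    # dot); normalising zone_in and record_in above lets the later
+    # rset.name.lower() == record_in.lower() comparison match the API's
+    # responses directly.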
+
+    if command_in == 'create' or command_in == 'delete':
+        if alias_in and len(value_in) != 1:
+            module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
+        if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
+            module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
+
+    # connect to the route53 endpoint
+    try:
+        conn = Route53Connection(**aws_connect_kwargs)
+    except boto.exception.BotoServerError as e:
+        module.fail_json(msg=e.error_message)
+
+    # Find the named zone ID
+    zone_id = hosted_zone_id_in or get_zone_id_by_name(conn, module, zone_in, private_zone_in, vpc_id_in)
+
+    # Verify that the requested zone is already defined in Route53
+    if zone_id is None:
+        errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
+        module.fail_json(msg=errmsg)
+
+    record = {}
+
+    found_record = False
+    wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
+                         identifier=identifier_in, weight=weight_in,
+                         region=region_in, health_check=health_check_in,
+                         failover=failover_in)
+    for v in value_in:
+        if alias_in:
+            wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
+        else:
+            wanted_rset.add_value(v)
+
+    need_to_sort_records = (type_in == 'CAA')
+
+    # Sort records for wanted_rset if necessary (keep original list)
+    unsorted_records = wanted_rset.resource_records
+    if need_to_sort_records:
+        wanted_rset.resource_records = sorted(unsorted_records)
+
+    sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone_id, name=record_in,
+                                          type=type_in, identifier=identifier_in)
+    sets_iter = iter(sets)
+    while True:
+        try:
+            rset = invoke_with_throttling_retries(next, sets_iter)
+        except StopIteration:
+            break
+        # Need to save these changes in rset because rset.to_xml() is compared with wanted_rset.to_xml() in the next block
+        rset.name = decode_name(rset.name)
+
+        if identifier_in is not None:
+            identifier_in = str(identifier_in)
+
+        if rset.type == type_in and rset.name.lower() == record_in.lower() and rset.identifier == identifier_in:
+            if need_to_sort_records:
+                # Sort records
+                rset.resource_records = sorted(rset.resource_records)
+            found_record = True
+            record = to_dict(rset, zone_in, zone_id)
+            if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
+                module.exit_json(changed=False)
+
+        # We need to look only at the first rrset returned by the above call,
+        # so break here. The returned elements begin with the one matching our
+        # requested name, type, and identifier, if such an element exists,
+        # followed by all others that come after it in alphabetical order.
+        # Therefore, if the first set does not match, no subsequent set will
+        # match either.
+        break
+
+    if command_in == 'get':
+        if type_in == 'NS':
+            ns = record.get('values', [])
+        else:
+            # Retrieve name servers associated with the zone.
+            z = invoke_with_throttling_retries(conn.get_zone, zone_in)
+            ns = invoke_with_throttling_retries(z.get_nameservers)
+
+        module.exit_json(changed=False, set=record, nameservers=ns)
+
+    if command_in == 'delete' and not found_record:
+        module.exit_json(changed=False)
+
+    changes = ResourceRecordSets(conn, zone_id)
+
+    if command_in == 'create' or command_in == 'delete':
+        if command_in == 'create' and found_record:
+            if not module.params['overwrite']:
+                module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
+            command = 'UPSERT'
+        else:
+            command = command_in.upper()
+        # Restore original order of records
+        wanted_rset.resource_records = unsorted_records
+        changes.add_change_record(command, wanted_rset)
+
+    if not module.check_mode:
+        try:
+            invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
+        except boto.route53.exception.DNSServerError as e:
+            txt = e.body.split("<Message>")[1]
+            txt = txt.split("</Message>")[0]
+            if "but it already exists" in txt:
+                module.exit_json(changed=False)
+            else:
+                module.fail_json(msg=txt)
+        except TimeoutError:
+            module.fail_json(msg='Timeout waiting for changes to replicate')
+
+    module.exit_json(
+        changed=True,
+        diff=dict(
+            before=record,
+            after=to_dict(wanted_rset, zone_in, zone_id) if command_in != 'delete' else {},
+        ),
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/route53_facts.py b/route53_facts.py
new file mode 120000
index 00000000000..6b40f0529b0
--- /dev/null
+++ b/route53_facts.py
@@ -0,0 +1 @@
+route53_info.py
\ No newline at end of file
diff --git a/route53_health_check.py b/route53_health_check.py
new file mode 100644
index 00000000000..778b4c0595d
--- /dev/null
+++ b/route53_health_check.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+short_description: Add or delete health-checks in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS health checks in Amazon's Route 53 service.
+  - Only the port, resource_path, string_match and request_interval are
+    considered when updating existing health-checks.
+options:
+  state:
+    description:
+      - Specifies the action to take.
+    choices: [ 'present', 'absent' ]
+    type: str
+    default: 'present'
+  ip_address:
+    description:
+      - IP address of the end-point to check. Either this or I(fqdn) has to be provided.
+    type: str
+  port:
+    description:
+      - The port on the endpoint on which you want Amazon Route 53 to perform
+        health checks. Required for TCP checks.
+    type: int
+  type:
+    description:
+      - The type of health check that you want to create, which indicates how
+        Amazon Route 53 determines whether an endpoint is healthy.
+    required: true
+    choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+    type: str
+  resource_path:
+    description:
+      - The path that you want Amazon Route 53 to request when performing
+        health checks. The path can be any value for which your endpoint will
+        return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+        for example the file /docs/route53-health-check.html.
+      - Required for all checks except TCP.
+      - The path must begin with a /.
+      - Maximum 255 characters.
+    type: str
+  fqdn:
+    description:
+      - Domain name of the endpoint to check. Either this or I(ip_address) has
+        to be provided. When both are given the I(fqdn) is used in the C(Host:)
+        header of the HTTP request.
+    type: str
+  string_match:
+    description:
+      - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+        that you want Amazon Route 53 to search for in the response body from
+        the specified resource. If the string appears in the first 5120 bytes
+        of the response body, Amazon Route 53 considers the resource healthy.
+    type: str
+  request_interval:
+    description:
+      - The number of seconds between the time that Amazon Route 53 gets a
+        response from your endpoint and the time that it sends the next
+        health-check request.
+    default: 30
+    choices: [ 10, 30 ]
+    type: int
+  failure_threshold:
+    description:
+      - The number of consecutive health checks that an endpoint must pass or
+        fail for Amazon Route 53 to change the current status of the endpoint
+        from unhealthy to healthy or vice versa.
+    default: 3
+    choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+    type: int
+author: "zimbatm (@zimbatm)"
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Create a health-check for host1.example.com and use it in record
+- route53_health_check:
+    state: present
+    fqdn: host1.example.com
+    type: HTTP_STR_MATCH
+    resource_path: /
+    string_match: "Hello"
+    request_interval: 10
+    failure_threshold: 2
+  register: my_health_check
+
+- route53:
+    command: create
+    zone: "example.com"
+    type: CNAME
+    record: "www.example.com"
+    value: host1.example.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "{{ my_health_check.health_check.id }}"
+
+# Delete health-check
+- route53_health_check:
+    state: absent
+    fqdn: host1.example.com
+
+'''
+
+import uuid
+
+try:
+    import boto
+    import boto.ec2
+    from boto import route53
+    from boto.route53 import Route53Connection, exception
+    from boto.route53.healthcheck import HealthCheck
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+# Things that can't get changed:
+#   protocol
+#   ip_address or domain
+#   request_interval
+#   string_match if not previously enabled
+def find_health_check(conn, wanted):
+    """Searches for health checks that have the exact same set of immutable values"""
+
+    results = conn.get_list_health_checks()
+
+    while True:
+        for check in results.HealthChecks:
+            config = check.HealthCheckConfig
+            if (
+                config.get('IPAddress') == wanted.ip_addr and
+                config.get('FullyQualifiedDomainName') == wanted.fqdn and
+                config.get('Type') == wanted.hc_type and
+                config.get('RequestInterval') == str(wanted.request_interval) and
+                config.get('Port') == str(wanted.port)
+            ):
+                return check
+
+        if (results.IsTruncated == 'true'):
+            results = conn.get_list_health_checks(marker=results.NextMarker)
+        else:
+            return None
+
+
+def to_health_check(config):
+    return HealthCheck(
+        config.get('IPAddress'),
+        int(config.get('Port')),
+        config.get('Type'),
+        config.get('ResourcePath'),
+        fqdn=config.get('FullyQualifiedDomainName'),
+        string_match=config.get('SearchString'),
+        request_interval=int(config.get('RequestInterval')),
+        failure_threshold=int(config.get('FailureThreshold')),
+    )
+
+
+def health_check_diff(a, b):
+    a = a.__dict__
+    b = b.__dict__
+    if a == b:
+        return {}
+    diff = {}
+    for key in set(a.keys()) | set(b.keys()):
+        if a.get(key) != b.get(key):
+            diff[key] = b.get(key)
+    return diff
+
+
+def to_template_params(health_check):
+    params = {
+        'ip_addr_part': '',
+        'port': health_check.port,
+        'type': health_check.hc_type,
+        'resource_path_part': '',
+        'fqdn_part': '',
+        'string_match_part': '',
+        'request_interval': health_check.request_interval,
+        'failure_threshold': health_check.failure_threshold,
+    }
+    if health_check.ip_addr:
+        params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
+    if health_check.resource_path:
+        params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
+    if health_check.fqdn:
+        params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
+    if health_check.string_match:
+        params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
+    return params
+
+
+XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
+
+POSTXMLBody = """
+    <CreateHealthCheckRequest xmlns="%(xmlns)s">
+        <CallerReference>%(caller_ref)s</CallerReference>
+        <HealthCheckConfig>
+            %(ip_addr_part)s
+            <Port>%(port)s</Port>
+            <Type>%(type)s</Type>
+            %(resource_path_part)s
+            %(fqdn_part)s
+            %(string_match_part)s
+            <RequestInterval>%(request_interval)s</RequestInterval>
+            <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+        </HealthCheckConfig>
+    </CreateHealthCheckRequest>
+    """
+
+UPDATEHCXMLBody = """
+    <UpdateHealthCheckRequest xmlns="%(xmlns)s">
+        <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
+        %(ip_addr_part)s
+        <Port>%(port)s</Port>
+        %(resource_path_part)s
+        %(fqdn_part)s
+        %(string_match_part)s
+        <FailureThreshold>%(failure_threshold)i</FailureThreshold>
+    </UpdateHealthCheckRequest>
+    """
+
+
+def create_health_check(conn, health_check, caller_ref=None):
+    if caller_ref is None:
+        caller_ref = str(uuid.uuid4())
+    uri = '/%s/healthcheck' % conn.Version
+    params = to_template_params(health_check)
+    params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
+
+    xml_body = POSTXMLBody % params
+    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+    body = response.read()
+    boto.log.debug(body)
+    if response.status == 201:
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+    else:
+        raise exception.DNSServerError(response.status, response.reason, body)
+
+
+def update_health_check(conn, health_check_id, health_check_version, health_check):
+    uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
+    params = to_template_params(health_check)
+    params.update(
+        xmlns=conn.XMLNameSpace,
+        health_check_version=health_check_version,
+    )
+    xml_body = UPDATEHCXMLBody % params
+    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+    body = response.read()
+    boto.log.debug(body)
+    if response.status not in (200, 204):
+        raise exception.DNSServerError(response.status,
+                                       response.reason,
+                                       body)
+    e = boto.jsonresponse.Element()
+    h = boto.jsonresponse.XmlHandler(e, None)
+    h.parse(body)
+    return e
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        ip_address=dict(),
+        port=dict(type='int'),
+        type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+        resource_path=dict(),
+        fqdn=dict(),
+        string_match=dict(),
+        request_interval=dict(type='int', choices=[10, 30], default=30),
+        failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
+    ))
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto 2.27.0+ required for this module')
+
+    state_in = module.params.get('state')
+    ip_addr_in = module.params.get('ip_address')
+    port_in = module.params.get('port')
+    type_in = module.params.get('type')
+    resource_path_in = module.params.get('resource_path')
+    fqdn_in = module.params.get('fqdn')
+    string_match_in = module.params.get('string_match')
+    request_interval_in = module.params.get('request_interval')
+    failure_threshold_in = module.params.get('failure_threshold')
+
+    if ip_addr_in is None and fqdn_in is None:
+        module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
+
+    # Default port
+    if port_in is None:
+        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+            port_in = 80
+        elif type_in in ['HTTPS',
'HTTPS_STR_MATCH']: + port_in = 443 + else: + module.fail_json(msg="parameter 'port' is required for 'type' TCP") + + # string_match in relation with type + if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + if string_match_in is None: + module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types") + elif len(string_match_in) > 255: + module.fail_json(msg="parameter 'string_match' is limited to 255 characters max") + elif string_match_in: + module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types") + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + # connect to the route53 endpoint + try: + conn = Route53Connection(**aws_connect_kwargs) + except boto.exception.BotoServerError as e: + module.fail_json(msg=e.error_message) + + changed = False + action = None + check_id = None + wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in) + existing_check = find_health_check(conn, wanted_config) + if existing_check: + check_id = existing_check.Id + existing_config = to_health_check(existing_check.HealthCheckConfig) + + if state_in == 'present': + if existing_check is None: + action = "create" + check_id = create_health_check(conn, wanted_config).HealthCheck.Id + changed = True + else: + diff = health_check_diff(existing_config, wanted_config) + if diff: + action = "update" + update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config) + changed = True + elif state_in == 'absent': + if check_id: + action = "delete" + conn.delete_health_check(check_id) + changed = True + else: + module.fail_json(msg="Logic Error: Unknown state") + + module.exit_json(changed=changed, health_check=dict(id=check_id), action=action) + + +if __name__ == '__main__': + main() diff --git a/route53_info.py b/route53_info.py new file mode 100644 index 00000000000..a3d8d76f6de --- /dev/null +++ b/route53_info.py @@ -0,0 +1,499 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: route53_info +short_description: Retrieves route53 details using AWS methods +description: + - Gets various details related to Route53 zone, record set or health check details. + - This module was called C(route53_facts) before Ansible 2.9. The usage did not change. +options: + query: + description: + - Specifies the query action to take. + required: True + choices: [ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ] + type: str + change_id: + description: + - The ID of the change batch request. + - The value that you specify here is the value that + ChangeResourceRecordSets returned in the Id element + when you submitted the request. + - Required if I(query=change). + required: false + type: str + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone. + - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details). + - Required if I(query) is set to I(record_sets). + required: false + type: str + max_items: + description: + - Maximum number of items to return for various get/list requests. 
+ required: false + type: str + next_marker: + description: + - "Some requests such as list_command: hosted_zones will return a maximum + number of entries - EG 100 or the number specified by I(max_items). + If the number of entries exceeds this maximum another request can be sent + using the NextMarker entry from the first response to get the next page + of results." + required: false + type: str + delegation_set_id: + description: + - The DNS Zone delegation set ID. + required: false + type: str + start_record_name: + description: + - "The first name in the lexicographic ordering of domain names that you want + the list_command: record_sets to start listing from." + required: false + type: str + type: + description: + - The type of DNS record. + required: false + choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ] + type: str + dns_name: + description: + - The first name in the lexicographic ordering of domain names that you want + the list_command to start listing from. + required: false + type: str + resource_id: + description: + - The ID/s of the specified resource/s. + - Required if I(query=health_check) and I(health_check_method=tags). + - Required if I(query=hosted_zone) and I(hosted_zone_method=tags). + required: false + aliases: ['resource_ids'] + type: list + elements: str + health_check_id: + description: + - The ID of the health check. + - Required if C(query) is set to C(health_check) and + C(health_check_method) is set to C(details) or C(status) or C(failure_reason). + required: false + type: str + hosted_zone_method: + description: + - "This is used in conjunction with query: hosted_zone. + It allows for listing details, counts or tags of various + hosted zone details." + required: false + choices: [ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags', + ] + default: 'list' + type: str + health_check_method: + description: + - "This is used in conjunction with query: health_check. + It allows for listing details, counts or tags of various + health check details." 
+ required: false + choices: [ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ] + default: 'list' + type: str +author: Karen Cheng (@Etherdaemon) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Simple example of listing all hosted zones +- name: List all hosted zones + route53_info: + query: hosted_zone + register: hosted_zones + +# Getting a count of hosted zones +- name: Return a count of all hosted zones + route53_info: + query: hosted_zone + hosted_zone_method: count + register: hosted_zone_count + +- name: List the first 20 resource record sets in a given hosted zone + route53_info: + profile: account_name + query: record_sets + hosted_zone_id: ZZZ1111112222 + max_items: 20 + register: record_sets + +- name: List first 20 health checks + route53_info: + query: health_check + health_check_method: list + max_items: 20 + register: health_checks + +- name: Get health check last failure_reason + route53_info: + query: health_check + health_check_method: failure_reason + health_check_id: 00000000-1111-2222-3333-12345678abcd + register: health_check_failure_reason + +- name: Retrieve reusable delegation set details + route53_info: + query: reusable_delegation_set + delegation_set_id: delegation id + register: delegation_sets + +- name: setup of example for using next_marker + route53_info: + query: hosted_zone + max_items: 1 + register: first_info + +- name: example for using next_marker + route53_info: + query: hosted_zone + next_marker: "{{ first_info.NextMarker }}" + max_items: 1 + when: "{{ 'NextMarker' in first_info }}" + +- name: retrieve host entries starting with host1.workshop.test.io + block: + - name: grab zone id + route53_zone: + zone: "test.io" + register: AWSINFO + + - name: grab Route53 record information + route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ AWSINFO.zone_id }}" + start_record_name: "host1.workshop.test.io" + register: RECORDS +''' +try: + import boto + import botocore + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible.module_utils._text import to_native + + +def get_hosted_zone(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['Id'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + return client.get_hosted_zone(**params) + + +def reusable_delegation_set_details(client, module): + params = dict() + if not module.params.get('delegation_set_id'): + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_reusable_delegation_sets(**params) + else: + params['DelegationSetId'] = module.params.get('delegation_set_id') + results = client.get_reusable_delegation_set(**params) + + return results + + +def list_hosted_zones(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + if module.params.get('delegation_set_id'): + params['DelegationSetId'] = 
module.params.get('delegation_set_id') + + paginator = client.get_paginator('list_hosted_zones') + zones = paginator.paginate(**params).build_full_result()['HostedZones'] + return { + "HostedZones": zones, + "list": zones, + } + + +def list_hosted_zones_by_name(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + + if module.params.get('dns_name'): + params['DNSName'] = module.params.get('dns_name') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + return client.list_hosted_zones_by_name(**params) + + +def change_details(client, module): + params = dict() + + if module.params.get('change_id'): + params['Id'] = module.params.get('change_id') + else: + module.fail_json(msg="change_id is required") + + results = client.get_change(**params) + return results + + +def checker_ip_range_details(client, module): + return client.get_checker_ip_ranges() + + +def get_count(client, module): + if module.params.get('query') == 'health_check': + results = client.get_health_check_count() + else: + results = client.get_hosted_zone_count() + + return results + + +def get_health_check(client, module): + params = dict() + + if not module.params.get('health_check_id'): + module.fail_json(msg="health_check_id is required") + else: + params['HealthCheckId'] = module.params.get('health_check_id') + + if module.params.get('health_check_method') == 'details': + results = client.get_health_check(**params) + elif module.params.get('health_check_method') == 'failure_reason': + results = client.get_health_check_last_failure_reason(**params) + elif module.params.get('health_check_method') == 'status': + results = client.get_health_check_status(**params) + + return results + + +def get_resource_tags(client, module): + params = dict() + + if module.params.get('resource_id'): + params['ResourceIds'] = module.params.get('resource_id') + else: + module.fail_json(msg="resource_id or resource_ids is required") + + if module.params.get('query') == 'health_check': + params['ResourceType'] = 'healthcheck' + else: + params['ResourceType'] = 'hostedzone' + + return client.list_tags_for_resources(**params) + + +def list_health_checks(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + paginator = client.get_paginator('list_health_checks') + health_checks = paginator.paginate(**params).build_full_result()['HealthChecks'] + return { + "HealthChecks": health_checks, + "list": health_checks, + } + + +def record_sets_details(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('start_record_name'): + params['StartRecordName'] = module.params.get('start_record_name') + + if module.params.get('type') and not module.params.get('start_record_name'): + module.fail_json(msg="start_record_name must be specified if type is set") + elif module.params.get('type'): + params['StartRecordType'] = module.params.get('type') + + paginator = client.get_paginator('list_resource_record_sets') + record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets'] + return 
{ + "ResourceRecordSets": record_sets, + "list": record_sets, + } + + +def health_check_details(client, module): + health_check_invocations = { + 'list': list_health_checks, + 'details': get_health_check, + 'status': get_health_check, + 'failure_reason': get_health_check, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = health_check_invocations[module.params.get('health_check_method')](client, module) + return results + + +def hosted_zone_details(client, module): + hosted_zone_invocations = { + 'details': get_hosted_zone, + 'list': list_hosted_zones, + 'list_by_name': list_hosted_zones_by_name, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module) + return results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + query=dict(choices=[ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ], required=True), + change_id=dict(), + hosted_zone_id=dict(), + max_items=dict(), + next_marker=dict(), + delegation_set_id=dict(), + start_record_name=dict(), + type=dict(choices=[ + 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' + ]), + dns_name=dict(), + resource_id=dict(type='list', aliases=['resource_ids']), + health_check_id=dict(), + hosted_zone_method=dict(choices=[ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags' + ], default='list'), + health_check_method=dict(choices=[ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ], default='list'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['hosted_zone_method', 'health_check_method'], + ], + ) + if module._name == 'route53_facts': + module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", version='2.13') + + # Validate Requirements + if not (HAS_BOTO or HAS_BOTO3): + module.fail_json(msg='json and boto/boto3 is required.') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs) + + invocations = { + 'change': change_details, + 'checker_ip_range': checker_ip_range_details, + 'health_check': health_check_details, + 'hosted_zone': hosted_zone_details, + 'record_sets': record_sets_details, + 'reusable_delegation_set': reusable_delegation_set_details, + } + + results = dict(changed=False) + try: + results = invocations[module.params.get('query')](route53, module) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/route53_zone.py b/route53_zone.py new file mode 100644 index 00000000000..2d13cb9073e --- /dev/null +++ b/route53_zone.py @@ -0,0 +1,440 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: route53_zone +short_description: add or delete Route53 zones +description: + - Creates and deletes Route53 private and public zones. 
+requirements: [ boto3 ] +options: + zone: + description: + - "The DNS zone record (eg: foo.com.)" + required: true + type: str + state: + description: + - Whether or not the zone should exist or not. + default: present + choices: [ "present", "absent" ] + type: str + vpc_id: + description: + - The VPC ID the zone should be a part of (if this is going to be a private zone). + type: str + vpc_region: + description: + - The VPC Region the zone should be a part of (if this is going to be a private zone). + type: str + comment: + description: + - Comment associated with the zone. + default: '' + type: str + hosted_zone_id: + description: + - The unique zone identifier you want to delete or "all" if there are many zones with the same domain name. + - Required if there are multiple zones identified with the above options. + type: str + delegation_set_id: + description: + - The reusable delegation set ID to be associated with the zone. + - Note that you can't associate a reusable delegation set with a private hosted zone. + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +author: "Christopher Troup (@minichate)" +''' + +EXAMPLES = ''' +- name: create a public zone + route53_zone: + zone: example.com + comment: this is an example + +- name: delete a public zone + route53_zone: + zone: example.com + state: absent + +- name: create a private zone + route53_zone: + zone: devel.example.com + vpc_id: '{{ myvpc_id }}' + vpc_region: us-west-2 + comment: developer domain + +- name: create a public zone associated with a specific reusable delegation set + route53_zone: + zone: example.com + comment: reusable delegation set example + delegation_set_id: A1BCDEF2GHIJKL +''' + +RETURN = ''' +comment: + description: optional hosted zone comment + returned: when hosted zone exists + type: str + sample: "Private zone" +name: + description: hosted zone name + returned: when hosted zone exists + type: str + sample: "private.local." 
+private_zone: + description: whether hosted zone is private or public + returned: when hosted zone exists + type: bool + sample: true +vpc_id: + description: id of vpc attached to private hosted zone + returned: for private hosted zone + type: str + sample: "vpc-1d36c84f" +vpc_region: + description: region of vpc attached to private hosted zone + returned: for private hosted zone + type: str + sample: "eu-west-1" +zone_id: + description: hosted zone id + returned: when hosted zone exists + type: str + sample: "Z6JQG9820BEFMW" +delegation_set_id: + description: id of the associated reusable delegation set + returned: for public hosted zones, if they have been associated with a reusable delegation set + type: str + sample: "A1BCDEF2GHIJKL" +''' + +import time +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +def find_zones(module, client, zone_in, private_zone): + try: + paginator = client.get_paginator('list_hosted_zones') + results = paginator.paginate().build_full_result() + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not list current hosted zones") + zones = [] + for r53zone in results['HostedZones']: + if r53zone['Name'] != zone_in: + continue + # only save zone names that match the public/private setting + if (r53zone['Config']['PrivateZone'] and private_zone) or \ + (not r53zone['Config']['PrivateZone'] and not private_zone): + zones.append(r53zone) + + return zones + + +def create(module, client, matching_zones): + zone_in = module.params.get('zone').lower() + vpc_id = module.params.get('vpc_id') + vpc_region = module.params.get('vpc_region') + comment = module.params.get('comment') + delegation_set_id = module.params.get('delegation_set_id') + + if not zone_in.endswith('.'): + zone_in += "." 
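+
+    # A zone is only treated as private when both vpc_id and vpc_region are
+    # supplied; providing just one of them falls through to the public-zone
+    # path below.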
+ + private_zone = bool(vpc_id and vpc_region) + + record = { + 'private_zone': private_zone, + 'vpc_id': vpc_id, + 'vpc_region': vpc_region, + 'comment': comment, + 'name': zone_in, + 'delegation_set_id': delegation_set_id, + 'zone_id': None, + } + + if private_zone: + changed, result = create_or_update_private(module, client, matching_zones, record) + else: + changed, result = create_or_update_public(module, client, matching_zones, record) + + return changed, result + + +def create_or_update_private(module, client, matching_zones, record): + for z in matching_zones: + try: + result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) + zone_details = result['HostedZone'] + vpc_details = result['VPCs'] + current_vpc_id = None + current_vpc_region = None + if isinstance(vpc_details, dict): + if vpc_details['VPC']['VPCId'] == record['vpc_id']: + current_vpc_id = vpc_details['VPC']['VPCId'] + current_vpc_region = vpc_details['VPC']['VPCRegion'] + else: + if record['vpc_id'] in [v['VPCId'] for v in vpc_details]: + current_vpc_id = record['vpc_id'] + if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]: + current_vpc_region = record['vpc_region'] + + if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + if not module.check_mode: + try: + client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + return True, record + else: + record['msg'] = "There is already a private hosted zone in the same region with the same VPC \ + you chose. Unable to create a new private hosted zone in the same name space." 
+ return False, record + + if not module.check_mode: + try: + result = client.create_hosted_zone( + Name=record['name'], + HostedZoneConfig={ + 'Comment': record['comment'] if record['comment'] is not None else "", + 'PrivateZone': True, + }, + VPC={ + 'VPCRegion': record['vpc_region'], + 'VPCId': record['vpc_id'], + }, + CallerReference="%s-%s" % (record['name'], time.time()), + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not create hosted zone") + + hosted_zone = result['HostedZone'] + zone_id = hosted_zone['Id'].replace('/hostedzone/', '') + record['zone_id'] = zone_id + + changed = True + return changed, record + + +def create_or_update_public(module, client, matching_zones, record): + zone_details, zone_delegation_set_details = None, {} + for matching_zone in matching_zones: + try: + zone = client.get_hosted_zone(Id=matching_zone['Id']) + zone_details = zone['HostedZone'] + zone_delegation_set_details = zone.get('DelegationSet', {}) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id']) + if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + if not module.check_mode: + try: + client.update_hosted_zone_comment( + Id=zone_details['Id'], + Comment=record['comment'] + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + changed = True + else: + changed = False + break + + if zone_details is None: + if not module.check_mode: + try: + params = dict( + Name=record['name'], + HostedZoneConfig={ + 'Comment': record['comment'] if record['comment'] is not None else "", + 'PrivateZone': False, + }, + CallerReference="%s-%s" % (record['name'], time.time()), + ) + + if record.get('delegation_set_id') is not None: + params['DelegationSetId'] = record['delegation_set_id'] + + result = client.create_hosted_zone(**params) + zone_details = result['HostedZone'] + zone_delegation_set_details = result.get('DelegationSet', {}) + + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not create hosted zone") + changed = True + + if module.check_mode: + if zone_details: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + else: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + record['name'] = zone_details['Name'] + record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') + + return changed, record + + +def delete_private(module, client, matching_zones, vpc_id, vpc_region): + for z in matching_zones: + try: + result = client.get_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) + zone_details = result['HostedZone'] + vpc_details = result['VPCs'] + if isinstance(vpc_details, dict): + if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']: + if not module.check_mode: + try: + client.delete_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) + return True, "Successfully deleted %s" % zone_details['Name'] + else: + if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]: + if not module.check_mode: + try: + client.delete_hosted_zone(Id=z['Id']) + except (BotoCoreError, 
ClientError) as e:
+                        module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+                return True, "Successfully deleted %s" % zone_details['Name']
+
+    return False, "The vpc_id and the vpc_region do not match a private hosted zone."
+
+
+def delete_public(module, client, matching_zones):
+    if len(matching_zones) > 1:
+        changed = False
+        msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
+    else:
+        if not module.check_mode:
+            try:
+                client.delete_hosted_zone(Id=matching_zones[0]['Id'])
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Could not delete hosted zone %s" % matching_zones[0]['Id'])
+        changed = True
+        msg = "Successfully deleted %s" % matching_zones[0]['Id']
+    return changed, msg
+
+
+def delete_hosted_id(module, client, hosted_zone_id, matching_zones):
+    if hosted_zone_id == "all":
+        deleted = []
+        for z in matching_zones:
+            deleted.append(z['Id'])
+            if not module.check_mode:
+                try:
+                    client.delete_hosted_zone(Id=z['Id'])
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+        changed = True
+        msg = "Successfully deleted zones: %s" % deleted
+    elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
+        if not module.check_mode:
+            try:
+                client.delete_hosted_zone(Id=hosted_zone_id)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
+        changed = True
+        msg = "Successfully deleted zone: %s" % hosted_zone_id
+    else:
+        changed = False
+        msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
+    return changed, msg
+
+
+def delete(module, client, matching_zones):
+    zone_in = module.params.get('zone').lower()
+    vpc_id = module.params.get('vpc_id')
+    vpc_region = module.params.get('vpc_region')
+    hosted_zone_id = module.params.get('hosted_zone_id')
+
+    if not zone_in.endswith('.'):
+        zone_in += "."
+
+    private_zone = bool(vpc_id and vpc_region)
+
+    if zone_in in [z['Name'] for z in matching_zones]:
+        if hosted_zone_id:
+            changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones)
+        else:
+            if private_zone:
+                changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region)
+            else:
+                changed, result = delete_public(module, client, matching_zones)
+    else:
+        changed = False
+        result = "No zone to delete."
+
+    return changed, result
+
+
+def main():
+    argument_spec = dict(
+        zone=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent']),
+        vpc_id=dict(default=None),
+        vpc_region=dict(default=None),
+        comment=dict(default=''),
+        hosted_zone_id=dict(),
+        delegation_set_id=dict(),
+    )
+
+    mutually_exclusive = [
+        ['delegation_set_id', 'vpc_id'],
+        ['delegation_set_id', 'vpc_region'],
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True,
+    )
+
+    zone_in = module.params.get('zone').lower()
+    state = module.params.get('state').lower()
+    vpc_id = module.params.get('vpc_id')
+    vpc_region = module.params.get('vpc_region')
+
+    if not zone_in.endswith('.'):
+        zone_in += "."
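+    # (Route 53 returns zone names fully qualified, so the trailing dot added
+    # above makes a plain 'example.com' comparable with the API's 'example.com.')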
+ + private_zone = bool(vpc_id and vpc_region) + + client = module.client('route53') + + zones = find_zones(module, client, zone_in, private_zone) + if state == 'present': + changed, result = create(module, client, matching_zones=zones) + elif state == 'absent': + changed, result = delete(module, client, matching_zones=zones) + + if isinstance(result, dict): + module.exit_json(changed=changed, result=result, **result) + else: + module.exit_json(changed=changed, result=result) + + +if __name__ == '__main__': + main() diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py new file mode 100644 index 00000000000..a75e44db5a6 --- /dev/null +++ b/s3_bucket_notification.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# (c) 2019, XLAB d.o.o +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: s3_bucket_notification +short_description: Creates, updates or deletes S3 Bucket notification for lambda +description: + - This module allows the management of AWS Lambda function bucket event mappings via the + Ansible framework. Use module M(lambda) to manage the lambda function itself, M(lambda_alias) + to manage function aliases and M(lambda_policy) to modify lambda permissions. +notes: + - This module heavily depends on M(lambda_policy) as you need to allow C(lambda:InvokeFunction) + permission for your lambda function. + +author: + - XLAB d.o.o. (@xlab-si) + - Aljaz Kosir (@aljazkosir) + - Miha Plesko (@miha-plesko) +options: + event_name: + description: + - Unique name for event notification on bucket. + required: true + type: str + lambda_function_arn: + description: + - The ARN of the lambda function. + aliases: ['function_arn'] + type: str + bucket_name: + description: + - S3 bucket name. + required: true + type: str + state: + description: + - Describes the desired state. + default: "present" + choices: ["present", "absent"] + type: str + lambda_alias: + description: + - Name of the Lambda function alias. + - Mutually exclusive with I(lambda_version). + type: str + lambda_version: + description: + - Version of the Lambda function. + - Mutually exclusive with I(lambda_alias). + type: int + events: + description: + - Events that you want to be triggering notifications. You can select multiple events to send + to the same destination, you can set up different events to send to different destinations, + and you can set up a prefix or suffix for an event. However, for each bucket, + individual events cannot have multiple configurations with overlapping prefixes or + suffixes that could match the same object key. + - Required when I(state=present). + choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', + 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', + 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', + 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', + 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] + type: list + elements: str + prefix: + description: + - Optional prefix to limit the notifications to objects with keys that start with matching + characters. + type: str + suffix: + description: + - Optional suffix to limit the notifications to objects with keys that end with matching + characters. 
+    type: str
+requirements:
+    - boto3
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+---
+# Example that creates a lambda event notification for a bucket
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Process jpg image
+    s3_bucket_notification:
+      state: present
+      event_name: on_file_add_or_remove
+      bucket_name: test-bucket
+      lambda_function_arn: arn:aws:lambda:us-east-2:526810320200:function:test-lambda
+      events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+      prefix: images/
+      suffix: .jpg
+'''
+
+RETURN = '''
+notification_configuration:
+    description: list of currently applied notifications
+    returned: success
+    type: list
+'''
+
+from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+    from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+    pass  # will be protected by AnsibleAWSModule
+
+
+class AmazonBucket:
+    def __init__(self, client, bucket_name):
+        self.client = client
+        self.bucket_name = bucket_name
+        self._full_config_cache = None
+
+    def full_config(self):
+        if self._full_config_cache is None:
+            self._full_config_cache = [Config.from_api(cfg) for cfg in
+                                       self.client.get_bucket_notification_configuration(
+                                           Bucket=self.bucket_name).get(
+                                           'LambdaFunctionConfigurations', list())]
+        return self._full_config_cache
+
+    def current_config(self, config_name):
+        for config in self.full_config():
+            if config.raw['Id'] == config_name:
+                return config
+
+    def apply_config(self, desired):
+        configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
+        configs.append(desired.raw)
+        self._upload_bucket_config(configs)
+        return configs
+
+    def delete_config(self, desired):
+        configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
+        self._upload_bucket_config(configs)
+        return configs
+
+    def _upload_bucket_config(self, config):
+        self.client.put_bucket_notification_configuration(
+            Bucket=self.bucket_name,
+            NotificationConfiguration={
+                'LambdaFunctionConfigurations': config
+            })
+
+
+class Config:
+    def __init__(self, content):
+        self._content = content
+        self.name = content['Id']
+
+    @property
+    def raw(self):
+        return self._content
+
+    def __eq__(self, other):
+        if other:
+            return self.raw == other.raw
+        return False
+
+    @classmethod
+    def from_params(cls, **params):
+        function_arn = params['lambda_function_arn']
+
+        qualifier = None
+        if params['lambda_version'] > 0:
+            qualifier = str(params['lambda_version'])
+        elif params['lambda_alias']:
+            qualifier = str(params['lambda_alias'])
+        if qualifier:
+            params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+        return cls({
+            'Id': params['event_name'],
+            'LambdaFunctionArn': params['lambda_function_arn'],
+            'Events': sorted(params['events']),
+            'Filter': {
+                'Key': {
+                    'FilterRules': [{
+                        'Name': 'Prefix',
+                        'Value': params['prefix']
+                    }, {
+                        'Name': 'Suffix',
+                        'Value': params['suffix']
+                    }]
+                }
+            }
+        })
+
+    @classmethod
+    def from_api(cls, config):
+        return cls(config)
+
+
+def main():
+    event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
+                   's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
+                   's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
+                   's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
+                   's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+    argument_spec =
dict( + state=dict(default='present', choices=['present', 'absent']), + event_name=dict(required=True), + lambda_function_arn=dict(aliases=['function_arn']), + bucket_name=dict(required=True), + events=dict(type='list', default=[], choices=event_types), + prefix=dict(default=''), + suffix=dict(default=''), + lambda_alias=dict(), + lambda_version=dict(type='int', default=0), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['lambda_alias', 'lambda_version']], + required_if=[['state', 'present', ['events']]] + ) + + bucket = AmazonBucket(module.client('s3'), module.params['bucket_name']) + current = bucket.current_config(module.params['event_name']) + desired = Config.from_params(**module.params) + notification_configuration = [cfg.raw for cfg in bucket.full_config()] + + state = module.params['state'] + try: + if (state == 'present' and current == desired) or (state == 'absent' and not current): + changed = False + elif module.check_mode: + changed = True + elif state == 'present': + changed = True + notification_configuration = bucket.apply_config(desired) + elif state == 'absent': + changed = True + notification_configuration = bucket.delete_config(desired) + except (ClientError, BotoCoreError) as e: + module.fail_json(msg='{0}'.format(e)) + + module.exit_json(**dict(changed=changed, + notification_configuration=[camel_dict_to_snake_dict(cfg) for cfg in + notification_configuration])) + + +if __name__ == '__main__': + main() diff --git a/s3_lifecycle.py b/s3_lifecycle.py new file mode 100644 index 00000000000..d1275687144 --- /dev/null +++ b/s3_lifecycle.py @@ -0,0 +1,514 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: s3_lifecycle +short_description: Manage s3 bucket lifecycle rules in AWS +description: + - Manage s3 bucket lifecycle rules in AWS +author: "Rob White (@wimnat)" +notes: + - If specifying expiration time as days then transition time must also be specified in days + - If specifying expiration time as a date then transition time must also be specified as a date +requirements: + - python-dateutil +options: + name: + description: + - "Name of the s3 bucket" + required: true + type: str + expiration_date: + description: + - > + Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must + be midnight and a GMT timezone must be specified. + type: str + expiration_days: + description: + - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + type: int + prefix: + description: + - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket." + type: str + purge_transitions: + description: + - > + "Whether to replace all the current transition(s) with the new transition(s). When false, the provided transition(s) + will be added, replacing transitions with the same storage_class. 
When true, existing transitions will be removed and
+        replaced with the new transition(s)."
+    default: true
+    type: bool
+  noncurrent_version_expiration_days:
+    description:
+      - 'Delete noncurrent versions this many days after they become noncurrent'
+    required: false
+    type: int
+  noncurrent_version_storage_class:
+    description:
+      - 'Transition noncurrent versions to this storage class'
+    default: glacier
+    choices: ['glacier', 'onezone_ia', 'standard_ia']
+    required: false
+    type: str
+  noncurrent_version_transition_days:
+    description:
+      - 'Transition noncurrent versions this many days after they become noncurrent'
+    required: false
+    type: int
+  noncurrent_version_transitions:
+    description:
+      - >
+        A list of transition behaviors to be applied to noncurrent versions for the rule. Each storage class may be used only once. Each transition
+        behavior contains these elements
+          I(transition_days)
+          I(storage_class)
+    type: list
+  rule_id:
+    description:
+      - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
+    type: str
+  state:
+    description:
+      - "Create or remove the lifecycle rule"
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  status:
+    description:
+      - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
+    default: enabled
+    choices: [ 'enabled', 'disabled' ]
+    type: str
+  storage_class:
+    description:
+      - "The storage class to transition to. Currently there are three supported values - 'glacier', 'onezone_ia', or 'standard_ia'."
+      - "The 'standard_ia' class has only been available since Ansible version 2.2."
+    default: glacier
+    choices: [ 'glacier', 'onezone_ia', 'standard_ia']
+    type: str
+  transition_date:
+    description:
+      - >
+        Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
+        The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
+        this parameter is required.
+    type: str
+  transition_days:
+    description:
+      - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+    type: int
+  transitions:
+    description:
+      - A list of transition behaviors to be applied to the rule. Each storage class may be used only once. Each transition
+        behavior may contain these elements
+          I(transition_days)
+          I(transition_date)
+          I(storage_class)
+    type: list
+  requester_pays:
+    description:
+      - The I(requester_pays) option does nothing and will be removed in Ansible 2.14.
+    type: bool
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+- s3_lifecycle:
+    name: mybucket
+    expiration_days: 30
+    prefix: logs/
+    status: enabled
+    state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+- s3_lifecycle:
+    name: mybucket
+    transition_days: 7
+    expiration_days: 90
+    prefix: logs/
+    status: enabled
+    state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030.
+# Note that midnight GMT must be specified. +# Be sure to quote your date strings +- s3_lifecycle: + name: mybucket + transition_date: "2020-12-30T00:00:00.000Z" + expiration_date: "2030-12-30T00:00:00.000Z" + prefix: logs/ + status: enabled + state: present + +# Disable the rule created above +- s3_lifecycle: + name: mybucket + prefix: logs/ + status: disabled + state: present + +# Delete the lifecycle rule created above +- s3_lifecycle: + name: mybucket + prefix: logs/ + state: absent + +# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class. +- s3_lifecycle: + name: mybucket + prefix: backups/ + storage_class: standard_ia + transition_days: 31 + state: present + status: enabled + +# Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90 +- s3_lifecycle: + name: mybucket + prefix: logs/ + state: present + status: enabled + transitions: + - transition_days: 30 + storage_class: standard_ia + - transition_days: 90 + storage_class: glacier +''' + +from copy import deepcopy +import datetime + +try: + import dateutil.parser + HAS_DATEUTIL = True +except ImportError: + HAS_DATEUTIL = False + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAwsModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + + +def create_lifecycle_rule(client, module): + + name = module.params.get("name") + expiration_date = module.params.get("expiration_date") + expiration_days = module.params.get("expiration_days") + noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days") + noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days") + noncurrent_version_transitions = module.params.get("noncurrent_version_transitions") + noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class") + prefix = module.params.get("prefix") or "" + rule_id = module.params.get("rule_id") + status = module.params.get("status") + storage_class = module.params.get("storage_class") + transition_date = module.params.get("transition_date") + transition_days = module.params.get("transition_days") + transitions = module.params.get("transitions") + purge_transitions = module.params.get("purge_transitions") + changed = False + + # Get the bucket's current lifecycle rules + try: + current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name) + current_lifecycle_rules = current_lifecycle['Rules'] + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration': + current_lifecycle_rules = [] + else: + module.fail_json_aws(e) + except BotoCoreError as e: + module.fail_json_aws(e) + + rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) + if rule_id is not None: + rule['ID'] = rule_id + # Create expiration + if expiration_days is not None: + rule['Expiration'] = dict(Days=expiration_days) + elif expiration_date is not None: + rule['Expiration'] = dict(Date=expiration_date) + + if noncurrent_version_expiration_days is not None: + rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days) + + if transition_days is not None: + rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] + + elif transition_date is not None: + rule['Transitions'] = [dict(Date=transition_date, StorageClass=storage_class.upper()), ] + + 
if transitions is not None: + if not rule.get('Transitions'): + rule['Transitions'] = [] + for transition in transitions: + t_out = dict() + if transition.get('transition_date'): + t_out['Date'] = transition['transition_date'] + elif transition.get('transition_days'): + t_out['Days'] = transition['transition_days'] + if transition.get('storage_class'): + t_out['StorageClass'] = transition['storage_class'].upper() + rule['Transitions'].append(t_out) + + if noncurrent_version_transition_days is not None: + rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days, + StorageClass=noncurrent_version_storage_class.upper()), ] + + if noncurrent_version_transitions is not None: + if not rule.get('NoncurrentVersionTransitions'): + rule['NoncurrentVersionTransitions'] = [] + for noncurrent_version_transition in noncurrent_version_transitions: + t_out = dict() + t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days'] + if noncurrent_version_transition.get('storage_class'): + t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper() + rule['NoncurrentVersionTransitions'].append(t_out) + + lifecycle_configuration = dict(Rules=[]) + appended = False + # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule + if current_lifecycle_rules: + # If rule ID exists, use that for comparison otherwise compare based on prefix + for existing_rule in current_lifecycle_rules: + if rule.get('ID') == existing_rule.get('ID') and rule['Filter']['Prefix'] != existing_rule.get('Filter', {}).get('Prefix', ''): + existing_rule.pop('ID') + elif rule_id is None and rule['Filter']['Prefix'] == existing_rule.get('Filter', {}).get('Prefix', ''): + existing_rule.pop('ID') + if rule.get('ID') == existing_rule.get('ID'): + changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration) + changed = changed_ or changed + appended = appended_ or appended + else: + lifecycle_configuration['Rules'].append(existing_rule) + + # If nothing appended then append now as the rule must not exist + if not appended: + lifecycle_configuration['Rules'].append(rule) + changed = True + else: + lifecycle_configuration['Rules'].append(rule) + changed = True + + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed) + + +def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): + changed = False + if existing_rule['Status'] != new_rule['Status']: + if not new_rule.get('Transitions') and existing_rule.get('Transitions'): + new_rule['Transitions'] = existing_rule['Transitions'] + if not new_rule.get('Expiration') and existing_rule.get('Expiration'): + new_rule['Expiration'] = existing_rule['Expiration'] + if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'): + new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration'] + lifecycle_obj['Rules'].append(new_rule) + changed = True + appended = True + else: + if not purge_transitions: + merge_transitions(new_rule, existing_rule) + if compare_rule(new_rule, existing_rule, purge_transitions): + lifecycle_obj['Rules'].append(new_rule) + appended = True + else: + lifecycle_obj['Rules'].append(new_rule) + changed = True + appended = True + return changed, 
appended
+
+
+def compare_rule(rule_a, rule_b, purge_transitions):
+
+    # Copy objects
+    rule1 = deepcopy(rule_a)
+    rule2 = deepcopy(rule_b)
+
+    if purge_transitions:
+        return rule1 == rule2
+    else:
+        transitions1 = rule1.pop('Transitions', [])
+        transitions2 = rule2.pop('Transitions', [])
+        noncurrent_transitions1 = rule1.pop('NoncurrentVersionTransitions', [])
+        noncurrent_transitions2 = rule2.pop('NoncurrentVersionTransitions', [])
+        if rule1 != rule2:
+            return False
+        for transition in transitions1:
+            if transition not in transitions2:
+                return False
+        for transition in noncurrent_transitions1:
+            if transition not in noncurrent_transitions2:
+                return False
+        return True
+
+
+def merge_transitions(updated_rule, updating_rule):
+    # because of the legal s3 transitions, we know only one can exist for each storage class.
+    # So, our strategy is to build some dicts, keyed on storage class, and add the storage class transitions that are only
+    # in updating_rule to updated_rule
+    updated_transitions = {}
+    updating_transitions = {}
+    for transition in updated_rule.get('Transitions', []):
+        updated_transitions[transition['StorageClass']] = transition
+    for transition in updating_rule.get('Transitions', []):
+        updating_transitions[transition['StorageClass']] = transition
+    for storage_class, transition in updating_transitions.items():
+        if updated_transitions.get(storage_class) is None:
+            updated_rule['Transitions'].append(transition)
+
+
+def destroy_lifecycle_rule(client, module):
+
+    name = module.params.get("name")
+    prefix = module.params.get("prefix")
+    rule_id = module.params.get("rule_id")
+    changed = False
+
+    if prefix is None:
+        prefix = ""
+
+    # Get the bucket's current lifecycle rules
+    try:
+        current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules']
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
+            current_lifecycle_rules = []
+        else:
+            module.fail_json_aws(e)
+    except BotoCoreError as e:
+        module.fail_json_aws(e)
+
+    # Create lifecycle
+    lifecycle_obj = dict(Rules=[])
+
+    # Check if rule exists
+    # If an ID exists, use that otherwise compare based on prefix
+    if rule_id is not None:
+        for existing_rule in current_lifecycle_rules:
+            if rule_id == existing_rule['ID']:
+                # We're not keeping the rule (i.e. deleting) so mark as changed
+                changed = True
+            else:
+                lifecycle_obj['Rules'].append(existing_rule)
+    else:
+        for existing_rule in current_lifecycle_rules:
+            if prefix == existing_rule['Filter']['Prefix']:
+                # We're not keeping the rule (i.e.
deleting) so mark as changed
+                changed = True
+            else:
+                lifecycle_obj['Rules'].append(existing_rule)
+
+    # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration
+    try:
+        if lifecycle_obj['Rules']:
+            client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj)
+        elif current_lifecycle_rules:
+            changed = True
+            client.delete_bucket_lifecycle(Bucket=name)
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e)
+    module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=True, type='str'),
+        expiration_days=dict(type='int'),
+        expiration_date=dict(),
+        noncurrent_version_expiration_days=dict(type='int'),
+        noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
+        noncurrent_version_transition_days=dict(type='int'),
+        noncurrent_version_transitions=dict(type='list'),
+        prefix=dict(),
+        requester_pays=dict(type='bool', removed_in_version='2.14'),
+        rule_id=dict(),
+        state=dict(default='present', choices=['present', 'absent']),
+        status=dict(default='enabled', choices=['enabled', 'disabled']),
+        storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
+        transition_days=dict(type='int'),
+        transition_date=dict(),
+        transitions=dict(type='list'),
+        purge_transitions=dict(default='yes', type='bool')
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              mutually_exclusive=[
+                                  ['expiration_days', 'expiration_date'],
+                                  ['expiration_days', 'transition_date'],
+                                  ['transition_days', 'transition_date'],
+                                  ['transition_days', 'expiration_date'],
+                                  ['transition_days', 'transitions'],
+                                  ['transition_date', 'transitions'],
+                                  ['noncurrent_version_transition_days', 'noncurrent_version_transitions'],
+                              ],)
+
+    if not HAS_DATEUTIL:
+        module.fail_json(msg='dateutil required for this module')
+
+    client = module.client('s3')
+
+    expiration_date = module.params.get("expiration_date")
+    transition_date = module.params.get("transition_date")
+    state = module.params.get("state")
+
+    if state == 'present' and module.params["status"] == "enabled":  # allow deleting/disabling a rule by id/prefix
+
+        required_when_present = ('expiration_date', 'expiration_days', 'transition_date',
+                                 'transition_days', 'transitions', 'noncurrent_version_expiration_days',
+                                 'noncurrent_version_transition_days',
+                                 'noncurrent_version_transitions')
+        for param in required_when_present:
+            if module.params.get(param):
+                break
+        else:
+            msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
+            module.fail_json(msg=msg)
+        # If expiration_date set, check string is valid
+        if expiration_date is not None:
+            try:
+                datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
+            except ValueError as e:
+                module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+        if transition_date is not None:
+            try:
+                datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
+            except ValueError as e:
+                module.fail_json(msg="transition_date is not a valid ISO-8601 format.
The time must be midnight and a timezone of GMT must be included") + + if state == 'present': + create_lifecycle_rule(client, module) + elif state == 'absent': + destroy_lifecycle_rule(client, module) + + +if __name__ == '__main__': + main() diff --git a/s3_logging.py b/s3_logging.py new file mode 100644 index 00000000000..98033d04643 --- /dev/null +++ b/s3_logging.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: s3_logging +short_description: Manage logging facility of an s3 bucket in AWS +description: + - Manage logging facility of an s3 bucket in AWS +author: Rob White (@wimnat) +options: + name: + description: + - "Name of the s3 bucket." + required: true + type: str + state: + description: + - "Enable or disable logging." + default: present + choices: [ 'present', 'absent' ] + type: str + target_bucket: + description: + - "The bucket to log to. Required when state=present." + type: str + target_prefix: + description: + - "The prefix that should be prepended to the generated log files written to the target_bucket." + default: "" + type: str +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs + s3_logging: + name: mywebsite.com + target_bucket: mylogs + target_prefix: logs/mywebsite.com + state: present + +- name: Remove logging on an s3 bucket + s3_logging: + name: mywebsite.com + state: absent + +''' + +try: + import boto.ec2 + from boto.s3.connection import OrdinaryCallingFormat, Location + from boto.exception import S3ResponseError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info + + +def compare_bucket_logging(bucket, target_bucket, target_prefix): + + bucket_log_obj = bucket.get_logging_status() + if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix: + return False + else: + return True + + +def enable_bucket_logging(connection, module): + + bucket_name = module.params.get("name") + target_bucket = module.params.get("target_bucket") + target_prefix = module.params.get("target_prefix") + changed = False + + try: + bucket = connection.get_bucket(bucket_name) + except S3ResponseError as e: + module.fail_json(msg=e.message) + + try: + if not compare_bucket_logging(bucket, target_bucket, target_prefix): + # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket + try: + target_bucket_obj = connection.get_bucket(target_bucket) + except S3ResponseError as e: + if e.status == 301: + module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged") + else: + module.fail_json(msg=e.message) + target_bucket_obj.set_as_logging_target() + + bucket.enable_logging(target_bucket, target_prefix) + changed = True + + except S3ResponseError as e: + module.fail_json(msg=e.message) + + 
module.exit_json(changed=changed) + + +def disable_bucket_logging(connection, module): + + bucket_name = module.params.get("name") + changed = False + + try: + bucket = connection.get_bucket(bucket_name) + if not compare_bucket_logging(bucket, None, None): + bucket.disable_logging() + changed = True + except S3ResponseError as e: + module.fail_json(msg=e.message) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + target_bucket=dict(required=False, default=None), + target_prefix=dict(required=False, default=""), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region in ('us-east-1', '', None): + # S3ism for the US Standard region + location = Location.DEFAULT + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + try: + connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params) + # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases + if connection is None: + connection = boto.connect_s3(**aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + state = module.params.get("state") + + if state == 'present': + enable_bucket_logging(connection, module) + elif state == 'absent': + disable_bucket_logging(connection, module) + + +if __name__ == '__main__': + main() diff --git a/s3_sync.py b/s3_sync.py new file mode 100644 index 00000000000..f252ab4d9e1 --- /dev/null +++ b/s3_sync.py @@ -0,0 +1,565 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: s3_sync +short_description: Efficiently upload multiple files to S3 +description: + - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, + inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping. +options: + mode: + description: + - sync direction. + default: 'push' + choices: [ 'push' ] + type: str + file_change_strategy: + description: + - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched- they are fully skipped or fully uploaded. 
+ - date_size will upload if file sizes don't match or if local file modified date is newer than s3's version + - checksum will compare etag values based on s3's implementation of chunked md5s. + - force will always upload all files. + required: false + default: 'date_size' + choices: [ 'force', 'checksum', 'date_size' ] + type: str + bucket: + description: + - Bucket name. + required: true + type: str + key_prefix: + description: + - In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary. + required: false + type: str + file_root: + description: + - File/directory path for synchronization. This is a local path. + - This root path is scrubbed from the key name, so subdirectories will remain as keys. + required: true + type: path + permission: + description: + - Canned ACL to apply to synced files. + - Changing this ACL only changes newly synced files, it does not trigger a full reupload. + required: false + choices: + - 'private' + - 'public-read' + - 'public-read-write' + - 'authenticated-read' + - 'aws-exec-read' + - 'bucket-owner-read' + - 'bucket-owner-full-control' + type: str + mime_map: + description: + - > + Dict entry from extension to MIME type. This will override any default/sniffed MIME type. + For example C({".txt": "application/text", ".yml": "application/text"}) + required: false + type: dict + include: + description: + - Shell pattern-style file matching. + - Used before exclude to determine eligible files (for instance, only "*.gif") + - For multiple patterns, comma-separate them. + required: false + default: "*" + type: str + exclude: + description: + - Shell pattern-style file matching. + - Used after include to remove files (for instance, skip "*.txt") + - For multiple patterns, comma-separate them. + required: false + default: ".*" + type: str + cache_control: + description: + - Cache-Control header set on uploaded objects. + - Directives are separated by commas. + required: false + type: str + delete: + description: + - Remove remote files that exist in bucket but are not present in the file root. + required: false + default: no + type: bool + retries: + description: + - The I(retries) option does nothing and will be removed in Ansible 2.14. 
+ type: str + +requirements: + - boto3 >= 1.4.4 + - botocore + - python-dateutil + +author: Ted Timmons (@tedder) +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +- name: basic upload + s3_sync: + bucket: tedder + file_root: roles/s3/files/ + +- name: all the options + s3_sync: + bucket: tedder + file_root: roles/s3/files + mime_map: + .yml: application/text + .json: application/text + key_prefix: config_files/web + file_change_strategy: force + permission: public-read + cache_control: "public, max-age=31536000" + include: "*" + exclude: "*.txt,.*" +''' + +RETURN = ''' +filelist_initial: + description: file listing (dicts) from initial globbing + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "modified_epoch": 1477416706 + }] +filelist_local_etag: + description: file listing (dicts) including calculated local etag + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "mime_type": "application/json", + "modified_epoch": 1477416706, + "s3_path": "s3sync/policy.json" + }] +filelist_s3: + description: file listing (dicts) including information about previously-uploaded versions + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "mime_type": "application/json", + "modified_epoch": 1477416706, + "s3_path": "s3sync/policy.json" + }] +filelist_typed: + description: file listing (dicts) with calculated or overridden mime types + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "mime_type": "application/json", + "modified_epoch": 1477416706 + }] +filelist_actionable: + description: file listing (dicts) of files that will be uploaded after the strategy decision + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "mime_type": "application/json", + "modified_epoch": 1477931256, + "s3_path": "s3sync/policy.json", + "whysize": "151 / 151", + "whytime": "1477931256 / 1477929260" + }] +uploaded: + description: file listing (dicts) of files that were actually uploaded + returned: always + type: list + sample: [{ + "bytes": 151, + "chopped_path": "policy.json", + "fullpath": "roles/cf/files/policy.json", + "s3_path": "s3sync/policy.json", + "whysize": "151 / 151", + "whytime": "1477931637 / 1477931489" + }] + +''' + +import datetime +import fnmatch +import hashlib +import mimetypes +import os +import stat as osstat # os.stat constants +import traceback + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn, get_aws_connection_info, HAS_BOTO3, boto_exception +from ansible.module_utils._text import to_text + +try: + from dateutil import tz + HAS_DATEUTIL = True +except ImportError: + HAS_DATEUTIL = False + +try: + import botocore +except ImportError: + # Handled by imported HAS_BOTO3 + pass + + +# the following function, calculate_multipart_etag, is from tlastowka +# on github and is used under its (compatible) GPL license. So this +# license applies to the following function. 
+# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py +# +# calculate_multipart_etag Copyright (C) 2015 +# Tony Lastowka +# https://github.com/tlastowka +# +# +# calculate_multipart_etag is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# calculate_multipart_etag is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with calculate_multipart_etag. If not, see . + +DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 + + +def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): + """ + calculates a multipart upload etag for amazon s3 + + Arguments: + + source_path -- The file to calculate the etag for + chunk_size -- The chunk size to calculate for. + """ + + md5s = [] + + with open(source_path, 'rb') as fp: + while True: + + data = fp.read(chunk_size) + + if not data: + break + md5s.append(hashlib.md5(data)) + + if len(md5s) == 1: + new_etag = '"{0}"'.format(md5s[0].hexdigest()) + else: # > 1 + digests = b"".join(m.digest() for m in md5s) + + new_md5 = hashlib.md5(digests) + new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s)) + + return new_etag + + +def gather_files(fileroot, include=None, exclude=None): + ret = [] + for (dirpath, dirnames, filenames) in os.walk(fileroot): + for fn in filenames: + fullpath = os.path.join(dirpath, fn) + # include/exclude + if include: + found = False + for x in include.split(','): + if fnmatch.fnmatch(fn, x): + found = True + if not found: + # not on the include list, so we don't want it. + continue + + if exclude: + found = False + for x in exclude.split(','): + if fnmatch.fnmatch(fn, x): + found = True + if found: + # skip it, even if previously included. + continue + + chopped_path = os.path.relpath(fullpath, start=fileroot) + fstat = os.stat(fullpath) + f_size = fstat[osstat.ST_SIZE] + f_modified_epoch = fstat[osstat.ST_MTIME] + ret.append({ + 'fullpath': fullpath, + 'chopped_path': chopped_path, + 'modified_epoch': f_modified_epoch, + 'bytes': f_size, + }) + # dirpath = path *to* the directory + # dirnames = subdirs *in* our directory + # filenames + return ret + + +def calculate_s3_path(filelist, key_prefix=''): + ret = [] + for fileentry in filelist: + # don't modify the input dict + retentry = fileentry.copy() + retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path']) + ret.append(retentry) + return ret + + +def calculate_local_etag(filelist, key_prefix=''): + '''Really, "calculate md5", but since AWS uses their own format, we'll just call + it a "local etag". TODO optimization: only calculate if remote key exists.''' + ret = [] + for fileentry in filelist: + # don't modify the input dict + retentry = fileentry.copy() + retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath']) + ret.append(retentry) + return ret + + +def determine_mimetypes(filelist, override_map): + ret = [] + for fileentry in filelist: + retentry = fileentry.copy() + localfile = fileentry['fullpath'] + + # reminder: file extension is '.txt', not 'txt'. 
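+        # e.g. os.path.splitext('files/index.html') -> ('files/index', '.html'),
+        # so keys in a user-supplied mime_map must include the leading dot.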
+ file_extension = os.path.splitext(localfile)[1] + if override_map and override_map.get(file_extension): + # override? use it. + retentry['mime_type'] = override_map[file_extension] + else: + # else sniff it + retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False) + + # might be None or '' from one of the above. Not a great type but better than nothing. + if not retentry['mime_type']: + retentry['mime_type'] = 'application/octet-stream' + + ret.append(retentry) + + return ret + + +def head_s3(s3, bucket, s3keys): + retkeys = [] + for entry in s3keys: + retentry = entry.copy() + # don't modify the input dict + try: + retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) + except botocore.exceptions.ClientError as err: + if (hasattr(err, 'response') and + 'ResponseMetadata' in err.response and + 'HTTPStatusCode' in err.response['ResponseMetadata'] and + str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'): + pass + else: + raise Exception(err) + # error_msg = boto_exception(err) + # return {'error': error_msg} + retkeys.append(retentry) + return retkeys + + +def filter_list(s3, bucket, s3filelist, strategy): + keeplist = list(s3filelist) + + for e in keeplist: + e['_strategy'] = strategy + + # init/fetch info from S3 if we're going to use it for comparisons + if not strategy == 'force': + keeplist = head_s3(s3, bucket, s3filelist) + + # now actually run the strategies + if strategy == 'checksum': + for entry in keeplist: + if entry.get('s3_head'): + # since we have a remote s3 object, compare the values. + if entry['s3_head']['ETag'] == entry['local_etag']: + # files match, so remove the entry + entry['skip_flag'] = True + else: + # file etags don't match, keep the entry. + pass + else: # we don't have an etag, so we'll keep it. + pass + elif strategy == 'date_size': + for entry in keeplist: + if entry.get('s3_head'): + # fstat = entry['stat'] + local_modified_epoch = entry['modified_epoch'] + local_size = entry['bytes'] + + # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward. + # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp() + remote_modified_datetime = entry['s3_head']['LastModified'] + delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())) + remote_modified_epoch = delta.seconds + (delta.days * 86400) + + remote_size = entry['s3_head']['ContentLength'] + + entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch) + entry['whysize'] = '{0} / {1}'.format(local_size, remote_size) + + if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: + entry['skip_flag'] = True + else: + entry['why'] = "no s3_head" + # else: probably 'force'. Basically we don't skip with any with other strategies. + else: + pass + + # prune 'please skip' entries, if any. 
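+    # (everything left over becomes the actionable upload list surfaced as
+    # 'filelist_actionable' in the module result)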
+ return [x for x in keeplist if not x.get('skip_flag')] + + +def upload_files(s3, bucket, filelist, params): + ret = [] + for entry in filelist: + args = { + 'ContentType': entry['mime_type'] + } + if params.get('permission'): + args['ACL'] = params['permission'] + if params.get('cache_control'): + args['CacheControl'] = params['cache_control'] + # if this fails exception is caught in main() + s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None) + ret.append(entry) + return ret + + +def remove_files(s3, sourcelist, params): + bucket = params.get('bucket') + key_prefix = params.get('key_prefix') + paginator = s3.get_paginator('list_objects_v2') + current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', [])) + keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist) + delete_keys = list(current_keys - keep_keys) + + # can delete 1000 objects at a time + groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] + for keys in groups_of_keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]}) + + return delete_keys + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + mode=dict(choices=['push'], default='push'), + file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'), + bucket=dict(required=True), + key_prefix=dict(required=False, default=''), + file_root=dict(required=True, type='path'), + permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', + 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), + retries=dict(required=False, removed_in_version='2.14'), + mime_map=dict(required=False, type='dict'), + exclude=dict(required=False, default=".*"), + include=dict(required=False, default="*"), + cache_control=dict(required=False, default=''), + delete=dict(required=False, type='bool', default=False), + # future options: encoding, metadata, storage_class, retries + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_DATEUTIL: + module.fail_json(msg='dateutil required for this module') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + result = {} + mode = module.params['mode'] + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Region must be specified") + s3 = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs) + + if mode == 'push': + try: + result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include']) + result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map')) + result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix']) + result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy']) + result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params) + + if module.params['delete']: + result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params) + + # mark changed if we actually upload 
something. + if result.get('uploads') or result.get('removed'): + result['changed'] = True + # result.update(filelist=actionable_filelist) + except botocore.exceptions.ClientError as err: + error_msg = boto_exception(err) + module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/s3_website.py b/s3_website.py new file mode 100644 index 00000000000..22a73f4d023 --- /dev/null +++ b/s3_website.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: s3_website +short_description: Configure an s3 bucket as a website +description: + - Configure an s3 bucket as a website +requirements: [ boto3 ] +author: Rob White (@wimnat) +options: + name: + description: + - "Name of the s3 bucket" + required: true + type: str + error_key: + description: + - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None." + type: str + redirect_all_requests: + description: + - "Describes the redirect behavior for every request to this s3 bucket website endpoint" + type: str + state: + description: + - "Add or remove s3 website configuration" + choices: [ 'present', 'absent' ] + required: true + type: str + suffix: + description: + - > + Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to + samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash + character. + default: index.html + type: str + +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Configure an s3 bucket to redirect all requests to example.com +- s3_website: + name: mybucket.com + redirect_all_requests: example.com + state: present + +# Remove website configuration from an s3 bucket +- s3_website: + name: mybucket.com + state: absent + +# Configure an s3 bucket as a website with index and error pages +- s3_website: + name: mybucket.com + suffix: home.htm + error_key: errors/404.htm + state: present + +''' + +RETURN = ''' +index_document: + description: index document + type: complex + returned: always + contains: + suffix: + description: suffix that is appended to a request that is for a directory on the website endpoint + returned: success + type: str + sample: index.html +error_document: + description: error document + type: complex + returned: always + contains: + key: + description: object key name to use when a 4XX class error occurs + returned: when error_document parameter set + type: str + sample: error.html +redirect_all_requests_to: + description: where to redirect requests + type: complex + returned: always + contains: + host_name: + description: name of the host where requests will be redirected. + returned: when redirect all requests parameter set + type: str + sample: ansible.com + protocol: + description: protocol to use when redirecting requests. 
+ returned: when redirect all requests parameter set + type: str + sample: https +routing_rules: + description: routing rules + type: list + returned: always + contains: + condition: + type: complex + description: A container for describing a condition that must be met for the specified redirect to apply. + contains: + http_error_code_returned_equals: + description: The HTTP error code when the redirect is applied. + returned: always + type: str + key_prefix_equals: + description: object key name prefix when the redirect is applied. For example, to redirect + requests for ExamplePage.html, the key prefix will be ExamplePage.html + returned: when routing rule present + type: str + sample: docs/ + redirect: + type: complex + description: Container for redirect information. + returned: always + contains: + host_name: + description: name of the host where requests will be redirected. + returned: when host name set as part of redirect rule + type: str + sample: ansible.com + http_redirect_code: + description: The HTTP redirect code to use on the response. + returned: when routing rule present + type: str + protocol: + description: Protocol to use when redirecting requests. + returned: when routing rule present + type: str + sample: http + replace_key_prefix_with: + description: object key prefix to use in the redirect request + returned: when routing rule present + type: str + sample: documents/ + replace_key_with: + description: object key prefix to use in the redirect request + returned: when routing rule present + type: str + sample: documents/ +''' + +import time + +try: + import boto3 + from botocore.exceptions import ClientError, ParamValidationError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, + get_aws_connection_info) + + +def _create_redirect_dict(url): + + redirect_dict = {} + url_split = url.split(':') + + # Did we split anything? 
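+    # e.g. 'https://example.com' -> {'Protocol': 'https', 'HostName': 'example.com'},
+    # while a bare 'example.com' produces only a HostName entry.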
+ if len(url_split) == 2: + redirect_dict[u'Protocol'] = url_split[0] + redirect_dict[u'HostName'] = url_split[1].replace('//', '') + elif len(url_split) == 1: + redirect_dict[u'HostName'] = url_split[0] + else: + raise ValueError('Redirect URL appears invalid') + + return redirect_dict + + +def _create_website_configuration(suffix, error_key, redirect_all_requests): + + website_configuration = {} + + if error_key is not None: + website_configuration['ErrorDocument'] = {'Key': error_key} + + if suffix is not None: + website_configuration['IndexDocument'] = {'Suffix': suffix} + + if redirect_all_requests is not None: + website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) + + return website_configuration + + +def enable_or_update_bucket_as_website(client_connection, resource_connection, module): + + bucket_name = module.params.get("name") + redirect_all_requests = module.params.get("redirect_all_requests") + # If redirect_all_requests is set then don't use the default suffix that has been set + if redirect_all_requests is not None: + suffix = None + else: + suffix = module.params.get("suffix") + error_key = module.params.get("error_key") + changed = False + + try: + bucket_website = resource_connection.BucketWebsite(bucket_name) + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + try: + website_config = client_connection.get_bucket_website(Bucket=bucket_name) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': + website_config = None + else: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + if website_config is None: + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except ValueError as e: + module.fail_json(msg=str(e)) + else: + try: + if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ + (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ + (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): + + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except KeyError as e: + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except ValueError as e: + module.fail_json(msg=str(e)) + + # Wait 5 secs before getting the website_config again to give it time to update + time.sleep(5) + + website_config = client_connection.get_bucket_website(Bucket=bucket_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config)) + + +def disable_bucket_as_website(client_connection, module): + + changed = False + bucket_name = module.params.get("name") + + try: + client_connection.get_bucket_website(Bucket=bucket_name) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': + module.exit_json(changed=changed) + else: + 
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + try: + client_connection.delete_bucket_website(Bucket=bucket_name) + changed = True + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['present', 'absent']), + suffix=dict(type='str', required=False, default='index.html'), + error_key=dict(type='str', required=False), + redirect_all_requests=dict(type='str', required=False) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['redirect_all_requests', 'suffix'], + ['redirect_all_requests', 'error_key'] + ]) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) + resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + state = module.params.get("state") + + if state == 'present': + enable_or_update_bucket_as_website(client_connection, resource_connection, module) + elif state == 'absent': + disable_bucket_as_website(client_connection, module) + + +if __name__ == '__main__': + main() diff --git a/sns.py b/sns.py new file mode 100644 index 00000000000..bb4093a0e50 --- /dev/null +++ b/sns.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Michael J. Schultz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: sns +short_description: Send Amazon Simple Notification Service messages +description: + - Sends a notification to a topic on your Amazon SNS account. +author: + - Michael J. Schultz (@mjschultz) + - Paul Arthur (@flowerysong) +options: + msg: + description: + - Default message for subscriptions without a more specific message. + required: true + aliases: [ "default" ] + type: str + subject: + description: + - Message subject + type: str + topic: + description: + - The name or ARN of the topic to publish to. + required: true + type: str + email: + description: + - Message to send to email subscriptions. + type: str + email_json: + description: + - Message to send to email-json subscriptions. + type: str + sqs: + description: + - Message to send to SQS subscriptions. + type: str + sms: + description: + - Message to send to SMS subscriptions. + type: str + http: + description: + - Message to send to HTTP subscriptions. + type: str + https: + description: + - Message to send to HTTPS subscriptions. + type: str + application: + description: + - Message to send to application subscriptions. + type: str + lambda: + description: + - Message to send to Lambda subscriptions. + type: str + message_attributes: + description: + - Dictionary of message attributes. These are optional structured data entries to be sent along to the endpoint. 
+ - This is in AWS's distinct Name/Type/Value format; see example below. + type: dict + message_structure: + description: + - The payload format to use for the message. + - This must be 'json' to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)). + - It must be 'string' to support I(message_attributes). + default: json + choices: ['json', 'string'] + type: str +extends_documentation_fragment: +- ansible.amazon.ec2 +- ansible.amazon.aws + +requirements: + - boto3 + - botocore +''' + +EXAMPLES = """ +- name: Send default notification message via SNS + sns: + msg: '{{ inventory_hostname }} has completed the play.' + subject: Deploy complete! + topic: deploy + delegate_to: localhost + +- name: Send notification messages via SNS with short message for SMS + sns: + msg: '{{ inventory_hostname }} has completed the play.' + sms: deployed! + subject: Deploy complete! + topic: deploy + delegate_to: localhost + +- name: Send message with message_attributes + sns: + topic: "deploy" + msg: "message with extra details!" + message_attributes: + channel: + data_type: String + string_value: "mychannel" + color: + data_type: String + string_value: "green" + delegate_to: localhost +""" + +RETURN = """ +msg: + description: Human-readable diagnostic information + returned: always + type: str + sample: OK +message_id: + description: The message ID of the submitted message + returned: when success + type: str + sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b +""" + +import json +import traceback + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule + + +def arn_topic_lookup(module, client, short_topic): + lookup_topic = ':{0}'.format(short_topic) + + try: + paginator = client.get_paginator('list_topics') + topic_iterator = paginator.paginate() + for response in topic_iterator: + for topic in response['Topics']: + if topic['TopicArn'].endswith(lookup_topic): + return topic['TopicArn'] + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to look up topic ARN') + + return None + + +def main(): + protocols = [ + 'http', + 'https', + 'email', + 'email_json', + 'sms', + 'sqs', + 'application', + 'lambda', + ] + + argument_spec = dict( + msg=dict(required=True, aliases=['default']), + subject=dict(), + topic=dict(required=True), + message_attributes=dict(type='dict'), + message_structure=dict(choices=['json', 'string'], default='json'), + ) + + for p in protocols: + argument_spec[p] = dict() + + module = AnsibleAWSModule(argument_spec=argument_spec) + + sns_kwargs = dict( + Message=module.params['msg'], + Subject=module.params['subject'], + MessageStructure=module.params['message_structure'], + ) + + if module.params['message_attributes']: + if module.params['message_structure'] != 'string': + module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') + sns_kwargs['MessageAttributes'] = module.params['message_attributes'] + + dict_msg = { + 'default': sns_kwargs['Message'] + } + + for p in protocols: + if module.params[p]: + if sns_kwargs['MessageStructure'] != 'json': + module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".') + dict_msg[p.replace('_', '-')] = module.params[p] + + client = module.client('sns') + + topic = module.params['topic'] + if ':' in topic: + # Short names can't contain ':' so we'll 
assume this is the full ARN + sns_kwargs['TopicArn'] = topic + else: + sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic) + + if not sns_kwargs['TopicArn']: + module.fail_json(msg='Could not find topic: {0}'.format(topic)) + + if sns_kwargs['MessageStructure'] == 'json': + sns_kwargs['Message'] = json.dumps(dict_msg) + + try: + result = client.publish(**sns_kwargs) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to publish message') + + module.exit_json(msg='OK', message_id=result['MessageId']) + + +if __name__ == '__main__': + main() diff --git a/sns_topic.py b/sns_topic.py new file mode 100644 index 00000000000..82c21715e67 --- /dev/null +++ b/sns_topic.py @@ -0,0 +1,529 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: sns_topic +short_description: Manages AWS SNS topics and subscriptions +description: + - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. + - As of 2.6, this module can be use to subscribe and unsubscribe to topics outside of your AWS account. +author: + - "Joel Thompson (@joelthompson)" + - "Fernando Jose Pando (@nand0p)" + - "Will Thames (@willthames)" +options: + name: + description: + - The name or ARN of the SNS topic to manage. + required: true + type: str + state: + description: + - Whether to create or destroy an SNS topic. + default: present + choices: ["absent", "present"] + type: str + display_name: + description: + - Display name of the topic. + type: str + policy: + description: + - Policy to apply to the SNS topic. + type: dict + delivery_policy: + description: + - Delivery policy to apply to the SNS topic. + type: dict + subscriptions: + description: + - List of subscriptions to apply to the topic. Note that AWS requires + subscriptions to be confirmed, so you will need to confirm any new + subscriptions. + suboptions: + endpoint: + description: Endpoint of subscription. + required: true + protocol: + description: Protocol of subscription. + required: true + type: list + elements: dict + default: [] + purge_subscriptions: + description: + - "Whether to purge any subscriptions not listed here. NOTE: AWS does not + allow you to purge any PendingConfirmation subscriptions, so if any + exist and would be purged, they are silently skipped. This means that + somebody could come back later and confirm the subscription. Sorry. + Blame Amazon." 
+ default: true + type: bool +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +requirements: [ "boto" ] +''' + +EXAMPLES = """ + +- name: Create alarm SNS topic + sns_topic: + name: "alarms" + state: present + display_name: "alarm SNS topic" + delivery_policy: + http: + defaultHealthyRetryPolicy: + minDelayTarget: 2 + maxDelayTarget: 4 + numRetries: 3 + numMaxDelayRetries: 5 + backoffFunction: "" + disableSubscriptionOverrides: True + defaultThrottlePolicy: + maxReceivesPerSecond: 10 + subscriptions: + - endpoint: "my_email_address@example.com" + protocol: "email" + - endpoint: "my_mobile_number" + protocol: "sms" + +""" + +RETURN = ''' +sns_arn: + description: The ARN of the topic you are modifying + type: str + returned: always + sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name" +sns_topic: + description: Dict of sns topic details + type: complex + returned: always + contains: + attributes_set: + description: list of attributes set during this run + returned: always + type: list + sample: [] + check_mode: + description: whether check mode was on + returned: always + type: bool + sample: false + delivery_policy: + description: Delivery policy for the SNS topic + returned: when topic is owned by this AWS account + type: str + sample: > + {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0, + "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}} + display_name: + description: Display name for SNS topic + returned: when topic is owned by this AWS account + type: str + sample: My topic name + name: + description: Topic name + returned: always + type: str + sample: ansible-test-dummy-topic + owner: + description: AWS account that owns the topic + returned: when topic is owned by this AWS account + type: str + sample: '111111111111' + policy: + description: Policy for the SNS topic + returned: when topic is owned by this AWS account + type: str + sample: > + {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"}, + "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} + state: + description: whether the topic is present or absent + returned: always + type: str + sample: present + subscriptions: + description: List of subscribers to the topic in this AWS account + returned: always + type: list + sample: [] + subscriptions_added: + description: List of subscribers added in this run + returned: always + type: list + sample: [] + subscriptions_confirmed: + description: Count of confirmed subscriptions + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_deleted: + description: Count of deleted subscriptions + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_existing: + description: List of existing subscriptions + returned: always + type: list + sample: [] + subscriptions_new: + description: List of new subscriptions + returned: always + type: list + sample: [] + subscriptions_pending: + description: Count of pending subscriptions + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_purge: + description: Whether or not purge_subscriptions was set + returned: always + type: bool + sample: true + topic_arn: + description: ARN of the 
SNS topic (equivalent to sns_arn) + returned: when topic is owned by this AWS account + type: str + sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic + topic_created: + description: Whether the topic was created + returned: always + type: bool + sample: false + topic_deleted: + description: Whether the topic was deleted + returned: always + type: bool + sample: false +''' + +import json +import re +import copy + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict + + +class SnsTopicManager(object): + """ Handles SNS Topic creation and destruction """ + + def __init__(self, + module, + name, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + check_mode): + + self.connection = module.client('sns') + self.module = module + self.name = name + self.state = state + self.display_name = display_name + self.policy = policy + self.delivery_policy = delivery_policy + self.subscriptions = subscriptions + self.subscriptions_existing = [] + self.subscriptions_deleted = [] + self.subscriptions_added = [] + self.purge_subscriptions = purge_subscriptions + self.check_mode = check_mode + self.topic_created = False + self.topic_deleted = False + self.topic_arn = None + self.attributes_set = [] + + @AWSRetry.jittered_backoff() + def _list_topics_with_backoff(self): + paginator = self.connection.get_paginator('list_topics') + return paginator.paginate().build_full_result()['Topics'] + + @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) + def _list_topic_subscriptions_with_backoff(self): + paginator = self.connection.get_paginator('list_subscriptions_by_topic') + return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions'] + + @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) + def _list_subscriptions_with_backoff(self): + paginator = self.connection.get_paginator('list_subscriptions') + return paginator.paginate().build_full_result()['Subscriptions'] + + def _list_topics(self): + try: + topics = self._list_topics_with_backoff() + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get topic list") + return [t['TopicArn'] for t in topics] + + def _topic_arn_lookup(self): + # topic names cannot have colons, so this captures the full topic name + all_topics = self._list_topics() + lookup_topic = ':%s' % self.name + for topic in all_topics: + if topic.endswith(lookup_topic): + return topic + + def _create_topic(self): + if not self.check_mode: + try: + response = self.connection.create_topic(Name=self.name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) + self.topic_arn = response['TopicArn'] + return True + + def _compare_delivery_policies(self, policy_a, policy_b): + _policy_a = copy.deepcopy(policy_a) + _policy_b = copy.deepcopy(policy_b) + # AWS automatically injects disableSubscriptionOverrides if you set an + # http policy + if 'http' in policy_a: + if 'disableSubscriptionOverrides' not in policy_a['http']: + _policy_a['http']['disableSubscriptionOverrides'] = False + if 'http' in policy_b: + if 'disableSubscriptionOverrides' not in 
policy_b['http']: + _policy_b['http']['disableSubscriptionOverrides'] = False + comparison = (_policy_a != _policy_b) + return comparison + + def _set_topic_attrs(self): + changed = False + try: + topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) + + if self.display_name and self.display_name != topic_attributes['DisplayName']: + changed = True + self.attributes_set.append('display_name') + if not self.check_mode: + try: + self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName', + AttributeValue=self.display_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't set display name") + + if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])): + changed = True + self.attributes_set.append('policy') + if not self.check_mode: + try: + self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy', + AttributeValue=json.dumps(self.policy)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't set topic policy") + + if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or + self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + changed = True + self.attributes_set.append('delivery_policy') + if not self.check_mode: + try: + self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy', + AttributeValue=json.dumps(self.delivery_policy)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") + return changed + + def _canonicalize_endpoint(self, protocol, endpoint): + if protocol == 'sms': + return re.sub('[^0-9]*', '', endpoint) + return endpoint + + def _set_topic_subs(self): + changed = False + subscriptions_existing_list = set() + desired_subscriptions = [(sub['protocol'], + self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in + self.subscriptions] + + for sub in self._list_topic_subscriptions(): + sub_key = (sub['Protocol'], sub['Endpoint']) + subscriptions_existing_list.add(sub_key) + if (self.purge_subscriptions and sub_key not in desired_subscriptions and + sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')): + changed = True + self.subscriptions_deleted.append(sub_key) + if not self.check_mode: + try: + self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") + + for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list): + changed = True + self.subscriptions_added.append((protocol, endpoint)) + if not self.check_mode: + try: + self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) + return changed + + def _list_topic_subscriptions(self): + try: + return self._list_topic_subscriptions_with_backoff() + 
except is_boto3_error_code('AuthorizationError'): + try: + # potentially AuthorizationError when listing subscriptions for third party topic + return [sub for sub in self._list_subscriptions_with_backoff() + if sub['TopicArn'] == self.topic_arn] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) + + def _delete_subscriptions(self): + # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days + # https://forums.aws.amazon.com/thread.jspa?threadID=85993 + subscriptions = self._list_topic_subscriptions() + if not subscriptions: + return False + for sub in subscriptions: + if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'): + self.subscriptions_deleted.append(sub['SubscriptionArn']) + if not self.check_mode: + try: + self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") + return True + + def _delete_topic(self): + self.topic_deleted = True + if not self.check_mode: + try: + self.connection.delete_topic(TopicArn=self.topic_arn) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn) + return True + + def _name_is_arn(self): + return self.name.startswith('arn:') + + def ensure_ok(self): + changed = False + if self._name_is_arn(): + self.topic_arn = self.name + else: + self.topic_arn = self._topic_arn_lookup() + if not self.topic_arn: + changed = self._create_topic() + if self.topic_arn in self._list_topics(): + changed |= self._set_topic_attrs() + elif self.display_name or self.policy or self.delivery_policy: + self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") + changed |= self._set_topic_subs() + return changed + + def ensure_gone(self): + changed = False + if self._name_is_arn(): + self.topic_arn = self.name + else: + self.topic_arn = self._topic_arn_lookup() + if self.topic_arn: + if self.topic_arn not in self._list_topics(): + self.module.fail_json(msg="Cannot use state=absent with third party ARN. 
Use subscribers=[] to unsubscribe") + changed = self._delete_subscriptions() + changed |= self._delete_topic() + return changed + + def get_info(self): + info = { + 'name': self.name, + 'state': self.state, + 'subscriptions_new': self.subscriptions, + 'subscriptions_existing': self.subscriptions_existing, + 'subscriptions_deleted': self.subscriptions_deleted, + 'subscriptions_added': self.subscriptions_added, + 'subscriptions_purge': self.purge_subscriptions, + 'check_mode': self.check_mode, + 'topic_created': self.topic_created, + 'topic_deleted': self.topic_deleted, + 'attributes_set': self.attributes_set, + } + if self.state != 'absent': + if self.topic_arn in self._list_topics(): + info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'])) + info['delivery_policy'] = info.pop('effective_delivery_policy') + info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()] + + return info + + +def main(): + argument_spec = dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + display_name=dict(), + policy=dict(type='dict'), + delivery_policy=dict(type='dict'), + subscriptions=dict(default=[], type='list'), + purge_subscriptions=dict(type='bool', default=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + + name = module.params.get('name') + state = module.params.get('state') + display_name = module.params.get('display_name') + policy = module.params.get('policy') + delivery_policy = module.params.get('delivery_policy') + subscriptions = module.params.get('subscriptions') + purge_subscriptions = module.params.get('purge_subscriptions') + check_mode = module.check_mode + + sns_topic = SnsTopicManager(module, + name, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + check_mode) + + if state == 'present': + changed = sns_topic.ensure_ok() + + elif state == 'absent': + changed = sns_topic.ensure_gone() + + sns_facts = dict(changed=changed, + sns_arn=sns_topic.topic_arn, + sns_topic=sns_topic.get_info()) + + module.exit_json(**sns_facts) + + +if __name__ == '__main__': + main() diff --git a/sqs_queue.py b/sqs_queue.py new file mode 100644 index 00000000000..98b144732de --- /dev/null +++ b/sqs_queue.py @@ -0,0 +1,473 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: sqs_queue +short_description: Creates or deletes AWS SQS queues. +description: + - Create or delete AWS SQS queues. + - Update attributes on existing queues. +author: + - Alan Loi (@loia) + - Fernando Jose Pando (@nand0p) + - Nadir Lloret (@nadirollo) + - Dennis Podkovyrin (@sbj-ss) +requirements: + - boto3 +options: + state: + description: + - Create or delete the queue. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the queue. + required: true + type: str + queue_type: + description: + - Standard or FIFO queue. + - I(queue_type) can only be set at queue creation and will otherwise be + ignored. 
+ choices: ['standard', 'fifo'] + default: 'standard' + type: str + visibility_timeout: + description: + - The default visibility timeout in seconds. + aliases: [default_visibility_timeout] + type: int + message_retention_period: + description: + - The message retention period in seconds. + type: int + maximum_message_size: + description: + - The maximum message size in bytes. + type: int + delay_seconds: + description: + - The delivery delay in seconds. + aliases: [delivery_delay] + type: int + receive_message_wait_time_seconds: + description: + - The receive message wait time in seconds. + aliases: [receive_message_wait_time] + type: int + policy: + description: + - The JSON dict policy to attach to queue. + type: dict + redrive_policy: + description: + - JSON dict with the redrive_policy (see example). + type: dict + kms_master_key_id: + description: + - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. + type: str + kms_data_key_reuse_period_seconds: + description: + - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. + aliases: [kms_data_key_reuse_period] + type: int + content_based_deduplication: + type: bool + description: Enables content-based deduplication. Used for FIFOs only. + default: false + tags: + description: + - Tag dict to apply to the queue (requires botocore 1.5.40 or above). + - To remove all tags set I(tags={}) and I(purge_tags=true). + type: dict + purge_tags: + description: + - Remove tags not listed in I(tags). + type: bool + default: false +extends_documentation_fragment: +- ansible.amazon.aws +- ansible.amazon.ec2 + +''' + +RETURN = ''' +content_based_deduplication: + description: Enables content-based deduplication. Used for FIFOs only. + type: bool + returned: always + sample: True +visibility_timeout: + description: The default visibility timeout in seconds. + type: int + returned: always + sample: 30 +delay_seconds: + description: The delivery delay in seconds. + type: int + returned: always + sample: 0 +kms_master_key_id: + description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. + type: str + returned: always + sample: alias/MyAlias +kms_data_key_reuse_period_seconds: + description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. + type: int + returned: always + sample: 300 +maximum_message_size: + description: The maximum message size in bytes. + type: int + returned: always + sample: 262144 +message_retention_period: + description: The message retention period in seconds. + type: int + returned: always + sample: 345600 +name: + description: Name of the SQS Queue + type: str + returned: always + sample: "queuename-987d2de0" +queue_arn: + description: The queue's Amazon resource name (ARN). + type: str + returned: on success + sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0' +queue_url: + description: URL to access the queue + type: str + returned: on success + sample: 'https://queue.amazonaws.com/123456789012/MyQueue' +receive_message_wait_time_seconds: + description: The receive message wait time in seconds. 
+ type: int + returned: always + sample: 0 +region: + description: Region that the queue was created within + type: str + returned: always + sample: 'us-east-1' +tags: + description: List of queue tags + type: dict + returned: always + sample: '{"Env": "prod"}' +''' + +EXAMPLES = ''' +# Create SQS queue with redrive policy +- sqs_queue: + name: my-queue + region: ap-southeast-2 + default_visibility_timeout: 120 + message_retention_period: 86400 + maximum_message_size: 1024 + delivery_delay: 30 + receive_message_wait_time: 20 + policy: "{{ json_dict }}" + redrive_policy: + maxReceiveCount: 5 + deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue + +# Drop redrive policy +- sqs_queue: + name: my-queue + region: ap-southeast-2 + redrive_policy: {} + +# Create FIFO queue +- sqs_queue: + name: fifo-queue + region: ap-southeast-2 + queue_type: fifo + content_based_deduplication: yes + +# Tag queue +- sqs_queue: + name: fifo-queue + region: ap-southeast-2 + tags: + example: SomeValue + +# Configure Encryption, automatically uses a new data key every hour +- sqs_queue: + name: fifo-queue + region: ap-southeast-2 + kms_master_key_id: alias/MyQueueKey + kms_data_key_reuse_period_seconds: 3600 + +# Delete SQS queue +- sqs_queue: + name: my-queue + region: ap-southeast-2 + state: absent +''' + +import json +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, snake_dict_to_camel_dict, compare_policies + +try: + from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError +except ImportError: + pass # handled by AnsibleAWSModule + + +def get_queue_name(module, is_fifo=False): + name = module.params.get('name') + if not is_fifo or name.endswith('.fifo'): + return name + return name + '.fifo' + + +# NonExistentQueue is explicitly expected when a queue doesn't exist +@AWSRetry.jittered_backoff() +def get_queue_url(client, name): + try: + return client.get_queue_url(QueueName=name)['QueueUrl'] + except ClientError as e: + if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue': + return None + raise + + +def describe_queue(client, queue_url): + """ + Description a queue in snake format + """ + attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + description = dict(attributes) + description.pop('Policy', None) + description.pop('RedrivePolicy', None) + description = camel_dict_to_snake_dict(description) + description['policy'] = attributes.get('Policy', None) + description['redrive_policy'] = attributes.get('RedrivePolicy', None) + + # Boto3 returns everything as a string, convert them back to integers/dicts if + # that's what we expected. 
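+    # e.g. 'VisibilityTimeout': '30' is returned as visibility_timeout: 30,
+    # and the 'Policy'/'RedrivePolicy' JSON strings are parsed back into dicts.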
+    for key, value in description.items():
+        if value is None:
+            continue
+
+        if key in ['policy', 'redrive_policy']:
+            policy = json.loads(value)
+            description[key] = policy
+            continue
+
+        if key == 'content_based_deduplication':
+            # AWS returns 'true'/'false' as strings; bool() on any non-empty
+            # string is True, so compare against the literal value instead.
+            description[key] = (value == 'true')
+            continue
+
+        try:
+            if value == str(int(value)):
+                description[key] = int(value)
+        except (TypeError, ValueError):
+            pass
+
+    return description
+
+
+def create_or_update_sqs_queue(client, module):
+    is_fifo = (module.params.get('queue_type') == 'fifo')
+    queue_name = get_queue_name(module, is_fifo)
+    result = dict(
+        name=queue_name,
+        region=module.params.get('region'),
+        changed=False,
+    )
+
+    queue_url = get_queue_url(client, queue_name)
+    result['queue_url'] = queue_url
+
+    if not queue_url:
+        create_attributes = {'FifoQueue': 'true'} if is_fifo else {}
+        result['changed'] = True
+        if module.check_mode:
+            return result
+        queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
+
+    changed, arn = update_sqs_queue(module, client, queue_url)
+    result['changed'] |= changed
+    result['queue_arn'] = arn
+
+    changed, tags = update_tags(client, queue_url, module)
+    result['changed'] |= changed
+    result['tags'] = tags
+
+    result.update(describe_queue(client, queue_url))
+
+    COMPATABILITY_KEYS = dict(
+        delay_seconds='delivery_delay',
+        receive_message_wait_time_seconds='receive_message_wait_time',
+        visibility_timeout='default_visibility_timeout',
+        kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
+    )
+    for key in list(result.keys()):
+
+        # The return values changed between boto and boto3, add the old keys too
+        # for backwards compatibility
+        return_name = COMPATABILITY_KEYS.get(key)
+        if return_name:
+            result[return_name] = result.get(key)
+
+    return result
+
+
+def update_sqs_queue(module, client, queue_url):
+    check_mode = module.check_mode
+    changed = False
+    existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+    new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
+    attributes_to_set = dict()
+
+    # Boto3 SQS deals with policies as strings, we want to deal with them as
+    # dicts
+    if module.params.get('policy') is not None:
+        policy = module.params.get('policy')
+        current_value = existing_attributes.get('Policy', '{}')
+        current_policy = json.loads(current_value)
+        if compare_policies(current_policy, policy):
+            attributes_to_set['Policy'] = json.dumps(policy)
+            changed = True
+    if module.params.get('redrive_policy') is not None:
+        policy = module.params.get('redrive_policy')
+        current_value = existing_attributes.get('RedrivePolicy', '{}')
+        current_policy = json.loads(current_value)
+        if compare_policies(current_policy, policy):
+            attributes_to_set['RedrivePolicy'] = json.dumps(policy)
+            changed = True
+
+    for attribute, value in existing_attributes.items():
+        # We handle these as a special case because they're IAM policies
+        if attribute in ['Policy', 'RedrivePolicy']:
+            continue
+
+        if attribute not in new_attributes.keys():
+            continue
+
+        if new_attributes.get(attribute) is None:
+            continue
+
+        new_value = new_attributes[attribute]
+
+        if isinstance(new_value, bool):
+            # Normalise both sides to lowercase strings before comparing;
+            # AWS stores booleans as 'true'/'false'.
+            new_value = str(new_value).lower()
+            value = str(value).lower()
+
+        if new_value == value:
+            continue
+
+        # Boto3 expects strings
+        attributes_to_set[attribute] = str(new_value)
+        changed = True
+
+    if changed and not check_mode:
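+        # All accumulated attribute changes are applied in a single API call.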
+        client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
+
+    return changed, existing_attributes.get('QueueArn')
+
+
+def delete_sqs_queue(client, module):
+    is_fifo = (module.params.get('queue_type') == 'fifo')
+    queue_name = get_queue_name(module, is_fifo)
+    result = dict(
+        name=queue_name,
+        region=module.params.get('region'),
+        changed=False
+    )
+
+    queue_url = get_queue_url(client, queue_name)
+    if not queue_url:
+        return result
+
+    result['changed'] = bool(queue_url)
+    if not module.check_mode:
+        AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
+
+    return result
+
+
+def update_tags(client, queue_url, module):
+    new_tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')
+    if new_tags is None:
+        return False, {}
+
+    try:
+        existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
+    except (ClientError, KeyError):
+        existing_tags = {}
+
+    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+    if not module.check_mode:
+        if tags_to_remove:
+            client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
+        if tags_to_add:
+            client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add, aws_retry=True)
+        existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
+    else:
+        existing_tags = new_tags
+
+    changed = bool(tags_to_remove) or bool(tags_to_add)
+    return changed, existing_tags
+
+
+def main():
+
+    argument_spec = dict(
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        name=dict(type='str', required=True),
+        queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
+        delay_seconds=dict(type='int', aliases=['delivery_delay']),
+        maximum_message_size=dict(type='int'),
+        message_retention_period=dict(type='int'),
+        policy=dict(type='dict'),
+        receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
+        redrive_policy=dict(type='dict'),
+        visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
+        kms_master_key_id=dict(type='str'),
+        kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period']),
+        content_based_deduplication=dict(type='bool'),
+        tags=dict(type='dict'),
+        purge_tags=dict(type='bool', default=False),
+    )
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    state = module.params.get('state')
+    retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
+    try:
+        client = module.client('sqs', retry_decorator=retry_decorator)
+        if state == 'present':
+            result = create_or_update_sqs_queue(client, module)
+        elif state == 'absent':
+            result = delete_sqs_queue(client, module)
+    except (BotoCoreError, ClientError, ParamValidationError) as e:
+        module.fail_json_aws(e, msg='Failed to control sqs queue')
+    else:
+        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/sts_assume_role.py b/sts_assume_role.py
new file mode 100644
index 00000000000..3c03f291706
--- /dev/null
+++ b/sts_assume_role.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+    - Assume a role using AWS Security Token Service and obtain temporary credentials.
+author:
+    - Boris Ekelchik (@bekelchik)
+    - Marek Piatek (@piontas)
+options:
+  role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the role that the caller is
+        assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
+    required: true
+    type: str
+  role_session_name:
+    description:
+      - Name of the role's session - will be used by CloudTrail.
+    required: true
+    type: str
+  policy:
+    description:
+      - Supplemental policy to use in addition to assumed role's policies.
+    type: str
+  duration_seconds:
+    description:
+      - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
+      - The maximum depends on the IAM role's maximum session duration setting.
+      - By default, the value is set to 3600 seconds.
+    type: int
+  external_id:
+    description:
+      - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+    type: str
+  mfa_serial_number:
+    description:
+      - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+    type: str
+  mfa_token:
+    description:
+      - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+    type: str
+notes:
+  - In order to use the assumed role in a following playbook task you must pass the I(access_key), I(secret_key) and I(session_token) returned in I(sts_creds).
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements:
+    - boto3
+    - botocore
+    - python >= 2.6
+'''
+
+RETURN = '''
+sts_creds:
+    description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
+    returned: always
+    type: dict
+    sample:
+      access_key: XXXXXXXXXXXXXXXXXXXX
+      expiration: 2017-11-11T11:11:11+00:00
+      secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+sts_user:
+    description: The Amazon Resource Name (ARN) and the assumed role ID of the assumed role user
+    returned: always
+    type: dict
+    sample:
+      assumed_role_id: ARO123EXAMPLE123:Bob
+      arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
+changed:
+    description: True if obtaining the credentials succeeds
+    type: bool
+    returned: always
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
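+
+# Assume a role requiring MFA (the serial number and token below are
+# illustrative placeholder values)
+- sts_assume_role:
+    role_arn: "arn:aws:iam::123456789012:role/someRole"
+    role_session_name: "someMfaRoleSession"
+    mfa_serial_number: "arn:aws:iam::123456789012:mfa/example-user"
+    mfa_token: "123456"
+  register: assumed_mfa_role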
+ +# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +- sts_assume_role: + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" + register: assumed_role + +# Use the assumed role above to tag an instance in account 123456789012 +- ec2_tag: + aws_access_key: "{{ assumed_role.sts_creds.access_key }}" + aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" + security_token: "{{ assumed_role.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value + +''' + +from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, ParamValidationError +except ImportError: + pass # caught by AnsibleAWSModule + + +def _parse_response(response): + credentials = response.get('Credentials', {}) + user = response.get('AssumedRoleUser', {}) + + sts_cred = { + 'access_key': credentials.get('AccessKeyId'), + 'secret_key': credentials.get('SecretAccessKey'), + 'session_token': credentials.get('SessionToken'), + 'expiration': credentials.get('Expiration') + + } + sts_user = camel_dict_to_snake_dict(user) + return sts_cred, sts_user + + +def assume_role_policy(connection, module): + params = { + 'RoleArn': module.params.get('role_arn'), + 'RoleSessionName': module.params.get('role_session_name'), + 'Policy': module.params.get('policy'), + 'DurationSeconds': module.params.get('duration_seconds'), + 'ExternalId': module.params.get('external_id'), + 'SerialNumber': module.params.get('mfa_serial_number'), + 'TokenCode': module.params.get('mfa_token') + } + changed = False + + kwargs = dict((k, v) for k, v in params.items() if v is not None) + + try: + response = connection.assume_role(**kwargs) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json_aws(e) + + sts_cred, sts_user = _parse_response(response) + module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user) + + +def main(): + argument_spec = dict( + role_arn=dict(required=True), + role_session_name=dict(required=True), + duration_seconds=dict(required=False, default=None, type='int'), + external_id=dict(required=False, default=None), + policy=dict(required=False, default=None), + mfa_serial_number=dict(required=False, default=None), + mfa_token=dict(required=False, default=None) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + connection = module.client('sts') + + assume_role_policy(connection, module) + + +if __name__ == '__main__': + main() diff --git a/sts_session_token.py b/sts_session_token.py new file mode 100644 index 00000000000..1584dbcc50d --- /dev/null +++ b/sts_session_token.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: sts_session_token +short_description: Obtain a session token from the AWS Security Token Service +description: + - Obtain a session token from the AWS Security Token Service. 
+author: Victor Costan (@pwnall)
+options:
+  duration_seconds:
+    description:
+      - The duration, in seconds, of the session token.
+        See U(https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters)
+        for acceptable and default values.
+    type: int
+  mfa_serial_number:
+    description:
+      - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+    type: str
+  mfa_token:
+    description:
+      - The value provided by the MFA device, if the trust policy of the user requires MFA.
+    type: str
+notes:
+  - In order to use the session token in a following playbook task you must pass the I(access_key), I(secret_key) and I(session_token) returned in I(sts_creds).
+extends_documentation_fragment:
+- ansible.amazon.aws
+- ansible.amazon.ec2
+
+requirements:
+    - boto3
+    - botocore
+    - python >= 2.6
+'''
+
+RETURN = """
+sts_creds:
+    description: The Credentials object returned by the AWS Security Token Service
+    returned: always
+    type: dict
+    sample:
+      access_key: ASXXXXXXXXXXXXXXXXXX
+      expiration: "2016-04-08T11:59:47+00:00"
+      secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+    description: True if obtaining the credentials succeeds
+    type: bool
+    returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get a session token (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+- sts_session_token:
+    duration_seconds: 3600
+  register: session_credentials
+
+# Use the session token obtained above to tag an instance in account 123456789012
+- ec2_tag:
+    aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+    aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+    security_token: "{{ session_credentials.sts_creds.session_token }}"
+    resource: i-xyzxyz01
+    state: present
+    tags:
+      MyNewTag: value
+
+'''
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+def normalize_credentials(credentials):
+    access_key = credentials.get('AccessKeyId', None)
+    secret_key = credentials.get('SecretAccessKey', None)
+    session_token = credentials.get('SessionToken', None)
+    expiration = credentials.get('Expiration', None)
+    return {
+        'access_key': access_key,
+        'secret_key': secret_key,
+        'session_token': session_token,
+        'expiration': expiration
+    }
+
+
+def get_session_token(connection, module):
+    duration_seconds = module.params.get('duration_seconds')
+    mfa_serial_number = module.params.get('mfa_serial_number')
+    mfa_token = module.params.get('mfa_token')
+    changed = False
+
+    args = {}
+    if duration_seconds is not None:
+        args['DurationSeconds'] = duration_seconds
+    if mfa_serial_number is not None:
+        args['SerialNumber'] = mfa_serial_number
+    if mfa_token is not None:
+        args['TokenCode'] = mfa_token
+
+    try:
+        response = connection.get_session_token(**args)
+        changed = True
+    except ClientError as e:
+        # fail_json expects a serialisable message, not the exception object
+        module.fail_json(msg=str(e))
+
+    credentials = normalize_credentials(response.get('Credentials', {}))
+    module.exit_json(changed=changed, sts_creds=credentials)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+
duration_seconds=dict(required=False, default=None, type='int'), + mfa_serial_number=dict(required=False, default=None), + mfa_token=dict(required=False, default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required.') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if region: + connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs) + else: + module.fail_json(msg="region must be specified") + + get_session_token(connection, module) + + +if __name__ == '__main__': + main() From 62cdfc77bb70c7e1b786ccd5492bda4069a5fc23 Mon Sep 17 00:00:00 2001 From: jillr Date: Tue, 3 Mar 2020 19:43:21 +0000 Subject: [PATCH 002/683] migration test cleanup --- aws_direct_connect_connection.py | 7 +++++-- aws_direct_connect_gateway.py | 7 +++++-- aws_direct_connect_link_aggregation_group.py | 16 +++++++++++----- aws_s3_bucket_info.py | 8 ++++++-- aws_step_functions_state_machine.py | 6 +++++- aws_waf_rule.py | 11 +++++++++-- aws_waf_web_acl.py | 8 ++++++-- cloudformation_stack_set.py | 6 +++++- cloudtrail.py | 4 +++- cloudwatchlogs_log_group.py | 7 ++++++- cloudwatchlogs_log_group_info.py | 7 ++++++- dynamodb_ttl.py | 7 ++++++- ec2_customer_gateway.py | 8 ++++++-- ec2_customer_gateway_info.py | 5 ++++- ec2_eip_info.py | 5 +++-- ec2_elb.py | 8 ++++++-- ec2_instance.py | 11 ++++++----- ec2_instance_info.py | 8 ++++++-- ec2_launch_template.py | 6 +++++- ec2_lc.py | 11 +++++++++-- ec2_lc_info.py | 8 ++++++-- ec2_scaling_policy.py | 8 ++++++-- ec2_vpc_endpoint.py | 8 ++++++-- ec2_vpc_endpoint_info.py | 10 ++++++++-- ec2_vpc_igw_info.py | 9 +++++++-- ec2_vpc_nacl_info.py | 7 +++++-- ec2_vpc_nat_gateway.py | 8 ++++++-- ec2_vpc_nat_gateway_info.py | 10 ++++++++-- ec2_vpc_peering_info.py | 9 +++++++-- ec2_vpc_vgw_info.py | 9 +++++++-- ec2_vpc_vpn_info.py | 5 ++++- efs.py | 7 +++++-- elasticache.py | 7 ++++++- elasticache_info.py | 6 +++++- elb_application_lb.py | 7 ++++++- elb_application_lb_info.py | 8 ++++++-- elb_instance.py | 8 ++++++-- elb_target.py | 9 ++++++--- elb_target_group.py | 7 +++++-- elb_target_group_info.py | 8 ++++++-- iam.py | 8 ++++++-- iam_managed_policy.py | 10 ++++++++-- iam_mfa_device_info.py | 8 ++++++-- iam_role.py | 6 +++++- lambda_alias.py | 8 ++++++-- lambda_event.py | 8 ++++++-- rds_instance.py | 8 +++++++- rds_instance_info.py | 6 +++++- s3_sync.py | 8 +++++++- s3_website.py | 8 ++++++-- sqs_queue.py | 7 ++++++- 51 files changed, 307 insertions(+), 92 deletions(-) diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 2dd6c839934..395ac4478d8 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -160,8 +160,11 @@ import traceback from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, delete_connection, - associate_connection_and_lag, disassociate_connection_and_lag) +from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, + delete_connection, + associate_connection_and_lag, + disassociate_connection_and_lag, + ) try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_direct_connect_gateway.py 
b/aws_direct_connect_gateway.py index 6918c125de3..d157e56a34f 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -110,8 +110,11 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info, boto3_conn) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + ) from ansible.module_utils._text import to_native diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 2e4b34d0ca7..f7bb86ec077 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -164,13 +164,19 @@ returned: when I(state=present) """ -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3, - get_aws_connection_info, boto3_conn, AWSRetry) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + HAS_BOTO3, + get_aws_connection_info, + boto3_conn, + AWSRetry, + ) from ansible.module_utils.basic import AnsibleModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, - delete_connection, - delete_virtual_interface, - disassociate_connection_and_lag) + delete_connection, + delete_virtual_interface, + disassociate_connection_and_lag, + ) import traceback import time diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 9916e297eaa..1b8d4eefb0d 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -61,8 +61,12 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + ec2_argument_spec, + HAS_BOTO3, + camel_dict_to_snake_dict, + get_aws_connection_info, + ) def get_bucket_list(module, connection): diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index d1933fcafe7..bde78d7f1e0 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -102,7 +102,11 @@ ''' from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, + AWSRetry, + compare_aws_tags, + boto3_tag_list_to_ansible_dict, + ) try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_waf_rule.py b/aws_waf_rule.py index b32e26999ce..0475c6447b5 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -150,8 +150,15 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, 
MATCH_LOOKUP -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (run_func_with_change_token_backoff, + list_rules_with_backoff, + list_regional_rules_with_backoff, + MATCH_LOOKUP, + ) +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (get_web_acl_with_backoff, + list_web_acls_with_backoff, + list_regional_web_acls_with_backoff, + ) def get_rule_by_name(client, module, name): diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index 539bac2f8cb..ee6f103a825 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -169,8 +169,12 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import list_rules_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff, \ - run_func_with_change_token_backoff, list_regional_rules_with_backoff +from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (list_rules_with_backoff, + list_web_acls_with_backoff, + list_regional_web_acls_with_backoff, + run_func_with_change_token_backoff, + list_regional_rules_with_backoff, + ) def get_web_acl_by_name(client, module, name): diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 5b6eb2198df..5636a084a6b 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -312,7 +312,11 @@ # handled by AnsibleAWSModule pass -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + camel_dict_to_snake_dict, + ) from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils._text import to_native diff --git a/cloudtrail.py b/cloudtrail.py index 0dc8feb64af..087419f6917 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -260,7 +260,9 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict) + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + ) def create_trail(module, client, ct_params): diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index 289a3f5efbe..9b6e34c12e0 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -134,7 +134,12 @@ import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + camel_dict_to_snake_dict, + boto3_conn, + ec2_argument_spec, + get_aws_connection_info, + ) try: import botocore diff --git 
a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index a4f85673bf3..06c857cde06 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -76,7 +76,12 @@ import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + camel_dict_to_snake_dict, + boto3_conn, + ec2_argument_spec, + get_aws_connection_info, + ) try: import botocore diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 5ed0488d686..b821839cdde 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -77,7 +77,12 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def get_current_ttl_state(c, table_name): diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index fe4d95f3a81..c0e97c97bc8 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -126,8 +126,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, AWSRetry, camel_dict_to_snake_dict, - ec2_argument_spec, get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + AWSRetry, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) class Ec2CustomerGatewayManager: diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 456b3f226df..ef64be52852 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -88,7 +88,10 @@ pass # caught by AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/ec2_eip_info.py b/ec2_eip_info.py index de76a29b414..b6212fb6311 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -101,8 +101,9 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict) + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: diff --git a/ec2_elb.py b/ec2_elb.py index a74887ee41d..fb89714609c 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -96,8 +96,12 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, - get_aws_connection_info) +from 
ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) class ElbManager: diff --git a/ec2_instance.py b/ec2_instance.py index ea7f49c5f32..ca090c13d7c 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -813,11 +813,12 @@ from ansible.module_utils._text import to_bytes, to_native import ansible_collections.ansible.amazon.plugins.module_utils.ec2 as ec2_utils from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_filter_list, - compare_aws_tags, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - camel_dict_to_snake_dict) + ansible_dict_to_boto3_filter_list, + compare_aws_tags, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + camel_dict_to_snake_dict, + ) from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule diff --git a/ec2_instance_info.py b/ec2_instance_info.py index e16a3c2f164..865b7d70d06 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -504,8 +504,12 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, - ec2_argument_spec, get_aws_connection_info) + boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_ec2_instances(connection, module): diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 94cf2404cdf..0aa7d68b81d 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -371,7 +371,11 @@ from ansible.module_utils._text import to_text from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, + AWSRetry, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + ) try: from botocore.exceptions import ClientError, BotoCoreError, WaiterError diff --git a/ec2_lc.py b/ec2_lc.py index 8764514b0a9..6d168583e49 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -454,8 +454,15 @@ import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, ec2_argument_spec, ec2_connect, camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names, - boto3_conn, snake_dict_to_camel_dict, HAS_BOTO3) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, + ec2_argument_spec, + ec2_connect, + camel_dict_to_snake_dict, + get_ec2_security_group_ids_from_names, + boto3_conn, + snake_dict_to_camel_dict, + HAS_BOTO3, + ) from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule diff --git a/ec2_lc_info.py b/ec2_lc_info.py index ed49b946363..d0f340ca597 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -163,8 +163,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 
import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_launch_configs(connection, module): diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 954a148f374..321047f709b 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -79,8 +79,12 @@ pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) def create_scaling_policy(connection, module): diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 7c1f9f619ab..aa55014c88b 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -190,8 +190,12 @@ pass # will be picked up by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec, HAS_BOTO3, - camel_dict_to_snake_dict) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, + boto3_conn, + ec2_argument_spec, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) from ansible.module_utils.six import string_types diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index d82a3b8faf6..1436080ef17 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -117,8 +117,14 @@ pass # will be picked up from imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, boto3_conn, get_aws_connection_info, - ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict, AWSRetry) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + camel_dict_to_snake_dict, + AWSRetry, + ) def date_handler(obj): diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 097072c3d5c..07333c1613e 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -98,8 +98,13 @@ pass # will be captured by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, - camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + ) def get_internet_gateway_info(internet_gateway): diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 9db13f104d2..068b62845ff 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -114,8 +114,11 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list, - 
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, + ansible_dict_to_boto3_filter_list, + camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + ) # VPC-supported IANA protocol numbers diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 2e35459d438..5cb3236885e 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -209,8 +209,12 @@ pass # caught by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, - camel_dict_to_snake_dict, HAS_BOTO3) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + HAS_BOTO3, + ) DRY_RUN_GATEWAYS = [ diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index b86e4bb8114..bd1dde7ce7f 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -88,8 +88,14 @@ pass # will be detected by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, - camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, HAS_BOTO3) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + HAS_BOTO3, + ) def date_handler(obj): diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 4fe5a4a1bd1..b30fb43bf59 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -79,8 +79,13 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, - ec2_argument_spec, boto3_conn, get_aws_connection_info, - ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict) + ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index a1ac5b04c7b..e6b9b96ffd0 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -101,8 +101,13 @@ pass # will be captured by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, - camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + ) def get_virtual_gateway_info(virtual_gateway): diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index fa7a1ea7973..bcb11b657a0 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -170,7 +170,10 @@ pass # caught by AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict 
+from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/efs.py b/efs.py index 26671a62850..daac58e8fe7 100644 --- a/efs.py +++ b/efs.py @@ -239,8 +239,11 @@ pass # Handled by AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (compare_aws_tags, camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (compare_aws_tags, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + ) def _index_by_key(key, items): diff --git a/elasticache.py b/elasticache.py index e930ab40393..df5b7693557 100644 --- a/elasticache.py +++ b/elasticache.py @@ -133,7 +133,12 @@ from time import sleep from traceback import format_exc from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) try: import boto3 diff --git a/elasticache_info.py b/elasticache_info.py index 359cca82206..5d8537aa09b 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -227,7 +227,11 @@ ''' from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, + camel_dict_to_snake_dict, + AWSRetry, + boto3_tag_list_to_ansible_dict, + ) try: diff --git a/elb_application_lb.py b/elb_application_lb.py index 5536f3de6f1..bd0dea254e3 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -460,7 +460,12 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import ApplicationLoadBalancer, ELBListeners, ELBListener, ELBListenerRules, ELBListenerRule +from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import (ApplicationLoadBalancer, + ELBListeners, + ELBListener, + ELBListenerRules, + ELBListenerRule, + ) from ansible_collections.ansible.amazon.plugins.module_utils.aws.elb_utils import get_elb_listener_rules diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index d115d029f36..6d68f07f892 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -173,8 +173,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, - ec2_argument_spec, get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + 
ec2_argument_spec, + get_aws_connection_info, + ) def get_elb_listeners(connection, module, elb_arn): diff --git a/elb_instance.py b/elb_instance.py index 13971573b33..e7337a81af7 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -101,8 +101,12 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) class ElbManager: diff --git a/elb_target.py b/elb_target.py index 9c3ff4eea36..e6af7bac817 100644 --- a/elb_target.py +++ b/elb_target.py @@ -114,9 +114,12 @@ from time import time, sleep from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict, - ec2_argument_spec, get_aws_connection_info, - AWSRetry) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + AWSRetry, + ) try: import boto3 diff --git a/elb_target_group.py b/elb_target_group.py index f6194b07ec1..ad27c879826 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -382,8 +382,11 @@ pass # caught by AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, - compare_aws_tags, ansible_dict_to_boto3_tag_list) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + compare_aws_tags, + ansible_dict_to_boto3_tag_list, + ) from distutils.version import LooseVersion diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 65cdc0e707e..e1e94c4e3b1 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -218,8 +218,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, - ec2_argument_spec, get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def get_target_group_attributes(connection, module, target_group_arn): diff --git a/iam.py b/iam.py index b77bb219a27..a9a2f1021ae 100644 --- a/iam.py +++ b/iam.py @@ -189,8 +189,12 @@ pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO, + boto_exception, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) def _paginate(func, attr): diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 3b8d4736aef..fd393359f24 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -128,8 +128,14 @@ pass # caught by imported HAS_BOTO3 from ansible.module_utils.basic 
import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry, - camel_dict_to_snake_dict, HAS_BOTO3, compare_policies) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, + get_aws_connection_info, + ec2_argument_spec, + AWSRetry, + camel_dict_to_snake_dict, + HAS_BOTO3, + compare_policies, + ) from ansible.module_utils._text import to_native diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 712a951a5d9..6f7b9f1e3da 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -70,8 +70,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_mfa_devices(connection, module): diff --git a/iam_role.py b/iam_role.py index 7b865efe896..d7da07b005c 100644 --- a/iam_role.py +++ b/iam_role.py @@ -198,7 +198,11 @@ from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + compare_aws_tags, + ) try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/lambda_alias.py b/lambda_alias.py index 0e28d2a147c..ca2188c43a4 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -155,8 +155,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) class AWSConnection: diff --git a/lambda_event.py b/lambda_event.py index aea5c8ad4da..b370e6026da 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -134,8 +134,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) # --------------------------------------------------------------------------------------------------- diff --git a/rds_instance.py b/rds_instance.py index 7fa4b8f171d..adf27f78012 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -747,7 +747,13 @@ from ansible.module_utils._text import to_text from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.ansible.amazon.plugins.module_utils.aws.rds import 
ensure_tags, arg_spec_to_rds_params, call_method, get_rds_method_attribute, get_tags, get_final_identifier +from ansible_collections.ansible.amazon.plugins.module_utils.aws.rds import (ensure_tags, + arg_spec_to_rds_params, + call_method, + get_rds_method_attribute, + get_tags, + get_final_identifier, + ) from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry diff --git a/rds_instance_info.py b/rds_instance_info.py index bbb8d8a08d0..717e68e0f3a 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -348,7 +348,11 @@ ''' from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, camel_dict_to_snake_dict +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + AWSRetry, + camel_dict_to_snake_dict, + ) try: diff --git a/s3_sync.py b/s3_sync.py index f252ab4d9e1..350d9d56805 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -234,7 +234,13 @@ # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn, get_aws_connection_info, HAS_BOTO3, boto_exception +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + HAS_BOTO3, + boto_exception, + ) from ansible.module_utils._text import to_text try: diff --git a/s3_website.py b/s3_website.py index 22a73f4d023..eaa8bfb5baa 100644 --- a/s3_website.py +++ b/s3_website.py @@ -173,8 +173,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, - get_aws_connection_info) +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def _create_redirect_dict(url): diff --git a/sqs_queue.py b/sqs_queue.py index 98b144732de..eddd8eaaa2b 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -224,7 +224,12 @@ import json from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, snake_dict_to_camel_dict, compare_policies +from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, + camel_dict_to_snake_dict, + compare_aws_tags, + snake_dict_to_camel_dict, + compare_policies, + ) try: from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError From 9b2c468e5fe2a7b8f4aa78c92d6e7fdf193d1913 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Wed, 25 Mar 2020 15:39:40 -0700 Subject: [PATCH 003/683] Rename collection (#12) * Rename core collection Rename references to ansible.amazon to amazon.aws. 
* Rename community.amazon to community.aws Fix pep8 line lengths for rewritten amazon.aws imports * Missed a path in shippable.sh * Dependency repos moved --- aws_acm.py | 8 +++--- aws_acm_info.py | 8 +++--- aws_api_gateway.py | 8 +++--- aws_application_scaling_policy.py | 8 +++--- aws_batch_compute_environment.py | 8 +++--- aws_batch_job_definition.py | 10 +++---- aws_batch_job_queue.py | 10 +++---- aws_codebuild.py | 8 +++--- aws_codecommit.py | 8 +++--- aws_codepipeline.py | 8 +++--- aws_config_aggregation_authorization.py | 8 +++--- aws_config_aggregator.py | 8 +++--- aws_config_delivery_channel.py | 8 +++--- aws_config_recorder.py | 8 +++--- aws_config_rule.py | 8 +++--- aws_direct_connect_connection.py | 18 ++++++------ aws_direct_connect_gateway.py | 14 +++++----- aws_direct_connect_link_aggregation_group.py | 28 +++++++++---------- aws_direct_connect_virtual_interface.py | 10 +++---- aws_eks_cluster.py | 10 +++---- aws_elasticbeanstalk_app.py | 6 ++-- aws_glue_connection.py | 8 +++--- aws_glue_job.py | 8 +++--- aws_inspector_target.py | 10 +++---- aws_kms.py | 12 ++++---- aws_kms_info.py | 10 +++---- aws_region_info.py | 8 +++--- aws_s3_bucket_info.py | 16 +++++------ aws_s3_cors.py | 8 +++--- aws_secret.py | 10 +++---- aws_ses_identity.py | 8 +++--- aws_ses_identity_policy.py | 8 +++--- aws_ses_rule_set.py | 8 +++--- aws_sgw_info.py | 8 +++--- aws_ssm_parameter_store.py | 6 ++-- aws_step_functions_state_machine.py | 16 +++++------ aws_step_functions_state_machine_execution.py | 8 +++--- aws_waf_condition.py | 12 ++++---- aws_waf_info.py | 8 +++--- aws_waf_rule.py | 26 ++++++++--------- aws_waf_web_acl.py | 22 +++++++-------- cloudformation_exports_info.py | 8 +++--- cloudformation_stack_set.py | 16 +++++------ cloudfront_distribution.py | 12 ++++---- cloudfront_info.py | 8 +++--- cloudfront_invalidation.py | 12 ++++---- cloudfront_origin_access_identity.py | 10 +++---- cloudtrail.py | 14 +++++----- cloudwatchevent_rule.py | 8 +++--- cloudwatchlogs_log_group.py | 16 +++++------ cloudwatchlogs_log_group_info.py | 16 +++++------ cloudwatchlogs_log_group_metric_filter.py | 8 +++--- data_pipeline.py | 6 ++-- dms_endpoint.py | 8 +++--- dms_replication_subnet_group.py | 8 +++--- dynamodb_table.py | 8 +++--- dynamodb_ttl.py | 16 +++++------ ec2_ami_copy.py | 8 +++--- ec2_asg.py | 10 +++---- ec2_asg_info.py | 8 +++--- ec2_asg_lifecycle_hook.py | 6 ++-- ec2_customer_gateway.py | 16 +++++------ ec2_customer_gateway_info.py | 14 +++++----- ec2_eip.py | 8 +++--- ec2_eip_info.py | 14 +++++----- ec2_elb.py | 16 +++++------ ec2_elb_info.py | 6 ++-- ec2_instance.py | 24 ++++++++-------- ec2_instance_info.py | 18 ++++++------ ec2_launch_template.py | 16 +++++------ ec2_lc.py | 22 +++++++-------- ec2_lc_find.py | 6 ++-- ec2_lc_info.py | 16 +++++------ ec2_metric_alarm.py | 6 ++-- ec2_placement_group.py | 8 +++--- ec2_placement_group_info.py | 6 ++-- ec2_scaling_policy.py | 16 +++++------ ec2_snapshot_copy.py | 6 ++-- ec2_transit_gateway.py | 8 +++--- ec2_transit_gateway_info.py | 8 +++--- ec2_vpc_egress_igw.py | 8 +++--- ec2_vpc_endpoint.py | 16 +++++------ ec2_vpc_endpoint_info.py | 20 ++++++------- ec2_vpc_igw.py | 10 +++---- ec2_vpc_igw_info.py | 18 ++++++------ ec2_vpc_nacl.py | 8 +++--- ec2_vpc_nacl_info.py | 16 +++++------ ec2_vpc_nat_gateway.py | 16 +++++------ ec2_vpc_nat_gateway_info.py | 20 ++++++------- ec2_vpc_peer.py | 8 +++--- ec2_vpc_peering_info.py | 20 ++++++------- ec2_vpc_route_table.py | 16 +++++------ ec2_vpc_route_table_info.py | 6 ++-- ec2_vpc_vgw.py | 10 +++---- 
ec2_vpc_vgw_info.py | 18 ++++++------ ec2_vpc_vpn.py | 8 +++--- ec2_vpc_vpn_info.py | 14 +++++----- ec2_win_password.py | 6 ++-- ecs_attribute.py | 6 ++-- ecs_cluster.py | 6 ++-- ecs_ecr.py | 8 +++--- ecs_service.py | 8 +++--- ecs_service_info.py | 8 +++--- ecs_tag.py | 8 +++--- ecs_task.py | 8 +++--- ecs_taskdefinition.py | 8 +++--- ecs_taskdefinition_info.py | 8 +++--- efs.py | 16 +++++------ efs_info.py | 10 +++---- elasticache.py | 16 +++++------ elasticache_info.py | 16 +++++------ elasticache_parameter_group.py | 6 ++-- elasticache_snapshot.py | 6 ++-- elasticache_subnet_group.py | 6 ++-- elb_application_lb.py | 22 +++++++-------- elb_application_lb_info.py | 16 +++++------ elb_classic_lb.py | 6 ++-- elb_classic_lb_info.py | 8 +++--- elb_instance.py | 16 +++++------ elb_network_lb.py | 10 +++---- elb_target.py | 16 +++++------ elb_target_group.py | 16 +++++------ elb_target_group_info.py | 16 +++++------ elb_target_info.py | 8 +++--- execute_lambda.py | 6 ++-- iam.py | 16 +++++------ iam_cert.py | 6 ++-- iam_group.py | 10 +++---- iam_managed_policy.py | 20 ++++++------- iam_mfa_device_info.py | 16 +++++------ iam_password_policy.py | 8 +++--- iam_policy.py | 8 +++--- iam_policy_info.py | 6 ++-- iam_role.py | 18 ++++++------ iam_role_info.py | 8 +++--- iam_saml_federation.py | 8 +++--- iam_server_certificate_info.py | 6 ++-- iam_user.py | 8 +++--- iam_user_info.py | 8 +++--- kinesis_stream.py | 6 ++-- lambda.py | 10 +++---- lambda_alias.py | 16 +++++------ lambda_event.py | 16 +++++------ lambda_facts.py | 8 +++--- lambda_info.py | 8 +++--- lambda_policy.py | 6 ++-- lightsail.py | 8 +++--- rds.py | 8 +++--- rds_instance.py | 24 ++++++++-------- rds_instance_info.py | 16 +++++------ rds_param_group.py | 10 +++---- rds_snapshot.py | 10 +++---- rds_snapshot_info.py | 8 +++--- rds_subnet_group.py | 6 ++-- redshift.py | 8 +++--- redshift_cross_region_snapshots.py | 6 ++-- redshift_info.py | 8 +++--- redshift_subnet_group.py | 6 ++-- route53.py | 4 +-- route53_health_check.py | 6 ++-- route53_info.py | 6 ++-- route53_zone.py | 6 ++-- s3_bucket_notification.py | 8 +++--- s3_lifecycle.py | 6 ++-- s3_logging.py | 6 ++-- s3_sync.py | 18 ++++++------ s3_website.py | 16 +++++------ sns.py | 6 ++-- sns_topic.py | 8 +++--- sqs_queue.py | 18 ++++++------ sts_assume_role.py | 8 +++--- sts_session_token.py | 6 ++-- 172 files changed, 924 insertions(+), 924 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 421c9e6ad76..3e24adfd364 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -152,8 +152,8 @@ author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -226,8 +226,8 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.acm import ACMServiceManager from ansible.module_utils._text import to_text import base64 import re # regex library diff --git a/aws_acm_info.py b/aws_acm_info.py index 31c4ddef370..094d8a2ac6c 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -46,8 +46,8 @@ author: - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -262,8 +262,8 @@ type: str ''' -from 
ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.acm import ACMServiceManager def main(): diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 86cfbf129e0..fe3cd969e6f 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -107,8 +107,8 @@ author: - 'Michael De La Rue (@mikedlr)' extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 notes: - A future version of this module will probably use tags or another @@ -181,8 +181,8 @@ pass import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict) def main(): diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 563a94f2c76..6fefaee2c3d 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -105,8 +105,8 @@ default: no type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -286,8 +286,8 @@ sample: '2017-09-28T08:22:51.881000-03:00' ''' # NOQA -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict try: import botocore diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index a41e8249ecd..6329bba214c 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -121,8 +121,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -231,8 +231,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict import re try: diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index b443e6199a7..07bcf127295 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -175,8 +175,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -226,9 +226,9 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.batch import cc, set_api_params -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 
import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.batch import cc, set_api_params +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index 0d4828ac576..77726175510 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -65,8 +65,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -114,9 +114,9 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.batch import set_api_params -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.batch import set_api_params +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_codebuild.py b/aws_codebuild.py index ae0726aa1d4..219289ee2db 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -161,8 +161,8 @@ choices: ['present', 'absent'] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -289,8 +289,8 @@ sample: "2018-04-17T16:56:03.245000+02:00" ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict try: diff --git a/aws_codecommit.py b/aws_codecommit.py index c946a95aa7d..f8d7df05d2a 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -44,8 +44,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -144,8 +144,8 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class CodeCommit(object): diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 703f905af20..84afe1a4dc1 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -77,8 +77,8 @@ choices: ['present', 'absent'] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -200,8 +200,8 @@ import traceback from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from 
ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies try: diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index be29aa1a3ad..78126794905 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -39,8 +39,8 @@ type: str required: true extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -63,8 +63,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def resource_exists(client, module, params): diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 065a5505a09..3f39f57cbc9 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -74,8 +74,8 @@ type: dict required: true extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -100,8 +100,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict def resource_exists(client, module, params): diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index 54fdb6f7ede..0005d68d462 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -52,8 +52,8 @@ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -76,8 +76,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry # this waits for an IAM role to become fully available, at the cost of diff --git a/aws_config_recorder.py b/aws_config_recorder.py index 7ba1b0db535..a614748e739 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -65,8 +65,8 @@ - Before you can set this option, you must set I(all_supported=false). 
type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -90,8 +90,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry def resource_exists(client, module, params): diff --git a/aws_config_rule.py b/aws_config_rule.py index d3eed699cab..e2a328ce1d3 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -89,8 +89,8 @@ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -118,8 +118,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict def rule_exists(client, module, params): diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 395ac4478d8..601745de4ff 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -20,8 +20,8 @@ The connection may later be associated or disassociated with a link aggregation group. author: "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -158,13 +158,13 @@ """ import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, - delete_connection, - associate_connection_and_lag, - disassociate_connection_and_lag, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) +from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import (DirectConnectError, + delete_connection, + associate_connection_and_lag, + disassociate_connection_and_lag, + ) try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index d157e56a34f..d2fc624d747 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -21,8 +21,8 @@ - Attaches Virtual Gateways to Direct Connect Gateway. - Detaches Virtual Gateways to Direct Connect Gateway. 
extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ boto3 ] options: @@ -110,11 +110,11 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + ) from ansible.module_utils._text import to_native diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index f7bb86ec077..dd7d57dcded 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -18,8 +18,8 @@ - Create, delete, or modify a Direct Connect link aggregation group. author: "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -164,19 +164,19 @@ returned: when I(state=present) """ -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - HAS_BOTO3, - get_aws_connection_info, - boto3_conn, - AWSRetry, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + HAS_BOTO3, + get_aws_connection_info, + boto3_conn, + AWSRetry, + ) from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import (DirectConnectError, - delete_connection, - delete_virtual_interface, - disassociate_connection_and_lag, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import (DirectConnectError, + delete_connection, + delete_virtual_interface, + disassociate_connection_and_lag, + ) import traceback import time diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 96616532587..4a5276631ba 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -81,8 +81,8 @@ - The virtual interface ID. 
type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -238,9 +238,9 @@ ''' import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 19bf5ed62d5..1b1693be225 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -61,8 +61,8 @@ requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -163,9 +163,9 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter try: import botocore.exceptions diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index ae69e45092c..c7f45a34e86 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -45,8 +45,8 @@ - Harpreet Singh (@hsingh) - Stephen Granger (@viper233) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -92,7 +92,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule def describe_app(ebs, app_name, module): diff --git a/aws_glue_connection.py b/aws_glue_connection.py index ae9446a7963..ef1f753abf9 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -64,8 +64,8 @@ - The subnet ID used by the connection. 
type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -132,8 +132,8 @@ sample: {'subnet-id':'subnet-aabbccddee'} ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names # Non-ansible imports import copy diff --git a/aws_glue_job.py b/aws_glue_job.py index 1bd8e8eaf64..6fbe23603d8 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -78,8 +78,8 @@ - The job timeout in minutes. type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -187,8 +187,8 @@ sample: 300 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict # Non-ansible imports import copy diff --git a/aws_inspector_target.py b/aws_inspector_target.py index c31456ccff7..1d33ec00411 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -38,8 +38,8 @@ - Required if C(state=present). type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -101,9 +101,9 @@ sample: "2018-01-29T13:48:51.958000+00:00" ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, diff --git a/aws_kms.py b/aws_kms.py index 2ba02bf70f7..5ad254ed6c9 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -172,8 +172,8 @@ - Will Thames (@willthames) - Mark Chappell (@tremble) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -396,10 +396,10 @@ 'admin': 'Allow access for Key Administrators' } -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 
import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, compare_policies from ansible.module_utils.six import string_types import json diff --git a/aws_kms_info.py b/aws_kms_info.py index 3e47206ecab..c615d3a293f 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -31,8 +31,8 @@ default: False type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -219,9 +219,9 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3 -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict import traceback diff --git a/aws_region_info.py b/aws_region_info.py index 2427beb0841..dee88d852ba 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -28,8 +28,8 @@ default: {} type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [botocore, boto3] ''' @@ -59,8 +59,8 @@ }]" ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 1b8d4eefb0d..46f2f20117b 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -23,8 +23,8 @@ Note that the M(aws_s3_bucket_info) module no longer returns C(ansible_facts)! 
author: "Gerben Geijteman (@hyperized)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -61,12 +61,12 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - ec2_argument_spec, - HAS_BOTO3, - camel_dict_to_snake_dict, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + ec2_argument_spec, + HAS_BOTO3, + camel_dict_to_snake_dict, + get_aws_connection_info, + ) def get_bucket_list(module, connection): diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 5bb05bc59fb..272b177e6cf 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -34,8 +34,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -101,8 +101,8 @@ except Exception: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies def create_or_update_bucket_cors(connection, module): diff --git a/aws_secret.py b/aws_secret.py index ff6fb88358e..afcb2976c3b 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -72,8 +72,8 @@ default: 30 type: int extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws ''' @@ -134,9 +134,9 @@ ''' from ansible.module_utils._text import to_bytes -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_ses_identity.py b/aws_ses_identity.py index 2afc5d6abfd..ccc40344fe4 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -91,8 +91,8 @@ default: True requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -222,8 +222,8 @@ type: bool ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info import time diff --git a/aws_ses_identity_policy.py 
b/aws_ses_identity_policy.py index 08cf4a7b7ac..5bbf91343b8 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -42,8 +42,8 @@ type: str requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -86,8 +86,8 @@ sample: [ExamplePolicy] ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry import json diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index 4c9fd730a11..b5e78d1b419 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -48,8 +48,8 @@ required: False default: False extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -102,8 +102,8 @@ }] """ -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_sgw_info.py b/aws_sgw_info.py index 2a734cbde2a..b1ac00ef70c 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -46,8 +46,8 @@ required: false default: true extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -175,8 +175,8 @@ region: eu-west-3 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index ebfafee4c41..9e0683d1c85 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -72,8 +72,8 @@ - Bill Wang (@ozbillwang) - Michael De La Rue (@mikedlr) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ botocore, boto3 ] ''' @@ -128,7 +128,7 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index bde78d7f1e0..9a9bdf365a4 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -60,8 +60,8 @@ type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: @@ -101,12 +101,12 @@ returned: always ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from 
ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, - AWSRetry, - compare_aws_tags, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, + AWSRetry, + compare_aws_tags, + boto3_tag_list_to_ansible_dict, + ) try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index 29ed1634ee7..d4c2b7324d4 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -51,8 +51,8 @@ default: '' extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: @@ -93,8 +93,8 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/aws_waf_condition.py b/aws_waf_condition.py index 1b2c887f67f..c9184103cb7 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -21,8 +21,8 @@ - Will Thames (@willthames) - Mike Mochan (@mmochan) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: name: @@ -404,10 +404,10 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff class Condition(object): diff --git a/aws_waf_info.py b/aws_waf_info.py index 5da7e6cff9f..8b3b6e87da7 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -31,8 +31,8 @@ - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -118,8 +118,8 @@ ] ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import list_web_acls, get_web_acl +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import list_web_acls, get_web_acl def main(): diff 
--git a/aws_waf_rule.py b/aws_waf_rule.py index 0475c6447b5..95dc831acbf 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -21,8 +21,8 @@ - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: name: @@ -148,17 +148,17 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (run_func_with_change_token_backoff, - list_rules_with_backoff, - list_regional_rules_with_backoff, - MATCH_LOOKUP, - ) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (get_web_acl_with_backoff, - list_web_acls_with_backoff, - list_regional_web_acls_with_backoff, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (run_func_with_change_token_backoff, + list_rules_with_backoff, + list_regional_rules_with_backoff, + MATCH_LOOKUP, + ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (get_web_acl_with_backoff, + list_web_acls_with_backoff, + list_regional_web_acls_with_backoff, + ) def get_rule_by_name(client, module, name): diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index ee6f103a825..a942a4cb77a 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -20,8 +20,8 @@ - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: name: @@ -166,15 +166,15 @@ import re -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waf import (list_rules_with_backoff, - list_web_acls_with_backoff, - list_regional_web_acls_with_backoff, - run_func_with_change_token_backoff, - list_regional_rules_with_backoff, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (list_rules_with_backoff, + list_web_acls_with_backoff, + list_regional_web_acls_with_backoff, + run_func_with_change_token_backoff, + list_regional_rules_with_backoff, + ) def get_web_acl_by_name(client, module, name): diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 465608151aa..e254d1deafa 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -18,8 +18,8 @@ author: - "Michael Moyle (@mmoyle)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -40,8 +40,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from 
ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: from botocore.exceptions import ClientError diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 5636a084a6b..6d20469c764 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -173,8 +173,8 @@ author: "Ryan Scott Brown (@ryansb)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ boto3>=1.6, botocore>=1.10.26 ] ''' @@ -312,12 +312,12 @@ # handled by AnsibleAWSModule pass -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - camel_dict_to_snake_dict, - ) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + camel_dict_to_snake_dict, + ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils._text import to_native diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index e4dce8cd27a..1b7a8f07eab 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -30,8 +30,8 @@ - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: @@ -1376,11 +1376,11 @@ ''' from ansible.module_utils._text import to_text, to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager from ansible.module_utils.common.dict_transformations import recursive_diff -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict import datetime try: diff --git a/cloudfront_info.py b/cloudfront_info.py index 1850b027c43..71f584b852a 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -149,8 +149,8 @@ type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -265,8 +265,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3 -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict from ansible.module_utils.basic import AnsibleModule from functools import partial import traceback diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 5de20501939..1cf6f3dfa7e 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -26,8 +26,8 @@ author: Willem van Ketwich (@wilvk) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: @@ -138,10 +138,10 @@ sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager import datetime try: diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index d175483d271..9e788fdc21f 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -29,8 +29,8 @@ author: Willem van Ketwich (@wilvk) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 options: @@ -122,9 +122,9 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule import datetime from functools import partial import json diff --git a/cloudtrail.py b/cloudtrail.py index 087419f6917..5fb0858d9ab 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -103,8 +103,8 @@ type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -258,11 +258,11 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + 
) def create_trail(module, client, ct_params): diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 49c6acc65fd..8167ac6a3de 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -18,8 +18,8 @@ description: - This module creates and manages CloudWatch event rules and targets. extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: "Jim Dalton (@jsdalton) " requirements: @@ -158,8 +158,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class CloudWatchEventRule(object): diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index 9b6e34c12e0..af883b32d15 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -67,8 +67,8 @@ required: false type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -134,12 +134,12 @@ import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - camel_dict_to_snake_dict, - boto3_conn, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + camel_dict_to_snake_dict, + boto3_conn, + ec2_argument_spec, + get_aws_connection_info, + ) try: import botocore diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 06c857cde06..4bbc25534d2 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -26,8 +26,8 @@ - The name or prefix of the log group to filter by. type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -76,12 +76,12 @@ import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - camel_dict_to_snake_dict, - boto3_conn, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + camel_dict_to_snake_dict, + boto3_conn, + ec2_argument_spec, + get_aws_connection_info, + ) try: import botocore diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index 512e49a3cba..52f7f3c9306 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -63,8 +63,8 @@ - The value to emit when a filter pattern does not match a log event. 
type: float extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -99,8 +99,8 @@ log_group_name: metric_filter_count: """ -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError, WaiterError diff --git a/data_pipeline.py b/data_pipeline.py index d734e32e249..5637c590ed4 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -20,8 +20,8 @@ requirements: [ "boto3" ] short_description: Create and manage AWS Datapipelines extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 description: - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) @@ -214,7 +214,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict from ansible.module_utils._text import to_text diff --git a/dms_endpoint.py b/dms_endpoint.py index b4f24643c66..aac3f2aeab4 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -147,8 +147,8 @@ author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -172,8 +172,8 @@ RETURN = ''' # ''' __metaclass__ = type import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore except ImportError: diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 2a786ff78fd..3dfc121d3af 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -47,8 +47,8 @@ author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -63,8 +63,8 @@ RETURN = ''' # ''' import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore except ImportError: diff --git a/dynamodb_table.py b/dynamodb_table.py index 1ecaf22184f..47a65123a84 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -120,8 +120,8 @@ default: 60 type: int 
extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -200,13 +200,13 @@ try: import botocore - from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn + from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info DYNAMO_TYPE_DEFAULT = 'STRING' diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index b821839cdde..d4ed856c215 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -38,8 +38,8 @@ author: Ted Timmons (@tedder) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ botocore>=1.5.24, boto3 ] ''' @@ -77,12 +77,12 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def get_current_ttl_state(c, table_name): diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 2cffae21342..832d3155a91 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -71,8 +71,8 @@ - Amir Moulavi (@amir343) - Tim C (@defunctio) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -136,8 +136,8 @@ sample: ami-e689729e ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list from ansible.module_utils._text import to_native try: diff --git a/ec2_asg.py b/ec2_asg.py index 136fb4e3cde..3a852e1a0e3 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -239,8 +239,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -532,8 +532,8 @@ import traceback from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict ) @@ -543,7 +543,7 @@ except ImportError: pass # will be detected by imported HAS_BOTO3 -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 
'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', diff --git a/ec2_asg_info.py b/ec2_asg_info.py index cf3a10b90f5..0f30eaa47bc 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -35,8 +35,8 @@ required: false type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -227,8 +227,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def match_asg_tags(tags_to_match, asg): diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index f648a699207..25f143f1ce5 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -73,8 +73,8 @@ default: ABANDON type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ boto3>=1.4.4 ] @@ -104,7 +104,7 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: import botocore diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index c0e97c97bc8..d14ffcfbce4 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -52,8 +52,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -126,12 +126,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - AWSRetry, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + AWSRetry, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) class Ec2CustomerGatewayManager: diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index ef64be52852..65ed984f486 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -30,8 +30,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -87,11 +87,11 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/ec2_eip.py b/ec2_eip.py index dba01639c32..b7441826a9b 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -86,8 +86,8 @@ - The I(wait_timeout) option does nothing and will be removed in Ansible 2.14. 
type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: "Rick Mendes (@rickmendes) " notes: @@ -226,8 +226,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): diff --git a/ec2_eip_info.py b/ec2_eip_info.py index b6212fb6311..047041ff755 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -28,8 +28,8 @@ default: {} type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -99,11 +99,11 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: diff --git a/ec2_elb.py b/ec2_elb.py index fb89714609c..bf77dbca503 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -56,8 +56,8 @@ default: 0 type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -96,12 +96,12 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) class ElbManager: diff --git a/ec2_elb_info.py b/ec2_elb_info.py index b431c9c98e5..d83b4fe119d 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -37,8 +37,8 @@ - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. 
type: list extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -84,7 +84,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, connect_to_aws, ec2_argument_spec, diff --git a/ec2_instance.py b/ec2_instance.py index ca090c13d7c..0b268a6f05a 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -278,8 +278,8 @@ type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -811,16 +811,16 @@ from ansible.module_utils.six import text_type, string_types from ansible.module_utils.six.moves.urllib import parse as urlparse from ansible.module_utils._text import to_bytes, to_native -import ansible_collections.ansible.amazon.plugins.module_utils.ec2 as ec2_utils -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_filter_list, - compare_aws_tags, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - camel_dict_to_snake_dict, - ) - -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +import ansible_collections.amazon.aws.plugins.module_utils.ec2 as ec2_utils +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, + ansible_dict_to_boto3_filter_list, + compare_aws_tags, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + camel_dict_to_snake_dict, + ) + +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule module = None diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 865b7d70d06..9bb1ff56e7d 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -37,8 +37,8 @@ type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -503,13 +503,13 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_conn, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_ec2_instances(connection, module): diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 0aa7d68b81d..2de514ad247 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -24,8 +24,8 @@ - botocore - boto3 >= 1.6.0 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: - Ryan Scott Brown (@ryansb) @@ -369,13 +369,13 @@ from uuid import uuid4 from ansible.module_utils._text import to_text -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 
import (ansible_dict_to_boto3_tag_list, - AWSRetry, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, + AWSRetry, + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, + ) try: from botocore.exceptions import ClientError, BotoCoreError, WaiterError diff --git a/ec2_lc.py b/ec2_lc.py index 6d168583e49..5211391d1cf 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -184,8 +184,8 @@ type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: @@ -454,15 +454,15 @@ import traceback -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, - ec2_argument_spec, - ec2_connect, - camel_dict_to_snake_dict, - get_ec2_security_group_ids_from_names, - boto3_conn, - snake_dict_to_camel_dict, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, + ec2_argument_spec, + ec2_connect, + camel_dict_to_snake_dict, + get_ec2_security_group_ids_from_names, + boto3_conn, + snake_dict_to_camel_dict, + HAS_BOTO3, + ) from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 1ba21ae382f..8ff3ddb0c39 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -46,8 +46,8 @@ - "python >= 2.6" - boto3 extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws ''' @@ -142,7 +142,7 @@ import re from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info def find_launch_configs(client, module): diff --git a/ec2_lc_info.py b/ec2_lc_info.py index d0f340ca597..9a16f2fd0ab 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -51,8 +51,8 @@ - Corresponds to Python slice notation. 
type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -163,12 +163,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_launch_configs(connection, module): diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 05bd69654b9..6d87ea9595b 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -165,8 +165,8 @@ - 'missing' default: 'missing' extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -207,7 +207,7 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/ec2_placement_group.py b/ec2_placement_group.py index d1d26535261..f4b3a7e2fbf 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -41,8 +41,8 @@ choices: [ 'cluster', 'spread' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -90,8 +90,8 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index f0e7092e43e..47df4dd7ed4 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -28,8 +28,8 @@ required: false default: [] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -74,7 +74,7 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 321047f709b..01b3210afde 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -53,8 +53,8 @@ - The minimum period of time (in seconds) between which autoscaling actions can take place. 
type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -79,12 +79,12 @@ pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) def create_scaling_policy(connection, module): diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 89ace145e6c..98ac9c8b4df 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -59,8 +59,8 @@ type: dict author: Deepak Kothandan (@Deepakkothandan) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -116,7 +116,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict) from ansible.module_utils._text import to_native try: diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 8408b4369c7..9ab13d83042 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -87,8 +87,8 @@ author: "Bob Boldin (@BobBoldin)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -230,10 +230,10 @@ pass # handled by imported AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from time import sleep, time from ansible.module_utils._text import to_text -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( ansible_dict_to_boto3_tag_list, ansible_dict_to_boto3_filter_list, AWSRetry, diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 041e88ae638..e4fbb14821e 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -33,8 +33,8 @@ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters. 
type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -173,8 +173,8 @@ pass # handled by imported AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index cba7aa66a8c..6f0c1ab36b1 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -30,8 +30,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -61,8 +61,8 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: import botocore diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index aa55014c88b..760af35c62e 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -95,8 +95,8 @@ type: str author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -190,12 +190,12 @@ pass # will be picked up by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, - boto3_conn, - ec2_argument_spec, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, + boto3_conn, + ec2_argument_spec, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) from ansible.module_utils.six import string_types diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 1436080ef17..a43ef54ac13 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -39,8 +39,8 @@ type: dict author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -117,14 +117,14 @@ pass # will be picked up from imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - camel_dict_to_snake_dict, - AWSRetry, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + camel_dict_to_snake_dict, + AWSRetry, + ) def date_handler(obj): diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 76973a6e341..43b74c163d5 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -36,8 +36,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - botocore @@ -89,9 +89,9 @@ except ImportError: pass # caught by AnsibleAWSModule -from 
ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 07333c1613e..1c407a36b89 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -32,8 +32,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -98,13 +98,13 @@ pass # will be captured by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + ) def get_internet_gateway_info(internet_gateway): diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index f7c8be76b44..213cf167cd4 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -80,8 +80,8 @@ default: present author: Mike Mochan (@mmochan) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ botocore, boto3, json ] ''' @@ -159,8 +159,8 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 068b62845ff..b5a8d3bc251 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -39,8 +39,8 @@ - By default, the module will return all Network ACLs. 
extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -112,13 +112,13 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_filter_list, - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, + ansible_dict_to_boto3_filter_list, + camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + ) # VPC-supported IANA protocol numbers diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 5cb3236885e..09fc70de335 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -80,8 +80,8 @@ - Jon Hadfield (@jonhadfield) - Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -209,12 +209,12 @@ pass # caught by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + HAS_BOTO3, + ) DRY_RUN_GATEWAYS = [ diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index bd1dde7ce7f..a4891391854 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -32,8 +32,8 @@ type: dict author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -88,14 +88,14 @@ pass # will be detected by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + HAS_BOTO3, + ) def date_handler(obj): diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 9e1cdd06112..28de7788ef0 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -57,8 +57,8 @@ type: str author: Mike Mochan (@mmochan) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ botocore, boto3, json ] ''' @@ -229,8 +229,8 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3 -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import 
is_boto3_error_code def tags_changed(pcx_id, client, module): diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index b30fb43bf59..2561a209283 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -31,8 +31,8 @@ type: dict author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -78,14 +78,14 @@ pass # will be picked up by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, - ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, + ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 538f40b0f0b..a90242148b2 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -79,8 +79,8 @@ - Required when I(state=present) or I(lookup=tag). type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -229,12 +229,12 @@ import re from time import sleep -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, AWSRetry try: diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index 2ad8b73f499..a86e9542570 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -26,8 +26,8 @@ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. 
type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -62,7 +62,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info def get_route_table_info(route_table): diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index a8ba032058c..8fce681bbe8 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -62,8 +62,8 @@ type: dict author: Nick Aslanidis (@naslanidis) extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws ''' @@ -125,10 +125,10 @@ except ImportError: HAS_BOTO3 = False -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry from ansible.module_utils._text import to_native diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index e6b9b96ffd0..64f522d80db 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -32,8 +32,8 @@ elements: str author: "Nick Aslanidis (@naslanidis)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -101,13 +101,13 @@ pass # will be captured by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_filter_list, + HAS_BOTO3, + ) def get_virtual_gateway_info(virtual_gateway): diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 586138d3a1b..a1b6bd82b07 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -18,8 +18,8 @@ - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters option or specifying the VPN connection identifier. 
extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws requirements: ['boto3', 'botocore'] author: "Sloane Hertel (@s-hertel)" @@ -300,9 +300,9 @@ vpn_connection_id: vpn-781e0e19 """ -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils._text import to_text -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index bcb11b657a0..991977346d0 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -34,8 +34,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -169,11 +169,11 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ) def date_handler(obj): diff --git a/ec2_win_password.py b/ec2_win_password.py index ccc9ea1fb90..7ed14ed7204 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -54,8 +54,8 @@ type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: @@ -116,7 +116,7 @@ HAS_CRYPTOGRAPHY = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect from ansible.module_utils._text import to_bytes diff --git a/ecs_attribute.py b/ecs_attribute.py index 7bec7343b64..9e812275e9e 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -58,8 +58,8 @@ required: true type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -121,7 +121,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info class EcsAttributes(object): diff --git a/ecs_cluster.py b/ecs_cluster.py index 25937369ec9..6e6cc54b255 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -47,8 +47,8 @@ type: int default: 10 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -116,7 +116,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, 
ec2_argument_spec, get_aws_connection_info class EcsClusterManager: diff --git a/ecs_ecr.py b/ecs_ecr.py index 7989d404dd2..4d2bf1bab09 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -79,8 +79,8 @@ author: - David M. Lee (@leedm777) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -180,8 +180,8 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict from ansible.module_utils.six import string_types diff --git a/ecs_service.py b/ecs_service.py index 23b1fcfea95..1446704a598 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -184,8 +184,8 @@ choices: ["DAEMON", "REPLICA"] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -473,8 +473,8 @@ 'minimum_healthy_percent': 'int' } -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names try: import botocore diff --git a/ecs_service_info.py b/ecs_service_info.py index 974b4fba867..f82f5c5e6a8 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -47,8 +47,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -140,8 +140,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class EcsServiceManager: diff --git a/ecs_tag.py b/ecs_tag.py index 1a093e9fc5e..a9def34ac41 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -55,8 +55,8 @@ type: bool default: false extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -111,8 +111,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/ecs_task.py b/ecs_task.py index 717b0d8236d..41c381c269a 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -87,8 +87,8 @@ - Tags that will be added to ecs 
tasks on start and run required: false extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -216,9 +216,9 @@ type: str ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.basic import missing_required_lib -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list try: import botocore diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 88fe6b47353..895a52d29d1 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -103,8 +103,8 @@ required: false type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -216,8 +216,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils._text import to_text diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index c2a9f15008e..2c6fe847cea 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -32,8 +32,8 @@ required: true type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -302,8 +302,8 @@ type: str ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: import botocore diff --git a/efs.py b/efs.py index daac58e8fe7..5d72daa2c02 100644 --- a/efs.py +++ b/efs.py @@ -105,8 +105,8 @@ type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -238,12 +238,12 @@ except ImportError as e: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (compare_aws_tags, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (compare_aws_tags, + camel_dict_to_snake_dict, + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + ) def _index_by_key(key, items): diff --git a/efs_info.py b/efs_info.py index ca59d179c12..69a584215fe 100644 --- a/efs_info.py +++ b/efs_info.py @@ -43,8 +43,8 @@ type: list elements: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -181,9 +181,9 @@ except ImportError: pass # 
caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, AWSRetry -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict from ansible.module_utils._text import to_native diff --git a/elasticache.py b/elasticache.py index df5b7693557..a59eadc11e9 100644 --- a/elasticache.py +++ b/elasticache.py @@ -96,8 +96,8 @@ type: bool default: false extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -133,12 +133,12 @@ from time import sleep from traceback import format_exc from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, + get_aws_connection_info, + boto3_conn, + HAS_BOTO3, + camel_dict_to_snake_dict, + ) try: import boto3 diff --git a/elasticache_info.py b/elasticache_info.py index 5d8537aa09b..9126ee57f4e 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -25,8 +25,8 @@ author: - Will Thames (@willthames) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -226,12 +226,12 @@ Environment: test ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (get_aws_connection_info, - camel_dict_to_snake_dict, - AWSRetry, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, + camel_dict_to_snake_dict, + AWSRetry, + boto3_tag_list_to_ansible_dict, + ) try: diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index d9d8d19788f..9e64320def2 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -20,8 +20,8 @@ - Returns information about the specified cache cluster. 
author: "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ boto3, botocore ] options: @@ -111,7 +111,7 @@ # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict from ansible.module_utils._text import to_text from ansible.module_utils.six import string_types import traceback diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 675e3297801..d5f1d0204b1 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -20,8 +20,8 @@ - Returns information about the specified snapshot. author: "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: [ boto3, botocore ] options: @@ -128,7 +128,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict def create(module, connection, replication_id, cluster_id, name): diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 3441439adb3..ed56153c4f9 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -40,8 +40,8 @@ elements: str author: "Tim Mahoney (@timmahoney)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -70,7 +70,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connection_info def main(): diff --git a/elb_application_lb.py b/elb_application_lb.py index bd0dea254e3..9c49e648409 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -176,8 +176,8 @@ default: yes type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. 
@@ -457,16 +457,16 @@ sample: vpc-0011223344 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import (ApplicationLoadBalancer, - ELBListeners, - ELBListener, - ELBListenerRules, - ELBListenerRule, - ) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.elb_utils import get_elb_listener_rules +from ansible_collections.amazon.aws.plugins.module_utils.aws.elbv2 import (ApplicationLoadBalancer, + ELBListeners, + ELBListener, + ELBListenerRules, + ELBListenerRule, + ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.elb_utils import get_elb_listener_rules def create_or_update_elb(elb_obj): diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 6d68f07f892..b347941ee79 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -32,8 +32,8 @@ type: list extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -173,12 +173,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def get_elb_listeners(connection, module, elb_arn): diff --git a/elb_classic_lb.py b/elb_classic_lb.py index b8b08dfb4bd..5959ebaf3f0 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -132,8 +132,8 @@ type: dict extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -375,7 +375,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index f8c7a12f6e8..42be1a2265f 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -37,8 +37,8 @@ - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. 
type: list extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - botocore @@ -144,8 +144,8 @@ vpc_id: vpc-c248fda4 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ( +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict diff --git a/elb_instance.py b/elb_instance.py index e7337a81af7..9bd2560a93a 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -61,8 +61,8 @@ default: 0 type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -101,12 +101,12 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, + HAS_BOTO, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) class ElbManager: diff --git a/elb_network_lb.py b/elb_network_lb.py index 59252419670..8e4e40c60c8 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -127,8 +127,8 @@ - The duration in seconds to wait, used in conjunction with I(wait). type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. @@ -308,9 +308,9 @@ sample: vpc-0011223344 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.ansible.amazon.plugins.module_utils.aws.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.aws.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener def create_or_update_elb(elb_obj): diff --git a/elb_target.py b/elb_target.py index e6af7bac817..438c51e7392 100644 --- a/elb_target.py +++ b/elb_target.py @@ -68,8 +68,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 notes: - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it. 
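[Editor's note — illustration only, not part of the patch. The elb_target note above means a target registered with a port override cannot be deregistered by instance ID alone; the underlying boto3 call must repeat the port. A minimal sketch against the real elbv2 API, with a placeholder ARN and instance ID:]

import boto3

elbv2 = boto3.client('elbv2')
# Both Id and Port are required here because the target was registered with a
# port override; omitting Port would not match the original registration.
elbv2.deregister_targets(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/0123456789abcdef',  # placeholder
    Targets=[{'Id': 'i-0123456789abcdef0', 'Port': 8443}],  # placeholder target
)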
@@ -114,12 +114,12 @@ from time import time, sleep from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - AWSRetry, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + AWSRetry, + ) try: import boto3 diff --git a/elb_target_group.py b/elb_target_group.py index ad27c879826..34cbd3828c5 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -162,8 +162,8 @@ default: 200 type: int extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 notes: - Once a target group has been created, only its health check can then be modified using subsequent calls @@ -381,12 +381,12 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - compare_aws_tags, - ansible_dict_to_boto3_tag_list, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + boto3_tag_list_to_ansible_dict, + compare_aws_tags, + ansible_dict_to_boto3_tag_list, + ) from distutils.version import LooseVersion diff --git a/elb_target_group_info.py b/elb_target_group_info.py index e1e94c4e3b1..6c3e335d485 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -43,8 +43,8 @@ type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -218,12 +218,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def get_target_group_attributes(connection, module, target_group_arn): diff --git a/elb_target_info.py b/elb_target_info.py index aa2ec02e7db..15ca68338ff 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -34,8 +34,8 @@ - boto3 - botocore extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -219,8 +219,8 @@ # we can handle the lack of boto3 based on the ec2 module pass -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry class Target(object): diff --git a/execute_lambda.py b/execute_lambda.py index 6c3ff264ae1..901b83bafc5 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -18,8 +18,8 @@ - This module executes AWS Lambda functions, allowing synchronous and asynchronous invocation. 
extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: "Ryan Scott Brown (@ryansb) " requirements: @@ -141,7 +141,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native diff --git a/iam.py b/iam.py index a9a2f1021ae..e195a4a9adb 100644 --- a/iam.py +++ b/iam.py @@ -97,8 +97,8 @@ - "Jonathan I. Davila (@defionscode)" - "Paul Seiffert (@seiffert)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -189,12 +189,12 @@ pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO, - boto_exception, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO, + boto_exception, + connect_to_aws, + ec2_argument_spec, + get_aws_connection_info, + ) def _paginate(func, attr): diff --git a/iam_cert.py b/iam_cert.py index 38a979e9672..c48b122dbd9 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -81,8 +81,8 @@ requirements: [ "boto" ] author: Jonathan I. Davila (@defionscode) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -120,7 +120,7 @@ ''' from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws import os try: diff --git a/iam_group.py b/iam_group.py index 7ce18593e0a..cfac6062c46 100644 --- a/iam_group.py +++ b/iam_group.py @@ -71,8 +71,8 @@ type: bool requirements: [ botocore, boto3 ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -180,9 +180,9 @@ sample: / ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/iam_managed_policy.py b/iam_managed_policy.py index fd393359f24..435d2dcf800 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -54,8 +54,8 @@ author: "Dan Kozlowski (@dkhenry)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -128,14 +128,14 @@ pass # caught by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (boto3_conn, - get_aws_connection_info, - ec2_argument_spec, - AWSRetry, - camel_dict_to_snake_dict, - 
HAS_BOTO3, - compare_policies, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, + get_aws_connection_info, + ec2_argument_spec, + AWSRetry, + camel_dict_to_snake_dict, + HAS_BOTO3, + compare_policies, + ) from ansible.module_utils._text import to_native diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 6f7b9f1e3da..12dae087a70 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -25,8 +25,8 @@ - The name of the user whose MFA devices will be listed type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -70,12 +70,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def list_mfa_devices(connection, module): diff --git a/iam_password_policy.py b/iam_password_policy.py index 8eb03b96f78..ead34bf8868 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -80,8 +80,8 @@ type: bool aliases: [password_expire, expire] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -107,8 +107,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class IAMConnection(object): diff --git a/iam_policy.py b/iam_policy.py index 97209071845..87c7895bfb8 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -65,8 +65,8 @@ - "Jonathan I. 
Davila (@defionscode)" - "Dennis Podkovyrin (@sbj-ss)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -121,8 +121,8 @@ except ImportError: pass -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies from ansible.module_utils.six import string_types diff --git a/iam_policy_info.py b/iam_policy_info.py index 5e272784d18..b80428938f1 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -38,8 +38,8 @@ - Mark Chappell (@tremble) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -87,7 +87,7 @@ except ImportError: pass -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.six import string_types diff --git a/iam_role.py b/iam_role.py index d7da07b005c..fafa00541dc 100644 --- a/iam_role.py +++ b/iam_role.py @@ -93,8 +93,8 @@ type: bool requirements: [ botocore, boto3 ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -196,13 +196,13 @@ import json -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - compare_aws_tags, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, + ansible_dict_to_boto3_tag_list, + boto3_tag_list_to_ansible_dict, + compare_aws_tags, + ) try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/iam_role_info.py b/iam_role_info.py index 5a3753fd524..9912cfdaa20 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -35,8 +35,8 @@ - Mutually exclusive with I(name). 
type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -156,8 +156,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry @AWSRetry.exponential_backoff() diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 34f0db647a9..0d6f3e3f474 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -47,8 +47,8 @@ choices: [ "present", "absent" ] type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: - Tony (@axc450) @@ -111,8 +111,8 @@ except ImportError: pass -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class SAMLProviderManager: diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index a17d460e87e..771d374a9fe 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -26,8 +26,8 @@ - The name of the server certificate you are retrieving attributes for. type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -91,7 +91,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info def get_server_certs(iam, name=None): diff --git a/iam_user.py b/iam_user.py index f66738022bb..8bf60f97ca9 100644 --- a/iam_user.py +++ b/iam_user.py @@ -44,8 +44,8 @@ aliases: ['purge_policy', 'purge_managed_policies'] requirements: [ botocore, boto3 ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -108,8 +108,8 @@ ''' from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import traceback diff --git a/iam_user_info.py b/iam_user_info.py index d478f3306d3..b87a0763823 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -46,8 +46,8 @@ - botocore - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -107,8 +107,8 @@ sample: "test_user" ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import 
camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore diff --git a/kinesis_stream.py b/kinesis_stream.py index 33db98eca1b..2ed339c5bd0 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -78,8 +78,8 @@ - The GUID or alias for the KMS key. type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -194,7 +194,7 @@ pass # Taken care of by ec2.HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native diff --git a/lambda.py b/lambda.py index bf79bba0967..cad268c72ae 100644 --- a/lambda.py +++ b/lambda.py @@ -115,8 +115,8 @@ author: - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -216,9 +216,9 @@ ''' from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags import base64 import hashlib import traceback diff --git a/lambda_alias.py b/lambda_alias.py index ca2188c43a4..649fe95bf10 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -53,8 +53,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -155,12 +155,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) class AWSConnection: diff --git a/lambda_event.py b/lambda_event.py index b370e6026da..ca81d13ae95 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -85,8 +85,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -134,12 +134,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) # 
--------------------------------------------------------------------------------------------------- diff --git a/lambda_facts.py b/lambda_facts.py index aa93d32ebb9..3a95ee43b2c 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -44,8 +44,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -89,8 +89,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime import sys diff --git a/lambda_info.py b/lambda_info.py index 425c47f1ea5..11e5e97186e 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -41,8 +41,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -83,8 +83,8 @@ type: dict ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime import re diff --git a/lambda_policy.py b/lambda_policy.py index 35e7a273e02..5cf1ff22760 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -102,8 +102,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -143,7 +143,7 @@ import json import re from ansible.module_utils._text import to_native -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/lightsail.py b/lightsail.py index c09e63283d9..09cd7ac8458 100644 --- a/lightsail.py +++ b/lightsail.py @@ -74,8 +74,8 @@ - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -163,8 +163,8 @@ # will be caught by AnsibleAWSModule pass -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def find_instance_info(module, client, instance_name, fail_if_not_found=False): diff --git a/rds.py b/rds.py index f3eb28cdf47..1ac4a1b15ad 100644 --- a/rds.py +++ b/rds.py @@ -232,8 +232,8 @@ - "Bruce Pennypacker (@bpennypacker)" - "Will Thames (@willthames)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -539,8 +539,8 @@ HAS_RDS2 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, 
get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info DEFAULT_PORTS = { diff --git a/rds_instance.py b/rds_instance.py index adf27f78012..c0deddd89e3 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -22,8 +22,8 @@ - botocore - boto3 >= 1.5.0 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: - Sloane Hertel (@s-hertel) @@ -746,17 +746,17 @@ ''' from ansible.module_utils._text import to_text -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.ansible.amazon.plugins.module_utils.aws.rds import (ensure_tags, - arg_spec_to_rds_params, - call_method, - get_rds_method_attribute, - get_tags, - get_final_identifier, - ) -from ansible_collections.ansible.amazon.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.aws.rds import (ensure_tags, + arg_spec_to_rds_params, + call_method, + get_rds_method_attribute, + get_tags, + get_final_identifier, + ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry from ansible.module_utils.six import string_types from time import sleep diff --git a/rds_instance_info.py b/rds_instance_info.py index 717e68e0f3a..cc8535d5628 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -39,8 +39,8 @@ - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -347,12 +347,12 @@ sample: sg-abcd1234 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - AWSRetry, - camel_dict_to_snake_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + AWSRetry, + camel_dict_to_snake_dict, + ) try: diff --git a/rds_param_group.py b/rds_param_group.py index 6f9f6e19f2a..bdaf7fe4e7d 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -66,8 +66,8 @@ - "Scott Anderson (@tastychutney)" - "Will Thames (@willthames)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -118,9 +118,9 @@ ''' from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, 
HAS_BOTO3, compare_aws_tags -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, HAS_BOTO3, compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native diff --git a/rds_snapshot.py b/rds_snapshot.py index 939948678af..c00d21064f8 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -65,8 +65,8 @@ - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -206,9 +206,9 @@ pass # protected by AnsibleAWSModule # import module snippets -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list def get_snapshot(client, module, snapshot_id): diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index a4335195e4c..e887c3dd933 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -60,8 +60,8 @@ author: - "Will Thames (@willthames)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -296,8 +296,8 @@ sample: vpc-abcd1234 ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict try: import botocore diff --git a/rds_subnet_group.py b/rds_subnet_group.py index 1bba28dfbce..b0fb417a28f 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -41,8 +41,8 @@ type: list author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -98,7 +98,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info def get_subnet_group_info(subnet_group): diff --git a/redshift.py b/redshift.py index 
993e98286eb..997240aaf68 100644 --- a/redshift.py +++ b/redshift.py @@ -172,8 +172,8 @@ type: bool requirements: [ 'boto3' ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -261,8 +261,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code def _collect_facts(resource): diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index 76de6b2c89e..cd7a7c0d744 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -58,8 +58,8 @@ type: int requirements: [ "botocore", "boto3" ] extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws ''' @@ -91,7 +91,7 @@ RETURN = ''' # ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule class SnapshotController(object): diff --git a/redshift_info.py b/redshift_info.py index b0355906456..98dd0090c94 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -35,8 +35,8 @@ required: false type: dict extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws ''' @@ -286,8 +286,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def match_tags(tags_to_match, cluster): diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 7090dcfa314..f09c028988f 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -46,8 +46,8 @@ elements: str requirements: [ 'boto' ] extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -94,7 +94,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info def main(): diff --git a/route53.py b/route53.py index dda106e3f9b..3347f51aaca 100644 --- a/route53.py +++ b/route53.py @@ -137,7 +137,7 @@ - Bruce Pennypacker (@bpennypacker) - Mike Buzzetti (@jimbydamonk) extends_documentation_fragment: -- ansible.amazon.aws +- amazon.aws.aws ''' @@ -378,7 +378,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info MINIMUM_BOTO_VERSION = '2.28.0' 
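The hunks on either side of this point all perform the same mechanical rename: the ansible_collections.ansible.amazon import prefix becomes ansible_collections.amazon.aws, and the ansible.amazon.* documentation fragments become amazon.aws.*. A rename of this shape is scriptable; the following minimal Python sketch reproduces it (illustrative only: the file glob and replacement table here are assumptions, not the tooling actually used for this series):

#!/usr/bin/env python
# Sketch of the namespace rename applied throughout these hunks.
import glob

REPLACEMENTS = [
    # Python import paths inside module code (covers both .aws.core and .ec2)
    ('ansible_collections.ansible.amazon.plugins.module_utils',
     'ansible_collections.amazon.aws.plugins.module_utils'),
    # extends_documentation_fragment entries inside DOCUMENTATION blocks
    ('- ansible.amazon.aws', '- amazon.aws.aws'),
    ('- ansible.amazon.ec2', '- amazon.aws.ec2'),
]

for path in glob.glob('*.py'):
    with open(path) as handle:
        original = handle.read()
    updated = original
    for old, new in REPLACEMENTS:
        updated = updated.replace(old, new)
    if updated != original:
        with open(path, 'w') as handle:
            handle.write(updated)

Running a script like this over the module directory yields exactly the kind of two-line doc-fragment and import-path hunks seen in each file above.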
diff --git a/route53_health_check.py b/route53_health_check.py index 778b4c0595d..c974c72a5f1 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -83,8 +83,8 @@ type: int author: "zimbatm (@zimbatm)" extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -133,7 +133,7 @@ # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info # Things that can't get changed: diff --git a/route53_info.py b/route53_info.py index a3d8d76f6de..d3d2f3abae9 100644 --- a/route53_info.py +++ b/route53_info.py @@ -133,8 +133,8 @@ type: str author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -222,7 +222,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native diff --git a/route53_zone.py b/route53_zone.py index 2d13cb9073e..698ef524996 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -53,8 +53,8 @@ - Note that you can't associate a reusable delegation set with a private hosted zone. type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 author: "Christopher Troup (@minichate)" ''' @@ -123,7 +123,7 @@ ''' import time -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index a75e44db5a6..d61ca14e143 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -87,8 +87,8 @@ requirements: - boto3 extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -116,8 +116,8 @@ type: list ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError diff --git a/s3_lifecycle.py b/s3_lifecycle.py index d1275687144..e8fb62c0814 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -123,8 +123,8 @@ - The I(requester_pays) option does nothing and will be removed in Ansible 2.14. 
type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -208,7 +208,7 @@ except ImportError: pass # handled by AnsibleAwsModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule def create_lifecycle_rule(client, module): diff --git a/s3_logging.py b/s3_logging.py index 98033d04643..b13e5eea086 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -40,8 +40,8 @@ default: "" type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -71,7 +71,7 @@ HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info def compare_bucket_logging(bucket, target_bucket, target_prefix): diff --git a/s3_sync.py b/s3_sync.py index 350d9d56805..1123aea9a06 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -123,8 +123,8 @@ author: Ted Timmons (@tedder) extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -234,13 +234,13 @@ # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - HAS_BOTO3, - boto_exception, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, + ec2_argument_spec, + boto3_conn, + get_aws_connection_info, + HAS_BOTO3, + boto_exception, + ) from ansible.module_utils._text import to_text try: diff --git a/s3_website.py b/s3_website.py index eaa8bfb5baa..1c87ed73ba8 100644 --- a/s3_website.py +++ b/s3_website.py @@ -49,8 +49,8 @@ type: str extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -173,12 +173,12 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, + ) def _create_redirect_dict(url): diff --git a/sns.py b/sns.py index bb4093a0e50..97f202fdc19 100644 --- a/sns.py +++ b/sns.py @@ -83,8 +83,8 @@ choices: ['json', 'string'] type: str extends_documentation_fragment: -- ansible.amazon.ec2 -- ansible.amazon.aws +- amazon.aws.ec2 +- amazon.aws.aws requirements: - boto3 @@ -142,7 +142,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule def arn_topic_lookup(module, client, short_topic): diff --git a/sns_topic.py b/sns_topic.py index 82c21715e67..141906edb18 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -71,8 +71,8 @@ default: true type: bool extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 
requirements: [ "boto" ] ''' @@ -224,8 +224,8 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict class SnsTopicManager(object): diff --git a/sqs_queue.py b/sqs_queue.py index eddd8eaaa2b..ca8041f11f8 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -100,8 +100,8 @@ type: bool default: false extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 ''' @@ -223,13 +223,13 @@ ''' import json -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import (AWSRetry, - camel_dict_to_snake_dict, - compare_aws_tags, - snake_dict_to_camel_dict, - compare_policies, - ) +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, + camel_dict_to_snake_dict, + compare_aws_tags, + snake_dict_to_camel_dict, + compare_policies, + ) try: from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError diff --git a/sts_assume_role.py b/sts_assume_role.py index 3c03f291706..8283c49d862 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -57,8 +57,8 @@ notes: - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token. extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -110,8 +110,8 @@ ''' -from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, ParamValidationError diff --git a/sts_session_token.py b/sts_session_token.py index 1584dbcc50d..e83d71ffd69 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -36,8 +36,8 @@ notes: - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token). 
extends_documentation_fragment: -- ansible.amazon.aws -- ansible.amazon.ec2 +- amazon.aws.aws +- amazon.aws.ec2 requirements: - boto3 @@ -90,7 +90,7 @@ HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info def normalize_credentials(credentials): From 664a64d7a635526e93c730c2ca5d8d4f471422be Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 24 Apr 2020 23:09:16 +0200 Subject: [PATCH 004/683] Fix docs issues in aws_s3_bucket_info and ec2_vpc_vpn (#47) * Make sure that example string is parsed as string and not as datetime. * Make sure that expression is treated as string. This is probably not the correct value either, but at least it doesn't cause ansible-doc --json to crash. --- aws_s3_bucket_info.py | 2 +- ec2_vpc_vpn.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 46f2f20117b..a69ae8a8b99 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -47,7 +47,7 @@ description: "List of buckets" returned: always sample: - - creation_date: 2017-07-06 15:05:12 +00:00 + - creation_date: '2017-07-06 15:05:12 +00:00' name: my_bucket type: list ''' diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index a1b6bd82b07..5f8707bf909 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -288,7 +288,7 @@ vgw_telemetry: [{ 'outside_ip_address': 'string', 'status': 'up', - 'last_status_change': datetime(2015, 1, 1), + 'last_status_change': 'datetime(2015, 1, 1)', 'status_message': 'string', 'accepted_route_count': 123 }] From ee5d8df85f95f386bb35177687b79828c61a8b1f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 29 Apr 2020 23:19:49 +0200 Subject: [PATCH 005/683] Fix more doc issues where strings are parsed as datetimes by YAML parser. 
(#55) --- elasticache_snapshot.py | 6 +++--- elb_classic_lb_info.py | 2 +- sts_assume_role.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index d5f1d0204b1..7bea35dee8c 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -78,7 +78,7 @@ http_headers: content-length: 1490 content-type: text/xml - date: Tue, 07 Feb 2017 16:43:04 GMT + date: 'Tue, 07 Feb 2017 16:43:04 GMT' x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d http_status_code: 200 request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d @@ -89,7 +89,7 @@ type: dict sample: auto_minor_version_upgrade: true - cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00 + cache_cluster_create_time: '2017-02-01T17:43:58.261000+00:00' cache_cluster_id: test-please-delete cache_node_type: cache.m1.small cache_parameter_group_name: default.redis3.2 @@ -97,7 +97,7 @@ engine: redis engine_version: 3.2.4 node_snapshots: - cache_node_create_time: 2017-02-01T17:43:58.261000+00:00 + cache_node_create_time: '2017-02-01T17:43:58.261000+00:00' cache_node_id: 0001 cache_size: num_cache_nodes: 1 diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 42be1a2265f..8708aa95b32 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -104,7 +104,7 @@ backend_server_description: [] canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com canonical_hosted_zone_name_id: XXXXXXXXXXXXXX - created_time: 2017-08-23T18:25:03.280000+00:00 + created_time: '2017-08-23T18:25:03.280000+00:00' dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com health_check: healthy_threshold: 10 diff --git a/sts_assume_role.py b/sts_assume_role.py index 8283c49d862..7f86c34a475 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -73,7 +73,7 @@ type: dict sample: access_key: XXXXXXXXXXXXXXXXXXXX - expiration: 2017-11-11T11:11:11+00:00 + expiration: '2017-11-11T11:11:11+00:00' secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX sts_user: From 15d789f86de05b0efe9900cbc256fb41ed2c71f7 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Wed, 29 Apr 2020 23:33:40 +0200 Subject: [PATCH 006/683] aws_kms: Fix policy arg to actually work with the JSON strings that it needs (#43) * Fix policy arg to actually work with the JSON strings that it needs. Also update docs. * Fix typo in docs * Fix long line in example * Update type in docs too Co-Authored-By: Mark Chappell * Remove unnecessary documentation for aws_kms policy param Co-Authored-By: Mark Chappell Co-authored-by: Mark Chappell --- aws_kms.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index 5ad254ed6c9..879676286d3 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -164,9 +164,9 @@ type: dict policy: description: - - policy to apply to the KMS key + - policy to apply to the KMS key.
- See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - type: str + type: json author: - Ted Timmons (@tedder) - Will Thames (@willthames) @@ -224,6 +224,18 @@ operations: - Decrypt - RetireGrant + +- name: Update IAM policy on an existing KMS key + aws_kms: + alias: my-kms-key + policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { } ]}' + state: present + +- name: Example using lookup for policy json + aws_kms: + alias: my-kms-key + policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" + state: present ''' RETURN = ''' @@ -1017,7 +1029,7 @@ def main(): tags=dict(type='dict', default={}), purge_tags=dict(type='bool', default=False), grants=dict(type='list', default=[]), - policy=dict(), + policy=dict(type='json'), purge_grants=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), enable_key_rotation=(dict(type='bool')) From e062ed37a1677926fa19f6cac22e31ae7455e15d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 19 May 2020 03:00:53 +0200 Subject: [PATCH 007/683] elb_target_group only set stickiness options when we're using stickiness (#74) When using UDP AWS will throw an error at you even if stickiness is disabled: botocore.errorfactory.InvalidConfigurationRequestException: An error occurred (InvalidConfigurationRequest) when calling the ModifyTargetGroupAttributes operation: Stickiness type 'lb_cookie' is not supported for target groups with the UDP protocol --- elb_target_group.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 34cbd3828c5..82240052be2 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -107,8 +107,8 @@ type: int stickiness_type: description: - - The type of sticky sessions. The possible value is lb_cookie. - default: lb_cookie + - The type of sticky sessions. + - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers. 
type: str successful_response_codes: description: @@ -547,7 +547,7 @@ def create_or_update_target_group(connection, module): # Only need to check response code and path for http(s) health checks if tg['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: # Health check path - if 'HealthCheckPath'in params and tg['HealthCheckPath'] != params['HealthCheckPath']: + if 'HealthCheckPath' in params and tg['HealthCheckPath'] != params['HealthCheckPath']: health_check_params['HealthCheckPath'] = params['HealthCheckPath'] # Matcher (successful response codes) @@ -744,8 +744,8 @@ def create_or_update_target_group(connection, module): if stickiness_lb_cookie_duration is not None: if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) - if stickiness_type is not None and "stickiness_type" in current_tg_attributes: - if stickiness_type != current_tg_attributes['stickiness_type']: + if stickiness_type is not None: + if stickiness_type != current_tg_attributes.get('stickiness_type'): update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) if update_attributes: @@ -825,7 +825,7 @@ def main(): protocol=dict(choices=protocols_list), purge_tags=dict(default=True, type='bool'), stickiness_enabled=dict(type='bool'), - stickiness_type=dict(default='lb_cookie'), + stickiness_type=dict(), stickiness_lb_cookie_duration=dict(type='int'), state=dict(required=True, choices=['present', 'absent']), successful_response_codes=dict(), From 04ac277a72f841680849e9016395c8a9a7e3970f Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Tue, 19 May 2020 16:06:12 -0700 Subject: [PATCH 008/683] Remove METADATA and cleanup galaxy.yml (#70) * Remove ANSIBLE_METADATA entirely, see ansible/ansible/pull/69454. Remove `license` field from galaxy.yml, in favor of `license_file`. 
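Every hunk in the file list and diff below deletes the same boilerplate block from the top of a module; per the referenced ansible/ansible pull request, the dict is no longer consumed. As a composite sketch (not any single file from this diff; the module name and short_description are placeholders), a header after the change looks roughly like this:

# Removed from the top of every module by this commit:
#
#     ANSIBLE_METADATA = {'metadata_version': '1.1',
#                         'status': ['preview'],
#                         'supported_by': 'community'}
#
# What remains is only the compatibility boilerplate and the docs blocks.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
module: example_module
short_description: placeholder showing the header shape after the change
'''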
--- aws_acm.py | 3 --- aws_acm_info.py | 3 --- aws_api_gateway.py | 4 ---- aws_application_scaling_policy.py | 4 ---- aws_batch_compute_environment.py | 3 --- aws_batch_job_definition.py | 3 --- aws_batch_job_queue.py | 3 --- aws_codebuild.py | 4 ---- aws_codecommit.py | 3 --- aws_codepipeline.py | 4 ---- aws_config_aggregation_authorization.py | 5 ----- aws_config_aggregator.py | 5 ----- aws_config_delivery_channel.py | 5 ----- aws_config_recorder.py | 5 ----- aws_config_rule.py | 5 ----- aws_direct_connect_connection.py | 4 ---- aws_direct_connect_gateway.py | 5 ----- aws_direct_connect_link_aggregation_group.py | 4 ---- aws_direct_connect_virtual_interface.py | 4 ---- aws_eks_cluster.py | 5 ----- aws_elasticbeanstalk_app.py | 4 ---- aws_glue_connection.py | 3 --- aws_glue_job.py | 3 --- aws_inspector_target.py | 3 --- aws_kms.py | 4 ---- aws_kms_info.py | 4 ---- aws_region_info.py | 5 ----- aws_s3_bucket_info.py | 4 ---- aws_s3_cors.py | 3 --- aws_secret.py | 4 ---- aws_ses_identity.py | 5 ----- aws_ses_identity_policy.py | 5 ----- aws_ses_rule_set.py | 4 ---- aws_sgw_info.py | 3 --- aws_ssm_parameter_store.py | 3 --- aws_step_functions_state_machine.py | 5 ----- aws_step_functions_state_machine_execution.py | 5 ----- aws_waf_condition.py | 3 --- aws_waf_info.py | 3 --- aws_waf_rule.py | 3 --- aws_waf_web_acl.py | 3 --- cloudformation_exports_info.py | 3 --- cloudformation_stack_set.py | 4 ---- cloudfront_distribution.py | 4 ---- cloudfront_info.py | 5 ----- cloudfront_invalidation.py | 3 --- cloudfront_origin_access_identity.py | 3 --- cloudtrail.py | 5 ----- cloudwatchevent_rule.py | 5 ----- cloudwatchlogs_log_group.py | 4 ---- cloudwatchlogs_log_group_info.py | 3 --- cloudwatchlogs_log_group_metric_filter.py | 3 --- data_pipeline.py | 4 ---- dms_endpoint.py | 5 ----- dms_replication_subnet_group.py | 5 ----- dynamodb_table.py | 5 ----- dynamodb_ttl.py | 4 ---- ec2_ami_copy.py | 4 ---- ec2_asg.py | 4 ---- ec2_asg_info.py | 5 ----- ec2_asg_lifecycle_hook.py | 3 --- ec2_customer_gateway.py | 4 ---- ec2_customer_gateway_info.py | 3 --- ec2_eip.py | 5 ----- ec2_eip_info.py | 4 ---- ec2_elb.py | 5 ----- ec2_elb_info.py | 4 ---- ec2_instance.py | 4 ---- ec2_instance_info.py | 4 ---- ec2_launch_template.py | 5 ----- ec2_lc.py | 4 ---- ec2_lc_find.py | 5 ----- ec2_lc_info.py | 5 ----- ec2_metric_alarm.py | 5 ----- ec2_placement_group.py | 4 ---- ec2_placement_group_info.py | 4 ---- ec2_scaling_policy.py | 5 ----- ec2_snapshot_copy.py | 6 ------ ec2_transit_gateway.py | 5 ----- ec2_transit_gateway_info.py | 5 ----- ec2_vpc_egress_igw.py | 4 ---- ec2_vpc_endpoint.py | 5 ----- ec2_vpc_endpoint_info.py | 4 ---- ec2_vpc_igw.py | 5 ----- ec2_vpc_igw_info.py | 5 ----- ec2_vpc_nacl.py | 5 ----- ec2_vpc_nacl_info.py | 5 ----- ec2_vpc_nat_gateway.py | 5 ----- ec2_vpc_nat_gateway_info.py | 5 ----- ec2_vpc_peer.py | 5 ----- ec2_vpc_peering_info.py | 4 ---- ec2_vpc_route_table.py | 4 ---- ec2_vpc_route_table_info.py | 5 ----- ec2_vpc_vgw.py | 5 ----- ec2_vpc_vgw_info.py | 5 ----- ec2_vpc_vpn.py | 4 ---- ec2_vpc_vpn_info.py | 5 ----- ec2_win_password.py | 5 ----- ecs_attribute.py | 4 ---- ecs_cluster.py | 5 ----- ecs_ecr.py | 3 --- ecs_service.py | 5 ----- ecs_service_info.py | 4 ---- ecs_tag.py | 3 --- ecs_task.py | 5 ----- ecs_taskdefinition.py | 5 ----- ecs_taskdefinition_info.py | 4 ---- efs.py | 5 ----- efs_info.py | 5 ----- elasticache.py | 5 ----- elasticache_info.py | 4 ---- elasticache_parameter_group.py | 5 ----- elasticache_snapshot.py | 5 ----- elasticache_subnet_group.py | 5 ----- 
elb_application_lb.py | 3 --- elb_application_lb_info.py | 4 ---- elb_classic_lb.py | 5 ----- elb_classic_lb_info.py | 4 ---- elb_instance.py | 5 ----- elb_network_lb.py | 3 --- elb_target.py | 3 --- elb_target_group.py | 4 ---- elb_target_group_info.py | 4 ---- elb_target_info.py | 4 ---- execute_lambda.py | 4 ---- iam.py | 5 ----- iam_cert.py | 4 ---- iam_group.py | 3 --- iam_managed_policy.py | 4 ---- iam_mfa_device_info.py | 5 ----- iam_password_policy.py | 5 ----- iam_policy.py | 3 --- iam_policy_info.py | 4 ---- iam_role.py | 4 ---- iam_role_info.py | 5 ----- iam_saml_federation.py | 3 --- iam_server_certificate_info.py | 5 ----- iam_user.py | 3 --- iam_user_info.py | 7 ------- kinesis_stream.py | 5 ----- lambda.py | 5 ----- lambda_alias.py | 5 ----- lambda_event.py | 5 ----- lambda_facts.py | 4 ---- lambda_info.py | 5 ----- lambda_policy.py | 3 --- lightsail.py | 4 ---- rds.py | 5 ----- rds_instance.py | 5 ----- rds_instance_info.py | 4 ---- rds_param_group.py | 5 ----- rds_snapshot.py | 4 ---- rds_snapshot_info.py | 4 ---- rds_subnet_group.py | 5 ----- redshift.py | 4 ---- redshift_cross_region_snapshots.py | 4 ---- redshift_info.py | 4 ---- redshift_subnet_group.py | 5 ----- route53.py | 5 ----- route53_health_check.py | 5 ----- route53_info.py | 5 ----- route53_zone.py | 5 ----- s3_bucket_notification.py | 4 ---- s3_lifecycle.py | 5 ----- s3_logging.py | 5 ----- s3_sync.py | 4 ---- s3_website.py | 5 ----- sns.py | 5 ----- sns_topic.py | 5 ----- sqs_queue.py | 5 ----- sts_assume_role.py | 5 ----- sts_session_token.py | 5 ----- 172 files changed, 740 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 3e24adfd364..93034e7e71f 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -24,9 +24,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_acm diff --git a/aws_acm_info.py b/aws_acm_info.py index 094d8a2ac6c..4cc072a7aef 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_acm_info diff --git a/aws_api_gateway.py b/aws_api_gateway.py index fe3cd969e6f..1a508299e06 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -8,10 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: aws_api_gateway diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 6fefaee2c3d..51d98743da3 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 6329bba214c..38a22ca69cd 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git 
a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 07bcf127295..6f385ef20fc 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index 77726175510..c888b560e0b 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_codebuild.py b/aws_codebuild.py index 219289ee2db..ca79d056bce 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_codecommit.py b/aws_codecommit.py index f8d7df05d2a..dc7bbaee160 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -6,9 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 84afe1a4dc1..1784d7859c4 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index 78126794905..d2057a18591 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: aws_config_aggregation_authorization diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 3f39f57cbc9..7b97fded3c9 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: aws_config_aggregator diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index 0005d68d462..f0fda8e61f4 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: aws_config_delivery_channel diff --git a/aws_config_recorder.py b/aws_config_recorder.py index a614748e739..970e6f8c0bc 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: aws_config_recorder diff --git a/aws_config_rule.py b/aws_config_rule.py index e2a328ce1d3..9ce254def76 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 
'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: aws_config_rule diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 601745de4ff..0b2f16ea083 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index d2fc624d747..7fa8ca23740 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: aws_direct_connect_gateway author: Gobin Sougrakpam (@gobins) diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index dd7d57dcded..92e8433f5ed 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 4a5276631ba..3883d12331b 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 1b1693be225..6cb7d4fe0ce 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -5,11 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index c7f45a34e86..88c6b58d4af 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' - } DOCUMENTATION = ''' --- diff --git a/aws_glue_connection.py b/aws_glue_connection.py index ef1f753abf9..d2dec7b8db8 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_glue_job.py b/aws_glue_job.py index 6fbe23603d8..7a9d76d0890 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_inspector_target.py b/aws_inspector_target.py index 1d33ec00411..00d5ac35ba1 100644 --- 
a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_kms.py b/aws_kms.py index 879676286d3..577ec365d5c 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_kms_info.py b/aws_kms_info.py index c615d3a293f..a51b30a85cc 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_region_info.py b/aws_region_info.py index dee88d852ba..b20bf9f84ef 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -5,11 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'supported_by': 'community', - 'status': ['preview'] -} DOCUMENTATION = ''' module: aws_region_info diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index a69ae8a8b99..4646e40e3d5 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: aws_s3_bucket_info diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 272b177e6cf..7a63596e6ab 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -6,9 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_secret.py b/aws_secret.py index afcb2976c3b..0f4a8e78c9f 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -6,10 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = r''' --- diff --git a/aws_ses_identity.py b/aws_ses_identity.py index ccc40344fe4..81a80630f97 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -5,11 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py index 5bbf91343b8..a657a1a4175 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -5,11 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index b5e78d1b419..d351f2b8d26 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 
'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/aws_sgw_info.py b/aws_sgw_info.py index b1ac00ef70c..f18084d06e7 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -7,9 +7,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 9e0683d1c85..7e9cb76e897 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index 9a9bdf365a4..a5261e593da 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index d4c2b7324d4..b64efacbaf7 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/aws_waf_condition.py b/aws_waf_condition.py index c9184103cb7..bab1f97772e 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -6,9 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_waf_condition diff --git a/aws_waf_info.py b/aws_waf_info.py index 8b3b6e87da7..a3169453f1d 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_waf_info diff --git a/aws_waf_rule.py b/aws_waf_rule.py index 95dc831acbf..5d1c8d3667b 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -6,9 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_waf_rule diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index a942a4cb77a..22da20a7692 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_waf_web_acl diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index e254d1deafa..50f4f847af5 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -5,9 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': 
['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' module: cloudformation_exports_info diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 6d20469c764..69f53669f51 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 1b7a8f07eab..6597d37bbef 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/cloudfront_info.py b/cloudfront_info.py index 71f584b852a..eb89ff35fad 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: cloudfront_info diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 1cf6f3dfa7e..fe84099931d 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 9e788fdc21f..fd66d587ab2 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/cloudtrail.py b/cloudtrail.py index 5fb0858d9ab..c4a5f2e6e74 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: cloudtrail diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 8167ac6a3de..23f3efa7aec 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = r''' --- module: cloudwatchevent_rule diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index af883b32d15..54687816f03 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 4bbc25534d2..61b9ad235e2 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -6,9 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index 52f7f3c9306..83c71176600 100644 --- 
a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -4,9 +4,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/data_pipeline.py b/data_pipeline.py index 5637c590ed4..ac06922a7b3 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -7,10 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: data_pipeline diff --git a/dms_endpoint.py b/dms_endpoint.py index aac3f2aeab4..1fea45a4a04 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: dms_endpoint diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 3dfc121d3af..9cb0caf060f 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: dms_replication_subnet_group diff --git a/dynamodb_table.py b/dynamodb_table.py index 47a65123a84..1edf139dbfa 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: dynamodb_table diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index d4ed856c215..654b311c72a 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: dynamodb_ttl diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 832d3155a91..974993a42ed 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -6,10 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_asg.py b/ec2_asg.py index 3a852e1a0e3..3f43193f102 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 0f30eaa47bc..b2c971ae81b 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_asg_info diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index 25f143f1ce5..50e483d0ed1 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -6,9 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index d14ffcfbce4..675e69a430f 100644 --- 
a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 65ed984f486..001c88ae8cf 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -5,9 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/ec2_eip.py b/ec2_eip.py index b7441826a9b..a43fe9a0a50 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -8,11 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_eip diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 047041ff755..6001110b71b 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_elb.py b/ec2_elb.py index bf77dbca503..f820453a2d8 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_elb diff --git a/ec2_elb_info.py b/ec2_elb_info.py index d83b4fe119d..0c4c2dc1b76 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -16,10 +16,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_instance.py b/ec2_instance.py index 0b268a6f05a..4238a7c15e7 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: ec2_instance diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 9bb1ff56e7d..79d056d4ea6 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: ec2_instance_info diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 2de514ad247..d80a226f155 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -4,11 +4,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/ec2_lc.py b/ec2_lc.py index 5211391d1cf..b9f4740a73f 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -6,10 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 8ff3ddb0c39..043df722367 100644 --- a/ec2_lc_find.py +++ 
b/ec2_lc_find.py @@ -8,11 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_lc_find diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 9a16f2fd0ab..e7cdd9b24f1 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_lc_info diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 6d87ea9595b..804474294e3 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -18,11 +18,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_metric_alarm short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" diff --git a/ec2_placement_group.py b/ec2_placement_group.py index f4b3a7e2fbf..4298c6522ec 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 47df4dd7ed4..5a4bc09a842 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 01b3210afde..7beb95c0a0c 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_scaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 98ac9c8b4df..38b22315a2f 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -6,12 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - DOCUMENTATION = ''' --- diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 9ab13d83042..4f2f4dbd0c0 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_transit_gateway short_description: Create and delete AWS Transit Gateways diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index e4fbb14821e..d0f1409a2b5 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -6,11 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'supported_by': 'community', - 'status': ['preview'] -} DOCUMENTATION = ''' module: ec2_transit_gateway_info diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 6f0c1ab36b1..9b4040484c3 100644 --- a/ec2_vpc_egress_igw.py +++ 
b/ec2_vpc_egress_igw.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 760af35c62e..1b89387bf36 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_vpc_endpoint short_description: Create and delete AWS VPC Endpoints. diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index a43ef54ac13..fa4f8c59713 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -5,10 +5,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' module: ec2_vpc_endpoint_info short_description: Retrieves AWS VPC endpoints details using AWS methods. diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 43b74c163d5..0c85169c7d7 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_igw diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 1c407a36b89..7bdff093461 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_igw_info diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 213cf167cd4..23130310720 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_vpc_nacl short_description: create and delete Network ACLs. diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index b5a8d3bc251..658816545b4 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -5,11 +5,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_nacl_info diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 09fc70de335..d8ee5167b67 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_nat_gateway diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index a4891391854..a4e7ac6db99 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_vpc_nat_gateway_info short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods. 
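Every hunk in this patch makes the same mechanical edit, so it can help to see the change once in consolidated form. Below is a minimal sketch of the shared module prologue, assuming a hypothetical module name (example) and generic header comments rather than any one file from this patch; the commented-out dict is the exact boilerplate each hunk above and below deletes:

#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Removed by these hunks; the status/support routing this dict carried is
# no longer read from module source once the modules ship in a collection:
# ANSIBLE_METADATA = {'metadata_version': '1.1',
#                     'status': ['preview'],
#                     'supported_by': 'community'}

DOCUMENTATION = '''
---
module: example
short_description: Placeholder showing where the documentation resumes after the removal.
'''

The only variation between hunks is the key order inside the dict and how many blank lines followed it, which is why the hunk ranges differ (for example @@ -6,11 +6,6 @@ versus @@ -4,9 +4,6 @@).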
diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 28de7788ef0..c029eb5afa0 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_vpc_peer short_description: create, delete, accept, and reject VPC peering connections between two VPCs. diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 2561a209283..008f75fbd10 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' module: ec2_vpc_peering_info short_description: Retrieves AWS VPC Peering details using AWS methods. diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index a90242148b2..442efca01d7 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index a86e9542570..b0b5b189de1 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_route_table_info diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 8fce681bbe8..004a64c394c 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: ec2_vpc_vgw short_description: Create and delete AWS VPN Virtual Gateways. 
diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 64f522d80db..177f4a7ea21 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_vpc_vgw_info diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 5f8707bf909..c344cc8e73e 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -5,10 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 991977346d0..3041a747d69 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} - - DOCUMENTATION = ''' --- module: ec2_vpc_vpn_info diff --git a/ec2_win_password.py b/ec2_win_password.py index 7ed14ed7204..782ff16829c 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ec2_win_password diff --git a/ecs_attribute.py b/ecs_attribute.py index 9e812275e9e..67b1664305e 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: ecs_attribute diff --git a/ecs_cluster.py b/ecs_cluster.py index 6e6cc54b255..bf41601b011 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ecs_cluster diff --git a/ecs_ecr.py b/ecs_ecr.py index 4d2bf1bab09..d2eb786370b 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -8,9 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/ecs_service.py b/ecs_service.py index 1446704a598..96d08b02633 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ecs_service diff --git a/ecs_service_info.py b/ecs_service_info.py index f82f5c5e6a8..4e479bbb443 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/ecs_tag.py b/ecs_tag.py index a9def34ac41..fd49461fb07 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -3,9 +3,6 @@ # Copyright: (c) 2019, Michael Pechner # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = r''' diff --git a/ecs_task.py b/ecs_task.py index 41c381c269a..1f831a413ea 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -6,11 
+6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ecs_task diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 895a52d29d1..def891bb527 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: ecs_taskdefinition diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index 2c6fe847cea..3711c47767a 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/efs.py b/efs.py index 5d72daa2c02..5d6ce13e707 100644 --- a/efs.py +++ b/efs.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: efs diff --git a/efs_info.py b/efs_info.py index 69a584215fe..9835be169e4 100644 --- a/efs_info.py +++ b/efs_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: efs_info diff --git a/elasticache.py b/elasticache.py index a59eadc11e9..e1c255ebbde 100644 --- a/elasticache.py +++ b/elasticache.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elasticache diff --git a/elasticache_info.py b/elasticache_info.py index 9126ee57f4e..86763b39a0d 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' module: elasticache_info short_description: Retrieve information for AWS ElastiCache clusters diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 9e64320def2..c701d6a763d 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elasticache_parameter_group diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 7bea35dee8c..f932544b505 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elasticache_snapshot diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index ed56153c4f9..72b41f08ea6 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elasticache_subnet_group diff --git a/elb_application_lb.py b/elb_application_lb.py index 9c49e648409..28915d551fa 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -17,9 +17,6 @@ from __future__ 
import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index b347941ee79..ca5290e3892 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: elb_application_lb_info diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 5959ebaf3f0..bd309465abe 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elb_classic_lb diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 8708aa95b32..a38aee6b253 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -16,10 +16,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/elb_instance.py b/elb_instance.py index 9bd2560a93a..0c41ef3a6e4 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: elb_instance diff --git a/elb_network_lb.py b/elb_network_lb.py index 8e4e40c60c8..858cfaaffd9 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -6,9 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/elb_target.py b/elb_target.py index 438c51e7392..553e5f7f860 100644 --- a/elb_target.py +++ b/elb_target.py @@ -5,9 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/elb_target_group.py b/elb_target_group.py index 82240052be2..3818d5da7df 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: elb_target_group diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 6c3e335d485..f285221eb33 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: elb_target_group_info diff --git a/elb_target_info.py b/elb_target_info.py index 15ca68338ff..1101965a318 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -3,10 +3,6 @@ # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -ANSIBLE_METADATA = {"metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community"} - DOCUMENTATION = ''' --- diff --git a/execute_lambda.py b/execute_lambda.py index 
901b83bafc5..45d0eef8e2b 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/iam.py b/iam.py index e195a4a9adb..9b5bae141c2 100644 --- a/iam.py +++ b/iam.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: iam diff --git a/iam_cert.py b/iam_cert.py index c48b122dbd9..24e317b71c6 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -17,10 +17,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/iam_group.py b/iam_group.py index cfac6062c46..672de888fba 100644 --- a/iam_group.py +++ b/iam_group.py @@ -17,9 +17,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 435d2dcf800..0631a243dd0 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: iam_managed_policy diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 12dae087a70..2431a57c063 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: iam_mfa_device_info diff --git a/iam_password_policy.py b/iam_password_policy.py index ead34bf8868..53c1d5bc479 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: iam_password_policy diff --git a/iam_policy.py b/iam_policy.py index 87c7895bfb8..105d985df5b 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -5,9 +5,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/iam_policy_info.py b/iam_policy_info.py index b80428938f1..73eded00824 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: iam_policy_info diff --git a/iam_role.py b/iam_role.py index fafa00541dc..432fcab64ad 100644 --- a/iam_role.py +++ b/iam_role.py @@ -4,10 +4,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/iam_role_info.py b/iam_role_info.py index 9912cfdaa20..bf32d32adbf 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -6,11 
+6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: iam_role_info diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 0d6f3e3f474..7f8077bbb8a 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -18,9 +18,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 771d374a9fe..49daffe72c8 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: iam_server_certificate_info diff --git a/iam_user.py b/iam_user.py index 8bf60f97ca9..5b2ee4503b1 100644 --- a/iam_user.py +++ b/iam_user.py @@ -5,9 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' --- diff --git a/iam_user_info.py b/iam_user_info.py index b87a0763823..7a8b07eb602 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -8,13 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - - DOCUMENTATION = ''' --- module: iam_user_info diff --git a/kinesis_stream.py b/kinesis_stream.py index 2ed339c5bd0..50c7e12adaa 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: kinesis_stream diff --git a/lambda.py b/lambda.py index cad268c72ae..31ab9319cfc 100644 --- a/lambda.py +++ b/lambda.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: lambda diff --git a/lambda_alias.py b/lambda_alias.py index 649fe95bf10..2be5e56eae2 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: lambda_alias diff --git a/lambda_event.py b/lambda_event.py index ca81d13ae95..2122bcaa21f 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: lambda_event diff --git a/lambda_facts.py b/lambda_facts.py index 3a95ee43b2c..f9399769954 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -5,10 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/lambda_info.py b/lambda_info.py index 11e5e97186e..06a356204f7 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = 
''' --- module: lambda_info diff --git a/lambda_policy.py b/lambda_policy.py index 5cf1ff22760..014dc3b27f6 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -6,9 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} DOCUMENTATION = ''' --- diff --git a/lightsail.py b/lightsail.py index 09cd7ac8458..c110b5e2aa7 100644 --- a/lightsail.py +++ b/lightsail.py @@ -7,10 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: lightsail diff --git a/rds.py b/rds.py index 1ac4a1b15ad..cc123f9c22f 100644 --- a/rds.py +++ b/rds.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: rds diff --git a/rds_instance.py b/rds_instance.py index c0deddd89e3..efbffd8aa8b 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -5,11 +5,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} DOCUMENTATION = ''' --- diff --git a/rds_instance_info.py b/rds_instance_info.py index cc8535d5628..ab6bcbcc0b4 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -8,10 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} - DOCUMENTATION = ''' --- module: rds_instance_info diff --git a/rds_param_group.py b/rds_param_group.py index bdaf7fe4e7d..a30df260a25 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: rds_param_group diff --git a/rds_snapshot.py b/rds_snapshot.py index c00d21064f8..872af39c8e6 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -8,10 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} - DOCUMENTATION = ''' --- module: rds_snapshot diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index e887c3dd933..5d330b47e66 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -8,10 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} - DOCUMENTATION = ''' --- module: rds_snapshot_info diff --git a/rds_subnet_group.py b/rds_subnet_group.py index b0fb417a28f..99bfb002752 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: rds_subnet_group diff --git a/redshift.py b/redshift.py index 997240aaf68..86343d4ef79 100644 --- a/redshift.py +++ b/redshift.py @@ -7,10 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index cd7a7c0d744..d7931a812b6 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -7,10 +7,6 @@ __metaclass__ 
= type -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'metadata_version': '1.1'} - DOCUMENTATION = ''' --- module: redshift_cross_region_snapshots diff --git a/redshift_info.py b/redshift_info.py index 98dd0090c94..6763aee9428 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -7,10 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: redshift_info diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index f09c028988f..cb9d085e8c8 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- author: diff --git a/route53.py b/route53.py index 3347f51aaca..c93d941f39f 100644 --- a/route53.py +++ b/route53.py @@ -8,11 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: route53 diff --git a/route53_health_check.py b/route53_health_check.py index c974c72a5f1..414f27a3eee 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: route53_health_check diff --git a/route53_info.py b/route53_info.py index d3d2f3abae9..454875bb47f 100644 --- a/route53_info.py +++ b/route53_info.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: route53_info short_description: Retrieves route53 details using AWS methods diff --git a/route53_zone.py b/route53_zone.py index 698ef524996..3eee17506f4 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: route53_zone short_description: add or delete Route53 zones diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index d61ca14e143..41eaf3cfe72 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -6,10 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- diff --git a/s3_lifecycle.py b/s3_lifecycle.py index e8fb62c0814..32ac0dfd246 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: s3_lifecycle diff --git a/s3_logging.py b/s3_logging.py index b13e5eea086..9d074f4876a 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: s3_logging diff --git a/s3_sync.py b/s3_sync.py index 1123aea9a06..05f1ffa92df 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -17,10 +17,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 
'community'} - DOCUMENTATION = ''' --- diff --git a/s3_website.py b/s3_website.py index 1c87ed73ba8..5f0822af6fe 100644 --- a/s3_website.py +++ b/s3_website.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: s3_website diff --git a/sns.py b/sns.py index 97f202fdc19..41c346ac317 100644 --- a/sns.py +++ b/sns.py @@ -8,11 +8,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: sns short_description: Send Amazon Simple Notification Service messages diff --git a/sns_topic.py b/sns_topic.py index 141906edb18..52c21a41f0f 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -7,11 +7,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' module: sns_topic short_description: Manages AWS SNS topics and subscriptions diff --git a/sqs_queue.py b/sqs_queue.py index ca8041f11f8..763db04c774 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: sqs_queue diff --git a/sts_assume_role.py b/sts_assume_role.py index 7f86c34a475..f836e478e23 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: sts_assume_role diff --git a/sts_session_token.py b/sts_session_token.py index e83d71ffd69..d39519e8e0f 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -6,11 +6,6 @@ __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['stableinterface'], - 'supported_by': 'community'} - - DOCUMENTATION = ''' --- module: sts_session_token From 5529ea075ece2204e8066029d2f3ab57f7332218 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Thu, 21 May 2020 12:39:57 -0700 Subject: [PATCH 009/683] Fix module documentation schema errors (#80) * Fix module documentation schema errors ecs_service is missing suboptions data for placement_constraints Return docs for cloudwatchlogs_log_group_metric_filter are missing suboption descriptions and are just plain wrong. Correct type and values. 
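Before the diffs themselves, a self-contained sketch of the validation pattern this fix relies on (the option and suboption names mirror the real change, but this fragment is illustrative, not code from the patch): in an AnsibleModule argument spec, a type='list' option can validate each dict element against nested options, which is why the DOCUMENTATION suboptions and the arg_spec in the ecs_service hunks below have to declare the same keys. Note that elements='dict' is spelled out here for clarity; the actual hunk below passes options= without it.

from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Illustrative spec fragment; each list element is validated as a dict
# whose keys must match the declared suboptions.
argument_spec = dict(
    placement_constraints=dict(
        required=False,
        default=[],
        type='list',
        elements='dict',  # assumption: declared explicitly for element validation
        options=dict(
            type=dict(type='str'),
            expression=dict(type='str'),
        ),
    ),
)

With a spec of this shape, a task passing placement_constraints as a list of dicts such as {'type': 'memberOf', 'expression': ...} is accepted, while an unexpected key in any element is rejected when the module parses its parameters.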
Fixes #79

* Also update arg_spec for ecs_service
---
 cloudwatchlogs_log_group_metric_filter.py | 15 +++++++++------
 ecs_service.py                            | 17 +++++++++++++++--
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py
index 83c71176600..fd2c9221eff 100644
--- a/cloudwatchlogs_log_group_metric_filter.py
+++ b/cloudwatchlogs_log_group_metric_filter.py
@@ -89,12 +89,15 @@
     description: Return the origin response value
     returned: success
     type: list
-    contains:
-        creation_time:
-        filter_name:
-        filter_pattern:
-        log_group_name:
-        metric_filter_count:
+    sample: [
+        {
+            "default_value": 3.1415,
+            "metric_name": "box_free_space",
+            "metric_namespace": "made_with_ansible",
+            "metric_value": "$.value"
+        }
+    ]
+
 """
 from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
diff --git a/ecs_service.py b/ecs_service.py
index 96d08b02633..7761d3d3a26 100644
--- a/ecs_service.py
+++ b/ecs_service.py
@@ -103,10 +103,17 @@
     placement_constraints:
         description:
           - The placement constraints for the tasks in the service.
+          - See U(https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementConstraint.html) for more details.
         required: false
         type: list
         elements: dict
        suboptions:
+          type:
+            description: The type of constraint.
+            type: str
+          expression:
+            description: A cluster query language expression to apply to the constraint.
+            type: str
     placement_strategy:
         description:
           - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.
@@ -648,8 +655,14 @@ def main():
         repeat=dict(required=False, type='int', default=10),
         force_new_deployment=dict(required=False, default=False, type='bool'),
         deployment_configuration=dict(required=False, default={}, type='dict'),
-        placement_constraints=dict(required=False, default=[], type='list'),
-        placement_strategy=dict(required=False, default=[], type='list'),
+        placement_constraints=dict(required=False, default=[], type='list', options=dict(
+            type=dict(type='str'),
+            expression=dict(type='str')
+        )),
+        placement_strategy=dict(required=False, default=[], type='list', options=dict(
+            type=dict(type='str'),
+            field=dict(type='str'),
+        )),
         health_check_grace_period_seconds=dict(required=False, type='int'),
         network_configuration=dict(required=False, type='dict', options=dict(
             subnets=dict(type='list'),

From 67cf0da86a84aa8a9ea5789cfce78d24f6f6d719 Mon Sep 17 00:00:00 2001
From: Tyler Schwend
Date: Tue, 2 Jun 2020 14:59:29 -0400
Subject: [PATCH 010/683] fix: don't create aws_kms keys when in check mode (#30)

* fix: don't create aws_kms keys when in check mode

https://github.com/ansible/ansible/issues/68019

* fix: ftests for kms check mode
* style: avoid the big block
* lint: bad space
* fix: be sure to pass changed in result
* style: replace newlines
* fix: ftest ensure that check mode returned `changed`
* fix: bomb out early

Co-authored-by: Tyler Schwend
---
 aws_kms.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/aws_kms.py b/aws_kms.py
index 577ec365d5c..6da965d4b99 100644
--- a/aws_kms.py
+++ b/aws_kms.py
@@ -824,6 +824,10 @@ def create_key(connection, module):
                   Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'),
                   KeyUsage='ENCRYPT_DECRYPT',
                   Origin='AWS_KMS')
+
+    if module.check_mode:
+        return {'changed': True}
+
     if module.params.get('description'):
         params['Description'] = module.params['description']
     if module.params.get('policy'):
@@ -833,8 +837,8 @@
         result = connection.create_key(**params)['KeyMetadata']
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Failed to create initial key")
-    key = get_key_details(connection, module, result['KeyId'])
 
+    key = get_key_details(connection, module, result['KeyId'])
     update_alias(connection, module, key, module.params['alias'])
     update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))

From 2a9585fd4a3f55af0febb5665ebf5b8e12877fd4 Mon Sep 17 00:00:00 2001
From: Jill R <4121322+jillr@users.noreply.github.com>
Date: Tue, 16 Jun 2020 11:23:52 -0700
Subject: [PATCH 011/683] Collections related fixes for CI (#96)

* Update module deprecations

Switch version to `removed_at_date`

* Don't install amazon.aws from galaxy

We've been using galaxy to install amazon.aws in shippable, but that
doesn't really work if we aren't publishing faster. Get that collection
from git so it is most up to date.

* We need to declare python test deps now
* missed a python dep
---
 aws_acm_info.py                  | 2 +-
 aws_kms.py                       | 2 +-
 aws_kms_info.py                  | 2 +-
 aws_region_info.py               | 2 +-
 aws_s3_bucket_info.py            | 2 +-
 aws_sgw_info.py                  | 2 +-
 aws_waf_info.py                  | 2 +-
 cloudfront_info.py               | 2 +-
 cloudwatchlogs_log_group_info.py | 3 ++-
 data_pipeline.py                 | 5 ++---
 ec2_asg_info.py                  | 2 +-
 ec2_customer_gateway_info.py     | 3 ++-
 ec2_eip.py                       | 4 ++--
 ec2_eip_info.py                  | 2 +-
 ec2_elb_info.py                  | 2 +-
 ec2_instance_info.py             | 2 +-
 ec2_lc.py                        | 4 ++--
 ec2_lc_info.py                   | 2 +-
 ec2_metric_alarm.py              | 5 +++--
 ec2_placement_group_info.py      | 3 ++-
 ec2_vpc_endpoint_info.py         | 2 +-
 ec2_vpc_igw_info.py              | 2 +-
 ec2_vpc_nacl_info.py             | 2 +-
 ec2_vpc_nat_gateway_info.py      | 3 ++-
 ec2_vpc_peering_info.py          | 2 +-
 ec2_vpc_route_table_info.py      | 3 ++-
 ec2_vpc_vgw_info.py              | 2 +-
 ec2_vpc_vpn_info.py              | 2 +-
 ecs_ecr.py                       | 4 ++--
 ecs_service_info.py              | 2 +-
 ecs_taskdefinition_info.py       | 3 ++-
 efs_info.py                      | 2 +-
 elasticache_info.py              | 2 +-
 elb_application_lb_info.py       | 3 ++-
 elb_classic_lb_info.py           | 2 +-
 elb_network_lb.py                | 2 +-
 elb_target_group_info.py         | 2 +-
 elb_target_info.py               | 2 +-
 iam_managed_policy.py            | 4 ++--
 iam_mfa_device_info.py           | 2 +-
 iam_policy.py                    | 4 ++--
 iam_role.py                      | 2 +-
 iam_role_info.py                 | 2 +-
 iam_server_certificate_info.py   | 3 ++-
 lambda_facts.py                  | 3 ++-
 rds_instance_info.py             | 2 +-
 rds_snapshot_info.py             | 2 +-
 redshift_info.py                 | 2 +-
 route53_info.py                  | 2 +-
 s3_lifecycle.py                  | 4 ++--
 s3_sync.py                       | 4 ++--
 51 files changed, 70 insertions(+), 61 deletions(-)

diff --git a/aws_acm_info.py b/aws_acm_info.py
index 4cc072a7aef..ac11eb696a8 100644
--- a/aws_acm_info.py
+++ b/aws_acm_info.py
@@ -274,7 +274,7 @@ def main():
     acm_info = ACMServiceManager(module)
 
     if module._name == 'aws_acm_facts':
-        module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", version='2.13')
+        module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", date='2021-12-01', collection_name='community.aws')
 
     client = module.client('acm')
diff --git a/aws_kms.py b/aws_kms.py
index 6da965d4b99..7722a4803f8 100644
--- a/aws_kms.py
+++ b/aws_kms.py
@@ -1052,7 +1052,7 @@ def main():
     if module.params.get('policy_grant_types') or mode == 'deny':
         module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
-                         ' and has been deprecated in favour of the
policy option.', version='2.13') + ' and has been deprecated in favour of the policy option.', date='2021-12-01', collection_name='community.aws') result = update_policy_grants(kms, module, key_metadata, mode) module.exit_json(**result) diff --git a/aws_kms_info.py b/aws_kms_info.py index a51b30a85cc..4424e8fe2ce 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -409,7 +409,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'aws_kms_facts': - module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13') + module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 and botocore are required for this module') diff --git a/aws_region_info.py b/aws_region_info.py index b20bf9f84ef..cafb743ece1 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -70,7 +70,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) if module._name == 'aws_region_facts': - module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", version='2.13') + module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 4646e40e3d5..30964ab1c5a 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -94,7 +94,7 @@ def main(): is_old_facts = module._name == 'aws_s3_bucket_facts' if is_old_facts: module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " - "and the renamed one no longer returns ansible_facts", version='2.13') + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') # Verify Boto3 is used if not HAS_BOTO3: diff --git a/aws_sgw_info.py b/aws_sgw_info.py index f18084d06e7..5cfc7ab8cc0 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -345,7 +345,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) if module._name == 'aws_sgw_facts': - module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", version='2.13') + module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", date='2021-12-01', collection_name='community.aws') client = module.client('storagegateway') if client is None: # this should never happen diff --git a/aws_waf_info.py b/aws_waf_info.py index a3169453f1d..76fe5d084a7 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -126,7 +126,7 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'aws_waf_facts': - module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", version='2.13') + module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", date='2021-12-01', collection_name='community.aws') resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' client = module.client(resource) diff --git a/cloudfront_info.py b/cloudfront_info.py index eb89ff35fad..46aa714dbf2 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -601,7 +601,7 @@ def main(): is_old_facts = module._name == 'cloudfront_facts' if is_old_facts: module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', " - "and the renamed one no 
longer returns ansible_facts", version='2.13') + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 is required.') diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 61b9ad235e2..448bb954d40 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -110,7 +110,8 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'cloudwatchlogs_log_group_facts': - module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13') + module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", + date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 is required.') diff --git a/data_pipeline.py b/data_pipeline.py index ac06922a7b3..9f9ef5d818b 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -123,8 +123,7 @@ type: dict version: description: - - The version option has never had any effect and will be removed in - Ansible 2.14 + - The version option has never had any effect and will be removed after 2022-06-01. type: str ''' @@ -605,7 +604,7 @@ def main(): argument_spec.update( dict( name=dict(required=True), - version=dict(removed_in_version='2.14'), + version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'), description=dict(required=False, default=''), objects=dict(required=False, type='list', default=[]), parameters=dict(required=False, type='list', default=[]), diff --git a/ec2_asg_info.py b/ec2_asg_info.py index b2c971ae81b..4c523db50de 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -394,7 +394,7 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec) if module._name == 'ec2_asg_facts': - module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", version='2.13') + module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", date='2021-12-01', collection_name='community.aws') asg_name = module.params.get('name') asg_tags = module.params.get('tags') diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 001c88ae8cf..f37a0f35567 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -126,7 +126,8 @@ def main(): mutually_exclusive=[['customer_gateway_ids', 'filters']], supports_check_mode=True) if module._module._name == 'ec2_customer_gateway_facts': - module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'", version='2.13') + module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'", + date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ec2_eip.py b/ec2_eip.py index a43fe9a0a50..f4ba39f75a4 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -78,7 +78,7 @@ type: str wait_timeout: description: - - The I(wait_timeout) option does nothing and will be removed in Ansible 2.14. 
+ - The I(wait_timeout) option does nothing and will be removed after 2022-06-01 type: int extends_documentation_fragment: - amazon.aws.aws @@ -525,7 +525,7 @@ def main(): default=False), release_on_disassociation=dict(required=False, type='bool', default=False), allow_reassociation=dict(type='bool', default=False), - wait_timeout=dict(type='int', removed_in_version='2.14'), + wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='community.aws'), private_ip_address=dict(), tag_name=dict(), tag_value=dict(), diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 6001110b71b..61c3a49ad8a 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -133,7 +133,7 @@ def main(): supports_check_mode=True ) if module._module._name == 'ec2_eip_facts': - module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", version='2.13') + module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", date='2021-12-01', collection_name='community.aws') module.exit_json(changed=False, addresses=get_eips_details(module)) diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 0c4c2dc1b76..215483a093f 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -239,7 +239,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_elb_facts': - module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", version='2.13') + module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 79d056d4ea6..d2da8b96b6f 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -551,7 +551,7 @@ def main(): supports_check_mode=True ) if module._name == 'ec2_instance_facts': - module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", version='2.13') + module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/ec2_lc.py b/ec2_lc.py index b9f4740a73f..8e13c7ab13e 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -176,7 +176,7 @@ choices: ['default', 'dedicated'] associate_public_ip_address: description: - - The I(associate_public_ip_address) option does nothing and will be removed in Ansible 2.14. 
+ - The I(associate_public_ip_address) option does nothing and will be removed after 2022-06-01 type: bool extends_documentation_fragment: @@ -669,7 +669,7 @@ def main(): ramdisk_id=dict(), instance_profile_name=dict(), ebs_optimized=dict(default=False, type='bool'), - associate_public_ip_address=dict(type='bool', removed_in_version='2.14'), + associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), instance_monitoring=dict(default=False, type='bool'), assign_public_ip=dict(type='bool'), classic_link_vpc_security_groups=dict(type='list'), diff --git a/ec2_lc_info.py b/ec2_lc_info.py index e7cdd9b24f1..5e032332af3 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -217,7 +217,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) if module._name == 'ec2_lc_facts': - module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", version='2.13') + module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 804474294e3..debc170c123 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -58,7 +58,7 @@ comparison: description: - Determines how the threshold value is compared - - Symbolic comparison operators have been deprecated, and will be removed in 2.14 + - Symbolic comparison operators have been deprecated, and will be removed after 2022-06-22. required: false type: str choices: @@ -238,7 +238,8 @@ def create_metric_alarm(connection, module): '>': 'GreaterThanThreshold'} if comparison in ('<=', '<', '>', '>='): module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated. 
Please use LessThanOrEqualToThreshold, ' - 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', version="2.14") + 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', + date='2022-06-01', collection_name='community.aws') comparison = comparisons[comparison] if not isinstance(dimensions, list): diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 5a4bc09a842..33be33516d9 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -113,7 +113,8 @@ def main(): supports_check_mode=True ) if module._module._name == 'ec2_placement_group_facts': - module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'", version='2.13') + module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'", + date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index fa4f8c59713..75ceb6b9bc7 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -174,7 +174,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_endpoint_facts': - module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", version='2.13') + module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements if not HAS_BOTO3: diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 7bdff093461..29845d2ccf5 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -137,7 +137,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_igw_facts': - module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", version='2.13') + module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements if not HAS_BOTO3: diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 658816545b4..d4c0c431465 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -209,7 +209,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_nacl_facts': - module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", version='2.13') + module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index a4e7ac6db99..85f96cc7340 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -133,7 +133,8 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_nat_gateway_facts': - module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", version='2.13') + module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", + date='2021-12-01', collection_name='community.aws') # Validate Requirements if not HAS_BOTO3: diff --git a/ec2_vpc_peering_info.py 
b/ec2_vpc_peering_info.py index 008f75fbd10..75af1b65613 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -113,7 +113,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_peering_facts': - module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", version='2.13') + module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements if not HAS_BOTO3: diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index b0b5b189de1..acb203f1eab 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -107,7 +107,8 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_route_table_facts': - module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", version='2.13') + module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", + date='2021-12-01', collection_name='community.aws') if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 177f4a7ea21..a8c8da426c9 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -143,7 +143,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_vgw_facts': - module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", version='2.13') + module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements if not HAS_BOTO3: diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 3041a747d69..427b4b8cec8 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -205,7 +205,7 @@ def main(): mutually_exclusive=[['vpn_connection_ids', 'filters']], supports_check_mode=True) if module._module._name == 'ec2_vpc_vpn_facts': - module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", version='2.13') + module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ecs_ecr.py b/ecs_ecr.py index d2eb786370b..de9ab574fdb 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -43,7 +43,7 @@ purge_policy: description: - If yes, remove the policy from the repository. - - Alias C(delete_policy) has been deprecated and will be removed in Ansible 2.14 + - Alias C(delete_policy) has been deprecated and will be removed after 2022-06-01. 
required: false default: false type: bool @@ -502,7 +502,7 @@ def main(): image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], default='mutable'), purge_policy=dict(required=False, type='bool', aliases=['delete_policy'], - deprecated_aliases=[dict(name='delete_policy', version='2.14')]), + deprecated_aliases=[dict(name='delete_policy', date='2022-06-01', collection_name='community.aws')]), lifecycle_policy=dict(required=False, type='json'), purge_lifecycle_policy=dict(required=False, type='bool') ) diff --git a/ecs_service_info.py b/ecs_service_info.py index 4e479bbb443..e7628d36881 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -225,7 +225,7 @@ def main(): is_old_facts = module._name == 'ecs_service_facts' if is_old_facts: module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', " - "and the renamed one no longer returns ansible_facts", version='2.13') + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') show_details = module.params.get('details') diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index 3711c47767a..a6b1c627f4c 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -314,7 +314,8 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ecs_taskdefinition_facts': - module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", version='2.13') + module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", + date='2021-12-01', collection_name='community.aws') ecs = module.client('ecs') diff --git a/efs_info.py b/efs_info.py index 9835be169e4..992b650627e 100644 --- a/efs_info.py +++ b/efs_info.py @@ -365,7 +365,7 @@ def main(): is_old_facts = module._name == 'efs_facts' if is_old_facts: module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', " - "and the renamed one no longer returns ansible_facts", version='2.13') + "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') connection = EFSConnection(module) diff --git a/elasticache_info.py b/elasticache_info.py index 86763b39a0d..f7000116ca3 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -299,7 +299,7 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'elasticache_facts': - module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", version='2.13') + module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", date='2021-12-01', collection_name='community.aws') client = module.client('elasticache') diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index ca5290e3892..2f9c1c39cc9 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -273,7 +273,8 @@ def main(): supports_check_mode=True ) if module._name == 'elb_application_lb_facts': - module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", version='2.13') + module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", + date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index a38aee6b253..915bf19aece 100644 --- 
a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -197,7 +197,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'elb_classic_lb_facts': - module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", version='2.13') + module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('elb') diff --git a/elb_network_lb.py b/elb_network_lb.py index 858cfaaffd9..616c8e061be 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -440,7 +440,7 @@ def main(): # See below, unless state==present we delete. Ouch. module.deprecate('State currently defaults to absent. This is inconsistent with other modules' ' and the default will be changed to `present` in Ansible 2.14', - version='2.14') + date='2022-06-01', collection_name='community.aws') # Quick check of listeners parameters listeners = module.params.get("listeners") diff --git a/elb_target_group_info.py b/elb_target_group_info.py index f285221eb33..2505026b0d7 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -309,7 +309,7 @@ def main(): supports_check_mode=True ) if module._name == 'elb_target_group_facts': - module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", version='2.13') + module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/elb_target_info.py b/elb_target_info.py index 1101965a318..40a9ac26420 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -416,7 +416,7 @@ def main(): supports_check_mode=True, ) if module._name == 'elb_target_facts': - module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", version='2.13') + module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", date='2021-12-01', collection_name='community.aws') instance_id = module.params["instance_id"] get_unused_target_groups = module.params["get_unused_target_groups"] diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 0631a243dd0..06e31a906d5 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -45,7 +45,7 @@ type: str fail_on_delete: description: - - The I(fail_on_delete) option does nothing and will be removed in Ansible 2.14. 
+ - The I(fail_on_delete) option does nothing and will be removed after 2022-06-01 type: bool author: "Dan Kozlowski (@dkhenry)" @@ -289,7 +289,7 @@ def main(): policy=dict(type='json'), make_default=dict(type='bool', default=True), only_version=dict(type='bool', default=False), - fail_on_delete=dict(type='bool', removed_in_version='2.14'), + fail_on_delete=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), state=dict(default='present', choices=['present', 'absent']), )) diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 2431a57c063..bb59d8d8177 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -98,7 +98,7 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) if module._name == 'iam_mfa_device_facts': - module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", version='2.13') + module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/iam_policy.py b/iam_policy.py index 105d985df5b..5eb4694c3c7 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -304,13 +304,13 @@ def main(): if (skip_duplicates is None): module.deprecate('The skip_duplicates behaviour has caused confusion and' ' will be disabled by default in Ansible 2.14', - version='2.14') + date='2022-06-01', collection_name='community.aws') skip_duplicates = True if module.params.get('policy_document'): module.deprecate('The policy_document option has been deprecated and' ' will be removed in Ansible 2.14', - version='2.14') + date='2022-06-01', collection_name='community.aws') args = dict( client=module.client('iam'), diff --git a/iam_role.py b/iam_role.py index 432fcab64ad..dc96bc93f3f 100644 --- a/iam_role.py +++ b/iam_role.py @@ -629,7 +629,7 @@ def main(): if module.params.get('purge_policies') is None: module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.' 
- ' To maintain the existing behaviour explicity set purge_policies=true', version='2.14') + ' To maintain the existing behaviour explicitly set purge_policies=true', date='2022-06-01', collection_name='community.aws') if module.params.get('boundary'): if module.params.get('create_instance_profile'): diff --git a/iam_role_info.py b/iam_role_info.py index bf32d32adbf..7fdb4ac58fa 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -242,7 +242,7 @@ def main(): supports_check_mode=True, mutually_exclusive=[['name', 'path_prefix']]) if module._name == 'iam_role_facts': - module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", version='2.13') + module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", date='2021-12-01', collection_name='community.aws') client = module.client('iam') diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 49daffe72c8..8bd9b0f1969 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -147,7 +147,8 @@ def main(): module = AnsibleModule(argument_spec=argument_spec,) if module._name == 'iam_server_certificate_facts': - module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", version='2.13') + module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", + date='2021-12-01', collection_name='community.aws') if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') diff --git a/lambda_facts.py b/lambda_facts.py index f9399769954..aac0bd1e92a 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -10,7 +10,8 @@ --- module: lambda_facts deprecated: - removed_in: '2.13' + removed_at_date: '2021-12-01' + removed_from_collection: 'community.aws' why: Deprecated in favour of C(_info) module. alternative: Use M(lambda_info) instead.
short_description: Gathers AWS Lambda function details as Ansible facts diff --git a/rds_instance_info.py b/rds_instance_info.py index ab6bcbcc0b4..8a23c392ddd 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -396,7 +396,7 @@ def main(): supports_check_mode=True, ) if module._name == 'rds_instance_facts': - module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", version='2.13') + module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", date='2021-12-01', collection_name='community.aws') conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 5d330b47e66..30f30a815c1 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -376,7 +376,7 @@ def main(): mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] ) if module._name == 'rds_snapshot_facts': - module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13') + module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", date='2021-12-01', collection_name='community.aws') conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) results = dict() diff --git a/redshift_info.py b/redshift_info.py index 6763aee9428..77aa5e1fb36 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -335,7 +335,7 @@ def main(): supports_check_mode=True ) if module._name == 'redshift_facts': - module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", version='2.13') + module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", date='2021-12-01', collection_name='community.aws') cluster_identifier = module.params.get('cluster_identifier') cluster_tags = module.params.get('tags') diff --git a/route53_info.py b/route53_info.py index 454875bb47f..cb6f74b369a 100644 --- a/route53_info.py +++ b/route53_info.py @@ -463,7 +463,7 @@ def main(): ], ) if module._name == 'route53_facts': - module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", version='2.13') + module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements if not (HAS_BOTO or HAS_BOTO3): diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 32ac0dfd246..9ab279d2b50 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -115,7 +115,7 @@ type: list requester_pays: description: - - The I(requester_pays) option does nothing and will be removed in Ansible 2.14. + - The I(requester_pays) option does nothing and will be removed after 2022-06-01 type: bool extends_documentation_fragment: - amazon.aws.aws @@ -443,7 +443,7 @@ def main(): noncurrent_version_transition_days=dict(type='int'), noncurrent_version_transitions=dict(type='list'), prefix=dict(), - requester_pays=dict(type='bool', removed_in_version='2.14'), + requester_pays=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), rule_id=dict(), state=dict(default='present', choices=['present', 'absent']), status=dict(default='enabled', choices=['enabled', 'disabled']), diff --git a/s3_sync.py b/s3_sync.py index 05f1ffa92df..aa527092d3c 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -109,7 +109,7 @@ type: bool retries: description: - - The I(retries) option does nothing and will be removed in Ansible 2.14. 
+ - The I(retries) option does nothing and will be removed after 2022-06-01 type: str requirements: @@ -512,7 +512,7 @@ def main(): file_root=dict(required=True, type='path'), permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), - retries=dict(required=False, removed_in_version='2.14'), + retries=dict(required=False, removed_at_date='2022-06-01', removed_from_collection='community.aws'), mime_map=dict(required=False, type='dict'), exclude=dict(required=False, default=".*"), include=dict(required=False, default="*"), From 1716c47f20996f47d5f417db54ad7cd9f2391791 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Wed, 17 Jun 2020 01:24:54 +0530 Subject: [PATCH 012/683] Update Examples with FQCN (#67) Updated module examples with FQCN Signed-off-by: Abhijeet Kasurde --- aws_acm.py | 10 +- aws_acm_info.py | 10 +- aws_api_gateway.py | 6 +- aws_application_scaling_policy.py | 6 +- aws_batch_compute_environment.py | 6 +- aws_batch_job_definition.py | 6 +- aws_batch_job_queue.py | 6 +- aws_codebuild.py | 2 +- aws_codecommit.py | 4 +- aws_codepipeline.py | 2 +- aws_config_aggregation_authorization.py | 6 +- aws_config_aggregator.py | 2 +- aws_config_delivery_channel.py | 2 +- aws_config_recorder.py | 2 +- aws_config_rule.py | 2 +- aws_direct_connect_connection.py | 8 +- aws_direct_connect_gateway.py | 4 +- aws_direct_connect_link_aggregation_group.py | 2 +- aws_direct_connect_virtual_interface.py | 4 +- aws_eks_cluster.py | 4 +- aws_elasticbeanstalk_app.py | 4 +- aws_glue_connection.py | 4 +- aws_glue_job.py | 4 +- aws_inspector_target.py | 6 +- aws_kms.py | 16 +- aws_kms_info.py | 6 +- aws_region_info.py | 4 +- aws_s3_bucket_info.py | 4 +- aws_s3_cors.py | 4 +- aws_secret.py | 4 +- aws_ses_identity.py | 14 +- aws_ses_identity_policy.py | 8 +- aws_ses_rule_set.py | 12 +- aws_sgw_info.py | 4 +- aws_ssm_parameter_store.py | 13 +- aws_step_functions_state_machine.py | 6 +- aws_step_functions_state_machine_execution.py | 4 +- aws_waf_condition.py | 14 +- aws_waf_info.py | 6 +- aws_waf_rule.py | 6 +- aws_waf_web_acl.py | 4 +- cloudformation_exports_info.py | 2 +- cloudformation_stack_set.py | 8 +- cloudfront_distribution.py | 43 ++- cloudfront_info.py | 35 +-- cloudfront_invalidation.py | 4 +- cloudfront_origin_access_identity.py | 6 +- cloudtrail.py | 10 +- cloudwatchevent_rule.py | 6 +- cloudwatchlogs_log_group.py | 8 +- cloudwatchlogs_log_group_info.py | 2 +- cloudwatchlogs_log_group_metric_filter.py | 6 +- data_pipeline.py | 8 +- dms_endpoint.py | 6 +- dms_replication_subnet_group.py | 2 +- dynamodb_table.py | 16 +- dynamodb_ttl.py | 4 +- ec2_ami_copy.py | 24 +- ec2_asg.py | 16 +- ec2_asg_info.py | 24 +- ec2_asg_lifecycle_hook.py | 8 +- ec2_customer_gateway.py | 9 +- ec2_customer_gateway_info.py | 6 +- ec2_eip.py | 32 +-- ec2_eip_info.py | 25 +- ec2_elb.py | 8 +- ec2_elb_info.py | 27 +- ec2_instance.py | 40 +-- ec2_instance_info.py | 20 +- ec2_launch_template.py | 10 +- ec2_lc.py | 14 +- ec2_lc_find.py | 4 +- ec2_lc_info.py | 12 +- ec2_metric_alarm.py | 4 +- ec2_placement_group.py | 12 +- ec2_placement_group_info.py | 13 +- ec2_scaling_policy.py | 2 +- ec2_snapshot_copy.py | 20 +- ec2_transit_gateway.py | 8 +- ec2_transit_gateway_info.py | 16 +- ec2_vpc_egress_igw.py | 8 +- ec2_vpc_endpoint.py | 10 +- ec2_vpc_endpoint_info.py | 8 +- ec2_vpc_igw.py | 8 +- ec2_vpc_igw_info.py | 6 +- ec2_vpc_nacl.py | 10 +- ec2_vpc_nacl_info.py | 8 +- ec2_vpc_nat_gateway.py | 16 +- 
ec2_vpc_nat_gateway_info.py | 8 +- ec2_vpc_peer.py | 28 +- ec2_vpc_peering_info.py | 6 +- ec2_vpc_route_table.py | 6 +- ec2_vpc_route_table_info.py | 16 +- ec2_vpc_vgw.py | 8 +- ec2_vpc_vgw_info.py | 6 +- ec2_vpc_vpn.py | 16 +- ec2_vpc_vpn_info.py | 6 +- ec2_win_password.py | 8 +- ecs_attribute.py | 8 +- ecs_cluster.py | 10 +- ecs_ecr.py | 23 +- ecs_service.py | 8 +- ecs_service_info.py | 6 +- ecs_tag.py | 8 +- ecs_task.py | 8 +- ecs_taskdefinition.py | 8 +- ecs_taskdefinition_info.py | 2 +- efs.py | 12 +- efs_info.py | 8 +- elasticache.py | 12 +- elasticache_info.py | 4 +- elasticache_parameter_group.py | 8 +- elasticache_snapshot.py | 17 +- elasticache_subnet_group.py | 8 +- elb_application_lb.py | 8 +- elb_application_lb_info.py | 16 +- elb_classic_lb.py | 28 +- elb_classic_lb_info.py | 8 +- elb_instance.py | 6 +- elb_network_lb.py | 12 +- elb_target.py | 16 +- elb_target_group.py | 28 +- elb_target_group_info.py | 12 +- elb_target_info.py | 14 +- execute_lambda.py | 10 +- iam.py | 12 +- iam_cert.py | 16 +- iam_group.py | 27 +- iam_managed_policy.py | 10 +- iam_mfa_device_info.py | 12 +- iam_password_policy.py | 2 +- iam_policy.py | 12 +- iam_policy_info.py | 8 +- iam_role.py | 10 +- iam_role_info.py | 12 +- iam_saml_federation.py | 6 +- iam_server_certificate_info.py | 8 +- iam_user.py | 18 +- iam_user_info.py | 6 +- kinesis_stream.py | 12 +- lambda.py | 6 +- lambda_alias.py | 10 +- lambda_event.py | 39 ++- lambda_facts.py | 14 +- lambda_info.py | 10 +- lambda_policy.py | 42 ++- lightsail.py | 8 +- rds.py | 62 +++-- rds_instance.py | 8 +- rds_instance_info.py | 8 +- rds_param_group.py | 8 +- rds_snapshot.py | 8 +- rds_snapshot_info.py | 8 +- rds_subnet_group.py | 8 +- redshift.py | 20 +- redshift_cross_region_snapshots.py | 6 +- redshift_info.py | 16 +- redshift_subnet_group.py | 9 +- route53.py | 252 +++++++++--------- route53_health_check.py | 10 +- route53_info.py | 20 +- route53_zone.py | 8 +- s3_bucket_notification.py | 27 +- s3_lifecycle.py | 28 +- s3_logging.py | 4 +- s3_sync.py | 4 +- s3_website.py | 12 +- sns.py | 6 +- sns_topic.py | 6 +- sqs_queue.py | 24 +- sts_assume_role.py | 4 +- sts_session_token.py | 29 +- 172 files changed, 1057 insertions(+), 1089 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 93034e7e71f..de20833ac03 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -157,14 +157,14 @@ EXAMPLES = ''' - name: upload a self-signed certificate - aws_acm: + community.aws.aws_acm: certificate: "{{ lookup('file', 'cert.pem' ) }}" privateKey: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert" region: ap-southeast-2 # AWS region - name: create/update a certificate with a chain - aws_acm: + community.aws.aws_acm: certificate: "{{ lookup('file', 'cert.pem' ) }}" privateKey: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert @@ -178,19 +178,19 @@ var: cert_create.certificate.arn - name: delete the cert we just created - aws_acm: + community.aws.aws_acm: name_tag: my_cert state: absent region: ap-southeast-2 - name: delete a certificate with a particular ARN - aws_acm: + community.aws.aws_acm: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" state: absent region: ap-southeast-2 - name: delete all certificates with a particular domain name - aws_acm: + community.aws.aws_acm: domain_name: acm.ansible.com state: absent region: ap-southeast-2 diff --git a/aws_acm_info.py b/aws_acm_info.py index ac11eb696a8..dfbd955a178 100644 --- a/aws_acm_info.py +++ 
b/aws_acm_info.py @@ -50,19 +50,19 @@ EXAMPLES = ''' - name: obtain all ACM certificates - aws_acm_info: + community.aws.aws_acm_info: - name: obtain all information for a single ACM certificate - aws_acm_info: + community.aws.aws_acm_info: domain_name: "*.example_com" - name: obtain all certificates pending validation - aws_acm_info: + community.aws.aws_acm_info: statuses: - PENDING_VALIDATION - name: obtain all certificates with tag Name=foo and myTag=bar - aws_acm_info: + community.aws.aws_acm_info: tags: Name: foo myTag: bar @@ -70,7 +70,7 @@ # The output is still a list of certificates, just one item long. - name: obtain information about a certificate with a particular ARN - aws_acm_info: + community.aws.aws_acm_info: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" ''' diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 1a508299e06..49b1a1f8a4e 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -116,7 +116,7 @@ EXAMPLES = ''' - name: Setup AWS API Gateway setup on AWS and deploy API definition - aws_api_gateway: + community.aws.aws_api_gateway: swagger_file: my_api.yml stage: production cache_enabled: true @@ -126,7 +126,7 @@ state: present - name: Update API definition to deploy new version - aws_api_gateway: + community.aws.aws_api_gateway: api_id: 'abc123321cba' swagger_file: my_api.yml deploy_desc: Make auth fix available. @@ -136,7 +136,7 @@ state: present - name: Update API definitions and settings and deploy as canary - aws_api_gateway: + community.aws.aws_api_gateway: api_id: 'abc123321cba' swagger_file: my_api.yml cache_enabled: true diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 51d98743da3..369302d7a9b 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -111,7 +111,7 @@ # Create step scaling policy for ECS Service - name: scaling_policy - aws_application_scaling_policy: + community.aws.aws_application_scaling_policy: state: present policy_name: test_policy service_namespace: ecs @@ -132,7 +132,7 @@ # Create target tracking scaling policy for ECS Service - name: scaling_policy - aws_application_scaling_policy: + community.aws.aws_application_scaling_policy: state: present policy_name: test_policy service_namespace: ecs @@ -150,7 +150,7 @@ # Remove scalable target for ECS Service - name: scaling_policy - aws_application_scaling_policy: + community.aws.aws_application_scaling_policy: state: absent policy_name: test_policy policy_type: StepScaling diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 38a22ca69cd..04738ffefae 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -12,8 +12,8 @@ short_description: Manage AWS Batch Compute Environments description: - This module allows the management of AWS Batch Compute Environments. - It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute - environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions. + It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. 
author: Jon Meran (@jonmer85) @@ -131,7 +131,7 @@ state: present tasks: - name: My Batch Compute Environment - aws_batch_compute_environment: + community.aws.aws_batch_compute_environment: compute_environment_name: computeEnvironmentName state: present region: us-east-1 diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 6f385ef20fc..7debf759156 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -12,8 +12,8 @@ short_description: Manage AWS Batch Job Definitions description: - This module allows the management of AWS Batch Job Definitions. - It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute - environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions. + It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. author: Jon Meran (@jonmer85) @@ -185,7 +185,7 @@ state: present tasks: - name: My Batch Job Definition - aws_batch_job_definition: + community.aws.aws_batch_job_definition: job_definition_name: My Batch Job Definition state: present type: container diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index c888b560e0b..3ca0333b940 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -12,8 +12,8 @@ short_description: Manage AWS Batch Job Queues description: - This module allows the management of AWS Batch Job Queues. - It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute - environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions. + It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. author: Jon Meran (@jonmer85) @@ -75,7 +75,7 @@ state: present tasks: - name: My Batch Job Queue - aws_batch_job_queue: + community.aws.aws_batch_job_queue: job_queue_name: jobQueueName state: present region: us-east-1 diff --git a/aws_codebuild.py b/aws_codebuild.py index ca79d056bce..8b4a7bf04c4 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -165,7 +165,7 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -- aws_codebuild: +- community.aws.aws_codebuild: name: my_project description: My nice little project service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role" diff --git a/aws_codecommit.py b/aws_codecommit.py index dc7bbaee160..8f26be4ed48 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -126,12 +126,12 @@ EXAMPLES = ''' # Create a new repository -- aws_codecommit: +- community.aws.aws_codecommit: name: repo state: present # Delete a repository -- aws_codecommit: +- community.aws.aws_codecommit: name: repo state: absent ''' diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 1784d7859c4..90fea4016cd 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -82,7 +82,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) -- aws_codepipeline: +- community.aws.aws_codepipeline: name: my_deploy_pipeline role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service artifact_store: diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index d2057a18591..a29eda64394 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -41,12 +41,12 @@ EXAMPLES = ''' - name: Get current account ID - aws_caller_info: + amazon.aws.aws_caller_info: register: whoami -- aws_config_aggregation_authorization: +- community.aws.aws_config_aggregation_authorization: state: present authorized_account_id: '{{ whoami.account }}' - authorzed_aws_region: us-east-1 + authorized_aws_region: us-east-1 ''' RETURN = '''#''' diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 7b97fded3c9..5976c9058fb 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -76,7 +76,7 @@ EXAMPLES = ''' - name: Create cross-account aggregator - aws_config_aggregator: + community.aws.aws_config_aggregator: name: test_config_rule state: present account_sources: diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index f0fda8e61f4..afaef581de7 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -54,7 +54,7 @@ EXAMPLES = ''' - name: Create Delivery Channel for AWS Config - aws_config_delivery_channel: + community.aws.aws_config_delivery_channel: name: test_delivery_channel state: present s3_bucket: 'test_aws_config_bucket' diff --git a/aws_config_recorder.py b/aws_config_recorder.py index 970e6f8c0bc..7b576b6cda7 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -67,7 +67,7 @@ EXAMPLES = ''' - name: Create Configuration Recorder for AWS Config - aws_config_recorder: + community.aws.aws_config_recorder: name: test_configuration_recorder state: present role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' diff --git a/aws_config_rule.py b/aws_config_rule.py index 9ce254def76..50c8d82c552 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -91,7 +91,7 @@ EXAMPLES = ''' - name: Create Config Rule for AWS Config - aws_config_rule: + community.aws.aws_config_rule: name: test_config_rule state: present description: 'This AWS Config rule checks for public write access on S3 buckets' diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 0b2f16ea083..61a0caf0149 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -71,7 +71,7 @@ EXAMPLES = """ # create a Direct Connect connection -- aws_direct_connect_connection: +- community.aws.aws_direct_connect_connection: name: ansible-test-connection state: present location: EqDC2 @@ -80,14 +80,14 @@ register: dc # disassociate the LAG from the connection -- aws_direct_connect_connection: +- community.aws.aws_direct_connect_connection: state: present connection_id: dc.connection.connection_id location: EqDC2 bandwidth: 1Gbps # replace the connection with one with more bandwidth -- aws_direct_connect_connection: +- community.aws.aws_direct_connect_connection: state: present name: ansible-test-connection location: EqDC2 @@ -95,7 +95,7 @@ forced_update: True # delete the connection -- aws_direct_connect_connection: +- community.aws.aws_direct_connect_connection: state: absent name: ansible-test-connection """ diff --git a/aws_direct_connect_gateway.py
b/aws_direct_connect_gateway.py index 7fa8ca23740..1524e17fd7a 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -55,7 +55,7 @@ EXAMPLES = ''' - name: Create a new direct connect gateway attached to virtual private gateway - dxgw: + community.aws.aws_direct_connect_gateway: state: present name: my-dx-gateway amazon_asn: 7224 @@ -63,7 +63,7 @@ register: created_dxgw - name: Create a new unattached dxgw - dxgw: + community.aws.aws_direct_connect_gateway: state: present name: my-dx-gateway amazon_asn: 7224 diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 92e8433f5ed..30b0656af5f 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -82,7 +82,7 @@ EXAMPLES = """ # create a Direct Connect connection -- aws_direct_connect_link_aggregation_group: +- community.aws.aws_direct_connect_link_aggregation_group: state: present location: EqDC2 lag_id: dxlag-xxxxxxxx diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 3883d12331b..6450be0ab08 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -219,14 +219,14 @@ EXAMPLES = ''' --- - name: create an association between a LAG and connection - aws_direct_connect_virtual_interface: + community.aws.aws_direct_connect_virtual_interface: state: present name: "{{ name }}" link_aggregation_group_id: LAG-XXXXXXXX connection_id: dxcon-XXXXXXXX - name: remove an association between a connection and virtual interface - aws_direct_connect_virtual_interface: + community.aws.aws_direct_connect_virtual_interface: state: absent connection_id: dxcon-XXXXXXXX virtual_interface_id: dxv-XXXXXXXX diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 6cb7d4fe0ce..27200f55908 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -65,7 +65,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create an EKS cluster - aws_eks_cluster: + community.aws.aws_eks_cluster: name: my_cluster version: 1.14 role_arn: my_eks_role @@ -77,7 +77,7 @@ register: caller_facts - name: Remove an EKS cluster - aws_eks_cluster: + community.aws.aws_eks_cluster: name: my_cluster wait: yes state: absent diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index 88c6b58d4af..67f5bc611a8 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -48,13 +48,13 @@ EXAMPLES = ''' # Create or update an application -- aws_elasticbeanstalk_app: +- community.aws.aws_elasticbeanstalk_app: app_name: Sample_App description: "Hello World App" state: present # Delete application -- aws_elasticbeanstalk_app: +- community.aws.aws_elasticbeanstalk_app: app_name: Sample_App state: absent diff --git a/aws_glue_connection.py b/aws_glue_connection.py index d2dec7b8db8..1810a6df2e9 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -70,7 +70,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Create an AWS Glue connection -- aws_glue_connection: +- community.aws.aws_glue_connection: name: my-glue-connection connection_properties: JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename @@ -79,7 +79,7 @@ state: present # Delete an AWS Glue connection -- aws_glue_connection: +- community.aws.aws_glue_connection: name: my-glue-connection state: absent diff --git a/aws_glue_job.py b/aws_glue_job.py index 7a9d76d0890..966029ce325 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -84,14 +84,14 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue job -- aws_glue_job: +- community.aws.aws_glue_job: command_script_location: s3bucket/script.py name: my-glue-job role: my-iam-role state: present # Delete an AWS Glue job -- aws_glue_job: +- community.aws.aws_glue_job: name: my-glue-job state: absent diff --git a/aws_inspector_target.py b/aws_inspector_target.py index 00d5ac35ba1..d7e668038fd 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -45,20 +45,20 @@ EXAMPLES = ''' - name: Create my_target Assessment Target - aws_inspector_target: + community.aws.aws_inspector_target: name: my_target tags: role: scan_target - name: Update Existing my_target Assessment Target with Additional Tags - aws_inspector_target: + community.aws.aws_inspector_target: name: my_target tags: env: dev role: scan_target - name: Delete my_target Assessment Target - aws_inspector_target: + community.aws.aws_inspector_target: name: my_target state: absent ''' diff --git a/aws_kms.py b/aws_kms.py index 7722a4803f8..0a0bba626e6 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -17,7 +17,7 @@ description: An alias for a key. For safety, even though KMS does not require keys to have an alias, this module expects all new keys to be given an alias to make them easier to manage. Existing keys without an alias may be - referred to by I(key_id). Use M(aws_kms_info) to find key ids. Required + referred to by I(key_id). Use M(community.aws.aws_kms_info) to find key ids. Required if I(key_id) is not given. Note that passing a I(key_id) and I(alias) will only cause a new alias to be added, an alias will never be renamed. The 'alias/' prefix is optional. @@ -177,28 +177,28 @@ # Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile # and has been deprecated in favour of the policy option. 
- name: grant user-style access to production secrets - aws_kms: + community.aws.aws_kms: args: alias: "alias/my_production_secrets" policy_mode: grant policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" policy_grant_types: "role,role grant" - name: remove access to production secrets from role - aws_kms: + community.aws.aws_kms: args: alias: "alias/my_production_secrets" policy_mode: deny policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" # Create a new KMS key -- aws_kms: +- community.aws.aws_kms: alias: mykey tags: Name: myKey Purpose: protect_stuff # Update previous key with more tags -- aws_kms: +- community.aws.aws_kms: alias: mykey tags: Name: myKey @@ -208,7 +208,7 @@ # Update a known key with grants allowing an instance with the billing-prod IAM profile # to decrypt data encrypted with the environment: production, application: billing # encryption context -- aws_kms: +- community.aws.aws_kms: key_id: abcd1234-abcd-1234-5678-ef1234567890 grants: - name: billing_prod @@ -222,13 +222,13 @@ - RetireGrant - name: Update IAM policy on an existing KMS key - aws_kms: + community.aws.aws_kms: alias: my-kms-key policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { } ]}' state: present - name: Example using lookup for policy json - aws_kms: + community.aws.aws_kms: alias: my-kms-key policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" state: present diff --git a/aws_kms_info.py b/aws_kms_info.py index 4424e8fe2ce..1f6f9f394e3 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -36,15 +36,15 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all KMS keys -- aws_kms_info: +- community.aws.aws_kms_info: # Gather information about all keys with a Name tag -- aws_kms_info: +- community.aws.aws_kms_info: filters: tag-key: Name # Gather information about all keys with a specific name -- aws_kms_info: +- community.aws.aws_kms_info: filters: "tag:Name": Example ''' diff --git a/aws_region_info.py b/aws_region_info.py index cafb743ece1..719cce0cec8 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -33,10 +33,10 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all regions -- aws_region_info: +- community.aws.aws_region_info: # Gather information about a single region -- aws_region_info: +- community.aws.aws_region_info: filters: region-name: eu-west-1 ''' diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 30964ab1c5a..7b250f89ed6 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -16,7 +16,7 @@ description: - Lists S3 buckets in AWS - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(aws_s3_bucket_info) module no longer returns C(ansible_facts)! + Note that the M(community.aws.aws_s3_bucket_info) module no longer returns C(ansible_facts)! author: "Gerben Geijteman (@hyperized)" extends_documentation_fragment: - amazon.aws.aws @@ -30,7 +30,7 @@ # Note: Only AWS S3 is currently supported # Lists all s3 buckets -- aws_s3_bucket_info: +- community.aws.aws_s3_bucket_info: register: result - name: List buckets diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 7a63596e6ab..130b20966e2 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -40,7 +40,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Create a simple cors for s3 bucket -- aws_s3_cors: +- community.aws.aws_s3_cors: name: mys3bucket state: present rules: @@ -57,7 +57,7 @@ max_age_seconds: 30000 # Remove cors for s3 bucket -- aws_s3_cors: +- community.aws.aws_s3_cors: name: mys3bucket state: absent ''' diff --git a/aws_secret.py b/aws_secret.py index 0f4a8e78c9f..a007cf564f5 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -76,14 +76,14 @@ EXAMPLES = r''' - name: Add string to AWS Secrets Manager - aws_secret: + community.aws.aws_secret: name: 'test_secret_string' state: present secret_type: 'string' secret: "{{ super_secret_string }}" - name: remove string from AWS Secrets Manager - aws_secret: + community.aws.aws_secret: name: 'test_secret_string' state: absent secret_type: 'string' diff --git a/aws_ses_identity.py b/aws_ses_identity.py index 81a80630f97..2185d07d0e8 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -95,31 +95,31 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Ensure example@example.com email identity exists - aws_ses_identity: + community.aws.aws_ses_identity: identity: example@example.com state: present - name: Delete example@example.com email identity - aws_ses_identity: + community.aws.aws_ses_identity: email: example@example.com state: absent - name: Ensure example.com domain identity exists - aws_ses_identity: + community.aws.aws_ses_identity: identity: example.com state: present # Create an SNS topic and send bounce and complaint notifications to it # instead of emailing the identity owner - name: Ensure complaints-topic exists - sns_topic: + community.aws.sns_topic: name: "complaints-topic" state: present purge_subscriptions: False register: topic_info - name: Deliver feedback to topic instead of owner email - aws_ses_identity: + community.aws.aws_ses_identity: identity: example@example.com state: present complaint_notifications: @@ -133,14 +133,14 @@ # Create an SNS topic for delivery notifications and leave complaints # Being forwarded to the identity owner email - name: Ensure delivery-notifications-topic exists - sns_topic: + community.aws.sns_topic: name: "delivery-notifications-topic" state: present purge_subscriptions: False register: topic_info - name: Delivery notifications to topic - aws_ses_identity: + community.aws.aws_ses_identity: identity: example@example.com state: present delivery_notifications: diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py index a657a1a4175..bb166523585 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -46,28 +46,28 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: add sending authorization policy to domain identity - aws_ses_identity_policy: + community.aws.aws_ses_identity_policy: identity: example.com policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: add sending authorization policy to email identity - aws_ses_identity_policy: + community.aws.aws_ses_identity_policy: identity: example@example.com policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: add sending authorization policy to identity using ARN - aws_ses_identity_policy: + community.aws.aws_ses_identity_policy: identity: "arn:aws:ses:us-east-1:12345678:identity/example.com" policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: remove sending authorization policy - aws_ses_identity_policy: + community.aws.aws_ses_identity_policy: identity: example.com policy_name: ExamplePolicy state: absent diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index d351f2b8d26..b6b45afce75 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -11,7 +11,7 @@ module: aws_ses_rule_set short_description: Manages SES inbound receipt rule sets description: - - The M(aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets + - The M(community.aws.aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets author: - "Ben Tomasik (@tomislacker)" - "Ed Costello (@orthanc)" @@ -54,29 +54,29 @@ # It is assumed that their matching environment variables are set. --- - name: Create default rule set and activate it if not already - aws_ses_rule_set: + community.aws.aws_ses_rule_set: name: default-rule-set state: present active: yes - name: Create some arbitrary rule set but do not activate it - aws_ses_rule_set: + community.aws.aws_ses_rule_set: name: arbitrary-rule-set state: present - name: Explicitly deactivate the default rule set leaving no active rule set - aws_ses_rule_set: + community.aws.aws_ses_rule_set: name: default-rule-set state: present active: no - name: Remove an arbitrary inactive rule set - aws_ses_rule_set: + community.aws.aws_ses_rule_set: name: arbitrary-rule-set state: absent - name: Remove an ruleset even if we have to first deactivate it to remove it - aws_ses_rule_set: + community.aws.aws_ses_rule_set: name: default-rule-set state: absent force: yes diff --git a/aws_sgw_info.py b/aws_sgw_info.py index 5cfc7ab8cc0..7963e11bfc0 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -165,10 +165,10 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: "Get AWS storage gateway information" - aws_sgw_info: + community.aws.aws_sgw_info: - name: "Get AWS storage gateway information for region eu-west-3" - aws_sgw_info: + community.aws.aws_sgw_info: region: eu-west-3 ''' diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 7e9cb76e897..c721fe3385d 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -77,25 +77,25 @@ EXAMPLES = ''' - name: Create or update key/value pair in aws parameter store - aws_ssm_parameter_store: + community.aws.aws_ssm_parameter_store: name: "Hello" description: "This is your first key" value: "World" - name: Delete the key - aws_ssm_parameter_store: + community.aws.aws_ssm_parameter_store: name: "Hello" state: absent - name: Create or update secure key/value pair with default kms key (aws/ssm) - aws_ssm_parameter_store: + community.aws.aws_ssm_parameter_store: name: "Hello" description: "This is your first key" string_type: "SecureString" value: "World" - name: Create or update secure key/value pair with nominated kms key - aws_ssm_parameter_store: + community.aws.aws_ssm_parameter_store: name: "Hello" description: "This is your first key" string_type: "SecureString" @@ -103,7 +103,7 @@ value: "World" - name: Always update a parameter store value and create a new version - aws_ssm_parameter_store: + community.aws.aws_ssm_parameter_store: name: "overwrite_example" description: "This example will always overwrite the value" string_type: "String" @@ -111,7 +111,8 @@ overwrite_value: "always" - name: recommend to use with aws_ssm lookup plugin - debug: msg="{{ lookup('aws_ssm', 'hello') }}" + debug: + msg: "{{ lookup('amazon.aws.aws_ssm', 'hello') }}" ''' RETURN = ''' diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index a5261e593da..a283a57ce6d 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -66,7 +66,7 @@ EXAMPLES = ''' # Create a new AWS Step Functions state machine - name: Setup HelloWorld state machine - aws_step_functions_state_machine: + community.aws.aws_step_functions_state_machine: name: "HelloWorldStateMachine" definition: "{{ lookup('file','state_machine.json') }}" role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole @@ -75,7 +75,7 @@ # Update an existing state machine - name: Change IAM Role and tags of HelloWorld state machine - aws_step_functions_state_machine: + community.aws.aws_step_functions_state_machine: name: HelloWorldStateMachine definition: "{{ lookup('file','state_machine.json') }}" role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole @@ -84,7 +84,7 @@ # Remove the AWS Step Functions state machine - name: Delete HelloWorld state machine - aws_step_functions_state_machine: + community.aws.aws_step_functions_state_machine: name: HelloWorldStateMachine state: absent ''' diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index b64efacbaf7..65ed30453c7 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -56,13 +56,13 @@ EXAMPLES = ''' - name: Start an execution of a state machine - aws_step_functions_state_machine_execution: + community.aws.aws_step_functions_state_machine_execution: name: an_execution_name execution_input: '{ "IsHelloWorldExample": true }' state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine" - name: Stop an execution of a state 
machine - aws_step_functions_state_machine_execution: + community.aws.aws_step_functions_state_machine_execution: action: stop execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" cause: "cause of task failure" diff --git a/aws_waf_condition.py b/aws_waf_condition.py index bab1f97772e..df6632ce1d6 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -138,7 +138,7 @@ EXAMPLES = ''' - name: create WAF byte condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_byte_condition filters: - field_to_match: header @@ -148,7 +148,7 @@ type: byte - name: create WAF geo condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_geo_condition filters: - country: US @@ -157,7 +157,7 @@ type: geo - name: create IP address condition - aws_waf_condition: + community.aws.aws_waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -165,7 +165,7 @@ type: ip - name: create WAF regex condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_regex_condition filters: - field_to_match: query_string @@ -178,7 +178,7 @@ type: regex - name: create WAF size condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_size_condition filters: - field_to_match: query_string @@ -187,7 +187,7 @@ type: size - name: create WAF sql injection condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_sql_condition filters: - field_to_match: query_string @@ -195,7 +195,7 @@ type: sql - name: create WAF xss condition - aws_waf_condition: + community.aws.aws_waf_condition: name: my_xss_condition filters: - field_to_match: query_string diff --git a/aws_waf_info.py b/aws_waf_info.py index 76fe5d084a7..9a895c847ea 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -35,14 +35,14 @@ EXAMPLES = ''' - name: obtain all WAF information - aws_waf_info: + community.aws.aws_waf_info: - name: obtain all information for a single WAF - aws_waf_info: + community.aws.aws_waf_info: name: test_waf - name: obtain all information for a single WAF Regional - aws_waf_info: + community.aws.aws_waf_info: name: test_waf waf_regional: true ''' diff --git a/aws_waf_rule.py b/aws_waf_rule.py index 5d1c8d3667b..54fb1b23f8b 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -42,7 +42,7 @@ type: str conditions: description: > - List of conditions used in the rule. M(aws_waf_condition) can be used to + List of conditions used in the rule. M(community.aws.aws_waf_condition) can be used to create new conditions. 
type: list elements: dict @@ -75,7 +75,7 @@ EXAMPLES = ''' - name: create WAF rule - aws_waf_rule: + community.aws.aws_waf_rule: name: my_waf_rule conditions: - name: my_regex_condition @@ -89,7 +89,7 @@ negated: yes - name: remove WAF rule - aws_waf_rule: + community.aws.aws_waf_rule: name: "my_waf_rule" state: absent diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index 22da20a7692..fe954dda1b2 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -85,7 +85,7 @@ EXAMPLES = ''' - name: create web ACL - aws_waf_web_acl: + community.aws.aws_waf_web_acl: name: my_web_acl rules: - name: my_rule @@ -96,7 +96,7 @@ state: present - name: delete the web acl - aws_waf_web_acl: + community.aws.aws_waf_web_acl: name: my_web_acl state: absent ''' diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 50f4f847af5..2c6166dc0d5 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -22,7 +22,7 @@ EXAMPLES = ''' - name: Get Exports - cloudformation_exports_info: + community.aws.cloudformation_exports_info: profile: 'my_aws_profile' region: 'my_region' register: cf_exports diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 69f53669f51..a7b476d032e 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -13,7 +13,7 @@ description: - Launches/updates/deletes AWS CloudFormation Stack Sets. notes: - - To make an individual stack, you want the M(cloudformation) module. + - To make an individual stack, you want the M(amazon.aws.cloudformation) module. options: name: description: @@ -177,7 +177,7 @@ EXAMPLES = ''' - name: Create a stack set with instances in two accounts - cloudformation_stack_set: + community.aws.cloudformation_stack_set: name: my-stack description: Test stack in two accounts state: present @@ -187,7 +187,7 @@ - us-east-1 - name: on subsequent calls, templates are optional but parameters and tags can be altered - cloudformation_stack_set: + community.aws.cloudformation_stack_set: name: my-stack state: present parameters: @@ -200,7 +200,7 @@ - us-east-1 - name: The same type of update, but wait for the update to complete in all stacks - cloudformation_stack_set: + community.aws.cloudformation_stack_set: name: my-stack state: present wait: true diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 6597d37bbef..26237ea1851 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -580,11 +580,9 @@ ''' -EXAMPLES = ''' - -# create a basic distribution with defaults and tags - -- cloudfront_distribution: +EXAMPLES = r''' +- name: create a basic distribution with defaults and tags + community.aws.cloudfront_distribution: state: present default_origin_domain_name: www.my-cloudfront-origin.com tags: @@ -592,31 +590,27 @@ Project: example project Priority: '1' -# update a distribution comment by distribution_id - -- cloudfront_distribution: +- name: update a distribution comment by distribution_id + community.aws.cloudfront_distribution: state: present distribution_id: E1RP5A2MJ8073O comment: modified by ansible cloudfront.py -# update a distribution comment by caller_reference - -- cloudfront_distribution: +- name: update a distribution comment by caller_reference + community.aws.cloudfront_distribution: state: present caller_reference: my cloudfront distribution 001 comment: modified by ansible cloudfront.py -# update a distribution's aliases and comment using the distribution_id as a reference - -- cloudfront_distribution: +- name: update a 
distribution's aliases and comment using the distribution_id as a reference + community.aws.cloudfront_distribution: state: present distribution_id: E1RP5A2MJ8073O comment: modified by cloudfront.py again aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ] -# update a distribution's aliases and comment using an alias as a reference - -- cloudfront_distribution: +- name: update a distribution's aliases and comment using an alias as a reference + community.aws.cloudfront_distribution: state: present caller_reference: my test distribution comment: modified by cloudfront.py again @@ -624,9 +618,8 @@ - www.my-distribution-source.com - zzz.aaa.io -# update a distribution's comment and aliases and tags and remove existing tags - -- cloudfront_distribution: +- name: update a distribution's comment and aliases and tags and remove existing tags + community.aws.cloudfront_distribution: state: present distribution_id: E15BU8SDCGSG57 comment: modified by cloudfront.py again @@ -636,9 +629,8 @@ Project: distribution 1.2 purge_tags: yes -# create a distribution with an origin, logging and default cache behavior - -- cloudfront_distribution: +- name: create a distribution with an origin, logging and default cache behavior + community.aws.cloudfront_distribution: state: present caller_reference: unique test distribution ID origins: @@ -674,9 +666,8 @@ enabled: false comment: this is a CloudFront distribution with logging -# delete a distribution - -- cloudfront_distribution: +- name: delete a distribution + community.aws.cloudfront_distribution: state: absent caller_reference: replaceable distribution ''' diff --git a/cloudfront_info.py b/cloudfront_info.py index 46aa714dbf2..a5bcb4ca572 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -13,7 +13,7 @@ description: - Gets information about an AWS CloudFront distribution. - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(cloudfront_info) module no longer returns C(ansible_facts)! + Note that the M(community.aws.cloudfront_info) module no longer returns C(ansible_facts)! requirements: - boto3 >= 1.0.0 - python >= 2.6 @@ -152,21 +152,21 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Get a summary of distributions -- cloudfront_info: +- name: Get a summary of distributions + community.aws.cloudfront_info: summary: true register: result -# Get information about a distribution -- cloudfront_info: +- name: Get information about a distribution + community.aws.cloudfront_info: distribution: true distribution_id: my-cloudfront-distribution-id register: result_did - debug: msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}" -# Get information about a distribution using the CNAME of the cloudfront distribution. -- cloudfront_info: +- name: Get information about a distribution using the CNAME of the cloudfront distribution. + community.aws.cloudfront_info: distribution: true domain_name_alias: www.my-website.com register: result_website @@ -176,36 +176,37 @@ # When the module is called as cloudfront_facts, return values are published # in ansible_facts['cloudfront'][] and can be used as follows. # Note that this is deprecated and will stop working in Ansible 2.13. 
-- cloudfront_facts: +- name: Gather facts + community.aws.cloudfront_facts: distribution: true distribution_id: my-cloudfront-distribution-id - debug: msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}" -- cloudfront_facts: +- community.aws.cloudfront_facts: distribution: true domain_name_alias: www.my-website.com - debug: msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}" -# Get all information about an invalidation for a distribution. -- cloudfront_facts: +- name: Get all information about an invalidation for a distribution. + community.aws.cloudfront_info: invalidation: true distribution_id: my-cloudfront-distribution-id invalidation_id: my-cloudfront-invalidation-id -# Get all information about a CloudFront origin access identity. -- cloudfront_facts: +- name: Get all information about a CloudFront origin access identity. + community.aws.cloudfront_info: origin_access_identity: true origin_access_identity_id: my-cloudfront-origin-access-identity-id -# Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) -- cloudfront_facts: +- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions) + community.aws.cloudfront_info: origin_access_identity: true origin_access_identity_id: my-cloudfront-origin-access-identity-id -# Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) -- cloudfront_facts: +- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions) + community.aws.cloudfront_info: all_lists: true ''' diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index fe84099931d..13a7d8c30b3 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -60,7 +60,7 @@ EXAMPLES = ''' - name: create a batch of invalidations using a distribution_id for a reference - cloudfront_invalidation: + community.aws.cloudfront_invalidation: distribution_id: E15BU8SDCGSG57 caller_reference: testing 123 target_paths: @@ -69,7 +69,7 @@ - /testpaththree/test3.ss - name: create a batch of invalidations using an alias as a reference and one path using a wildcard match - cloudfront_invalidation: + community.aws.cloudfront_invalidation: alias: alias.test.com caller_reference: testing 123 target_paths: diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index fd66d587ab2..17bfb6a71d1 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -62,19 +62,19 @@ EXAMPLES = ''' - name: create an origin access identity - cloudfront_origin_access_identity: + community.aws.cloudfront_origin_access_identity: state: present caller_reference: this is an example reference comment: this is an example comment - name: update an existing origin access identity using caller_reference as an identifier - cloudfront_origin_access_identity: + community.aws.cloudfront_origin_access_identity: origin_access_identity_id: E17DRN9XUOAHZX caller_reference: this is an example reference comment: this is a new comment - name: delete an existing origin access identity using caller_reference as an identifier - cloudfront_origin_access_identity: + community.aws.cloudfront_origin_access_identity: state: absent caller_reference: this is an example reference
comment: this is a new comment diff --git a/cloudtrail.py b/cloudtrail.py index c4a5f2e6e74..83e6cc0b0f1 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -105,7 +105,7 @@ EXAMPLES = ''' - name: create single region cloudtrail - cloudtrail: + community.aws.cloudtrail: state: present name: default s3_bucket_name: mylogbucket @@ -113,7 +113,7 @@ region: us-east-1 - name: create multi-region trail with validation and tags - cloudtrail: + community.aws.cloudtrail: state: present name: default s3_bucket_name: mylogbucket @@ -128,7 +128,7 @@ Name: default - name: show another valid kms_key_id - cloudtrail: + community.aws.cloudtrail: state: present name: default s3_bucket_name: mylogbucket @@ -136,7 +136,7 @@ # simply "12345678-1234-1234-1234-123456789012" would be valid too. - name: pause logging the trail we just created - cloudtrail: + community.aws.cloudtrail: state: present name: default enable_logging: false @@ -149,7 +149,7 @@ Name: default - name: delete a trail - cloudtrail: + community.aws.cloudtrail: state: absent name: default ''' diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 23f3efa7aec..29854fcc10b 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -107,7 +107,7 @@ ''' EXAMPLES = ''' -- cloudwatchevent_rule: +- community.aws.cloudwatchevent_rule: name: MyCronTask schedule_expression: "cron(0 20 * * ? *)" description: Run my scheduled task @@ -115,7 +115,7 @@ - id: MyTargetId arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction -- cloudwatchevent_rule: +- community.aws.cloudwatchevent_rule: name: MyDisabledCronTask schedule_expression: "rate(5 minutes)" description: Run my disabled scheduled task @@ -125,7 +125,7 @@ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction input: '{"foo": "bar"}' -- cloudwatchevent_rule: +- community.aws.cloudwatchevent_rule: name: MyCronTask state: absent ''' diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index 54687816f03..c2f10956f34 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -71,21 +71,21 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -- cloudwatchlogs_log_group: +- community.aws.cloudwatchlogs_log_group: log_group_name: test-log-group -- cloudwatchlogs_log_group: +- community.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group tags: { "Name": "test-log-group", "Env" : "QA" } -- cloudwatchlogs_log_group: +- community.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group tags: { "Name": "test-log-group", "Env" : "QA" } kms_key_id: arn:aws:kms:region:account-id:key/key-id -- cloudwatchlogs_log_group: +- community.aws.cloudwatchlogs_log_group: state: absent log_group_name: test-log-group diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 448bb954d40..f1b87c8d52d 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -30,7 +30,7 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
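As a hedged aside to the conversion below: the FQCN form composes with register and loop like any other task. This sketch assumes the log_groups list that the module's RETURN block documents; the group name is illustrative.

- community.aws.cloudwatchlogs_log_group_info:
    log_group_name: test-log-group    # illustrative name, matching the example below
  register: log_group_info

- debug:
    msg: "{{ item.log_group_name }}"
  loop: "{{ log_group_info.log_groups }}"   # log_groups per the module's documented return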
-- cloudwatchlogs_log_group_info: +- community.aws.cloudwatchlogs_log_group_info: log_group_name: test-log-group ''' diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index fd2c9221eff..b606a9ef8a9 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -13,7 +13,7 @@ short_description: Manage CloudWatch log group metric filter description: - Create, modify and delete CloudWatch log group metric filter. - - CloudWatch log group metric filter can be use with M(ec2_metric_alarm). + - CloudWatch log group metric filter can be used with M(community.aws.ec2_metric_alarm). requirements: - boto3 - botocore @@ -67,7 +67,7 @@ EXAMPLES = ''' - name: set metric filter on log group /fluentd/testcase - cloudwatchlogs_log_group_metric_filter: + community.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase filter_name: BoxFreeStorage filter_pattern: '{($.value = *) && ($.hostname = "box")}' @@ -78,7 +78,7 @@ metric_value: "$.value" - name: delete metric filter on log group /fluentd/testcase - cloudwatchlogs_log_group_metric_filter: + community.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase filter_name: BoxFreeStorage state: absent diff --git a/data_pipeline.py b/data_pipeline.py index 9f9ef5d818b..f52cf3f842e 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -131,7 +131,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Create pipeline -- data_pipeline: +- community.aws.data_pipeline: name: test-dp region: us-west-2 objects: "{{pipelineObjects}}" @@ -143,7 +143,7 @@ state: present # Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects -- data_pipeline: +- community.aws.data_pipeline: name: test-dp objects: - "id": "DefaultSchedule" @@ -166,13 +166,13 @@ state: active # Activate pipeline -- data_pipeline: +- community.aws.data_pipeline: name: test-dp region: us-west-2 state: active # Delete pipeline -- data_pipeline: +- community.aws.data_pipeline: name: test-dp region: us-west-2 state: absent diff --git a/dms_endpoint.py b/dms_endpoint.py index 1fea45a4a04..7fc1a253a9f 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -149,8 +149,8 @@ EXAMPLES = ''' # Note: These examples do not set authentication details -# Endpoint Creation -- dms_endpoint: +- name: Endpoint Creation + community.aws.dms_endpoint: state: absent endpointidentifier: 'testsource' endpointtype: source @@ -165,7 +165,7 @@ ''' RETURN = ''' # ''' -__metaclass__ = type + import traceback from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 9cb0caf060f..9354eeabc86 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -48,7 +48,7 @@ ''' EXAMPLES = ''' -- dms_replication_subnet_group: +- community.aws.dms_replication_subnet_group: state: present identifier: "dev-sngroup" description: "Development Subnet Group asdasdas" diff --git a/dynamodb_table.py b/dynamodb_table.py index 1edf139dbfa..ee5cd8470c0 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -121,8 +121,8 @@ ''' EXAMPLES = ''' -# Create dynamo table with hash and range primary key -- dynamodb_table: +- name: Create dynamo table with hash and range primary key +
community.aws.dynamodb_table: name: my-table region: us-east-1 hash_key_name: id @@ -134,15 +134,15 @@ tags: tag_name: tag_value -# Update capacity on existing dynamo table -- dynamodb_table: +- name: Update capacity on existing dynamo table + community.aws.dynamodb_table: name: my-table region: us-east-1 read_capacity: 10 write_capacity: 10 -# set index on existing dynamo table -- dynamodb_table: +- name: set index on existing dynamo table + community.aws.dynamodb_table: name: my-table region: us-east-1 indexes: @@ -156,8 +156,8 @@ read_capacity: 10 write_capacity: 10 -# Delete dynamo table -- dynamodb_table: +- name: Delete dynamo table + community.aws.dynamodb_table: name: my-table region: us-east-1 state: absent diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 654b311c72a..dbf7bcfc53c 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -42,13 +42,13 @@ EXAMPLES = ''' - name: enable TTL on my cowfacts table - dynamodb_ttl: + community.aws.dynamodb_ttl: state: enable table_name: cowfacts attribute_name: cow_deleted_date - name: disable TTL on my cowfacts table - dynamodb_ttl: + community.aws.dynamodb_ttl: state: disable table_name: cowfacts attribute_name: cow_deleted_date diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 974993a42ed..c6a1bb0ee45 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -75,14 +75,14 @@ ''' EXAMPLES = ''' -# Basic AMI Copy -- ec2_ami_copy: +- name: Basic AMI Copy + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx -# AMI copy wait until available -- ec2_ami_copy: +- name: AMI copy wait until available + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx @@ -90,16 +90,16 @@ wait_timeout: 1200 # Default timeout is 600 register: image_id -# Named AMI copy -- ec2_ami_copy: +- name: Named AMI copy + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx name: My-Awesome-AMI description: latest patch -# Tagged AMI copy (will not copy the same AMI twice) -- ec2_ami_copy: +- name: Tagged AMI copy (will not copy the same AMI twice) + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx @@ -108,15 +108,15 @@ Patch: 1.2.3 tag_equality: yes -# Encrypted AMI copy -- ec2_ami_copy: +- name: Encrypted AMI copy + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx encrypted: yes -# Encrypted AMI copy with specified key -- ec2_ami_copy: +- name: Encrypted AMI copy with specified key + community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx diff --git a/ec2_asg.py b/ec2_asg.py index 3f43193f102..3bfd6f131a9 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -12,7 +12,7 @@ short_description: Create or delete AWS AutoScaling Groups (ASGs) description: - Can create or delete AWS AutoScaling Groups. - - Can be used with the M(ec2_lc) module to manage Launch Configurations. + - Can be used with the M(community.aws.ec2_lc) module to manage Launch Configurations. author: "Gareth Rushgrove (@garethr)" requirements: [ "boto3", "botocore" ] options: @@ -45,7 +45,7 @@ elements: str launch_config_name: description: - - Name of the Launch configuration to use for the group. See the M(ec2_lc) module for managing these. + - Name of the Launch configuration to use for the group. See the M(community.aws.ec2_lc) module for managing these. - If unspecified then the current group value will be used.
One of I(launch_config_name) or I(launch_template) must be provided. type: str launch_template: @@ -243,7 +243,7 @@ EXAMPLES = ''' # Basic configuration with Launch Configuration -- ec2_asg: +- community.aws.ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] @@ -269,7 +269,7 @@ # will have the current launch configuration. - name: create launch config - ec2_lc: + community.aws.ec2_lc: name: my_new_lc image_id: ami-lkajsf key_name: mykey @@ -278,7 +278,7 @@ instance_type: m1.small assign_public_ip: yes -- ec2_asg: +- community.aws.ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 @@ -292,7 +292,7 @@ # To only replace a couple of instances instead of all of them, supply a list # to "replace_instances": -- ec2_asg: +- community.aws.ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 @@ -307,7 +307,7 @@ # Basic Configuration with Launch Template -- ec2_asg: +- community.aws.ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] @@ -325,7 +325,7 @@ # Basic Configuration with Launch Template using mixed instance policy -- ec2_asg: +- community.aws.ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 4c523db50de..2cce6380fd6 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -38,37 +38,37 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Find all groups -- ec2_asg_info: +- name: Find all groups + community.aws.ec2_asg_info: register: asgs -# Find a group with matching name/prefix -- ec2_asg_info: +- name: Find a group with matching name/prefix + community.aws.ec2_asg_info: name: public-webserver-asg register: asgs -# Find a group with matching tags -- ec2_asg_info: +- name: Find a group with matching tags + community.aws.ec2_asg_info: tags: project: webapp env: production register: asgs -# Find a group with matching name/prefix and tags -- ec2_asg_info: +- name: Find a group with matching name/prefix and tags + community.aws.ec2_asg_info: name: myproject tags: env: production register: asgs -# Fail if no groups are found -- ec2_asg_info: +- name: Fail if no groups are found + community.aws.ec2_asg_info: name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length == 0 }}" -# Fail if more than 1 group is found -- ec2_asg_info: +- name: Fail if more than 1 group is found + community.aws.ec2_asg_info: name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length > 1 }}" diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index 50e483d0ed1..9e01ca21aee 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -78,8 +78,8 @@ ''' EXAMPLES = ''' -# Create / Update lifecycle hook -- ec2_asg_lifecycle_hook: +- name: Create / Update lifecycle hook + community.aws.ec2_asg_lifecycle_hook: region: eu-central-1 state: present autoscaling_group_name: example @@ -88,8 +88,8 @@ heartbeat_timeout: 7000 default_result: ABANDON -# Delete lifecycle hook -- ec2_asg_lifecycle_hook: +- name: Delete lifecycle hook + community.aws.ec2_asg_lifecycle_hook: region: eu-central-1 state: absent autoscaling_group_name: example diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 675e69a430f..8ac3f73d46a 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -54,17 +54,16 @@ ''' EXAMPLES = ''' - -# Create 
Customer Gateway -- ec2_customer_gateway: +- name: Create Customer Gateway + community.aws.ec2_customer_gateway: bgp_asn: 12345 ip_address: 1.2.3.4 name: IndianapolisOffice region: us-east-1 register: cgw -# Delete Customer Gateway -- ec2_customer_gateway: +- name: Delete Customer Gateway + community.aws.ec2_customer_gateway: ip_address: 1.2.3.4 name: IndianapolisOffice state: absent diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index f37a0f35567..4872e691023 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -36,10 +36,10 @@ # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all customer gateways - ec2_customer_gateway_info: + community.aws.ec2_customer_gateway_info: - name: Gather information about a filtered list of customer gateways, based on tags - ec2_customer_gateway_info: + community.aws.ec2_customer_gateway_info: region: ap-southeast-2 filters: "tag:Name": test-customer-gateway @@ -47,7 +47,7 @@ register: cust_gw_info - name: Gather information about a specific customer gateway by specifying customer gateway ID - ec2_customer_gateway_info: + community.aws.ec2_customer_gateway_info: region: ap-southeast-2 customer_gateway_ids: - 'cgw-48841a09' diff --git a/ec2_eip.py b/ec2_eip.py index f4ba39f75a4..2859ccaee7f 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -99,39 +99,39 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: associate an elastic IP with an instance - ec2_eip: + community.aws.ec2_eip: device_id: i-1212f003 ip: 93.184.216.119 - name: associate an elastic IP with a device - ec2_eip: + community.aws.ec2_eip: device_id: eni-c8ad70f3 ip: 93.184.216.119 - name: associate an elastic IP with a device and allow reassociation - ec2_eip: + community.aws.ec2_eip: device_id: eni-c8ad70f3 public_ip: 93.184.216.119 allow_reassociation: true - name: disassociate an elastic IP from an instance - ec2_eip: + community.aws.ec2_eip: device_id: i-1212f003 ip: 93.184.216.119 state: absent - name: disassociate an elastic IP with a device - ec2_eip: + community.aws.ec2_eip: device_id: eni-c8ad70f3 ip: 93.184.216.119 state: absent - name: allocate a new elastic IP and associate it with an instance - ec2_eip: + community.aws.ec2_eip: device_id: i-1212f003 - name: allocate a new elastic IP without associating it to anything - ec2_eip: + community.aws.ec2_eip: state: present register: eip @@ -140,7 +140,7 @@ msg: "Allocated IP is {{ eip.public_ip }}" - name: provision new instances with ec2 - ec2: + amazon.aws.ec2: keypair: mykey instance_type: c1.medium image: ami-40603AD1 @@ -150,12 +150,12 @@ register: ec2 - name: associate new elastic IPs with each of the instances - ec2_eip: + community.aws.ec2_eip: device_id: "{{ item }}" loop: "{{ ec2.instance_ids }}" - name: allocate a new elastic IP inside a VPC in us-west-2 - ec2_eip: + community.aws.ec2_eip: region: us-west-2 in_vpc: true register: eip @@ -165,14 +165,14 @@ msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" - name: allocate eip - reuse unallocated ips (if found) with FREE tag - ec2_eip: + community.aws.ec2_eip: region: us-east-1 in_vpc: true reuse_existing_ip_allowed: true tag_name: FREE -- name: allocate eip - reuse unallocted ips if tag reserved is nope - ec2_eip: +- name: allocate eip - reuse unallocated ips if tag reserved is nope + community.aws.ec2_eip: region: us-east-1 in_vpc: true reuse_existing_ip_allowed: true @@ -180,13 +180,13 @@ tag_value: nope - 
name: allocate new eip - from servers given ipv4 pool - ec2_eip: + community.aws.ec2_eip: region: us-east-1 in_vpc: true public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 - name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic) - ec2_eip: + community.aws.ec2_eip: region: us-east-1 in_vpc: true reuse_existing_ip_allowed: true @@ -194,7 +194,7 @@ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 - name: allocate eip from pool - check if tag reserved_for exists and value is our hostname - ec2_eip: + community.aws.ec2_eip: region: us-east-1 in_vpc: true reuse_existing_ip_allowed: true diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 61c3a49ad8a..4c2f8c6756d 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -29,38 +29,39 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. -# List all EIP addresses in the current region. -- ec2_eip_info: +- name: List all EIP addresses in the current region. + community.aws.ec2_eip_info: register: regional_eip_addresses -# List all EIP addresses for a VM. -- ec2_eip_info: +- name: List all EIP addresses for a VM. + community.aws.ec2_eip_info: filters: instance-id: i-123456789 register: my_vm_eips -- debug: msg="{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}" +- debug: + msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}" -# List all EIP addresses for several VMs. -- ec2_eip_info: +- name: List all EIP addresses for several VMs. + community.aws.ec2_eip_info: filters: instance-id: - i-123456789 - i-987654321 register: my_vms_eips -# List all EIP addresses using the 'Name' tag as a filter. -- ec2_eip_info: +- name: List all EIP addresses using the 'Name' tag as a filter. + community.aws.ec2_eip_info: filters: tag:Name: www.example.com register: my_vms_eips -# List all EIP addresses using the Allocation-id as a filter -- ec2_eip_info: +- name: List all EIP addresses using the Allocation-id as a filter + community.aws.ec2_eip_info: filters: allocation-id: eipalloc-64de1b01 register: my_vms_eips diff --git a/ec2_elb.py b/ec2_elb.py index f820453a2d8..9ae1dc08b58 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -59,19 +59,15 @@ EXAMPLES = """ # basic pre_task and post_task example pre_tasks: - - name: Gathering ec2 facts - action: ec2_facts - name: Instance De-register - local_action: - module: ec2_elb + community.aws.ec2_elb: instance_id: "{{ ansible_ec2_instance_id }}" state: absent roles: - myrole post_tasks: - name: Instance Register - local_action: - module: ec2_elb + community.aws.ec2_elb: instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: present diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 215483a093f..bf753c2cbf6 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -40,38 +40,31 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Output format tries to match ec2_elb_lb module input parameters +# Output format tries to match amazon.aws.ec2_elb_lb module input parameters -# Gather information about all ELBs -- action: - module: ec2_elb_info +- name: Gather information about all ELBs + community.aws.ec2_elb_info: register: elb_info - -- action: - module: debug +- debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" -# Gather information about a particular ELB -- action: - module: ec2_elb_info +- name: Gather information about a particular ELB + community.aws.ec2_elb_info: names: frontend-prod-elb register: elb_info -- action: - module: debug +- debug: msg: "{{ elb_info.elbs.0.dns_name }}" -# Gather information about a set of ELBs -- action: - module: ec2_elb_info +- name: Gather information about a set of ELBs + community.aws.ec2_elb_info: names: - frontend-prod-elb - backend-prod-elb register: elb_info -- action: - module: debug +- debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" diff --git a/ec2_instance.py b/ec2_instance.py index 4238a7c15e7..8a682c56e12 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -14,7 +14,7 @@ - Create and manage AWS EC2 instances. - > Note: This module does not support creating - L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(ec2) module + L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(amazon.aws.ec2) module can create and manage spot instances. author: - Ryan Scott Brown (@ryansb) @@ -82,7 +82,7 @@ type: bool image: description: - - An image to use for the instance. The M(ec2_ami_info) module may be used to retrieve images. + - An image to use for the instance. The M(amazon.aws.ec2_ami_info) module may be used to retrieve images. One of I(image) or I(image_id) are required when instance is not already present. type: dict suboptions: @@ -117,14 +117,14 @@ vpc_subnet_id: description: - The subnet ID in which to launch the instance (VPC) - If none is provided, ec2_instance will chose the default zone of the default VPC. + If none is provided, M(community.aws.ec2_instance) will choose the default zone of the default VPC. aliases: ['subnet_id'] type: str network: description: - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or containing specifications for a single network interface. - - Use the ec2_eni module to create ENIs with special settings. + - Use the M(amazon.aws.ec2_eni) module to create ENIs with special settings. type: dict suboptions: interfaces: @@ -282,20 +282,20 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Terminate every running instance in a region. Use with EXTREME caution. -- ec2_instance: +- name: Terminate every running instance in a region. Use with EXTREME caution.
+ community.aws.ec2_instance: state: absent filters: instance-state-name: running -# restart a particular instance by its ID -- ec2_instance: +- name: restart a particular instance by its ID + community.aws.ec2_instance: state: restarted instance_ids: - i-12345678 -# start an instance with a public IP address -- ec2_instance: +- name: start an instance with a public IP address + community.aws.ec2_instance: name: "public-compute-instance" key_name: "prod-ssh-key" vpc_subnet_id: subnet-5ca1ab1e @@ -307,8 +307,8 @@ tags: Environment: Testing -# start an instance and Add EBS -- ec2_instance: +- name: start an instance and Add EBS + community.aws.ec2_instance: name: "public-withebs-instance" vpc_subnet_id: subnet-5ca1ab1e instance_type: t2.micro @@ -320,8 +320,8 @@ volume_size: 16 delete_on_termination: true -# start an instance with a cpu_options -- ec2_instance: +- name: start an instance with a cpu_options + community.aws.ec2_instance: name: "public-cpuoption-instance" vpc_subnet_id: subnet-5ca1ab1e tags: @@ -335,8 +335,8 @@ core_count: 1 threads_per_core: 1 -# start an instance and have it begin a Tower callback on boot -- ec2_instance: +- name: start an instance and have it begin a Tower callback on boot + community.aws.ec2_instance: name: "tower-callback-test" key_name: "prod-ssh-key" vpc_subnet_id: subnet-5ca1ab1e @@ -353,8 +353,8 @@ tags: SomeThing: "A value" -# start an instance with ENI (An existing ENI ID is required) -- ec2_instance: +- name: start an instance with ENI (An existing ENI ID is required) + community.aws.ec2_instance: name: "public-eni-instance" key_name: "prod-ssh-key" vpc_subnet_id: subnet-5ca1ab1e @@ -370,8 +370,8 @@ instance_type: t2.micro image_id: ami-123456 -# add second ENI interface -- ec2_instance: +- name: add second ENI interface + community.aws.ec2_instance: name: "public-eni-instance" network: interfaces: diff --git a/ec2_instance_info.py b/ec2_instance_info.py index d2da8b96b6f..e94aaa74b21 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -41,26 +41,26 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
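Ahead of the converted ec2_instance_info examples below, a hedged sketch of feeding the registered result into a follow-up task; the Environment tag and its value are assumptions for illustration, and the instances key follows the module's documented return.

- name: Gather running instances by tag and report their IDs
  community.aws.ec2_instance_info:
    filters:
      "tag:Environment": staging        # illustrative tag key/value
      instance-state-name: running
  register: staging_instances

- debug:
    msg: "{{ item.instance_id }}"
  loop: "{{ staging_instances.instances }}"   # instances per the module's documented return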
-# Gather information about all instances -- ec2_instance_info: +- name: Gather information about all instances + community.aws.ec2_instance_info: -# Gather information about all instances in AZ ap-southeast-2a -- ec2_instance_info: +- name: Gather information about all instances in AZ ap-southeast-2a + community.aws.ec2_instance_info: filters: availability-zone: ap-southeast-2a -# Gather information about a particular instance using ID -- ec2_instance_info: +- name: Gather information about a particular instance using ID + community.aws.ec2_instance_info: instance_ids: - i-12345678 -# Gather information about any instance with a tag key Name and value Example -- ec2_instance_info: +- name: Gather information about any instance with a tag key Name and value Example + community.aws.ec2_instance_info: filters: "tag:Name": Example -# Gather information about any instance in states "shutting-down", "stopping", "stopped" -- ec2_instance_info: +- name: Gather information about any instance in states "shutting-down", "stopping", "stopped" + community.aws.ec2_instance_info: filters: instance-state-name: [ "shutting-down", "stopping", "stopped" ] diff --git a/ec2_launch_template.py b/ec2_launch_template.py index d80a226f155..5c1a993dd58 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -12,7 +12,7 @@ description: - Create, modify, and delete EC2 Launch Templates, which can be used to create individual instances or with Autoscaling Groups. - - The I(ec2_instance) and I(ec2_asg) modules can, instead of specifying all + - The M(community.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all parameters on those tasks, be passed a Launch Template which contains settings like instance size, disk type, subnet, and more. requirements: @@ -219,7 +219,7 @@ type: str key_name: description: - - The name of the key pair. You can create a key pair using M(ec2_key). + - The name of the key pair. You can create a key pair using M(amazon.aws.ec2_key). - If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in. 
@@ -326,7 +326,7 @@ EXAMPLES = ''' - name: Create an ec2 launch template - ec2_launch_template: + community.aws.ec2_launch_template: name: "my_template" image_id: "ami-04b762b4289fba92b" key_name: my_ssh_key @@ -337,13 +337,13 @@ - name: > Create a new version of an existing ec2 launch template with a different instance type, while leaving an older version as the default version - ec2_launch_template: + community.aws.ec2_launch_template: name: "my_template" default_version: 1 instance_type: c5.4xlarge - name: Delete an ec2 launch template - ec2_launch_template: + community.aws.ec2_launch_template: name: "my_template" state: absent diff --git a/ec2_lc.py b/ec2_lc.py index 8e13c7ab13e..a8e6d87378a 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -194,7 +194,7 @@ # create a launch configuration using an AMI image and instance type as a basis - name: note that encrypted volumes are only supported in >= Ansible 2.4 - ec2_lc: + community.aws.ec2_lc: name: special image_id: ami-XXX key_name: default @@ -210,9 +210,8 @@ - device_name: /dev/sdb ephemeral: ephemeral0 -# create a launch configuration using a running instance id as a basis - -- ec2_lc: +- name: create a launch configuration using a running instance id as a basis + community.aws.ec2_lc: name: special instance_id: i-00a48b207ec59e948 key_name: default @@ -224,9 +223,8 @@ iops: 3000 delete_on_termination: true -# create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image - -- ec2_lc: +- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image + community.aws.ec2_lc: name: special image_id: ami-XXX key_name: default @@ -250,7 +248,7 @@ encrypted: no - name: Create launch configuration - ec2_lc: + community.aws.ec2_lc: name: lc1 image_id: ami-xxxx assign_public_ip: yes diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 043df722367..b1c457b945a 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -49,8 +49,8 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Search for the Launch Configurations that start with "app" -- ec2_lc_find: +- name: Search for the Launch Configurations that start with "app" + community.aws.ec2_lc_find: name_regex: app.* sort_order: descending limit: 2 diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 5e032332af3..8e1cf258851 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -54,15 +54,15 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
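A hedged sketch combining the sort options shown below to pick out the most recently created launch configuration; the launch_configurations return key is taken from the module's documented output and should be verified against the RETURN block.

- community.aws.ec2_lc_info:
    sort: created_time
    sort_order: descending
  register: lc_info

- debug:
    msg: "Newest launch configuration: {{ lc_info.launch_configurations[0] }}"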
-# Gather information about all launch configurations -- ec2_lc_info: +- name: Gather information about all launch configurations + community.aws.ec2_lc_info: -# Gather information about launch configuration with name "example" -- ec2_lc_info: +- name: Gather information about launch configuration with name "example" + community.aws.ec2_lc_info: name: example -# Gather information sorted by created_time from most recent to least recent -- ec2_lc_info: +- name: Gather information sorted by created_time from most recent to least recent + community.aws.ec2_lc_info: sort: created_time sort_order: descending ''' diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index debc170c123..42791c518bb 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -167,7 +167,7 @@ EXAMPLES = ''' - name: create alarm - ec2_metric_alarm: + community.aws.ec2_metric_alarm: state: present region: ap-southeast-2 name: "cpu-low" @@ -184,7 +184,7 @@ alarm_actions: ["action1","action2"] - name: Create an alarm to recover a failed instance - ec2_metric_alarm: + community.aws.ec2_metric_alarm: state: present region: us-west-1 name: "recover-instance" diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 4298c6522ec..b95069065aa 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -46,19 +46,19 @@ # Note: These examples do not set authentication details, see the AWS Guide # for details. -# Create a placement group. -- ec2_placement_group: +- name: Create a placement group. + community.aws.ec2_placement_group: name: my-cluster state: present -# Create a Spread placement group. -- ec2_placement_group: +- name: Create a Spread placement group. + community.aws.ec2_placement_group: name: my-cluster state: present strategy: spread -# Delete a placement group. -- ec2_placement_group: +- name: Delete a placement group. + community.aws.ec2_placement_group: name: my-cluster state: absent diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 33be33516d9..e9fa6338bad 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -29,22 +29,23 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. -# List all placement groups. -- ec2_placement_group_info: +- name: List all placement groups. + community.aws.ec2_placement_group_info: register: all_ec2_placement_groups -# List two placement groups. -- ec2_placement_group_info: +- name: List two placement groups. 
+ community.aws.ec2_placement_group_info: names: - my-cluster - my-other-cluster register: specific_ec2_placement_groups -- debug: msg="{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}" +- debug: + msg: "{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}" ''' diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 7beb95c0a0c..540b70527df 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -54,7 +54,7 @@ ''' EXAMPLES = ''' -- ec2_scaling_policy: +- community.aws.ec2_scaling_policy: state: present region: US-XXX name: "scaledown-policy" diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 38b22315a2f..68378d3b9c3 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -61,14 +61,14 @@ ''' EXAMPLES = ''' -# Basic Snapshot Copy -- ec2_snapshot_copy: +- name: Basic Snapshot Copy + community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx -# Copy Snapshot and wait until available -- ec2_snapshot_copy: +- name: Copy Snapshot and wait until available + community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx @@ -76,23 +76,23 @@ wait_timeout: 1200 # Default timeout is 600 register: snapshot_id -# Tagged Snapshot copy -- ec2_snapshot_copy: +- name: Tagged Snapshot copy + community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx tags: Name: Snapshot-Name -# Encrypted Snapshot copy -- ec2_snapshot_copy: +- name: Encrypted Snapshot copy + community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx encrypted: yes -# Encrypted Snapshot copy with specified key -- ec2_snapshot_copy: +- name: Encrypted Snapshot copy with specified key + community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 4f2f4dbd0c0..b75eb5510a4 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -89,14 +89,14 @@ EXAMPLES = ''' - name: Create a new transit gateway using defaults - ec2_transit_gateway: + community.aws.ec2_transit_gateway: state: present region: us-east-1 description: personal-testing register: created_tgw - name: Create a new transit gateway with options - ec2_transit_gateway: + community.aws.ec2_transit_gateway: asn: 64514 auto_associate: no auto_propagate: no @@ -110,13 +110,13 @@ status: testing - name: Remove a transit gateway by description - ec2_transit_gateway: + community.aws.ec2_transit_gateway: state: absent region: us-east-1 description: personal-testing - name: Remove a transit gateway by id - ec2_transit_gateway: + community.aws.ec2_transit_gateway: state: absent region: ap-southeast-2 transit_gateway_id: tgw-3a9aa123 diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index d0f1409a2b5..7e5f69c5917 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -36,22 +36,22 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Gather info about all transit gateways -- ec2_transit_gateway_info: +- name: Gather info about all transit gateways + community.aws.ec2_transit_gateway_info: -# Gather info about a particular transit gateway using filter transit gateway ID -- ec2_transit_gateway_info: +- name: Gather info about a particular transit gateway using filter transit gateway ID + community.aws.ec2_transit_gateway_info: filters: transit-gateway-id: tgw-02c42332e6b7da829 -# Gather info about a particular transit gateway using multiple option filters -- ec2_transit_gateway_info: +- name: Gather info about a particular transit gateway using multiple option filters + community.aws.ec2_transit_gateway_info: filters: options.dns-support: enable options.vpn-ecmp-support: enable -# Gather info about multiple transit gateways using module param -- ec2_transit_gateway_info: +- name: Gather info about multiple transit gateways using module param + community.aws.ec2_transit_gateway_info: transit_gateway_ids: - tgw-02c42332e6b7da829 - tgw-03c53443d5a8cb716 diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 9b4040484c3..e93ce7791e9 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -36,10 +36,10 @@ # Ensure that the VPC has an Internet Gateway. # The Internet Gateway ID is can be accessed via {{eigw.gateway_id}} for use in setting up NATs etc. -ec2_vpc_egress_igw: - vpc_id: vpc-abcdefgh - state: present -register: eigw +- community.aws.ec2_vpc_egress_igw: + vpc_id: vpc-abcdefgh + state: present + register: eigw ''' diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 1b89387bf36..7978c48dfde 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -22,7 +22,7 @@ type: str service: description: - - An AWS supported vpc endpoint service. Use the M(ec2_vpc_endpoint_info) + - An AWS supported vpc endpoint service. Use the M(community.aws.ec2_vpc_endpoint_info) module to describe the supported endpoint services. - Required when creating an endpoint. required: false @@ -99,7 +99,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
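Because the service option above points at M(community.aws.ec2_vpc_endpoint_info) for discovering service names, a hedged sketch of that lookup feeding an endpoint creation; the S3 service name follows the usual com.amazonaws.<region>.<service> pattern and the VPC ID is a placeholder.

- name: List supported endpoint services before creating one
  community.aws.ec2_vpc_endpoint_info:
    query: services
    region: ap-southeast-2
  register: endpoint_services

- name: Create an S3 gateway endpoint in the same region
  community.aws.ec2_vpc_endpoint:
    state: present
    region: ap-southeast-2
    vpc_id: vpc-12345678                       # placeholder VPC ID
    service: com.amazonaws.ap-southeast-2.s3   # assumed service name per AWS convention
  register: s3_endpoint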
- name: Create new vpc endpoint with a json template for policy - ec2_vpc_endpoint: + community.aws.ec2_vpc_endpoint: state: present region: ap-southeast-2 vpc_id: vpc-12345678 @@ -111,7 +111,7 @@ register: new_vpc_endpoint - name: Create new vpc endpoint with the default policy - ec2_vpc_endpoint: + community.aws.ec2_vpc_endpoint: state: present region: ap-southeast-2 vpc_id: vpc-12345678 @@ -122,7 +122,7 @@ register: new_vpc_endpoint - name: Create new vpc endpoint with json file - ec2_vpc_endpoint: + community.aws.ec2_vpc_endpoint: state: present region: ap-southeast-2 vpc_id: vpc-12345678 @@ -134,7 +134,7 @@ register: new_vpc_endpoint - name: Delete newly created vpc endpoint - ec2_vpc_endpoint: + community.aws.ec2_vpc_endpoint: state: absent vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" region: ap-southeast-2 diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 75ceb6b9bc7..0f23ca53217 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -43,19 +43,19 @@ EXAMPLES = ''' # Simple example of listing all support AWS services for VPC endpoints - name: List supported AWS endpoint services - ec2_vpc_endpoint_info: + community.aws.ec2_vpc_endpoint_info: query: services region: ap-southeast-2 register: supported_endpoint_services - name: Get all endpoints in ap-southeast-2 region - ec2_vpc_endpoint_info: + community.aws.ec2_vpc_endpoint_info: query: endpoints region: ap-southeast-2 register: existing_endpoints - name: Get all endpoints with specific filters - ec2_vpc_endpoint_info: + community.aws.ec2_vpc_endpoint_info: query: endpoints region: ap-southeast-2 filters: @@ -68,7 +68,7 @@ register: existing_endpoints - name: Get details on specific endpoint - ec2_vpc_endpoint_info: + community.aws.ec2_vpc_endpoint_info: query: endpoints region: ap-southeast-2 vpc_endpoint_ids: diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 0c85169c7d7..6b1a69911d4 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -44,10 +44,10 @@ # Ensure that the VPC has an Internet Gateway. # The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use in setting up NATs etc. -ec2_vpc_igw: - vpc_id: vpc-abcdefgh - state: present -register: igw +- community.aws.ec2_vpc_igw: + vpc_id: vpc-abcdefgh + state: present + register: igw ''' diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 29845d2ccf5..dcc07a4349b 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -36,13 +36,13 @@ # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all Internet Gateways for an account or profile - ec2_vpc_igw_info: + community.aws.ec2_vpc_igw_info: region: ap-southeast-2 profile: production register: igw_info - name: Gather information about a filtered list of Internet Gateways - ec2_vpc_igw_info: + community.aws.ec2_vpc_igw_info: region: ap-southeast-2 profile: production filters: @@ -50,7 +50,7 @@ register: igw_info - name: Gather information about a specific internet gateway by InternetGatewayId - ec2_vpc_igw_info: + community.aws.ec2_vpc_igw_info: region: ap-southeast-2 profile: production internet_gateway_ids: igw-c1231234 diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 23130310720..5c14fec8040 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -86,7 +86,7 @@ # Complete example to create and delete a network ACL # that allows SSH, HTTP and ICMP in, and all traffic out. 
- name: "Create and associate production DMZ network ACL with DMZ subnets" - ec2_vpc_nacl: + community.aws.ec2_vpc_nacl: vpc_id: vpc-12345678 name: prod-dmz-nacl region: ap-southeast-2 @@ -106,7 +106,7 @@ state: 'present' - name: "Remove the ingress and egress rules - defaults to deny all" - ec2_vpc_nacl: + community.aws.ec2_vpc_nacl: vpc_id: vpc-12345678 name: prod-dmz-nacl region: ap-southeast-2 @@ -120,20 +120,20 @@ state: present - name: "Remove the NACL subnet associations and tags" - ec2_vpc_nacl: + community.aws.ec2_vpc_nacl: vpc_id: 'vpc-12345678' name: prod-dmz-nacl region: ap-southeast-2 state: present - name: "Delete nacl and subnet associations" - ec2_vpc_nacl: + community.aws.ec2_vpc_nacl: vpc_id: vpc-12345678 name: prod-dmz-nacl state: absent - name: "Delete nacl by its id" - ec2_vpc_nacl: + community.aws.ec2_vpc_nacl: nacl_id: acl-33b4ee5b state: absent ''' diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index d4c0c431465..31b1099b04c 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -44,17 +44,17 @@ # Gather information about all Network ACLs: - name: Get All NACLs - register: all_nacls - ec2_vpc_nacl_info: + community.aws.ec2_vpc_nacl_info: region: us-west-2 + register: all_nacls # Retrieve default Network ACLs: - name: Get Default NACLs - register: default_nacls - ec2_vpc_nacl_info: + community.aws.ec2_vpc_nacl_info: region: us-west-2 filters: 'default': 'true' + register: default_nacls ''' RETURN = ''' diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index d8ee5167b67..306c8ac49c4 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -84,7 +84,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create new nat gateway with client token. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 eip_address: 52.1.1.1 @@ -93,7 +93,7 @@ register: new_nat_gateway - name: Create new nat gateway using an allocation-id. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 allocation_id: eipalloc-12345678 @@ -101,7 +101,7 @@ register: new_nat_gateway - name: Create new nat gateway, using an EIP address and wait for available status. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 eip_address: 52.1.1.1 @@ -110,7 +110,7 @@ register: new_nat_gateway - name: Create new nat gateway and allocate new EIP. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 wait: true @@ -118,7 +118,7 @@ register: new_nat_gateway - name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 wait: true @@ -127,7 +127,7 @@ register: new_nat_gateway - name: Delete nat gateway using discovered nat gateways from facts module. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: absent region: ap-southeast-2 wait: true @@ -137,7 +137,7 @@ loop: "{{ gateways_to_remove.result }}" - name: Delete nat gateway and wait for deleted status. - ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: absent nat_gateway_id: nat-12345678 wait: true @@ -145,7 +145,7 @@ region: ap-southeast-2 - name: Delete nat gateway and release EIP. 
- ec2_vpc_nat_gateway: + community.aws.ec2_vpc_nat_gateway: state: absent nat_gateway_id: nat-12345678 release_eip: true diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 85f96cc7340..83fb9b0f182 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -35,7 +35,7 @@ EXAMPLES = ''' # Simple example of listing all nat gateways - name: List all managed nat gateways in ap-southeast-2 - ec2_vpc_nat_gateway_info: + community.aws.ec2_vpc_nat_gateway_info: region: ap-southeast-2 register: all_ngws @@ -44,7 +44,7 @@ msg: "{{ all_ngws.result }}" - name: Get details on specific nat gateways - ec2_vpc_nat_gateway_info: + community.aws.ec2_vpc_nat_gateway_info: nat_gateway_ids: - nat-1234567891234567 - nat-7654321987654321 @@ -52,14 +52,14 @@ register: specific_ngws - name: Get all nat gateways with specific filters - ec2_vpc_nat_gateway_info: + community.aws.ec2_vpc_nat_gateway_info: region: ap-southeast-2 filters: state: ['pending'] register: pending_ngws - name: Get nat gateways with specific filter - ec2_vpc_nat_gateway_info: + community.aws.ec2_vpc_nat_gateway_info: region: ap-southeast-2 filters: subnet-id: subnet-12345678 diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index c029eb5afa0..b8e263c1242 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -61,7 +61,7 @@ EXAMPLES = ''' # Complete example to create and accept a local peering connection. - name: Create local account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-87654321 @@ -73,7 +73,7 @@ register: vpc_peer - name: Accept local VPC peering request - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" state: accept @@ -81,7 +81,7 @@ # Complete example to delete a local peering connection. - name: Create local account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-87654321 @@ -93,7 +93,7 @@ register: vpc_peer - name: delete a local VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" state: absent @@ -101,7 +101,7 @@ # Complete example to create and accept a cross account peering connection. - name: Create cross account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 @@ -114,7 +114,7 @@ register: vpc_peer - name: Accept peering connection from remote account - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" profile: bot03_profile_for_cross_account @@ -123,7 +123,7 @@ # Complete example to create and accept an intra-region peering connection. - name: Create intra-region VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: us-east-1 vpc_id: vpc-12345678 peer_vpc_id: vpc-87654321 @@ -136,7 +136,7 @@ register: vpc_peer - name: Accept peering connection from peer region - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: us-west-2 peering_id: "{{ vpc_peer.peering_id }}" state: accept @@ -144,7 +144,7 @@ # Complete example to create and reject a local peering connection. 
- name: Create local account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-87654321 @@ -156,14 +156,14 @@ register: vpc_peer - name: Reject a local VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" state: reject # Complete example to create and accept a cross account peering connection. - name: Create cross account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 @@ -176,7 +176,7 @@ register: vpc_peer - name: Accept a cross account VPC peering connection request - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" profile: bot03_profile_for_cross_account @@ -188,7 +188,7 @@ # Complete example to create and reject a cross account peering connection. - name: Create cross account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 @@ -201,7 +201,7 @@ register: vpc_peer - name: Reject a cross account VPC peering Connection - ec2_vpc_peer: + community.aws.ec2_vpc_peer: region: ap-southeast-2 peering_id: "{{ vpc_peer.peering_id }}" profile: bot03_profile_for_cross_account diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 75af1b65613..8472fc4f58c 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -35,7 +35,7 @@ EXAMPLES = ''' # Simple example of listing all VPC Peers - name: List all vpc peers - ec2_vpc_peering_info: + community.aws.ec2_vpc_peering_info: region: ap-southeast-2 register: all_vpc_peers @@ -44,7 +44,7 @@ msg: "{{ all_vpc_peers.result }}" - name: Get details on specific VPC peer - ec2_vpc_peering_info: + community.aws.ec2_vpc_peering_info: peer_connection_ids: - pcx-12345678 - pcx-87654321 @@ -52,7 +52,7 @@ register: all_vpc_peers - name: Get all vpc peers with specific filters - ec2_vpc_peering_info: + community.aws.ec2_vpc_peering_info: region: ap-southeast-2 filters: status-code: ['pending-acceptance'] diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 442efca01d7..0b8230ac7e1 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -85,7 +85,7 @@ # Basic creation example: - name: Set up public subnet route table - ec2_vpc_route_table: + community.aws.ec2_vpc_route_table: vpc_id: vpc-1245678 region: us-west-1 tags: @@ -100,7 +100,7 @@ register: public_route_table - name: Set up NAT-protected route table - ec2_vpc_route_table: + community.aws.ec2_vpc_route_table: vpc_id: vpc-1245678 region: us-west-1 tags: @@ -115,7 +115,7 @@ register: nat_route_table - name: delete route table - ec2_vpc_route_table: + community.aws.ec2_vpc_route_table: vpc_id: vpc-1245678 region: us-west-1 route_table_id: "{{ route_table.id }}" diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index acb203f1eab..4823f2db49e 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -29,21 +29,21 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Gather information about all VPC route tables -- ec2_vpc_route_table_info: +- name: Gather information about all VPC route tables + community.aws.ec2_vpc_route_table_info: -# Gather information about a particular VPC route table using route table ID -- ec2_vpc_route_table_info: +- name: Gather information about a particular VPC route table using route table ID + community.aws.ec2_vpc_route_table_info: filters: route-table-id: rtb-00112233 -# Gather information about any VPC route table with a tag key Name and value Example -- ec2_vpc_route_table_info: +- name: Gather information about any VPC route table with a tag key Name and value Example + community.aws.ec2_vpc_route_table_info: filters: "tag:Name": Example -# Gather information about any VPC route table within VPC with ID vpc-abcdef00 -- ec2_vpc_route_table_info: +- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00 + community.aws.ec2_vpc_route_table_info: filters: vpc-id: vpc-abcdef00 diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 004a64c394c..d1ea852d0e3 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -64,7 +64,7 @@ EXAMPLES = ''' - name: Create a new vgw attached to a specific VPC - ec2_vpc_vgw: + community.aws.ec2_vpc_vgw: state: present region: ap-southeast-2 profile: personal @@ -74,7 +74,7 @@ register: created_vgw - name: Create a new unattached vgw - ec2_vpc_vgw: + community.aws.ec2_vpc_vgw: state: present region: ap-southeast-2 profile: personal @@ -86,7 +86,7 @@ register: created_vgw - name: Remove a new vgw using the name - ec2_vpc_vgw: + community.aws.ec2_vpc_vgw: state: absent region: ap-southeast-2 profile: personal @@ -95,7 +95,7 @@ register: deleted_vgw - name: Remove a new vgw using the vpn_gateway_id - ec2_vpc_vgw: + community.aws.ec2_vpc_vgw: state: absent region: ap-southeast-2 profile: personal diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index a8c8da426c9..d526b54a372 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -36,13 +36,13 @@ # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all virtual gateways for an account or profile - ec2_vpc_vgw_info: + community.aws.ec2_vpc_vgw_info: region: ap-southeast-2 profile: production register: vgw_info - name: Gather information about a filtered list of Virtual Gateways - ec2_vpc_vgw_info: + community.aws.ec2_vpc_vgw_info: region: ap-southeast-2 profile: production filters: @@ -50,7 +50,7 @@ register: vgw_info - name: Gather information about a specific virtual gateway by VpnGatewayIds - ec2_vpc_vgw_info: + community.aws.ec2_vpc_vgw_info: region: ap-southeast-2 profile: production vpn_gateway_ids: vgw-c432f6a7 diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index c344cc8e73e..4d0f06fb5a3 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -145,13 +145,13 @@ # It is assumed that their matching environment variables are set. 
- name: create a VPN connection - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present vpn_gateway_id: vgw-XXXXXXXX customer_gateway_id: cgw-XXXXXXXX - name: modify VPN connection tags - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present vpn_connection_id: vpn-XXXXXXXX tags: @@ -159,12 +159,12 @@ Other: ansible-tag-2 - name: delete a connection - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: vpn_connection_id: vpn-XXXXXXXX state: absent - name: modify VPN tags (identifying VPN by filters) - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present filters: cidr: 194.168.1.0/24 @@ -177,7 +177,7 @@ static_only: true - name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present filters: vpn: vpn-XXXXXXXX @@ -189,7 +189,7 @@ TunnelInsideCidr: '169.254.100.5/30' - name: add routes and remove any preexisting ones - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present filters: vpn: vpn-XXXXXXXX @@ -199,14 +199,14 @@ purge_routes: true - name: remove all routes - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: present vpn_connection_id: vpn-XXXXXXXX routes: [] purge_routes: true - name: delete a VPN identified by filters - ec2_vpc_vpn: + community.aws.ec2_vpc_vpn: state: absent filters: tags: diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 427b4b8cec8..f34ddb8a937 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -37,16 +37,16 @@ EXAMPLES = ''' # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all vpn connections - ec2_vpc_vpn_info: + community.aws.ec2_vpc_vpn_info: - name: Gather information about a filtered list of vpn connections, based on tags - ec2_vpc_vpn_info: + community.aws.ec2_vpc_vpn_info: filters: "tag:Name": test-connection register: vpn_conn_info - name: Gather information about vpn connections by specifying connection IDs. - ec2_vpc_vpn_info: + community.aws.ec2_vpc_vpn_info: filters: vpn-gateway-id: vgw-cbe66beb register: vpn_conn_info diff --git a/ec2_win_password.py b/ec2_win_password.py index 782ff16829c..9ae8cd52dc8 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -64,7 +64,7 @@ EXAMPLES = ''' # Example of getting a password - name: get the Administrator password - ec2_win_password: + community.aws.ec2_win_password: profile: my-boto-profile instance_id: i-XXXXXX region: us-east-1 @@ -72,7 +72,7 @@ # Example of getting a password using a variable - name: get the Administrator password - ec2_win_password: + community.aws.ec2_win_password: profile: my-boto-profile instance_id: i-XXXXXX region: us-east-1 @@ -80,7 +80,7 @@ # Example of getting a password with a password protected key - name: get the Administrator password - ec2_win_password: + community.aws.ec2_win_password: profile: my-boto-profile instance_id: i-XXXXXX region: us-east-1 @@ -89,7 +89,7 @@ # Example of waiting for a password - name: get the Administrator password - ec2_win_password: + community.aws.ec2_win_password: profile: my-boto-profile instance_id: i-XXXXXX region: us-east-1 diff --git a/ecs_attribute.py b/ecs_attribute.py index 67b1664305e..37faa28ac03 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -62,8 +62,8 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Set attributes -- ecs_attribute: +- name: Set attributes + community.aws.ecs_attribute: state: present cluster: test-cluster ec2_instance_id: "{{ ec2_id }}" @@ -72,8 +72,8 @@ - migrated delegate_to: localhost -# Delete attributes -- ecs_attribute: +- name: Delete attributes + community.aws.ecs_attribute: state: absent cluster: test-cluster ec2_instance_id: "{{ ec2_id }}" diff --git a/ecs_cluster.py b/ecs_cluster.py index bf41601b011..3610dcc6a6e 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -50,18 +50,18 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Cluster creation -- ecs_cluster: +- name: Cluster creation + community.aws.ecs_cluster: name: default state: present -# Cluster deletion -- ecs_cluster: +- name: Cluster deletion + community.aws.ecs_cluster: name: default state: absent - name: Wait for register - ecs_cluster: + community.aws.ecs_cluster: name: "{{ new_cluster }}" state: has_instances delay: 10 diff --git a/ecs_ecr.py b/ecs_ecr.py index de9ab574fdb..23e1018c1f7 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -85,16 +85,21 @@ # If the repository does not exist, it is created. If it does exist, would not # affect any policies already on it. - name: ecr-repo - ecs_ecr: name=super/cool + community.aws.ecs_ecr: + name: super/cool - name: destroy-ecr-repo - ecs_ecr: name=old/busted state=absent + community.aws.ecs_ecr: + name: old/busted + state: absent - name: Cross account ecr-repo - ecs_ecr: registry_id=999999999999 name=cross/account + community.aws.ecs_ecr: + registry_id: 999999999999 + name: cross/account - name: set-policy as object - ecs_ecr: + community.aws.ecs_ecr: name: needs-policy-object policy: Version: '2008-10-17' @@ -109,22 +114,22 @@ - ecr:BatchCheckLayerAvailability - name: set-policy as string - ecs_ecr: + community.aws.ecs_ecr: name: needs-policy-string policy: "{{ lookup('template', 'policy.json.j2') }}" - name: delete-policy - ecs_ecr: + community.aws.ecs_ecr: name: needs-no-policy purge_policy: yes - name: create immutable ecr-repo - ecs_ecr: + community.aws.ecs_ecr: name: super/cool image_tag_mutability: immutable - name: set-lifecycle-policy - ecs_ecr: + community.aws.ecs_ecr: name: needs-lifecycle-policy lifecycle_policy: rules: @@ -139,7 +144,7 @@ type: expire - name: purge-lifecycle-policy - ecs_ecr: + community.aws.ecs_ecr: name: needs-no-lifecycle-policy purge_lifecycle_policy: true ''' diff --git a/ecs_service.py b/ecs_service.py index 7761d3d3a26..ddb2b8c1bba 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -195,7 +195,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Basic provisioning example -- ecs_service: +- community.aws.ecs_service: state: present name: console-test-service cluster: new_cluster @@ -203,7 +203,7 @@ desired_count: 0 - name: create ECS service on VPC network - ecs_service: + community.aws.ecs_service: state: present name: console-test-service cluster: new_cluster @@ -217,13 +217,13 @@ - my_security_group # Simple example to delete -- ecs_service: +- community.aws.ecs_service: name: default state: absent cluster: new_cluster # With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4) -- ecs_service: +- community.aws.ecs_service: state: present name: test-service cluster: test-cluster diff --git a/ecs_service_info.py b/ecs_service_info.py index e7628d36881..9843f638d55 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -13,7 +13,7 @@ description: - Lists or describes services in ECS. - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(ecs_service_info) module no longer returns C(ansible_facts)! + Note that the M(community.aws.ecs_service_info) module no longer returns C(ansible_facts)! author: - "Mark Chance (@Java1Guy)" - "Darek Kaczynski (@kaczynskid)" @@ -52,14 +52,14 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic listing example -- ecs_service_info: +- community.aws.ecs_service_info: cluster: test-cluster service: console-test-service details: true register: output # Basic listing example -- ecs_service_info: +- community.aws.ecs_service_info: cluster: test-cluster register: output ''' diff --git a/ecs_tag.py b/ecs_tag.py index fd49461fb07..a3c16d74681 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -59,7 +59,7 @@ EXAMPLES = r''' - name: Ensure tags are present on a resource - ecs_tag: + community.aws.ecs_tag: cluster_name: mycluster resource_type: cluster state: present @@ -68,7 +68,7 @@ env: prod - name: Remove the Env tag - ecs_tag: + community.aws.ecs_tag: cluster_name: mycluster resource_type: cluster tags: @@ -76,7 +76,7 @@ state: absent - name: Remove the Env tag if it's currently 'development' - ecs_tag: + community.aws.ecs_tag: cluster_name: mycluster resource_type: cluster tags: @@ -84,7 +84,7 @@ state: absent - name: Remove all tags except for Name from a cluster - ecs_tag: + community.aws.ecs_tag: cluster_name: mycluster resource_type: cluster tags: diff --git a/ecs_task.py b/ecs_task.py index 1f831a413ea..c11c6b2a792 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -90,7 +90,7 @@ EXAMPLES = ''' # Simple example of run task - name: Run task - ecs_task: + community.aws.ecs_task: operation: run cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef @@ -101,7 +101,7 @@ # Simple example of start task - name: Start a task - ecs_task: + community.aws.ecs_task: operation: start cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef @@ -123,7 +123,7 @@ register: task_output - name: RUN a task on Fargate - ecs_task: + community.aws.ecs_task: operation: run cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef @@ -139,7 +139,7 @@ register: task_output - name: Stop a task - ecs_task: + community.aws.ecs_task: operation: stop cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index def891bb527..e7d3864a785 100644 --- 
a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -105,7 +105,7 @@ EXAMPLES = ''' - name: Create task definition - ecs_taskdefinition: + community.aws.ecs_taskdefinition: containers: - name: simple-app cpu: 10 @@ -146,7 +146,7 @@ register: task_output - name: Create task definition - ecs_taskdefinition: + community.aws.ecs_taskdefinition: family: nginx containers: - name: nginx @@ -160,7 +160,7 @@ state: present - name: Create task definition - ecs_taskdefinition: + community.aws.ecs_taskdefinition: family: nginx containers: - name: nginx @@ -177,7 +177,7 @@ # Create Task Definition with Environment Variables and Secrets - name: Create task definition - ecs_taskdefinition: + community.aws.ecs_taskdefinition: family: nginx containers: - name: nginx diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index a6b1c627f4c..e4e93e0a90f 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -36,7 +36,7 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -- ecs_taskdefinition_info: +- community.aws.ecs_taskdefinition_info: task_definition: test-td ''' diff --git a/efs.py b/efs.py index 5d6ce13e707..95e5df78499 100644 --- a/efs.py +++ b/efs.py @@ -106,8 +106,8 @@ ''' EXAMPLES = ''' -# EFS provisioning -- efs: +- name: EFS provisioning + community.aws.efs: state: present name: myTestEFS tags: @@ -117,8 +117,8 @@ - subnet_id: subnet-748c5d03 security_groups: [ "sg-1a2b3c4d" ] -# Modifying EFS data -- efs: +- name: Modifying EFS data + community.aws.efs: state: present name: myTestEFS tags: @@ -127,8 +127,8 @@ - subnet_id: subnet-7654fdca security_groups: [ "sg-4c5d6f7a" ] -# Deleting EFS -- efs: +- name: Deleting EFS + community.aws.efs: state: absent name: myTestEFS ''' diff --git a/efs_info.py b/efs_info.py index 992b650627e..bc2ddeda5a5 100644 --- a/efs_info.py +++ b/efs_info.py @@ -13,7 +13,7 @@ description: - This module can be used to search Amazon EFS file systems. - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(efs_info) module no longer returns C(ansible_facts)! + Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! requirements: [ boto3 ] author: - "Ryan Sydnor (@ryansydnor)" @@ -45,16 +45,16 @@ EXAMPLES = ''' - name: Find all existing efs - efs_info: + community.aws.efs_info: register: result - name: Find efs using id - efs_info: + community.aws.efs_info: id: fs-1234abcd register: result - name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' - efs_info: + community.aws.efs_info: tags: Name: myTestNameTag targets: diff --git a/elasticache.py b/elasticache.py index e1c255ebbde..f649ea8010c 100644 --- a/elasticache.py +++ b/elasticache.py @@ -100,8 +100,8 @@ # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. 
-# Basic example -- elasticache: +- name: Basic example + community.aws.elasticache: name: "test-please-delete" state: present engine: memcached @@ -114,13 +114,13 @@ zone: us-east-1d -# Ensure cache cluster is gone -- elasticache: +- name: Ensure cache cluster is gone + community.aws.elasticache: name: "test-please-delete" state: absent -# Reboot cache cluster -- elasticache: +- name: Reboot cache cluster + community.aws.elasticache: name: "test-please-delete" state: rebooted diff --git a/elasticache_info.py b/elasticache_info.py index f7000116ca3..ffefc9b53a0 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -28,10 +28,10 @@ EXAMPLES = ''' - name: obtain all ElastiCache information - elasticache_info: + community.aws.elasticache_info: - name: obtain all information for a single ElastiCache cluster - elasticache_info: + community.aws.elasticache_info: name: test_elasticache ''' diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index c701d6a763d..c866fa9c83c 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -55,24 +55,24 @@ connection: local tasks: - name: 'Create a test parameter group' - elasticache_parameter_group: + community.aws.elasticache_parameter_group: name: 'test-param-group' group_family: 'redis3.2' description: 'This is a cache parameter group' state: 'present' - name: 'Modify a test parameter group' - elasticache_parameter_group: + community.aws.elasticache_parameter_group: name: 'test-param-group' values: activerehashing: yes client-output-buffer-limit-normal-hard-limit: 4 state: 'present' - name: 'Reset all modifiable parameters for the test parameter group' - elasticache_parameter_group: + community.aws.elasticache_parameter_group: name: 'test-param-group' state: reset - name: 'Delete a test parameter group' - elasticache_parameter_group: + community.aws.elasticache_parameter_group: name: 'test-param-group' state: 'absent' """ diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index f932544b505..4784dd53a29 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -52,16 +52,13 @@ EXAMPLES = """ # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. 
---- -- hosts: localhost - connection: local - tasks: - - name: 'Create a snapshot' - elasticache_snapshot: - name: 'test-snapshot' - state: 'present' - cluster_id: '{{ cluster }}' - replication_id: '{{ replication }}' + +- name: 'Create a snapshot' + community.aws.elasticache_snapshot: + name: 'test-snapshot' + state: 'present' + cluster_id: '{{ cluster }}' + replication_id: '{{ replication }}' """ RETURN = """ diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 72b41f08ea6..3048f0a7baa 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -41,8 +41,8 @@ ''' EXAMPLES = ''' -# Add or change a subnet group -- elasticache_subnet_group: +- name: Add or change a subnet group + community.aws.elasticache_subnet_group: state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group @@ -50,8 +50,8 @@ - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a subnet group -- elasticache_subnet_group: +- name: Remove a subnet group + community.aws.elasticache_subnet_group: state: absent name: norwegian-blue ''' diff --git a/elb_application_lb.py b/elb_application_lb.py index 28915d551fa..c314a60aa40 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -185,7 +185,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ELB and attach a listener -- elb_application_lb: +- community.aws.elb_application_lb: name: myelb security_groups: - sg-12345678 @@ -206,7 +206,7 @@ state: present # Create an ELB and attach a listener with logging enabled -- elb_application_lb: +- community.aws.elb_application_lb: access_logs_enabled: yes access_logs_s3_bucket: mybucket access_logs_s3_prefix: "logs" @@ -230,7 +230,7 @@ state: present # Create an ALB with listeners and rules -- elb_application_lb: +- community.aws.elb_application_lb: name: test-alb subnets: - subnet-12345678 @@ -293,7 +293,7 @@ state: present # Remove an ELB -- elb_application_lb: +- community.aws.elb_application_lb: name: myelb state: absent diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 2f9c1c39cc9..e1711dbef45 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -36,22 +36,22 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Gather information about all target groups -- elb_application_lb_info: +- name: Gather information about all ELBs + community.aws.elb_application_lb_info: -# Gather information about the target group attached to a particular ELB -- elb_application_lb_info: +- name: Gather information about a particular ELB + community.aws.elb_application_lb_info: load_balancer_arns: - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff" -# Gather information about a target groups named 'tg1' and 'tg2' -- elb_application_lb_info: +- name: Gather information about ELBs named 'elb1' and 'elb2' + community.aws.elb_application_lb_info: names: - elb1 - elb2 -# Gather information about specific ALB -- elb_application_lb_info: +- name: Gather information about a specific ALB + community.aws.elb_application_lb_info: names: "alb-name" region: "aws-region" register: alb_info diff --git a/elb_classic_lb.py b/elb_classic_lb.py index bd309465abe..0ae1bc7dda5 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -138,7 +138,7 @@ # Basic provisioning example (non-VPC) -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: present zones: - us-east-1a - us-east-1d listeners: - protocol: http # options are http, https, ssl, tcp load_balancer_port: 80 instance_port: 80 proxy_protocol: True - protocol: https load_balancer_port: 443 instance_protocol: http # optional, defaults to value of protocol setting instance_port: 80 # ssl certificate required for https or ssl ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" delegate_to: localhost @@ -159,7 +159,7 @@ # Internal ELB example -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-vpc" scheme: internal state: present instance_ids: - i-abcd1234 purge_instance_ids: true subnets: - subnet-abcd1234 - subnet-1a2b3c4d listeners: - protocol: http # options are http, https, ssl, tcp load_balancer_port: 80 instance_port: 80 delegate_to: localhost @@ -176,7 +176,7 @@ # Configure a health check and the access logs -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: present zones: - us-east-1d listeners: - protocol: http load_balancer_port: 80 instance_port: 80 health_check: ping_protocol: http # options are http, https, ssl, tcp ping_port: 80 ping_path: "/index.html" # not required for tcp or ssl response_timeout: 5 # seconds interval: 30 # seconds unhealthy_threshold: 2 healthy_threshold: 10 access_logs: interval: 5 # minutes (defaults to 60) s3_location: "my-bucket" # This value is required if access_logs is set s3_prefix: "logs" delegate_to: localhost @@ -200,20 +200,20 @@ # Ensure ELB is gone -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: absent delegate_to: localhost # Ensure ELB is gone and wait for check (for default timeout) -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: absent wait: yes delegate_to: localhost # Ensure ELB is gone and wait for check with timeout value -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: absent wait: yes wait_timeout: 600 delegate_to: localhost @@ -223,7 +223,7 @@ # Normally, this module will purge any listeners that exist on the ELB # but aren't specified in the listeners parameter. If purge_listeners is # false it leaves them alone -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: present zones: - us-east-1a - us-east-1d listeners: - protocol: http load_balancer_port: 80 instance_port: 80 purge_listeners: no delegate_to: localhost @@ -239,7 +239,7 @@ # Normally, this module will leave availability zones that are enabled # on the ELB alone. If purge_zones is true, then any extraneous zones # will be removed -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "test-please-delete" state: present zones: - us-east-1d listeners: - protocol: http load_balancer_port: 80 instance_port: 80 purge_zones: yes delegate_to: localhost @@ -253,7 +253,7 @@ # Creates an ELB and assigns a list of subnets to it.
-- elb_classic_lb: +- community.aws.elb_classic_lb: state: present name: 'New ELB' security_group_ids: 'sg-123456, sg-67890' @@ -268,7 +268,7 @@ # Create an ELB with connection draining, increased idle timeout and cross availability # zone load balancing -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "New ELB" state: present connection_draining_timeout: 60 @@ -285,7 +285,7 @@ delegate_to: localhost # Create an ELB with load balancer stickiness enabled -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -303,7 +303,7 @@ delegate_to: localhost # Create an ELB with application stickiness enabled -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -321,7 +321,7 @@ delegate_to: localhost # Create an ELB and add tags -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "New ELB" state: present region: us-east-1 @@ -339,7 +339,7 @@ delegate_to: localhost # Delete all tags from an ELB -- elb_classic_lb: +- community.aws.elb_classic_lb: name: "New ELB" state: present region: us-east-1 diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 915bf19aece..9341cb59b4b 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -43,10 +43,10 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Output format tries to match ec2_elb_lb module input parameters +# Output format tries to match amazon.aws.ec2_elb_lb module input parameters # Gather information about all ELBs -- elb_classic_lb_info: +- community.aws.elb_classic_lb_info: register: elb_info - debug: @@ -54,7 +54,7 @@ loop: "{{ elb_info.elbs }}" # Gather information about a particular ELB -- elb_classic_lb_info: +- community.aws.elb_classic_lb_info: names: frontend-prod-elb register: elb_info @@ -62,7 +62,7 @@ msg: "{{ elb_info.elbs.0.dns_name }}" # Gather information about a set of ELBs -- elb_classic_lb_info: +- community.aws.elb_classic_lb_info: names: - frontend-prod-elb - backend-prod-elb diff --git a/elb_instance.py b/elb_instance.py index 0c41ef3a6e4..20992459f57 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -64,10 +64,8 @@ EXAMPLES = """ # basic pre_task and post_task example pre_tasks: - - name: Gathering ec2 facts - action: ec2_facts - name: Instance De-register - elb_instance: + community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" state: absent delegate_to: localhost @@ -75,7 +73,7 @@ - myrole post_tasks: - name: Instance Register - elb_instance: + community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: present diff --git a/elb_network_lb.py b/elb_network_lb.py index 616c8e061be..a3405fd5cae 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -135,8 +135,8 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Create an ELB and attach a listener -- elb_network_lb: +- name: Create an ELB and attach a listener + community.aws.elb_network_lb: name: myelb subnets: - subnet-012345678 @@ -149,8 +149,8 @@ TargetGroupName: mytargetgroup # Required. The name of the target group state: present -# Create an ELB with an attached Elastic IP address -- elb_network_lb: +- name: Create an ELB with an attached Elastic IP address + community.aws.elb_network_lb: name: myelb subnet_mappings: - SubnetId: subnet-012345678 @@ -163,8 +163,8 @@ TargetGroupName: mytargetgroup # Required. 
The name of the target group state: present -# Remove an ELB +- name: Remove an ELB + community.aws.elb_network_lb: name: myelb state: absent diff --git a/elb_target.py b/elb_target.py index 553e5f7f860..53d715578b8 100644 --- a/elb_target.py +++ b/elb_target.py @@ -75,27 +75,27 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Register an IP address target to a target group -- elb_target: +- name: Register an IP address target to a target group + community.aws.elb_target: target_group_name: myiptargetgroup target_id: i-1234567 state: present -# Register an instance target to a target group -- elb_target: +- name: Register an instance target to a target group + community.aws.elb_target: target_group_name: mytargetgroup target_id: i-1234567 state: present -# Deregister a target from a target group -- elb_target: +- name: Deregister a target from a target group + community.aws.elb_target: target_group_name: mytargetgroup target_id: i-1234567 state: absent # Modify a target to use a different port -# Register a target to a target group -- elb_target: +- name: Register a target to a target group + community.aws.elb_target: target_group_name: mytargetgroup target_id: i-1234567 target_port: 8080 diff --git a/elb_target_group.py b/elb_target_group.py index 3818d5da7df..43723bfd6e9 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -168,16 +168,16 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Create a target group with a default health check -- elb_target_group: +- name: Create a target group with a default health check + community.aws.elb_target_group: name: mytargetgroup protocol: http port: 80 vpc_id: vpc-01234567 state: present -# Modify the target group with a custom health check -- elb_target_group: +- name: Modify the target group with a custom health check + community.aws.elb_target_group: name: mytargetgroup protocol: http port: 80 @@ -192,13 +192,13 @@ unhealthy_threshold_count: 3 state: present -# Delete a target group -- elb_target_group: +- name: Delete a target group + community.aws.elb_target_group: name: mytargetgroup state: absent -# Create a target group with instance targets -- elb_target_group: +- name: Create a target group with instance targets + community.aws.elb_target_group: name: mytargetgroup protocol: http port: 81 @@ -215,8 +215,8 @@ wait_timeout: 200 wait: True -# Create a target group with IP address targets -- elb_target_group: +- name: Create a target group with IP address targets + community.aws.elb_target_group: name: mytargetgroup protocol: http port: 81 @@ -239,10 +239,10 @@ # itself is allowed to invoke the lambda function.
# therefore you need first to create an empty target group # to receive its arn, second, allow the target group -# to invoke the lamba function and third, add the target +# to invoke the lambda function and third, add the target # to the target group - name: first, create empty target group - elb_target_group: + community.aws.elb_target_group: name: my-lambda-targetgroup target_type: lambda state: present @@ -250,7 +250,7 @@ register: out - name: second, allow invoke of the lambda - lambda_policy: + community.aws.lambda_policy: state: "{{ state | default('present') }}" function_name: my-lambda-function statement_id: someID @@ -259,7 +259,7 @@ source_arn: "{{ out.target_group_arn }}" - name: third, add target - elb_target_group: + community.aws.elb_target_group: name: my-lambda-targetgroup target_type: lambda state: present diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 2505026b0d7..5c4fa2f1f64 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -47,15 +47,15 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Gather information about all target groups -- elb_target_group_info: +- name: Gather information about all target groups + community.aws.elb_target_group_info: -# Gather information about the target group attached to a particular ELB -- elb_target_group_info: +- name: Gather information about the target group attached to a particular ELB + community.aws.elb_target_group_info: load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff" -# Gather information about a target groups named 'tg1' and 'tg2' -- elb_target_group_info: +- name: Gather information about target groups named 'tg1' and 'tg2' + community.aws.elb_target_group_info: names: - tg1 - tg2 diff --git a/elb_target_info.py b/elb_target_info.py index 40a9ac26420..af2dc55cd5b 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -36,14 +36,14 @@ ''' EXAMPLES = """ -# practical use case - dynamically deregistering and reregistering nodes +# practical use case - dynamically de-registering and re-registering nodes - name: Get EC2 Metadata - action: ec2_metadata_facts + amazon.aws.ec2_metadata_facts: - name: Get initial list of target groups delegate_to: localhost - elb_target_info: + community.aws.elb_target_info: instance_id: "{{ ansible_ec2_instance_id }}" region: "{{ ansible_ec2_placement_region }}" register: target_info @@ -54,7 +54,7 @@ - name: Deregister instance from all target groups delegate_to: localhost - elb_target: + community.aws.elb_target: target_group_arn: "{{ item.0.target_group_arn }}" target_port: "{{ item.1.target_port }}" target_az: "{{ item.1.target_az }}" @@ -72,7 +72,7 @@ - name: wait for all targets to deregister simultaneously delegate_to: localhost - elb_target_info: + community.aws.elb_target_info: get_unused_target_groups: false instance_id: "{{ ansible_ec2_instance_id }}" region: "{{ ansible_ec2_placement_region }}" @@ -82,7 +82,7 @@ delay: 10 - name: reregister in elbv2s - elb_target: + community.aws.elb_target: region: "{{ ansible_ec2_placement_region }}" target_group_arn: "{{ item.0.target_group_arn }}" target_port: "{{ item.1.target_port }}" @@ -97,7 +97,7 @@ # wait until all groups associated with this instance are 'healthy' or # 'unused' - name: wait for registration - elb_target_info: + community.aws.elb_target_info: get_unused_target_groups: false instance_id: "{{ ansible_ec2_instance_id }}" region: "{{
ansible_ec2_placement_region }}" diff --git a/execute_lambda.py b/execute_lambda.py index 45d0eef8e2b..846cf47d22a 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -74,7 +74,7 @@ ''' EXAMPLES = ''' -- execute_lambda: +- community.aws.execute_lambda: name: test-function # the payload is automatically serialized and sent to the function payload: @@ -84,11 +84,11 @@ # Test that you have sufficient permissions to execute a Lambda function in # another account -- execute_lambda: +- community.aws.execute_lambda: function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function dry_run: true -- execute_lambda: +- community.aws.execute_lambda: name: test-function payload: foo: bar @@ -99,12 +99,12 @@ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda # Pass the Lambda event payload as a json file. -- execute_lambda: +- community.aws.execute_lambda: name: test-function payload: "{{ lookup('file','lambda_event.json') }}" register: response -- execute_lambda: +- community.aws.execute_lambda: name: test-function version_qualifier: PRODUCTION ''' diff --git a/iam.py b/iam.py index 9b5bae141c2..57d7ca653e3 100644 --- a/iam.py +++ b/iam.py @@ -99,9 +99,8 @@ EXAMPLES = ''' # Basic user creation example -tasks: - name: Create two new IAM users with API keys - iam: + community.aws.iam: iam_type: user name: "{{ item }}" state: present @@ -113,9 +112,8 @@ # Advanced example, create two new groups and add the pre-existing user # jdavila to both groups. -task: - name: Create Two Groups, Mario and Luigi - iam: + community.aws.iam: iam_type: group name: "{{ item }}" state: present @@ -124,8 +122,8 @@ - Luigi register: new_groups -- name: - iam: +- name: Update user + community.aws.iam: iam_type: user name: jdavila state: update @@ -134,7 +132,7 @@ # Example of role with custom trust policy for Lambda service - name: Create IAM role with custom trust relationship - iam: + community.aws.iam: iam_type: role name: AAALambdaTestRole state: present diff --git a/iam_cert.py b/iam_cert.py index 24e317b71c6..1ea54c859d3 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -83,24 +83,24 @@ ''' EXAMPLES = ''' -# Basic server certificate upload from local file -- iam_cert: +- name: Basic server certificate upload from local file + community.aws.iam_cert: name: very_ssl state: present cert: "{{ lookup('file', 'path/to/cert') }}" key: "{{ lookup('file', 'path/to/key') }}" cert_chain: "{{ lookup('file', 'path/to/certchain') }}" -# Basic server certificate upload -- iam_cert: +- name: Basic server certificate upload + community.aws.iam_cert: name: very_ssl state: present cert: path/to/cert key: path/to/key cert_chain: path/to/certchain -# Server certificate upload using key string -- iam_cert: +- name: Server certificate upload using key string + community.aws.iam_cert: name: very_ssl state: present path: "/a/cert/path/" @@ -108,8 +108,8 @@ key: vault_body_of_privcertkey cert_chain: body_of_myverytrustedchain -# Basic rename of existing certificate -- iam_cert: +- name: Basic rename of existing certificate + community.aws.iam_cert: name: very_ssl new_name: new_very_ssl state: present diff --git a/iam_group.py b/iam_group.py index 672de888fba..121801275eb 100644 --- a/iam_group.py +++ b/iam_group.py @@ -36,7 +36,7 @@ managed_policies: description: - A list of managed policy ARNs or friendly names to attach to the role. - - To embed an inline policy, use M(iam_policy). + - To embed an inline policy, use M(community.aws.iam_policy). 
required: false type: list elements: str @@ -76,20 +76,20 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Create a group -- iam_group: +- name: Create a group + community.aws.iam_group: name: testgroup1 state: present -# Create a group and attach a managed policy using its ARN -- iam_group: +- name: Create a group and attach a managed policy using its ARN + community.aws.iam_group: name: testgroup1 managed_policies: - arn:aws:iam::aws:policy/AmazonSNSFullAccess state: present -# Create a group with users as members and attach a managed policy using its ARN -- iam_group: +- name: Create a group with users as members and attach a managed policy using its ARN + community.aws.iam_group: name: testgroup1 managed_policies: - arn:aws:iam::aws:policy/AmazonSNSFullAccess @@ -98,23 +98,22 @@ - test_user2 state: present -# Remove all managed policies from an existing group with an empty list -- iam_group: +- name: Remove all managed policies from an existing group with an empty list + community.aws.iam_group: name: testgroup1 state: present purge_policies: true -# Remove all group members from an existing group -- iam_group: +- name: Remove all group members from an existing group + community.aws.iam_group: name: testgroup1 managed_policies: - arn:aws:iam::aws:policy/AmazonSNSFullAccess purge_users: true state: present - -# Delete the group -- iam_group: +- name: Delete the group + community.aws.iam_group: name: testgroup1 state: absent diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 06e31a906d5..0abe10faf5d 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -61,7 +61,7 @@ EXAMPLES = ''' # Create Policy ex nihilo - name: Create IAM Managed Policy - iam_managed_policy: + community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy_description: "A Helpful managed policy" policy: "{{ lookup('template', 'managed_policy.json.j2') }}" @@ -69,14 +69,14 @@ # Update a policy with a new default version - name: Create IAM Managed Policy - iam_managed_policy: + community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: "{{ lookup('file', 'managed_policy_update.json') }}" state: present # Update a policy with a new non default version - name: Create IAM Managed Policy - iam_managed_policy: + community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: "{{ lookup('file', 'managed_policy_update.json') }}" make_default: false @@ -84,7 +84,7 @@ # Update a policy and make it the only version and the default version - name: Create IAM Managed Policy - iam_managed_policy: + community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}" only_version: true @@ -92,7 +92,7 @@ # Remove a policy - name: Create IAM Managed Policy - iam_managed_policy: + community.aws.iam_managed_policy: policy_name: "ManagedPolicy" state: absent ''' diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index bb59d8d8177..07e98d6851c 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -42,15 +42,17 @@ user_name: pwnall """ -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# List MFA devices (more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html) -- iam_mfa_device_info: +# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html +- name: List MFA devices + community.aws.iam_mfa_device_info: register: mfa_devices -# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) -- sts_assume_role: +# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html +- name: Assume an existing role + community.aws.sts_assume_role: mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}" role_arn: "arn:aws:iam::123456789012:role/someRole" role_session_name: "someRoleSession" diff --git a/iam_password_policy.py b/iam_password_policy.py index 53c1d5bc479..a26821e10a4 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -82,7 +82,7 @@ EXAMPLES = ''' - name: Password policy for AWS account - iam_password_policy: + community.aws.iam_password_policy: state: present min_pw_length: 8 require_symbols: false diff --git a/iam_policy.py b/iam_policy.py index 5eb4694c3c7..7ff98790146 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -12,8 +12,8 @@ short_description: Manage inline IAM policies for users, groups, and roles description: - Allows uploading or removing inline IAM policies for IAM users, groups or roles. - - To administer managed policies please see M(iam_user), M(iam_role), - M(iam_group) and M(iam_managed_policy) + - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), + M(community.aws.iam_group) and M(community.aws.iam_managed_policy) options: iam_type: description: @@ -70,7 +70,7 @@ EXAMPLES = ''' # Create a policy with the name of 'Admin' to the group 'administrators' - name: Assign a policy called Admin to the administrators group - iam_policy: + community.aws.iam_policy: iam_type: group iam_name: administrators policy_name: Admin @@ -80,7 +80,7 @@ # Advanced example, create two new groups and add a READ-ONLY policy to both # groups. 
- name: Create Two Groups, Mario and Luigi - iam: + community.aws.iam: iam_type: group name: "{{ item }}" state: present @@ -90,7 +90,7 @@ register: new_groups - name: Apply READ-ONLY policy to new groups that have been recently created - iam_policy: + community.aws.iam_policy: iam_type: group iam_name: "{{ item.created_group.group_name }}" policy_name: "READ-ONLY" @@ -100,7 +100,7 @@ # Create a new S3 policy with prefix per user - name: Create S3 policy from template - iam_policy: + community.aws.iam_policy: iam_type: user iam_name: "{{ item.user }}" policy_name: "s3_limited_access_{{ item.prefix }}" diff --git a/iam_policy_info.py b/iam_policy_info.py index 73eded00824..8df1c9fc216 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -40,13 +40,13 @@ ''' EXAMPLES = ''' -# Describe all inline IAM policies on an IAM User -- iam_policy_info: +- name: Describe all inline IAM policies on an IAM User + community.aws.iam_policy_info: iam_type: user iam_name: example_user -# Describe a specific inline policy on an IAM Role -- iam_policy_info: +- name: Describe a specific inline policy on an IAM Role + community.aws.iam_policy_info: iam_type: role iam_name: example_role policy_name: example_policy diff --git a/iam_role.py b/iam_role.py index dc96bc93f3f..1ce2ceae9d0 100644 --- a/iam_role.py +++ b/iam_role.py @@ -45,7 +45,7 @@ description: - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names. - To remove all policies set I(purge_polices=true) and I(managed_policies=[None]). - - To embed an inline policy, use M(iam_policy). + - To embed an inline policy, use M(community.aws.iam_policy). aliases: ['managed_policy'] type: list max_session_duration: @@ -98,7 +98,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a role with description and tags - iam_role: + community.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" description: This is My New Role @@ -106,20 +106,20 @@ env: dev - name: "Create a role and attach a managed policy called 'PowerUserAccess'" - iam_role: + community.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" managed_policies: - arn:aws:iam::aws:policy/PowerUserAccess - name: Keep the role created above but remove all managed policies - iam_role: + community.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" managed_policies: [] - name: Delete the role - iam_role: + community.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" state: absent diff --git a/iam_role_info.py b/iam_role_info.py index 7fdb4ac58fa..ac000ae8552 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -36,16 +36,16 @@ ''' EXAMPLES = ''' -# find all existing IAM roles -- iam_role_info: +- name: find all existing IAM roles + community.aws.iam_role_info: register: result -# describe a single role -- iam_role_info: +- name: describe a single role + community.aws.iam_role_info: name: MyIAMRole -# describe all roles matching a path prefix -- iam_role_info: +- name: describe all roles matching a path prefix + community.aws.iam_role_info: path_prefix: /application/path ''' diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 7f8077bbb8a..0172a4cf47e 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -57,7 +57,7 @@ # It is assumed that their matching environment variables are set. 
# Creates a new iam saml identity provider if not present - name: saml provider - iam_saml_federation: + community.aws.iam_saml_federation: name: example1 # the > below opens an indented block, so no escaping/quoting is needed when in the indentation level under this key saml_metadata_document: > @@ -65,13 +65,13 @@ /home/ubuntu/test.txt" register: my_instance -# Delete an instance -- lightsail: +- name: Delete an instance + community.aws.lightsail: state: absent region: us-east-1 name: my_instance diff --git a/rds.py b/rds.py index cc123f9c22f..2f4728bbbad 100644 --- a/rds.py +++ b/rds.py @@ -15,7 +15,7 @@ - When creating an instance it can be either a new instance or a read-only replica of an existing instance. - This module has a dependency on python-boto >= 2.5 and will soon be deprecated. - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0). - - Please use boto3 based M(rds_instance) instead. + - Please use boto3 based M(community.aws.rds_instance) instead. options: command: description: @@ -235,8 +235,8 @@ # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD EXAMPLES = ''' -# Basic mysql provisioning example -- rds: +- name: Basic mysql provisioning example + community.aws.rds: command: create instance_name: new-database db_engine: MySQL @@ -248,35 +248,35 @@ Environment: testing Application: cms -# Create a read-only replica and wait for it to become available -- rds: +- name: Create a read-only replica and wait for it to become available + community.aws.rds: command: replicate instance_name: new-database-replica source_instance: new_database wait: yes wait_timeout: 600 -# Delete an instance, but create a snapshot before doing so -- rds: +- name: Delete an instance, but create a snapshot before doing so + community.aws.rds: command: delete instance_name: new-database snapshot: new_database_snapshot -# Get facts about an instance -- rds: +- name: Get facts about an instance + community.aws.rds: command: facts instance_name: new-database register: new_database_facts -# Rename an instance and wait for the change to take effect -- rds: +- name: Rename an instance and wait for the change to take effect + community.aws.rds: command: modify instance_name: new-database new_instance_name: renamed-database wait: yes -# Reboot an instance and wait for it to become available again -- rds: +- name: Reboot an instance and wait for it to become available again + community.aws.rds: command: reboot instance_name: database wait: yes @@ -284,27 +284,25 @@ # Restore a Postgres db instance from a snapshot, wait for it to become available again, and # then modify it to add your security group. Also, display the new endpoint. 
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI -- local_action: - module: rds - command: restore - snapshot: mypostgres-snapshot - instance_name: MyNewInstanceName - region: us-west-2 - zone: us-west-2b - subnet: default-vpc-xx441xxx - publicly_accessible: yes - wait: yes - wait_timeout: 600 - tags: - Name: pg1_test_name_tag +- community.aws.rds: + command: restore + snapshot: mypostgres-snapshot + instance_name: MyNewInstanceName + region: us-west-2 + zone: us-west-2b + subnet: default-vpc-xx441xxx + publicly_accessible: yes + wait: yes + wait_timeout: 600 + tags: + Name: pg1_test_name_tag register: rds -- local_action: - module: rds - command: modify - instance_name: MyNewInstanceName - region: us-west-2 - vpc_security_groups: sg-xxx945xx +- community.aws.rds: + command: modify + instance_name: MyNewInstanceName + region: us-west-2 + vpc_security_groups: sg-xxx945xx - debug: msg: "The new db endpoint is {{ rds.instance.endpoint }}" diff --git a/rds_instance.py b/rds_instance.py index efbffd8aa8b..bd40dd086d4 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -416,7 +416,7 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create minimal aurora instance in default VPC and default subnet group - rds_instance: + community.aws.rds_instance: engine: aurora db_instance_identifier: ansible-test-aurora-db-instance instance_type: db.t2.small @@ -425,7 +425,7 @@ cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it - name: Create a DB instance using the default AWS KMS encryption key - rds_instance: + community.aws.rds_instance: id: test-encrypted-db state: present engine: mariadb @@ -436,13 +436,13 @@ allocated_storage: "{{ allocated_storage }}" - name: remove the DB instance without a final snapshot - rds_instance: + community.aws.rds_instance: id: "{{ instance_id }}" state: absent skip_final_snapshot: True - name: remove the DB instance with a final snapshot - rds_instance: + community.aws.rds_instance: id: "{{ instance_id }}" state: absent final_snapshot_identifier: "{{ snapshot_id }}" diff --git a/rds_instance_info.py b/rds_instance_info.py index 8a23c392ddd..d26965a7970 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -41,13 +41,13 @@ ''' EXAMPLES = ''' -# Get information about an instance -- rds_instance_info: +- name: Get information about an instance + community.aws.rds_instance_info: db_instance_identifier: new-database register: new_database_info -# Get all RDS instances -- rds_instance_info: +- name: Get all RDS instances + community.aws.rds_instance_info: ''' RETURN = ''' diff --git a/rds_param_group.py b/rds_param_group.py index a30df260a25..e5cd2457458 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -67,8 +67,8 @@ ''' EXAMPLES = ''' -# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 -- rds_param_group: +- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 + community.aws.rds_param_group: state: present name: norwegian-blue description: 'My Fancy Ex Parrot Group' @@ -79,8 +79,8 @@ Environment: production Application: parrot -# Remove a parameter group -- rds_param_group: +- name: Remove a parameter group + community.aws.rds_param_group: state: absent name: norwegian-blue ''' diff --git a/rds_snapshot.py b/rds_snapshot.py index 872af39c8e6..3db9afe0b85 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -67,13 +67,13 @@ ''' 
EXAMPLES = ''' -# Create snapshot -- rds_snapshot: +- name: Create snapshot + community.aws.rds_snapshot: db_instance_identifier: new-database db_snapshot_identifier: new-database-snapshot -# Delete snapshot -- rds_snapshot: +- name: Delete snapshot + community.aws.rds_snapshot: db_snapshot_identifier: new-database-snapshot state: absent ''' diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 30f30a815c1..3e47a444677 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -62,13 +62,13 @@ ''' EXAMPLES = ''' -# Get information about an snapshot -- rds_snapshot_info: +- name: Get information about a snapshot + community.aws.rds_snapshot_info: db_snapshot_identifier: snapshot_name register: new_database_info -# Get all RDS snapshots for an RDS instance -- rds_snapshot_info: +- name: Get all RDS snapshots for an RDS instance + community.aws.rds_snapshot_info: db_instance_identifier: helloworld-rds-master ''' diff --git a/rds_subnet_group.py b/rds_subnet_group.py index 99bfb002752..f913d41f296 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -42,8 +42,8 @@ ''' EXAMPLES = ''' -# Add or change a subnet group -- rds_subnet_group: +- name: Add or change a subnet group + community.aws.rds_subnet_group: state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group subnets: - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a subnet group -- rds_subnet_group: +- name: Remove a subnet group + community.aws.rds_subnet_group: state: absent name: norwegian-blue ''' diff --git a/redshift.py b/redshift.py index 86343d4ef79..6939204d819 100644 --- a/redshift.py +++ b/redshift.py @@ -174,16 +174,16 @@ ''' EXAMPLES = ''' -# Basic cluster provisioning example -- redshift: > - command=create - node_type=ds1.xlarge - identifier=new_cluster - username=cluster_admin - password=1nsecure - -# Cluster delete example -- redshift: +- name: Basic cluster provisioning example + community.aws.redshift: + command: create + node_type: ds1.xlarge + identifier: new_cluster + username: cluster_admin + password: 1nsecure + +- name: Cluster delete example + community.aws.redshift: command: delete identifier: new_cluster skip_final_cluster_snapshot: true diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index d7931a812b6..e97bf0795cd 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -61,7 +61,7 @@ EXAMPLES = ''' - name: configure cross-region snapshot on cluster `johniscool` - redshift_cross_region_snapshots: + community.aws.redshift_cross_region_snapshots: cluster_name: johniscool state: present region: us-east-1 @@ -69,7 +69,7 @@ retention_period: 1 - name: configure cross-region snapshot on kms-encrypted cluster - redshift_cross_region_snapshots: + community.aws.redshift_cross_region_snapshots: cluster_name: whatever state: present region: us-east-1 @@ -78,7 +78,7 @@ retention_period: 10 - name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize) - redshift_cross_region_snapshots: + community.aws.redshift_cross_region_snapshots: cluster_name: whatever state: absent region: us-east-1 diff --git a/redshift_info.py b/redshift_info.py index 77aa5e1fb36..56e7f7139b7 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -39,27 +39,27 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS guide for details.
-# Find all clusters -- redshift_info: +- name: Find all clusters + community.aws.redshift_info: register: redshift -# Find cluster(s) with matching tags -- redshift_info: +- name: Find cluster(s) with matching tags + community.aws.redshift_info: tags: env: prd stack: monitoring register: redshift_tags -# Find cluster(s) with matching name/prefix and tags -- redshift_info: +- name: Find cluster(s) with matching name/prefix and tags + community.aws.redshift_info: tags: env: dev stack: web name: user- register: redshift_web -# Fail if no cluster(s) is/are found -- redshift_info: +- name: Fail if no cluster(s) is/are found + community.aws.redshift_info: tags: env: stg stack: db diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index cb9d085e8c8..4351ac9e717 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -47,9 +47,8 @@ ''' EXAMPLES = ''' -# Create a Redshift subnet group -- local_action: - module: redshift_subnet_group +- name: Create a Redshift subnet group + community.aws.redshift_subnet_group: state: present group_name: redshift-subnet group_description: Redshift subnet @@ -57,8 +56,8 @@ - 'subnet-aaaaa' - 'subnet-bbbbb' -# Remove subnet group -- redshift_subnet_group: +- name: Remove subnet group + community.aws.redshift_subnet_group: state: absent group_name: redshift-subnet ''' diff --git a/route53.py b/route53.py index c93d941f39f..385a1d10ec4 100644 --- a/route53.py +++ b/route53.py @@ -212,108 +212,110 @@ sample: foo.bar.com. ''' -EXAMPLES = ''' -# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated -- route53: - state: present - zone: foo.com - record: new.foo.com - type: A - ttl: 7200 - value: 1.1.1.1,2.2.2.2,3.3.3.3 - wait: yes - -# Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated -- route53: - state: present - zone: foo.com - record: new.foo.com - type: A - ttl: 7200 - value: - - 1.1.1.1 - - 2.2.2.2 - - 3.3.3.3 - wait: yes - -# Retrieve the details for new.foo.com -- route53: - state: get - zone: foo.com - record: new.foo.com - type: A +EXAMPLES = r''' +- name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated + community.aws.route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: 1.1.1.1,2.2.2.2,3.3.3.3 + wait: yes + +- name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated + community.aws.route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: + - 1.1.1.1 + - 2.2.2.2 + - 3.3.3.3 + wait: yes + +- name: Retrieve the details for new.foo.com + community.aws.route53: + state: get + zone: foo.com + record: new.foo.com + type: A register: rec -# Delete new.foo.com A record using the results from the get command -- route53: - state: absent - zone: foo.com - record: "{{ rec.set.record }}" - ttl: "{{ rec.set.ttl }}" - type: "{{ rec.set.type }}" - value: "{{ rec.set.value }}" +- name: Delete new.foo.com A record using the results from the get command + community.aws.route53: + state: absent + zone: foo.com + record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" + type: "{{ rec.set.type }}" + value: "{{ rec.set.value }}" # Add an AAAA record. Note that because there are colons in the value # that the IPv6 address must be quoted. Also shows using the old form command=create. 
-- route53:
-      command: create
-      zone: foo.com
-      record: localhost.foo.com
-      type: AAAA
-      ttl: 7200
-      value: "::1"
-
-# Add a SRV record with multiple fields for a service on port 22222
+- name: Add an AAAA record
+  community.aws.route53:
+    command: create
+    zone: foo.com
+    record: localhost.foo.com
+    type: AAAA
+    ttl: 7200
+    value: "::1"
+
 # For more information on SRV records see:
 # https://en.wikipedia.org/wiki/SRV_record
-- route53:
-      state: present
-      zone: foo.com
-      record: "_example-service._tcp.foo.com"
-      type: SRV
-      value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
-
-# Add a TXT record. Note that TXT and SPF records must be surrounded
+- name: Add a SRV record with multiple fields for a service on port 22222
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    record: "_example-service._tcp.foo.com"
+    type: SRV
+    value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
+
+# Note that TXT and SPF records must be surrounded
 # by quotes when sent to Route 53:
-- route53:
-      state: present
-      zone: foo.com
-      record: localhost.foo.com
-      type: TXT
-      ttl: 7200
-      value: '"bar"'
-
-# Add an alias record that points to an Amazon ELB:
-- route53:
-      state: present
-      zone: foo.com
-      record: elb.foo.com
-      type: A
-      value: "{{ elb_dns_name }}"
-      alias: True
-      alias_hosted_zone_id: "{{ elb_zone_id }}"
-
-# Retrieve the details for elb.foo.com
-- route53:
-      state: get
-      zone: foo.com
-      record: elb.foo.com
-      type: A
+- name: Add a TXT record
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    record: localhost.foo.com
+    type: TXT
+    ttl: 7200
+    value: '"bar"'
+
+- name: Add an alias record that points to an Amazon ELB
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+    value: "{{ elb_dns_name }}"
+    alias: True
+    alias_hosted_zone_id: "{{ elb_zone_id }}"
+
+- name: Retrieve the details for elb.foo.com
+  community.aws.route53:
+    state: get
+    zone: foo.com
+    record: elb.foo.com
+    type: A
   register: rec
 
-# Delete an alias record using the results from the get command
-- route53:
-      state: absent
-      zone: foo.com
-      record: "{{ rec.set.record }}"
-      ttl: "{{ rec.set.ttl }}"
-      type: "{{ rec.set.type }}"
-      value: "{{ rec.set.value }}"
-      alias: True
-      alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
-
-# Add an alias record that points to an Amazon ELB and evaluates it health:
-- route53:
+- name: Delete an alias record using the results from the get command
+  community.aws.route53:
+    state: absent
+    zone: foo.com
+    record: "{{ rec.set.record }}"
+    ttl: "{{ rec.set.ttl }}"
+    type: "{{ rec.set.type }}"
+    value: "{{ rec.set.value }}"
+    alias: True
+    alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
+
+- name: Add an alias record that points to an Amazon ELB and evaluates its health
+  community.aws.route53:
     state: present
     zone: foo.com
     record: elb.foo.com
@@ -323,39 +325,39 @@
     alias_hosted_zone_id: "{{ elb_zone_id }}"
     alias_evaluate_target_health: True
 
-# Add an AAAA record with Hosted Zone ID.
-- route53:
-      state: present
-      zone: foo.com
-      hosted_zone_id: Z2AABBCCDDEEFF
-      record: localhost.foo.com
-      type: AAAA
-      ttl: 7200
-      value: "::1"
-
-# Use a routing policy to distribute traffic:
-- route53:
-      state: present
-      zone: foo.com
-      record: www.foo.com
-      type: CNAME
-      value: host1.foo.com
-      ttl: 30
-      # Routing policy
-      identifier: "host1@www"
-      weight: 100
-      health_check: "d994b780-3150-49fd-9205-356abdd42e75"
-
-# Add a CAA record (RFC 6844):
-- route53:
-      state: present
-      zone: example.com
-      record: example.com
-      type: CAA
-      value:
-        - 0 issue "ca.example.net"
-        - 0 issuewild ";"
-        - 0 iodef "mailto:security@example.com"
+- name: Add an AAAA record with Hosted Zone ID
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    hosted_zone_id: Z2AABBCCDDEEFF
+    record: localhost.foo.com
+    type: AAAA
+    ttl: 7200
+    value: "::1"
+
+- name: Use a routing policy to distribute traffic
+  community.aws.route53:
+    state: present
+    zone: foo.com
+    record: www.foo.com
+    type: CNAME
+    value: host1.foo.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+
+- name: Add a CAA record (RFC 6844)
+  community.aws.route53:
+    state: present
+    zone: example.com
+    record: example.com
+    type: CAA
+    value:
+      - 0 issue "ca.example.net"
+      - 0 issuewild ";"
+      - 0 iodef "mailto:security@example.com"
 '''
diff --git a/route53_health_check.py b/route53_health_check.py
index 414f27a3eee..80f6691407a 100644
--- a/route53_health_check.py
+++ b/route53_health_check.py
@@ -84,8 +84,8 @@
 '''
 
 EXAMPLES = '''
-# Create a health-check for host1.example.com and use it in record
-- route53_health_check:
+- name: Create a health-check for host1.example.com and use it in a record
+  community.aws.route53_health_check:
     state: present
     fqdn: host1.example.com
     type: HTTP_STR_MATCH
@@ -95,7 +95,7 @@
     failure_threshold: 2
   register: my_health_check
 
-- route53:
+- community.aws.route53:
     action: create
     zone: "example.com"
     type: CNAME
@@ -107,8 +107,8 @@
     weight: 100
     health_check: "{{ my_health_check.health_check.id }}"
 
-# Delete health-check
-- route53_health_check:
+- name: Delete health-check
+  community.aws.route53_health_check:
     state: absent
     fqdn: host1.example.com
 
diff --git a/route53_info.py b/route53_info.py
index cb6f74b369a..466e39bd79f 100644
--- a/route53_info.py
+++ b/route53_info.py
@@ -136,19 +136,19 @@
 EXAMPLES = '''
 # Simple example of listing all hosted zones
 - name: List all hosted zones
-  route53_info:
+  community.aws.route53_info:
     query: hosted_zone
   register: hosted_zones
 
 # Getting a count of hosted zones
 - name: Return a count of all hosted zones
-  route53_info:
+  community.aws.route53_info:
     query: hosted_zone
     hosted_zone_method: count
   register: hosted_zone_count
 
 - name: List the first 20 resource record sets in a given hosted zone
-  route53_info:
+  community.aws.route53_info:
     profile: account_name
     query: record_sets
     hosted_zone_id: ZZZ1111112222
@@ -156,33 +156,33 @@
   register: record_sets
 
 - name: List first 20 health checks
-  route53_info:
+  community.aws.route53_info:
     query: health_check
     health_check_method: list
     max_items: 20
   register: health_checks
 
 - name: Get health check last failure_reason
-  route53_info:
+  community.aws.route53_info:
     query: health_check
     health_check_method: failure_reason
     health_check_id: 00000000-1111-2222-3333-12345678abcd
   register: health_check_failure_reason
 
 - name: Retrieve reusable delegation set details
-  route53_info:
+  community.aws.route53_info:
     query: reusable_delegation_set
     delegation_set_id: delegation id
register: delegation_sets - name: setup of example for using next_marker - route53_info: + community.aws.route53_info: query: hosted_zone max_items: 1 register: first_info - name: example for using next_marker - route53_info: + community.aws.route53_info: query: hosted_zone next_marker: "{{ first_info.NextMarker }}" max_items: 1 @@ -191,12 +191,12 @@ - name: retrieve host entries starting with host1.workshop.test.io block: - name: grab zone id - route53_zone: + community.aws.route53_zone: zone: "test.io" register: AWSINFO - name: grab Route53 record information - route53_info: + community.aws.route53_info: type: A query: record_sets hosted_zone_id: "{{ AWSINFO.zone_id }}" diff --git a/route53_zone.py b/route53_zone.py index 3eee17506f4..b7a2b6858ce 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -56,24 +56,24 @@ EXAMPLES = ''' - name: create a public zone - route53_zone: + community.aws.route53_zone: zone: example.com comment: this is an example - name: delete a public zone - route53_zone: + community.aws.route53_zone: zone: example.com state: absent - name: create a private zone - route53_zone: + community.aws.route53_zone: zone: devel.example.com vpc_id: '{{ myvpc_id }}' vpc_region: us-west-2 comment: developer domain - name: create a public zone associated with a specific reusable delegation set - route53_zone: + community.aws.route53_zone: zone: example.com comment: reusable delegation set example delegation_set_id: A1BCDEF2GHIJKL diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 41eaf3cfe72..6732e9a4432 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -13,10 +13,10 @@ short_description: Creates, updates or deletes S3 Bucket notification for lambda description: - This module allows the management of AWS Lambda function bucket event mappings via the - Ansible framework. Use module M(lambda) to manage the lambda function itself, M(lambda_alias) - to manage function aliases and M(lambda_policy) to modify lambda permissions. + Ansible framework. Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) + to manage function aliases and M(community.aws.lambda_policy) to modify lambda permissions. notes: - - This module heavily depends on M(lambda_policy) as you need to allow C(lambda:InvokeFunction) + - This module heavily depends on M(community.aws.lambda_policy) as you need to allow C(lambda:InvokeFunction) permission for your lambda function. author: @@ -91,18 +91,15 @@ EXAMPLES = ''' --- # Example that creates a lambda event notification for a bucket -- hosts: localhost - gather_facts: no - tasks: - - name: Process jpg image - s3_bucket_notification: - state: present - event_name: on_file_add_or_remove - bucket_name: test-bucket - function_name: arn:aws:lambda:us-east-2:526810320200:function:test-lambda - events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] - prefix: images/ - suffix: .jpg +- name: Process jpg image + community.aws.s3_bucket_notification: + state: present + event_name: on_file_add_or_remove + bucket_name: test-bucket + function_name: arn:aws:lambda:us-east-2:526810320200:function:test-lambda + events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] + prefix: images/ + suffix: .jpg ''' RETURN = ''' diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 9ab279d2b50..ceef7fd97d5 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -126,16 +126,16 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
-- s3_lifecycle:
+- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+  community.aws.s3_lifecycle:
     name: mybucket
     expiration_days: 30
     prefix: logs/
     status: enabled
     state: present
 
-# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
-- s3_lifecycle:
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+  community.aws.s3_lifecycle:
     name: mybucket
     transition_days: 7
     expiration_days: 90
@@ -143,10 +143,10 @@
     status: enabled
     state: present
 
-# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
 # Note that midnight GMT must be specified.
 # Be sure to quote your date strings
-- s3_lifecycle:
+- name: Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030
+  community.aws.s3_lifecycle:
     name: mybucket
     transition_date: "2020-12-30T00:00:00.000Z"
     expiration_date: "2030-12-30T00:00:00.000Z"
@@ -154,21 +154,21 @@
     status: enabled
     state: present
 
-# Disable the rule created above
-- s3_lifecycle:
+- name: Disable the rule created above
+  community.aws.s3_lifecycle:
     name: mybucket
     prefix: logs/
     status: disabled
     state: present
 
-# Delete the lifecycle rule created above
-- s3_lifecycle:
+- name: Delete the lifecycle rule created above
+  community.aws.s3_lifecycle:
     name: mybucket
     prefix: logs/
     state: absent
 
-# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
-- s3_lifecycle:
+- name: Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class
+  community.aws.s3_lifecycle:
     name: mybucket
     prefix: backups/
     storage_class: standard_ia
@@ -176,8 +176,8 @@
     state: present
     status: enabled
 
-# Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
-- s3_lifecycle:
+- name: Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
+  community.aws.s3_lifecycle:
     name: mybucket
     prefix: logs/
     state: present
diff --git a/s3_logging.py b/s3_logging.py
index 9d074f4876a..b672562131e 100644
--- a/s3_logging.py
+++ b/s3_logging.py
@@ -44,14 +44,14 @@
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 - name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
-  s3_logging:
+  community.aws.s3_logging:
     name: mywebsite.com
     target_bucket: mylogs
     target_prefix: logs/mywebsite.com
     state: present
 
 - name: Remove logging on an s3 bucket
-  s3_logging:
+  community.aws.s3_logging:
     name: mywebsite.com
     state: absent
 
diff --git a/s3_sync.py b/s3_sync.py
index aa527092d3c..879452af057 100644
--- a/s3_sync.py
+++ b/s3_sync.py
@@ -126,12 +126,12 @@
 
 EXAMPLES = '''
 - name: basic upload
-  s3_sync:
+  community.aws.s3_sync:
     bucket: tedder
     file_root: roles/s3/files/
 
 - name: all the options
-  s3_sync:
+  community.aws.s3_sync:
     bucket: tedder
     file_root: roles/s3/files
     mime_map:
diff --git a/s3_website.py b/s3_website.py
index 5f0822af6fe..08d786cf0f1 100644
--- a/s3_website.py
+++ b/s3_website.py
@@ -52,19 +52,19 @@
 EXAMPLES = '''
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
-# Configure an s3 bucket to redirect all requests to example.com
-- s3_website:
+- name: Configure an s3 bucket to redirect all requests to example.com
+  community.aws.s3_website:
     name: mybucket.com
     redirect_all_requests: example.com
     state: present
 
-# Remove website configuration from an s3 bucket
-- s3_website:
+- name: Remove website configuration from an s3 bucket
+  community.aws.s3_website:
     name: mybucket.com
     state: absent
 
-# Configure an s3 bucket as a website with index and error pages
-- s3_website:
+- name: Configure an s3 bucket as a website with index and error pages
+  community.aws.s3_website:
     name: mybucket.com
     suffix: home.htm
     error_key: errors/404.htm
diff --git a/sns.py b/sns.py
index 41c346ac317..0a0ceda75d6 100644
--- a/sns.py
+++ b/sns.py
@@ -88,14 +88,14 @@
 
 EXAMPLES = """
 - name: Send default notification message via SNS
-  sns:
+  community.aws.sns:
     msg: '{{ inventory_hostname }} has completed the play.'
     subject: Deploy complete!
     topic: deploy
   delegate_to: localhost
 
 - name: Send notification messages via SNS with short message for SMS
-  sns:
+  community.aws.sns:
     msg: '{{ inventory_hostname }} has completed the play.'
     sms: deployed!
     subject: Deploy complete!
@@ -103,7 +103,7 @@
   delegate_to: localhost
 
 - name: Send message with message_attributes
-  sns:
+  community.aws.sns:
     topic: "deploy"
    msg: "message with extra details!"
     message_attributes:
diff --git a/sns_topic.py b/sns_topic.py
index 52c21a41f0f..2f0865406d1 100644
--- a/sns_topic.py
+++ b/sns_topic.py
@@ -11,7 +11,7 @@
 module: sns_topic
 short_description: Manages AWS SNS topics and subscriptions
 description:
-  - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
+  - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
   - As of 2.6, this module can be used to subscribe and unsubscribe to topics outside of your AWS account.
 author:
   - "Joel Thompson (@joelthompson)"
@@ -75,7 +75,7 @@
 
 EXAMPLES = """
 - name: Create alarm SNS topic
-  sns_topic:
+  community.aws.sns_topic:
     name: "alarms"
     state: present
     display_name: "alarm SNS topic"
@@ -104,7 +104,7 @@
   type: str
   returned: always
   sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
-sns_topic:
+community.aws.sns_topic:
   description: Dict of sns topic details
   type: complex
   returned: always
diff --git a/sqs_queue.py b/sqs_queue.py
index 763db04c774..1682d424592 100644
--- a/sqs_queue.py
+++ b/sqs_queue.py
@@ -169,8 +169,8 @@
 '''
 
 EXAMPLES = '''
-# Create SQS queue with redrive policy
-- sqs_queue:
+- name: Create SQS queue with redrive policy
+  community.aws.sqs_queue:
     name: my-queue
     region: ap-southeast-2
     default_visibility_timeout: 120
@@ -183,35 +183,35 @@
       maxReceiveCount: 5
       deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
 
-# Drop redrive policy
-- sqs_queue:
+- name: Drop redrive policy
+  community.aws.sqs_queue:
     name: my-queue
     region: ap-southeast-2
     redrive_policy: {}
 
-# Create FIFO queue
-- sqs_queue:
+- name: Create FIFO queue
+  community.aws.sqs_queue:
     name: fifo-queue
     region: ap-southeast-2
     queue_type: fifo
     content_based_deduplication: yes
 
-# Tag queue
-- sqs_queue:
+- name: Tag queue
+  community.aws.sqs_queue:
     name: fifo-queue
     region: ap-southeast-2
     tags:
       example: SomeValue
 
-# Configure Encryption, automatically uses a new data key every hour
-- sqs_queue:
+- name: Configure encryption, automatically using a new data key every hour
+  community.aws.sqs_queue:
     name: fifo-queue
     region: ap-southeast-2
     kms_master_key_id: alias/MyQueueKey
     kms_data_key_reuse_period_seconds: 3600
 
-# Delete SQS queue
-- sqs_queue:
+- name: Delete SQS queue
+  community.aws.sqs_queue:
     name: my-queue
     region: ap-southeast-2
     state: absent
diff --git a/sts_assume_role.py b/sts_assume_role.py
index f836e478e23..4048373c614 100644
--- a/sts_assume_role.py
+++ b/sts_assume_role.py
@@ -88,13 +88,13 @@
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 # Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-- sts_assume_role:
+- community.aws.sts_assume_role:
     role_arn: "arn:aws:iam::123456789012:role/someRole"
     role_session_name: "someRoleSession"
   register: assumed_role
 
 # Use the assumed role above to tag an instance in account 123456789012
-- ec2_tag:
+- amazon.aws.ec2_tag:
     aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
     aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
     security_token: "{{ assumed_role.sts_creds.session_token }}"
diff --git a/sts_session_token.py b/sts_session_token.py
index d39519e8e0f..50c537623f6 100644
--- a/sts_session_token.py
+++ b/sts_session_token.py
@@ -60,20 +60,21 @@
 EXAMPLES = '''
 # Note: These examples do not set authentication details, see the AWS Guide for details.
-# Get a session token (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html) -sts_session_token: - duration_seconds: 3600 -register: session_credentials - -# Use the session token obtained above to tag an instance in account 123456789012 -ec2_tag: - aws_access_key: "{{ session_credentials.sts_creds.access_key }}" - aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}" - security_token: "{{ session_credentials.sts_creds.session_token }}" - resource: i-xyzxyz01 - state: present - tags: - MyNewTag: value +# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html) +- name: Get a session token + community.aws.sts_session_token: + duration_seconds: 3600 + register: session_credentials + +- name: Use the session token obtained above to tag an instance in account 123456789012 + amazon.aws.ec2_tag: + aws_access_key: "{{ session_credentials.sts_creds.access_key }}" + aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}" + security_token: "{{ session_credentials.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value ''' From e8e7d5cd809adff4bd6f1166b952fd3e94c5e1db Mon Sep 17 00:00:00 2001 From: flowerysong Date: Tue, 16 Jun 2020 19:30:00 -0400 Subject: [PATCH 013/683] Update module_utils paths to remove aws subdir (#23) Co-authored-by: Ezekiel Hendrickson --- aws_acm.py | 4 +-- aws_acm_info.py | 4 +-- aws_api_gateway.py | 2 +- aws_application_scaling_policy.py | 2 +- aws_batch_compute_environment.py | 2 +- aws_batch_job_definition.py | 4 +-- aws_batch_job_queue.py | 4 +-- aws_codebuild.py | 2 +- aws_codecommit.py | 2 +- aws_codepipeline.py | 2 +- aws_config_aggregation_authorization.py | 2 +- aws_config_aggregator.py | 2 +- aws_config_delivery_channel.py | 2 +- aws_config_recorder.py | 2 +- aws_config_rule.py | 2 +- aws_direct_connect_connection.py | 13 +++++----- aws_direct_connect_link_aggregation_group.py | 26 ++++++++++--------- aws_direct_connect_virtual_interface.py | 4 +-- aws_eks_cluster.py | 4 +-- aws_elasticbeanstalk_app.py | 2 +- aws_glue_connection.py | 2 +- aws_glue_job.py | 2 +- aws_inspector_target.py | 2 +- aws_kms.py | 2 +- aws_region_info.py | 2 +- aws_s3_cors.py | 2 +- aws_secret.py | 2 +- aws_ses_identity.py | 2 +- aws_ses_identity_policy.py | 2 +- aws_ses_rule_set.py | 2 +- aws_sgw_info.py | 2 +- aws_ssm_parameter_store.py | 2 +- aws_step_functions_state_machine.py | 2 +- aws_step_functions_state_machine_execution.py | 2 +- aws_waf_condition.py | 6 ++--- aws_waf_info.py | 4 +-- aws_waf_rule.py | 22 +++++++++------- aws_waf_web_acl.py | 17 ++++++------ cloudformation_exports_info.py | 2 +- cloudformation_stack_set.py | 2 +- cloudfront_distribution.py | 4 +-- cloudfront_invalidation.py | 4 +-- cloudfront_origin_access_identity.py | 4 +-- cloudtrail.py | 2 +- cloudwatchevent_rule.py | 2 +- cloudwatchlogs_log_group_metric_filter.py | 2 +- dms_endpoint.py | 2 +- dms_replication_subnet_group.py | 2 +- ec2_ami_copy.py | 2 +- ec2_asg.py | 4 +-- ec2_asg_info.py | 2 +- ec2_asg_lifecycle_hook.py | 2 +- ec2_customer_gateway_info.py | 2 +- ec2_eip.py | 2 +- ec2_eip_info.py | 2 +- ec2_instance.py | 2 +- ec2_launch_template.py | 2 +- ec2_metric_alarm.py | 2 +- ec2_placement_group.py | 2 +- ec2_placement_group_info.py | 2 +- ec2_transit_gateway.py | 2 +- ec2_transit_gateway_info.py | 2 +- ec2_vpc_egress_igw.py | 2 +- ec2_vpc_igw.py | 4 +-- ec2_vpc_nacl.py | 2 +- ec2_vpc_nacl_info.py | 2 +- ec2_vpc_peer.py | 2 +- ec2_vpc_route_table.py | 4 +-- ec2_vpc_vgw.py | 4 
+-- ec2_vpc_vpn.py | 2 +- ec2_vpc_vpn_info.py | 2 +- ecs_ecr.py | 2 +- ecs_service.py | 2 +- ecs_service_info.py | 2 +- ecs_tag.py | 2 +- ecs_task.py | 2 +- ecs_taskdefinition.py | 2 +- ecs_taskdefinition_info.py | 2 +- efs.py | 2 +- efs_info.py | 2 +- elasticache_info.py | 2 +- elb_application_lb.py | 17 ++++++------ elb_classic_lb_info.py | 2 +- elb_network_lb.py | 4 +-- elb_target_group.py | 2 +- elb_target_info.py | 2 +- iam_group.py | 2 +- iam_password_policy.py | 2 +- iam_policy.py | 2 +- iam_policy_info.py | 2 +- iam_role.py | 2 +- iam_role_info.py | 2 +- iam_saml_federation.py | 2 +- iam_user.py | 2 +- iam_user_info.py | 2 +- lambda.py | 2 +- lambda_facts.py | 2 +- lambda_info.py | 2 +- lambda_policy.py | 2 +- lightsail.py | 2 +- rds_instance.py | 19 +++++++------- rds_instance_info.py | 2 +- rds_snapshot.py | 2 +- rds_snapshot_info.py | 2 +- redshift.py | 2 +- redshift_cross_region_snapshots.py | 2 +- redshift_info.py | 2 +- route53_zone.py | 2 +- s3_bucket_notification.py | 2 +- s3_lifecycle.py | 2 +- sns.py | 2 +- sns_topic.py | 2 +- sqs_queue.py | 2 +- sts_assume_role.py | 2 +- 114 files changed, 186 insertions(+), 178 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index de20833ac03..5da9ab617e1 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -223,8 +223,8 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager from ansible.module_utils._text import to_text import base64 import re # regex library diff --git a/aws_acm_info.py b/aws_acm_info.py index dfbd955a178..d23fe502ea7 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -259,8 +259,8 @@ type: str ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager def main(): diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 49b1a1f8a4e..380745a4408 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -177,7 +177,7 @@ pass import traceback -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict) diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 369302d7a9b..5a2441c5a02 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -282,7 +282,7 @@ sample: '2017-09-28T08:22:51.881000-03:00' ''' # NOQA -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict try: diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 04738ffefae..dd7ce4cb9f2 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -228,7 +228,7 @@ type: dict ''' -from 
ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict import re diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 7debf759156..347af23ee4e 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -223,8 +223,8 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.batch import cc, set_api_params -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index 3ca0333b940..59cc0b3a24c 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -111,8 +111,8 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.batch import set_api_params -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/aws_codebuild.py b/aws_codebuild.py index 8b4a7bf04c4..5ad56103123 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -285,7 +285,7 @@ sample: "2018-04-17T16:56:03.245000+02:00" ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, get_boto3_client_method_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict diff --git a/aws_codecommit.py b/aws_codecommit.py index 8f26be4ed48..8b929454e9d 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -141,7 +141,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 90fea4016cd..ff38319ac7b 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -196,7 +196,7 @@ import traceback from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index a29eda64394..7bd95fc3b0b 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -58,7 +58,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 5976c9058fb..c750edae99b 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -95,7 +95,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index afaef581de7..a50de66a3a9 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -71,7 +71,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry diff --git a/aws_config_recorder.py b/aws_config_recorder.py index 7b576b6cda7..b769e6278ff 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -85,7 +85,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry diff --git a/aws_config_rule.py b/aws_config_rule.py index 50c8d82c552..08675f9a514 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -113,7 +113,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 61a0caf0149..9975206e050 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -154,13 +154,14 @@ """ import traceback -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) -from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import (DirectConnectError, - delete_connection, - associate_connection_and_lag, - disassociate_connection_and_lag, - ) +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import ( + DirectConnectError, + delete_connection, + associate_connection_and_lag, + disassociate_connection_and_lag, +) try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/aws_direct_connect_link_aggregation_group.py 
b/aws_direct_connect_link_aggregation_group.py index 30b0656af5f..557be09929f 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -160,19 +160,21 @@ returned: when I(state=present) """ -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - HAS_BOTO3, - get_aws_connection_info, - boto3_conn, - AWSRetry, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( + AWSRetry, + HAS_BOTO3, + boto3_conn, + camel_dict_to_snake_dict, + ec2_argument_spec, + get_aws_connection_info, +) from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import (DirectConnectError, - delete_connection, - delete_virtual_interface, - disassociate_connection_and_lag, - ) +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import ( + DirectConnectError, + delete_connection, + delete_virtual_interface, + disassociate_connection_and_lag, +) import traceback import time diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 6450be0ab08..4139e5fd7f1 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -234,8 +234,8 @@ ''' import traceback -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError, delete_virtual_interface from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict try: diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 27200f55908..5851b52661c 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -158,9 +158,9 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter try: import botocore.exceptions diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index 67f5bc611a8..cb610a29f82 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -88,7 +88,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule def describe_app(ebs, app_name, module): diff --git a/aws_glue_connection.py b/aws_glue_connection.py index 1810a6df2e9..9aafb4c1d8a 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -129,7 +129,7 @@ sample: {'subnet-id':'subnet-aabbccddee'} ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names # Non-ansible imports diff --git a/aws_glue_job.py b/aws_glue_job.py index 966029ce325..f3c7513fdda 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -184,7 +184,7 @@ sample: 300 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict # Non-ansible imports diff --git a/aws_inspector_target.py b/aws_inspector_target.py index d7e668038fd..3d7af6b6c4a 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -98,7 +98,7 @@ sample: "2018-01-29T13:48:51.958000+00:00" ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( ansible_dict_to_boto3_tag_list, diff --git a/aws_kms.py b/aws_kms.py index 0a0bba626e6..e83fd9657e2 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -404,7 +404,7 @@ 'admin': 'Allow access for Key Administrators' } -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, compare_policies diff --git a/aws_region_info.py b/aws_region_info.py index 719cce0cec8..dfe7892164b 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -54,7 +54,7 @@ }]" ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict try: diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 130b20966e2..d9a13e302fe 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -98,7 +98,7 @@ except Exception: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies diff --git a/aws_secret.py b/aws_secret.py index a007cf564f5..4ee846f1bb6 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -130,7 +130,7 @@ ''' from ansible.module_utils._text import to_bytes -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list diff --git a/aws_ses_identity.py b/aws_ses_identity.py index 
2185d07d0e8..5f649873ab3 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -217,7 +217,7 @@ type: bool ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info import time diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py index bb166523585..4be04d8ecfd 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -81,7 +81,7 @@ sample: [ExamplePolicy] ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry import json diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index b6b45afce75..86b5f9bf2b5 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -98,7 +98,7 @@ }] """ -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: diff --git a/aws_sgw_info.py b/aws_sgw_info.py index 7963e11bfc0..409eeb8126b 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -172,7 +172,7 @@ region: eu-west-3 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index c721fe3385d..2c397ab5326 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -126,7 +126,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index a283a57ce6d..1220f69f42a 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -96,7 +96,7 @@ returned: always ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index 65ed30453c7..a3a1d13d6c4 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -88,7 +88,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/aws_waf_condition.py b/aws_waf_condition.py index df6632ce1d6..7774d13f06f 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -401,10 +401,10 @@ 
except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff, MATCH_LOOKUP +from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff class Condition(object): diff --git a/aws_waf_info.py b/aws_waf_info.py index 9a895c847ea..15c1ef98f81 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -115,8 +115,8 @@ ] ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import list_web_acls, get_web_acl +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl def main(): diff --git a/aws_waf_rule.py b/aws_waf_rule.py index 54fb1b23f8b..d2a9ad395ae 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -145,17 +145,19 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (run_func_with_change_token_backoff, - list_rules_with_backoff, - list_regional_rules_with_backoff, - MATCH_LOOKUP, - ) -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (get_web_acl_with_backoff, - list_web_acls_with_backoff, - list_regional_web_acls_with_backoff, - ) +from ansible_collections.amazon.aws.plugins.module_utils.waf import ( + MATCH_LOOKUP, + list_regional_rules_with_backoff, + list_rules_with_backoff, + run_func_with_change_token_backoff, +) +from ansible_collections.amazon.aws.plugins.module_utils.waf import ( + get_web_acl_with_backoff, + list_web_acls_with_backoff, + list_regional_web_acls_with_backoff, +) def get_rule_by_name(client, module, name): diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index fe954dda1b2..57ff9ea3515 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -163,15 +163,16 @@ import re -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.aws.waf import (list_rules_with_backoff, - list_web_acls_with_backoff, - list_regional_web_acls_with_backoff, - run_func_with_change_token_backoff, - 
list_regional_rules_with_backoff, - ) +from ansible_collections.amazon.aws.plugins.module_utils.waf import ( + list_regional_rules_with_backoff, + list_regional_web_acls_with_backoff, + list_rules_with_backoff, + list_web_acls_with_backoff, + run_func_with_change_token_backoff, +) def get_web_acl_by_name(client, module, name): diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 2c6166dc0d5..2308eb8e02e 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -37,7 +37,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index a7b476d032e..986db8dac3a 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -313,7 +313,7 @@ ansible_dict_to_boto3_tag_list, camel_dict_to_snake_dict, ) -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils._text import to_native diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 26237ea1851..80be45e1268 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1363,8 +1363,8 @@ ''' from ansible.module_utils._text import to_text, to_native -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager from ansible.module_utils.common.dict_transformations import recursive_diff from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 13a7d8c30b3..b1da91c1c66 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -137,8 +137,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager import datetime try: diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 17bfb6a71d1..af1ea4964b1 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -119,9 +119,9 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager +from 
ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule import datetime from functools import partial import json diff --git a/cloudtrail.py b/cloudtrail.py index 83e6cc0b0f1..fe8d500a4c3 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -253,7 +253,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 29854fcc10b..00a1908145a 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -153,7 +153,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index b606a9ef8a9..852d7900e3a 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -99,7 +99,7 @@ ] """ -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/dms_endpoint.py b/dms_endpoint.py index 7fc1a253a9f..438a9a57271 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -167,7 +167,7 @@ RETURN = ''' # ''' import traceback -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 9354eeabc86..0c4a37d5b1f 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -58,7 +58,7 @@ RETURN = ''' # ''' import traceback -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index c6a1bb0ee45..2430d04cbb7 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -132,7 +132,7 @@ sample: ami-e689729e ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list from ansible.module_utils._text import to_native diff --git a/ec2_asg.py b/ec2_asg.py index 3bfd6f131a9..323120035f8 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -528,7 +528,7 @@ import traceback from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict @@ -539,7 +539,7 @@ except ImportError: pass # will be detected by imported HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 2cce6380fd6..d3fb1e101bb 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -222,7 +222,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index 9e01ca21aee..0127ac9137c 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -101,7 +101,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: import botocore diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 4872e691023..d4a10d23cd0 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -84,7 +84,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, diff --git a/ec2_eip.py b/ec2_eip.py index 2859ccaee7f..16ce4ddf70f 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -221,7 +221,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 4c2f8c6756d..33899dd648c 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -96,7 +96,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, diff --git a/ec2_instance.py 
b/ec2_instance.py index 8a682c56e12..912fa7cbe72 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -816,7 +816,7 @@ camel_dict_to_snake_dict, ) -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule module = None diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 5c1a993dd58..3605b0a2db5 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -364,7 +364,7 @@ from uuid import uuid4 from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, AWSRetry, diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 42791c518bb..a07639aa387 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -202,7 +202,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/ec2_placement_group.py b/ec2_placement_group.py index b95069065aa..5d4b0087086 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -86,7 +86,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: from botocore.exceptions import (BotoCoreError, ClientError) diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index e9fa6338bad..3559debfe60 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -71,7 +71,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index b75eb5510a4..b4eca57de97 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -225,7 +225,7 @@ pass # handled by imported AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from time import sleep, time from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 7e5f69c5917..42f180ab97f 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -168,7 +168,7 @@ pass # handled by imported AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, boto3_tag_list_to_ansible_dict, diff --git 
a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index e93ce7791e9..2981d194ef9 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -57,7 +57,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 6b1a69911d4..c1960c14bbb 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -84,8 +84,8 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict, diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 5c14fec8040..14853edf767 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -154,7 +154,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry # VPC-supported IANA protocol numbers diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 31b1099b04c..2cc4f012d58 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -107,7 +107,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list, diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index b8e263c1242..5c94d4e399b 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -225,7 +225,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def tags_changed(pcx_id, client, module): diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 0b8230ac7e1..fbbae5c5a00 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -225,8 +225,8 @@ import re from time import sleep -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index d1ea852d0e3..511616a0838 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -120,8 +120,8 @@ except ImportError: HAS_BOTO3 = False -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible.module_utils.basic import AnsibleModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry from ansible.module_utils._text import to_native diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 4d0f06fb5a3..72a3b9a3eb1 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -296,7 +296,7 @@ vpn_connection_id: vpn-781e0e19 """ -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( camel_dict_to_snake_dict, diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index f34ddb8a937..b9830d7d31e 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -164,7 +164,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, diff --git a/ecs_ecr.py b/ecs_ecr.py index 23e1018c1f7..f0b0df8b7f1 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -182,7 +182,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict from ansible.module_utils.six import string_types diff --git a/ecs_service.py b/ecs_service.py index ddb2b8c1bba..27ee4b8fc39 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -475,7 +475,7 @@ 'minimum_healthy_percent': 'int' } -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names try: diff --git a/ecs_service_info.py b/ecs_service_info.py index 9843f638d55..eb7f6215ef4 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -136,7 +136,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/ecs_tag.py b/ecs_tag.py index 
a3c16d74681..364546a4bee 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -108,7 +108,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags try: diff --git a/ecs_task.py b/ecs_task.py index c11c6b2a792..0c926954023 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -211,7 +211,7 @@ type: str ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.basic import missing_required_lib from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index e7d3864a785..f61595fcd77 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -211,7 +211,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils._text import to_text diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index e4e93e0a90f..82a9bc968c7 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -298,7 +298,7 @@ type: str ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/efs.py b/efs.py index 95e5df78499..b5174777ff4 100644 --- a/efs.py +++ b/efs.py @@ -233,7 +233,7 @@ except ImportError as e: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (compare_aws_tags, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list, diff --git a/efs_info.py b/efs_info.py index bc2ddeda5a5..edc8e0daac6 100644 --- a/efs_info.py +++ b/efs_info.py @@ -176,7 +176,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict from ansible.module_utils._text import to_native diff --git a/elasticache_info.py b/elasticache_info.py index ffefc9b53a0..93e8ae8d43d 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -222,7 +222,7 @@ Environment: test ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
(get_aws_connection_info, camel_dict_to_snake_dict, AWSRetry, diff --git a/elb_application_lb.py b/elb_application_lb.py index c314a60aa40..6daaad7a49c 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -454,16 +454,17 @@ sample: vpc-0011223344 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.aws.elbv2 import (ApplicationLoadBalancer, - ELBListeners, - ELBListener, - ELBListenerRules, - ELBListenerRule, - ) -from ansible_collections.amazon.aws.plugins.module_utils.aws.elb_utils import get_elb_listener_rules +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( + ApplicationLoadBalancer, + ELBListener, + ELBListenerRule, + ELBListenerRules, + ELBListeners, +) +from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules def create_or_update_elb(elb_obj): diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 9341cb59b4b..4b2a2db64bb 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -140,7 +140,7 @@ vpc_id: vpc-c248fda4 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict, diff --git a/elb_network_lb.py b/elb_network_lb.py index a3405fd5cae..5e1b52213cc 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -305,9 +305,9 @@ sample: vpc-0011223344 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.aws.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener def create_or_update_elb(elb_obj): diff --git a/elb_target_group.py b/elb_target_group.py index 43723bfd6e9..f3c0723ac27 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -377,7 +377,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, diff --git a/elb_target_info.py b/elb_target_info.py index af2dc55cd5b..f6b0f104032 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -215,7 +215,7 @@ # we can handle the lack of boto3 based on the ec2 module pass -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry diff --git a/iam_group.py b/iam_group.py index 121801275eb..4a53a870833 
100644 --- a/iam_group.py +++ b/iam_group.py @@ -176,7 +176,7 @@ sample: / ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/iam_password_policy.py b/iam_password_policy.py index a26821e10a4..fda220c0097 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -102,7 +102,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/iam_policy.py b/iam_policy.py index 7ff98790146..1775a7a749e 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -118,7 +118,7 @@ except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies from ansible.module_utils.six import string_types diff --git a/iam_policy_info.py b/iam_policy_info.py index 8df1c9fc216..9f250e37b8d 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -83,7 +83,7 @@ except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.six import string_types diff --git a/iam_role.py b/iam_role.py index 1ce2ceae9d0..09db6ed9643 100644 --- a/iam_role.py +++ b/iam_role.py @@ -192,7 +192,7 @@ import json -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_tag_list, diff --git a/iam_role_info.py b/iam_role_info.py index ac000ae8552..6b15c186360 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -151,7 +151,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 0172a4cf47e..27310083802 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -108,7 +108,7 @@ except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/iam_user.py b/iam_user.py index 17e126641de..15a972ebc09 100644 --- a/iam_user.py +++ b/iam_user.py @@ -105,7 +105,7 @@ ''' from ansible.module_utils._text import to_native -from 
ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import traceback diff --git a/iam_user_info.py b/iam_user_info.py index cfb8f886324..97a01ca142f 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -100,7 +100,7 @@ sample: "test_user" ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: diff --git a/lambda.py b/lambda.py index 887bdec2fac..705344ab9f6 100644 --- a/lambda.py +++ b/lambda.py @@ -211,7 +211,7 @@ ''' from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags import base64 diff --git a/lambda_facts.py b/lambda_facts.py index 43ad5e6b406..ad56e9c70a5 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -88,7 +88,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime diff --git a/lambda_info.py b/lambda_info.py index 818a9713f0b..568d1f72d2a 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -78,7 +78,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime diff --git a/lambda_policy.py b/lambda_policy.py index fedfcbc5059..81bd7ec9200 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -134,7 +134,7 @@ import json import re from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: from botocore.exceptions import ClientError diff --git a/lightsail.py b/lightsail.py index 722d9e0ed89..f65c39ea0c8 100644 --- a/lightsail.py +++ b/lightsail.py @@ -159,7 +159,7 @@ # will be caught by AnsibleAWSModule pass -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/rds_instance.py b/rds_instance.py index bd40dd086d4..f626d114dee 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -741,15 +741,16 @@ ''' from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from 
ansible_collections.amazon.aws.plugins.module_utils.aws.rds import (ensure_tags, - arg_spec_to_rds_params, - call_method, - get_rds_method_attribute, - get_tags, - get_final_identifier, - ) -from ansible_collections.amazon.aws.plugins.module_utils.aws.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.rds import ( + arg_spec_to_rds_params, + call_method, + ensure_tags, + get_final_identifier, + get_rds_method_attribute, + get_tags, +) +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry from ansible.module_utils.six import string_types diff --git a/rds_instance_info.py b/rds_instance_info.py index d26965a7970..9cc95e378ad 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -343,7 +343,7 @@ sample: sg-abcd1234 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, diff --git a/rds_snapshot.py b/rds_snapshot.py index 3db9afe0b85..fe6b827f6c7 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -202,7 +202,7 @@ pass # protected by AnsibleAWSModule # import module snippets -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 3e47a444677..470ed0e2c6d 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -292,7 +292,7 @@ sample: vpc-abcd1234 ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict try: diff --git a/redshift.py b/redshift.py index 6939204d819..8c3b5ccdfac 100644 --- a/redshift.py +++ b/redshift.py @@ -258,7 +258,7 @@ pass # caught by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code def _collect_facts(resource): diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index e97bf0795cd..b022aaafbba 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -87,7 +87,7 @@ RETURN = ''' # ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule 
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule class SnapshotController(object): diff --git a/redshift_info.py b/redshift_info.py index 56e7f7139b7..b1fbe802b5f 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -282,7 +282,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/route53_zone.py b/route53_zone.py index b7a2b6858ce..bcab3b2e167 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -118,7 +118,7 @@ ''' import time -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 6732e9a4432..f61e5607a66 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -109,7 +109,7 @@ type: list ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/s3_lifecycle.py b/s3_lifecycle.py index ceef7fd97d5..5bdf65ab1e3 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -203,7 +203,7 @@ except ImportError: pass # handled by AnsibleAwsModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule def create_lifecycle_rule(client, module): diff --git a/sns.py b/sns.py index 0a0ceda75d6..e1edfb99660 100644 --- a/sns.py +++ b/sns.py @@ -137,7 +137,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule def arn_topic_lookup(module, client, short_topic): diff --git a/sns_topic.py b/sns_topic.py index 2f0865406d1..ae3b960b7d8 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -219,7 +219,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict diff --git a/sqs_queue.py b/sqs_queue.py index 1682d424592..1756a6e33b4 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -218,7 +218,7 @@ ''' import json -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, diff --git a/sts_assume_role.py b/sts_assume_role.py index 4048373c614..fca345ad4ac 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -105,7 +105,7 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule 
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict

 try:

From b78b751d56e6bafe8719e8a8d09f97ae16bc2908 Mon Sep 17 00:00:00 2001
From: Jill R <4121322+jillr@users.noreply.github.com>
Date: Wed, 17 Jun 2020 09:31:32 -0700
Subject: [PATCH 014/683] Update docs (#99)

* Update docs

Remove .git from repo url so links in readme will generate correctly
Add required ansible version
Run latest version of add_docs.py
Add version_added string to modules

* galaxy.yml was missing authors
---
 aws_acm.py | 1 +
 aws_acm_info.py | 1 +
 aws_api_gateway.py | 1 +
 aws_application_scaling_policy.py | 1 +
 aws_batch_compute_environment.py | 1 +
 aws_batch_job_definition.py | 1 +
 aws_batch_job_queue.py | 1 +
 aws_codebuild.py | 1 +
 aws_codecommit.py | 1 +
 aws_codepipeline.py | 1 +
 aws_config_aggregation_authorization.py | 1 +
 aws_config_aggregator.py | 1 +
 aws_config_delivery_channel.py | 1 +
 aws_config_recorder.py | 1 +
 aws_config_rule.py | 1 +
 aws_direct_connect_connection.py | 1 +
 aws_direct_connect_gateway.py | 1 +
 aws_direct_connect_link_aggregation_group.py | 1 +
 aws_direct_connect_virtual_interface.py | 1 +
 aws_eks_cluster.py | 1 +
 aws_elasticbeanstalk_app.py | 1 +
 aws_glue_connection.py | 1 +
 aws_glue_job.py | 1 +
 aws_inspector_target.py | 1 +
 aws_kms.py | 1 +
 aws_kms_info.py | 1 +
 aws_region_info.py | 1 +
 aws_s3_bucket_info.py | 1 +
 aws_s3_cors.py | 1 +
 aws_secret.py | 1 +
 aws_ses_identity.py | 1 +
 aws_ses_identity_policy.py | 1 +
 aws_ses_rule_set.py | 1 +
 aws_sgw_info.py | 1 +
 aws_ssm_parameter_store.py | 1 +
 aws_step_functions_state_machine.py | 1 +
 aws_step_functions_state_machine_execution.py | 1 +
 aws_waf_condition.py | 1 +
 aws_waf_info.py | 1 +
 aws_waf_rule.py | 1 +
 aws_waf_web_acl.py | 1 +
 cloudformation_exports_info.py | 1 +
 cloudformation_stack_set.py | 1 +
 cloudfront_distribution.py | 1 +
 cloudfront_info.py | 1 +
 cloudfront_invalidation.py | 1 +
 cloudfront_origin_access_identity.py | 1 +
 cloudtrail.py | 1 +
 cloudwatchevent_rule.py | 1 +
 cloudwatchlogs_log_group.py | 1 +
 cloudwatchlogs_log_group_info.py | 1 +
 cloudwatchlogs_log_group_metric_filter.py | 1 +
 data_pipeline.py | 1 +
 dms_endpoint.py | 1 +
 dms_replication_subnet_group.py | 1 +
 dynamodb_table.py | 1 +
 dynamodb_ttl.py | 1 +
 ec2_ami_copy.py | 1 +
 ec2_asg.py | 1 +
 ec2_asg_info.py | 1 +
 ec2_asg_lifecycle_hook.py | 1 +
 ec2_customer_gateway.py | 1 +
 ec2_customer_gateway_info.py | 1 +
 ec2_eip.py | 1 +
 ec2_eip_info.py | 1 +
 ec2_elb.py | 1 +
 ec2_elb_info.py | 1 +
 ec2_instance.py | 1 +
 ec2_instance_info.py | 1 +
 ec2_launch_template.py | 1 +
 ec2_lc.py | 1 +
 ec2_lc_find.py | 1 +
 ec2_lc_info.py | 1 +
 ec2_metric_alarm.py | 1 +
 ec2_placement_group.py | 1 +
 ec2_placement_group_info.py | 1 +
 ec2_scaling_policy.py | 1 +
 ec2_snapshot_copy.py | 1 +
 ec2_transit_gateway.py | 1 +
 ec2_transit_gateway_info.py | 1 +
 ec2_vpc_egress_igw.py | 1 +
 ec2_vpc_endpoint.py | 1 +
 ec2_vpc_endpoint_info.py | 1 +
 ec2_vpc_igw.py | 1 +
 ec2_vpc_igw_info.py | 1 +
 ec2_vpc_nacl.py | 1 +
 ec2_vpc_nacl_info.py | 1 +
 ec2_vpc_nat_gateway.py | 1 +
 ec2_vpc_nat_gateway_info.py | 1 +
 ec2_vpc_peer.py | 1 +
 ec2_vpc_peering_info.py | 1 +
 ec2_vpc_route_table.py | 1 +
 ec2_vpc_route_table_info.py | 1 +
 ec2_vpc_vgw.py | 1 +
 ec2_vpc_vgw_info.py | 1 +
 ec2_vpc_vpn.py | 1 +
 ec2_vpc_vpn_info.py | 1 +
 ec2_win_password.py | 1 +
 ecs_attribute.py | 1 +
 ecs_cluster.py | 1 +
 ecs_ecr.py | 1 +
 ecs_service.py | 1 +
 ecs_service_info.py | 1 +
 ecs_tag.py | 1 +
 ecs_task.py | 1 +
 ecs_taskdefinition.py | 1 +
 ecs_taskdefinition_info.py | 1 +
 efs.py | 1 +
 efs_info.py | 1 +
 elasticache.py | 1 +
 elasticache_info.py | 1 +
 elasticache_parameter_group.py | 1 +
 elasticache_snapshot.py | 1 +
 elasticache_subnet_group.py | 1 +
 elb_application_lb.py | 1 +
 elb_application_lb_info.py | 1 +
 elb_classic_lb.py | 1 +
 elb_classic_lb_info.py | 1 +
 elb_instance.py | 1 +
 elb_network_lb.py | 1 +
 elb_target.py | 1 +
 elb_target_group.py | 1 +
 elb_target_group_info.py | 1 +
 elb_target_info.py | 1 +
 execute_lambda.py | 1 +
 iam.py | 1 +
 iam_cert.py | 1 +
 iam_group.py | 1 +
 iam_managed_policy.py | 1 +
 iam_mfa_device_info.py | 1 +
 iam_password_policy.py | 1 +
 iam_policy.py | 1 +
 iam_policy_info.py | 1 +
 iam_role.py | 1 +
 iam_role_info.py | 1 +
 iam_saml_federation.py | 1 +
 iam_server_certificate_info.py | 1 +
 iam_user.py | 1 +
 iam_user_info.py | 1 +
 kinesis_stream.py | 1 +
 lambda.py | 1 +
 lambda_alias.py | 1 +
 lambda_event.py | 1 +
 lambda_facts.py | 1 +
 lambda_info.py | 1 +
 lambda_policy.py | 1 +
 lightsail.py | 1 +
 rds.py | 1 +
 rds_instance.py | 1 +
 rds_instance_info.py | 1 +
 rds_param_group.py | 1 +
 rds_snapshot.py | 1 +
 rds_snapshot_info.py | 1 +
 rds_subnet_group.py | 1 +
 redshift.py | 1 +
 redshift_cross_region_snapshots.py | 1 +
 redshift_info.py | 1 +
 redshift_subnet_group.py | 1 +
 route53.py | 1 +
 route53_health_check.py | 1 +
 route53_info.py | 1 +
 route53_zone.py | 1 +
 s3_bucket_notification.py | 1 +
 s3_lifecycle.py | 1 +
 s3_logging.py | 1 +
 s3_sync.py | 1 +
 s3_website.py | 1 +
 sns.py | 1 +
 sns_topic.py | 1 +
 sqs_queue.py | 1 +
 sts_assume_role.py | 1 +
 sts_session_token.py | 1 +
 172 files changed, 172 insertions(+)

diff --git a/aws_acm.py b/aws_acm.py
index 5da9ab617e1..b57618b1ac5 100644
--- a/aws_acm.py
+++ b/aws_acm.py
@@ -28,6 +28,7 @@
 DOCUMENTATION = '''
 module: aws_acm
 short_description: Upload and delete certificates in the AWS Certificate Manager service
+version_added: 1.0.0
 description:
   - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM).
   - >
diff --git a/aws_acm_info.py b/aws_acm_info.py
index d23fe502ea7..16656021f36 100644
--- a/aws_acm_info.py
+++ b/aws_acm_info.py
@@ -9,6 +9,7 @@
 DOCUMENTATION = '''
 module: aws_acm_info
 short_description: Retrieve certificate information from AWS Certificate Manager service
+version_added: 1.0.0
 description:
   - Retrieve information for ACM certificates
   - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
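An aside on the first half of this section: the hunks from the earlier patches in this series all rewrite the same thing, moving imports off the old module_utils.aws.* paths onto the flattened module_utils.* layout in amazon.aws. As a reference point, here is a sketch of the import style those hunks converge on, assembled only from '+' lines that appear above; running it requires the amazon.aws collection on the collections path, and the empty argument_spec and trivial main() are illustrative, not taken from any module in this series.

    from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
    from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict

    def main():
        # AnsibleAWSModule now comes from module_utils.core instead of
        # module_utils.aws.core; the helper names themselves are unchanged.
        module = AnsibleAWSModule(argument_spec=dict())
        module.exit_json(changed=False)

    if __name__ == '__main__':
        main()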
diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 380745a4408..f67bae92808 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: aws_api_gateway +version_added: 1.0.0 short_description: Manage AWS API Gateway APIs description: - Allows for the management of API Gateway APIs diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 5a2441c5a02..468705b1938 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_application_scaling_policy +version_added: 1.0.0 short_description: Manage Application Auto Scaling Scaling Policies notes: - for details of the parameters and returns see diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index dd7ce4cb9f2..3ead4a8512b 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_batch_compute_environment +version_added: 1.0.0 short_description: Manage AWS Batch Compute Environments description: - This module allows the management of AWS Batch Compute Environments. diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 347af23ee4e..b93ff5febe7 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_batch_job_definition +version_added: 1.0.0 short_description: Manage AWS Batch Job Definitions description: - This module allows the management of AWS Batch Job Definitions. diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index 59cc0b3a24c..afc49015397 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_batch_job_queue +version_added: 1.0.0 short_description: Manage AWS Batch Job Queues description: - This module allows the management of AWS Batch Job Queues. diff --git a/aws_codebuild.py b/aws_codebuild.py index 5ad56103123..22011422229 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_codebuild +version_added: 1.0.0 short_description: Create or delete an AWS CodeBuild project notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html). diff --git a/aws_codecommit.py b/aws_codecommit.py index 8b929454e9d..5fe907cc37d 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_codecommit +version_added: 1.0.0 short_description: Manage repositories in AWS CodeCommit description: - Supports creation and deletion of CodeCommit repositories. 
diff --git a/aws_codepipeline.py b/aws_codepipeline.py index ff38319ac7b..5406389a129 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_codepipeline +version_added: 1.0.0 short_description: Create or delete AWS CodePipelines notes: - for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html) diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index 7bd95fc3b0b..5a4ee38bf81 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_config_aggregation_authorization +version_added: 1.0.0 short_description: Manage cross-account AWS Config authorizations description: - Module manages AWS Config resources. diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index c750edae99b..fac17574c4b 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_config_aggregator +version_added: 1.0.0 short_description: Manage AWS Config aggregations across multiple accounts description: - Module manages AWS Config resources diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index a50de66a3a9..6e7fe5b2fa9 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_config_delivery_channel +version_added: 1.0.0 short_description: Manage AWS Config delivery channels description: - This module manages AWS Config delivery locations for rule checks and configuration info. diff --git a/aws_config_recorder.py b/aws_config_recorder.py index b769e6278ff..2d3bf003d3b 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_config_recorder +version_added: 1.0.0 short_description: Manage AWS Config Recorders description: - Module manages AWS Config configuration recorder settings. diff --git a/aws_config_rule.py b/aws_config_rule.py index 08675f9a514..80550586aa8 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_config_rule +version_added: 1.0.0 short_description: Manage AWS Config resources description: - Module manages AWS Config rules diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 9975206e050..2fbda9124bb 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_direct_connect_connection +version_added: 1.0.0 short_description: Creates, deletes, modifies a DirectConnect connection description: - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location. diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index 1524e17fd7a..a7084faaeb7 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: aws_direct_connect_gateway author: Gobin Sougrakpam (@gobins) +version_added: 1.0.0 short_description: Manage AWS Direct Connect gateway description: - Creates AWS Direct Connect Gateway. 
diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 557be09929f..28af9bc103c 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_direct_connect_link_aggregation_group +version_added: 1.0.0 short_description: Manage Direct Connect LAG bundles description: - Create, delete, or modify a Direct Connect link aggregation group. diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 4139e5fd7f1..3c4cd886a81 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_direct_connect_virtual_interface +version_added: 1.0.0 short_description: Manage Direct Connect virtual interfaces description: - Create, delete, or modify a Direct Connect public or private virtual interface. diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 5851b52661c..c39f8464bec 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_eks_cluster +version_added: 1.0.0 short_description: Manage Elastic Kubernetes Service Clusters description: - Manage Elastic Kubernetes Service Clusters diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index cb610a29f82..bab889f0b07 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_elasticbeanstalk_app +version_added: 1.0.0 short_description: Create, update, and delete an elastic beanstalk application diff --git a/aws_glue_connection.py b/aws_glue_connection.py index 9aafb4c1d8a..7502af6f9e6 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_glue_connection +version_added: 1.0.0 short_description: Manage an AWS Glue connection description: - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. diff --git a/aws_glue_job.py b/aws_glue_job.py index f3c7513fdda..c0edd59328c 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_glue_job +version_added: 1.0.0 short_description: Manage an AWS Glue job description: - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. diff --git a/aws_inspector_target.py b/aws_inspector_target.py index 3d7af6b6c4a..b71fbf61c0d 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_inspector_target +version_added: 1.0.0 short_description: Create, Update and Delete Amazon Inspector Assessment Targets description: Creates, updates, or deletes Amazon Inspector Assessment Targets diff --git a/aws_kms.py b/aws_kms.py index e83fd9657e2..be4394f2caf 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_kms +version_added: 1.0.0 short_description: Perform various KMS management tasks. description: - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. 
diff --git a/aws_kms_info.py b/aws_kms_info.py index 1f6f9f394e3..defccf70342 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_kms_info +version_added: 1.0.0 short_description: Gather information about AWS KMS keys description: - Gather information about AWS KMS keys including tags and grants diff --git a/aws_region_info.py b/aws_region_info.py index dfe7892164b..09a548b54a7 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: aws_region_info short_description: Gather information about AWS regions. +version_added: 1.0.0 description: - Gather information about AWS regions. - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change. diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 7b250f89ed6..90e07a1b62b 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_s3_bucket_info +version_added: 1.0.0 short_description: Lists S3 buckets in AWS requirements: - boto3 >= 1.4.4 diff --git a/aws_s3_cors.py b/aws_s3_cors.py index d9a13e302fe..f56f9ce06b5 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_s3_cors +version_added: 1.0.0 short_description: Manage CORS for S3 buckets in AWS description: - Manage CORS for S3 buckets in AWS diff --git a/aws_secret.py b/aws_secret.py index 4ee846f1bb6..962501d5d02 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -10,6 +10,7 @@ DOCUMENTATION = r''' --- module: aws_secret +version_added: 1.0.0 short_description: Manage secrets stored in AWS Secrets Manager. description: - Create, update, and delete secrets stored in AWS Secrets Manager. diff --git a/aws_ses_identity.py b/aws_ses_identity.py index 5f649873ab3..710ec01817e 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_ses_identity +version_added: 1.0.0 short_description: Manages SES email and domain identity description: - This module allows the user to manage verified email and domain identity for SES. diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py index 4be04d8ecfd..bb743c6b14e 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_ses_identity_policy +version_added: 1.0.0 short_description: Manages SES sending authorization policies description: - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain). 
diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index 86b5f9bf2b5..0996497c5ca 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_ses_rule_set +version_added: 1.0.0 short_description: Manages SES inbound receipt rule sets description: - The M(community.aws.aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets diff --git a/aws_sgw_info.py b/aws_sgw_info.py index 409eeb8126b..adf7dde86aa 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: aws_sgw_info +version_added: 1.0.0 short_description: Fetch AWS Storage Gateway information description: - Fetch AWS Storage Gateway information diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 2c397ab5326..82138868760 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: aws_ssm_parameter_store +version_added: 1.0.0 short_description: Manage key-value pairs in aws parameter store. description: - Manage key-value pairs in aws parameter store. diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index 1220f69f42a..5ab13baa76c 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_step_functions_state_machine +version_added: 1.0.0 short_description: Manage AWS Step Functions state machines diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index a3a1d13d6c4..f9e1d3fa44c 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: aws_step_functions_state_machine_execution +version_added: 1.0.0 short_description: Start or stop execution of an AWS Step Functions state machine. diff --git a/aws_waf_condition.py b/aws_waf_condition.py index 7774d13f06f..e0f4dea2cc5 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' module: aws_waf_condition short_description: Create and delete WAF Conditions +version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/) diff --git a/aws_waf_info.py b/aws_waf_info.py index 15c1ef98f81..98840668656 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: aws_waf_info short_description: Retrieve information for WAF ACLs, Rule , Conditions and Filters. +version_added: 1.0.0 description: - Retrieve information for WAF ACLs, Rule , Conditions and Filters. - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change. diff --git a/aws_waf_rule.py b/aws_waf_rule.py index d2a9ad395ae..3ce660a3cbe 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' module: aws_waf_rule short_description: Create and delete WAF Rules +version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/). diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index 57ff9ea3515..da7b378a55c 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: aws_waf_web_acl short_description: Create and delete WAF Web ACLs. +version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/). 
diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 2308eb8e02e..d53d83bd027 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: cloudformation_exports_info short_description: Read a value from CloudFormation Exports +version_added: 1.0.0 description: - Module retrieves a value from CloudFormation Exports requirements: ['boto3 >= 1.11.15'] diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 986db8dac3a..990dfdc33c8 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: cloudformation_stack_set +version_added: 1.0.0 short_description: Manage groups of CloudFormation stacks description: - Launches/updates/deletes AWS CloudFormation Stack Sets. diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 80be45e1268..f5abb2a0d8e 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- +version_added: 1.0.0 module: cloudfront_distribution short_description: Create, update and delete AWS CloudFront distributions. diff --git a/cloudfront_info.py b/cloudfront_info.py index a5bcb4ca572..cc6f9472bb0 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: cloudfront_info +version_added: 1.0.0 short_description: Obtain facts about an AWS CloudFront distribution description: - Gets information about an AWS CloudFront distribution. diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index b1da91c1c66..1f3e50331ca 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- +version_added: 1.0.0 module: cloudfront_invalidation short_description: create invalidations for AWS CloudFront distributions diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index af1ea4964b1..00f188222c5 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- +version_added: 1.0.0 module: cloudfront_origin_access_identity short_description: Create, update and delete origin access identities for a diff --git a/cloudtrail.py b/cloudtrail.py index fe8d500a4c3..c0bf3f4db07 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: cloudtrail +version_added: 1.0.0 short_description: manage CloudTrail create, delete, update description: - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled. diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 00a1908145a..b90b7feca6c 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -9,6 +9,7 @@ DOCUMENTATION = r''' --- module: cloudwatchevent_rule +version_added: 1.0.0 short_description: Manage CloudWatch Event rules and targets description: - This module creates and manages CloudWatch event rules and targets. diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index c2f10956f34..cd1e94cfb53 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: cloudwatchlogs_log_group +version_added: 1.0.0 short_description: create or delete log_group in CloudWatchLogs notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html). 
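One detail worth noting: the hunks do not all place version_added in the same spot. Most add it after module: or short_description:, while the cloudfront_distribution, cloudfront_invalidation, and cloudfront_origin_access_identity hunks above put it before module:, right after the YAML document marker. Since DOCUMENTATION is parsed as a YAML mapping, key order does not matter; a quick sketch, assuming PyYAML is installed:

    import yaml  # PyYAML, assumed available

    # The two placements seen in these hunks parse to the same mapping.
    a = yaml.safe_load("module: cloudfront_distribution\nversion_added: 1.0.0\n")
    b = yaml.safe_load("version_added: 1.0.0\nmodule: cloudfront_distribution\n")
    assert a == b
    print(a["version_added"])  # prints 1.0.0, parsed as a string, not a float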
diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index f1b87c8d52d..eae18b97c37 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: cloudwatchlogs_log_group_info +version_added: 1.0.0 short_description: Get information about log_group in CloudWatchLogs description: - Lists the specified log groups. You can list all your log groups or filter the results by prefix. diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index 852d7900e3a..a05c7fe2029 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -8,6 +8,7 @@ DOCUMENTATION = ''' --- module: cloudwatchlogs_log_group_metric_filter +version_added: 1.0.0 author: - "Markus Bergholz (@markuman)" short_description: Manage CloudWatch log group metric filter diff --git a/data_pipeline.py b/data_pipeline.py index f52cf3f842e..d25563e45ac 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: data_pipeline +version_added: 1.0.0 author: - Raghu Udiyar (@raags) - Sloane Hertel (@s-hertel) diff --git a/dms_endpoint.py b/dms_endpoint.py index 438a9a57271..829aae2773d 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: dms_endpoint +version_added: 1.0.0 short_description: Creates or destroys a data migration services endpoint description: - Creates or destroys a data migration services endpoint, diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 0c4a37d5b1f..5aa633b44f3 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: dms_replication_subnet_group +version_added: 1.0.0 short_description: creates or destroys a data migration services subnet group description: - Creates or destroys a data migration services subnet group. diff --git a/dynamodb_table.py b/dynamodb_table.py index ee5cd8470c0..d528e460078 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: dynamodb_table +version_added: 1.0.0 short_description: Create, update or delete AWS Dynamo DB tables description: - Create or delete AWS Dynamo DB tables. diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index dbf7bcfc53c..330bf30f14e 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: dynamodb_ttl +version_added: 1.0.0 short_description: Set TTL for a given DynamoDB table description: - Uses boto3 to set TTL. diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 2430d04cbb7..638db1c3404 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: ec2_ami_copy +version_added: 1.0.0 short_description: copies AMI between AWS regions, return new image id description: - Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.) diff --git a/ec2_asg.py b/ec2_asg.py index 323120035f8..43afa725385 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_asg +version_added: 1.0.0 short_description: Create or delete AWS AutoScaling Groups (ASGs) description: - Can create or delete AWS AutoScaling Groups. 
diff --git a/ec2_asg_info.py b/ec2_asg_info.py index d3fb1e101bb..07df498968a 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_asg_info +version_added: 1.0.0 short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS description: - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index 0127ac9137c..bab1ef37f32 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: ec2_asg_lifecycle_hook +version_added: 1.0.0 short_description: Create, delete or update AWS ASG Lifecycle Hooks. description: - Will create a new hook when I(state=present) and no given Hook is found. diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 8ac3f73d46a..bda1626640b 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_customer_gateway +version_added: 1.0.0 short_description: Manage an AWS customer gateway description: - Manage an AWS customer gateway. diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index d4a10d23cd0..1526cb639d9 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_customer_gateway_info +version_added: 1.0.0 short_description: Gather information about customer gateways in AWS description: - Gather information about customer gateways in AWS. diff --git a/ec2_eip.py b/ec2_eip.py index 16ce4ddf70f..c43363e3b6d 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: ec2_eip +version_added: 1.0.0 short_description: manages EC2 elastic IP (EIP) addresses. description: - This module can allocate or release an EIP. diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 33899dd648c..c31ec738f09 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_eip_info +version_added: 1.0.0 short_description: List EC2 EIP details description: - List details of EC2 Elastic IP addresses. diff --git a/ec2_elb.py b/ec2_elb.py index 9ae1dc08b58..06fcc9601e2 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_elb +version_added: 1.0.0 short_description: De-registers or registers instances from EC2 ELBs description: - This module de-registers or registers an AWS EC2 instance from the ELBs diff --git a/ec2_elb_info.py b/ec2_elb_info.py index bf753c2cbf6..57cd296d262 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: ec2_elb_info +version_added: 1.0.0 short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - Gather information about EC2 Elastic Load Balancers in AWS diff --git a/ec2_instance.py b/ec2_instance.py index 912fa7cbe72..9382659f71b 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_instance +version_added: 1.0.0 short_description: Create & manage EC2 instances description: - Create and manage AWS EC2 instances. 
diff --git a/ec2_instance_info.py b/ec2_instance_info.py index e94aaa74b21..8883be6923d 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_instance_info +version_added: 1.0.0 short_description: Gather information about ec2 instances in AWS description: - Gather information about ec2 instances in AWS diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 3605b0a2db5..6ac54a1fee3 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -8,6 +8,7 @@ DOCUMENTATION = ''' --- module: ec2_launch_template +version_added: 1.0.0 short_description: Manage EC2 launch templates description: - Create, modify, and delete EC2 Launch Templates, which can be used to diff --git a/ec2_lc.py b/ec2_lc.py index a8e6d87378a..46e125a19ce 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: ec2_lc +version_added: 1.0.0 short_description: Create or delete AWS Autoscaling Launch Configurations diff --git a/ec2_lc_find.py b/ec2_lc_find.py index b1c457b945a..700c84610c5 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: ec2_lc_find +version_added: 1.0.0 short_description: Find AWS Autoscaling Launch Configurations description: - Returns list of matching Launch Configurations for a given name, along with other useful information. diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 8e1cf258851..899db903fd9 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: ec2_lc_info +version_added: 1.0.0 short_description: Gather information about AWS Autoscaling Launch Configurations. description: - Gather information about AWS Autoscaling Launch Configurations. diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index a07639aa387..3aef6799b23 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' module: ec2_metric_alarm short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" +version_added: 1.0.0 description: - Can create or delete AWS metric alarms. - Metrics you wish to alarm on must already exist. diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 5d4b0087086..7d9a8004544 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_placement_group +version_added: 1.0.0 short_description: Create or delete an EC2 Placement Group description: - Create an EC2 Placement Group; if the placement group already exists, diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 3559debfe60..96451d69ce5 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_placement_group_info +version_added: 1.0.0 short_description: List EC2 Placement Group(s) details description: - List details of EC2 Placement Group(s). diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 540b70527df..141731199d9 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_scaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups +version_added: 1.0.0 description: - Can create or delete scaling policies for autoscaling groups. - Referenced autoscaling groups must already exist. 
diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 68378d3b9c3..65ee1c980f5 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: ec2_snapshot_copy +version_added: 1.0.0 short_description: Copies an EC2 snapshot and returns the new Snapshot ID. description: - Copies an EC2 Snapshot from a source region to a destination region. diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index b4eca57de97..5a7ea4b248c 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_transit_gateway short_description: Create and delete AWS Transit Gateways +version_added: 1.0.0 description: - Creates AWS Transit Gateways. - Deletes AWS Transit Gateways. diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 42f180ab97f..ddae796cf47 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' module: ec2_transit_gateway_info short_description: Gather information about ec2 transit gateways in AWS +version_added: 1.0.0 description: - Gather information about ec2 transit gateways in AWS author: "Bob Boldin (@BobBoldin)" diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 2981d194ef9..b2f481b86dd 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_egress_igw +version_added: 1.0.0 short_description: Manage an AWS VPC Egress Only Internet gateway description: - Manage an AWS VPC Egress Only Internet gateway diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 7978c48dfde..920cf45ca6e 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_endpoint short_description: Create and delete AWS VPC Endpoints. +version_added: 1.0.0 description: - Creates AWS VPC endpoints. - Deletes AWS VPC endpoints. diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 0f23ca53217..a1f3ff0a901 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -8,6 +8,7 @@ DOCUMENTATION = ''' module: ec2_vpc_endpoint_info short_description: Retrieves AWS VPC endpoints details using AWS methods. +version_added: 1.0.0 description: - Gets various details related to AWS VPC Endpoints. - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change. diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index c1960c14bbb..b920682b76c 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_igw +version_added: 1.0.0 short_description: Manage an AWS VPC Internet gateway description: - Manage an AWS VPC Internet gateway diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index dcc07a4349b..f33020e0c24 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_igw_info +version_added: 1.0.0 short_description: Gather information about internet gateways in AWS description: - Gather information about internet gateways in AWS. diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 14853edf767..f2ca5cda6f6 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_nacl short_description: create and delete Network ACLs. 
+version_added: 1.0.0 description: - Read the AWS documentation for Network ACLS U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 2cc4f012d58..337a91d1c93 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -8,6 +8,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_nacl_info +version_added: 1.0.0 short_description: Gather information about Network ACLs in an AWS VPC description: - Gather information about Network ACLs in an AWS VPC diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 306c8ac49c4..4272dc648c4 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_nat_gateway +version_added: 1.0.0 short_description: Manage AWS VPC NAT Gateways. description: - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids. diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 83fb9b0f182..b734721b5ea 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_nat_gateway_info short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods. +version_added: 1.0.0 description: - Gets various details related to AWS VPC Managed Nat Gateways - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change. diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 5c94d4e399b..2a08618a73f 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_peer short_description: create, delete, accept, and reject VPC peering connections between two VPCs. +version_added: 1.0.0 description: - Read the AWS documentation for VPC Peering Connections U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html). diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 8472fc4f58c..f552358e362 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_peering_info short_description: Retrieves AWS VPC Peering details using AWS methods. +version_added: 1.0.0 description: - Gets various details related to AWS VPC Peers - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change. diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index fbbae5c5a00..39f0ffc42bf 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_route_table +version_added: 1.0.0 short_description: Manage route tables for AWS virtual private clouds description: - Manage route tables for AWS virtual private clouds diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index 4823f2db49e..8af3935bd36 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_route_table_info +version_added: 1.0.0 short_description: Gather information about ec2 VPC route tables in AWS description: - Gather information about ec2 VPC route tables in AWS diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 511616a0838..1ce3df5672e 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: ec2_vpc_vgw short_description: Create and delete AWS VPN Virtual Gateways. 
+version_added: 1.0.0 description: - Creates AWS VPN Virtual Gateways - Deletes AWS VPN Virtual Gateways diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index d526b54a372..5c7b866c7d6 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_vgw_info +version_added: 1.0.0 short_description: Gather information about virtual gateways in AWS description: - Gather information about virtual gateways in AWS. diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 72a3b9a3eb1..1ac818485bb 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_vpn +version_added: 1.0.0 short_description: Create, modify, and delete EC2 VPN connections. description: - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index b9830d7d31e..1ba8210a722 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_vpc_vpn_info +version_added: 1.0.0 short_description: Gather information about VPN Connections in AWS. description: - Gather information about VPN Connections in AWS. diff --git a/ec2_win_password.py b/ec2_win_password.py index 9ae8cd52dc8..7c7716b8a44 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ec2_win_password +version_added: 1.0.0 short_description: Gets the default administrator password for ec2 windows instances description: - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). diff --git a/ecs_attribute.py b/ecs_attribute.py index 37faa28ac03..24e71a29bdb 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_attribute +version_added: 1.0.0 short_description: manage ecs attributes description: - Create, update or delete ECS container instance attributes. diff --git a/ecs_cluster.py b/ecs_cluster.py index 3610dcc6a6e..12d453f6ae9 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_cluster +version_added: 1.0.0 short_description: Create or terminate ECS clusters. notes: - When deleting a cluster, the information returned is the state of the cluster prior to deletion. diff --git a/ecs_ecr.py b/ecs_ecr.py index f0b0df8b7f1..533792877eb 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -12,6 +12,7 @@ DOCUMENTATION = ''' --- module: ecs_ecr +version_added: 1.0.0 short_description: Manage Elastic Container Registry repositories description: - Manage Elastic Container Registry repositories. diff --git a/ecs_service.py b/ecs_service.py index 27ee4b8fc39..b3995f7e8c9 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_service +version_added: 1.0.0 short_description: Create, terminate, start or stop a service in ECS description: - Creates or terminates ECS. services. diff --git a/ecs_service_info.py b/ecs_service_info.py index eb7f6215ef4..4d04fdf4986 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_service_info +version_added: 1.0.0 short_description: List or describe services in ECS description: - Lists or describes services in ECS. 
diff --git a/ecs_tag.py b/ecs_tag.py index 364546a4bee..9e4f97989f8 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -8,6 +8,7 @@ DOCUMENTATION = r''' --- module: ecs_tag +version_added: 1.0.0 short_description: create and remove tags on Amazon ECS resources notes: - none diff --git a/ecs_task.py b/ecs_task.py index 0c926954023..2039b8c69d0 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_task +version_added: 1.0.0 short_description: Run, start or stop a task in ecs description: - Creates or deletes instances of task definitions. diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index f61595fcd77..f48a442ab1f 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_taskdefinition +version_added: 1.0.0 short_description: register a task definition in ecs description: - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS). diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index 82a9bc968c7..ef5b20c4602 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: ecs_taskdefinition_info +version_added: 1.0.0 short_description: Describe a task definition in ECS notes: - For details of the parameters and returns see diff --git a/efs.py b/efs.py index b5174777ff4..43c81d9d0d1 100644 --- a/efs.py +++ b/efs.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: efs +version_added: 1.0.0 short_description: create and maintain EFS file systems description: - Module allows create, search and destroy Amazon EFS file systems. diff --git a/efs_info.py b/efs_info.py index edc8e0daac6..95e82926486 100644 --- a/efs_info.py +++ b/efs_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: efs_info +version_added: 1.0.0 short_description: Get information about Amazon EFS file systems description: - This module can be used to search Amazon EFS file systems. diff --git a/elasticache.py b/elasticache.py index f649ea8010c..b6b52302baa 100644 --- a/elasticache.py +++ b/elasticache.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: elasticache +version_added: 1.0.0 short_description: Manage cache clusters in Amazon ElastiCache description: - Manage cache clusters in Amazon ElastiCache. diff --git a/elasticache_info.py b/elasticache_info.py index 93e8ae8d43d..8f3850ddf96 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: elasticache_info short_description: Retrieve information for AWS ElastiCache clusters +version_added: 1.0.0 description: - Retrieve information from AWS ElastiCache clusters - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change. diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index c866fa9c83c..95dacf52b23 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elasticache_parameter_group +version_added: 1.0.0 short_description: Manage cache parameter groups in Amazon ElastiCache. description: - Manage cache security groups in Amazon ElastiCache. 
diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 4784dd53a29..9f65d6081f0 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elasticache_snapshot +version_added: 1.0.0 short_description: Manage cache snapshots in Amazon ElastiCache description: - Manage cache snapshots in Amazon ElastiCache. diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 3048f0a7baa..7a874f3b1ae 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elasticache_subnet_group +version_added: 1.0.0 short_description: manage ElastiCache subnet groups description: - Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5. diff --git a/elb_application_lb.py b/elb_application_lb.py index 6daaad7a49c..3f8c44c9f36 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' --- module: elb_application_lb +version_added: 1.0.0 short_description: Manage an Application load balancer description: - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index e1711dbef45..c9300f5ed01 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_application_lb_info +version_added: 1.0.0 short_description: Gather information about application ELBs in AWS description: - Gather information about application ELBs in AWS diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 0ae1bc7dda5..1f8679b30d2 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_classic_lb +version_added: 1.0.0 description: - Returns information about the load balancer. - Will be marked changed when called only if state is changed. diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 4b2a2db64bb..da8f6c5af11 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: elb_classic_lb_info +version_added: 1.0.0 short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - Gather information about EC2 Elastic Load Balancers in AWS diff --git a/elb_instance.py b/elb_instance.py index 20992459f57..dd541ef2e58 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_instance +version_added: 1.0.0 short_description: De-registers or registers instances from EC2 ELBs description: - This module de-registers or registers an AWS EC2 instance from the ELBs diff --git a/elb_network_lb.py b/elb_network_lb.py index 5e1b52213cc..2f824c09b59 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: elb_network_lb +version_added: 1.0.0 short_description: Manage a Network Load Balancer description: - Manage an AWS Network Elastic Load Balancer. 
See diff --git a/elb_target.py b/elb_target.py index 53d715578b8..b47de9f457d 100644 --- a/elb_target.py +++ b/elb_target.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_target +version_added: 1.0.0 short_description: Manage a target in a target group description: - Used to register or deregister a target in a target group diff --git a/elb_target_group.py b/elb_target_group.py index f3c0723ac27..e0c8e57bfac 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_target_group +version_added: 1.0.0 short_description: Manage a target group for an Application or Network load balancer description: - Manage an AWS Elastic Load Balancer target group. See diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 5c4fa2f1f64..78ce88613bc 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: elb_target_group_info +version_added: 1.0.0 short_description: Gather information about ELB target groups in AWS description: - Gather information about ELB target groups in AWS diff --git a/elb_target_info.py b/elb_target_info.py index f6b0f104032..dda76f08c24 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -7,6 +7,7 @@ DOCUMENTATION = ''' --- module: elb_target_info +version_added: 1.0.0 short_description: Gathers which target groups a target is associated with. description: - This module will search through every target group in a region to find diff --git a/execute_lambda.py b/execute_lambda.py index 846cf47d22a..5e789e009ba 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: execute_lambda +version_added: 1.0.0 short_description: Execute an AWS Lambda function description: - This module executes AWS Lambda functions, allowing synchronous and asynchronous diff --git a/iam.py b/iam.py index 57d7ca653e3..74cf77d3c7f 100644 --- a/iam.py +++ b/iam.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam +version_added: 1.0.0 short_description: Manage IAM users, groups, roles and keys description: - Allows for the management of IAM users, user API keys, groups, roles. diff --git a/iam_cert.py b/iam_cert.py index 1ea54c859d3..2aad121ea77 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' --- module: iam_cert +version_added: 1.0.0 short_description: Manage server certificates for use on ELBs and CloudFront description: - Allows for the management of server certificates. diff --git a/iam_group.py b/iam_group.py index 4a53a870833..7a9da3e6f57 100644 --- a/iam_group.py +++ b/iam_group.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' --- module: iam_group +version_added: 1.0.0 short_description: Manage AWS IAM groups description: - Manage AWS IAM groups. 
diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 0abe10faf5d..3e5f14a7ddb 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_managed_policy +version_added: 1.0.0 short_description: Manage User Managed IAM policies description: - Allows creating and removing managed IAM policies diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 07e98d6851c..f8c37a91acf 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_mfa_device_info +version_added: 1.0.0 short_description: List the MFA (Multi-Factor Authentication) devices registered for a user description: - List the MFA (Multi-Factor Authentication) devices registered for a user diff --git a/iam_password_policy.py b/iam_password_policy.py index fda220c0097..d654a846cfd 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: iam_password_policy +version_added: 1.0.0 short_description: Update an IAM Password Policy description: - Module updates an IAM Password Policy on a given AWS account diff --git a/iam_policy.py b/iam_policy.py index 1775a7a749e..e4debd7f1ce 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_policy +version_added: 1.0.0 short_description: Manage inline IAM policies for users, groups, and roles description: - Allows uploading or removing inline IAM policies for IAM users, groups or roles. diff --git a/iam_policy_info.py b/iam_policy_info.py index 9f250e37b8d..f9ea30b8cc4 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_policy_info +version_added: 1.0.0 short_description: Retrieve inline IAM policies for users, groups, and roles description: - Supports fetching of inline IAM policies for IAM users, groups and roles. diff --git a/iam_role.py b/iam_role.py index 09db6ed9643..b20c564734a 100644 --- a/iam_role.py +++ b/iam_role.py @@ -8,6 +8,7 @@ DOCUMENTATION = ''' --- module: iam_role +version_added: 1.0.0 short_description: Manage AWS IAM roles description: - Manage AWS IAM roles. diff --git a/iam_role_info.py b/iam_role_info.py index 6b15c186360..95eabdb95ab 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_role_info +version_added: 1.0.0 short_description: Gather information on IAM roles description: - Gathers information about IAM roles. diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 27310083802..214cbe74179 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -22,6 +22,7 @@ DOCUMENTATION = ''' --- module: iam_saml_federation +version_added: 1.0.0 short_description: Maintain IAM SAML federation configuration. requirements: - boto3 diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 7ec6dca4c38..d57ef77ca86 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_server_certificate_info +version_added: 1.0.0 short_description: Retrieve the information of a server certificate description: - Retrieve the attributes of a server certificate. diff --git a/iam_user.py b/iam_user.py index 15a972ebc09..9dc9eb45eca 100644 --- a/iam_user.py +++ b/iam_user.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: iam_user +version_added: 1.0.0 short_description: Manage AWS IAM users description: - Manage AWS IAM users. 
diff --git a/iam_user_info.py b/iam_user_info.py index 97a01ca142f..8e1856b1763 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: iam_user_info +version_added: 1.0.0 short_description: Gather IAM user(s) facts in AWS description: - This module can be used to gather IAM user(s) facts in AWS. diff --git a/kinesis_stream.py b/kinesis_stream.py index c9d9fe266cb..c3142137c1b 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: kinesis_stream +version_added: 1.0.0 short_description: Manage a Kinesis Stream. description: - Create or Delete a Kinesis Stream. diff --git a/lambda.py b/lambda.py index 705344ab9f6..2f417469c15 100644 --- a/lambda.py +++ b/lambda.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: lambda +version_added: 1.0.0 short_description: Manage AWS Lambda functions description: - Allows for the management of Lambda functions. diff --git a/lambda_alias.py b/lambda_alias.py index 9c78b8dc575..2b74cdb6352 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: lambda_alias +version_added: 1.0.0 short_description: Creates, updates or deletes AWS Lambda function aliases description: - This module allows the management of AWS Lambda functions aliases via the Ansible diff --git a/lambda_event.py b/lambda_event.py index 8aaf22ab2c2..e1a35220b74 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: lambda_event +version_added: 1.0.0 short_description: Creates, updates or deletes AWS Lambda function event mappings description: - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream diff --git a/lambda_facts.py b/lambda_facts.py index ad56e9c70a5..0d102fd7340 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: lambda_facts +version_added: 1.0.0 deprecated: removed_at_date: '2021-12-01' removed_from_collection: 'community.aws' diff --git a/lambda_info.py b/lambda_info.py index 568d1f72d2a..b81f8521013 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: lambda_info +version_added: 1.0.0 short_description: Gathers AWS Lambda function details description: - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. diff --git a/lambda_policy.py b/lambda_policy.py index 81bd7ec9200..09c74423a0d 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: lambda_policy +version_added: 1.0.0 short_description: Creates, updates or deletes AWS Lambda policy statements. description: - This module allows the management of AWS Lambda policy statements. diff --git a/lightsail.py b/lightsail.py index f65c39ea0c8..4be2fc3f458 100644 --- a/lightsail.py +++ b/lightsail.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: lightsail +version_added: 1.0.0 short_description: Manage instances in AWS Lightsail description: - Manage instances in AWS Lightsail. diff --git a/rds.py b/rds.py index 2f4728bbbad..5b15934fc03 100644 --- a/rds.py +++ b/rds.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: rds +version_added: 1.0.0 short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts description: - Creates, deletes, or modifies rds resources. 
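The lambda_facts.py hunk above lands inside a deprecated: block; only removed_at_date and removed_from_collection are visible in the context lines. A sketch of the full shape such a block typically takes, where the why: and alternative: fields are my assumption about the surrounding content rather than anything shown in this diff:

# Assumed full shape of the deprecation metadata; only removed_at_date and
# removed_from_collection appear in the hunk above, the rest is a guess.
DOCUMENTATION = r'''
---
module: lambda_facts
version_added: 1.0.0
deprecated:
  removed_at_date: '2021-12-01'
  removed_from_collection: 'community.aws'
  why: Deprecated in favour of a C(_info) module.         # assumed
  alternative: Use M(community.aws.lambda_info) instead.  # assumed
short_description: Gathers AWS Lambda function details
'''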
diff --git a/rds_instance.py b/rds_instance.py index f626d114dee..f4018a3d6c1 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: rds_instance +version_added: 1.0.0 short_description: Manage RDS instances description: - Create, modify, and delete RDS instances. diff --git a/rds_instance_info.py b/rds_instance_info.py index 9cc95e378ad..cccd2b3f271 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: rds_instance_info +version_added: 1.0.0 short_description: obtain information about one or more RDS instances description: - Obtain information about one or more RDS instances. diff --git a/rds_param_group.py b/rds_param_group.py index e5cd2457458..ce271712211 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: rds_param_group +version_added: 1.0.0 short_description: manage RDS parameter groups description: - Creates, modifies, and deletes RDS parameter groups. diff --git a/rds_snapshot.py b/rds_snapshot.py index fe6b827f6c7..dd9f502886a 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: rds_snapshot +version_added: 1.0.0 short_description: manage Amazon RDS snapshots. description: - Creates or deletes RDS snapshots. diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 470ed0e2c6d..1d7003ae55c 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: rds_snapshot_info +version_added: 1.0.0 short_description: obtain information about one or more RDS snapshots description: - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora). diff --git a/rds_subnet_group.py b/rds_subnet_group.py index f913d41f296..3e207468e8e 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: rds_subnet_group +version_added: 1.0.0 short_description: manage RDS database subnet groups description: - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. diff --git a/redshift.py b/redshift.py index 8c3b5ccdfac..01e604ff30f 100644 --- a/redshift.py +++ b/redshift.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- author: +version_added: 1.0.0 - "Jens Carl (@j-carl), Hothead Games Inc." - "Rafael Driutti (@rafaeldriutti)" module: redshift diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index b022aaafbba..fbcf5543aee 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: redshift_cross_region_snapshots +version_added: 1.0.0 short_description: Manage Redshift Cross Region Snapshots description: - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots. diff --git a/redshift_info.py b/redshift_info.py index b1fbe802b5f..679f53c58d2 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: redshift_info +version_added: 1.0.0 author: "Jens Carl (@j-carl)" short_description: Gather information about Redshift cluster(s) description: diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 4351ac9e717..eded969cb76 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- author: +version_added: 1.0.0 - "Jens Carl (@j-carl), Hothead Games Inc." 
module: redshift_subnet_group short_description: manage Redshift cluster subnet groups diff --git a/route53.py b/route53.py index 385a1d10ec4..72ca73faeb1 100644 --- a/route53.py +++ b/route53.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' --- module: route53 +version_added: 1.0.0 short_description: add or delete entries in Amazons Route53 DNS service description: - Creates and deletes DNS records in Amazons Route53 service diff --git a/route53_health_check.py b/route53_health_check.py index 80f6691407a..77fcf912e08 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: route53_health_check +version_added: 1.0.0 short_description: Add or delete health-checks in Amazons Route53 DNS service description: - Creates and deletes DNS Health checks in Amazons Route53 service. diff --git a/route53_info.py b/route53_info.py index 466e39bd79f..77d72603ffe 100644 --- a/route53_info.py +++ b/route53_info.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: route53_info short_description: Retrieves route53 details using AWS methods +version_added: 1.0.0 description: - Gets various details related to Route53 zone, record set or health check details. - This module was called C(route53_facts) before Ansible 2.9. The usage did not change. diff --git a/route53_zone.py b/route53_zone.py index bcab3b2e167..6467dd04527 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' module: route53_zone short_description: add or delete Route53 zones +version_added: 1.0.0 description: - Creates and deletes Route53 private and public zones. requirements: [ boto3 ] diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index f61e5607a66..854f3cadd84 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' --- module: s3_bucket_notification +version_added: 1.0.0 short_description: Creates, updates or deletes S3 Bucket notification for lambda description: - This module allows the management of AWS Lambda function bucket event mappings via the diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 5bdf65ab1e3..72fe1616818 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: s3_lifecycle +version_added: 1.0.0 short_description: Manage s3 bucket lifecycle rules in AWS description: - Manage s3 bucket lifecycle rules in AWS diff --git a/s3_logging.py b/s3_logging.py index b672562131e..1bb585acd55 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: s3_logging +version_added: 1.0.0 short_description: Manage logging facility of an s3 bucket in AWS description: - Manage logging facility of an s3 bucket in AWS diff --git a/s3_sync.py b/s3_sync.py index 879452af057..3d6de33074b 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' --- module: s3_sync +version_added: 1.0.0 short_description: Efficiently upload multiple files to S3 description: - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. 
In addition to speed, it handles globbing, diff --git a/s3_website.py b/s3_website.py index 08d786cf0f1..f2196836537 100644 --- a/s3_website.py +++ b/s3_website.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: s3_website +version_added: 1.0.0 short_description: Configure an s3 bucket as a website description: - Configure an s3 bucket as a website diff --git a/sns.py b/sns.py index e1edfb99660..49b73aa68f4 100644 --- a/sns.py +++ b/sns.py @@ -11,6 +11,7 @@ DOCUMENTATION = ''' module: sns short_description: Send Amazon Simple Notification Service messages +version_added: 1.0.0 description: - Sends a notification to a topic on your Amazon SNS account. author: diff --git a/sns_topic.py b/sns_topic.py index ae3b960b7d8..4240a746754 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -10,6 +10,7 @@ DOCUMENTATION = ''' module: sns_topic short_description: Manages AWS SNS topics and subscriptions +version_added: 1.0.0 description: - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. - As of 2.6, this module can be use to subscribe and unsubscribe to topics outside of your AWS account. diff --git a/sqs_queue.py b/sqs_queue.py index 1756a6e33b4..40eda404b46 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: sqs_queue +version_added: 1.0.0 short_description: Creates or deletes AWS SQS queues. description: - Create or delete AWS SQS queues. diff --git a/sts_assume_role.py b/sts_assume_role.py index fca345ad4ac..378eb0031f8 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: sts_assume_role +version_added: 1.0.0 short_description: Assume a role using AWS Security Token Service and obtain temporary credentials description: - Assume a role using AWS Security Token Service and obtain temporary credentials. diff --git a/sts_session_token.py b/sts_session_token.py index 50c537623f6..aa4792e94d8 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -9,6 +9,7 @@ DOCUMENTATION = ''' --- module: sts_session_token +version_added: 1.0.0 short_description: Obtain a session token from the AWS Security Token Service description: - Obtain a session token from the AWS Security Token Service. From d10fdfa6d4c89861f4c91c5de33543039d3ef865 Mon Sep 17 00:00:00 2001 From: flowerysong Date: Wed, 8 Jul 2020 06:36:08 -0400 Subject: [PATCH 015/683] Fix documentation YAML for redshift and redshift_subnet_group (#128) --- redshift.py | 2 +- redshift_subnet_group.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/redshift.py b/redshift.py index 01e604ff30f..f6308392694 100644 --- a/redshift.py +++ b/redshift.py @@ -11,10 +11,10 @@ DOCUMENTATION = ''' --- author: -version_added: 1.0.0 - "Jens Carl (@j-carl), Hothead Games Inc." - "Rafael Driutti (@rafaeldriutti)" module: redshift +version_added: 1.0.0 short_description: create, delete, or modify an Amazon Redshift instance description: - Creates, deletes, or modifies Amazon Redshift cluster instances. diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index eded969cb76..615e667b9c8 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -10,9 +10,9 @@ DOCUMENTATION = ''' --- author: -version_added: 1.0.0 - "Jens Carl (@j-carl), Hothead Games Inc." module: redshift_subnet_group +version_added: 1.0.0 short_description: manage Redshift cluster subnet groups description: - Create, modifies, and deletes Redshift cluster subnet groups. 
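This commit's fix is easiest to see by parsing both layouts. In the broken version, version_added: sat between the author: key and its list items, which is not valid YAML (a mapping key cannot be followed by a sequence entry at the same level), so the whole DOCUMENTATION block failed to load. A runnable demonstration using PyYAML:

import yaml

# Layout before the fix: version_added splits the author key from its list.
BROKEN = '''
author:
version_added: 1.0.0
- "Jens Carl (@j-carl), Hothead Games Inc."
'''

# Layout after the fix: the author list stays intact, version_added follows.
FIXED = '''
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
version_added: 1.0.0
'''

try:
    yaml.safe_load(BROKEN)
except yaml.YAMLError as exc:
    print('broken layout fails to parse:', type(exc).__name__)

print(yaml.safe_load(FIXED))
# {'author': ['Jens Carl (@j-carl), Hothead Games Inc.'], 'version_added': '1.0.0'}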
From 4a96e6a760d98d4256c154430844b896f6645356 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 9 Jul 2020 02:46:44 +0530 Subject: [PATCH 016/683] sanity: Doc fixes (#130) Signed-off-by: Abhijeet Kasurde --- aws_acm_info.py | 12 ++-- aws_batch_compute_environment.py | 89 ++++++++++++------------- aws_batch_job_definition.py | 21 +++--- aws_batch_job_queue.py | 51 ++++++-------- aws_codebuild.py | 8 +-- aws_codepipeline.py | 8 +-- aws_config_aggregator.py | 8 +-- aws_direct_connect_virtual_interface.py | 8 +-- aws_eks_cluster.py | 10 +-- aws_glue_connection.py | 10 +-- aws_glue_job.py | 8 +-- aws_kms.py | 10 +-- aws_s3_cors.py | 9 +-- aws_waf_condition.py | 8 +-- aws_waf_rule.py | 12 ++-- aws_waf_web_acl.py | 8 +-- 16 files changed, 135 insertions(+), 145 deletions(-) diff --git a/aws_acm_info.py b/aws_acm_info.py index 16656021f36..97d9a879152 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: aws_acm_info short_description: Retrieve certificate information from AWS Certificate Manager service version_added: 1.0.0 @@ -49,7 +49,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: obtain all ACM certificates community.aws.aws_acm_info: @@ -76,7 +76,7 @@ ''' -RETURN = ''' +RETURN = r''' certificates: description: A list of certificates returned: always @@ -268,7 +268,11 @@ def main(): argument_spec = dict( certificate_arn=dict(aliases=['arn']), domain_name=dict(aliases=['name']), - statuses=dict(type='list', choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']), + statuses=dict( + type='list', + elements='str', + choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] + ), tags=dict(type='dict'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 3ead4a8512b..21eb4808f62 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -6,14 +6,15 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_batch_compute_environment version_added: 1.0.0 short_description: Manage AWS Batch Compute Environments description: - This module allows the management of AWS Batch Compute Environments. - It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.aws_batch_compute_environment) to manage the compute environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. @@ -21,8 +22,8 @@ options: compute_environment_name: description: - - The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores - are allowed. + - The name for your compute environment. + - Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed. required: true type: str type: @@ -39,7 +40,8 @@ type: str compute_environment_state: description: - - The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs + - The state of the compute environment. + - If the state is C(ENABLED), then the compute environment accepts jobs from a queue and can scale out automatically based on queues. 
default: "ENABLED" choices: ["ENABLED", "DISABLED"] @@ -108,7 +110,8 @@ bid_percentage: description: - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that - instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price + instance type before instances are launched. + - For example, if your bid percentage is 20%, then the Spot price must be below 20% of the current On-Demand price for that EC2 instance. type: int spot_iam_fleet_role: @@ -124,45 +127,39 @@ ''' -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: no - vars: +EXAMPLES = r''' +- name: My Batch Compute Environment + community.aws.aws_batch_compute_environment: + compute_environment_name: computeEnvironmentName state: present - tasks: - - name: My Batch Compute Environment - community.aws.aws_batch_compute_environment: - compute_environment_name: computeEnvironmentName - state: present - region: us-east-1 - compute_environment_state: ENABLED - type: MANAGED - compute_resource_type: EC2 - minv_cpus: 0 - maxv_cpus: 2 - desiredv_cpus: 1 - instance_types: - - optimal - subnets: - - my-subnet1 - - my-subnet2 - security_group_ids: - - my-sg1 - - my-sg2 - instance_role: arn:aws:iam:::instance-profile/ - tags: - tag1: value1 - tag2: value2 - service_role: arn:aws:iam:::role/service-role/ - register: aws_batch_compute_environment_action - - - name: show results - debug: - var: aws_batch_compute_environment_action + region: us-east-1 + compute_environment_state: ENABLED + type: MANAGED + compute_resource_type: EC2 + minv_cpus: 0 + maxv_cpus: 2 + desiredv_cpus: 1 + instance_types: + - optimal + subnets: + - my-subnet1 + - my-subnet2 + security_group_ids: + - my-sg1 + - my-sg2 + instance_role: arn:aws:iam:::instance-profile/ + tags: + tag1: value1 + tag2: value2 + service_role: arn:aws:iam:::role/service-role/ + register: aws_batch_compute_environment_action + +- name: show results + debug: + var: aws_batch_compute_environment_action ''' -RETURN = ''' +RETURN = r''' --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -229,9 +226,9 @@ type: dict ''' +import re from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict -import re try: from botocore.exceptions import ClientError, BotoCoreError @@ -459,10 +456,10 @@ def main(): minv_cpus=dict(type='int', required=True), maxv_cpus=dict(type='int', required=True), desiredv_cpus=dict(type='int'), - instance_types=dict(type='list', required=True), + instance_types=dict(type='list', required=True, elements='str'), image_id=dict(), - subnets=dict(type='list', required=True), - security_group_ids=dict(type='list', required=True), + subnets=dict(type='list', required=True, elements='str'), + security_group_ids=dict(type='list', required=True, elements='str'), ec2_key_pair=dict(), instance_role=dict(required=True), tags=dict(type='dict'), diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index b93ff5febe7..1c30d72efc5 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -6,17 +6,16 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_batch_job_definition version_added: 1.0.0 short_description: Manage AWS Batch Job Definitions description: - This module allows the management of AWS Batch Job Definitions. 
- It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.aws_batch_compute_environment) to manage the compute environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. - - author: Jon Meran (@jonmer85) options: job_definition_arn: @@ -178,7 +177,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' --- - hosts: localhost gather_facts: no @@ -208,7 +207,7 @@ debug: var=job_definition_create_result ''' -RETURN = ''' +RETURN = r''' --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -427,14 +426,14 @@ def main(): image=dict(required=True), vcpus=dict(type='int', required=True), memory=dict(type='int', required=True), - command=dict(type='list', default=[]), + command=dict(type='list', default=[], elements='str'), job_role_arn=dict(), - volumes=dict(type='list', default=[]), - environment=dict(type='list', default=[]), - mount_points=dict(type='list', default=[]), + volumes=dict(type='list', default=[], elements='dict'), + environment=dict(type='list', default=[], elements='dict'), + mount_points=dict(type='list', default=[], elements='dict'), readonly_root_filesystem=dict(), privileged=dict(), - ulimits=dict(type='list', default=[]), + ulimits=dict(type='list', default=[], elements='dict'), user=dict(), attempts=dict(type='int') ) diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index afc49015397..e95940dbb8f 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -6,17 +6,16 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_batch_job_queue version_added: 1.0.0 short_description: Manage AWS Batch Job Queues description: - This module allows the management of AWS Batch Job Queues. - It is idempotent and supports "Check" mode. Use module M(community.aws.aws_batch_compute_environment) to manage the compute + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.aws_batch_compute_environment) to manage the compute environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. - - author: Jon Meran (@jonmer85) options: job_queue_name: @@ -32,7 +31,7 @@ type: str job_queue_state: description: - - The state of the job queue. If the job queue state is ENABLED , it is able to accept jobs. + - The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs. 
default: "ENABLED" choices: ["ENABLED", "DISABLED"] type: str @@ -69,32 +68,26 @@ ''' EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: no - vars: +- name: My Batch Job Queue + community.aws.aws_batch_job_queue: + job_queue_name: jobQueueName state: present - tasks: - - name: My Batch Job Queue - community.aws.aws_batch_job_queue: - job_queue_name: jobQueueName - state: present - region: us-east-1 - job_queue_state: ENABLED - priority: 1 - compute_environment_order: - - order: 1 - compute_environment: my_compute_env1 - - order: 2 - compute_environment: my_compute_env2 - register: batch_job_queue_action - - - name: show results - debug: - var: batch_job_queue_action + region: us-east-1 + job_queue_state: ENABLED + priority: 1 + compute_environment_order: + - order: 1 + compute_environment: my_compute_env1 + - order: 2 + compute_environment: my_compute_env2 + register: batch_job_queue_action + +- name: show results + debug: + var: batch_job_queue_action ''' -RETURN = ''' +RETURN = r''' --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -293,7 +286,7 @@ def main(): job_queue_name=dict(required=True), job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), priority=dict(type='int', required=True), - compute_environment_order=dict(type='list', required=True), + compute_environment_order=dict(type='list', required=True, elements='dict'), ) module = AnsibleAWSModule( diff --git a/aws_codebuild.py b/aws_codebuild.py index 22011422229..7c5e7500a50 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_codebuild version_added: 1.0.0 @@ -163,7 +163,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - community.aws.aws_codebuild: @@ -191,7 +191,7 @@ state: present ''' -RETURN = ''' +RETURN = r''' project: description: Returns the dictionary describing the code project configuration. returned: success @@ -379,7 +379,7 @@ def main(): service_role=dict(), timeout_in_minutes=dict(type='int', default=60), encryption_key=dict(), - tags=dict(type='list'), + tags=dict(type='list', elements='dict'), vpc_config=dict(type='dict'), state=dict(choices=['present', 'absent'], default='present') ) diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 5406389a129..8b44dc7614e 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_codepipeline version_added: 1.0.0 @@ -79,7 +79,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) @@ -148,7 +148,7 @@ state: present ''' -RETURN = ''' +RETURN = r''' pipeline: description: Returns the dictionary describing the code pipeline configuration. 
returned: success @@ -266,7 +266,7 @@ def main(): name=dict(required=True, type='str'), role_arn=dict(required=True, type='str'), artifact_store=dict(required=True, type='dict'), - stages=dict(required=True, type='list'), + stages=dict(required=True, type='list', elements='dict'), version=dict(type='int'), state=dict(choices=['present', 'absent'], default='present') ) diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index fac17574c4b..250f004a0f7 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_config_aggregator version_added: 1.0.0 @@ -75,7 +75,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create cross-account aggregator community.aws.aws_config_aggregator: name: test_config_rule @@ -88,7 +88,7 @@ all_aws_regions: yes ''' -RETURN = '''#''' +RETURN = r'''#''' try: @@ -165,7 +165,7 @@ def main(): argument_spec={ 'name': dict(type='str', required=True), 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'account_sources': dict(type='list', required=True), + 'account_sources': dict(type='list', required=True, elements='dict'), 'organization_source': dict(type='dict', required=True) }, supports_check_mode=False, diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 3c4cd886a81..7547b027f0c 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_direct_connect_virtual_interface version_added: 1.0.0 @@ -83,7 +83,7 @@ ''' -RETURN = ''' +RETURN = r''' address_family: description: The address family for the BGP peer. returned: always @@ -217,7 +217,7 @@ sample: 100 ''' -EXAMPLES = ''' +EXAMPLES = r''' --- - name: create an association between a LAG and connection community.aws.aws_direct_connect_virtual_interface: @@ -467,7 +467,7 @@ def main(): amazon_address=dict(), customer_address=dict(), address_type=dict(), - cidr=dict(type='list'), + cidr=dict(type='list', elements='str'), virtual_gateway_id=dict(), virtual_interface_id=dict() ) diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index c39f8464bec..d6df16093f3 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_eks_cluster version_added: 1.0.0 @@ -62,7 +62,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create an EKS cluster @@ -84,7 +84,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' arn: description: ARN of the EKS cluster returned: when state is present @@ -270,8 +270,8 @@ def main(): name=dict(required=True), version=dict(), role_arn=dict(), - subnets=dict(type='list'), - security_groups=dict(type='list'), + subnets=dict(type='list', elements='str'), + security_groups=dict(type='list', elements='str'), state=dict(choices=['absent', 'present'], default='present'), wait=dict(default=False, type='bool'), wait_timeout=dict(default=1200, type='int') diff --git a/aws_glue_connection.py b/aws_glue_connection.py index 7502af6f9e6..0df4ab915d1 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_glue_connection version_added: 1.0.0 @@ -67,7 +67,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue connection @@ -86,7 +86,7 @@ ''' -RETURN = ''' +RETURN = r''' connection_properties: description: A dict of key-value pairs used as parameters for this connection. returned: when state is present @@ -306,9 +306,9 @@ def main(): connection_properties=dict(type='dict'), connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']), description=dict(type='str'), - match_criteria=dict(type='list'), + match_criteria=dict(type='list', elements='str'), name=dict(required=True, type='str'), - security_groups=dict(type='list'), + security_groups=dict(type='list', elements='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), subnet_id=dict(type='str') ) diff --git a/aws_glue_job.py b/aws_glue_job.py index c0edd59328c..1d991f52f41 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_glue_job version_added: 1.0.0 @@ -81,7 +81,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue job @@ -98,7 +98,7 @@ ''' -RETURN = ''' +RETURN = r''' allocated_capacity: description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power @@ -337,7 +337,7 @@ def main(): allocated_capacity=dict(type='int'), command_name=dict(type='str', default='glueetl'), command_script_location=dict(type='str'), - connections=dict(type='list'), + connections=dict(type='list', elements='str'), default_arguments=dict(type='dict'), description=dict(type='str'), max_concurrent_runs=dict(type='int'), diff --git a/aws_kms.py b/aws_kms.py index be4394f2caf..b86686cd264 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_kms version_added: 1.0.0 @@ -174,7 +174,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile # and has been deprecated in favour of the policy option. 
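The other change repeated across these files, the '''...''' doc blocks becoming r'''...''', turns DOCUMENTATION, EXAMPLES and RETURN into raw string literals. That is future-proofing: once the embedded YAML contains a backslash (a regex, a Windows path), a plain literal would treat it as an escape sequence. A standalone illustration, not taken from the patch:

# In a normal literal, '\d' is an invalid escape sequence and raises a
# DeprecationWarning on recent Python 3 releases, so the backslash would
# have to be doubled. The r prefix keeps it verbatim.
plain = 'password must match \\d{8}'
raw = r'password must match \d{8}'
assert plain == raw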
- name: grant user-style access to production secrets @@ -235,7 +235,7 @@ state: present ''' -RETURN = ''' +RETURN = r''' key_id: description: ID of key type: str @@ -1022,14 +1022,14 @@ def main(): policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'), policy_role_name=dict(aliases=['role_name']), policy_role_arn=dict(aliases=['role_arn']), - policy_grant_types=dict(aliases=['grant_types'], type='list'), + policy_grant_types=dict(aliases=['grant_types'], type='list', elements='str'), policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True), key_id=dict(aliases=['key_arn']), description=dict(), enabled=dict(type='bool', default=True), tags=dict(type='dict', default={}), purge_tags=dict(type='bool', default=False), - grants=dict(type='list', default=[]), + grants=dict(type='list', default=[], elements='dict'), policy=dict(type='json'), purge_grants=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), diff --git a/aws_s3_cors.py b/aws_s3_cors.py index f56f9ce06b5..0577c955e91 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_s3_cors version_added: 1.0.0 @@ -25,6 +25,7 @@ description: - Cors rules to put on the s3 bucket type: list + elements: str state: description: - Create or remove cors on the s3 bucket @@ -37,7 +38,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create a simple cors for s3 bucket @@ -63,7 +64,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' changed: description: check to see if a change was made to the rules returned: always @@ -146,7 +147,7 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), - rules=dict(type='list'), + rules=dict(type='list', elements='str'), state=dict(type='str', choices=['present', 'absent'], required=True) ) diff --git a/aws_waf_condition.py b/aws_waf_condition.py index e0f4dea2cc5..006caaad7cd 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: aws_waf_condition short_description: Create and delete WAF Conditions version_added: 1.0.0 @@ -137,7 +137,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: create WAF byte condition community.aws.aws_waf_condition: name: my_byte_condition @@ -205,7 +205,7 @@ ''' -RETURN = ''' +RETURN = r''' condition: description: Condition returned by operation. 
returned: always @@ -705,7 +705,7 @@ def main(): argument_spec = dict( name=dict(required=True), type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']), - filters=dict(type='list'), + filters=dict(type='list', elements='dict'), purge_filters=dict(type='bool', default=False), waf_regional=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), diff --git a/aws_waf_rule.py b/aws_waf_rule.py index 3ce660a3cbe..ce28559b35f 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: aws_waf_rule short_description: Create and delete WAF Rules version_added: 1.0.0 @@ -73,8 +73,7 @@ type: bool ''' -EXAMPLES = ''' - +EXAMPLES = r''' - name: create WAF rule community.aws.aws_waf_rule: name: my_waf_rule @@ -93,10 +92,9 @@ community.aws.aws_waf_rule: name: "my_waf_rule" state: absent - ''' -RETURN = ''' +RETURN = r''' rule: description: WAF rule contents returned: always @@ -153,8 +151,6 @@ list_regional_rules_with_backoff, list_rules_with_backoff, run_func_with_change_token_backoff, -) -from ansible_collections.amazon.aws.plugins.module_utils.waf import ( get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff, @@ -340,7 +336,7 @@ def main(): name=dict(required=True), metric_name=dict(), state=dict(default='present', choices=['present', 'absent']), - conditions=dict(type='list'), + conditions=dict(type='list', elements='dict'), purge_conditions=dict(type='bool', default=False), waf_regional=dict(type='bool', default=False), ) diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index da7b378a55c..7cdf770aa38 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: aws_waf_web_acl short_description: Create and delete WAF Web ACLs. version_added: 1.0.0 @@ -84,7 +84,7 @@ type: bool ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: create web ACL community.aws.aws_waf_web_acl: name: my_web_acl @@ -102,7 +102,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' web_acl: description: contents of the Web ACL. returned: always @@ -339,7 +339,7 @@ def main(): default_action=dict(choices=['block', 'allow', 'count']), metric_name=dict(), state=dict(default='present', choices=['present', 'absent']), - rules=dict(type='list'), + rules=dict(type='list', elements='dict'), purge_rules=dict(type='bool', default=False), waf_regional=dict(type='bool', default=False) ) From bb39dae14f1b301297774f9e91d846729d404a24 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Wed, 8 Jul 2020 15:15:25 -0700 Subject: [PATCH 017/683] Cleanup sanity items that were not previously caught (#131) --- rds_param_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rds_param_group.py b/rds_param_group.py index ce271712211..4870d0657fb 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -55,7 +55,7 @@ type: dict purge_tags: description: - - Whether or not to remove tags that do not appear in the M(tags) list. + - Whether or not to remove tags that do not appear in the C(tags) list. 
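+ - Whether or not to remove tags that do not appear in the C(tags) list.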
type: bool default: False author: From bd27e332e788b6a8a14b94e12aa97434430cfbf9 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 9 Jul 2020 01:42:48 +0200 Subject: [PATCH 018/683] use module.region to get aws region instead of get_aws_connection_info (#4) --- aws_ses_identity.py | 5 +++-- efs_info.py | 9 +++++---- elasticache_info.py | 10 ++++------ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/aws_ses_identity.py b/aws_ses_identity.py index 710ec01817e..d3c88156114 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -218,8 +218,9 @@ type: bool ''' +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry import time @@ -530,7 +531,7 @@ def main(): state = module.params.get("state") if state == 'present': - region = get_aws_connection_info(module, boto3=True)[0] + region = module.region account_id = get_account_id(module) validate_params_for_identity_present(module) create_or_update_identity(connection, module, region, account_id) diff --git a/efs_info.py b/efs_info.py index 95e82926486..00f74e677b2 100644 --- a/efs_info.py +++ b/efs_info.py @@ -177,10 +177,11 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict class EFSConnection(object): @@ -196,7 +197,7 @@ def __init__(self, module): except Exception as e: module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) - self.region = get_aws_connection_info(module, boto3=True)[0] + self.region = module.region @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException']) def list_file_systems(self, **kwargs): diff --git a/elasticache_info.py b/elasticache_info.py index 8f3850ddf96..5b22c5cec1c 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -223,12 +223,10 @@ Environment: test ''' +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, - camel_dict_to_snake_dict, - AWSRetry, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict try: @@ -272,7 +270,7 @@ def get_aws_account_id(module): def get_elasticache_clusters(client, module): - region = get_aws_connection_info(module, boto3=True)[0] + region = module.region try: clusters = describe_cache_clusters_with_backoff(client, 
cluster_id=module.params.get('name')) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: From 8173d02042f8f95ff1b73a8fc94074073cf0804c Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 16 Jul 2020 01:31:41 +0530 Subject: [PATCH 019/683] Docs: sanity fixes (#133) Signed-off-by: Abhijeet Kasurde --- cloudformation_stack_set.py | 12 +++++------ cloudfront_distribution.py | 18 ++++++++-------- cloudfront_invalidation.py | 8 +++---- cloudwatchevent_rule.py | 6 +++--- data_pipeline.py | 14 ++++++------ dynamodb_table.py | 10 ++++----- ec2_asg.py | 30 +++++++++++++------------- ec2_customer_gateway_info.py | 8 +++---- ec2_elb.py | 10 +++++---- ec2_elb_info.py | 17 ++++++--------- ec2_instance.py | 11 ++++++---- ec2_instance_info.py | 9 ++++---- ec2_launch_template.py | 13 ++++++----- ec2_lc.py | 12 +++++------ ec2_lc_info.py | 8 +++---- ec2_metric_alarm.py | 10 ++++----- ec2_placement_group_info.py | 6 +++--- ec2_transit_gateway_info.py | 7 +++--- ec2_vpc_endpoint.py | 8 +++---- ec2_vpc_endpoint_info.py | 8 +++---- ec2_vpc_igw_info.py | 8 +++---- ec2_vpc_nacl.py | 15 +++++++------ ec2_vpc_nacl_info.py | 9 ++++---- ec2_vpc_nat_gateway_info.py | 8 +++---- ec2_vpc_peering_info.py | 8 +++---- ec2_vpc_route_table.py | 12 +++++------ ec2_vpc_vgw_info.py | 8 +++---- ec2_vpc_vpn.py | 10 ++++----- ec2_vpc_vpn_info.py | 8 +++---- ecs_attribute.py | 8 +++---- ecs_service.py | 42 +++++++++++++++++++++++------------- ecs_service_info.py | 8 +++---- ecs_task.py | 8 +++---- ecs_taskdefinition.py | 10 ++++----- efs.py | 8 +++---- efs_info.py | 8 +++---- elasticache.py | 8 +++---- elasticache_subnet_group.py | 6 +++--- elb_application_lb.py | 22 ++++++++++++------- elb_application_lb_info.py | 12 ++++++----- elb_classic_lb.py | 22 ++++++++++++------- elb_classic_lb_info.py | 9 ++++---- elb_instance.py | 7 +++--- elb_network_lb.py | 15 +++++++------ elb_target_group.py | 9 ++++---- elb_target_group_info.py | 12 ++++++----- iam.py | 12 ++++++----- iam_group.py | 10 ++++----- iam_role.py | 11 +++++----- iam_user.py | 9 ++++---- lambda.py | 10 ++++----- rds.py | 12 +++++------ rds_instance.py | 15 +++++++------ rds_subnet_group.py | 9 ++++---- redshift.py | 10 ++++----- redshift_subnet_group.py | 8 +++---- route53.py | 7 +++--- route53_info.py | 6 +++--- s3_bucket_notification.py | 8 +++---- s3_lifecycle.py | 10 +++++---- sns_topic.py | 8 +++---- 61 files changed, 362 insertions(+), 308 deletions(-) diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 990dfdc33c8..148cbe61047 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: cloudformation_stack_set version_added: 1.0.0 @@ -176,7 +176,7 @@ requirements: [ boto3>=1.6, botocore>=1.10.26 ] ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create a stack set with instances in two accounts community.aws.cloudformation_stack_set: name: my-stack @@ -215,7 +215,7 @@ - us-east-1 ''' -RETURN = ''' +RETURN = r''' operations_log: type: list description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. 
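The commit above (bd27e33) replaces tuple-unpacking of get_aws_connection_info() with the region attribute that AnsibleAWSModule already exposes, since the module resolves the region anyway while building its connection parameters. The pattern in isolation, as a hypothetical module:

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule

def main():
    module = AnsibleAWSModule(argument_spec=dict())
    # before: region = get_aws_connection_info(module, boto3=True)[0]
    region = module.region
    module.exit_json(changed=False, region=region)

if __name__ == '__main__':
    main()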
@@ -505,9 +505,9 @@ def main(): template=dict(type='path'), template_url=dict(), template_body=dict(), - capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']), - regions=dict(type='list'), - accounts=dict(type='list'), + capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']), + regions=dict(type='list', elements='str'), + accounts=dict(type='list', elements='str'), failure_tolerance=dict( type='dict', default={}, diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index f5abb2a0d8e..36e8e6bd1b8 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- version_added: 1.0.0 @@ -220,7 +220,7 @@ whitelisted_names: type: list elements: str - description: A list of coockies to forward to the origin for this cache behavior. + description: A list of cookies to forward to the origin for this cache behavior. headers: description: - A list of headers to forward to the origin for this cache behavior. @@ -337,7 +337,7 @@ whitelisted_names: type: list elements: str - description: A list of coockies to forward to the origin for this cache behavior. + description: A list of cookies to forward to the origin for this cache behavior. headers: description: - A list of headers to forward to the origin for this cache behavior. @@ -534,7 +534,7 @@ - A config element that is a complex object that describes how a distribution should restrict it's content. suboptions: geo_restriction: - description: Apply a restriciton based on the location of the requester. + description: Apply a restriction based on the location of the requester. type: dict suboptions: restriction_type: @@ -673,7 +673,7 @@ caller_reference: replaceable distribution ''' -RETURN = ''' +RETURN = r''' active_trusted_signers: description: Key pair IDs that CloudFront is aware of for each trusted signer. returned: always @@ -2093,15 +2093,15 @@ def main(): tags=dict(type='dict', default={}), purge_tags=dict(type='bool', default=False), alias=dict(), - aliases=dict(type='list', default=[]), + aliases=dict(type='list', default=[], elements='str'), purge_aliases=dict(type='bool', default=False), default_root_object=dict(), - origins=dict(type='list'), + origins=dict(type='list', elements='dict'), purge_origins=dict(type='bool', default=False), default_cache_behavior=dict(type='dict'), - cache_behaviors=dict(type='list'), + cache_behaviors=dict(type='list', elements='dict'), purge_cache_behaviors=dict(type='bool', default=False), - custom_error_responses=dict(type='list'), + custom_error_responses=dict(type='list', elements='dict'), purge_custom_error_responses=dict(type='bool', default=False), logging=dict(type='dict'), price_class=dict(), diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 1f3e50331ca..52e3aea1873 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- version_added: 1.0.0 @@ -58,7 +58,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: create a batch of invalidations using a distribution_id for a reference community.aws.cloudfront_invalidation: @@ -80,7 +80,7 @@ ''' -RETURN = ''' +RETURN = r''' invalidation: description: The invalidation's information. 
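A pattern worth noting in the cloudfront_distribution spec above: every list option (aliases, origins, cache_behaviors, custom_error_responses) is paired with a purge_* boolean. With purge off the module merges the supplied list into what already exists; with purge on the supplied list becomes authoritative and anything else is removed. A sketch of that merge step, independent of any real module (function name invented):

def converge_list(existing, desired, purge):
    """Return the list a declare-and-purge module should converge on."""
    if purge:
        return list(desired)
    # keep existing entries and append any new desired ones, stable order
    merged = list(existing)
    merged.extend(item for item in desired if item not in existing)
    return merged

print(converge_list(['a', 'b'], ['b', 'c'], purge=False))  # ['a', 'b', 'c']
print(converge_list(['a', 'b'], ['b', 'c'], purge=True))   # ['b', 'c']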
returned: always @@ -247,7 +247,7 @@ def main(): caller_reference=dict(), distribution_id=dict(), alias=dict(), - target_paths=dict(required=True, type='list') + target_paths=dict(required=True, type='list', elements='str') ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index b90b7feca6c..0ba66909d25 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -107,7 +107,7 @@ required: false ''' -EXAMPLES = ''' +EXAMPLES = r''' - community.aws.cloudwatchevent_rule: name: MyCronTask schedule_expression: "cron(0 20 * * ? *)" @@ -131,7 +131,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' rule: description: CloudWatch Event rule data. returned: success @@ -430,7 +430,7 @@ def main(): default='present'), description=dict(), role_arn=dict(), - targets=dict(type='list', default=[]), + targets=dict(type='list', default=[], elements='dict'), ) module = AnsibleAWSModule(argument_spec=argument_spec) diff --git a/data_pipeline.py b/data_pipeline.py index d25563e45ac..8394078fe68 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: data_pipeline version_added: 1.0.0 @@ -128,7 +128,7 @@ type: str ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create pipeline @@ -180,7 +180,7 @@ ''' -RETURN = ''' +RETURN = r''' changed: description: whether the data pipeline has been modified type: bool @@ -278,7 +278,7 @@ def pipeline_field(client, dp_id, field): def run_with_timeout(timeout, func, *func_args, **func_kwargs): - """Run func with the provided args and kwargs, and wait utill + """Run func with the provided args and kwargs, and wait until timeout for truthy return value :param int timeout: time to wait for status @@ -607,13 +607,13 @@ def main(): name=dict(required=True), version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'), description=dict(required=False, default=''), - objects=dict(required=False, type='list', default=[]), - parameters=dict(required=False, type='list', default=[]), + objects=dict(required=False, type='list', default=[], elements='dict'), + parameters=dict(required=False, type='list', default=[], elements='dict'), timeout=dict(required=False, type='int', default=300), state=dict(default='present', choices=['present', 'absent', 'active', 'inactive']), tags=dict(required=False, type='dict', default={}), - values=dict(required=False, type='list', default=[]) + values=dict(required=False, type='list', default=[], elements='dict') ) ) module = AnsibleModule(argument_spec, supports_check_mode=False) diff --git a/dynamodb_table.py b/dynamodb_table.py index d528e460078..e6ae5b1af1f 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: dynamodb_table version_added: 1.0.0 @@ -121,7 +121,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create dynamo table with hash and range primary key community.aws.dynamodb_table: name: my-table @@ -164,7 +164,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' table_status: description: The current status of the table. 
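data_pipeline's run_with_timeout, whose docstring this commit fixes ('utill' to 'until'), is a generic poll-until-truthy helper. The underlying pattern, sketched independently of the module's internals (the interval and exception type here are assumptions, not the module's actual choices):

import time

def run_with_timeout(timeout, func, *func_args, **func_kwargs):
    """Call func repeatedly until it returns a truthy value or
    timeout seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = func(*func_args, **func_kwargs)
        if result:
            return result
        time.sleep(5)  # polling interval chosen purely for illustration
    raise RuntimeError('condition not met within %d seconds' % timeout)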
returned: success @@ -196,12 +196,12 @@ try: import botocore - from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info @@ -467,7 +467,7 @@ def main(): range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), read_capacity=dict(default=1, type='int'), write_capacity=dict(default=1, type='int'), - indexes=dict(default=[], type='list'), + indexes=dict(default=[], type='list', elements='dict'), tags=dict(type='dict'), wait_for_active_timeout=dict(default=60, type='int'), )) diff --git a/ec2_asg.py b/ec2_asg.py index 43afa725385..f466a8664f4 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_asg version_added: 1.0.0 @@ -241,7 +241,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Basic configuration with Launch Configuration - community.aws.ec2_asg: @@ -348,7 +348,7 @@ propagate_at_launch: no ''' -RETURN = ''' +RETURN = r''' --- auto_scaling_group_name: description: The unique name of the auto scaling group @@ -540,8 +540,6 @@ except ImportError: pass # will be detected by imported HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize', @@ -1678,9 +1676,9 @@ def asg_exists(connection): def main(): argument_spec = dict( name=dict(required=True, type='str'), - load_balancers=dict(type='list'), - target_group_arns=dict(type='list'), - availability_zones=dict(type='list'), + load_balancers=dict(type='list', elements='str'), + target_group_arns=dict(type='list', elements='str'), + availability_zones=dict(type='list', elements='str'), launch_config_name=dict(type='str'), launch_template=dict( type='dict', @@ -1706,20 +1704,20 @@ def main(): ), placement_group=dict(type='str'), desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='list'), + vpc_zone_identifier=dict(type='list', elements='str'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[]), + replace_instances=dict(type='list', default=[], elements='str'), lc_check=dict(type='bool', default=True), lt_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[]), + tags=dict(type='list', default=[], elements='dict'), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), - termination_policies=dict(type='list', default='Default'), + termination_policies=dict(type='list', default='Default', elements='str'), notification_topic=dict(type='str', default=None), notification_types=dict( type='list', @@ -1728,9 +1726,10 @@ def main(): 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 
'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' - ] + ], + elements='str' ), - suspend_processes=dict(type='list', default=[]), + suspend_processes=dict(type='list', default=[], elements='str'), metrics_collection=dict(type='bool', default=False), metrics_granularity=dict(type='str', default='1Minute'), metrics_list=dict( @@ -1744,7 +1743,8 @@ def main(): 'GroupStandbyInstances', 'GroupTerminatingInstances', 'GroupTotalInstances' - ] + ], + elements='str' ) ) diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 1526cb639d9..12c6320e6a8 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_customer_gateway_info version_added: 1.0.0 @@ -33,7 +33,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all customer gateways @@ -56,7 +56,7 @@ register: cust_gw_info ''' -RETURN = ''' +RETURN = r''' customer_gateways: description: List of one or more customer gateways. returned: always @@ -119,7 +119,7 @@ def list_customer_gateways(connection, module): def main(): argument_spec = dict( - customer_gateway_ids=dict(default=[], type='list'), + customer_gateway_ids=dict(default=[], type='list', elements='str'), filters=dict(default={}, type='dict') ) diff --git a/ec2_elb.py b/ec2_elb.py index 06fcc9601e2..349002d5852 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_elb version_added: 1.0.0 @@ -32,8 +32,10 @@ type: str ec2_elbs: description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. + - List of ELB names, required for registration. + - The ec2_elbs fact should be used if there was a previous de-register. type: list + elements: str enable_availability_zone: description: - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already @@ -57,7 +59,7 @@ ''' -EXAMPLES = """ +EXAMPLES = r""" # basic pre_task and post_task example pre_tasks: - name: Instance De-register @@ -316,7 +318,7 @@ def main(): argument_spec.update(dict( state={'required': True, 'choices': ['present', 'absent']}, instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type': 'list'}, + ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'required': False, 'default': 0, 'type': 'int'} diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 57cd296d262..a66d130deb1 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -17,7 +17,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_elb_info version_added: 1.0.0 @@ -33,18 +33,19 @@ description: - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. type: list + elements: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
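The dynamodb_table hunk a little earlier is about import hygiene rather than typing: only the AWS SDK import belongs inside the try/except ImportError guard, because its absence is reported to the user as a clean failure, while Ansible's own module_utils must always import (if they cannot, a traceback is the right outcome). The shape of the fix:

try:
    import botocore  # may legitimately be missing on the managed host
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

# Never guarded: a failure here is a packaging bug, not a user error.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn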
# Output format tries to match amazon.aws.ec2_elb_lb module input parameters - name: Gather information about all ELBs - ec2_elb_info: + community.aws.ec2_elb_info: register: elb_info - debug: msg: "{{ item.dns_name }}" @@ -59,7 +60,7 @@ msg: "{{ elb_info.elbs.0.dns_name }}" - name: Gather information about a set of ELBs - ec2_elb_info: + community.aws.ec2_elb_info: names: - frontend-prod-elb - backend-prod-elb @@ -93,11 +94,7 @@ class ElbInformation(object): """Handles ELB information.""" - def __init__(self, - module, - names, - region, - **aws_connect_params): + def __init__(self, module, names, region, **aws_connect_params): self.module = module self.names = names @@ -227,7 +224,7 @@ def list_elbs(self): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - names={'default': [], 'type': 'list'} + names={'default': [], 'type': 'list', 'elements': 'str'} ) ) module = AnsibleModule(argument_spec=argument_spec, diff --git a/ec2_instance.py b/ec2_instance.py index 9382659f71b..bbaa092bd5c 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_instance version_added: 1.0.0 @@ -25,6 +25,7 @@ description: - If you specify one or more instance IDs, only instances that have the specified IDs are returned. type: list + elements: str state: description: - Goal state for the instances. @@ -107,6 +108,7 @@ description: - A list of security group IDs or names (strings). Mutually exclusive with I(security_group). type: list + elements: str security_group: description: - A security group ID or name. Mutually exclusive with I(security_groups). @@ -180,6 +182,7 @@ ebs.iops, and ebs.delete_on_termination. - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). type: list + elements: dict launch_template: description: - The EC2 launch template to base instance configuration on. @@ -1681,7 +1684,7 @@ def main(): ebs_optimized=dict(type='bool'), vpc_subnet_id=dict(type='str', aliases=['subnet_id']), availability_zone=dict(type='str'), - security_groups=dict(default=[], type='list'), + security_groups=dict(default=[], type='list', elements='str'), security_group=dict(type='str'), instance_role=dict(type='str'), name=dict(type='str'), @@ -1700,9 +1703,9 @@ def main(): instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), termination_protection=dict(type='bool'), detailed_monitoring=dict(type='bool'), - instance_ids=dict(default=[], type='list'), + instance_ids=dict(default=[], type='list', elements='str'), network=dict(default=None, type='dict'), - volumes=dict(default=None, type='list'), + volumes=dict(default=None, type='list', elements='dict'), ) # running/present are synonyms # as are terminated/absent diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 8883be6923d..c9820a58f59 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_instance_info version_added: 1.0.0 @@ -24,6 +24,7 @@ - If you specify one or more instance IDs, only instances that have the specified IDs are returned. required: false type: list + elements: str filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. 
See @@ -39,7 +40,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all instances @@ -67,7 +68,7 @@ ''' -RETURN = ''' +RETURN = r''' instances: description: a list of ec2 instances returned: always @@ -540,7 +541,7 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - instance_ids=dict(default=[], type='list'), + instance_ids=dict(default=[], type='list', elements='str'), filters=dict(default={}, type='dict') ) ) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 6ac54a1fee3..2f7b529146e 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -5,7 +5,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_launch_template version_added: 1.0.0 @@ -567,6 +567,7 @@ def main(): template_options = dict( block_device_mappings=dict( type='list', + elements='dict', options=dict( device_name=dict(), ebs=dict( @@ -603,6 +604,7 @@ def main(): elastic_gpu_specifications=dict( options=dict(type=dict()), type='list', + elements='dict', ), iam_instance_profile=dict(), image_id=dict(), @@ -633,14 +635,15 @@ def main(): ), network_interfaces=dict( type='list', + elements='dict', options=dict( associate_public_ip_address=dict(type='bool'), delete_on_termination=dict(type='bool'), description=dict(), device_index=dict(type='int'), - groups=dict(type='list'), + groups=dict(type='list', elements='str'), ipv6_address_count=dict(type='int'), - ipv6_addresses=dict(type='list'), + ipv6_addresses=dict(type='list', elements='str'), network_interface_id=dict(), private_ip_address=dict(), subnet_id=dict(), @@ -657,8 +660,8 @@ def main(): type='dict', ), ram_disk_id=dict(), - security_group_ids=dict(type='list'), - security_groups=dict(type='list'), + security_group_ids=dict(type='list', elements='str'), + security_groups=dict(type='list', elements='str'), tags=dict(type='dict'), user_data=dict(), ) diff --git a/ec2_lc.py b/ec2_lc.py index 46e125a19ce..deb6633adeb 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_lc version_added: 1.0.0 @@ -190,7 +190,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # create a launch configuration using an AMI image and instance type as a basis @@ -260,7 +260,7 @@ register: lc_info ''' -RETURN = ''' +RETURN = r''' arn: description: The Amazon Resource Name of the launch configuration. 
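The ec2_launch_template hunk above shows elements='dict' composing with nested options: each list item is validated as a dict, and list-valued keys inside that dict carry their own elements typing. A trimmed, hypothetical spec in the same shape:

from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(argument_spec=dict(
        network_interfaces=dict(
            type='list',
            elements='dict',   # each interface entry must be a dict...
            options=dict(      # ...restricted to these validated keys
                device_index=dict(type='int'),
                groups=dict(type='list', elements='str'),
            ),
        ),
    ))
    module.exit_json(changed=False, count=len(module.params['network_interfaces'] or []))

if __name__ == '__main__':
    main()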
returned: when I(state=present) @@ -657,11 +657,11 @@ def main(): image_id=dict(), instance_id=dict(), key_name=dict(), - security_groups=dict(default=[], type='list'), + security_groups=dict(default=[], type='list', elements='str'), user_data=dict(), user_data_path=dict(type='path'), kernel_id=dict(), - volumes=dict(type='list'), + volumes=dict(type='list', elements='dict'), instance_type=dict(), state=dict(default='present', choices=['present', 'absent']), spot_price=dict(type='float'), @@ -671,7 +671,7 @@ def main(): associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), instance_monitoring=dict(default=False, type='bool'), assign_public_ip=dict(type='bool'), - classic_link_vpc_security_groups=dict(type='list'), + classic_link_vpc_security_groups=dict(type='list', elements='str'), classic_link_vpc_id=dict(), vpc_id=dict(), placement_tenancy=dict(choices=['default', 'dedicated']) diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 899db903fd9..d3e46cc298e 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_lc_info version_added: 1.0.0 @@ -52,7 +52,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all launch configurations @@ -68,7 +68,7 @@ sort_order: descending ''' -RETURN = ''' +RETURN = r''' block_device_mapping: description: Block device mapping for the instances of launch configuration type: list @@ -206,7 +206,7 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - name=dict(required=False, default=[], type='list'), + name=dict(required=False, default=[], type='list', elements='str'), sort=dict(required=False, default=None, choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), sort_order=dict(required=False, default='ascending', diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 3aef6799b23..09e95d2fd6c 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -18,7 +18,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_metric_alarm short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" version_added: 1.0.0 @@ -166,7 +166,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: create alarm community.aws.ec2_metric_alarm: state: present @@ -383,9 +383,9 @@ def main(): evaluation_periods=dict(type='int'), description=dict(type='str'), dimensions=dict(type='dict', default={}), - alarm_actions=dict(type='list', default=[]), - insufficient_data_actions=dict(type='list', default=[]), - ok_actions=dict(type='list', default=[]), + alarm_actions=dict(type='list', default=[], elements='str'), + insufficient_data_actions=dict(type='list', default=[], elements='str'), + ok_actions=dict(type='list', default=[], elements='str'), treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), state=dict(default='present', choices=['present', 'absent']), ) diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 96451d69ce5..7ec7f62fd92 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_placement_group_info version_added: 1.0.0 @@ -51,7 +51,7 @@ ''' -RETURN = ''' +RETURN = r''' 
placement_groups: description: Placement group attributes returned: always @@ -107,7 +107,7 @@ def get_placement_groups_details(connection, module): def main(): argument_spec = dict( - names=dict(type='list', default=[]) + names=dict(type='list', default=[], elements='str') ) module = AnsibleAWSModule( diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index ddae796cf47..676862185b4 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_transit_gateway_info short_description: Gather information about ec2 transit gateways in AWS version_added: 1.0.0 @@ -23,6 +23,7 @@ - A list of transit gateway IDs to gather information for. aliases: [transit_gateway_id] type: list + elements: str filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. @@ -34,7 +35,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather info about all transit gateways @@ -58,7 +59,7 @@ - tgw-03c53443d5a8cb716 ''' -RETURN = ''' +RETURN = r''' transit_gateways: description: > Transit gateways that match the provided filters. Each element consists of a dict with all the information diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 920cf45ca6e..833e64ae1db 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_vpc_endpoint short_description: Create and delete AWS VPC Endpoints. version_added: 1.0.0 @@ -96,7 +96,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create new vpc endpoint with a json template for policy @@ -141,7 +141,7 @@ region: ap-southeast-2 ''' -RETURN = ''' +RETURN = r''' endpoints: description: The resulting endpoints from the module call returned: success @@ -345,7 +345,7 @@ def main(): state=dict(default='present', choices=['present', 'absent']), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=320, required=False), - route_table_ids=dict(type='list'), + route_table_ids=dict(type='list', elements='str'), vpc_endpoint_id=dict(), client_token=dict(), ) diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index a1f3ff0a901..eeb7a7d80d1 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -5,7 +5,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_vpc_endpoint_info short_description: Retrieves AWS VPC endpoints details using AWS methods. 
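All the *_info modules in this stretch accept a plain dict of filters and pass it through to EC2 describe calls. The usual bridge between the two shapes is ansible_dict_to_boto3_filter_list from amazon.aws's module_utils; for example (filter value invented):

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list

filters = {'vpc-id': 'vpc-0123456789abcdef0'}
print(ansible_dict_to_boto3_filter_list(filters))
# [{'Name': 'vpc-id', 'Values': ['vpc-0123456789abcdef0']}]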
version_added: 1.0.0 @@ -41,7 +41,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Simple example of listing all support AWS services for VPC endpoints - name: List supported AWS endpoint services community.aws.ec2_vpc_endpoint_info: @@ -77,7 +77,7 @@ register: endpoint_details ''' -RETURN = ''' +RETURN = r''' service_names: description: AWS VPC endpoint service names returned: I(query) is C(services) @@ -169,7 +169,7 @@ def main(): dict( query=dict(choices=['services', 'endpoints'], required=True), filters=dict(default={}, type='dict'), - vpc_endpoint_ids=dict(type='list'), + vpc_endpoint_ids=dict(type='list', elements='str'), ) ) diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index f33020e0c24..2d8244f1282 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_igw_info version_added: 1.0.0 @@ -33,7 +33,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all Internet Gateways for an account or profile @@ -58,7 +58,7 @@ register: igw_info ''' -RETURN = ''' +RETURN = r''' internet_gateways: description: The internet gateways for the account. returned: always @@ -132,7 +132,7 @@ def main(): argument_spec.update( dict( filters=dict(type='dict', default=dict()), - internet_gateway_ids=dict(type='list', default=None) + internet_gateway_ids=dict(type='list', default=None, elements='str') ) ) diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index f2ca5cda6f6..387ceb48f26 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_vpc_nacl short_description: create and delete Network ACLs. version_added: 1.0.0 @@ -39,6 +39,7 @@ - Each subnet can be specified as subnet ID, or its tagged name. required: false type: list + elements: str egress: description: - A list of rules for outgoing traffic. Each rule must be specified as a list. @@ -50,6 +51,7 @@ default: [] required: false type: list + elements: list ingress: description: - List of rules for incoming traffic. Each rule must be specified as a list. @@ -61,6 +63,7 @@ default: [] required: false type: list + elements: list tags: description: - Dictionary of tags to look for and apply when creating a network ACL. @@ -82,7 +85,7 @@ requirements: [ botocore, boto3, json ] ''' -EXAMPLES = ''' +EXAMPLES = r''' # Complete example to create and delete a network ACL # that allows SSH, HTTP and ICMP in, and all traffic out. @@ -138,7 +141,7 @@ nacl_id: acl-33b4ee5b state: absent ''' -RETURN = ''' +RETURN = r''' task: description: The result of the create, or delete action. 
returned: success @@ -602,10 +605,10 @@ def main(): vpc_id=dict(), name=dict(), nacl_id=dict(), - subnets=dict(required=False, type='list', default=list()), + subnets=dict(required=False, type='list', default=list(), elements='str'), tags=dict(required=False, type='dict'), - ingress=dict(required=False, type='list', default=list()), - egress=dict(required=False, type='list', default=list()), + ingress=dict(required=False, type='list', default=list(), elements='list'), + egress=dict(required=False, type='list', default=list(), elements='list'), state=dict(default='present', choices=['present', 'absent']), ) module = AnsibleAWSModule(argument_spec=argument_spec, diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 337a91d1c93..aabe489c112 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -5,7 +5,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_nacl_info version_added: 1.0.0 @@ -23,6 +23,7 @@ default: [] aliases: [nacl_id] type: list + elements: str filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See @@ -40,7 +41,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all Network ACLs: @@ -58,7 +59,7 @@ register: default_nacls ''' -RETURN = ''' +RETURN = r''' nacls: description: Returns an array of complex objects as described below. returned: success @@ -205,7 +206,7 @@ def nacl_entry_to_list(entry): def main(): argument_spec = dict( - nacl_ids=dict(default=[], type='list', aliases=['nacl_id']), + nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'), filters=dict(default={}, type='dict')) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index b734721b5ea..bb164a2b50b 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_vpc_nat_gateway_info short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods. version_added: 1.0.0 @@ -33,7 +33,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Simple example of listing all nat gateways - name: List all managed nat gateways in ap-southeast-2 community.aws.ec2_vpc_nat_gateway_info: @@ -68,7 +68,7 @@ register: existing_nat_gateways ''' -RETURN = ''' +RETURN = r''' result: description: The result of the describe, converted to ansible snake case style. See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response. @@ -127,7 +127,7 @@ def main(): argument_spec.update( dict( filters=dict(default={}, type='dict'), - nat_gateway_ids=dict(default=[], type='list'), + nat_gateway_ids=dict(default=[], type='list', elements='str'), ) ) diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index f552358e362..2d577227fac 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_vpc_peering_info short_description: Retrieves AWS VPC Peering details using AWS methods. 
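ec2_vpc_nacl above is the one module in this series that needs elements='list': a NACL rule is itself a positional list, not a dict, so ingress and egress are lists of lists. The documented rule layout, with example values:

# [rule_no, protocol, allow/deny, cidr, icmp_type, icmp_code, port_from, port_to]
ingress = [
    [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22],
    [200, 'tcp', 'allow', '0.0.0.0/0', None, None, 80, 80],
]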
version_added: 1.0.0 @@ -33,7 +33,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Simple example of listing all VPC Peers - name: List all vpc peers community.aws.ec2_vpc_peering_info: @@ -60,7 +60,7 @@ register: pending_vpc_peers ''' -RETURN = ''' +RETURN = r''' result: description: The result of the describe. returned: success @@ -107,7 +107,7 @@ def main(): argument_spec.update( dict( filters=dict(default=dict(), type='dict'), - peer_connection_ids=dict(default=None, type='list'), + peer_connection_ids=dict(default=None, type='list', elements='str'), ) ) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 39f0ffc42bf..ca5d586b2e7 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_route_table version_added: 1.0.0 @@ -81,7 +81,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic creation example: @@ -124,7 +124,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' route_table: description: Route Table result returned: always @@ -710,14 +710,14 @@ def ensure_route_table_present(connection, module): def main(): argument_spec = dict( lookup=dict(default='tag', choices=['tag', 'id']), - propagating_vgw_ids=dict(type='list'), + propagating_vgw_ids=dict(type='list', elements='str'), purge_routes=dict(default=True, type='bool'), purge_subnets=dict(default=True, type='bool'), purge_tags=dict(default=False, type='bool'), route_table_id=dict(), - routes=dict(default=[], type='list'), + routes=dict(default=[], type='list', elements='dict'), state=dict(default='present', choices=['present', 'absent']), - subnets=dict(type='list'), + subnets=dict(type='list', elements='str'), tags=dict(type='dict', aliases=['resource_tags']), vpc_id=dict() ) diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 5c7b866c7d6..43a5c2f8c4e 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_vgw_info version_added: 1.0.0 @@ -33,7 +33,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all virtual gateways for an account or profile @@ -58,7 +58,7 @@ register: vgw_info ''' -RETURN = ''' +RETURN = r''' virtual_gateways: description: The virtual gateways for the account. returned: always @@ -138,7 +138,7 @@ def main(): argument_spec.update( dict( filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None) + vpn_gateway_ids=dict(type='list', default=None, elements='str') ) ) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 1ac818485bb..9067d522129 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_vpn version_added: 1.0.0 @@ -141,7 +141,7 @@ default: 15 ''' -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. @@ -214,7 +214,7 @@ Ansible: Tag """ -RETURN = """ +RETURN = r""" changed: description: If the VPN connection has changed. 
type: bool @@ -742,12 +742,12 @@ def main(): vpn_gateway_id=dict(type='str'), tags=dict(default={}, type='dict'), connection_type=dict(default='ipsec.1', type='str'), - tunnel_options=dict(no_log=True, type='list', default=[]), + tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'), static_only=dict(default=False, type='bool'), customer_gateway_id=dict(type='str'), vpn_connection_id=dict(type='str'), purge_tags=dict(type='bool', default=False), - routes=dict(type='list', default=[]), + routes=dict(type='list', default=[], elements='str'), purge_routes=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), delay=dict(type='int', default=15), diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 1ba8210a722..e96583f669e 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_vpn_info version_added: 1.0.0 @@ -35,7 +35,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all vpn connections community.aws.ec2_vpc_vpn_info: @@ -53,7 +53,7 @@ register: vpn_conn_info ''' -RETURN = ''' +RETURN = r''' vpn_connections: description: List of one or more VPN Connections. returned: always @@ -198,7 +198,7 @@ def list_vpn_connections(connection, module): def main(): argument_spec = dict( - vpn_connection_ids=dict(default=[], type='list'), + vpn_connection_ids=dict(default=[], type='list', elements='str'), filters=dict(default={}, type='dict') ) diff --git a/ecs_attribute.py b/ecs_attribute.py index 24e71a29bdb..db9de79b480 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ecs_attribute version_added: 1.0.0 @@ -60,7 +60,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Set attributes @@ -84,7 +84,7 @@ delegate_to: localhost ''' -RETURN = ''' +RETURN = r''' attributes: description: attributes type: complex @@ -259,7 +259,7 @@ def main(): state=dict(required=False, default='present', choices=['present', 'absent']), cluster=dict(required=True, type='str'), ec2_instance_id=dict(required=True, type='str'), - attributes=dict(required=True, type='list'), + attributes=dict(required=True, type='list', elements='dict'), )) required_together = [['cluster', 'ec2_instance_id', 'attributes']] diff --git a/ecs_service.py b/ecs_service.py index b3995f7e8c9..462f60cb946 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ecs_service version_added: 1.0.0 @@ -192,7 +192,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic provisioning example @@ -241,7 +241,7 @@ field: memory ''' -RETURN = ''' +RETURN = r''' service: description: Details of created service. 
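In the ec2_vpc_vpn hunk above, tunnel_options gains elements='dict' while keeping no_log=True, and the two are orthogonal: no_log tells Ansible to censor the value in logs and verbose output (tunnel options can carry pre-shared keys) regardless of how the option is typed. The spec line in isolation, as a hypothetical module:

from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(argument_spec=dict(
        # censored in syslog/journald and -vvv output, independent of typing
        tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'),
    ))
    module.exit_json(changed=False)

if __name__ == '__main__':
    main()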
returned: when creating a service @@ -648,7 +648,7 @@ def main(): name=dict(required=True, type='str'), cluster=dict(required=False, type='str'), task_definition=dict(required=False, type='str'), - load_balancers=dict(required=False, default=[], type='list'), + load_balancers=dict(required=False, default=[], type='list', elements='str'), desired_count=dict(required=False, type='int'), client_token=dict(required=False, default='', type='str'), role=dict(required=False, default='', type='str'), @@ -656,22 +656,34 @@ def main(): repeat=dict(required=False, type='int', default=10), force_new_deployment=dict(required=False, default=False, type='bool'), deployment_configuration=dict(required=False, default={}, type='dict'), - placement_constraints=dict(required=False, default=[], type='list', options=dict( - type=dict(type='str'), - expression=dict(type='str') - )), - placement_strategy=dict(required=False, default=[], type='list', options=dict( - type=dict(type='str'), - field=dict(type='str'), - )), + placement_constraints=dict( + required=False, + default=[], + type='list', + elements='dict', + options=dict( + type=dict(type='str'), + expression=dict(type='str') + ) + ), + placement_strategy=dict( + required=False, + default=[], + type='list', + elements='dict', + options=dict( + type=dict(type='str'), + field=dict(type='str'), + ) + ), health_check_grace_period_seconds=dict(required=False, type='int'), network_configuration=dict(required=False, type='dict', options=dict( - subnets=dict(type='list'), - security_groups=dict(type='list'), + subnets=dict(type='list', elements='str'), + security_groups=dict(type='list', elements='str'), assign_public_ip=dict(type='bool') )), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - service_registries=dict(required=False, type='list', default=[]), + service_registries=dict(required=False, type='list', default=[], elements='dict'), scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']) ) diff --git a/ecs_service_info.py b/ecs_service_info.py index 4d04fdf4986..d428dde8835 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ecs_service_info version_added: 1.0.0 @@ -49,7 +49,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic listing example @@ -65,7 +65,7 @@ register: output ''' -RETURN = ''' +RETURN = r''' services: description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below. 
returned: success @@ -219,7 +219,7 @@ def main(): details=dict(type='bool', default=False), events=dict(type='bool', default=True), cluster=dict(), - service=dict(type='list') + service=dict(type='list', elements='str') ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/ecs_task.py b/ecs_task.py index 2039b8c69d0..f43cd700d27 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ecs_task version_added: 1.0.0 @@ -88,7 +88,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Simple example of run task - name: Run task community.aws.ecs_task: @@ -146,7 +146,7 @@ task_definition: console-sample-app-static-taskdef task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" ''' -RETURN = ''' +RETURN = r''' task: description: details about the task that was started returned: success @@ -340,7 +340,7 @@ def main(): overrides=dict(required=False, type='dict'), # R S count=dict(required=False, type='int'), # R task=dict(required=False, type='str'), # P* - container_instances=dict(required=False, type='list'), # S* + container_instances=dict(required=False, type='list', elements='str'), # S* started_by=dict(required=False, type='str'), # R S network_configuration=dict(required=False, type='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index f48a442ab1f..98831a850e8 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ecs_taskdefinition version_added: 1.0.0 @@ -104,7 +104,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create task definition community.aws.ecs_taskdefinition: containers: @@ -200,7 +200,7 @@ state: present network_mode: awsvpc ''' -RETURN = ''' +RETURN = r''' taskdefinition: description: a reflection of the input parameters type: dict @@ -321,11 +321,11 @@ def main(): family=dict(required=False, type='str'), revision=dict(required=False, type='int'), force_create=dict(required=False, default=False, type='bool'), - containers=dict(required=False, type='list'), + containers=dict(required=False, type='list', elements='str'), network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), task_role_arn=dict(required=False, default='', type='str'), execution_role_arn=dict(required=False, default='', type='str'), - volumes=dict(required=False, type='list'), + volumes=dict(required=False, type='list', elements='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), memory=dict(required=False, type='str') diff --git a/efs.py b/efs.py index 43c81d9d0d1..56ec6980e56 100644 --- a/efs.py +++ b/efs.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: efs version_added: 1.0.0 @@ -106,7 +106,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: EFS provisioning community.aws.efs: state: present @@ -134,7 +134,7 @@ name: myTestEFS ''' -RETURN = ''' +RETURN = r''' creation_time: description: timestamp of creation date returned: always @@ -692,7 +692,7 @@ def main(): id=dict(required=False, type='str', default=None), name=dict(required=False, type='str', default=None), tags=dict(required=False, type="dict", default={}), - targets=dict(required=False, type="list", default=[]), + targets=dict(required=False, type="list", default=[], elements='dict'), 
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None), provisioned_throughput_in_mibps=dict(required=False, type='float'), diff --git a/efs_info.py b/efs_info.py index 00f74e677b2..a1b310fe7bc 100644 --- a/efs_info.py +++ b/efs_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: efs_info version_added: 1.0.0 @@ -44,7 +44,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Find all existing efs community.aws.efs_info: register: result @@ -67,7 +67,7 @@ msg: "{{ result['efs'] }}" ''' -RETURN = ''' +RETURN = r''' creation_time: description: timestamp of creation date returned: always @@ -359,7 +359,7 @@ def main(): id=dict(), name=dict(aliases=['creation_token']), tags=dict(type="dict", default={}), - targets=dict(type="list", default=[]) + targets=dict(type="list", default=[], elements='str') ) module = AnsibleAWSModule(argument_spec=argument_spec, diff --git a/elasticache.py b/elasticache.py index b6b52302baa..a1e0f88be89 100644 --- a/elasticache.py +++ b/elasticache.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elasticache version_added: 1.0.0 @@ -97,7 +97,7 @@ ''' -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. @@ -497,8 +497,8 @@ def main(): cache_parameter_group=dict(default="", aliases=['parameter_group']), cache_port=dict(type='int'), cache_subnet_group=dict(default=""), - cache_security_groups=dict(default=[], type='list'), - security_group_ids=dict(default=[], type='list'), + cache_security_groups=dict(default=[], type='list', elements='str'), + security_group_ids=dict(default=[], type='list', elements='str'), zone=dict(), wait=dict(default=True, type='bool'), hard_modify=dict(type='bool') diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 7a874f3b1ae..29e7afa65d3 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elasticache_subnet_group version_added: 1.0.0 @@ -41,7 +41,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Add or change a subnet group community.aws.elasticache_subnet_group: state: present @@ -75,7 +75,7 @@ def main(): state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list'), + subnets=dict(required=False, type='list', elements='str'), ) ) module = AnsibleModule(argument_spec=argument_spec) diff --git a/elb_application_lb.py b/elb_application_lb.py index 3f8c44c9f36..dc138fb2294 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -18,7 +18,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_application_lb version_added: 1.0.0 @@ -66,6 +66,7 @@ - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys are CamelCased. type: list + elements: dict suboptions: Port: description: The port on which the load balancer is listening. @@ -78,6 +79,7 @@ Certificates: description: The SSL server certificate. type: list + elements: dict suboptions: CertificateArn: description: The Amazon Resource Name (ARN) of the certificate. 
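
Each `elements:` line added to the YAML documentation here has a Python twin in the module's argument spec; a hedged sketch of the pairing for a single listener-style option (assembled only for illustration, with YAML's `suboptions` mapping to `options` in the spec):

    # YAML side (DOCUMENTATION):  type: list / elements: dict / suboptions: ...
    # Python side (argument_spec):
    listeners = dict(
        type='list',
        elements='dict',          # each list member must be a dict
        options=dict(             # YAML 'suboptions' == spec 'options'
            Port=dict(type='int', required=True),
            Protocol=dict(type='str', required=True),
        ),
    )
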
@@ -89,6 +91,7 @@ description: The default actions for the listener. required: true type: list + elements: dict suboptions: Type: description: The type of action. @@ -98,6 +101,7 @@ type: str Rules: type: list + elements: dict description: - A list of ALB Listener Rules. - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:' @@ -136,12 +140,14 @@ at least two Availability Zones. - Required if I(state=present). type: list + elements: str security_groups: description: - A list of the names or IDs of the security groups to assign to the load balancer. - Required if I(state=present). default: [] type: list + elements: str scheme: description: - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. @@ -182,7 +188,7 @@ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ELB and attach a listener @@ -300,7 +306,7 @@ ''' -RETURN = ''' +RETURN = r''' access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. returned: when state is present @@ -603,16 +609,16 @@ def main(): Protocol=dict(type='str', required=True), Port=dict(type='int', required=True), SslPolicy=dict(type='str'), - Certificates=dict(type='list'), - DefaultActions=dict(type='list', required=True), - Rules=dict(type='list') + Certificates=dict(type='list', elements='dict'), + DefaultActions=dict(type='list', required=True, elements='dict'), + Rules=dict(type='list', elements='dict') ) ), name=dict(required=True, type='str'), purge_listeners=dict(default=True, type='bool'), purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list'), - security_groups=dict(type='list'), + subnets=dict(type='list', elements='str'), + security_groups=dict(type='list', elements='str'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), state=dict(choices=['present', 'absent'], default='present'), tags=dict(type='dict'), diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index c9300f5ed01..21952633a43 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_application_lb_info version_added: 1.0.0 @@ -22,11 +22,13 @@ - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call. required: false type: list + elements: str names: description: - The names of the load balancers. required: false type: list + elements: str extends_documentation_fragment: - amazon.aws.aws @@ -34,7 +36,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Gather information about all target groups @@ -60,7 +62,7 @@ var: alb_info ''' -RETURN = ''' +RETURN = r''' load_balancers: description: a list of load balancers returned: always @@ -264,8 +266,8 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - load_balancer_arns=dict(type='list'), - names=dict(type='list') + load_balancer_arns=dict(type='list', elements='str'), + names=dict(type='list', elements='str') ) ) diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 1f8679b30d2..9c5f1641677 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_classic_lb version_added: 1.0.0 @@ -32,6 +32,7 @@ description: - List of ports/protocols for this ELB to listen on (see example) type: list + elements: dict purge_listeners: description: - Purge existing listeners on ELB that are not found in listeners @@ -41,6 +42,7 @@ description: - List of instance ids to attach to this ELB type: list + elements: str purge_instance_ids: description: - Purge existing instance ids on ELB that are not found in instance_ids @@ -50,6 +52,7 @@ description: - List of availability zones to enable on this ELB type: list + elements: str purge_zones: description: - Purge existing availability zones on ELB that are not found in zones @@ -59,10 +62,12 @@ description: - A list of security groups to apply to the elb type: list + elements: str security_group_names: description: - A list of security group names to apply to the elb type: list + elements: str health_check: description: - An associative array of health check configuration settings (see example) @@ -75,6 +80,7 @@ description: - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. type: list + elements: str purge_subnets: description: - Purge existing subnet on ELB that are not found in subnets @@ -133,7 +139,7 @@ ''' -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. 
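
The kwargs-style dict() specs above and the literal-dict style that elb_classic_lb uses in the next hunk build identical argument specs, so the `elements` key slots into either spelling; a quick illustrative check (not part of the patch):

    listeners_kwargs = dict(default=None, required=False, type='list', elements='dict')
    listeners_literal = {'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}
    assert listeners_kwargs == listeners_literal  # same spec, different spelling
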
@@ -1221,16 +1227,16 @@ def main(): argument_spec.update(dict( state={'required': True, 'choices': ['present', 'absent']}, name={'required': True}, - listeners={'default': None, 'required': False, 'type': 'list'}, + listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}, purge_listeners={'default': True, 'required': False, 'type': 'bool'}, - instance_ids={'default': None, 'required': False, 'type': 'list'}, + instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, purge_instance_ids={'default': False, 'required': False, 'type': 'bool'}, - zones={'default': None, 'required': False, 'type': 'list'}, + zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, purge_zones={'default': False, 'required': False, 'type': 'bool'}, - security_group_ids={'default': None, 'required': False, 'type': 'list'}, - security_group_names={'default': None, 'required': False, 'type': 'list'}, + security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, + security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, health_check={'default': None, 'required': False, 'type': 'dict'}, - subnets={'default': None, 'required': False, 'type': 'list'}, + subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, purge_subnets={'default': False, 'required': False, 'type': 'bool'}, scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']}, connection_draining_timeout={'default': None, 'required': False, 'type': 'int'}, diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index da8f6c5af11..88d44ee8125 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -17,7 +17,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_classic_lb_info version_added: 1.0.0 @@ -33,6 +33,7 @@ description: - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. type: list + elements: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -42,7 +43,7 @@ - boto3 ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Output format tries to match amazon.aws.ec2_elb_lb module input parameters @@ -75,7 +76,7 @@ ''' -RETURN = ''' +RETURN = r''' elbs: description: a list of load balancers returned: always @@ -193,7 +194,7 @@ def lb_instance_health(connection, load_balancer_name, instances, state): def main(): argument_spec = dict( - names={'default': [], 'type': 'list'} + names={'default': [], 'type': 'list', 'elements': 'str'} ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/elb_instance.py b/elb_instance.py index dd541ef2e58..187f6bee136 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_instance version_added: 1.0.0 @@ -34,6 +34,7 @@ description: - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. 
type: list + elements: str enable_availability_zone: description: - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already @@ -62,7 +63,7 @@ ''' -EXAMPLES = """ +EXAMPLES = r""" # basic pre_task and post_task example pre_tasks: - name: Instance De-register @@ -319,7 +320,7 @@ def main(): argument_spec.update(dict( state={'required': True, 'choices': ['present', 'absent']}, instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type': 'list'}, + ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'required': False, 'default': 0, 'type': 'int'} diff --git a/elb_network_lb.py b/elb_network_lb.py index 2f824c09b59..83e1ea416dc 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_network_lb version_added: 1.0.0 @@ -98,6 +98,7 @@ - Required when I(state=present). - This parameter is mutually exclusive with I(subnet_mappings). type: list + elements: str scheme: description: - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. @@ -133,7 +134,7 @@ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create an ELB and attach a listener @@ -171,7 +172,7 @@ ''' -RETURN = ''' +RETURN = r''' availability_zones: description: The Availability Zones for the load balancer. returned: when state is present @@ -411,15 +412,15 @@ def main(): Protocol=dict(type='str', required=True), Port=dict(type='int', required=True), SslPolicy=dict(type='str'), - Certificates=dict(type='list'), - DefaultActions=dict(type='list', required=True) + Certificates=dict(type='list', elements='dict'), + DefaultActions=dict(type='list', required=True, elements='dict') ) ), name=dict(required=True, type='str'), purge_listeners=dict(default=True, type='bool'), purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list'), - subnet_mappings=dict(type='list'), + subnets=dict(type='list', elements='str'), + subnet_mappings=dict(type='list', elements='dict'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), state=dict(choices=['present', 'absent'], type='str'), tags=dict(type='dict'), diff --git a/elb_target_group.py b/elb_target_group.py index e0c8e57bfac..fe4b749a63a 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_target_group version_added: 1.0.0 @@ -138,6 +138,7 @@ all existing targets will be removed from the group. The list should be an Id and a Port parameter. See the Examples for detail. required: false type: list + elements: dict unhealthy_threshold_count: description: - The number of consecutive health check failures required before considering a target unhealthy. @@ -166,7 +167,7 @@ - Once a target group has been created, only its health check can then be modified using subsequent calls ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create a target group with a default health check @@ -269,7 +270,7 @@ ''' -RETURN = ''' +RETURN = r''' deregistration_delay_timeout_seconds: description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. returned: when state present @@ -828,7 +829,7 @@ def main(): successful_response_codes=dict(), tags=dict(default={}, type='dict'), target_type=dict(choices=['instance', 'ip', 'lambda']), - targets=dict(type='list'), + targets=dict(type='list', elements='dict'), unhealthy_threshold_count=dict(type='int'), vpc_id=dict(), wait_timeout=dict(type='int', default=200), diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 78ce88613bc..7ab462c26a7 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: elb_target_group_info version_added: 1.0.0 @@ -27,11 +27,13 @@ - The Amazon Resource Names (ARN) of the target groups. required: false type: list + elements: str names: description: - The names of the target groups. required: false type: list + elements: str collect_targets_health: description: - When set to "yes", output contains targets health description @@ -45,7 +47,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all target groups @@ -63,7 +65,7 @@ ''' -RETURN = ''' +RETURN = r''' target_groups: description: a list of target groups returned: always @@ -299,8 +301,8 @@ def main(): argument_spec.update( dict( load_balancer_arn=dict(type='str'), - target_group_arns=dict(type='list'), - names=dict(type='list'), + target_group_arns=dict(type='list', elements='str'), + names=dict(type='list', elements='str'), collect_targets_health=dict(default=False, type='bool', required=False) ) ) diff --git a/iam.py b/iam.py index 74cf77d3c7f..b4c1bcb68d7 100644 --- a/iam.py +++ b/iam.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam version_added: 1.0.0 @@ -69,10 +69,12 @@ description: - A list of the keys that you want affected by the I(access_key_state) parameter. type: list + elements: str groups: description: - A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed. type: list + elements: str password: description: - When I(type=user) and either I(state=present) or I(state=update), define the users login password. 
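
What `elements='str'` adds in hunks like the iam.py ones below: each member of the list is validated and coerced on its own rather than the option passing through as an opaque list. A simplified stand-in for that per-element coercion (illustration only, not Ansible's actual validator):

    def coerce_str_elements(value):
        # Accept a bare scalar for convenience, then stringify each
        # element, roughly mirroring a list option with elements='str'.
        if not isinstance(value, list):
            value = [value]
        return [str(item) for item in value]

    print(coerce_str_elements(['admins', 42]))  # ['admins', '42']
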
@@ -98,7 +100,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Basic user creation example - name: Create two new IAM users with API keys community.aws.iam: @@ -146,7 +148,7 @@ Service: lambda.amazonaws.com ''' -RETURN = ''' +RETURN = r''' role_result: description: the IAM.role dict returned by Boto type: str @@ -620,14 +622,14 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( iam_type=dict(required=True, choices=['user', 'group', 'role']), - groups=dict(type='list', default=None, required=False), + groups=dict(type='list', default=None, required=False, elements='str'), state=dict(required=True, choices=['present', 'absent', 'update']), password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', 'Active', 'Inactive', 'Create', 'Remove']), - access_key_ids=dict(type='list', default=None, required=False), + access_key_ids=dict(type='list', default=None, required=False, elements='str'), key_count=dict(type='int', default=1, required=False), name=dict(required=True), trust_policy_filepath=dict(default=None, required=False), diff --git a/iam_group.py b/iam_group.py index 7a9da3e6f57..b55e32218a2 100644 --- a/iam_group.py +++ b/iam_group.py @@ -18,7 +18,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam_group version_added: 1.0.0 @@ -74,7 +74,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a group @@ -119,7 +119,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' iam_group: description: dictionary containing all the group information including group membership returned: success @@ -410,8 +410,8 @@ def main(): argument_spec = dict( name=dict(required=True), - managed_policies=dict(default=[], type='list', aliases=['managed_policy']), - users=dict(default=[], type='list'), + managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), + users=dict(default=[], type='list', elements='str'), state=dict(choices=['present', 'absent'], required=True), purge_users=dict(default=False, type='bool'), purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) diff --git a/iam_role.py b/iam_role.py index b20c564734a..9a2eaca8cfe 100644 --- a/iam_role.py +++ b/iam_role.py @@ -5,7 +5,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam_role version_added: 1.0.0 @@ -49,6 +49,7 @@ - To embed an inline policy, use M(community.aws.iam_policy). aliases: ['managed_policy'] type: list + elements: str max_session_duration: description: - The maximum duration (in seconds) of a session when assuming the role. @@ -95,7 +96,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create a role with description and tags @@ -126,7 +127,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' iam_role: description: dictionary containing the IAM Role data returned: success @@ -613,7 +614,7 @@ def main(): name=dict(type='str', required=True), path=dict(type='str', default="/"), assume_role_policy_document=dict(type='json'), - managed_policies=dict(type='list', aliases=['managed_policy']), + managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'), max_session_duration=dict(type='int'), state=dict(type='str', choices=['present', 'absent'], default='present'), description=dict(type='str'), @@ -630,7 +631,7 @@ def main(): if module.params.get('purge_policies') is None: module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.' - ' To maintain the existing behaviour explicity set purge_policies=true', date='2022-06-01', collection_name='community.aws') + ' To maintain the existing behaviour explicitly set purge_policies=true', date='2022-06-01', collection_name='community.aws') if module.params.get('boundary'): if module.params.get('create_instance_profile'): diff --git a/iam_user.py b/iam_user.py index 9dc9eb45eca..6b8efcda811 100644 --- a/iam_user.py +++ b/iam_user.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam_user version_added: 1.0.0 @@ -26,6 +26,7 @@ - To embed an inline policy, use M(community.aws.iam_policy). required: false type: list + elements: str aliases: ['managed_policy'] state: description: @@ -47,7 +48,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Note: This module does not allow management of groups that users belong to. 
# Groups should manage their membership directly using `iam_group`, @@ -77,7 +78,7 @@ state: absent ''' -RETURN = ''' +RETURN = r''' user: description: dictionary containing all the user information returned: success @@ -344,7 +345,7 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), - managed_policies=dict(default=[], type='list', aliases=['managed_policy']), + managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), state=dict(choices=['present', 'absent'], required=True), purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) ) diff --git a/lambda.py b/lambda.py index 2f417469c15..8975163cc30 100644 --- a/lambda.py +++ b/lambda.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: lambda version_added: 1.0.0 @@ -116,7 +116,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Create Lambda functions - name: looped creation community.aws.lambda: @@ -168,7 +168,7 @@ - ByeBye ''' -RETURN = ''' +RETURN = r''' code: description: the lambda function location returned by get_function in boto3 returned: success @@ -344,8 +344,8 @@ def main(): description=dict(default=''), timeout=dict(type='int', default=3), memory_size=dict(type='int', default=128), - vpc_subnet_ids=dict(type='list'), - vpc_security_group_ids=dict(type='list'), + vpc_subnet_ids=dict(type='list', elements='str'), + vpc_security_group_ids=dict(type='list', elements='str'), environment_variables=dict(type='dict'), dead_letter_arn=dict(), tracing_mode=dict(choices=['Active', 'PassThrough']), diff --git a/rds.py b/rds.py index 5b15934fc03..87dd07f518c 100644 --- a/rds.py +++ b/rds.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: rds version_added: 1.0.0 @@ -188,7 +188,7 @@ type: int apply_immediately: description: - - When I(apply_immediately=trye), the modifications will be applied as soon as possible rather than waiting for the + - When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. - Used only when I(command=modify). 
type: bool @@ -235,7 +235,7 @@ # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD -EXAMPLES = ''' +EXAMPLES = r''' - name: Basic mysql provisioning example community.aws.rds: command: create @@ -309,7 +309,7 @@ msg: "The new db endpoint is {{ rds.instance.endpoint }}" ''' -RETURN = ''' +RETURN = r''' instance: description: the rds instance returned: always @@ -352,7 +352,7 @@ sample: "1489707802.0" secondary_availability_zone: description: the name of the secondary AZ for a DB instance with multi-AZ support - returned: when RDS instance exists and is multy-AZ + returned: when RDS instance exists and is multi-AZ type: str sample: "eu-west-1b" backup_window: @@ -1329,7 +1329,7 @@ def main(): multi_zone=dict(type='bool', required=False), iops=dict(required=False), security_groups=dict(required=False), - vpc_security_groups=dict(type='list', required=False), + vpc_security_groups=dict(type='list', required=False, elements='str'), port=dict(required=False, type='int'), upgrade=dict(type='bool', default=False), option_group=dict(required=False), diff --git a/rds_instance.py b/rds_instance.py index f4018a3d6c1..95781a48d54 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: rds_instance version_added: 1.0.0 @@ -152,6 +152,7 @@ description: - (EC2-Classic platform) A list of DB security groups to associate with this DB instance. type: list + elements: str db_snapshot_identifier: description: - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot). @@ -176,6 +177,7 @@ aliases: - cloudwatch_log_exports type: list + elements: str enable_iam_database_authentication: description: - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. @@ -412,9 +414,10 @@ description: - A list of EC2 VPC security groups to associate with the DB cluster. type: list + elements: str ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create minimal aurora instance in default VPC and default subnet group community.aws.rds_instance: @@ -449,7 +452,7 @@ final_snapshot_identifier: "{{ snapshot_id }}" ''' -RETURN = ''' +RETURN = r''' allocated_storage: description: The allocated storage size in gibibytes. This is always 1 for aurora database engines. 
returned: always @@ -1094,12 +1097,12 @@ def main(): db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), db_name=dict(), db_parameter_group_name=dict(), - db_security_groups=dict(type='list'), + db_security_groups=dict(type='list', elements='str'), db_snapshot_identifier=dict(), db_subnet_group_name=dict(aliases=['subnet_group']), domain=dict(), domain_iam_role_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']), + enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'), enable_iam_database_authentication=dict(type='bool'), enable_performance_insights=dict(type='bool'), engine=dict(), @@ -1142,7 +1145,7 @@ def main(): tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), timezone=dict(), use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']), - vpc_security_group_ids=dict(type='list') + vpc_security_group_ids=dict(type='list', elements='str') ) arg_spec.update(parameter_options) diff --git a/rds_subnet_group.py b/rds_subnet_group.py index 3e207468e8e..818b46bd6fe 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: rds_subnet_group version_added: 1.0.0 @@ -35,6 +35,7 @@ - List of subnet IDs that make up the database subnet group. - Required when I(state=present). type: list + elements: str author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: - amazon.aws.aws @@ -42,7 +43,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Add or change a subnet group community.aws.rds_subnet_group: state: present @@ -58,7 +59,7 @@ name: norwegian-blue ''' -RETURN = ''' +RETURN = r''' subnet_group: description: Dictionary of DB subnet group values returned: I(state=present) @@ -125,7 +126,7 @@ def main(): state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list'), + subnets=dict(required=False, type='list', elements='str'), ) ) module = AnsibleModule(argument_spec=argument_spec) diff --git a/redshift.py b/redshift.py index f6308392694..2b74c5f1fd0 100644 --- a/redshift.py +++ b/redshift.py @@ -8,7 +8,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- author: - "Jens Carl (@j-carl), Hothead Games Inc." 
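
The `r'''` prefix added throughout these patches keeps backslash sequences in the documentation strings literal instead of letting Python interpret them; a small illustrative aside (plain Python, not taken from the modules):

    plain = 'first line\nsecond line'   # \n is interpreted: one real newline
    raw = r'first line\nsecond line'    # \n stays as two literal characters
    print(len(plain), len(raw))         # 22 23
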
@@ -174,7 +174,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Basic cluster provisioning example community.aws.redshift: command: create @@ -191,7 +191,7 @@ wait: true ''' -RETURN = ''' +RETURN = r''' cluster: description: dictionary containing all the cluster information returned: success @@ -556,8 +556,8 @@ def main(): password=dict(no_log=True, required=False), db_name=dict(required=False), cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'), - cluster_security_groups=dict(aliases=['security_groups'], type='list'), - vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'), + cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'), + vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'), skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False), final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False), diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 615e667b9c8..68eb42496c3 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- author: - "Jens Carl (@j-carl), Hothead Games Inc." @@ -47,7 +47,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create a Redshift subnet group community.aws.redshift_subnet_group: state: present @@ -63,7 +63,7 @@ group_name: redshift-subnet ''' -RETURN = ''' +RETURN = r''' group: description: dictionary containing all Redshift subnet group information returned: success @@ -98,7 +98,7 @@ def main(): state=dict(required=True, choices=['present', 'absent']), group_name=dict(required=True, aliases=['name']), group_description=dict(required=False, aliases=['description']), - group_subnets=dict(required=False, aliases=['subnets'], type='list'), + group_subnets=dict(required=False, aliases=['subnets'], type='list', elements='str'), )) module = AnsibleModule(argument_spec=argument_spec) diff --git a/route53.py b/route53.py index 72ca73faeb1..ad25e38ecd0 100644 --- a/route53.py +++ b/route53.py @@ -8,7 +8,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: route53 version_added: 1.0.0 @@ -69,6 +69,7 @@ - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records. - When deleting a record all values for the record must be specified or Route53 will not delete it. type: list + elements: str overwrite: description: - Whether an existing record should be overwritten on create if values do not match. @@ -137,7 +138,7 @@ ''' -RETURN = ''' +RETURN = r''' nameservers: description: Nameservers associated with the zone. 
returned: when state is 'get' @@ -501,7 +502,7 @@ def main(): alias=dict(type='bool'), alias_hosted_zone_id=dict(type='str'), alias_evaluate_target_health=dict(type='bool', default=False), - value=dict(type='list'), + value=dict(type='list', elements='str'), overwrite=dict(type='bool'), retry_interval=dict(type='int', default=500), private_zone=dict(type='bool', default=False), diff --git a/route53_info.py b/route53_info.py index 77d72603ffe..3a937a40653 100644 --- a/route53_info.py +++ b/route53_info.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: route53_info short_description: Retrieves route53 details using AWS methods version_added: 1.0.0 @@ -134,7 +134,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Simple example of listing all hosted zones - name: List all hosted zones community.aws.route53_info: @@ -436,7 +436,7 @@ def main(): 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ]), dns_name=dict(), - resource_id=dict(type='list', aliases=['resource_ids']), + resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), health_check_id=dict(), hosted_zone_method=dict(choices=[ 'details', diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 854f3cadd84..f42c64a0028 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: s3_bucket_notification version_added: 1.0.0 @@ -89,7 +89,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' --- # Example that creates a lambda event notification for a bucket - name: Process jpg image @@ -103,7 +103,7 @@ suffix: .jpg ''' -RETURN = ''' +RETURN = r''' notification_configuration: description: list of currently applied notifications returned: success @@ -216,7 +216,7 @@ def main(): event_name=dict(required=True), lambda_function_arn=dict(aliases=['function_arn']), bucket_name=dict(required=True), - events=dict(type='list', default=[], choices=event_types), + events=dict(type='list', default=[], choices=event_types, elements='str'), prefix=dict(default=''), suffix=dict(default=''), lambda_alias=dict(), diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 72fe1616818..a2518a88570 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: s3_lifecycle version_added: 1.0.0 @@ -72,6 +72,7 @@ I(transition_days) I(storage_class) type: list + elements: dict rule_id: description: - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided." @@ -114,6 +115,7 @@ I(transition_date) I(storage_class) type: list + elements: dict requester_pays: description: - The I(requester_pays) option does nothing and will be removed after 2022-06-01 @@ -124,7 +126,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days @@ -442,7 +444,7 @@ def main(): noncurrent_version_expiration_days=dict(type='int'), noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']), noncurrent_version_transition_days=dict(type='int'), - noncurrent_version_transitions=dict(type='list'), + noncurrent_version_transitions=dict(type='list', elements='dict'), prefix=dict(), requester_pays=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), rule_id=dict(), @@ -451,7 +453,7 @@ def main(): storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']), transition_days=dict(type='int'), transition_date=dict(), - transitions=dict(type='list'), + transitions=dict(type='list', elements='dict'), purge_transitions=dict(default='yes', type='bool') ) diff --git a/sns_topic.py b/sns_topic.py index 4240a746754..79070cbabc5 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -7,7 +7,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: sns_topic short_description: Manages AWS SNS topics and subscriptions version_added: 1.0.0 @@ -73,7 +73,7 @@ requirements: [ "boto" ] ''' -EXAMPLES = """ +EXAMPLES = r""" - name: Create alarm SNS topic community.aws.sns_topic: @@ -99,7 +99,7 @@ """ -RETURN = ''' +RETURN = r''' sns_arn: description: The ARN of the topic you are modifying type: str @@ -482,7 +482,7 @@ def main(): display_name=dict(), policy=dict(type='dict'), delivery_policy=dict(type='dict'), - subscriptions=dict(default=[], type='list'), + subscriptions=dict(default=[], type='list', elements='dict'), purge_subscriptions=dict(type='bool', default=True), ) From 0a768e045ba83387d903fad968f57d1f14577214 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Fri, 17 Jul 2020 21:10:09 +0300 Subject: [PATCH 020/683] aws modules: fix examples to use FQCN for builtin modules/plugins (#144) --- aws_acm.py | 2 +- aws_batch_compute_environment.py | 2 +- aws_batch_job_definition.py | 2 +- aws_batch_job_queue.py | 2 +- aws_s3_bucket_info.py | 2 +- aws_ssm_parameter_store.py | 2 +- cloudformation_exports_info.py | 2 +- cloudfront_info.py | 8 ++++---- ec2_eip.py | 4 ++-- ec2_eip_info.py | 4 ++-- ec2_elb_info.py | 6 +++--- ec2_lc.py | 2 +- ec2_placement_group_info.py | 2 +- ec2_vpc_nat_gateway_info.py | 2 +- ec2_vpc_peering_info.py | 2 +- efs_info.py | 2 +- elb_application_lb_info.py | 2 +- elb_classic_lb_info.py | 6 +++--- elb_target_info.py | 4 ++-- lambda_alias.py | 2 +- lambda_event.py | 2 +- lambda_facts.py | 2 +- lambda_info.py | 2 +- lambda_policy.py | 2 +- rds.py | 2 +- 25 files changed, 35 insertions(+), 35 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index b57618b1ac5..25581db1a39 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -175,7 +175,7 @@ register: cert_create - name: print ARN of cert we just created - debug: + ansible.builtin.debug: var: cert_create.certificate.arn - name: delete the cert we just created diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 21eb4808f62..39ff11e2576 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -155,7 +155,7 @@ register: aws_batch_compute_environment_action - name: show results - debug: + ansible.builtin.debug: var: aws_batch_compute_environment_action ''' diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 1c30d72efc5..18d0429a831 100644 --- 
a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -204,7 +204,7 @@ register: job_definition_create_result - name: show results - debug: var=job_definition_create_result + ansible.builtin.debug: var=job_definition_create_result ''' RETURN = r''' diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index e95940dbb8f..b472371eb84 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -83,7 +83,7 @@ register: batch_job_queue_action - name: show results - debug: + ansible.builtin.debug: var: batch_job_queue_action ''' diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 90e07a1b62b..735bba97f74 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -35,7 +35,7 @@ register: result - name: List buckets - debug: + ansible.builtin.debug: msg: "{{ result['buckets'] }}" ''' diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 82138868760..32c1df62536 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -112,7 +112,7 @@ overwrite_value: "always" - name: recommend to use with aws_ssm lookup plugin - debug: + ansible.builtin.debug: msg: "{{ lookup('amazon.aws.aws_ssm', 'hello') }}" ''' diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index d53d83bd027..8eab5325be3 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -27,7 +27,7 @@ profile: 'my_aws_profile' region: 'my_region' register: cf_exports -- debug: +- ansible.builtin.debug: msg: "{{ cf_exports }}" ''' diff --git a/cloudfront_info.py b/cloudfront_info.py index cc6f9472bb0..bd81ac53b54 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -163,7 +163,7 @@ distribution: true distribution_id: my-cloudfront-distribution-id register: result_did -- debug: +- ansible.builtin.debug: msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}" - name: Get information about a distribution using the CNAME of the cloudfront distribution. @@ -171,7 +171,7 @@ distribution: true domain_name_alias: www.my-website.com register: result_website -- debug: +- ansible.builtin.debug: msg: "{{ result_website['cloudfront']['www.my-website.com'] }}" # When the module is called as cloudfront_facts, return values are published @@ -181,13 +181,13 @@ community.aws.cloudfront_facts: distribution: true distribution_id: my-cloudfront-distribution-id -- debug: +- ansible.builtin.debug: msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}" - community.aws.cloudfront_facts: distribution: true domain_name_alias: www.my-website.com -- debug: +- ansible.builtin.debug: msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}" - name: Get all information about an invalidation for a distribution. 
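
The rename this commit applies is mechanical: every short builtin action name in the examples gains its fully qualified collection name, which avoids ambiguity when an installed collection ships a module with the same short name. A hedged sketch of the mapping, assembled for this note from the hunks themselves (not exhaustive):

    fqcn = {
        'debug': 'ansible.builtin.debug',
        'set_fact': 'ansible.builtin.set_fact',
    }
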
diff --git a/ec2_eip.py b/ec2_eip.py index c43363e3b6d..42909d8ff36 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -137,7 +137,7 @@ register: eip - name: output the IP - debug: + ansible.builtin.debug: msg: "Allocated IP is {{ eip.public_ip }}" - name: provision new instances with ec2 @@ -162,7 +162,7 @@ register: eip - name: output the IP - debug: + ansible.builtin.debug: msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" - name: allocate eip - reuse unallocated ips (if found) with FREE tag diff --git a/ec2_eip_info.py b/ec2_eip_info.py index c31ec738f09..553930db67a 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -44,7 +44,7 @@ instance-id: i-123456789 register: my_vm_eips -- debug: +- ansible.builtin.debug: msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}" - name: List all EIP addresses for several VMs. @@ -69,7 +69,7 @@ # Set the variable eip_alloc to the value of the first allocation_id # and set the variable my_pub_ip to the value of the first public_ip -- set_fact: +- ansible.builtin.set_fact: eip_alloc: my_vms_eips.addresses[0].allocation_id my_pub_ip: my_vms_eips.addresses[0].public_ip diff --git a/ec2_elb_info.py b/ec2_elb_info.py index a66d130deb1..c4b1bd67360 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -47,7 +47,7 @@ - name: Gather information about all ELBs community.aws.ec2_elb_info: register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" @@ -56,7 +56,7 @@ names: frontend-prod-elb register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ elb_info.elbs.0.dns_name }}" - name: Gather information about a set of ELBs @@ -66,7 +66,7 @@ - backend-prod-elb register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" diff --git a/ec2_lc.py b/ec2_lc.py index deb6633adeb..813bfe04cb4 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -238,7 +238,7 @@ - name: Use EBS snapshot ID for volume block: - name: Set Volume Facts - set_fact: + ansible.builtin.set_fact: volumes: - device_name: /dev/sda1 volume_size: 20 diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 7ec7f62fd92..354d3eb3276 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -45,7 +45,7 @@ - my-other-cluster register: specific_ec2_placement_groups -- debug: +- ansible.builtin.debug: msg: "{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}" ''' diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index bb164a2b50b..f076d38a833 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -41,7 +41,7 @@ register: all_ngws - name: Debugging the result - debug: + ansible.builtin.debug: msg: "{{ all_ngws.result }}" - name: Get details on specific nat gateways diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 2d577227fac..cffcf6f9aed 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -41,7 +41,7 @@ register: all_vpc_peers - name: Debugging the result - debug: + ansible.builtin.debug: msg: "{{ all_vpc_peers.result }}" - name: Get details on specific VPC peer diff --git a/efs_info.py b/efs_info.py index a1b310fe7bc..62fd583785d 100644 --- a/efs_info.py +++ b/efs_info.py @@ -63,7 +63,7 @@ - sg-4d3c2b1a register: result -- debug: +- ansible.builtin.debug: msg: "{{ result['efs'] }}" ''' diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 21952633a43..796803d8c2f 100644 --- a/elb_application_lb_info.py +++ 
b/elb_application_lb_info.py @@ -58,7 +58,7 @@ names: "alb-name" region: "aws-region" register: alb_info -- debug: +- ansible.builtin.debug: var: alb_info ''' diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 88d44ee8125..12a6a43771a 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -51,7 +51,7 @@ - community.aws.elb_classic_lb_info: register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" @@ -60,7 +60,7 @@ names: frontend-prod-elb register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ elb_info.elbs.0.dns_name }}" # Gather information about a set of ELBs @@ -70,7 +70,7 @@ - backend-prod-elb register: elb_info -- debug: +- ansible.builtin.debug: msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" diff --git a/elb_target_info.py b/elb_target_info.py index dda76f08c24..924632339de 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -50,7 +50,7 @@ register: target_info - name: save fact for later - set_fact: + ansible.builtin.set_fact: original_tgs: "{{ target_info.instance_target_groups }}" - name: Deregister instance from all target groups @@ -118,7 +118,7 @@ # instance - useful in case the playbook fails mid-run and manual # rollback is required - name: "reregistration commands: ELBv2s" - debug: + ansible.builtin.debug: msg: > aws --region {{ansible_ec2_placement_region}} elbv2 register-targets --target-group-arn {{item.target_group_arn}} diff --git a/lambda_alias.py b/lambda_alias.py index 2b74cdb6352..75193221b5b 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -86,7 +86,7 @@ name: myLambdaFunction register: lambda_info - name: show results - debug: + ansible.builtin.debug: msg: "{{ lambda_info['lambda_facts'] }}" # The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) diff --git a/lambda_event.py b/lambda_event.py index e1a35220b74..6dbbfb6590b 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -102,7 +102,7 @@ register: event - name: Show source event - debug: + ansible.builtin.debug: var: event.lambda_stream_events ''' diff --git a/lambda_facts.py b/lambda_facts.py index 0d102fd7340..4c02947c998 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -69,7 +69,7 @@ query: all max_items: 20 - name: show Lambda facts - debug: + ansible.builtin.debug: var: lambda_facts ''' diff --git a/lambda_info.py b/lambda_info.py index b81f8521013..1e40aec4ca1 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -63,7 +63,7 @@ max_items: 20 register: output - name: show Lambda information - debug: + ansible.builtin.debug: msg: "{{ output['function'] }}" ''' diff --git a/lambda_policy.py b/lambda_policy.py index 09c74423a0d..2860e3a6540 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -120,7 +120,7 @@ register: lambda_policy_action - name: show results - debug: + ansible.builtin.debug: var: lambda_policy_action ''' diff --git a/rds.py b/rds.py index 87dd07f518c..e259ef757e5 100644 --- a/rds.py +++ b/rds.py @@ -305,7 +305,7 @@ region: us-west-2 vpc_security_groups: sg-xxx945xx -- debug: +- ansible.builtin.debug: msg: "The new db endpoint is {{ rds.instance.endpoint }}" ''' From 8a8f6de3d13ef9de10c914fa91f313bdfd8004e6 Mon Sep 17 00:00:00 2001 From: Philipp Hoffmann Date: Thu, 16 Apr 2020 15:53:00 +0200 Subject: [PATCH 021/683] Added explicit encoding for key_data string. 
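
Context for the one-line change below (a hedged aside, not part of the original commit message): cryptography's load_pem_private_key() expects bytes, while the module receives key_data as text, so the string has to be encoded before parsing. A minimal sketch of the call the patch settles on:

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.serialization import load_pem_private_key

    def parse_key_data(key_data, b_key_passphrase=None):
        # key_data arrives as str from the module parameters; PEM is
        # ASCII, so encoding it to bytes is safe. parse_key_data is a
        # hypothetical helper name used only for this illustration.
        return load_pem_private_key(key_data.encode('ascii'), b_key_passphrase, default_backend())
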
---
 ec2_win_password.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ec2_win_password.py b/ec2_win_password.py
index 7c7716b8a44..aeed396ff1c 100644
--- a/ec2_win_password.py
+++ b/ec2_win_password.py
@@ -177,7 +177,7 @@ def main():
             module.fail_json(msg="unable to parse key file")
     elif key_data is not None and key_file is None:
         try:
-            key = load_pem_private_key(key_data, b_key_passphrase, default_backend())
+            key = load_pem_private_key(key_data.encode('ascii'), b_key_passphrase, default_backend())
         except (ValueError, TypeError) as e:
             module.fail_json(msg="unable to parse key data")

From 57a2d6b38d3581f758f789831d4a37644a028fa0 Mon Sep 17 00:00:00 2001
From: Philipp Hoffmann
Date: Fri, 24 Jul 2020 10:02:28 +0200
Subject: [PATCH 022/683] Refactoring of ec2_win_password in preparation for
 testing

Switched from AnsibleModule to AnsibleAWSModule
---
 ec2_win_password.py | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/ec2_win_password.py b/ec2_win_password.py
index aeed396ff1c..e42fa09e35a 100644
--- a/ec2_win_password.py
+++ b/ec2_win_password.py
@@ -111,12 +111,12 @@
 except ImportError:
     HAS_CRYPTOGRAPHY = False

-from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
 from ansible.module_utils._text import to_bytes


-def main():
+def setup_module_object():
     argument_spec = ec2_argument_spec()
     argument_spec.update(dict(
         instance_id=dict(required=True),
@@ -127,21 +127,21 @@ def main():
         wait_timeout=dict(default=120, required=False, type='int'),
     )
     )
-    module = AnsibleModule(argument_spec=argument_spec)
+    module = AnsibleAWSModule(argument_spec=argument_spec)
+    return module

-    if not HAS_BOTO:
-        module.fail_json(msg='Boto required for this module.')
-
-    if not HAS_CRYPTOGRAPHY:
-        module.fail_json(msg='cryptography package required for this module.')
+
+def ec2_win_password(module):
     instance_id = module.params.get('instance_id')
     key_file = module.params.get('key_file')
-    key_data = module.params.get('key_data')
     if module.params.get('key_passphrase') is None:
         b_key_passphrase = None
     else:
         b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
+    if module.params.get('key_data') is None:
+        b_key_data = None
+    else:
+        b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')
     wait = module.params.get('wait')
     wait_timeout = module.params.get('wait_timeout')
@@ -165,7 +165,7 @@ def main():
     if wait and datetime.datetime.now() >= end:
         module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)

-    if key_file is not None and key_data is None:
+    if key_file is not None and b_key_data is None:
         try:
             with open(key_file, 'rb') as f:
                 key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
@@ -175,9 +175,9 @@ def main():
         except (ValueError, TypeError) as e:
             # Handle issues loading key
             module.fail_json(msg="unable to parse key file")
-    elif key_data is not None and key_file is None:
+    elif b_key_data is not None and key_file is None:
         try:
-            key = load_pem_private_key(key_data.encode('ascii'), b_key_passphrase, default_backend())
+            key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend())
         except (ValueError, TypeError) as e:
             module.fail_json(msg="unable to parse key data")

@@ -196,5 +196,17 @@
module.exit_json(win_password=decrypted, changed=True) +def main(): + module = setup_module_object() + + if not HAS_BOTO: + module.fail_json(msg='Boto required for this module.') + + if not HAS_CRYPTOGRAPHY: + module.fail_json(msg='cryptography package required for this module.') + + ec2_win_password(module) + + if __name__ == '__main__': main() From 927e756d77bbfd0aa38e5dfec24ff9eb790c3866 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 29 Jul 2020 09:10:43 +0200 Subject: [PATCH 023/683] lambda AnsibleAWSModule cleanup (also enable retries) (#5) * Bomb out early if someone tries to set tags without the necessary botocore version * Simplify some error handling by using fail_json_aws * Use BotoCoreError rather than the sub-errors We still bomb out, but fail_json_aws is more graceful and user friendly than an uncaught Boto3 error. * use is_boto3_error_code to limit what we catch rather than catching and re-raising. * Cleanup get_account_info - use module.client to avoid the mass of extra args - use is_boto3_error_code('AccessDenied') to be a little cleaner - fix text search (re.search(, mystring) rather than mystring.search()) * Use module.client helpers * Delete dead code path - we test for having *both* vpc_subnet_ids and vpc_security_group_ids when we parse the arguments * Enable basic AWS Retries * Tweak integration test to allow for common 'missing region' error message * Rename lambda tests from 'aws_lambda' to 'lambda' (matching the module name) * Use omit rather than 'null' in the tests - https://github.com/ansible/ansible/issues/69190 * Ignore duplicate-except warnings (it's caused by the way is_boto3_error works) * change expected error messages now we're using an AnsibleAWSModule feature --- lambda.py | 116 +++++++++++++++++++++++------------------------------- 1 file changed, 50 insertions(+), 66 deletions(-) diff --git a/lambda.py b/lambda.py index 8975163cc30..9cb2e0286cc 100644 --- a/lambda.py +++ b/lambda.py @@ -212,21 +212,24 @@ ''' from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + import base64 import hashlib import traceback import re try: - from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # protected by AnsibleAWSModule -def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs): +def get_account_info(module): """return the account information (account id and partition) we are currently working on get_account_info tries too find out the account that we are working @@ -237,27 +240,25 @@ def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs): account_id = None partition = None try: - sts_client = boto3_conn(module, conn_type='client', resource='sts', - region=region, endpoint=endpoint, **aws_connect_kwargs) - caller_id = sts_client.get_caller_identity() + sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + 
caller_id = sts_client.get_caller_identity(aws_retry=True) account_id = caller_id.get('Account') partition = caller_id.get('Arn').split(':')[1] - except ClientError: + except (BotoCoreError, ClientError): try: - iam_client = boto3_conn(module, conn_type='client', resource='iam', - region=region, endpoint=endpoint, **aws_connect_kwargs) - arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':') - except ClientError as e: - if (e.response['Error']['Code'] == 'AccessDenied'): + iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') + except is_boto3_error_code('AccessDenied') as e: + try: except_msg = to_native(e.message) - m = except_msg.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/") - account_id = m.group(4) - partition = m.group(1) - if account_id is None: + except AttributeError: + except_msg = to_native(e) + m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg) + if m is None: module.fail_json_aws(e, msg="getting account information") - if partition is None: - module.fail_json_aws(e, msg="getting account information: partition") - except Exception as e: + account_id = m.group(4) + partition = m.group(1) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="getting account information") return account_id, partition @@ -266,15 +267,10 @@ def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs): def get_current_function(connection, function_name, qualifier=None): try: if qualifier is not None: - return connection.get_function(FunctionName=function_name, Qualifier=qualifier) - return connection.get_function(FunctionName=function_name) - except ClientError as e: - try: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - return None - except (KeyError, AttributeError): - pass - raise e + return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True) + return connection.get_function(FunctionName=function_name, aws_retry=True) + except is_boto3_error_code('ResourceNotFoundException'): + return None def sha256sum(filename): @@ -290,17 +286,14 @@ def sha256sum(filename): def set_tag(client, module, tags, function): - if not hasattr(client, "list_tags"): - module.fail_json(msg="Using tags requires botocore 1.5.40 or above") changed = False arn = function['Configuration']['FunctionArn'] try: - current_tags = client.list_tags(Resource=arn).get('Tags', {}) - except ClientError as e: - module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)), - exception=traceback.format_exc()) + current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {}) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to list tags") tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True) @@ -308,24 +301,21 @@ def set_tag(client, module, tags, function): if tags_to_remove: client.untag_resource( Resource=arn, - TagKeys=tags_to_remove + TagKeys=tags_to_remove, + aws_retry=True ) changed = True if tags_to_add: client.tag_resource( Resource=arn, - Tags=tags_to_add + Tags=tags_to_add, + aws_retry=True ) changed = True - except ClientError as e: - module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn, - to_native(e)), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - 
except BotoCoreError as e: - module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn, - to_native(e)), exception=traceback.format_exc()) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) return changed @@ -389,22 +379,21 @@ def main(): check_mode = module.check_mode changed = False - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg='region must be specified') - try: - client = boto3_conn(module, conn_type='client', resource='lambda', - region=region, endpoint=ec2_url, **aws_connect_kwargs) - except (ClientError, ValidationError) as e: + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Trying to connect to AWS") + if tags is not None: + if not hasattr(client, "list_tags"): + module.fail_json(msg="Using tags requires botocore 1.5.40 or above") + if state == 'present': if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): role_arn = role else: # get account ID and assemble ARN - account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs) + account_id, partition = get_account_info(module) role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) # Get function configuration if present, False otherwise @@ -447,9 +436,7 @@ def main(): func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) # If VPC configuration is desired - if vpc_subnet_ids or vpc_security_group_ids: - if not vpc_subnet_ids or not vpc_security_group_ids: - module.fail_json(msg='vpc connectivity requires at least one security group and one subnet') + if vpc_subnet_ids: if 'VpcConfig' in current_config: # Compare VPC config with current config @@ -472,10 +459,10 @@ def main(): if len(func_kwargs) > 1: try: if not check_mode: - response = client.update_function_configuration(**func_kwargs) + response = client.update_function_configuration(aws_retry=True, **func_kwargs) current_version = response['Version'] changed = True - except (ParamValidationError, ClientError) as e: + except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to update lambda configuration") # Update code configuration @@ -513,10 +500,10 @@ def main(): if len(code_kwargs) > 2: try: if not check_mode: - response = client.update_function_code(**code_kwargs) + response = client.update_function_code(aws_retry=True, **code_kwargs) current_version = response['Version'] changed = True - except (ParamValidationError, ClientError) as e: + except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to upload new code") # Describe function code and configuration @@ -573,10 +560,7 @@ def main(): func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) # If VPC configuration is given - if vpc_subnet_ids or vpc_security_group_ids: - if not vpc_subnet_ids or not vpc_security_group_ids: - module.fail_json(msg='vpc connectivity requires at least one security group and one subnet') - + if vpc_subnet_ids: func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}}) @@ -584,10 +568,10 @@ def main(): current_version = None try: if not check_mode: - response = client.create_function(**func_kwargs) + response = client.create_function(aws_retry=True, **func_kwargs) current_version = response['Version'] changed = True - except (ParamValidationError, ClientError) as e: + except 
(BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to create function") # Tag Function @@ -604,9 +588,9 @@ def main(): if state == 'absent' and current_function: try: if not check_mode: - client.delete_function(FunctionName=name) + client.delete_function(FunctionName=name, aws_retry=True) changed = True - except (ParamValidationError, ClientError) as e: + except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to delete Lambda function") module.exit_json(changed=changed) From ea31609650ddfd6298c11e87d12140bc7537c7f4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 30 Jul 2020 19:04:44 +0200 Subject: [PATCH 024/683] Add check_mode support and integration tests for aws_region_info (#139) * Add integration tests for aws_region_info * Add support for check_mode * aws_region_info: clarify "-" vs "_" precedence and make the implementation deterministic --- aws_region_info.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/aws_region_info.py b/aws_region_info.py index 09a548b54a7..d0b74e3f112 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -17,10 +17,12 @@ options: filters: description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See - U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for - possible filters. Filter names and values are case sensitive. You can also use underscores - instead of dashes (-) in the filter keys, which will take precedence in case of conflict. + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. 
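A minimal sketch of the precedence rule, with hypothetical filter values, mirroring the deterministic rewrite the diff below makes (and assuming Python 3.7+ insertion-ordered dicts):

    filters = {'region-name': 'us-east-1', 'region_name': 'eu-west-1'}

    # Copy first, then rewrite only the underscore keys so they
    # deterministically overwrite any dash-separated duplicates.
    sanitized = dict(filters)
    for key in list(filters):
        if '_' in key:
            sanitized[key.replace('_', '-')] = sanitized.pop(key)

    print(sanitized)  # {'region-name': 'eu-west-1'}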
default: {} type: dict extends_documentation_fragment: @@ -69,14 +71,18 @@ def main(): filters=dict(default={}, type='dict') ) - module = AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'aws_region_facts': module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) # Replace filter key underscores with dashes, for compatibility - sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items()) + sanitized_filters = dict(module.params.get('filters')) + for k in module.params.get('filters').keys(): + if "_" in k: + sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + del sanitized_filters[k] try: regions = connection.describe_regions( From d45213f23458e0154b0ddd5585923b2269de7ecb Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 1 Aug 2020 14:59:33 +0200 Subject: [PATCH 025/683] ec2_vpc_peer: Remove duplicate 'profile' parameter, it's automatically added by ec2_argument_spec (#171) --- ec2_vpc_peer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 2a08618a73f..99c8139b6a4 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -401,7 +401,6 @@ def main(): peering_id=dict(), peer_owner_id=dict(), tags=dict(required=False, type='dict'), - profile=dict(), state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']) ) ) From 12ed339b1929bd8d37a61c33b5d1601de1ea6045 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 5 Aug 2020 11:08:43 +0200 Subject: [PATCH 026/683] iam_managed_policy: fix json in documentation (#178) --- iam_managed_policy.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 3e5f14a7ddb..3b1adece098 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -79,7 +79,12 @@ - name: Create IAM Managed Policy community.aws.iam_managed_policy: policy_name: "ManagedPolicy" - policy: "{{ lookup('file', 'managed_policy_update.json') }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Action: "logs:CreateLogGroup" + Resource: "*" make_default: false state: present @@ -87,7 +92,15 @@ - name: Create IAM Managed Policy community.aws.iam_managed_policy: policy_name: "ManagedPolicy" - policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}" + policy: | + { + "Version": "2012-10-17", + "Statement":[{ + "Effect": "Allow", + "Action": "logs:PutRetentionPolicy", + "Resource": "*" + }] + } only_version: true state: present From ae7325db46189b6e8eb3389578076c037b96e61c Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Thu, 6 Aug 2020 13:08:02 -0700 Subject: [PATCH 027/683] Fix VpcId dict key in ec2_eip and add tests (#181) * Fix VpcId dict key in ec2_eip and add tests We're not converting camel_to_snake in this module, however ensure_present has been using snake_cased keys for vpc_id. This prevents attaching an EIP to an instance. This appears to have been overlooked in the boto3 migration in ansible/ansible/pull/61575. Also adds tests to cover the `if is_instance` codepath in ensure_present. 
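The underlying hazard: boto3 describe calls return plain dictionaries keyed in CamelCase, where boto2 returned objects with snake_case attributes, so the boto2-era instance.vpc_id lookup can never succeed against the data find_device() now returns. A minimal sketch of the difference, using hypothetical instance data:

    # Hypothetical data shaped like one instance from a boto3
    # EC2 describe_instances() response.
    instance = {'InstanceId': 'i-0123456789abcdef0', 'VpcId': 'vpc-0123456789abcdef0'}

    # instance.vpc_id        # boto2 style: AttributeError on a plain dict
    print(instance['VpcId'])  # boto3 style: CamelCase dictionary keys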
--- ec2_eip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_eip.py b/ec2_eip.py index 42909d8ff36..00130bf3f93 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -436,7 +436,7 @@ def ensure_present(ec2, module, domain, address, private_ip_address, device_id, if is_instance: instance = find_device(ec2, module, device_id) if reuse_existing_ip_allowed: - if instance.vpc_id and len(instance.vpc_id) > 0 and domain is None: + if instance['VpcId'] and len(instance['VpcId']) > 0 and domain is None: msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) From a76233698de2a402f6b4fd2e4d71190022c7a2e6 Mon Sep 17 00:00:00 2001 From: Matt Traynham Date: Tue, 11 Aug 2020 16:23:18 -0400 Subject: [PATCH 028/683] Add direct_connect_gateway_id to virtual_interface (#52) * Add direct_connect_gateway_id to virtual_interface This adds the direct_connect_gateway_id to aws_direct_connect_virtual_interface. This field is only applicable in private VIF cases (public=False) and is mutually exclusive to virtual_gateway_id. * Add tests for aws_direct_connect_virtual_interface * Addressing code review feedback for vif unit tests - Remove print statement * Correct vif tests --- aws_direct_connect_virtual_interface.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 7547b027f0c..ba8391a00a0 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -72,6 +72,14 @@ virtual_gateway_id: description: - The virtual gateway ID required for creating a private virtual interface. + - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required. + These options are mutually exclusive. + type: str + direct_connect_gateway_id: + description: + - The direct connect gateway ID for creating a private virtual interface. + - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required. + These options are mutually exclusive. type: str virtual_interface_id: description: @@ -190,6 +198,11 @@ returned: when I(public=False) type: str sample: vgw-f3ce259a +direct_connect_gateway_id: + description: The ID of the Direct Connect gateway. This only applies to private virtual interfaces. + returned: when I(public=False) + type: str + sample: f7593767-eded-44e8-926d-a2234175835d virtual_interface_id: description: The ID of the virtual interface. 
returned: always @@ -364,6 +377,7 @@ def assemble_params_for_creating_vi(params): family_addr = params['address_type'] cidr = params['cidr'] virtual_gateway_id = params['virtual_gateway_id'] + direct_connect_gateway_id = params['direct_connect_gateway_id'] parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr) @@ -376,7 +390,10 @@ def assemble_params_for_creating_vi(params): if public and cidr: parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr] if not public: - parameters['virtualGatewayId'] = virtual_gateway_id + if virtual_gateway_id: + parameters['virtualGatewayId'] = virtual_gateway_id + elif direct_connect_gateway_id: + parameters['directConnectGatewayId'] = direct_connect_gateway_id return parameters @@ -469,16 +486,17 @@ def main(): address_type=dict(), cidr=dict(type='list', elements='str'), virtual_gateway_id=dict(), + direct_connect_gateway_id=dict(), virtual_interface_id=dict() ) module = AnsibleAWSModule(argument_spec=argument_spec, required_one_of=[['virtual_interface_id', 'name']], required_if=[['state', 'present', ['public']], - ['public', False, ['virtual_gateway_id']], ['public', True, ['amazon_address']], ['public', True, ['customer_address']], - ['public', True, ['cidr']]]) + ['public', True, ['cidr']]], + mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']]) connection = module.client('directconnect') From a096cfb7ef295914254b6ced52a811dd85a52467 Mon Sep 17 00:00:00 2001 From: Matt Traynham Date: Tue, 11 Aug 2020 16:39:27 -0400 Subject: [PATCH 029/683] Adds module aws_direct_connect_confirm_connection (#53) * Adds module aws_direct_connect_confirm_connection DirectConnect connections that are created by a Hosted provider require approval by users. This module simply finds the DirectConnect connection and confirms it if it's in the 'ordering' state. * Adding unit tests * Correcting test cases * Correct linting issue * Switch to AWSRetry decorator to correct test cases --- aws_direct_connect_confirm_connection.py | 156 +++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 aws_direct_connect_confirm_connection.py diff --git a/aws_direct_connect_confirm_connection.py b/aws_direct_connect_confirm_connection.py new file mode 100644 index 00000000000..ba85f94eff3 --- /dev/null +++ b/aws_direct_connect_confirm_connection.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aws_direct_connect_confirm_connection +short_description: Confirms the creation of a hosted DirectConnect connection. +description: + - Confirms the creation of a hosted DirectConnect connection, which requires approval before it can be used. + - DirectConnect connections that require approval will be in the 'ordering' state. + - After confirmation, they will move to the 'pending' state and finally the 'available' state. +author: "Matt Traynham (@mtraynham)" +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +requirements: + - boto3 + - botocore +options: + name: + description: + - The name of the Direct Connect connection.
+ - One of I(connection_id) or I(name) must be specified. + type: str + connection_id: + description: + - The ID of the Direct Connect connection. + - One of I(connection_id) or I(name) must be specified. + type: str +''' + +EXAMPLES = ''' + +# confirm a Direct Connect by name +- name: confirm the connection id + aws_direct_connect_confirm_connection: + name: my_host_direct_connect + +# confirm a Direct Connect by connection_id +- name: confirm the connection id + aws_direct_connect_confirm_connection: + connection_id: dxcon-xxxxxxxx +''' + +RETURN = ''' + +connection_state: + description: The state of the connection. + returned: always + type: str + sample: pending +''' + +import traceback +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) + +try: + from botocore.exceptions import BotoCoreError, ClientError +except Exception: + pass + # handled by imported AnsibleAWSModule + +retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} + + +@AWSRetry.backoff(**retry_params) +def describe_connections(client, params): + return client.describe_connections(**params) + + +def find_connection_id(client, connection_id=None, connection_name=None): + params = {} + if connection_id: + params['connectionId'] = connection_id + try: + response = describe_connections(client, params) + except (BotoCoreError, ClientError) as e: + if connection_id: + msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + else: + msg = "Failed to describe DirectConnect connections" + raise DirectConnectError(msg=msg, + last_traceback=traceback.format_exc(), + exception=e) + + match = [] + if len(response.get('connections', [])) == 1 and connection_id: + if response['connections'][0]['connectionState'] != 'deleted': + match.append(response['connections'][0]['connectionId']) + + for conn in response.get('connections', []): + if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': + match.append(conn['connectionId']) + + if len(match) == 1: + return match[0] + else: + raise DirectConnectError(msg="Could not find a valid DirectConnect connection") + + +def get_connection_state(client, connection_id): + try: + response = describe_connections(client, dict(connectionId=connection_id)) + return response['connections'][0]['connectionState'] + except (BotoCoreError, ClientError, IndexError) as e: + raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id), + last_traceback=traceback.format_exc(), + exception=e) + + +def main(): + argument_spec = dict( + connection_id=dict(), + name=dict() + ) + module = AnsibleAWSModule(argument_spec=argument_spec, + mutually_exclusive=[['connection_id', 'name']], + required_one_of=[['connection_id', 'name']]) + client = module.client('directconnect') + + connection_id = module.params['connection_id'] + connection_name = module.params['name'] + + changed = False + connection_state = None + try: + connection_id = find_connection_id(client, + connection_id, + connection_name) + connection_state = get_connection_state(client, connection_id) + if connection_state == 'ordering': + client.confirm_connection(connectionId=connection_id) + changed = True + connection_state = get_connection_state(client, connection_id) + 
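# DirectConnectError carries the original botocore exception and formatted traceback; the handler below surfaces them when present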
except DirectConnectError as e: + if e.last_traceback: + module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) + else: + module.fail_json(msg=e.msg) + + module.exit_json(changed=changed, connection_state=connection_state) + + +if __name__ == '__main__': + main() From ad7241b13cca57024d20317abcf46d7a59959926 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 12 Aug 2020 13:06:35 +0200 Subject: [PATCH 030/683] Bulk migration to AnsibleAWSModule (#173) * Update comments to reference AnsibleAWSModule rather than AnsibleModule * Bulk re-order imports and split onto one from import per-line. * Add AnsibleAWSModule imports * Migrate boto 2 based modules to AnsibleAWSModule * Move boto3-only modules over to AnsibleAWSModule * Remove extra ec2_argument_spec calls - not needed now we're using AnsibleAWSModule * Remove most HAS_BOTO3 code, it's handled by AnsibleAWSModule * Handle missing Boto 2 consistently (HAS_BOTO) * Remove AnsibleModule imports * Changelog fragment --- aws_api_gateway.py | 6 +- aws_direct_connect_gateway.py | 38 +++++----- aws_direct_connect_link_aggregation_group.py | 45 +++++------ aws_kms_info.py | 30 ++++---- aws_s3_bucket_info.py | 22 ++---- cloudfront_info.py | 23 +++--- cloudwatchlogs_log_group.py | 29 +++---- cloudwatchlogs_log_group_info.py | 27 +++---- data_pipeline.py | 40 +++++----- dynamodb_table.py | 33 ++++---- dynamodb_ttl.py | 24 +++--- ec2_ami_copy.py | 5 +- ec2_asg.py | 2 +- ec2_customer_gateway.py | 54 +++++-------- ec2_eip.py | 8 +- ec2_elb.py | 26 +++---- ec2_elb_info.py | 25 +++---- ec2_instance_info.py | 42 +++++------ ec2_lc.py | 79 +++++++++----------- ec2_lc_find.py | 11 ++- ec2_lc_info.py | 38 ++++------ ec2_scaling_policy.py | 33 ++++---- ec2_snapshot_copy.py | 26 +++---- ec2_vpc_egress_igw.py | 6 +- ec2_vpc_endpoint.py | 48 +++++------- ec2_vpc_endpoint_info.py | 33 +++----- ec2_vpc_igw_info.py | 29 +++---- ec2_vpc_nat_gateway.py | 46 +++++------- ec2_vpc_nat_gateway_info.py | 34 +++------ ec2_vpc_peer.py | 30 ++++---- ec2_vpc_peering_info.py | 34 +++------ ec2_vpc_route_table_info.py | 21 +++--- ec2_vpc_vgw.py | 23 +++--- ec2_vpc_vgw_info.py | 30 +++----- ec2_win_password.py | 10 +-- ecs_attribute.py | 23 +++--- ecs_cluster.py | 22 +++--- elasticache.py | 26 +++---- elasticache_parameter_group.py | 37 +++++---- elasticache_snapshot.py | 31 ++++---- elasticache_subnet_group.py | 14 ++-- elb_application_lb_info.py | 36 ++++----- elb_classic_lb.py | 22 +++--- elb_instance.py | 26 +++---- elb_target.py | 52 ++++++------- elb_target_group_info.py | 40 ++++------ execute_lambda.py | 24 +++--- iam.py | 21 +++--- iam_cert.py | 19 ++--- iam_managed_policy.py | 32 ++++---- iam_mfa_device_info.py | 26 ++----- iam_server_certificate_info.py | 18 ++--- kinesis_stream.py | 38 +++++----- lambda_alias.py | 37 ++++----- lambda_event.py | 39 ++++------ rds.py | 15 ++-- rds_param_group.py | 52 ++++++------- rds_subnet_group.py | 15 ++-- redshift.py | 8 +- redshift_subnet_group.py | 16 ++-- route53.py | 16 ++-- route53_health_check.py | 14 ++-- route53_info.py | 23 +++--- s3_logging.py | 24 +++--- s3_sync.py | 32 +++----- s3_website.py | 37 ++++----- sts_session_token.py | 24 +++--- 67 files changed, 788 insertions(+), 1081 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index f67bae92808..8be32b12289 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -174,12 +174,12 @@ try: import botocore except ImportError: - # HAS_BOTOCORE taken care of in AnsibleAWSModule - pass + pass # Handled 
by AnsibleAWSModule import traceback from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def main(): diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index a7084faaeb7..2e0c3a0fa81 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -101,18 +101,16 @@ try: import botocore - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - ) + pass # Handled by AnsibleAWSModule + from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn + def dx_gateway_info(client, gateway_id, module): try: @@ -340,20 +338,18 @@ def ensure_absent(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']), - name=dict(), - amazon_asn=dict(), - virtual_gateway_id=dict(), - direct_connect_gateway_id=dict(), - wait_timeout=dict(type='int', default=320))) + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(), + amazon_asn=dict(), + virtual_gateway_id=dict(), + direct_connect_gateway_id=dict(), + wait_timeout=dict(type='int', default=320), + ) required_if = [('state', 'present', ['name', 'amazon_asn']), ('state', 'absent', ['direct_connect_gateway_id'])] - module = AnsibleModule(argument_spec=argument_spec, - required_if=required_if) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required for this module') + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=required_if) state = module.params.get('state') diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 28af9bc103c..aef1576c528 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -161,29 +161,24 @@ returned: when I(state=present) """ -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, -) -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import ( - DirectConnectError, - delete_connection, - delete_virtual_interface, - disassociate_connection_and_lag, -) import traceback import time try: import botocore except Exception: - pass - # handled by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info + +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag def lag_status(client, lag_id): @@ -408,8 +403,7 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), name=dict(), link_aggregation_group_id=dict(), @@ -422,14 +416,13 @@ def main(): force_delete=dict(type='bool', default=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=120), - )) - - module = AnsibleModule(argument_spec=argument_spec, - required_one_of=[('link_aggregation_group_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))]) + ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[('link_aggregation_group_id', 'name')], + required_if=[('state', 'present', ('location', 'bandwidth'))], + ) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) if not region: diff --git a/aws_kms_info.py b/aws_kms_info.py index defccf70342..eced250e158 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -215,17 +215,19 @@ ''' -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - import traceback try: import botocore except ImportError: - pass # caught by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict # Caching lookup for aliases _aliases = dict() @@ -399,22 +401,16 @@ def get_kms_info(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(type='dict'), - pending_deletion=dict(type='bool', default=False) - ) + argument_spec = dict( + filters=dict(type='dict'), + pending_deletion=dict(type='bool', default=False), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) if module._name == 'aws_kms_facts': module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", 
date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 and botocore are required for this module') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 735bba97f74..ca47bf42db0 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -54,16 +54,14 @@ try: import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - ec2_argument_spec, - HAS_BOTO3, - camel_dict_to_snake_dict, - get_aws_connection_info, - ) + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_bucket_list(module, connection): @@ -91,18 +89,14 @@ def main(): result = {} # Including ec2 argument spec - module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True) + module = AnsibleAWSModule(argument_spec={}, supports_check_mode=True) is_old_facts = module._name == 'aws_s3_bucket_facts' if is_old_facts: module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') - # Verify Boto3 is used - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - # Set up connection - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3) + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) diff --git a/cloudfront_info.py b/cloudfront_info.py index bd81ac53b54..f395ee801dc 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -262,16 +262,19 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict -from ansible.module_utils.basic import AnsibleModule from functools import partial import traceback try: import botocore except ImportError: - pass # will be caught by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class CloudFrontServiceManager: @@ -577,8 +580,7 @@ def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, ali def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( distribution_id=dict(required=False, type='str'), 
invalidation_id=dict(required=False, type='str'), origin_access_identity_id=dict(required=False, type='str'), @@ -596,18 +598,15 @@ def main(): list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'), list_invalidations=dict(required=False, default=False, type='bool'), list_streaming_distributions=dict(required=False, default=False, type='bool'), - summary=dict(required=False, default=False, type='bool') - )) + summary=dict(required=False, default=False, type='bool'), + ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) is_old_facts = module._name == 'cloudfront_facts' if is_old_facts: module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', " "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') - service_mgr = CloudFrontServiceManager(module) distribution_id = module.params.get('distribution_id') diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index cd1e94cfb53..93138c13773 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -129,19 +129,18 @@ ''' import traceback -from ansible.module_utils._text import to_native -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - camel_dict_to_snake_dict, - boto3_conn, - ec2_argument_spec, - get_aws_connection_info, - ) try: import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): @@ -237,8 +236,7 @@ def describe_log_group(client, log_group_name, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( log_group_name=dict(required=True, type='str'), state=dict(choices=['present', 'absent'], default='present'), @@ -246,14 +244,11 @@ def main(): tags=dict(required=False, type='dict'), retention=dict(required=False, type='int'), purge_retention_policy=dict(required=False, type='bool', default=False), - overwrite=dict(required=False, type='bool', default=False) - )) + overwrite=dict(required=False, type='bool', default=False), + ) mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] - module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') + module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index eae18b97c37..98fe63195b7 100644 --- a/cloudwatchlogs_log_group_info.py +++ 
b/cloudwatchlogs_log_group_info.py @@ -72,19 +72,18 @@ ''' import traceback -from ansible.module_utils._text import to_native -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - camel_dict_to_snake_dict, - boto3_conn, - ec2_argument_spec, - get_aws_connection_info, - ) try: import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def describe_log_group(client, log_group_name, module): @@ -104,19 +103,15 @@ def describe_log_group(client, log_group_name, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( log_group_name=dict(), - )) + ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'cloudwatchlogs_log_group_facts': module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/data_pipeline.py b/data_pipeline.py index 8394078fe68..34cf4df343c 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -205,14 +205,16 @@ try: import boto3 from botocore.exceptions import ClientError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict from ansible.module_utils._text import to_text +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] @@ -601,25 +603,19 @@ def create_pipeline(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'), - description=dict(required=False, default=''), - objects=dict(required=False, type='list', default=[], elements='dict'), - parameters=dict(required=False, type='list', default=[], elements='dict'), - timeout=dict(required=False, type='int', default=300), - state=dict(default='present', choices=['present', 'absent', - 'active', 'inactive']), - tags=dict(required=False, type='dict', default={}), - values=dict(required=False, type='list', default=[], 
elements='dict') - ) + argument_spec = dict( + name=dict(required=True), + version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'), + description=dict(required=False, default=''), + objects=dict(required=False, type='list', default=[], elements='dict'), + parameters=dict(required=False, type='list', default=[], elements='dict'), + timeout=dict(required=False, type='int', default=300), + state=dict(default='present', choices=['present', 'absent', + 'active', 'inactive']), + tags=dict(required=False, type='dict', default={}), + values=dict(required=False, type='list', default=[], elements='dict'), ) - module = AnsibleModule(argument_spec, supports_check_mode=False) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required for the datapipeline module!') + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) diff --git a/dynamodb_table.py b/dynamodb_table.py index e6ae5b1af1f..47b8bc9f678 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -183,26 +183,24 @@ from boto.dynamodb2.types import STRING, NUMBER, BINARY from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError from boto.dynamodb2.exceptions import ValidationException - HAS_BOTO = True - DYNAMO_TYPE_MAP = { 'STRING': STRING, 'NUMBER': NUMBER, 'BINARY': BINARY } - -except ImportError: - HAS_BOTO = False - -try: + # Boto 2 is mandatory, Boto3 is only needed for tagging import botocore - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by ec2.HAS_BOTO and ec2.HAS_BOTO3 -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 DYNAMO_TYPE_DEFAULT = 'STRING' @@ -457,8 +455,7 @@ def get_indexes(all_indexes): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(default='present', choices=['present', 'absent']), name=dict(required=True, type='str'), hash_key_name=dict(type='str'), @@ -470,11 +467,13 @@ def main(): indexes=dict(default=[], type='list', elements='dict'), tags=dict(type='dict'), wait_for_active_timeout=dict(default=60, type='int'), - )) + ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True) + supports_check_mode=True, + check_boto3=False, + ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 330bf30f14e..52b5055db8b 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -71,15 +71,12 @@ try: import botocore except 
ImportError: - pass + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_current_ttl_state(c, table_name): @@ -123,19 +120,16 @@ def set_ttl_state(c, table_name, state, attribute_name): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(choices=['enable', 'disable']), table_name=dict(required=True), - attribute_name=dict(required=True)) + attribute_name=dict(required=True), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - elif distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'): + if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'): # TTL was added in this version. module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24')) diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 638db1c3404..79ebf577394 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -134,7 +134,8 @@ ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible.module_utils._text import to_native try: @@ -147,7 +148,7 @@ def copy_image(module, ec2): """ Copies an AMI - module : AnsibleModule object + module : AnsibleAWSModule object ec2: ec2 connection object """ diff --git a/ec2_asg.py b/ec2_asg.py index f466a8664f4..568b0fca2ca 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -538,7 +538,7 @@ try: import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index bda1626640b..5c10f4655e4 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -111,23 +111,15 @@ try: from botocore.exceptions import ClientError - HAS_BOTOCORE = True -except ImportError: - HAS_BOTOCORE = False - -try: import boto3 - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - AWSRetry, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class Ec2CustomerGatewayManager: @@ -199,29 +191,21 @@ def describe_gateways(self, ip_address): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - bgp_asn=dict(required=False, type='int'), - ip_address=dict(required=True), - name=dict(required=True), - routing=dict(default='dynamic', choices=['dynamic', 'static']), - state=dict(default='present', choices=['present', 'absent']), - ) + argument_spec = dict( + bgp_asn=dict(required=False, type='int'), + ip_address=dict(required=True), + name=dict(required=True), + routing=dict(default='dynamic', choices=['dynamic', 'static']), + state=dict(default='present', choices=['present', 'absent']), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_if=[ - ('routing', 'dynamic', ['bgp_asn']) - ] - ) - - if not HAS_BOTOCORE: - module.fail_json(msg='botocore is required.') - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ('routing', 'dynamic', ['bgp_asn']) + ] + ) gw_mgr = Ec2CustomerGatewayManager(module) diff --git a/ec2_eip.py b/ec2_eip.py index 00130bf3f93..6aa2a531069 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -222,8 +222,10 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): @@ -499,7 +501,7 @@ def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool def generate_tag_dict(module, tag_name, tag_value): - # type: (AnsibleModule, str, str) -> Optional[Dict] + # type: (AnsibleAWSModule, str, str) -> Optional[Dict] """ Generates a dictionary to be passed as a filter to Amazon """ if tag_name and not tag_value: if tag_name.startswith('tag:'): diff --git a/ec2_elb.py b/ec2_elb.py index 349002d5852..d9a6231f6b5 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -85,17 +85,14 @@ import boto.ec2.autoscale import boto.ec2.elb from boto.regioninfo import RegionInfo - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class ElbManager: @@ -314,20 +311,19 @@ def _get_instance(self): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state={'required': True, 'choices': ['present', 'absent']}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'required': False, 'default': 0, 'type': 'int'} - ) + wait_timeout={'required': False, 'default': 0, 'type': 'int'}, ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, + check_boto3=False, ) if not HAS_BOTO: diff --git a/ec2_elb_info.py b/ec2_elb_info.py index c4b1bd67360..b18e502de34 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -74,21 +74,18 @@ import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, -) - try: import boto.ec2.elb from boto.ec2.tag import Tag from boto.exception import BotoServerError - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by ec2.HAS_BOTO + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO class ElbInformation(object): @@ -222,13 +219,11 @@ def list_elbs(self): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( names={'default': [], 'type': 'list', 'elements': 'str'} ) - ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) if module._name == 'ec2_elb_facts': module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", date='2021-12-01', collection_name='community.aws') diff --git a/ec2_instance_info.py b/ec2_instance_info.py index c9820a58f59..707df983c1b 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -496,18 +496,15 @@ try: import boto3 from botocore.exceptions import ClientError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_conn, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_ec2_instances(connection, module): @@ -538,26 +535,21 @@ def list_ec2_instances(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - instance_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') - ) + argument_spec = dict( + instance_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type='dict') ) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[ - ['instance_ids', 'filters'] - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['instance_ids', 'filters'] + ], + supports_check_mode=True, + ) if module._name == 'ec2_instance_facts': module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/ec2_lc.py b/ec2_lc.py index 813bfe04cb4..59d2ec4cd7e 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -449,22 +449,21 @@ import traceback -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, - ec2_argument_spec, - ec2_connect, - camel_dict_to_snake_dict, - get_ec2_security_group_ids_from_names, - boto3_conn, - snake_dict_to_camel_dict, - HAS_BOTO3, - ) -from ansible.module_utils._text import to_text -from ansible.module_utils.basic import AnsibleModule try: import botocore except ImportError: - pass + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_text + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict def create_block_device_meta(module, volume): @@ -650,42 +649,36 @@ def delete_launch_config(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - image_id=dict(), - instance_id=dict(), - key_name=dict(), - security_groups=dict(default=[], type='list', elements='str'), - user_data=dict(), - user_data_path=dict(type='path'), - kernel_id=dict(), - volumes=dict(type='list', elements='dict'), - instance_type=dict(), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), - ramdisk_id=dict(), - instance_profile_name=dict(), - ebs_optimized=dict(default=False, type='bool'), - associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool'), - classic_link_vpc_security_groups=dict(type='list', elements='str'), - classic_link_vpc_id=dict(), - vpc_id=dict(), - placement_tenancy=dict(choices=['default', 'dedicated']) - ) + argument_spec = dict( 
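+ # The common AWS connection arguments (region, profile, credentials, ...) are injected by AnsibleAWSModule, so ec2_argument_spec() is no longer needed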
+ name=dict(required=True), + image_id=dict(), + instance_id=dict(), + key_name=dict(), + security_groups=dict(default=[], type='list', elements='str'), + user_data=dict(), + user_data_path=dict(type='path'), + kernel_id=dict(), + volumes=dict(type='list', elements='dict'), + instance_type=dict(), + state=dict(default='present', choices=['present', 'absent']), + spot_price=dict(type='float'), + ramdisk_id=dict(), + instance_profile_name=dict(), + ebs_optimized=dict(default=False, type='bool'), + associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), + instance_monitoring=dict(default=False, type='bool'), + assign_public_ip=dict(type='bool'), + classic_link_vpc_security_groups=dict(type='list', elements='str'), + classic_link_vpc_id=dict(), + vpc_id=dict(), + placement_tenancy=dict(choices=['default', 'dedicated']) ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['user_data', 'user_data_path']] + mutually_exclusive=[['user_data', 'user_data_path']], ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 700c84610c5..e2a31cef47c 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -137,8 +137,9 @@ ''' import re -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def find_launch_configs(client, module): @@ -191,15 +192,13 @@ def find_launch_configs(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( name_regex=dict(required=True), sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), limit=dict(required=False, type='int'), ) - ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, ) diff --git a/ec2_lc_info.py b/ec2_lc_info.py index d3e46cc298e..8ddc71083e9 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -154,17 +154,13 @@ try: import boto3 from botocore.exceptions import ClientError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_launch_configs(connection, module): @@ -203,26 +199,20 @@ def list_launch_configs(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=False, 
default=[], type='list', elements='str'), - sort=dict(required=False, default=None, - choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), - sort_order=dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start=dict(required=False, type='int'), - sort_end=dict(required=False, type='int'), - ) + argument_spec = dict( + name=dict(required=False, default=[], type='list', elements='str'), + sort=dict(required=False, default=None, + choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), + sort_order=dict(required=False, default='ascending', + choices=['ascending', 'descending']), + sort_start=dict(required=False, type='int'), + sort_end=dict(required=False, type='int'), ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec) if module._name == 'ec2_lc_facts': module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 141731199d9..0b3eca1c3a1 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -74,13 +74,11 @@ except ImportError: pass # Taken care of by ec2.HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def create_scaling_policy(connection, module): @@ -156,20 +154,17 @@ def delete_scaling_policy(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), - asg_name=dict(required=True, type='str'), - scaling_adjustment=dict(type='int'), - min_adjustment_step=dict(type='int'), - cooldown=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - ) + argument_spec = dict( + name=dict(required=True, type='str'), + adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + asg_name=dict(required=True, type='str'), + scaling_adjustment=dict(type='int'), + min_adjustment_step=dict(type='int'), + cooldown=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']), ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 65ee1c980f5..25101cbac17 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -110,23 +110,26 @@ ''' import traceback -from 
ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict) -from ansible.module_utils._text import to_native try: import boto3 from botocore.exceptions import ClientError, WaiterError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def copy_snapshot(module, ec2): """ Copies an EC2 Snapshot to another region - module : AnsibleModule object + module : AnsibleAWSModule object ec2: ec2 connection object """ @@ -168,8 +171,7 @@ def copy_snapshot(module, ec2): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( source_region=dict(required=True), source_snapshot_id=dict(required=True), description=dict(default=''), @@ -177,12 +179,10 @@ def main(): kms_key_id=dict(type='str', required=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), - tags=dict(type='dict'))) - - module = AnsibleModule(argument_spec=argument_spec) + tags=dict(type='dict'), + ) - if not HAS_BOTO3: - module.fail_json(msg='botocore and boto3 are required.') + module = AnsibleAWSModule(argument_spec=argument_spec) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) client = boto3_conn(module, conn_type='client', resource='ec2', diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index b2f481b86dd..d462696d0af 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -71,7 +71,7 @@ def delete_eigw(module, conn, eigw_id): """ Delete EIGW. - module : AnsibleModule object + module : AnsibleAWSModule object conn : boto3 client connection object eigw_id : ID of the EIGW to delete """ @@ -99,7 +99,7 @@ def create_eigw(module, conn, vpc_id): """ Create EIGW. - module : AnsibleModule object + module : AnsibleAWSModule object conn : boto3 client connection object vpc_id : ID of the VPC we are operating on """ @@ -139,7 +139,7 @@ def describe_eigws(module, conn, vpc_id): """ Describe EIGWs. 
- module : AnsibleModule object + module : AnsibleAWSModule object conn : boto3 client connection object vpc_id : ID of the VPC we are operating on """ diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 833e64ae1db..e4e98fb4067 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -183,17 +183,15 @@ try: import botocore except ImportError: - pass # will be picked up by imported HAS_BOTO3 - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (get_aws_connection_info, - boto3_conn, - ec2_argument_spec, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) + pass # Handled by AnsibleAWSModule + from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + def date_handler(obj): return obj.isoformat() if hasattr(obj, 'isoformat') else obj @@ -335,35 +333,29 @@ def setup_removal(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - vpc_id=dict(), - service=dict(), - policy=dict(type='json'), - policy_file=dict(type='path', aliases=['policy_path']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - route_table_ids=dict(type='list', elements='str'), - vpc_endpoint_id=dict(), - client_token=dict(), - ) + argument_spec = dict( + vpc_id=dict(), + service=dict(), + policy=dict(type='json'), + policy_file=dict(type='path', aliases=['policy_path']), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=320, required=False), + route_table_ids=dict(type='list', elements='str'), + vpc_endpoint_id=dict(), + client_token=dict(), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[['policy', 'policy_file']], required_if=[ ['state', 'present', ['vpc_id', 'service']], ['state', 'absent', ['vpc_endpoint_id']], - ] + ], ) # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore and boto3 are required for this module') - state = module.params.get('state') try: diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index eeb7a7d80d1..a48b886a179 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -111,17 +111,14 @@ try: import botocore except ImportError: - pass # will be picked up from imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - camel_dict_to_snake_dict, - AWSRetry, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def date_handler(obj): @@ -164,23 +161,17 @@ def get_endpoints(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - query=dict(choices=['services', 'endpoints'], required=True), - filters=dict(default={}, type='dict'), - vpc_endpoint_ids=dict(type='list', elements='str'), - ) + argument_spec = dict( + query=dict(choices=['services', 'endpoints'], required=True), + filters=dict(default={}, type='dict'), + vpc_endpoint_ids=dict(type='list', elements='str'), ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_endpoint_facts': module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore and boto3 are required.') - try: region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 2d8244f1282..9cca904fa7e 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -91,16 +91,13 @@ try: import botocore except ImportError: - pass # will be captured by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list def get_internet_gateway_info(internet_gateway): @@ -128,22 +125,16 @@ def list_internet_gateways(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(type='dict', default=dict()), - internet_gateway_ids=dict(type='list', default=None, elements='str') - ) + argument_spec = dict( + filters=dict(type='dict', default=dict()), + internet_gateway_ids=dict(type='list', default=None, elements='str'), ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_igw_facts': module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws') # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore and boto3 are required.') - try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 4272dc648c4..2216ffe2276 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -202,15 +202,12 @@ 
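The hunk below repeats the conversion pattern applied throughout this patch: the botocore import is guarded by try/except with a bare pass, because AnsibleAWSModule validates the boto3/botocore requirement itself when the module object is constructed and fails with a standardized message. A minimal sketch of the skeleton the converted boto3 modules converge on (the empty argument_spec is illustrative only, not taken from any one file here):

try:
    import botocore  # noqa: F401 -- absence is reported by AnsibleAWSModule below
except ImportError:
    pass  # Handled by AnsibleAWSModule

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule


def main():
    # Constructing the module performs the check that the removed
    # 'if not HAS_BOTO3: module.fail_json(...)' blocks used to do by hand.
    module = AnsibleAWSModule(argument_spec=dict(), supports_check_mode=True)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
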
try: import botocore except ImportError: - pass # caught by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict DRY_RUN_GATEWAYS = [ @@ -933,35 +930,28 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0, def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - subnet_id=dict(type='str'), - eip_address=dict(type='str'), - allocation_id=dict(type='str'), - if_exist_do_not_create=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - release_eip=dict(type='bool', default=False), - nat_gateway_id=dict(type='str'), - client_token=dict(type='str'), - ) + argument_spec = dict( + subnet_id=dict(type='str'), + eip_address=dict(type='str'), + allocation_id=dict(type='str'), + if_exist_do_not_create=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=320, required=False), + release_eip=dict(type='bool', default=False), + nat_gateway_id=dict(type='str'), + client_token=dict(type='str'), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ ['allocation_id', 'eip_address'] ], required_if=[['state', 'absent', ['nat_gateway_id']], - ['state', 'present', ['subnet_id']]] + ['state', 'present', ['subnet_id']]], ) - # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore/boto3 is required.') - state = module.params.get('state').lower() check_mode = module.check_mode subnet_id = module.params.get('subnet_id') diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index f076d38a833..7f49c708857 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -81,17 +81,14 @@ try: import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def date_handler(obj): @@ -123,24 +120,17 @@ def 
get_nat_gateways(client, module, nat_gateway_id=None): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(default={}, type='dict'), - nat_gateway_ids=dict(default=[], type='list', elements='str'), - ) + argument_spec = dict( + filters=dict(default={}, type='dict'), + nat_gateway_ids=dict(default=[], type='list', elements='str'), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True,) if module._name == 'ec2_vpc_nat_gateway_facts': module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", date='2021-12-01', collection_name='community.aws') - # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore/boto3 is required.') - try: region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 99c8139b6a4..9b74a5f2c3f 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -219,13 +219,14 @@ try: import botocore except ImportError: - pass # caught by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule import distutils.version import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code @@ -392,17 +393,14 @@ def find_pcx_by_id(pcx_id, client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - vpc_id=dict(), - peer_vpc_id=dict(), - peer_region=dict(), - peering_id=dict(), - peer_owner_id=dict(), - tags=dict(required=False, type='dict'), - state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']) - ) + argument_spec = dict( + vpc_id=dict(), + peer_vpc_id=dict(), + peer_region=dict(), + peering_id=dict(), + peer_owner_id=dict(), + tags=dict(required=False, type='dict'), + state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), ) required_if = [ ('state', 'present', ['vpc_id', 'peer_vpc_id']), @@ -410,10 +408,8 @@ def main(): ('state', 'reject', ['peering_id']) ] - module = AnsibleModule(argument_spec=argument_spec, required_if=required_if) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if) - if not HAS_BOTO3: - module.fail_json(msg='json, botocore and boto3 are required.') state = module.params.get('state') peering_id = module.params.get('peering_id') vpc_id = module.params.get('vpc_id') diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index cffcf6f9aed..acd5aed83e1 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -72,17 +72,14 @@ try: import botocore except ImportError: - pass # will be picked up by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, - ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) +from 
ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def date_handler(obj): @@ -103,23 +100,16 @@ def get_vpc_peers(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(default=dict(), type='dict'), - peer_connection_ids=dict(default=None, type='list', elements='str'), - ) + argument_spec = dict( + filters=dict(default=dict(), type='dict'), + peer_connection_ids=dict(default=None, type='list', elements='str'), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True,) if module._name == 'ec2_vpc_peering_facts': module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws') - # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='botocore and boto3 are required.') - try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) except NameError as e: diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index 8af3935bd36..1b8dc09c6c5 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -53,12 +53,14 @@ try: import boto.vpc from boto.exception import BotoServerError - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO def get_route_table_info(route_table): @@ -98,15 +100,12 @@ def list_ec2_vpc_route_tables(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(default=None, type='dict') - ) + argument_spec = dict( + filters=dict(default=None, type='dict'), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) if module._name == 'ec2_vpc_route_table_facts': module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", date='2021-12-01', collection_name='community.aws') diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 1ce3df5672e..b44d36c1142 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -117,15 +117,17 @@ try: import botocore import boto3 - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native + +from 
ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry -from ansible.module_utils._text import to_native def get_vgw_info(vgws): @@ -539,8 +541,7 @@ def ensure_vgw_absent(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(default='present', choices=['present', 'absent']), name=dict(), vpn_gateway_id=dict(), @@ -550,12 +551,8 @@ def main(): type=dict(default='ipsec.1', choices=['ipsec.1']), tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), ) - ) - module = AnsibleModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['name']]]) - - if not HAS_BOTO3: - module.fail_json(msg='json and boto3 is required.') + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['name']]]) state = module.params.get('state').lower() diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 43a5c2f8c4e..4c8f0af1c28 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -94,16 +94,13 @@ try: import botocore except ImportError: - pass # will be captured by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list, - HAS_BOTO3, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list def get_virtual_gateway_info(virtual_gateway): @@ -134,22 +131,15 @@ def list_virtual_gateways(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None, elements='str') - ) + argument_spec = dict( + filters=dict(type='dict', default=dict()), + vpn_gateway_ids=dict(type='list', default=None, elements='str') ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_vgw_facts': module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws') - # Validate Requirements - if not HAS_BOTO3: - module.fail_json(msg='json and boto3 is required.') - try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) connection = 
boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/ec2_win_password.py b/ec2_win_password.py index e42fa09e35a..8f46da5602d 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -111,14 +111,15 @@ except ImportError: HAS_CRYPTOGRAPHY = False -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect from ansible.module_utils._text import to_bytes +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect + def setup_module_object(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( instance_id=dict(required=True), key_file=dict(required=False, default=None, type='path'), key_passphrase=dict(no_log=True, default=None, required=False), @@ -126,7 +127,6 @@ def setup_module_object(): wait=dict(type='bool', default=False, required=False), wait_timeout=dict(default=120, required=False, type='int'), ) - ) module = AnsibleAWSModule(argument_spec=argument_spec) return module diff --git a/ecs_attribute.py b/ecs_attribute.py index db9de79b480..d96e81dd000 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -113,12 +113,12 @@ try: import boto3 from botocore.exceptions import ClientError, EndpointConnectionError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class EcsAttributes(object): @@ -254,21 +254,20 @@ def attrs_get_by_name(self, attrs): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=False, default='present', choices=['present', 'absent']), cluster=dict(required=True, type='str'), ec2_instance_id=dict(required=True, type='str'), attributes=dict(required=True, type='list', elements='dict'), - )) + ) required_together = [['cluster', 'ec2_instance_id', 'attributes']] - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_together=required_together) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=required_together, + ) cluster = module.params['cluster'] ec2_instance_id = module.params['ec2_instance_id'] diff --git a/ecs_cluster.py b/ecs_cluster.py index 12d453f6ae9..eb68b933ce3 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -107,12 +107,12 @@ try: import boto3 - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class EcsClusterManager: @@ -157,19 +157,19 @@ def delete_cluster(self, clusterName): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent', 'has_instances']), name=dict(required=True, type='str'), delay=dict(required=False, type='int', default=10), repeat=dict(required=False, type='int', default=10) - )) + ) required_together = [['state', 'name']] - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=required_together, + ) cluster_mgr = EcsClusterManager(module) try: diff --git a/elasticache.py b/elasticache.py index a1e0f88be89..b74dce611e2 100644 --- a/elasticache.py +++ b/elasticache.py @@ -128,19 +128,17 @@ """ from time import sleep from traceback import format_exc -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec, - get_aws_connection_info, - boto3_conn, - HAS_BOTO3, - camel_dict_to_snake_dict, - ) try: import boto3 import botocore except ImportError: - pass # will be detected by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class ElastiCacheManager(object): @@ -485,8 +483,7 @@ def _get_nodes_to_remove(self): def main(): """ elasticache ansible module """ - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent', 'rebooted']), name=dict(required=True), engine=dict(default='memcached'), @@ -501,16 +498,13 @@ def main(): security_group_ids=dict(default=[], type='list', elements='str'), zone=dict(), wait=dict(default=True, type='bool'), - hard_modify=dict(type='bool') - )) + hard_modify=dict(type='bool'), + ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) name = module.params['name'] diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 95dacf52b23..b30a00bfacd 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -105,19 +105,22 @@ changed: true """ -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict -from ansible.module_utils._text import to_text -from ansible.module_utils.six import string_types import traceback try: import boto3 import botocore - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.six import 
string_types + +# import module snippets +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def create(module, conn, name, group_family, description): @@ -275,20 +278,14 @@ def get_info(conn, name): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']), - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - state=dict(required=True, choices=['present', 'absent', 'reset']), - values=dict(type='dict'), - ) + argument_spec = dict( + group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']), + name=dict(required=True, type='str'), + description=dict(default='', type='str'), + state=dict(required=True, choices=['present', 'absent', 'reset']), + values=dict(type='dict'), ) - module = AnsibleModule(argument_spec=argument_spec) - - if not HAS_BOTO3: - module.fail_json(msg='boto required for this module') + module = AnsibleAWSModule(argument_spec=argument_spec) parameter_group_family = module.params.get('group_family') parameter_group_name = module.params.get('name') diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 9f65d6081f0..373654d99fa 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -116,12 +116,13 @@ try: import boto3 import botocore - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def create(module, connection, replication_id, cluster_id, name): @@ -170,22 +171,16 @@ def delete(module, connection, name): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), - replication_id=dict(type='str'), - cluster_id=dict(type='str'), - target=dict(type='str'), - bucket=dict(type='str'), - ) + argument_spec = dict( + name=dict(required=True, type='str'), + state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), + replication_id=dict(type='str'), + cluster_id=dict(type='str'), + target=dict(type='str'), + bucket=dict(type='str'), ) - module = AnsibleModule(argument_spec=argument_spec) - - if not HAS_BOTO3: - module.fail_json(msg='boto required for this module') + module = AnsibleAWSModule(argument_spec=argument_spec) name = module.params.get('name') state = module.params.get('state') diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 29e7afa65d3..15cbd596843 100644 --- a/elasticache_subnet_group.py +++ 
b/elasticache_subnet_group.py @@ -61,24 +61,22 @@ import boto from boto.elasticache import connect_to_region from boto.exception import BotoServerError - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), ) - ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 796803d8c2f..ef02d59e54f 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -167,17 +167,14 @@ try: import boto3 from botocore.exceptions import ClientError, NoCredentialsError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_elb_listeners(connection, module, elb_arn): @@ -263,25 +260,20 @@ def list_load_balancers(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - load_balancer_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str') - ) + argument_spec = dict( + load_balancer_arns=dict(type='list', elements='str'), + names=dict(type='list', elements='str') ) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arns', 'names']], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['load_balancer_arns', 'names']], + supports_check_mode=True, + ) if module._name == 'elb_application_lb_facts': module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 9c5f1641677..52aee159373 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -372,15 +372,18 @@ import boto.vpc from boto.ec2.elb.healthcheck import HealthCheck 
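# Note on the boto2-only modules in this patch (ec2_scaling_policy,
# elasticache_subnet_group, elb_classic_lb, elb_instance, iam, iam_cert):
# they cannot rely on the automatic boto3 check, so AnsibleAWSModule is
# constructed with check_boto3=False and the legacy HAS_BOTO guard is kept
# by hand. A minimal sketch of that variant, assuming only the imports
# these hunks already use:
try:
    import boto
except ImportError:
    pass  # Handled by HAS_BOTO

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO


def main():
    # check_boto3=False suppresses the boto3 requirement; the boto (v2)
    # requirement still has to be enforced explicitly.
    module = AnsibleAWSModule(argument_spec=dict(), check_boto3=False)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
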
from boto.ec2.tag import Tag - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO + def _throttleable_operation(max_retries): def _operation_wrapper(op): @@ -1223,8 +1226,7 @@ def _get_health_check_target(self): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state={'required': True, 'choices': ['present', 'absent']}, name={'required': True}, listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}, @@ -1246,13 +1248,13 @@ def main(): access_logs={'default': None, 'required': False, 'type': 'dict'}, wait={'default': False, 'type': 'bool', 'required': False}, wait_timeout={'default': 60, 'type': 'int', 'required': False}, - tags={'default': None, 'required': False, 'type': 'dict'} - ) + tags={'default': None, 'required': False, 'type': 'dict'}, ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['security_group_ids', 'security_group_names']] + mutually_exclusive=[['security_group_ids', 'security_group_names']], + check_boto3=False, ) if not HAS_BOTO: diff --git a/elb_instance.py b/elb_instance.py index 187f6bee136..fe10d6cd8dc 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -91,17 +91,14 @@ import boto.ec2.autoscale import boto.ec2.elb from boto.regioninfo import RegionInfo - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AnsibleAWSError, - HAS_BOTO, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class ElbManager: @@ -316,20 +313,19 @@ def _get_instance(self): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state={'required': True, 'choices': ['present', 'absent']}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'required': False, 'default': 0, 'type': 'int'} - ) + wait_timeout={'required': False, 'default': 0, 'type': 'int'}, ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - 
supports_check_mode=True + supports_check_mode=True, + check_boto3=False, ) if not HAS_BOTO: diff --git a/elb_target.py b/elb_target.py index b47de9f457d..3e001eccdac 100644 --- a/elb_target.py +++ b/elb_target.py @@ -110,21 +110,20 @@ import traceback from time import time, sleep -from ansible.module_utils._text import to_native -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - AWSRetry, - ) try: import boto3 from botocore.exceptions import ClientError, BotoCoreError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) @@ -318,27 +317,22 @@ def target_status_check(connection, module, target_group_arn, target, target_sta def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - deregister_unused=dict(type='bool', default=False), - target_az=dict(type='str'), - target_group_arn=dict(type='str'), - target_group_name=dict(type='str'), - target_id=dict(type='str', required=True), - target_port=dict(type='int'), - target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), - target_status_timeout=dict(type='int', default=60), - state=dict(required=True, choices=['present', 'absent'], type='str'), - ) + argument_spec = dict( + deregister_unused=dict(type='bool', default=False), + target_az=dict(type='str'), + target_group_arn=dict(type='str'), + target_group_name=dict(type='str'), + target_id=dict(type='str', required=True), + target_port=dict(type='int'), + target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), + target_status_timeout=dict(type='int', default=60), + state=dict(required=True, choices=['present', 'absent'], type='str'), ) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['target_group_arn', 'target_group_name']] - ) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['target_group_arn', 'target_group_name']], + ) region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 7ab462c26a7..88f670f8e04 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -212,17 +212,14 @@ try: import boto3 from botocore.exceptions import ClientError, NoCredentialsError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - 
boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_target_group_attributes(connection, module, target_group_arn): @@ -297,26 +294,21 @@ def list_target_groups(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - load_balancer_arn=dict(type='str'), - target_group_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str'), - collect_targets_health=dict(default=False, type='bool', required=False) - ) + argument_spec = dict( + load_balancer_arn=dict(type='str'), + target_group_arns=dict(type='list', elements='str'), + names=dict(type='list', elements='str'), + collect_targets_health=dict(default=False, type='bool', required=False), ) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], + supports_check_mode=True, + ) if module._name == 'elb_target_group_facts': module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: diff --git a/execute_lambda.py b/execute_lambda.py index 5e789e009ba..c1372dfaf22 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -133,18 +133,18 @@ try: import botocore - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info + def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( name=dict(), function_arn=dict(), wait=dict(default=True, type='bool'), @@ -152,8 +152,8 @@ def main(): dry_run=dict(default=False, type='bool'), version_qualifier=dict(), payload=dict(default={}, type='dict'), - )) - module = AnsibleModule( + ) + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ @@ -161,9 +161,6 @@ def main(): ] ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - name = module.params.get('name') function_arn = module.params.get('function_arn') await_return = module.params.get('wait') @@ -172,13 +169,10 @@ def main(): version_qualifier = module.params.get('version_qualifier') payload = module.params.get('payload') - if not HAS_BOTO3: - 
module.fail_json(msg='Python module "boto3" is missing, please install it') - if not (name or function_arn): module.fail_json(msg="Must provide either a function_arn or a name to invoke.") - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) if not region: module.fail_json(msg="The AWS region must be specified as an " "environment variable or in the AWS credentials " diff --git a/iam.py b/iam.py index b4c1bcb68d7..eee782c1b1f 100644 --- a/iam.py +++ b/iam.py @@ -184,13 +184,11 @@ except ImportError: pass # Taken care of by ec2.HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO, - boto_exception, - connect_to_aws, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def _paginate(func, attr): @@ -619,8 +617,7 @@ def delete_role(module, iam, name, role_list, prof_list): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( iam_type=dict(required=True, choices=['user', 'group', 'role']), groups=dict(type='list', default=None, required=False, elements='str'), state=dict(required=True, choices=['present', 'absent', 'update']), @@ -636,13 +633,13 @@ def main(): trust_policy=dict(type='dict', default=None, required=False), new_name=dict(default=None, required=False), path=dict(default='/', required=False), - new_path=dict(default=None, required=False) - ) + new_path=dict(default=None, required=False), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[['trust_policy', 'trust_policy_filepath']], + check_boto3=False, ) if not HAS_BOTO: diff --git a/iam_cert.py b/iam_cert.py index 2aad121ea77..8ad5bb88b33 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -116,17 +116,19 @@ state: present ''' -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws import os try: import boto import boto.iam import boto.ec2 - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO def cert_meta(iam, name): @@ -239,8 +241,7 @@ def load_data(cert, key, cert_chain): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), cert=dict(), @@ -249,11 +250,10 @@ def main(): new_name=dict(), path=dict(default='/'), new_path=dict(), - dup_ok=dict(type='bool') - ) + dup_ok=dict(type='bool'), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, 
mutually_exclusive=[ ['new_path', 'key'], @@ -263,6 +263,7 @@ def main(): ['new_name', 'cert'], ['new_name', 'cert_chain'], ], + check_boto3=False, ) if not HAS_BOTO: diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 3b1adece098..552b93b1b23 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -135,19 +135,17 @@ try: import botocore except ImportError: - pass # caught by imported HAS_BOTO3 - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (boto3_conn, - get_aws_connection_info, - ec2_argument_spec, - AWSRetry, - camel_dict_to_snake_dict, - HAS_BOTO3, - compare_policies, - ) + pass # Handled by AnsibleAWSModule + from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def list_policies_with_backoff(iam): @@ -296,8 +294,7 @@ def detach_all_entities(module, iam, policy, **kwargs): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( policy_name=dict(required=True), policy_description=dict(default=''), policy=dict(type='json'), @@ -305,16 +302,13 @@ def main(): only_version=dict(type='bool', default=False), fail_on_delete=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), state=dict(default='present', choices=['present', 'absent']), - )) + ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['policy']]] + required_if=[['state', 'present', ['policy']]], ) - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required for this module') - name = module.params.get('policy_name') description = module.params.get('policy_description') state = module.params.get('state') diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index f8c37a91acf..727242751a6 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -63,17 +63,13 @@ try: import boto3 from botocore.exceptions import ClientError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_mfa_devices(connection, module): @@ -92,20 +88,14 @@ def list_mfa_devices(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - user_name=dict(required=False, default=None) - ) + argument_spec = dict( + user_name=dict(required=False, default=None), ) - module = 
AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec) if module._name == 'iam_mfa_device_facts': module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) if region: connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index d57ef77ca86..9b41aa3bab8 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -82,12 +82,12 @@ try: import boto3 import botocore.exceptions - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_server_certs(iam, name=None): @@ -141,19 +141,15 @@ def get_server_certs(iam, name=None): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( name=dict(type='str'), - )) + ) - module = AnsibleModule(argument_spec=argument_spec,) + module = AnsibleAWSModule(argument_spec=argument_spec,) if module._name == 'iam_server_certificate_facts': module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) diff --git a/kinesis_stream.py b/kinesis_stream.py index c3142137c1b..c16adbea3c2 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -187,12 +187,14 @@ try: import botocore.exceptions except ImportError: - pass # Taken care of by ec2.HAS_BOTO3 + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info + def convert_to_lower(data): """Convert all uppercase keys in dict with lowercase_ @@ -1325,22 +1327,19 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - shards=dict(default=None, required=False, type='int'), - retention_period=dict(default=None, required=False, type='int'), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - wait=dict(default=True, required=False, 
type='bool'), - wait_timeout=dict(default=300, required=False, type='int'), - state=dict(default='present', choices=['present', 'absent']), - encryption_type=dict(required=False, choices=['NONE', 'KMS']), - key_id=dict(required=False, type='str'), - encryption_state=dict(required=False, choices=['enabled', 'disabled']), - ) + argument_spec = dict( + name=dict(required=True), + shards=dict(default=None, required=False, type='int'), + retention_period=dict(default=None, required=False, type='int'), + tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + wait=dict(default=True, required=False, type='bool'), + wait_timeout=dict(default=300, required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + encryption_type=dict(required=False, choices=['NONE', 'KMS']), + key_id=dict(required=False, type='str'), + encryption_state=dict(required=False, choices=['enabled', 'disabled']), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, ) @@ -1363,9 +1362,6 @@ def main(): if retention_period < 24: module.fail_json(msg='Retention period can not be less than 24 hours.') - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') - check_mode = module.check_mode try: region, ec2_url, aws_connect_kwargs = ( diff --git a/lambda_alias.py b/lambda_alias.py index 75193221b5b..bd547a41341 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -146,17 +146,13 @@ try: import boto3 from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class AWSConnection: @@ -354,28 +350,21 @@ def main(): :return dict: ansible facts """ - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - function_name=dict(required=True), - name=dict(required=True, aliases=['alias_name']), - function_version=dict(type='int', required=False, default=0, aliases=['version']), - description=dict(required=False, default=None), - ) + argument_spec = dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + function_name=dict(required=True), + name=dict(required=True, aliases=['alias_name']), + function_version=dict(type='int', required=False, default=0, aliases=['version']), + description=dict(required=False, default=None), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[], - required_together=[] + required_together=[], ) - # validate dependencies - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required for this module.') - aws = AWSConnection(module, ['lambda']) validate_params(module, aws) diff --git a/lambda_event.py b/lambda_event.py index 6dbbfb6590b..e0009d13582 100644 --- a/lambda_event.py +++ 
b/lambda_event.py @@ -120,17 +120,13 @@ try: import boto3 from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info # --------------------------------------------------------------------------------------------------- @@ -404,29 +400,22 @@ def main(): """Produce a list of function suffixes which handle lambda events.""" source_choices = ["stream", "sqs"] - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), - event_source=dict(required=False, default="stream", choices=source_choices), - source_params=dict(type='dict', required=True), - alias=dict(required=False, default=None), - version=dict(type='int', required=False, default=0), - ) + argument_spec = dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), + event_source=dict(required=False, default="stream", choices=source_choices), + source_params=dict(type='dict', required=True), + alias=dict(required=False, default=None), + version=dict(type='int', required=False, default=0), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[['alias', 'version']], - required_together=[] + required_together=[], ) - # validate dependencies - if not HAS_BOTO3: - module.fail_json(msg='boto3 is required for this module.') - aws = AWSConnection(module, ['lambda']) validate_params(module, aws) diff --git a/rds.py b/rds.py index e259ef757e5..38e60662c05 100644 --- a/rds.py +++ b/rds.py @@ -532,9 +532,11 @@ except ImportError: HAS_RDS2 = False -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info DEFAULT_PORTS = { @@ -1311,8 +1313,7 @@ def validate_parameters(required_vars, valid_vars, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name=dict(required=False), source_instance=dict(required=False), @@ -1346,12 +1347,12 @@ def main(): 
tags=dict(type='dict', required=False), publicly_accessible=dict(required=False), character_set_name=dict(required=False), - force_failover=dict(type='bool', required=False, default=False) - ) + force_failover=dict(type='bool', required=False, default=False), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, + check_boto3=False, ) if not HAS_BOTO: diff --git a/rds_param_group.py b/rds_param_group.py index 4870d0657fb..92261e61f8b 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -113,20 +113,24 @@ returned: when state is present ''' -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, HAS_BOTO3, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native - import traceback try: import botocore except ImportError: - pass # caught by imported HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict INT_MODIFIERS = { 'K': 1024, @@ -309,24 +313,20 @@ def ensure_absent(module, connection): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - state=dict(required=True, choices=['present', 'absent']), - name=dict(required=True), - engine=dict(), - description=dict(), - params=dict(aliases=['parameters'], type='dict'), - immediate=dict(type='bool', aliases=['apply_immediately']), - tags=dict(type='dict', default={}), - purge_tags=dict(type='bool', default=False) - ) + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True), + engine=dict(), + description=dict(), + params=dict(aliases=['parameters'], type='dict'), + immediate=dict(type='bool', aliases=['apply_immediately']), + tags=dict(type='dict', default={}), + purge_tags=dict(type='bool', default=False), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['description', 'engine']]], ) - module = AnsibleModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['description', 'engine']]]) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 and botocore are required for this module') # Retrieve any AWS settings from the environment. 
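# NOTE (illustrative sketch, not part of this patch): AnsibleAWSModule performs the
# boto3/botocore presence check itself and fails the module with a useful message when
# they are missing, which is why the explicit "if not HAS_BOTO3: module.fail_json(...)"
# guards are being deleted throughout this series. Boto2-only modules (rds.py above,
# rds_subnet_group.py below) instead opt out with check_boto3=False and keep their own
# boto guard, roughly:
#
#     module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)
#     if not HAS_BOTO:
#         module.fail_json(msg='boto required for this module')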
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) diff --git a/rds_subnet_group.py b/rds_subnet_group.py index 818b46bd6fe..daa35abd0ad 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -90,12 +90,13 @@ try: import boto.rds from boto.exception import BotoServerError - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_subnet_group_info(subnet_group): @@ -121,15 +122,13 @@ def create_result(changed, subnet_group=None): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), ) - ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/redshift.py b/redshift.py index 2b74c5f1fd0..9b6cd5bbbe3 100644 --- a/redshift.py +++ b/redshift.py @@ -258,8 +258,10 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def _collect_facts(resource): @@ -342,7 +344,7 @@ def create_cluster(module, redshift): """ Create a new cluster - module: AnsibleModule object + module: AnsibleAWSModule object redshift: authenticated redshift connection object Returns: diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 68eb42496c3..be42fa2d720 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -84,23 +84,23 @@ try: import boto import boto.redshift - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), group_name=dict(required=True, aliases=['name']), 
group_description=dict(required=False, aliases=['description']), group_subnets=dict(required=False, aliases=['subnets'], type='list', elements='str'), - )) - module = AnsibleModule(argument_spec=argument_spec) + ) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) if not HAS_BOTO: module.fail_json(msg='boto v2.9.0+ required for this module') diff --git a/route53.py b/route53.py index ad25e38ecd0..6b69363f2d0 100644 --- a/route53.py +++ b/route53.py @@ -372,12 +372,12 @@ from boto.route53 import Route53Connection from boto.route53.record import Record, ResourceRecordSets from boto.route53.status import Status - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO MINIMUM_BOTO_VERSION = '2.28.0' @@ -491,8 +491,7 @@ def to_dict(rset, zone_in, zone_id): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']), zone=dict(type='str'), hosted_zone_id=dict(type='str'), @@ -514,9 +513,9 @@ def main(): vpc_id=dict(type='str'), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), - )) + ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_one_of=[['zone', 'hosted_zone_id']], @@ -537,6 +536,7 @@ def main(): region=('identifier',), weight=('identifier',), ), + check_boto3=False, ) if not HAS_BOTO: diff --git a/route53_health_check.py b/route53_health_check.py index 77fcf912e08..a1f9c9a268c 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -123,13 +123,13 @@ from boto import route53 from boto.route53 import Route53Connection, exception from boto.route53.healthcheck import HealthCheck - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO # import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO # Things that can't get changed: @@ -280,8 +280,7 @@ def update_health_check(conn, health_check_id, health_check_version, health_chec def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( state=dict(choices=['present', 'absent'], default='present'), ip_address=dict(), port=dict(type='int'), @@ -292,8 +291,7 @@ def main(): request_interval=dict(type='int', choices=[10, 30], default=30), failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3), ) - ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) if not HAS_BOTO: module.fail_json(msg='boto 2.27.0+ required for this module') diff --git 
a/route53_info.py b/route53_info.py index 3a937a40653..64caeaef23c 100644 --- a/route53_info.py +++ b/route53_info.py @@ -207,20 +207,18 @@ try: import boto import botocore - HAS_BOTO = True -except ImportError: - HAS_BOTO = False - -try: import boto3 - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by HAS_BOTO and HAS_BOTO3 -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 + def get_hosted_zone(client, module): params = dict() @@ -416,8 +414,7 @@ def hosted_zone_details(client, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( query=dict(choices=[ 'change', 'checker_ip_range', @@ -454,14 +451,14 @@ def main(): 'tags', ], default='list'), ) - ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ ['hosted_zone_method', 'health_check_method'], ], + check_boto3=False, ) if module._name == 'route53_facts': module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws') diff --git a/s3_logging.py b/s3_logging.py index 1bb585acd55..7cc2f58d733 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -62,12 +62,13 @@ import boto.ec2 from boto.s3.connection import OrdinaryCallingFormat, Location from boto.exception import S3ResponseError - HAS_BOTO = True except ImportError: - HAS_BOTO = False + pass # Handled by HAS_BOTO -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO def compare_bucket_logging(bucket, target_bucket, target_prefix): @@ -130,17 +131,14 @@ def disable_bucket_logging(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - target_bucket=dict(required=False, default=None), - target_prefix=dict(required=False, default=""), - state=dict(required=False, default='present', choices=['present', 'absent']) - ) + argument_spec = dict( + name=dict(required=True), + target_bucket=dict(required=False, default=None), + target_prefix=dict(required=False, default=""), + state=dict(required=False, default='present', choices=['present', 'absent']), ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') diff --git a/s3_sync.py b/s3_sync.py index 3d6de33074b..12828c51958 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -229,17 
+229,6 @@ import stat as osstat # os.stat constants import traceback -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ec2_argument_spec, - boto3_conn, - get_aws_connection_info, - HAS_BOTO3, - boto_exception, - ) -from ansible.module_utils._text import to_text - try: from dateutil import tz HAS_DATEUTIL = True @@ -249,8 +238,16 @@ try: import botocore except ImportError: - # Handled by imported HAS_BOTO3 - pass + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_text + +# import module snippets +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception # the following function, calculate_multipart_etag, is from tlastowka @@ -504,8 +501,7 @@ def remove_files(s3, sourcelist, params): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( + argument_spec = dict( mode=dict(choices=['push'], default='push'), file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'), bucket=dict(required=True), @@ -521,18 +517,14 @@ def main(): delete=dict(required=False, type='bool', default=False), # future options: encoding, metadata, storage_class, retries ) - ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, ) if not HAS_DATEUTIL: module.fail_json(msg='dateutil required for this module') - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') - result = {} mode = module.params['mode'] diff --git a/s3_website.py b/s3_website.py index f2196836537..a003331adaf 100644 --- a/s3_website.py +++ b/s3_website.py @@ -164,17 +164,13 @@ try: import boto3 from botocore.exceptions import ClientError, ParamValidationError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (HAS_BOTO3, - boto3_conn, - camel_dict_to_snake_dict, - ec2_argument_spec, - get_aws_connection_info, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def _create_redirect_dict(url): @@ -294,26 +290,21 @@ def disable_bucket_as_website(client_connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['present', 'absent']), - suffix=dict(type='str', required=False, default='index.html'), - error_key=dict(type='str', required=False), - redirect_all_requests=dict(type='str', required=False) - ) + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['present', 'absent']), + suffix=dict(type='str', required=False, default='index.html'), + error_key=dict(type='str', 
required=False), + redirect_all_requests=dict(type='str', required=False), ) - module = AnsibleModule( + module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ ['redirect_all_requests', 'suffix'], ['redirect_all_requests', 'error_key'] - ]) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 required for this module') + ], + ) region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) diff --git a/sts_session_token.py b/sts_session_token.py index aa4792e94d8..48f05efe1a8 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -82,12 +82,12 @@ try: import boto3 from botocore.exceptions import ClientError - HAS_BOTO3 = True except ImportError: - HAS_BOTO3 = False + pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def normalize_credentials(credentials): @@ -128,19 +128,13 @@ def get_session_token(connection, module): def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - duration_seconds=dict(required=False, default=None, type='int'), - mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None) - ) + argument_spec = dict( + duration_seconds=dict(required=False, default=None, type='int'), + mfa_serial_number=dict(required=False, default=None), + mfa_token=dict(required=False, default=None), ) - module = AnsibleModule(argument_spec=argument_spec) - - if not HAS_BOTO3: - module.fail_json(msg='boto3 and botocore are required.') + module = AnsibleAWSModule(argument_spec=argument_spec) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) if region: From 86ee746447ae3f45fd69e835ab52d2b3354689a6 Mon Sep 17 00:00:00 2001 From: Jean-Marc Saad Date: Wed, 12 Aug 2020 23:16:48 +0300 Subject: [PATCH 031/683] [Docs] Updated rds_instance backup_retention_period parameter docs (#183) --- rds_instance.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index 95781a48d54..e93322cc61b 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -99,8 +99,9 @@ type: str backup_retention_period: description: - - The number of days for which automated backups are retained (must be greater or equal to 1). - May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. + - The number of days for which automated backups are retained. + - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source for read replicas.) + - May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. type: int ca_certificate_identifier: description: From 4961e3f6e6b9c0ed209f21d69ec854926aef19e0 Mon Sep 17 00:00:00 2001 From: flowerysong Date: Sat, 15 Aug 2020 07:02:41 -0400 Subject: [PATCH 032/683] ec2_instance: Fix spurious error message when we lose a race (#7) It is possible for all instances to stop matching the filters between the initial check for existing instances and the first call to find_instances() in change_instance_state().
If this happened, find_instances() would previously be called a second time with an empty list of instance IDs and no filters; that second call is invalid and immediately ends module execution with the error "No filters provided when they were required". --- ec2_instance.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ec2_instance.py b/ec2_instance.py index bbaa092bd5c..595cac73157 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -1556,7 +1556,9 @@ def change_instance_state(filters, desired_state, ec2=None): await_instances(ids=list(changed) + list(unchanged), state=desired_state) change_failed = list(to_change - changed) - instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances)) + + if instances: + instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances)) return changed, change_failed, instances, failure_reason From fac0583268676010b2366b67ffea203d3c040f43 Mon Sep 17 00:00:00 2001 From: Vincent Vinet Date: Sat, 15 Aug 2020 09:11:59 -0400 Subject: [PATCH 033/683] Python 3 compatibility error handling: use to_native(e) instead of str(e) or e.me… (#26) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Py3 compat error handling: use to_native(e) instead of str(e) or e.message * PR comment changes, use fail_json_aws and is_boto3_error_code --- dynamodb_ttl.py | 7 +++---- ec2_customer_gateway.py | 2 +- ec2_instance_info.py | 2 +- ec2_lc_info.py | 2 +- ec2_vpc_endpoint.py | 38 +++++++++++++++++-------------------- ec2_vpc_endpoint_info.py | 3 ++- ec2_vpc_nat_gateway.py | 18 +++++++++++------- ec2_vpc_nat_gateway_info.py | 3 ++- ec2_vpc_peering_info.py | 5 +++-- ec2_vpc_route_table_info.py | 2 +- ecs_taskdefinition.py | 2 +- elasticache.py | 15 +++++---------- elasticache_subnet_group.py | 3 ++- elb_application_lb_info.py | 21 ++++++++++---------- elb_classic_lb.py | 6 +++--- elb_target_group_info.py | 19 ++++++++++--------- iam_mfa_device_info.py | 2 +- rds.py | 19 ++++++++++--------- s3_logging.py | 9 +++++---- s3_website.py | 29 ++++++++++++++-------------- 20 files changed, 104 insertions(+), 103 deletions(-) diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 52b5055db8b..ed2dc49fd4a 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -75,7 +75,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -157,11 +156,11 @@ def main(): result['current_status'] = current_state except botocore.exceptions.ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to get or update ttl state") except botocore.exceptions.ParamValidationError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed due to invalid parameters") except ValueError as e: - module.fail_json(msg=str(e)) + module.fail_json_aws(e, msg="Failed") module.exit_json(**result) diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 5c10f4655e4..bcaf9aca2f3 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -133,7 +133,7 @@ def __init__(self, module):
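# NOTE (illustrative sketch, not part of this patch): the error-handling pattern that
# PATCH 033 applies across these modules. Exception objects have no .message attribute
# on Python 3, so handlers are rewritten along these lines, where describe_widgets()
# and the 'WidgetNotFound' error code are placeholder names:
#
#     try:
#         result = connection.describe_widgets(**params)
#     except is_boto3_error_code('WidgetNotFound'):
#         module.exit_json(widgets=[])
#     except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
#         module.fail_json_aws(e, msg="Failed to describe widgets")
#     except Exception as e:
#         module.fail_json(msg=to_native(e))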
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) except ClientError as e: - module.fail_json(msg=e.message) + module.fail_json_aws(e, msg="Failed to get connection") @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) def ensure_cgw_absent(self, gw_id): diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 707df983c1b..88a07d05f61 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -516,7 +516,7 @@ def list_ec2_instances(connection, module): reservations_paginator = connection.get_paginator('describe_instances') reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result() except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to list ec2 instances") # Get instances from reservations instances = [] diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 8ddc71083e9..1bed66e0f65 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -175,7 +175,7 @@ def list_launch_configs(connection, module): pg = connection.get_paginator('describe_launch_configurations') launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() except ClientError as e: - module.fail_json(msg=e.message) + module.fail_json_aws(e, msg="Failed to list launch configs") snaked_launch_configs = [] for launch_config in launch_configs['LaunchConfigurations']: diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index e4e98fb4067..3eaf2850e6e 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -186,8 +186,10 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -289,19 +291,15 @@ def create_vpc_endpoint(client, module): status_achieved, result = wait_for_status(client, module, result['vpc_endpoint_id'], 'available') if not status_achieved: module.fail_json(msg='Error waiting for vpc endpoint to become available - please check the AWS console') - except botocore.exceptions.ClientError as e: - if "DryRunOperation" in e.message: - changed = True - result = 'Would have created VPC Endpoint if not in check mode' - elif "IdempotentParameterMismatch" in e.message: - module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") - elif "RouteAlreadyExists" in e.message: - module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") - else: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('DryRunOperation'): + changed = True + result = 'Would have created VPC Endpoint if not in check mode' + except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: 
disable=duplicate-except + module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") + except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except + module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), + module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) return changed, result @@ -319,15 +317,13 @@ def setup_removal(client, module): result = client.delete_vpc_endpoints(**params)['Unsuccessful'] if not module.check_mode and (result != []): module.fail_json(msg=result) - except botocore.exceptions.ClientError as e: - if "DryRunOperation" in e.message: - changed = True - result = 'Would have deleted VPC Endpoint if not in check mode' - else: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('DryRunOperation'): + changed = True + result = 'Would have deleted VPC Endpoint if not in check mode' + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Failed to delete VPC endpoint") except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), + module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) return changed, result @@ -362,7 +358,7 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) except NameError as e: # Getting around the get_aws_connection_info boto reliance for region - if "global name 'boto' is not defined" in e.message: + if "global name 'boto' is not defined" in to_native(e): module.params['region'] = botocore.session.get_session().get_config_variable('region') if not module.params['region']: module.fail_json(msg="Error - no region provided") diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index a48b886a179..f2b6da3adfa 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -113,6 +113,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -156,7 +157,7 @@ def get_endpoints(client, module): try: results = json.loads(json.dumps(results, default=date_handler)) except Exception as e: - module.fail_json(msg=str(e.message)) + module.fail_json_aws(e, msg="Failed to get endpoints") return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 2216ffe2276..37dd9160084 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -204,7 +204,9 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -698,13 +700,15 @@ def create(client, subnet_id, allocation_id, client_token=None, 'NAT gateway {0} created'.format(result['nat_gateway_id']) ) - except botocore.exceptions.ClientError as e: - if "IdempotentParameterMismatch" in e.message: - err_msg = ( - 'NAT Gateway does not support update and token has already been provided: ' + str(e) - ) - else: - err_msg = str(e) + except is_boto3_error_code('IdempotentParameterMismatch') as e: + err_msg = ( + 'NAT Gateway does not support update and token has already been provided: ' + to_native(e) + ) + success = False + changed = False + result = None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + err_msg = to_native(e) success = False changed = False result = None diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 7f49c708857..9ebeb63fcbb 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -83,6 +83,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn @@ -105,7 +106,7 @@ def get_nat_gateways(client, module, nat_gateway_id=None): try: result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler)) except Exception as e: - module.fail_json(msg=str(e.message)) + module.fail_json(msg=to_native(e)) for gateway in result['NatGateways']: # Turn the boto3 result into ansible_friendly_snaked_names diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index acd5aed83e1..423a04962da 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -74,6 +74,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn @@ -94,7 +95,7 @@ def get_vpc_peers(client, module): try: result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler)) except Exception as e: - module.fail_json(msg=str(e.message)) + module.fail_json(msg=to_native(e)) return result['VpcPeeringConnections'] @@ -114,7 +115,7 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) except NameError as e: # Getting around the get_aws_connection_info boto reliance for region - if "global name 'boto' is not defined" in e.message: + if "global name 'boto' is not defined" in to_native(e): module.params['region'] = botocore.session.get_session().get_config_variable('region') if not module.params['region']: module.fail_json(msg="Error - no region provided") diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index 1b8dc09c6c5..9ff9959c271 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -91,7 +91,7 @@ def list_ec2_vpc_route_tables(connection, module): try: all_route_tables = connection.get_all_route_tables(filters=filters) except BotoServerError as e: - module.fail_json(msg=e.message) + module.fail_json_aws(e, msg="Failed to get route tables") for
route_table in all_route_tables: route_table_dict_array.append(get_route_table_info(route_table)) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 98831a850e8..b7afe864ee8 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -273,7 +273,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, try: response = self.ecs.register_task_definition(**params) except botocore.exceptions.ClientError as e: - self.module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to register task") return response['taskDefinition'] diff --git a/elasticache.py b/elasticache.py index b74dce611e2..ab2a9f18fdb 100644 --- a/elasticache.py +++ b/elasticache.py @@ -227,8 +227,7 @@ def create(self): self.conn.create_cache_cluster(**kwargs) except botocore.exceptions.ClientError as e: - self.module.fail_json(msg=e.message, exception=format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to create cache cluster") self._refresh_data() @@ -255,8 +254,7 @@ def delete(self): try: response = self.conn.delete_cache_cluster(CacheClusterId=self.name) except botocore.exceptions.ClientError as e: - self.module.fail_json(msg=e.message, exception=format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to delete cache cluster") cache_cluster_data = response['CacheCluster'] self._refresh_data(cache_cluster_data) @@ -306,8 +304,7 @@ def modify(self): ApplyImmediately=True, EngineVersion=self.cache_engine_version) except botocore.exceptions.ClientError as e: - self.module.fail_json(msg=e.message, exception=format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to modify cache cluster") self._refresh_data() @@ -335,8 +332,7 @@ def reboot(self): self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids) except botocore.exceptions.ClientError as e: - self.module.fail_json(msg=e.message, exception=format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to reboot cache cluster") self._refresh_data() @@ -455,8 +451,7 @@ def _refresh_data(self, cache_cluster_data=None): self.status = 'gone' return else: - self.module.fail_json(msg=e.message, exception=format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.module.fail_json_aws(e, msg="Failed to describe cache clusters") cache_cluster_data = response['CacheClusters'][0] self.data = cache_cluster_data self.status = self.data['CacheClusterStatus'] diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 15cbd596843..ab25e294eeb 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -64,6 +64,7 @@ except ImportError: pass # Handled by HAS_BOTO +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -105,7 +106,7 @@ def main(): try: conn = connect_to_region(region_name=region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) try: changed = False diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index ef02d59e54f..1ed3d4c7cd2 100644 --- a/elb_application_lb_info.py +++ 
b/elb_application_lb_info.py @@ -170,7 +170,9 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -182,7 +184,7 @@ def get_elb_listeners(connection, module, elb_arn): try: return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners'] except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe elb listeners") def get_listener_rules(connection, module, listener_arn): @@ -190,7 +192,7 @@ def get_listener_rules(connection, module, listener_arn): try: return connection.describe_rules(ListenerArn=listener_arn)['Rules'] except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe listener rules") def get_load_balancer_attributes(connection, module, load_balancer_arn): @@ -198,7 +200,7 @@ def get_load_balancer_attributes(connection, module, load_balancer_arn): try: load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe load balancer attributes") # Replace '.' with '_' in attribute key names to make it more Ansibley for k, v in list(load_balancer_attributes.items()): @@ -213,7 +215,7 @@ def get_load_balancer_tags(connection, module, load_balancer_arn): try: return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe load balancer tags") def list_load_balancers(connection, module): @@ -229,13 +231,12 @@ def list_load_balancers(connection, module): load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result() if names: load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result() - except ClientError as e: - if e.response['Error']['Code'] == 'LoadBalancerNotFound': - module.exit_json(load_balancers=[]) - else: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('LoadBalancerNotFound'): + module.exit_json(load_balancers=[]) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list load balancers") except NoCredentialsError as e: - module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc()) + module.fail_json(msg="AWS authentication problem. 
" + to_native(e), exception=traceback.format_exc()) for load_balancer in load_balancers['LoadBalancers']: # Get the attributes for each elb diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 52aee159373..5d35fca3bc5 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -459,7 +459,7 @@ def __init__(self, module, name, listeners=None, purge_listeners=None, try: self.elb = self._get_elb() except boto.exception.BotoServerError as e: - module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc()) + module.fail_json(msg='unable to get all load balancers: %s' % to_native(e), exception=traceback.format_exc()) self.ec2_conn = self._get_ec2_connection() @@ -820,7 +820,7 @@ def _enable_zones(self, zones): try: self.elb.enable_zones(zones) except boto.exception.BotoServerError as e: - self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc()) + self.module.fail_json(msg='unable to enable zones: %s' % to_native(e), exception=traceback.format_exc()) self.changed = True @@ -828,7 +828,7 @@ def _disable_zones(self, zones): try: self.elb.disable_zones(zones) except boto.exception.BotoServerError as e: - self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc()) + self.module.fail_json(msg='unable to disable zones: %s' % to_native(e), exception=traceback.format_exc()) self.changed = True def _attach_subnets(self, subnets): diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 88f670f8e04..c444521d887 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -215,7 +215,9 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -227,7 +229,7 @@ def get_target_group_attributes(connection, module, target_group_arn): try: target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe target group attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley return dict((k.replace('.', '_'), v) @@ -239,7 +241,7 @@ def get_target_group_tags(connection, module, target_group_arn): try: return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to describe target group tags") def get_target_group_targets_health(connection, module, target_group_arn): @@ -247,7 +249,7 @@ try: return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] except ClientError as e: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to get target health") def list_target_groups(connection, module): @@ -267,13 +269,12 @@ target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result() if names: target_groups = target_group_paginator.paginate(Names=names).build_full_result() - except ClientError as e: - if e.response['Error']['Code'] == 'TargetGroupNotFound': - module.exit_json(target_groups=[]) - else: - module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('TargetGroupNotFound'): + module.exit_json(target_groups=[]) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list target groups") except NoCredentialsError as e: - module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc()) + module.fail_json(msg="AWS authentication problem. 
" + to_native(e), exception=traceback.format_exc()) # Get the attributes and tags for each target group for target_group in target_groups['TargetGroups']: diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 727242751a6..c107c39f67b 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -82,7 +82,7 @@ def list_mfa_devices(connection, module): try: response = connection.list_mfa_devices(**args) except ClientError as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to list MFA devices") module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) diff --git a/rds.py b/rds.py index 38e60662c05..1321186497a 100644 --- a/rds.py +++ b/rds.py @@ -532,6 +532,7 @@ except ImportError: HAS_RDS2 = False +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO @@ -983,7 +984,7 @@ def create_db_instance(module, conn): module.params.get('username'), module.params.get('password'), **params) changed = True except RDSException as e: - module.fail_json(msg="Failed to create instance: %s" % e.message) + module.fail_json(msg="Failed to create instance: %s" % to_native(e)) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -1010,7 +1011,7 @@ def replicate_db_instance(module, conn): result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True except RDSException as e: - module.fail_json(msg="Failed to create replica instance: %s " % e.message) + module.fail_json(msg="Failed to create replica instance: %s " % to_native(e)) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -1049,7 +1050,7 @@ def delete_db_instance_or_snapshot(module, conn): else: result = conn.delete_db_snapshot(snapshot) except RDSException as e: - module.fail_json(msg="Failed to delete instance: %s" % e.message) + module.fail_json(msg="Failed to delete instance: %s" % to_native(e)) # If we're not waiting for a delete to complete then we're all done # so just return @@ -1062,7 +1063,7 @@ def delete_db_instance_or_snapshot(module, conn): if e.code == 'DBInstanceNotFound': module.exit_json(changed=True) else: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) except Exception as e: module.fail_json(msg=str(e)) @@ -1099,7 +1100,7 @@ def modify_db_instance(module, conn): try: result = conn.modify_db_instance(instance_name, **params) except RDSException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) if params.get('apply_immediately'): if new_instance_name: # Wait until the new instance name is valid @@ -1137,7 +1138,7 @@ def promote_db_instance(module, conn): result = conn.promote_read_replica(instance_name, **params) changed = True except RDSException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) else: changed = False @@ -1162,7 +1163,7 @@ def snapshot_db_instance(module, conn): result = conn.create_db_snapshot(snapshot, instance_name, **params) changed = True except RDSException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -1187,7 +1188,7 @@ def reboot_db_instance(module, conn): result = 
conn.reboot_db_instance(instance_name, **params) changed = True except RDSException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -1218,7 +1219,7 @@ def restore_db_instance(module, conn): result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) changed = True except RDSException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) diff --git a/s3_logging.py b/s3_logging.py index 7cc2f58d733..24f4004eec7 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -65,6 +65,7 @@ except ImportError: pass # Handled by HAS_BOTO +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -90,7 +91,7 @@ def enable_bucket_logging(connection, module): try: bucket = connection.get_bucket(bucket_name) except S3ResponseError as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) try: if not compare_bucket_logging(bucket, target_bucket, target_prefix): @@ -101,14 +102,14 @@ def enable_bucket_logging(connection, module): if e.status == 301: module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged") else: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) target_bucket_obj.set_as_logging_target() bucket.enable_logging(target_bucket, target_prefix) changed = True except S3ResponseError as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) module.exit_json(changed=changed) @@ -124,7 +125,7 @@ def disable_bucket_logging(connection, module): bucket.disable_logging() changed = True except S3ResponseError as e: - module.fail_json(msg=e.message) + module.fail_json(msg=to_native(e)) module.exit_json(changed=changed) diff --git a/s3_website.py b/s3_website.py index a003331adaf..a281f60f37e 100644 --- a/s3_website.py +++ b/s3_website.py @@ -168,6 +168,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -221,22 +222,21 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: bucket_website = resource_connection.BucketWebsite(bucket_name) except ClientError as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to get bucket") try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': - website_config = None - else: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('NoSuchWebsiteConfiguration'): + website_config = None + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, 
msg="Failed to get website configuration") if website_config is None: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to set bucket website configuration") except ValueError as e: module.fail_json(msg=str(e)) else: @@ -249,13 +249,13 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to update bucket website configuration") except KeyError as e: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json(e, msg="Failed to update bucket website configuration") except ValueError as e: module.fail_json(msg=str(e)) @@ -273,17 +273,16 @@ def disable_bucket_as_website(client_connection, module): try: client_connection.get_bucket_website(Bucket=bucket_name) - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': - module.exit_json(changed=changed) - else: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('NoSuchWebsiteConfiguration'): + module.exit_json(changed=changed) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket website") try: client_connection.delete_bucket_website(Bucket=bucket_name) changed = True except ClientError as e: - module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to delete bucket website") module.exit_json(changed=changed) From ea234e0e5f4d1f2d9c7110ad9f78534ab01a11cb Mon Sep 17 00:00:00 2001 From: Rob White Date: Sun, 16 Aug 2020 00:04:56 +1000 Subject: [PATCH 034/683] Update documentation for security_groups parameter (#61) Clarified documentation surrounding using security_groups for any VPC other than Default. --- ec2_launch_template.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 2f7b529146e..4553a8e794d 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -307,7 +307,9 @@ type: list elements: str security_groups: - description: A list of security group names (VPC or EC2-Classic) that the new instances will be added to. + description: > + A list of security group names (Default VPC or EC2-Classic) that the new instances will be added to. + For any VPC other than Default, you must use I(security_group_ids). 
type: list elements: str tags: From f85cd5287d93c24270240a8c5538b28047c40f77 Mon Sep 17 00:00:00 2001 From: Josh Date: Sun, 16 Aug 2020 09:41:00 -0400 Subject: [PATCH 035/683] Bugfix/ec2 instance mod sgs (#22) Fixes #54174 * Added SG handling for existing instances + some cleanup * tests(ec2_instance): Tests for SG modifications to existing instances * tests(ec2_instance): Test simultaneous state and SG changes * refactor(ec2_instance): Move security out of for loop * style(ec2_instance): Update fail message to reflect security groups * Add changelog Co-authored-by: Andrea Tartaglia Co-authored-by: Mark Chappell --- ec2_instance.py | 54 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/ec2_instance.py b/ec2_instance.py index 595cac73157..ddedd379573 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -808,9 +808,9 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils.six import text_type, string_types +from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib import parse as urlparse -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils._text import to_native import ansible_collections.amazon.aws.plugins.module_utils.ec2 as ec2_utils from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list, @@ -1337,15 +1337,47 @@ def value_wrapper(v): ] for mapping in param_mappings: - if params.get(mapping.param_key) is not None and mapping.instance_key not in skip: - value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_) - if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key): - arguments = dict( - InstanceId=instance['InstanceId'], - # Attribute=mapping.attribute_name, - ) - arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) - changes_to_apply.append(arguments) + if params.get(mapping.param_key) is None: + continue + if mapping.instance_key in skip: + continue + + value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_) + if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): + arguments = dict( + InstanceId=instance['InstanceId'], + # Attribute=mapping.attribute_name, + ) + arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) + changes_to_apply.append(arguments) + + if params.get('security_group') or params.get('security_groups'): + value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute="groupSet", InstanceId=id_) + # managing security groups + if params.get('vpc_subnet_id'): + subnet_id = params.get('vpc_subnet_id') + else: + default_vpc = get_default_vpc(ec2) + if default_vpc is None: + module.fail_json( + msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.") + else: + sub = get_default_subnet(ec2, default_vpc) + subnet_id = sub['SubnetId'] + + groups = discover_security_groups( + group=params.get('security_group'), + groups=params.get('security_groups'), + subnet_id=subnet_id, + ec2=ec2 + ) + expected_groups = [g['GroupId'] for g in groups] + instance_groups = [g['GroupId'] for g in value['Groups']] + if set(instance_groups) != set(expected_groups): + changes_to_apply.append(dict( + Groups=expected_groups, + 
InstanceId=instance['InstanceId'] + )) if (params.get('network') or {}).get('source_dest_check') is not None: # network.source_dest_check is nested, so needs to be treated separately From e7922c01863af4af28430206f7a87fab76cb53e4 Mon Sep 17 00:00:00 2001 From: ichekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Wed, 19 Aug 2020 13:37:52 -0400 Subject: [PATCH 036/683] Correctly handle a situation when a repository has no description (#195) * Correctly handle a situation when a repository has no description If a repository was created without a description, API call response will not include 'repositoryDescription' attribute: ``` $ aws codecommit get-repository --repository-name test { "repositoryMetadata": { "accountId": "123412341234", "repositoryId": "abcd1234-abcd-abcd-1234-abcd1234abc", "repositoryName": "test", "defaultBranch": "master", "lastModifiedDate": 1597770987.868, "creationDate": 1579544888.152, "cloneUrlHttp": "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/test", "cloneUrlSsh": "ssh://git-codecommit.us-east-1.amazonaws.com/v1/repos/test", "Arn": "arn:aws:codecommit:us-east-1:123412341234:test" } } ``` As a result, module execution fails with the following stacktrace: ``` Traceback (most recent call last): File \"/root/.ansible/tmp/ansible-tmp-1597769457.193254-7427-16306174619296/AnsiballZ_aws_codecommit.py\", line 102, in _ansiballz_main() File \"/root/.ansible/tmp/ansible-tmp-1597769457.193254-7427-16306174619296/AnsiballZ_aws_codecommit.py\", line 94, in _ansiballz_main invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS) File \"/root/.ansible/tmp/ansible-tmp-1597769457.193254-7427-16306174619296/AnsiballZ_aws_codecommit.py\", line 40, in invoke_module runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.aws_codecommit', init_globals=None, run_name='__main__', alter_sys=True) File \"/root/.pyenv/versions/3.8.1/lib/python3.8/runpy.py\", line 206, in run_module return _run_module_code(code, init_globals, run_name, mod_spec) File \"/root/.pyenv/versions/3.8.1/lib/python3.8/runpy.py\", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File \"/root/.pyenv/versions/3.8.1/lib/python3.8/runpy.py\", line 86, in _run_code exec(code, run_globals) File \"/tmp/ansible_community.aws.aws_codecommit_payload_0zfnkbv7/ansible_community.aws.aws_codecommit_payload.zip/ansible_collections/community/aws/plugins/modules/aws_codecommit.py\", line 245, in File \"/tmp/ansible_community.aws.aws_codecommit_payload_0zfnkbv7/ansible_community.aws.aws_codecommit_payload.zip/ansible_collections/community/aws/plugins/modules/aws_codecommit.py\", line 240, in main File \"/tmp/ansible_community.aws.aws_codecommit_payload_0zfnkbv7/ansible_community.aws.aws_codecommit_payload.zip/ansible_collections/community/aws/plugins/modules/aws_codecommit.py\", line 165, in process KeyError: 'repositoryDescription' ``` * Add integration tests Additional tests do the following: - Create a new repository with no description - Update a repository with no description - Delete a repository * Add change log fragment Co-authored-by: Ivan Chekaldin --- aws_codecommit.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws_codecommit.py b/aws_codecommit.py index 5fe907cc37d..18fc10a2d69 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -162,6 +162,8 @@ def process(self): result['changed'] = True else: metadata = self._get_repository()['repositoryMetadata'] + if not metadata.get('repositoryDescription'): + metadata['repositoryDescription'] = '' if 
metadata['repositoryDescription'] != self._module.params['description']: if not self._check_mode: self._update_repository() From de61e774c0e9b52598a154ee46c217d407c366c9 Mon Sep 17 00:00:00 2001 From: ichekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Mon, 24 Aug 2020 14:28:42 -0400 Subject: [PATCH 037/683] aws_kms_info - Gracefully Handle Keys That Don't Allow kms:GetKeyRotationStatus API Calls (#199) * Gracefully handle keys that don't allow kms:GetKeyRotationStatus API calls Some AWS KMS keys (e.g. aws/acm) do not allow permissions to call the API kms:GetKeyRotationStatus. As a result, module execution fails, even if the user executing it has full admin privileges. Example: https://forums.aws.amazon.com/thread.jspa?threadID=312992 * change log fragment * Return None if key rotation status can't be determined Update documentation to reflect this use case. Use helper to track the exception. * Add integration tests --- aws_kms_info.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index eced250e158..d0c741eea0a 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -105,7 +105,7 @@ returned: always sample: false enable_key_rotation: - description: Whether the automatically key rotation every year is enabled. + description: Whether automatic yearly key rotation is enabled. Returns None if the key rotation status can't be determined. type: bool returned: always sample: false @@ -223,6 +223,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -290,7 +291,11 @@ def get_key_policy_with_backoff(connection, key_id, policy_name): @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def get_enable_key_rotation_with_backoff(connection, key_id): - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + except is_boto3_error_code('AccessDeniedException') as e: + return None + return current_rotation_status.get('KeyRotationEnabled') From 7f962fb8da8dc82527fffe65c1b6ed80d52cf618 Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Tue, 25 Aug 2020 23:13:21 -0700 Subject: [PATCH 038/683] Fix false positive no_log warning in iam module. 
(#202) --- iam.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/iam.py b/iam.py index eee782c1b1f..823bfb89925 100644 --- a/iam.py +++ b/iam.py @@ -622,7 +622,8 @@ def main(): groups=dict(type='list', default=None, required=False, elements='str'), state=dict(required=True, choices=['present', 'absent', 'update']), password=dict(default=None, required=False, no_log=True), - update_password=dict(default='always', required=False, choices=['always', 'on_create']), + # setting no_log=False on update_password avoids a false positive warning about not setting no_log + update_password=dict(default='always', required=False, choices=['always', 'on_create'], no_log=False), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', 'Active', 'Inactive', 'Create', 'Remove']), From b1d667f38ba2843d38cfbbaa6ea2d1b14e699489 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 26 Aug 2020 11:35:32 +0200 Subject: [PATCH 039/683] Cleanup: Bulk Migration from boto3_conn to module.client() (#188) * Migrate from boto3_conn to module.client * Simplify error handling when creating connections * Simplify Region handling * Remove unused imports * Changelog --- aws_direct_connect_gateway.py | 8 +++---- aws_direct_connect_link_aggregation_group.py | 13 ++++------- aws_kms_info.py | 12 ++++------ aws_s3_bucket_info.py | 9 ++++---- cloudfront_info.py | 16 +++---------- cloudwatchlogs_log_group.py | 8 +++---- cloudwatchlogs_log_group_info.py | 8 +++---- data_pipeline.py | 14 ++++-------- dynamodb_table.py | 10 ++++---- dynamodb_ttl.py | 9 +++----- ec2_customer_gateway.py | 12 ++++------ ec2_instance_info.py | 13 ++++------- ec2_lc.py | 18 ++++++--------- ec2_lc_find.py | 13 +++++++---- ec2_lc_info.py | 13 ++++------- ec2_snapshot_copy.py | 10 ++++---- ec2_vpc_endpoint.py | 24 +++----------------- ec2_vpc_endpoint_info.py | 12 +++------- ec2_vpc_igw_info.py | 9 +++----- ec2_vpc_nat_gateway.py | 16 +++---------- ec2_vpc_nat_gateway_info.py | 12 +++------- ec2_vpc_peer.py | 11 ++++----- ec2_vpc_peering_info.py | 20 +++------------- ec2_vpc_vgw.py | 9 +++----- ec2_vpc_vgw_info.py | 9 +++----- ecs_attribute.py | 14 ++++-------- ecs_cluster.py | 12 ++++------ elasticache.py | 11 ++++----- elasticache_parameter_group.py | 14 ++++-------- elasticache_snapshot.py | 14 ++++-------- elb_application_lb_info.py | 13 ++++------- elb_target.py | 9 ++++---- elb_target_group_info.py | 13 ++++------- execute_lambda.py | 15 +++--------- iam_managed_policy.py | 11 +++------ iam_mfa_device_info.py | 12 ++++------ iam_server_certificate_info.py | 10 ++++---- kinesis_stream.py | 19 +++------------- rds_param_group.py | 14 +++--------- route53_info.py | 8 +++---- s3_sync.py | 10 ++++---- s3_website.py | 15 +++++------- sts_session_token.py | 12 ++++------ 43 files changed, 178 insertions(+), 356 deletions(-) diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index 2e0c3a0fa81..b34d6c52a15 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -108,8 +108,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn def dx_gateway_info(client, gateway_id, module): @@ -353,8 +351,10 @@ def main(): state = module.params.get('state') - 
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - client = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + client = module.client('directconnect') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') if state == 'present': (changed, results) = ensure_present(client, module) diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index aef1576c528..fec3f3dfc99 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -171,9 +171,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection @@ -424,13 +422,10 @@ def main(): required_if=[('state', 'present', ('location', 'bandwidth'))], ) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.") - - connection = boto3_conn(module, conn_type='client', - resource='directconnect', region=region, - endpoint=ec2_url, **aws_connect_kwargs) + try: + connection = module.client('directconnect') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get('state') response = {} diff --git a/aws_kms_info.py b/aws_kms_info.py index d0c741eea0a..160ca5e13c7 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -224,8 +224,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -416,12 +414,10 @@ def main(): if module._name == 'aws_kms_facts': module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - - if region: - connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('kms') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') all_keys = 
get_kms_info(connection, module) module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])]) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index ca47bf42db0..cd8b81f36c9 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -59,9 +59,7 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_bucket_list(module, connection): @@ -96,9 +94,10 @@ def main(): "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') # Set up connection - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, - **aws_connect_params) + try: + connection = module.client('s3') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') # Gather results result['buckets'] = get_bucket_list(module, connection) diff --git a/cloudfront_info.py b/cloudfront_info.py index f395ee801dc..293cd2f0aa6 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -271,8 +271,6 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -284,17 +282,9 @@ def __init__(self, module): self.module = module try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - self.client = boto3_conn(module, conn_type='client', - resource='cloudfront', region=region, - endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoRegionError: - self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION " - "environment variable or in boto configuration file") - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Can't establish connection - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + self.client = module.client('cloudfront') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') def get_distribution(self, distribution_id): try: diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index 93138c13773..e8890988509 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -139,8 +139,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def create_log_group(client, log_group_name, kms_key_id, tags, 
retention, module): @@ -250,8 +248,10 @@ def main(): mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + logs = module.client('logs') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get('state') changed = False diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 98fe63195b7..153aac7baf0 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -82,8 +82,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def describe_log_group(client, log_group_name, module): @@ -112,8 +110,10 @@ def main(): module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + logs = module.client('logs') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], diff --git a/data_pipeline.py b/data_pipeline.py index 34cf4df343c..2e49dcc6aaa 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -204,6 +204,7 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -211,8 +212,6 @@ from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -618,14 +617,9 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - client = boto3_conn(module, conn_type='client', - resource='datapipeline', region=region, - endpoint=ec2_url, **aws_connect_kwargs) - except ClientError as e: - module.fail_json(msg="Can't authorize connection - " + str(e)) + client = module.client('datapipeline') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get('state') if state == 'present': diff --git 
a/dynamodb_table.py b/dynamodb_table.py index 47b8bc9f678..35d9cd4d64a 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -195,7 +195,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -492,13 +491,12 @@ def main(): if module.params.get('tags'): try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs) + boto3_dynamodb = module.client('dynamodb') if not hasattr(boto3_dynamodb, 'tag_resource'): module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version') - boto3_sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc()) + boto3_sts = module.client('sts') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') else: boto3_dynamodb = None boto3_sts = None diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index ed2dc49fd4a..b23c0ab076e 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -74,8 +74,6 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_current_ttl_state(c, table_name): @@ -133,10 +131,9 @@ def main(): module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24')) try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - dbclient = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg=str(e)) + dbclient = module.client('dynamodb') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') result = {'changed': False} state = module.params['state'] diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index bcaf9aca2f3..1e9fc1ded47 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -112,14 +112,13 @@ try: from botocore.exceptions import ClientError import boto3 + import botocore except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
get_aws_connection_info class Ec2CustomerGatewayManager: @@ -128,12 +127,9 @@ def __init__(self, module): self.module = module try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except ClientError as e: - module.fail_json_aws(e, msg="Failed to get connection") + self.ec2 = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) def ensure_cgw_absent(self, gw_id): diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 88a07d05f61..1c4c1f0df33 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -495,16 +495,15 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_ec2_instances(connection, module): @@ -550,12 +549,10 @@ def main(): if module._name == 'ec2_instance_facts': module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - - if region: - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') list_ec2_instances(connection, module) diff --git a/ec2_lc.py b/ec2_lc.py index 59d2ec4cd7e..7555cf68a0c 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -458,11 +458,8 @@ from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict @@ -516,11 +513,13 @@ def create_launch_config(connection, module): name = module.params.get('name') vpc_id = module.params.get('vpc_id') try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, 
boto3=True) - ec2_connection = boto3_conn(module, 'client', 'ec2', region, ec2_url, **aws_connect_kwargs) + ec2_connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + try: security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to get Security Group IDs') except ValueError as e: module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) user_data = module.params.get('user_data') @@ -680,10 +679,7 @@ def main(): ) try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoRegionError: - module.fail_json(msg=("region must be specified as a parameter in AWS_DEFAULT_REGION environment variable or in boto configuration file")) + connection = module.client('autoscaling') except botocore.exceptions.ClientError as e: module.fail_json(msg="unable to establish connection - " + str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) diff --git a/ec2_lc_find.py b/ec2_lc_find.py index e2a31cef47c..1db33a20036 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -137,9 +137,12 @@ ''' import re +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def find_launch_configs(client, module): @@ -202,9 +205,11 @@ def main(): argument_spec=argument_spec, ) - region, ec2_url, aws_connect_params = get_aws_connection_info(module, True) + try: + client = module.client('autoscaling') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') - client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params) find_launch_configs(client, module) diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 1bed66e0f65..1d680c37bc9 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -153,14 +153,13 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_launch_configs(connection, module): @@ -213,12 +212,10 @@ def main(): if module._name == 'ec2_lc_facts': module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_params = 
get_aws_connection_info(module, boto3=True) - - if region: - connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('autoscaling') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') list_launch_configs(connection, module) diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 25101cbac17..85f44d60000 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -113,6 +113,7 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, WaiterError except ImportError: pass # Handled by AnsibleAWSModule @@ -120,8 +121,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -184,9 +183,10 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - client = boto3_conn(module, conn_type='client', resource='ec2', - region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') copy_snapshot(module, client) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 3eaf2850e6e..771ea52ba75 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -190,8 +190,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -355,25 +353,9 @@ def main(): state = module.params.get('state') try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - except NameError as e: - # Getting around the get_aws_connection_info boto reliance for region - if "global name 'boto' is not defined" in to_native(e): - module.params['region'] = botocore.session.get_session().get_config_variable('region') - if not module.params['region']: - module.fail_json(msg="Error - no region provided") - else: - module.fail_json(msg="Can't retrieve connection information - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - - try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + ec2 = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + 
module.fail_json_aws(e, msg='Failed to connect to AWS') # Ensure resource is present if state == 'present': diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index f2b6da3adfa..e72b487db3d 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -115,8 +115,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -174,13 +172,9 @@ def main(): # Validate Requirements try: - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - if region: - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg=str(e)) + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') invocations = { 'services': get_supported_services, diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 9cca904fa7e..4719d495fd8 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -94,8 +94,6 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list @@ -136,10 +134,9 @@ def main(): # Validate Requirements try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Can't authorize connection - " + str(e)) + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') # call your function here results = list_internet_gateways(connection, module) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 37dd9160084..9072a8e32b6 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -207,8 +207,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -969,17 +967,9 @@ def main(): if_exist_do_not_create = 
module.params.get('if_exist_do_not_create') try: - region, ec2_url, aws_connect_kwargs = ( - get_aws_connection_info(module, boto3=True) - ) - client = ( - boto3_conn( - module, conn_type='client', resource='ec2', - region=region, endpoint=ec2_url, **aws_connect_kwargs - ) - ) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Boto3 Client Error - " + str(e.msg)) + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') changed = False err_msg = '' diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 9ebeb63fcbb..97816c72362 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -85,8 +85,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -133,13 +131,9 @@ def main(): date='2021-12-01', collection_name='community.aws') try: - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - if region: - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg=str(e)) + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') results = get_nat_gateways(connection, module) diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 9b74a5f2c3f..31f6ea203a7 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -225,8 +225,6 @@ import traceback from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code @@ -414,12 +412,11 @@ def main(): peering_id = module.params.get('peering_id') vpc_id = module.params.get('vpc_id') peer_vpc_id = module.params.get('peer_vpc_id') + try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - client = boto3_conn(module, conn_type='client', resource='ec2', - region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Can't authorize connection - " + str(e)) + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') if state == 'present': (changed, results) = create_peer_connection(client, module) diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 423a04962da..117992e76c6 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -77,8 +77,6 @@ from ansible.module_utils._text import 
to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -112,21 +110,9 @@ def main(): module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws') try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - except NameError as e: - # Getting around the get_aws_connection_info boto reliance for region - if "global name 'boto' is not defined" in to_native(e): - module.params['region'] = botocore.session.get_session().get_config_variable('region') - if not module.params['region']: - module.fail_json(msg="Error - no region provided") - else: - module.fail_json(msg="Can't retrieve connection information - " + str(e)) - - try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg=str(e)) + ec2 = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') # Turn the boto3 result in to ansible friendly_snaked_names results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)] diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index b44d36c1142..2f8702ecace 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -124,8 +124,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -557,10 +555,9 @@ def main(): state = module.params.get('state').lower() try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Can't authorize connection - %s" % to_native(e), exception=traceback.format_exc()) + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') if state == 'present': (changed, results) = ensure_vgw_present(client, module) diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 4c8f0af1c28..692c291a87b 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -97,8 +97,6 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list @@ -141,10 +139,9 @@ def main(): module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws') try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Can't authorize connection - " + str(e)) + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') # call your function here results = list_virtual_gateways(connection, module) diff --git a/ecs_attribute.py b/ecs_attribute.py index d96e81dd000..552747ba10c 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -112,13 +112,12 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, EndpointConnectionError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class EcsAttributes(object): @@ -188,13 +187,10 @@ def __init__(self, module, cluster, ec2_id): self.cluster = cluster self.ec2_id = ec2_id - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg=("Region must be specified as a parameter," - " in EC2_REGION or AWS_REGION environment" - " variables or in boto configuration file")) - self.ecs = boto3_conn(module, conn_type='client', resource='ecs', - region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + self.ecs = module.client('ecs') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') self.ecs_arn = self._get_ecs_arn() diff --git a/ecs_cluster.py b/ecs_cluster.py index eb68b933ce3..ed0dc1c78ff 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -107,12 +107,11 @@ try: import boto3 + import botocore except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info class EcsClusterManager: @@ -120,11 +119,10 @@ class EcsClusterManager: def __init__(self, module): self.module = module - - # self.ecs = boto3.client('ecs') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - self.ecs = boto3_conn(module, conn_type='client', resource='ecs', - region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + self.ecs = module.client('ecs') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') def find_in_array(self, array_of_clusters, cluster_name, 
field_name='clusterArn'): for c in array_of_clusters: diff --git a/elasticache.py b/elasticache.py index ab2a9f18fdb..b8848f324af 100644 --- a/elasticache.py +++ b/elasticache.py @@ -137,7 +137,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -423,12 +422,10 @@ def _requires_destroy_and_create(self): def _get_elasticache_connection(self): """Get an elasticache connection""" - region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True) - if region: - return boto3_conn(self.module, conn_type='client', resource='elasticache', - region=region, endpoint=ec2_url, **aws_connect_params) - else: - self.module.fail_json(msg="region must be specified") + try: + return self.module.client('elasticache') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to connect to AWS') def _get_port(self): """Get the port. Where this information is retrieved from is engine dependent.""" diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index b30a00bfacd..1e9c574178d 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -118,8 +118,6 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -293,14 +291,10 @@ def main(): state = module.params.get('state') values = module.params.get('values') - # Retrieve any AWS settings from the environment. - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.") - - connection = boto3_conn(module, conn_type='client', - resource='elasticache', region=region, - endpoint=ec2_url, **aws_connect_kwargs) + try: + connection = module.client('elasticache') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') exists = get_info(connection, parameter_group_name) diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 373654d99fa..dc92df6b3c2 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -120,8 +120,6 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -189,14 +187,10 @@ def main(): target = module.params.get('target') bucket = module.params.get('bucket') - # Retrieve any AWS settings from the environment. 
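The connection idiom every hunk in this patch converges on, as a minimal standalone sketch (the 'elasticache' service name, the empty argument spec and the module body are illustrative, not taken from any one module):

import botocore.exceptions

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule


def main():
    module = AnsibleAWSModule(argument_spec=dict())
    try:
        # module.client() resolves region, endpoint and credentials from the
        # standard AWS module parameters, replacing the manual
        # get_aws_connection_info() + boto3_conn() plumbing removed here.
        connection = module.client('elasticache')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()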
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) - - connection = boto3_conn(module, conn_type='client', - resource='elasticache', region=region, - endpoint=ec2_url, **aws_connect_kwargs) + try: + connection = module.client('elasticache') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') changed = False response = {} diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 1ed3d4c7cd2..06e1f3ae229 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -166,6 +166,7 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule @@ -173,10 +174,8 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_elb_listeners(connection, module, elb_arn): @@ -275,12 +274,10 @@ def main(): module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - - if region: - connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('elbv2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') list_load_balancers(connection, module) diff --git a/elb_target.py b/elb_target.py index 3e001eccdac..06ff79e2653 100644 --- a/elb_target.py +++ b/elb_target.py @@ -113,6 +113,7 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # Handled by AnsibleAWSModule @@ -120,9 +121,7 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -334,8 +333,10 @@ def main(): mutually_exclusive=[['target_group_arn', 'target_group_name']], ) - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) + try: + connection = module.client('elbv2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + 
module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get("state") diff --git a/elb_target_group_info.py b/elb_target_group_info.py index c444521d887..a9694428872 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -211,6 +211,7 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule @@ -218,10 +219,8 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_target_group_attributes(connection, module, target_group_arn): @@ -310,12 +309,10 @@ def main(): if module._name == 'elb_target_group_facts': module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - - if region: - connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('elbv2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') list_target_groups(connection, module) diff --git a/execute_lambda.py b/execute_lambda.py index c1372dfaf22..ca97f6619c9 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -139,8 +139,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def main(): @@ -172,17 +170,10 @@ def main(): if not (name or function_arn): module.fail_json(msg="Must provide either a function_arn or a name to invoke.") - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="The AWS region must be specified as an " - "environment variable or in the AWS credentials " - "profile.") - try: - client = boto3_conn(module, conn_type='client', resource='lambda', - region=region, endpoint=ec2_url, **aws_connect_kwargs) - except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: - module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e), exception=traceback.format_exc()) + client = module.client('lambda') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') invoke_params = {} diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 552b93b1b23..a0b7c3c48af 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -140,8 +140,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies @@ -321,12 +319,9 @@ def main(): policy = json.dumps(json.loads(module.params.get('policy'))) try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - iam = boto3_conn(module, conn_type='client', resource='iam', - region=region, endpoint=ec2_url, **aws_connect_kwargs) - except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e: - module.fail_json(msg="Can't authorize connection. Check your credentials and profile.", - exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + iam = module.client('iam') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') p = get_policy_by_name(module, iam, name) if state == 'present': diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index c107c39f67b..c79afab095f 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -62,14 +62,13 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def list_mfa_devices(connection, module): @@ -96,11 +95,10 @@ def main(): if module._name == 'iam_mfa_device_facts': module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region: - connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('iam') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') list_mfa_devices(connection, module) diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 9b41aa3bab8..6e37185680f 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -81,13 +81,12 @@ try: import boto3 + import botocore import botocore.exceptions except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def get_server_certs(iam, name=None): @@ -151,10 +150,9 @@ def main(): date='2021-12-01', collection_name='community.aws') try: - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, 
**aws_connect_kwargs) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Boto3 Client Error - " + str(e.msg)) + iam = module.client('iam') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') cert_name = module.params.get('name') results = get_server_certs(iam, cert_name) diff --git a/kinesis_stream.py b/kinesis_stream.py index c16adbea3c2..51ca85ddc94 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -192,8 +192,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def convert_to_lower(data): @@ -1364,20 +1362,9 @@ def main(): check_mode = module.check_mode try: - region, ec2_url, aws_connect_kwargs = ( - get_aws_connection_info(module, boto3=True) - ) - client = ( - boto3_conn( - module, conn_type='client', resource='kinesis', - region=region, endpoint=ec2_url, **aws_connect_kwargs - ) - ) - except botocore.exceptions.ClientError as e: - err_msg = 'Boto3 Client Error - {0}'.format(to_native(e.msg)) - module.fail_json( - success=False, changed=False, result={}, msg=err_msg - ) + client = module.client('kinesis') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') if state == 'present': success, changed, err_msg, results = ( diff --git a/rds_param_group.py b/rds_param_group.py index 92261e61f8b..536698473e1 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -125,8 +125,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list @@ -328,16 +326,10 @@ def main(): required_if=[['state', 'present', ['description', 'engine']]], ) - # Retrieve any AWS settings from the environment. 
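A quick illustration of the camel_dict_to_snake_dict() helper these modules lean on when turning boto3 responses into Ansible return values (the sample response dict is invented):

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

# boto3 returns CamelCase keys; Ansible module return values are snake_case.
response = {'DBParameterGroupName': 'app-params', 'DBParameterGroupFamily': 'postgres12'}
print(camel_dict_to_snake_dict(response))
# {'db_parameter_group_name': 'app-params', 'db_parameter_group_family': 'postgres12'}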
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - - if not region: - module.fail_json(msg="Region must be present") - try: - conn = boto3_conn(module, conn_type='client', resource='rds', region=region, endpoint=ec2_url, **aws_connect_kwargs) - except botocore.exceptions.NoCredentialsError as e: - module.fail_json(msg="Couldn't connect to AWS: %s" % str(e)) + conn = module.client('rds') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get('state') if state == 'present': diff --git a/route53_info.py b/route53_info.py index 64caeaef23c..38d0bc540f5 100644 --- a/route53_info.py +++ b/route53_info.py @@ -214,8 +214,6 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 @@ -467,8 +465,10 @@ def main(): if not (HAS_BOTO or HAS_BOTO3): module.fail_json(msg='json and boto/boto3 is required.') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + route53 = module.client('route53') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') invocations = { 'change': change_details, diff --git a/s3_sync.py b/s3_sync.py index 12828c51958..8909b3524f3 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -245,8 +245,6 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception @@ -528,10 +526,10 @@ def main(): result = {} mode = module.params['mode'] - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg="Region must be specified") - s3 = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs) + try: + s3 = module.client('s3') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') if mode == 'push': try: diff --git a/s3_website.py b/s3_website.py index a281f60f37e..6f7aa898391 100644 --- a/s3_website.py +++ b/s3_website.py @@ -163,15 +163,14 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError, ParamValidationError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def _create_redirect_dict(url): @@ -305,13 +304,11 @@ def main(): ], ) - region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) - - if region: - client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) - resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) - else: - module.fail_json(msg="region must be specified") + try: + client_connection = module.client('s3') + resource_connection = module.resource('s3') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') state = module.params.get("state") diff --git a/sts_session_token.py b/sts_session_token.py index 48f05efe1a8..4183b976d15 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -81,13 +81,12 @@ try: import boto3 + import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info def normalize_credentials(credentials): @@ -136,11 +135,10 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region: - connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs) - else: - module.fail_json(msg="region must be specified") + try: + connection = module.client('sts') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') get_session_token(connection, module) From ca9fe5660bf139db7f1f1a4eb98fcb4109875f78 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 28 Aug 2020 02:12:41 +0200 Subject: [PATCH 040/683] ec2_instance - Fix check_mode behaviour with tags (#189) * Add test for changing tags in check_mode * ec2_instance: Fix check_mode behaviour with tags * Add changelog fragment --- ec2_instance.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ec2_instance.py b/ec2_instance.py index ddedd379573..e87f64cdf29 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -894,6 +894,8 @@ def manage_tags(match, new_tags, purge_tags, ec2): old_tags, new_tags, purge_tags=purge_tags, ) + if module.check_mode: + return bool(tags_to_delete or tags_to_set) if tags_to_set: ec2.create_tags( Resources=[match['InstanceId']], From 52f2fa645990a30357a9b7c6231a8687e2a85c55 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 28 Aug 2020 02:12:49 +0200 Subject: [PATCH 041/683] aws_api_gateway : Switch to jittered backoff and retry on TooManyRequests (#161) * aws_api_gateway : Switch to jittered backoff and retry on TooManyRequests * Mark aws_api_gateway stable again... 
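The retry decorator the hunks below switch to, sketched standalone (the retry parameters mirror the diff; delete_api is one of the module's own wrappers):

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

# Up to 10 attempts with jittered exponential backoff, additionally
# retrying on API Gateway's throttling error code.
retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']}


@AWSRetry.jittered_backoff(**retry_params)
def delete_api(client, api_id):
    return client.delete_rest_api(restApiId=api_id)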
* Add changelog --- aws_api_gateway.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 8be32b12289..08f276b6303 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -315,25 +315,25 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): return configure_response, deploy_response -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2} +retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']} -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def create_api(client, name=None, description=None, endpoint_type=None): return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]}) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def delete_api(client, api_id): return client.delete_rest_api(restApiId=api_id) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def configure_api(client, api_id, api_data=None, mode="overwrite"): return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def create_deployment(client, rest_api_id, **params): canary_settings = params.get('stage_canary_settings') From 6aa7c590c2866cf3e331d5582827a53350366b44 Mon Sep 17 00:00:00 2001 From: Pascal Morin Date: Thu, 10 Sep 2020 10:46:36 +0200 Subject: [PATCH 042/683] #223 Port rds_subnet_group to boto3 (#224) * Port rds_subnet_group to boto3 * Linting fixes * Add more meaningful error messages, add changelog fragment * Remove test on mandatory args for state absent --- rds_subnet_group.py | 185 +++++++++++++++++++++++++------------------- 1 file changed, 106 insertions(+), 79 deletions(-) diff --git a/rds_subnet_group.py b/rds_subnet_group.py index daa35abd0ad..bb0cc685a8a 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -1,8 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -66,10 +69,18 @@ type: complex contains: name: + description: The name of the DB subnet group (maintained for backward compatibility) + returned: I(state=present) + type: str + db_subnet_group_name: description: The name of the DB subnet group returned: I(state=present) type: str description: + description: The description of the DB subnet group (maintained for backward compatibility) + returned: I(state=present) + type: str + db_subnet_group_description: description: The description of the DB subnet group returned: I(state=present) type: str @@ -81,32 +92,32 @@ description: Contains a list of Subnet IDs returned: I(state=present) type: list + subnets: + description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa + returned: I(state=present) + type: list status: + description: The status of the DB subnet group (maintained for backward compatibility) + returned: I(state=present) + type: str + subnet_group_status: description: The status of the DB subnet group returned: I(state=present) type: str + db_subnet_group_arn: + description: The ARN of the DB subnet group + returned: I(state=present) + type: str ''' -try: - import boto.rds 
- from boto.exception import BotoServerError -except ImportError: - pass # Handled by HAS_BOTO +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - -def get_subnet_group_info(subnet_group): - return dict( - name=subnet_group.name, - description=subnet_group.description, - vpc_id=subnet_group.vpc_id, - subnet_ids=subnet_group.subnet_ids, - status=subnet_group.status - ) +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule def create_result(changed, subnet_group=None): @@ -114,11 +125,34 @@ def create_result(changed, subnet_group=None): return dict( changed=changed ) - else: - return dict( - changed=changed, - subnet_group=get_subnet_group_info(subnet_group) - ) + result_subnet_group = dict(camel_dict_to_snake_dict(subnet_group)) + result_subnet_group['name'] = result_subnet_group.get( + 'db_subnet_group_name') + result_subnet_group['description'] = result_subnet_group.get( + 'db_subnet_group_description') + result_subnet_group['status'] = result_subnet_group.get( + 'subnet_group_status') + result_subnet_group['subnet_ids'] = create_subnet_list( + subnet_group.get('Subnets')) + return dict( + changed=changed, + subnet_group=result_subnet_group + ) + + +def create_subnet_list(subnets): + ''' + Construct a list of subnet ids from a list of subnets dicts returned by boto. + Parameters: + subnets (list): A list of subnets definitions. + @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups + Returns: + (list): List of subnet ids (str) + ''' + subnets_ids = [] + for subnet in subnets: + subnets_ids.append(subnet.get('SubnetIdentifier')) + return subnets_ids def main(): @@ -128,70 +162,63 @@ def main(): description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), ) - module = AnsibleAWSModule(argument_spec=argument_spec) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - + required_if = [('state', 'present', ['description', 'subnets'])] + module = AnsibleAWSModule( + argument_spec=argument_spec, required_if=required_if) state = module.params.get('state') group_name = module.params.get('name').lower() group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or {} - - if state == 'present': - for required in ['description', 'subnets']: - if not module.params.get(required): - module.fail_json(msg=str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'subnets']: - if module.params.get(not_allowed): - module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. 
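What the new create_subnet_list() helper above does, shown with invented sample data (the loop in the patch is equivalent to this comprehension):

def create_subnet_list(subnets):
    # Reduce boto3's Subnets structures to bare subnet IDs.
    return [subnet.get('SubnetIdentifier') for subnet in subnets]


subnets = [
    {'SubnetIdentifier': 'subnet-aaaa1111', 'SubnetStatus': 'Active'},
    {'SubnetIdentifier': 'subnet-bbbb2222', 'SubnetStatus': 'Active'},
]
print(create_subnet_list(subnets))  # ['subnet-aaaa1111', 'subnet-bbbb2222']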
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - - if not region: - module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) + group_subnets = module.params.get('subnets') or [] try: - conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs) - except BotoServerError as e: - module.fail_json(msg=e.error_message) + conn = module.client('rds') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to instantiate AWS connection') + # Default. + result = create_result(False) try: - exists = False - result = create_result(False) - + matching_groups = conn.describe_db_subnet_groups( + DBSubnetGroupName=group_name, MaxRecords=100).get('DBSubnetGroups') + except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + # No existing subnet, create it if needed, else we can just exit. + if state == 'present': + try: + new_group = conn.create_db_subnet_group( + DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets) + result = create_result(True, new_group.get('DBSubnetGroup')) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to create a new subnet group') + module.exit_json(**result) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, 'Failed to get subnet groups description') + # We have one or more subnets at this point. + if state == 'absent': try: - matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100) - exists = len(matching_groups) > 0 - except BotoServerError as e: - if e.error_code != 'DBSubnetGroupNotFoundFault': - module.fail_json(msg=e.error_message) - - if state == 'absent': - if exists: - conn.delete_db_subnet_group(group_name) - result = create_result(True) - else: - if not exists: - new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) - result = create_result(True, new_group) - else: - # Sort the subnet groups before we compare them - matching_groups[0].subnet_ids.sort() - group_subnets.sort() - if (matching_groups[0].name != group_name or - matching_groups[0].description != group_description or - matching_groups[0].subnet_ids != group_subnets): - changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) - result = create_result(True, changed_group) - else: - result = create_result(False, matching_groups[0]) - except BotoServerError as e: - module.fail_json(msg=e.error_message) - - module.exit_json(**result) + conn.delete_db_subnet_group(DBSubnetGroupName=group_name) + result = create_result(True) + module.exit_json(**result) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to delete a subnet group') + + # Sort the subnet groups before we compare them + existing_subnets = create_subnet_list(matching_groups[0].get('Subnets')) + existing_subnets.sort() + group_subnets.sort() + # See if anything changed. + if (matching_groups[0].get('DBSubnetGroupName') == group_name and + matching_groups[0].get('DBSubnetGroupDescription') == group_description and + existing_subnets == group_subnets): + result = create_result(False, matching_groups[0]) + module.exit_json(**result) + # Modify existing group. 
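The 'not found' handling above relies on is_boto3_error_code() building a catchable exception class from an AWS error code; as a standalone sketch (the function and variable names are illustrative):

import botocore.exceptions

from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code


def describe_subnet_group(module, conn, group_name):
    # A missing group is an expected state, not a failure.
    try:
        return conn.describe_db_subnet_groups(DBSubnetGroupName=group_name)['DBSubnetGroups']
    except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
        return []
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, 'Failed to get subnet groups description')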
+ try: + changed_group = conn.modify_db_subnet_group( + DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets) + result = create_result(True, changed_group.get('DBSubnetGroup')) + module.exit_json(**result) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to update a subnet group') if __name__ == '__main__': From e1fafef6e08e2620aff18e30bf7e6d1522607602 Mon Sep 17 00:00:00 2001 From: Andreas Jonsson Date: Fri, 11 Sep 2020 07:32:25 -0700 Subject: [PATCH 043/683] cloudfront_distribution - added support for TLSv1.2_2019 as a minimum_protocol_versions (#226) Co-authored-by: Andreas Jonsson --- cloudfront_distribution.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 36e8e6bd1b8..5df16dc69db 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1579,7 +1579,8 @@ def __init__(self, module): 'TLSv1', 'TLSv1_2016', 'TLSv1.1_2016', - 'TLSv1.2_2018' + 'TLSv1.2_2018', + 'TLSv1.2_2019' ]) self.__valid_viewer_certificate_certificate_sources = set([ 'cloudfront', From 3d458c5f39e740f34e57f07fcecd313709b52e7d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 18 Sep 2020 23:08:45 +0200 Subject: [PATCH 044/683] ec2_scaling_policy - migrate to boto3 (#197) * ec2_scaling_policy - migrate to boto3 * Add changelog * docs linting * Add AWSRetry * Address review recommendations * Remove UI quirk work around --- ec2_scaling_policy.py | 383 ++++++++++++++++++++++++++++++++---------- 1 file changed, 291 insertions(+), 92 deletions(-) diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 0b3eca1c3a1..656519b43cb 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -6,178 +6,377 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: ec2_scaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups version_added: 1.0.0 description: - Can create or delete scaling policies for autoscaling groups. - Referenced autoscaling groups must already exist. -author: "Zacharie Eakin (@Zeekin)" +author: + - Zacharie Eakin (@zeekin) + - Will Thames (@willthames) options: state: + type: str description: - Register or deregister the policy. - default: present choices: ['present', 'absent'] - type: str + default: 'present' name: + type: str description: - Unique name for the scaling policy. required: true - type: str asg_name: + type: str description: - Name of the associated autoscaling group. - required: true - type: str + - Required if I(state) is C(present). adjustment_type: + type: str description: - The type of change in capacity of the autoscaling group. - choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity'] - type: str + - Required if I(state) is C(present). + choices: + - ChangeInCapacity + - ExactCapacity + - PercentChangeInCapacity scaling_adjustment: + type: int description: - The amount by which the autoscaling group is adjusted by the policy. - type: int + - A negative number has the effect of scaling down the ASG. + - Units are numbers of instances for C(ExactCapacity) or C(ChangeInCapacity) or percent + of existing instances for C(PercentChangeInCapacity). + - Required when I(policy_type) is C(SimpleScaling). min_adjustment_step: + type: int description: - Minimum amount of adjustment when policy is triggered. - type: int + - Only used when I(adjustment_type) is C(PercentChangeInCapacity). 
cooldown: + type: int description: - The minimum period of time (in seconds) between which autoscaling actions can take place. + - Only used when I(policy_type) is C(SimpleScaling). + policy_type: + type: str + description: + - Auto scaling adjustment policy. + choices: + - StepScaling + - SimpleScaling + default: SimpleScaling + metric_aggregation: + type: str + description: + - The aggregation type for the CloudWatch metrics. + - Only used when I(policy_type) is not C(SimpleScaling). + choices: + - Minimum + - Maximum + - Average + default: Average + step_adjustments: + type: list + description: + - list of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment) + - Intervals must not overlap or have a gap between them. + - At most, one item can have an undefined I(lower_bound). + If any item has a negative lower_bound, then there must be a step adjustment with an undefined I(lower_bound). + - At most, one item can have an undefined I(upper_bound). + If any item has a positive upper_bound, then there must be a step adjustment with an undefined I(upper_bound). + - The bounds are the amount over the alarm threshold at which the adjustment will trigger. + This means that for an alarm threshold of 50, triggering at 75 requires a lower bound of 25. + See U(http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_StepAdjustment.html). + elements: dict + suboptions: + lower_bound: + type: int + description: + - The lower bound for the difference between the alarm threshold and + the CloudWatch metric. + upper_bound: + type: int + description: + - The upper bound for the difference between the alarm threshold and + the CloudWatch metric. + scaling_adjustment: + type: int + description: + - The amount by which to scale. + required: true + estimated_instance_warmup: type: int + description: + - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 ''' - EXAMPLES = ''' -- community.aws.ec2_scaling_policy: +- name: Simple Scale Down policy + community.aws.ec2_scaling_policy: state: present region: US-XXX name: "scaledown-policy" adjustment_type: "ChangeInCapacity" - asg_name: "slave-pool" + asg_name: "application-asg" scaling_adjustment: -1 min_adjustment_step: 1 cooldown: 300 + +# For an alarm with a breach threshold of 20, the +# following creates a stepped policy: +# From 20-40 (0-20 above threshold), increase by 50% of existing capacity +# From 41-infinity, increase by 100% of existing capacity +- community.aws.ec2_scaling_policy: + state: present + region: US-XXX + name: "step-scale-up-policy" + policy_type: StepScaling + metric_aggregation: Maximum + step_adjustments: + - upper_bound: 20 + scaling_adjustment: 50 + - lower_bound: 20 + scaling_adjustment: 100 + adjustment_type: "PercentChangeInCapacity" + asg_name: "application-asg" +''' + +RETURN = ''' +adjustment_type: + description: Scaling policy adjustment type + returned: always + type: str + sample: PercentChangeInCapacity +alarms: + description: Cloudwatch alarms related to the policy + returned: always + type: complex + contains: + alarm_name: + description: name of the Cloudwatch alarm + returned: always + type: str + sample: cpu-very-high + alarm_arn: + description: ARN of the Cloudwatch alarm + returned: always + type: str + sample: arn:aws:cloudwatch:us-east-2:1234567890:alarm:cpu-very-high +arn: + description: ARN of the scaling policy. 
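To make the bound semantics concrete, a small sketch of how step_adjustments translate into boto3 StepAdjustments (this closely mirrors the conversion the module performs; the helper name is illustrative):

def to_step_adjustments(step_adjustments):
    steps = []
    for step in step_adjustments:
        step_params = {'ScalingAdjustment': step['scaling_adjustment']}
        if step.get('lower_bound') is not None:
            step_params['MetricIntervalLowerBound'] = step['lower_bound']
        if step.get('upper_bound') is not None:
            step_params['MetricIntervalUpperBound'] = step['upper_bound']
        steps.append(step_params)
    return steps


# For an alarm threshold of 50, triggering at 75 needs lower_bound=25:
print(to_step_adjustments([{'lower_bound': 25, 'scaling_adjustment': 1}]))
# [{'ScalingAdjustment': 1, 'MetricIntervalLowerBound': 25}]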
Provided for backward compatibility, value is the same as I(policy_arn) + returned: always + type: str + sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy +as_name: + description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name) + returned: always + type: str + sample: app-asg +auto_scaling_group_name: + description: Name of Auto Scaling Group + returned: always + type: str + sample: app-asg +metric_aggregation_type: + description: Method used to aggregate metrics + returned: when I(policy_type) is C(StepScaling) + type: str + sample: Maximum +name: + description: Name of the scaling policy. Provided for backward compatibility, value is the same as I(policy_name) + returned: always + type: str + sample: app-policy +policy_arn: + description: ARN of scaling policy. + returned: always + type: str + sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy +policy_name: + description: Name of scaling policy + returned: always + type: str + sample: app-policy +policy_type: + description: Type of auto scaling policy + returned: always + type: str + sample: StepScaling +scaling_adjustment: + description: Adjustment to make when alarm is triggered + returned: When I(policy_type) is C(SimpleScaling) + type: int + sample: 1 +step_adjustments: + description: List of step adjustments + returned: always + type: complex + contains: + metric_interval_lower_bound: + description: Lower bound for metric interval + returned: if step has a lower bound + type: float + sample: 20.0 + metric_interval_upper_bound: + description: Upper bound for metric interval + returned: if step has an upper bound + type: float + sample: 40.0 + scaling_adjustment: + description: Adjustment to make if this step is reached + returned: always + type: int + sample: 50 ''' try: - import boto.ec2.autoscale - import boto.exception - from boto.ec2.autoscale import ScalingPolicy - from boto.exception import BotoServerError + import botocore except ImportError: - pass # Taken care of by ec2.HAS_BOTO + pass # caught by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def create_scaling_policy(connection, module): - sp_name = module.params.get('name') - adjustment_type = module.params.get('adjustment_type') - asg_name = module.params.get('asg_name') - scaling_adjustment = module.params.get('scaling_adjustment') - min_adjustment_step = module.params.get('min_adjustment_step') - cooldown = module.params.get('cooldown') - - scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name]) - - if not scalingPolicies: - sp = ScalingPolicy( - name=sp_name, - adjustment_type=adjustment_type, - as_name=asg_name, - scaling_adjustment=scaling_adjustment, - min_adjustment_step=min_adjustment_step, - 
cooldown=cooldown) + changed = False + asg_name = module.params['asg_name'] + policy_type = module.params['policy_type'] + policy_name = module.params['name'] - try: - connection.create_scaling_policy(sp) - policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0] - module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, - cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError as e: - module.fail_json(msg=str(e)) - else: - policy = scalingPolicies[0] - changed = False + params = dict(PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name, + AdjustmentType=module.params['adjustment_type']) - # min_adjustment_step attribute is only relevant if the adjustment_type - # is set to percentage change in capacity, so it is a special case - if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity': - if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'): - changed = True + # min_adjustment_step attribute is only relevant if the adjustment_type + # is set to percentage change in capacity, so it is a special case + if module.params['adjustment_type'] == 'PercentChangeInCapacity': + if module.params['min_adjustment_step']: + params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step'] + + if policy_type == 'SimpleScaling': + # can't use required_if because it doesn't allow multiple criteria - + # it's only required if policy is SimpleScaling and state is present + if not module.params['scaling_adjustment']: + module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling ' + 'and state is present') + params['ScalingAdjustment'] = module.params['scaling_adjustment'] + if module.params['cooldown']: + params['Cooldown'] = module.params['cooldown'] + + if policy_type == 'StepScaling': + if not module.params['step_adjustments']: + module.fail_json(msg='step_adjustments is required when policy_type is StepScaling ' + 'and state is present') + params['StepAdjustments'] = [] + for step_adjustment in module.params['step_adjustments']: + step_adjust_params = dict(ScalingAdjustment=step_adjustment['scaling_adjustment']) + if step_adjustment.get('lower_bound'): + step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound'] + if step_adjustment.get('upper_bound'): + step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound'] + params['StepAdjustments'].append(step_adjust_params) + if module.params['metric_aggregation']: + params['MetricAggregationType'] = module.params['metric_aggregation'] + if module.params['estimated_instance_warmup']: + params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] - # set the min adjustment step in case the user decided to change their - # adjustment type to percentage - setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step')) + try: + policies = connection.describe_policies(aws_retry=True, + AutoScalingGroupName=asg_name, + PolicyNames=[policy_name])['ScalingPolicies'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) - # check the remaining attributes - for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'): - if getattr(policy, attr) != module.params.get(attr): + before = 
{} + after = {} + if not policies: + changed = True + else: + policy = policies[0] + for key in params: + if params[key] != policy.get(key): changed = True - setattr(policy, attr, module.params.get(attr)) + before[key] = policy.get(key) + after[key] = params[key] + if changed: + try: + connection.put_scaling_policy(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create autoscaling policy") try: - if changed: - connection.create_scaling_policy(policy) - policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0] - module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, - cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError as e: - module.fail_json(msg=str(e)) + policies = connection.describe_policies(aws_retry=True, + AutoScalingGroupName=asg_name, + PolicyNames=[policy_name])['ScalingPolicies'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + + policy = camel_dict_to_snake_dict(policies[0]) + # Backward compatible return values + policy['arn'] = policy['policy_arn'] + policy['as_name'] = policy['auto_scaling_group_name'] + policy['name'] = policy['policy_name'] + + if before and after: + module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) + else: + module.exit_json(changed=changed, **policy) def delete_scaling_policy(connection, module): - sp_name = module.params.get('name') - asg_name = module.params.get('asg_name') + policy_name = module.params.get('name') - scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name]) + try: + policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) - if scalingPolicies: + if policy['ScalingPolicies']: try: - connection.delete_policy(sp_name, asg_name) + connection.delete_policy(aws_retry=True, + AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'], + PolicyName=policy_name) module.exit_json(changed=True) - except BotoServerError as e: - module.exit_json(changed=False, msg=str(e)) - else: - module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete autoscaling policy") + + module.exit_json(changed=False) def main(): + step_adjustment_spec = dict( + lower_bound=dict(type='int'), + upper_bound=dict(type='int'), + scaling_adjustment=dict(type='int', required=True)) + argument_spec = dict( - name=dict(required=True, type='str'), - adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), - asg_name=dict(required=True, type='str'), + name=dict(required=True), + adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + asg_name=dict(), scaling_adjustment=dict(type='int'), min_adjustment_step=dict(type='int'), cooldown=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), + metric_aggregation=dict(default='Average', choices=['Minimum', 'Maximum', 'Average']), +
policy_type=dict(default='SimpleScaling', choices=['SimpleScaling', 'StepScaling']), + step_adjustments=dict(type='list', options=step_adjustment_spec, elements='dict'), + estimated_instance_warmup=dict(type='int') ) - module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[['state', 'present', ['asg_name', 'adjustment_type']]]) - region, ec2_url, aws_connect_params = get_aws_connection_info(module) + connection = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get('state') - - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json(msg=str(e)) - if state == 'present': create_scaling_policy(connection, module) elif state == 'absent': From 2d53cf76c34c129096204c7a95a42f4d8d6dd6ee Mon Sep 17 00:00:00 2001 From: Tom Benninger Date: Fri, 25 Sep 2020 08:50:48 -0400 Subject: [PATCH 045/683] Type conversion issue for creating read replicas (#229) * Type conversion issue for creating read replicas * Added changelog snippet * Updated tests --- rds_instance.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index e93322cc61b..ea2e8ba11b1 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -842,8 +842,10 @@ def get_parameters(client, module, parameters, method_name): if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': parameters.pop('ProcessorFeatures') - if method_name == 'create_db_instance' and parameters.get('Tags'): - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name == 'create_db_instance' or method_name == 'create_db_instance_read_replica': + if parameters.get('Tags'): + parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name == 'modify_db_instance': parameters = get_options_with_changing_values(client, module, parameters) From 472b20196c5826e5fa71e6a122cef2f76452581c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 8 Oct 2020 20:47:10 +0200 Subject: [PATCH 046/683] Initial tests for kinesis_stream (#42) * Initial tests for kinesis_stream * Add exceptions for broken check_mode behaviour (idemmpotency) * kinesis_stream requires shards to be defined, update the docs to match * Add tests for tagging and retention period * Yet more tests (and bugs) * Disable Kinesis KMS tests - not supported in CI right now * Apply minor changes from review Co-authored-by: Jill R <4121322+jillr@users.noreply.github.com> Co-authored-by: Jill R <4121322+jillr@users.noreply.github.com> --- kinesis_stream.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kinesis_stream.py b/kinesis_stream.py index 51ca85ddc94..4183444ebb3 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -128,6 +128,7 @@ community.aws.kinesis_stream: name: test-stream state: present + shards: 1 encryption_state: enabled encryption_type: KMS key_id: alias/aws/kinesis @@ -140,6 +141,7 @@ community.aws.kinesis_stream: name: test-stream state: present + shards: 1 encryption_state: disabled encryption_type: KMS key_id: alias/aws/kinesis From 65047be844d2c28c3ebe9bb7c4d0b8a860d7d90d Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 9 Oct 2020 03:02:36 +0200 Subject: [PATCH 047/683] support scan_on_push for ecs_ecr (#248) * #247 first 
draft to support scan_on_push * #247 use *aws_connection_info for integration test * #247 fix linting * #247 remove q * #247 use module_defaults for integrationtest * #247 remove last connection fragment * #247 set module_defaults on block * #247 handle check_mode when registry does not exist * #247 fix output, append test with checkmode and test result values * #247 add note about botocore version Co-authored-by: Markus Bergholz --- ecs_ecr.py | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 533792877eb..8e344665a0e 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -58,12 +58,12 @@ type: str lifecycle_policy: description: - - JSON or dict that represents the new lifecycle policy + - JSON or dict that represents the new lifecycle policy. required: false type: json purge_lifecycle_policy: description: - - if yes, remove the lifecycle policy from the repository + - if yes, remove the lifecycle policy from the repository. required: false default: false type: bool @@ -74,6 +74,14 @@ choices: [present, absent] default: 'present' type: str + scan_on_push: + description: + - if yes, images are scanned for known vulnerabilities after being pushed to the repository. + - I(scan_on_push) requires botocore >= 1.13.3 + required: false + default: false + type: bool + version_added: 1.3.0 author: - David M. Lee (@leedm777) extends_documentation_fragment: @@ -132,6 +140,7 @@ - name: set-lifecycle-policy community.aws.ecs_ecr: name: needs-lifecycle-policy + scan_on_push: yes lifecycle_policy: rules: - rulePriority: 1 @@ -355,6 +364,25 @@ def purge_lifecycle_policy(self, registry_id, name): return policy return None + def put_image_scanning_configuration(self, registry_id, name, scan_on_push): + if not self.check_mode: + if registry_id: + scan = self.ecr.put_image_scanning_configuration( + registryId=registry_id, + repositoryName=name, + imageScanningConfiguration={'scanOnPush': scan_on_push} + ) + else: + scan = self.ecr.put_image_scanning_configuration( + repositoryName=name, + imageScanningConfiguration={'scanOnPush': scan_on_push} + ) + self.changed = True + return scan + else: + self.skipped = True + return None + def sort_lists_of_strings(policy): for statement_index in range(0, len(policy.get('Statement', []))): @@ -378,6 +406,7 @@ def run(ecr, params): image_tag_mutability = params['image_tag_mutability'].upper() lifecycle_policy_text = params['lifecycle_policy'] purge_lifecycle_policy = params['purge_lifecycle_policy'] + scan_on_push = params['scan_on_push'] # Parse policies, if they are given try: @@ -474,6 +503,13 @@ def run(ecr, params): result['policy'] = policy_text raise + original_scan_on_push = ecr.get_repository(registry_id, name) + if original_scan_on_push is not None: + if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']: + result['changed'] = True + result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push + response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) + elif state == 'absent': result['name'] = name if repo: @@ -510,7 +546,8 @@ def main(): purge_policy=dict(required=False, type='bool', aliases=['delete_policy'], deprecated_aliases=[dict(name='delete_policy', date='2022-06-01', collection_name='community.aws')]), lifecycle_policy=dict(required=False, type='json'), - purge_lifecycle_policy=dict(required=False, type='bool') + purge_lifecycle_policy=dict(required=False, type='bool'), + 
scan_on_push=(dict(required=False, type='bool', default=False)) ) mutually_exclusive = [ ['policy', 'purge_policy'], From 734e56c2dd3958dc5b3a48b568d844e0a46195a3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 19 Oct 2020 19:59:22 +0200 Subject: [PATCH 048/683] catch ImportError rather than Exception when testing Boto3 imports (#267) --- aws_direct_connect_confirm_connection.py | 5 ++--- aws_direct_connect_connection.py | 5 ++--- aws_direct_connect_link_aggregation_group.py | 2 +- aws_s3_cors.py | 2 +- ec2_transit_gateway.py | 5 ++--- ec2_transit_gateway_info.py | 5 ++--- lambda_policy.py | 2 +- rds_snapshot_info.py | 2 +- 8 files changed, 12 insertions(+), 16 deletions(-) diff --git a/aws_direct_connect_confirm_connection.py b/aws_direct_connect_confirm_connection.py index ba85f94eff3..948aa63c81c 100644 --- a/aws_direct_connect_confirm_connection.py +++ b/aws_direct_connect_confirm_connection.py @@ -67,9 +67,8 @@ try: from botocore.exceptions import BotoCoreError, ClientError -except Exception: - pass - # handled by imported AnsibleAWSModule +except ImportError: + pass # handled by imported AnsibleAWSModule retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 2fbda9124bb..9899b742fb4 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -166,9 +166,8 @@ try: from botocore.exceptions import BotoCoreError, ClientError -except Exception: - pass - # handled by imported AnsibleAWSModule +except ImportError: + pass # handled by imported AnsibleAWSModule retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index fec3f3dfc99..e3fae3ccf06 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -166,7 +166,7 @@ try: import botocore -except Exception: +except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 0577c955e91..820530dc08d 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -97,7 +97,7 @@ try: from botocore.exceptions import ClientError, BotoCoreError -except Exception: +except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 5a7ea4b248c..a0595b4b7e1 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -222,9 +222,8 @@ try: from botocore.exceptions import BotoCoreError, ClientError -except Exception: - pass - # handled by imported AnsibleAWSModule +except ImportError: + pass # handled by imported AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from time import sleep, time diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 676862185b4..2eacf01cd96 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -166,9 +166,8 @@ try: from botocore.exceptions import BotoCoreError, ClientError -except Exception: - pass - # handled by imported AnsibleAWSModule +except ImportError: + pass # handled by imported AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core 
import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( diff --git a/lambda_policy.py b/lambda_policy.py index 2860e3a6540..2fb4b4ddead 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -139,7 +139,7 @@ try: from botocore.exceptions import ClientError -except Exception: +except ImportError: pass # caught by AnsibleAWSModule diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 1d7003ae55c..63a5e47b09b 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -298,7 +298,7 @@ try: import botocore -except Exception: +except ImportError: pass # caught by AnsibleAWSModule From 346772e8e6fd7114cced33d8a718fc418d86df79 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 19 Oct 2020 21:51:34 +0200 Subject: [PATCH 049/683] Improve elb_target_group stability (#263) * Mark elb_target as stable so we run it a bit more. * Split imports for easier patches * Use module helper for botocore versioning * Add retry decorator to most AWS calls. This improves stability in a busy account. * When creating a new target group retry the describe_target_groups calls. This improves stability for eventually consistent calls. --- elb_target_group.py | 77 ++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index fe4b749a63a..e6c94f06286 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -380,17 +380,18 @@ pass # caught by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - compare_aws_tags, - ansible_dict_to_boto3_tag_list, - ) -from distutils.version import LooseVersion +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list def get_tg_attributes(connection, module, tg_arn): try: - tg_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=tg_arn)['Attributes']) + _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True) + tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") @@ -400,20 +401,24 @@ def get_tg_attributes(connection, module, tg_arn): def get_target_group_tags(connection, module, target_group_arn): try: - return connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'] + _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True) + return _tags['TagDescriptions'][0]['Tags'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group tags") -def get_target_group(connection, module): +def get_target_group(connection, module, retry_missing=False): + extra_codes = ['TargetGroupNotFound'] if 
retry_missing else [] try: - target_group_paginator = connection.get_paginator('describe_target_groups') - return (target_group_paginator.paginate(Names=[module.params.get("name")]).build_full_result())['TargetGroups'][0] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if e.response['Error']['Code'] == 'TargetGroupNotFound': - return None - else: - module.fail_json_aws(e, msg="Couldn't get target group") + target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")]) + jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes) + result = jittered_retry(target_group_paginator.build_full_result)() + except is_boto3_error_code('TargetGroupNotFound'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get target group") + + return result['TargetGroups'][0] def wait_for_status(connection, module, target_group_arn, targets, status): @@ -423,7 +428,7 @@ def wait_for_status(connection, module, target_group_arn, targets, status): for x in range(0, max_retries): try: - response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets) + response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True) if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status: status_achieved = True break @@ -437,7 +442,7 @@ def wait_for_status(connection, module, target_group_arn, targets, status): def fail_if_ip_target_type_not_supported(module): - if LooseVersion(botocore.__version__) < LooseVersion('1.7.2'): + if not module.botocore_at_least('1.7.2'): module.fail_json(msg="target_type ip requires botocore version 1.7.2 or later. 
Version %s is installed" % botocore.__version__) @@ -559,7 +564,7 @@ def create_or_update_target_group(connection, module): try: if health_check_params: - connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], **health_check_params) + connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True, **health_check_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update target group") @@ -570,7 +575,7 @@ def create_or_update_target_group(connection, module): # describe_target_health seems to be the only way to get them try: current_targets = connection.describe_target_health( - TargetGroupArn=tg['TargetGroupArn']) + TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group health") @@ -602,7 +607,7 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add) + connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") @@ -621,7 +626,7 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove) + connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") @@ -651,7 +656,8 @@ def create_or_update_target_group(connection, module): { "Id": target['Id'] } - ] + ], + aws_retry=True ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -669,7 +675,7 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove) + connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") @@ -687,22 +693,22 @@ def create_or_update_target_group(connection, module): target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] if changed: connection.deregister_targets( - TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}]) + TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) else: try: - connection.create_target_group(**params) + connection.create_target_group(aws_retry=True, **params) changed = True new_target_group = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create target group") - tg = get_target_group(connection, module) + tg = get_target_group(connection, module, retry_missing=True) if module.params.get("targets"): if target_type != "lambda": params['Targets'] = module.params.get("targets") try: - connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets']) + connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets'], 
aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") @@ -720,7 +726,8 @@ def create_or_update_target_group(connection, module): { "Id": target["Id"] } - ] + ], + aws_retry=True ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -748,12 +755,12 @@ def create_or_update_target_group(connection, module): if update_attributes: try: - connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], Attributes=update_attributes) + connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state if new_target_group: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn']) + connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) module.fail_json_aws(e, msg="Couldn't delete target group") # Tags - only need to play with tags if tags parameter has been set to something @@ -765,7 +772,7 @@ def create_or_update_target_group(connection, module): tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags) if tags_to_delete: try: - connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete) + connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags from target group") changed = True @@ -773,7 +780,7 @@ def create_or_update_target_group(connection, module): # Add/update tags if tags_need_modify: try: - connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) + connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to target group") changed = True @@ -798,7 +805,7 @@ def delete_target_group(connection, module): if tg: try: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn']) + connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete target group") @@ -846,7 +853,7 @@ def main(): if module.params.get('target_type') is None: module.params['target_type'] = 'instance' - connection = module.client('elbv2') + connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) if module.params.get('state') == 'present': create_or_update_target_group(connection, module) From dcc8da72b4fdb827ec155c6ea4ba2cc39a775cac Mon Sep 17 00:00:00 2001 From: philltomlinson <6190209+philltomlinson@users.noreply.github.com> Date: Tue, 20 Oct 2020 17:40:46 +0100 Subject: [PATCH 050/683] Add additional storage classes to S3 lifecycle transition list. (#270) * Add additional storage classes to S3 lifecycle transition list. * Add minor changes changelog for S3 lifecycle transition list change. 
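To make the change below concrete: once 'intelligent_tiering' and 'deep_archive' are accepted choices, a transition rule can target those classes directly. What follows is a minimal boto3 sketch of the kind of lifecycle payload such a rule corresponds to; the bucket name, rule ID and prefix are placeholder values, and the upper-cased storage class is the S3 API spelling of the module's lowercase choice:

import boto3

s3 = boto3.client('s3')

# Move objects under 'logs/' to Glacier Deep Archive after 90 days.
s3.put_bucket_lifecycle_configuration(
    Bucket='example-bucket',  # placeholder bucket name
    LifecycleConfiguration={
        'Rules': [
            {
                'ID': 'archive-old-logs',  # placeholder rule ID
                'Filter': {'Prefix': 'logs/'},
                'Status': 'Enabled',
                # 'DEEP_ARCHIVE' is the API-side spelling of the new 'deep_archive' choice
                'Transitions': [{'Days': 90, 'StorageClass': 'DEEP_ARCHIVE'}],
            },
        ],
    },
)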
--- s3_lifecycle.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index a2518a88570..5edceea50bf 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -56,7 +56,7 @@ description: - 'Transition noncurrent versions to this storage class' default: glacier - choices: ['glacier', 'onezone_ia', 'standard_ia'] + choices: ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] required: false type: str noncurrent_version_transition_days: @@ -91,10 +91,10 @@ type: str storage_class: description: - - "The storage class to transition to. Currently there are two supported values - 'glacier', 'onezone_ia', or 'standard_ia'." + - "The storage class to transition to." - "The 'standard_ia' class is only being available from Ansible version 2.2." default: glacier - choices: [ 'glacier', 'onezone_ia', 'standard_ia'] + choices: [ 'glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] type: str transition_date: description: @@ -437,12 +437,13 @@ def destroy_lifecycle_rule(client, module): def main(): + s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] argument_spec = dict( name=dict(required=True, type='str'), expiration_days=dict(type='int'), expiration_date=dict(), noncurrent_version_expiration_days=dict(type='int'), - noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']), + noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), noncurrent_version_transition_days=dict(type='int'), noncurrent_version_transitions=dict(type='list', elements='dict'), prefix=dict(), @@ -450,7 +451,7 @@ def main(): rule_id=dict(), state=dict(default='present', choices=['present', 'absent']), status=dict(default='enabled', choices=['enabled', 'disabled']), - storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']), + storage_class=dict(default='glacier', type='str', choices=s3_storage_class), transition_days=dict(type='int'), transition_date=dict(), transitions=dict(type='list', elements='dict'), From 7a4ff3c411d09e43ec867bf42f70c3d82ae21a64 Mon Sep 17 00:00:00 2001 From: Rafael Driutti Date: Wed, 21 Oct 2020 17:07:25 +0200 Subject: [PATCH 051/683] Add tag support for redshift module (#34) * tag support for redshift module * add changelog * Switch to using module_defaults for the tests --- redshift.py | 85 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 74 insertions(+), 11 deletions(-) diff --git a/redshift.py b/redshift.py index 9b6cd5bbbe3..7c992685494 100644 --- a/redshift.py +++ b/redshift.py @@ -167,11 +167,22 @@ - Whether the cluster should have enhanced VPC routing enabled. default: false type: bool + tags: + description: + - A dictionary of resource tags. + type: dict + aliases: ['resource_tags'] + version_added: "1.3.0" + purge_tags: + description: + - Purge existing tags that are not found in the cluster + type: bool + default: 'yes' + version_added: "1.3.0" requirements: [ 'boto3' ] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -251,6 +262,10 @@ description: status of the enhanced vpc routing feature. returned: success type: bool + tags: + description: aws tags for cluster. 
+ returned: success + type: dict ''' try: @@ -258,10 +273,41 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id + + +def _ensure_tags(redshift, identifier, existing_tags, module): + """Compares and update resource tags""" + + account_id = get_aws_account_id(module) + region = module.params.get('region') + resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier) + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + + tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags) + + if tags_to_add: + try: + redshift.create_tags(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to add tags to cluster") + + if tags_to_remove: + try: + redshift.delete_tags(ResourceName=resource_arn, TagKeys=tags_to_remove) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete tags on cluster") + + changed = bool(tags_to_add or tags_to_remove) + return changed def _collect_facts(resource): @@ -291,12 +337,14 @@ def _collect_facts(resource): facts['url'] = None facts['port'] = None facts['availability_zone'] = None + facts['tags'] = {} if resource['ClusterStatus'] != "creating": facts['create_time'] = resource['ClusterCreateTime'] facts['url'] = resource['Endpoint']['Address'] facts['port'] = resource['Endpoint']['Port'] facts['availability_zone'] = resource['AvailabilityZone'] + facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags']) return facts @@ -357,6 +405,7 @@ def create_cluster(module, redshift): d_b_name = module.params.get('db_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') + tags = module.params.get('tags') changed = True # Package up the optional parameters @@ -367,14 +416,17 @@ def create_cluster(module, redshift): 'cluster_parameter_group_name', 'automated_snapshot_retention_period', 'port', 'cluster_version', 'allow_version_upgrade', - 'number_of_nodes', 'publicly_accessible', - 'encrypted', 'elastic_ip', 'enhanced_vpc_routing'): + 'number_of_nodes', 'publicly_accessible', 'encrypted', + 'elastic_ip', 'enhanced_vpc_routing'): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) if d_b_name: params['d_b_name'] = d_b_name + if tags: + tags = ansible_dict_to_boto3_tag_list(tags) + params['tags'] = tags try: _describe_cluster(redshift, identifier) @@ -406,6 +458,11 @@ def 
create_cluster(module, redshift):
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Failed to describe cluster")

+    if tags:
+        if _ensure_tags(redshift, identifier, resource['Tags'], module):
+            changed = True
+            resource = _describe_cluster(redshift, identifier)
+
     return(changed, _collect_facts(resource))
@@ -481,6 +538,9 @@ def modify_cluster(module, redshift):
     identifier = module.params.get('identifier')
     wait = module.params.get('wait')
     wait_timeout = module.params.get('wait_timeout')
+    tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')
+    region = module.params.get('region')

     # Package up the optional parameters
     params = {}
@@ -509,12 +569,10 @@ def modify_cluster(module, redshift):
         try:
             waiter.wait(
                 ClusterIdentifier=identifier,
-                WaiterConfig=dict(MaxAttempts=attempts)
-            )
+                WaiterConfig=dict(MaxAttempts=attempts))
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e,
-                                 msg="Timeout waiting for cluster enhanced vpc routing modification"
-                                 )
+                                 msg="Timeout waiting for cluster enhanced vpc routing modification")

     # change the rest
     try:
@@ -543,6 +601,9 @@ def modify_cluster(module, redshift):
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json(e, msg="Couldn't modify redshift cluster %s " % identifier)

+    if _ensure_tags(redshift, identifier, resource['Tags'], module):
+        resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+
     return(True, _collect_facts(resource))
@@ -579,6 +640,8 @@ def main():
         enhanced_vpc_routing=dict(type='bool', default=False),
         wait=dict(type='bool', default=False),
         wait_timeout=dict(type='int', default=300),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True)
     )

     required_if = [

From 11d69e74f911fe35054fda64f38b1d72104b66b6 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Fri, 23 Oct 2020 00:25:24 +0200
Subject: [PATCH 052/683] stability: Increase the number of retries on ec2_instance tests (#187)

* Split imports into individual lines
* Increase the max_attempts retries for the ec2_instance tests, we're running a *lot* in parallel which triggers RateLimiting errors

---
 ec2_instance.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/ec2_instance.py b/ec2_instance.py
index e87f64cdf29..aba7ac26b10 100644
--- a/ec2_instance.py
+++ b/ec2_instance.py
@@ -810,17 +810,17 @@

 from ansible.module_utils.six import string_types
 from ansible.module_utils.six.moves.urllib import parse as urlparse
+from ansible.module_utils._text import to_bytes
 from ansible.module_utils._text import to_native

-import ansible_collections.amazon.aws.plugins.module_utils.ec2 as ec2_utils
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry,
-                                                                     ansible_dict_to_boto3_filter_list,
-                                                                     compare_aws_tags,
-                                                                     boto3_tag_list_to_ansible_dict,
-                                                                     ansible_dict_to_boto3_tag_list,
-                                                                     camel_dict_to_snake_dict,
-                                                                     )
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from
ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict module = None @@ -917,7 +917,7 @@ def build_volume_spec(params): for int_value in ['volume_size', 'iops']: if int_value in volume['ebs']: volume['ebs'][int_value] = int(volume['ebs'][int_value]) - return [ec2_utils.snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] + return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] def add_or_update_instance_profile(instance, desired_profile_name): From 0dccd0cabf119825a8c6eb621c1cfecf36af6c03 Mon Sep 17 00:00:00 2001 From: Giuseppe Chiesa <68604164+GiuseppeChiesa-TomTom@users.noreply.github.com> Date: Tue, 3 Nov 2020 13:46:07 +0100 Subject: [PATCH 053/683] fix chunk_size calculation by using boto3 S3 Transport defaults (#273) * fix chunk_size calculation by using boto3 S3 Transport defaults since defaults are used also for the upload function * implemented some integration tests for s3_sync * added changelog fragment --- s3_sync.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/s3_sync.py b/s3_sync.py index 8909b3524f3..78326587941 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -237,7 +237,10 @@ try: import botocore + from boto3.s3.transfer import TransferConfig + DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize except ImportError: + DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_text @@ -270,10 +273,6 @@ # # You should have received a copy of the GNU General Public License # along with calculate_multipart_etag. If not, see . - -DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 - - def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): """ calculates a multipart upload etag for amazon s3 From ead9bfc73f4b1dd01bbab4965dfe2d2d9c0e7381 Mon Sep 17 00:00:00 2001 From: nikhilpatne <66066035+nikhilpatne@users.noreply.github.com> Date: Mon, 9 Nov 2020 21:54:45 +0530 Subject: [PATCH 054/683] [BUG_FIX] ec2_win_password issue. 
(#283) * [BUG_FIX] ec2_win_password issue ( #142 ) * Creating a changelog fragment --- ec2_win_password.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ec2_win_password.py b/ec2_win_password.py index 8f46da5602d..ed06f705485 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -187,13 +187,13 @@ def ec2_win_password(module): decrypted = None if decrypted is None: - module.exit_json(win_password='', changed=False) + module.fail_json(msg="unable to decrypt password", win_password='', changed=False) else: if wait: elapsed = datetime.datetime.now() - start - module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds) + module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds) else: - module.exit_json(win_password=decrypted, changed=True) + module.exit_json(win_password=decrypted, changed=False) def main(): From d46d1d47aead45c6ea3670b178d6ba7f53d65376 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 24 Sep 2020 20:07:25 +0200 Subject: [PATCH 055/683] rds_instance: Set no_log=False on force_update_password --- rds_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rds_instance.py b/rds_instance.py index ea2e8ba11b1..3aa9c7f67dc 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -1078,7 +1078,7 @@ def main(): arg_spec = dict( state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), creation_source=dict(choices=['snapshot', 's3', 'instance']), - force_update_password=dict(type='bool', default=False), + force_update_password=dict(type='bool', default=False, no_log=False), purge_cloudwatch_logs_exports=dict(type='bool', default=True), purge_tags=dict(type='bool', default=True), read_replica=dict(type='bool'), From 304300850f9ed829da42c55e8b6a08cfafb954e0 Mon Sep 17 00:00:00 2001 From: Sakar Date: Thu, 12 Nov 2020 00:54:27 -0500 Subject: [PATCH 056/683] Minor documentation fix [aws_api_gateway] (#291) --- aws_api_gateway.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 08f276b6303..5267daf7079 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -17,12 +17,12 @@ - Allows for the management of API Gateway APIs - Normally you should give the api_id since there is no other stable guaranteed unique identifier for the API. If you do - not give api_id then a new API will be create each time + not give api_id then a new API will be created each time this is run. - Beware that there are very hard limits on the rate that you can call API Gateway's REST API. You may need to patch your boto. See U(https://github.com/boto/boto3/issues/876) - and discuss with your AWS rep. + and discuss it with your AWS rep. - swagger_file and swagger_text are passed directly on to AWS transparently whilst swagger_dict is an ansible dict which is converted to JSON before the API definitions are uploaded. @@ -97,7 +97,7 @@ description: - Type of endpoint configuration, use C(EDGE) for an edge optimized API endpoint, - C(REGIONAL) for just a regional deploy or PRIVATE for a private API. - - This will flag will only be used when creating a new API Gateway setup, not for updates. + - This flag will only be used when creating a new API Gateway setup, not for updates. 
choices: ['EDGE', 'REGIONAL', 'PRIVATE'] type: str default: EDGE @@ -109,7 +109,7 @@ notes: - A future version of this module will probably use tags or another - ID so that an API can be create only once. + ID so that an API can be created only once. - As an early work around an intermediate version will probably do the same using a tag embedded in the API name. From aae25feda4be574cbeac0cacbf751232044b7858 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 12 Nov 2020 12:18:50 +0100 Subject: [PATCH 057/683] fix element type for load_balancers in ecs_service (#286) * #265 fix element type for load_balancers in ecs_service --- ecs_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 462f60cb946..499fa0d4711 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -52,7 +52,7 @@ - The list of ELBs defined for this service. required: false type: list - elements: str + elements: dict desired_count: description: - The count of how many instances of the service. @@ -648,7 +648,7 @@ def main(): name=dict(required=True, type='str'), cluster=dict(required=False, type='str'), task_definition=dict(required=False, type='str'), - load_balancers=dict(required=False, default=[], type='list', elements='str'), + load_balancers=dict(required=False, default=[], type='list', elements='dict'), desired_count=dict(required=False, type='int'), client_token=dict(required=False, default='', type='str'), role=dict(required=False, default='', type='str'), From ca9735ed3a1d7bb97b0e84186ba9aefe456a68f8 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 12 Nov 2020 12:20:16 +0100 Subject: [PATCH 058/683] fix wrong element type for containers in ecs_taskdefinition (#284) * #264 wrong element type for parameter --- ecs_taskdefinition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index b7afe864ee8..c1a30b33307 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -47,7 +47,7 @@ - A list of containers definitions. required: False type: list - elements: str + elements: dict network_mode: description: - The Docker networking mode to use for the containers in the task. 
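With 'elements' switched from 'str' to 'dict' above, each entry of the 'containers' option is a mapping in the ECS container-definition shape that the module forwards to AWS. A sketch of one such element in Python, with all values as illustrative placeholders rather than anything taken from the patch:

# One element of the module's 'containers' list; keys mirror the ECS
# containerDefinitions structure. Every value below is a placeholder.
container = {
    'name': 'web',            # placeholder container name
    'image': 'nginx:1.19',    # placeholder image
    'cpu': 128,
    'memory': 256,
    'essential': True,
    'portMappings': [
        {'containerPort': 80, 'hostPort': 80, 'protocol': 'tcp'},
    ],
}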
@@ -321,7 +321,7 @@ def main(): family=dict(required=False, type='str'), revision=dict(required=False, type='int'), force_create=dict(required=False, default=False, type='bool'), - containers=dict(required=False, type='list', elements='str'), + containers=dict(required=False, type='list', elements='dict'), network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), task_role_arn=dict(required=False, default='', type='str'), execution_role_arn=dict(required=False, default='', type='str'), From 158e1fb3d4dad5ec202c916690bd14bd23748bb7 Mon Sep 17 00:00:00 2001 From: Dmytro Vorotyntsev <2937451+vorotech@users.noreply.github.com> Date: Thu, 12 Nov 2020 13:41:32 +0200 Subject: [PATCH 059/683] Adding new module s3_metrics_configuration (#217) * new module s3_metrics_configuration * fixed dict comparison * added missed quote at doc section * fixing lint issues * prefer to use module_defaults for aws variables * addressed code review comments * simplified imports * log command results to understand the issue * install awscli --- s3_metrics_configuration.py | 221 ++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 s3_metrics_configuration.py diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py new file mode 100644 index 00000000000..729503cf88b --- /dev/null +++ b/s3_metrics_configuration.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: s3_metrics_configuration +version_added: 1.3.0 +short_description: Manage s3 bucket metrics configuration in AWS +description: + - Manage s3 bucket metrics configuration in AWS which allows to get the CloudWatch request metrics for the objects in a bucket +author: Dmytro Vorotyntsev (@vorotech) +notes: + - This modules manages single metrics configuration, the s3 bucket might have up to 1,000 metrics configurations + - To request metrics for the entire bucket, create a metrics configuration without a filter + - Metrics configurations are necessary only to enable request metric, bucket-level daily storage metrics are always turned on +options: + bucket_name: + description: + - "Name of the s3 bucket" + required: true + type: str + id: + description: + - "The ID used to identify the metrics configuration" + required: true + type: str + filter_prefix: + description: + - "A prefix used when evaluating a metrics filter" + required: false + type: str + filter_tags: + description: + - "A dictionary of one or more tags used when evaluating a metrics filter" + required: false + aliases: ['filter_tag'] + type: dict + state: + description: + - "Create or delete metrics configuration" + default: present + choices: ['present', 'absent'] + type: str +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
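# An additional usage sketch, not part of the original examples: main() below
# declares supports_check_mode=True, so a pending change can be previewed with
# Ansible's check mode before it is applied. Bucket, ID and prefix values here
# are placeholders.
- name: Preview a metrics configuration change without applying it
  community.aws.s3_metrics_configuration:
    bucket_name: my-bucket      # placeholder bucket
    id: EntireBucket            # placeholder configuration ID
    filter_prefix: reports      # placeholder prefix
    state: present
  check_mode: yes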
+ +- name: Create a metrics configuration that enables metrics for an entire bucket + community.aws.s3_metrics_configuration: + bucket_name: my-bucket + id: EntireBucket + state: present + +- name: Put a metrics configuration that enables metrics for objects starting with a prefix + community.aws.s3_metrics_configuration: + bucket_name: my-bucket + id: Assets + filter_prefix: assets + state: present + +- name: Put a metrics configuration that enables metrics for objects with specific tag + community.aws.s3_metrics_configuration: + bucket_name: my-bucket + id: Assets + filter_tag: + kind: asset + state: present + +- name: Put a metrics configuration that enables metrics for objects that start with a particular prefix and have specific tags applied + community.aws.s3_metrics_configuration: + bucket_name: my-bucket + id: ImportantBlueDocuments + filter_prefix: documents + filter_tags: + priority: high + class: blue + state: present + +- name: Delete metrics configuration + community.aws.s3_metrics_configuration: + bucket_name: my-bucket + id: EntireBucket + state: absent + +''' + +try: + import boto3 + import botocore + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list + + +def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): + payload = { + 'Id': mc_id + } + # Just a filter_prefix or just a single tag filter is a special case + if filter_prefix and not filter_tags: + payload['Filter'] = { + 'Prefix': filter_prefix + } + elif not filter_prefix and len(filter_tags) == 1: + payload['Filter'] = { + 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0] + } + # Otherwise we need to use 'And' + elif filter_tags: + payload['Filter'] = { + 'And': { + 'Tags': ansible_dict_to_boto3_tag_list(filter_tags) + } + } + if filter_prefix: + payload['Filter']['And']['Prefix'] = filter_prefix + + return payload + + +def create_or_update_metrics_configuration(client, module): + bucket_name = module.params.get('bucket_name') + mc_id = module.params.get('id') + filter_prefix = module.params.get('filter_prefix') + filter_tags = module.params.get('filter_tags') + + try: + response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) + metrics_configuration = response['MetricsConfiguration'] + except is_boto3_error_code('NoSuchConfiguration'): + metrics_configuration = None + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") + + new_configuration = _create_metrics_configuration(mc_id, filter_prefix, filter_tags) + + if metrics_configuration: + if metrics_configuration == new_configuration: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + try: + client.put_bucket_metrics_configuration( + aws_retry=True, + Bucket=bucket_name, + Id=mc_id, + MetricsConfiguration=new_configuration + ) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) + + module.exit_json(changed=True) + + +def delete_metrics_configuration(client, module): + bucket_name = module.params.get('bucket_name') + mc_id = module.params.get('id') + + try: + 
client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) + except is_boto3_error_code('NoSuchConfiguration'): + module.exit_json(changed=False) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") + + if module.check_mode: + module.exit_json(changed=True) + + try: + client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) + except is_boto3_error_code('NoSuchConfiguration'): + module.exit_json(changed=False) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) + + module.exit_json(changed=True) + + +def main(): + argument_spec = dict( + bucket_name=dict(type='str', required=True), + id=dict(type='str', required=True), + filter_prefix=dict(type='str', required=False), + filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']), + state=dict(default='present', type='str', choices=['present', 'absent']), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params.get('state') + + try: + client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + if state == 'present': + create_or_update_metrics_configuration(client, module) + elif state == 'absent': + delete_metrics_configuration(client, module) + + +if __name__ == '__main__': + main() From 0a55ebabbdcc3fdf13ae9ddb0e7d519166712839 Mon Sep 17 00:00:00 2001 From: Will Date: Sat, 28 Nov 2020 09:33:16 +0000 Subject: [PATCH 060/683] Fix for aws_kms_info with external/custom key store keys (#311) * Fix for aws_kms_info with external/custom key store keys * Added changelog fragment --- aws_kms_info.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index 160ca5e13c7..235b7bc5b1e 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -373,7 +373,11 @@ def get_key_details(connection, module, key_id, tokens=None): exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) result['aliases'] = aliases.get(result['KeyId'], []) - result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + + if result['Origin'] == 'AWS_KMS': + result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + else: + result['enable_key_rotation'] = None if module.params.get('pending_deletion'): return camel_dict_to_snake_dict(result) From 867f8638204505d979d3b4a8e7b0fa630496bf8b Mon Sep 17 00:00:00 2001 From: Sid Patel Date: Tue, 1 Dec 2020 09:03:31 -0700 Subject: [PATCH 061/683] fix: ansible kinesis stream paginated shards bug (#93) * fix: ansible kinesis stream paginated shards bug * only set shardid params when more shards * add changelog fragment --- kinesis_stream.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kinesis_stream.py b/kinesis_stream.py index 4183444ebb3..d9b3cc0d938 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -364,6 +364,8 @@ def find_stream(client, stream_name, check_mode=False): ) shards.extend(results.pop('Shards')) has_more_shards = results['HasMoreShards'] + if has_more_shards: + params['ExclusiveStartShardId'] = shards[-1]['ShardId'] results['Shards'] = shards num_closed_shards = len([s for s in shards if 
'EndingSequenceNumber' in s['SequenceNumberRange']]) results['OpenShardsCount'] = len(shards) - num_closed_shards From 97dd1c356fdfb508eb6c77576989d90ce485d50d Mon Sep 17 00:00:00 2001 From: Romain GAGNAIRE <2607260+TheOptimisticFactory@users.noreply.github.com> Date: Tue, 1 Dec 2020 19:35:07 +0100 Subject: [PATCH 062/683] fix: Cloudfront distribution now uses provided origin_access_identity (#39) * fix: Cloudfront distribution now uses provided origin_access_identity - This is the same fix that had been originally pushed to https://github.com/ansible/ansible/pull/68845 * test: Added new test case * test: corrected typo in task name Co-authored-by: Mark Chappell * test: Adjusted test task labeling Co-authored-by: Romain Gagnaire Co-authored-by: Mark Chappell --- cloudfront_distribution.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 5df16dc69db..7e8fe152d5f 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1674,10 +1674,15 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'): - return existing_config['s3_origin_config']['origin_access_identity'] if not origin['s3_origin_access_identity_enabled']: return None + + if origin.get('s3_origin_config', {}).get('origin_access_identity'): + return origin['s3_origin_config']['origin_access_identity'] + + if existing_config.get('s3_origin_config', {}).get('origin_access_identity'): + return existing_config['s3_origin_config']['origin_access_identity'] + try: comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) From ad336042b309eb04de50ba5b360b014601f0b927 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 7 Dec 2020 08:48:06 +0100 Subject: [PATCH 063/683] Sanity test / doc-default-does-not-match-spec fixups (#320) * Sanity test / doc-default-does-not-match-spec fixups * General docs cleanup --- aws_api_gateway.py | 24 ++++---- aws_application_scaling_policy.py | 15 ++--- aws_direct_connect_connection.py | 7 ++- aws_direct_connect_link_aggregation_group.py | 3 + cloudfront_distribution.py | 24 ++++---- ec2_ami_copy.py | 10 ++-- ec2_vpc_vpn.py | 21 +++---- ecs_ecr.py | 8 +-- ecs_service.py | 14 +++-- ecs_taskdefinition.py | 3 +- elasticache.py | 10 ++-- elb_application_lb.py | 16 ++--- elb_classic_lb.py | 61 ++++++++++---------- elb_network_lb.py | 4 +- elb_target.py | 17 +++--- iam_cert.py | 10 ++-- route53.py | 18 +++--- sqs_queue.py | 7 ++- 18 files changed, 145 insertions(+), 127 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 5267daf7079..f7466b1d1e2 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -14,7 +14,7 @@ version_added: 1.0.0 short_description: Manage AWS API Gateway APIs description: - - Allows for the management of API Gateway APIs + - Allows for the management of API Gateway APIs. - Normally you should give the api_id since there is no other stable guaranteed unique identifier for the API. 
If you do not give api_id then a new API will be created each time @@ -40,7 +40,7 @@ swagger_file: description: - JSON or YAML file containing swagger definitions for API. - Exactly one of swagger_file, swagger_text or swagger_dict must + Exactly one of I(swagger_file), I(swagger_text) or I(swagger_dict) must be present. type: path aliases: ['src', 'api_file'] @@ -60,13 +60,13 @@ type: str deploy_desc: description: - - Description of the deployment - recorded and visible in the - AWS console. + - Description of the deployment. + - Recorded and visible in the AWS console. default: Automatic deployment by Ansible. type: str cache_enabled: description: - - Enable API GW caching of backend responses. Defaults to false. + - Enable API GW caching of backend responses. type: bool default: false cache_size: @@ -83,20 +83,22 @@ description: - Canary settings for the deployment of the stage. - 'Dict with following settings:' - - 'percentTraffic: The percent (0-100) of traffic diverted to a canary deployment.' - - 'deploymentId: The ID of the canary deployment.' - - 'stageVariableOverrides: Stage variables overridden for a canary release deployment.' - - 'useStageCache: A Boolean flag to indicate whether the canary deployment uses the stage cache or not.' + - 'C(percentTraffic): The percent (0-100) of traffic diverted to a canary deployment.' + - 'C(deploymentId): The ID of the canary deployment.' + - 'C(stageVariableOverrides): Stage variables overridden for a canary release deployment.' + - 'C(useStageCache): A Boolean flag to indicate whether the canary deployment uses the stage cache or not.' - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage) type: dict tracing_enabled: description: - Specifies whether active tracing with X-ray is enabled for the API GW stage. type: bool + default: false endpoint_type: description: - - Type of endpoint configuration, use C(EDGE) for an edge optimized API endpoint, - - C(REGIONAL) for just a regional deploy or PRIVATE for a private API. + - Type of endpoint configuration. + - Use C(EDGE) for an edge optimized API endpoint, + C(REGIONAL) for just a regional deploy or C(PRIVATE) for a private API. - This flag will only be used when creating a new API Gateway setup, not for updates. choices: ['EDGE', 'REGIONAL', 'PRIVATE'] type: str diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index 468705b1938..dcc8b8b1691 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -15,14 +15,14 @@ - for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy) description: - - Creates, updates or removes a Scaling Policy + - Creates, updates or removes a Scaling Policy. author: - Gustavo Maia (@gurumaia) - Chen Leibovich (@chenl87) requirements: [ json, botocore, boto3 ] options: state: - description: Whether a policy should be present or absent + description: Whether a policy should be C(present) or C(absent). required: yes choices: ['absent', 'present'] type: str @@ -57,12 +57,12 @@ choices: ['StepScaling', 'TargetTrackingScaling'] type: str step_scaling_policy_configuration: - description: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling. + description: A step scaling policy. 
This parameter is required if you are creating a policy and I(policy_type=StepScaling). required: no type: dict target_tracking_scaling_policy_configuration: description: - A target tracking policy. This parameter is required if you are creating a new policy and I(policy_type=TargetTrackingScaling). - 'Full documentation of the suboptions can be found in the API documentation:' - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)' required: no @@ -84,7 +84,7 @@ description: The time (in seconds) to wait after scaling-out before another scaling action can occur. type: int TargetValue: - description: The target value for the metric + description: The target value for the metric. type: float minimum_tasks: description: The minimum value to scale to in response to a scale in event. @@ -97,9 +97,10 @@ required: no type: int override_task_capacity: - description: Whether or not to override values of minimum and/or maximum tasks if it's already set. + description: + - Whether or not to override values of minimum and/or maximum tasks if it's already set. + - Defaults to C(false). required: no - default: no type: bool extends_documentation_fragment: - amazon.aws.aws diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 9899b742fb4..a84e5f98523 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -64,9 +64,10 @@ type: str forced_update: description: - - To modify bandwidth or location the connection will need to be deleted and recreated. - By default this will not happen - this option must be set to True. + - To modify I(bandwidth) or I(location) the connection needs to be deleted and recreated. + - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location). type: bool + default: false ''' EXAMPLES = """ @@ -93,7 +94,7 @@ name: ansible-test-connection location: EqDC2 bandwidth: 10Gbps - forced_update: True + forced_update: true # delete the connection - community.aws.aws_direct_connect_connection: diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index e3fae3ccf06..41c50134dab 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -59,6 +59,7 @@ - This allows the minimum number of links to be set to 0, any hosted connections disassociated, and any virtual interfaces associated to the LAG deleted. type: bool + default: false connection_id: description: - A connection ID to link with the link aggregation group upon creation. @@ -67,12 +68,14 @@ description: - To be used with I(state=absent) to delete connections after disassociating them with the LAG. type: bool + default: false wait: description: - Whether or not to wait for the operation to complete. - May be useful when waiting for virtual interfaces to be deleted. - The time to wait can be controlled by setting I(wait_timeout). type: bool + default: false wait_timeout: description: - The duration in seconds to wait if I(wait=true). diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 7e8fe152d5f..a48f687a890 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -66,7 +66,7 @@ tags: description: - Should be input as a dict of key-value pairs.
- - Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1' + - "Note that numeric keys or values must be wrapped in quotes. e.g. C(Priority: '1')" type: dict purge_tags: @@ -87,7 +87,7 @@ aliases: description: - - A list) of domain name aliases (CNAMEs) as strings to be used for the distribution. + - A list of domain name aliases (CNAMEs) as strings to be used for the distribution. - Each alias must be unique across all distribution for the AWS account. type: list elements: str @@ -141,7 +141,7 @@ description: - Custom headers you wish to add to the request before passing it to the origin. - For more information see the CloudFront documentation - at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html). type: list elements: dict suboptions: @@ -191,7 +191,7 @@ description: - A dict specifying the default cache behavior of the distribution. - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid - I(cache_behavior) in I(cache_behaviors) with defaults. + cache_behavior in I(cache_behaviors) with defaults. suboptions: target_origin_id: description: @@ -492,7 +492,7 @@ enabled: description: - A boolean value that specifies whether the distribution is enabled or disabled. - default: false + - Defaults to C(false). type: bool viewer_certificate: @@ -504,18 +504,18 @@ type: bool description: - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net) - you should set I(cloudfront_default_certificate=true) + you should set I(cloudfront_default_certificate=true). - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method). iam_certificate_id: type: str description: - The ID of a certificate stored in IAM to use for HTTPS connections. - - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method) + - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method). acm_certificate_arn: type: str description: - The ID of a certificate stored in ACM to use for HTTPS connections. - - If I(acm_certificate_id) is set then you must also specify I(ssl_support_method) + - If I(acm_certificate_id) is set then you must also specify I(ssl_support_method). ssl_support_method: type: str description: @@ -541,12 +541,12 @@ type: str description: - The method that you want to use to restrict distribution of your content by country. - - Valid values are C(none), C(whitelist), C(blacklist) + - Valid values are C(none), C(whitelist), C(blacklist). items: description: - A list of ISO 3166-1 two letter (Alpha 2) country codes that the restriction should apply to. - - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/)' + - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/).' type: list web_acl_id: @@ -558,14 +558,14 @@ description: - The version of the http protocol to use for the distribution. - AWS defaults this to C(http2). - - Valid values are C(http1.1) and C(http2) + - Valid values are C(http1.1) and C(http2). type: str ipv6_enabled: description: - Determines whether IPv6 support is enabled or not. + - Defaults to C(false). 
type: bool - default: false wait: description: diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 79ebf577394..38f1123168f 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -27,7 +27,7 @@ type: str name: description: - - The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.) + - The name of the new AMI to copy. (As of 2.3 the default is C(default), in prior versions it was C(null).) default: "default" type: str description: @@ -38,20 +38,22 @@ description: - Whether or not the destination snapshots of the copied AMI should be encrypted. type: bool + default: false kms_key_id: description: - KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account. type: str wait: description: - - Wait for the copied AMI to be in state 'available' before returning. + - Wait for the copied AMI to be in state C(available) before returning. type: bool default: 'no' wait_timeout: description: - - How long before wait gives up, in seconds. Prior to 2.3 the default was 1200. + - How long before wait gives up, in seconds. + - Prior to 2.3 the default was C(1200). - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults. - This was reenabled in 2.6 to allow timeouts greater than 10 minutes. + - This was reenabled in 2.6 to allow timeouts greater than 10 minutes. default: 600 type: int tags: diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 9067d522129..6e18e724258 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -35,7 +35,7 @@ connection_type: description: - The type of VPN connection. - - At this time only 'ipsec.1' is supported. + - At this time only C(ipsec.1) is supported. default: ipsec.1 type: str vpn_gateway_id: @@ -63,8 +63,8 @@ required: no tunnel_options: description: - - An optional list object containing no more than two dict members, each of which may contain 'TunnelInsideCidr' - and/or 'PreSharedKey' keys with appropriate string values. AWS defaults will apply in absence of either of + - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr) + and/or I(PreSharedKey) keys with appropriate string values. AWS defaults will apply in absence of either of the aforementioned keys. required: no type: list @@ -78,11 +78,11 @@ description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway. filters: description: - - An alternative to using vpn_connection_id. If multiple matches are found, vpn_connection_id is required. + - An alternative to using I(vpn_connection_id). If multiple matches are found, vpn_connection_id is required. If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN - that correlates. e.g. if the filter 'cidr' is ['194.168.2.0/24', '192.168.2.0/24'] and the VPN route only has the - destination cidr block of '192.168.2.0/24' it will be found with this filter (assuming there are not multiple - VPNs that are matched). Another example, if the filter 'vpn' is equal to ['vpn-ccf7e7ad', 'vpn-cb0ae2a2'] and one + that correlates. e.g. if the filter I(cidr) is C(['194.168.2.0/24', '192.168.2.0/24']) and the VPN route only has the + destination cidr block of C(192.168.2.0/24) it will be found with this filter (assuming there are not multiple + VPNs that are matched). 
Another example, if the filter I(vpn) is equal to C(['vpn-ccf7e7ad', 'vpn-cb0ae2a2']) and one of of the VPNs has the state deleted (exists but is unmodifiable) and the other exists and is not deleted, it will be found via this filter. See examples. suboptions: @@ -91,7 +91,7 @@ - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings. static-routes-only: description: - - The type of routing; true or false. + - The type of routing; C(true) or C(false). cidr: description: - The destination cidr of the VPN's route as a string or a list of those strings. @@ -127,15 +127,16 @@ description: - Whether or not to delete VPN connections routes that are not specified in the task. type: bool + default: false wait_timeout: description: - - How long before wait gives up, in seconds. + - How long, in seconds, before wait gives up. default: 600 type: int required: false delay: description: - - The time to wait before checking operation again. in seconds. + - The time, in seconds, to wait before checking operation again. required: false type: int default: 15 diff --git a/ecs_ecr.py b/ecs_ecr.py index 8e344665a0e..4ae7d40cd2a 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -45,8 +45,8 @@ description: - If yes, remove the policy from the repository. - Alias C(delete_policy) has been deprecated and will be removed after 2022-06-01. + - Defaults to C(false). required: false - default: false type: bool aliases: [ delete_policy ] image_tag_mutability: @@ -63,9 +63,9 @@ type: json purge_lifecycle_policy: description: - - if yes, remove the lifecycle policy from the repository. + - if C(true), remove the lifecycle policy from the repository. + - Defaults to C(false). required: false - default: false type: bool state: description: @@ -76,7 +76,7 @@ type: str scan_on_push: description: - - if yes, images are scanned for known vulnerabilities after being pushed to the repository. + - if C(true), images are scanned for known vulnerabilities after being pushed to the repository. - I(scan_on_push) requires botocore >= 1.13.3 required: false default: false diff --git a/ecs_service.py b/ecs_service.py index 499fa0d4711..7bc3d467df7 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -88,6 +88,7 @@ - Force deployment of service even if there are no changes. required: false type: bool + default: false deployment_configuration: description: - Optional parameters that control the deployment_configuration. @@ -136,12 +137,12 @@ suboptions: subnets: description: - - A list of subnet IDs to associate with the task + - A list of subnet IDs to associate with the task. type: list elements: str security_groups: description: - - A list of security group names or group IDs to associate with the task + - A list of security group names or group IDs to associate with the task. type: list elements: str assign_public_ip: @@ -170,19 +171,20 @@ suboptions: container_name: description: - - container name for service discovery registration + - Container name for service discovery registration. type: str container_port: description: - - container port for service discovery registration + - Container port for service discovery registration. type: int arn: description: - - Service discovery registry ARN + - Service discovery registry ARN. type: str scheduling_strategy: description: - - The scheduling strategy, defaults to "REPLICA" if not given to preserve previous behavior + - The scheduling strategy. + - Defaults to C(REPLICA) if not given to preserve previous behavior. 
required: false choices: ["DAEMON", "REPLICA"] type: str diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index c1a30b33307..6158fb4ec21 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -42,6 +42,7 @@ - Always create new task definition. required: False type: bool + default: false containers: description: - A list of containers definitions. @@ -95,7 +96,7 @@ memory: description: - The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. - - If using the Fargate launch type, this field is required and is limited by the cpu. + - If using the Fargate launch type, this field is required and is limited by the CPU. required: false type: str extends_documentation_fragment: diff --git a/elasticache.py b/elasticache.py index b8848f324af..d6a649ba17b 100644 --- a/elasticache.py +++ b/elasticache.py @@ -64,17 +64,17 @@ type: str cache_subnet_group: description: - - The subnet group name to associate with. Only use if inside a vpc. - - Required if inside a vpc + - The subnet group name to associate with. Only use if inside a VPC. + - Required if inside a VPC. type: str security_group_ids: description: - - A list of vpc security group IDs to associate with this cache cluster. Only use if inside a vpc. + - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC. type: list elements: str cache_security_groups: description: - - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc. + - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a VPC. type: list elements: str zone: @@ -89,8 +89,8 @@ hard_modify: description: - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state. + - Defaults to C(false). type: bool - default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/elb_application_lb.py b/elb_application_lb.py index dc138fb2294..f18a00816b8 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -22,7 +22,7 @@ --- module: elb_application_lb version_added: 1.0.0 -short_description: Manage an Application load balancer +short_description: Manage an Application Load Balancer description: - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. requirements: [ boto3 ] @@ -50,12 +50,12 @@ deletion_protection: description: - Indicates whether deletion protection for the ELB is enabled. - default: no + - Defaults to C(false). type: bool http2: description: - Indicates whether to enable HTTP2 routing. - default: no + - Defaults to C(false). type: bool idle_timeout: description: @@ -124,14 +124,14 @@ type: str purge_listeners: description: - - If yes, existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. If the I(listeners) parameter is - not set then listeners will not be modified + - If C(yes), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. + - If the I(listeners) parameter is not set then listeners will not be modified. default: yes type: bool purge_tags: description: - - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then - tags will not be modified. 
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + - If the I(tags) parameter is not set then tags will not be modified. default: yes type: bool subnets: @@ -176,7 +176,7 @@ type: int purge_rules: description: - - When set to no, keep the existing load balancer rules in place. Will modify and add, but will not delete. + - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. default: yes type: bool extends_documentation_fragment: diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 5d35fca3bc5..cd7d45875d1 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -19,62 +19,62 @@ options: state: description: - - Create or destroy the ELB + - Create or destroy the ELB. choices: ["present", "absent"] required: true type: str name: description: - - The name of the ELB + - The name of the ELB. required: true type: str listeners: description: - - List of ports/protocols for this ELB to listen on (see example) + - List of ports/protocols for this ELB to listen on (see example). type: list elements: dict purge_listeners: description: - - Purge existing listeners on ELB that are not found in listeners + - Purge existing listeners on ELB that are not found in listeners. type: bool - default: 'yes' + default: true instance_ids: description: - - List of instance ids to attach to this ELB + - List of instance ids to attach to this ELB. type: list elements: str purge_instance_ids: description: - - Purge existing instance ids on ELB that are not found in instance_ids + - Purge existing instance ids on ELB that are not found in I(instance_ids). type: bool - default: 'no' + default: false zones: description: - - List of availability zones to enable on this ELB + - List of availability zones to enable on this ELB. type: list elements: str purge_zones: description: - - Purge existing availability zones on ELB that are not found in zones + - Purge existing availability zones on ELB that are not found in zones. type: bool - default: 'no' + default: false security_group_ids: description: - - A list of security groups to apply to the elb + - A list of security groups to apply to the ELB. type: list elements: str security_group_names: description: - - A list of security group names to apply to the elb + - A list of security group names to apply to the ELB. type: list elements: str health_check: description: - - An associative array of health check configuration settings (see example) + - An associative array of health check configuration settings (see example). type: dict access_logs: description: - - An associative array of access logs configuration settings (see example) + - An associative array of access logs configuration settings (see example). type: dict subnets: description: @@ -83,49 +83,50 @@ elements: str purge_subnets: description: - - Purge existing subnet on ELB that are not found in subnets + - Purge existing subnets on ELB that are not found in subnets. type: bool - default: 'no' + default: false scheme: description: - - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'. - If you choose to update your scheme with a different value the ELB will be destroyed and - recreated. To update scheme you must use the option wait. + - The scheme to use when creating the ELB. + - For a private VPC-visible ELB use C(internal). + - If you choose to update your scheme with a different value the ELB will be destroyed and + recreated. 
To update scheme you must set I(wait=true). choices: ["internal", "internet-facing"] default: 'internet-facing' type: str validate_certs: description: - - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0. + - When set to C(false), SSL certificates will not be validated for boto versions >= 2.6.0. type: bool - default: 'yes' + default: true connection_draining_timeout: description: - - Wait a specified timeout allowing connections to drain before terminating an instance + - Wait a specified timeout allowing connections to drain before terminating an instance. type: int idle_timeout: description: - - ELB connections from clients and to servers are timed out after this amount of time + - ELB connections from clients and to servers are timed out after this amount of time. type: int cross_az_load_balancing: description: - - Distribute load across all configured Availability Zones + - Distribute load across all configured Availability Zones. + - Defaults to C(false). type: bool - default: 'no' stickiness: description: - - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example ) + - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example). type: dict wait: description: - When specified, Ansible will check the status of the load balancer to ensure it has been successfully removed from AWS. type: bool - default: 'no' + default: false wait_timeout: description: - - Used in conjunction with wait. Number of seconds to wait for the elb to be terminated. - A maximum of 600 seconds (10 minutes) is allowed. + - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. + A maximum of C(600) seconds (10 minutes) is allowed. default: 60 type: int tags: diff --git a/elb_network_lb.py b/elb_network_lb.py index 83e1ea416dc..5e34c527276 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -21,12 +21,12 @@ cross_zone_load_balancing: description: - Indicates whether cross-zone load balancing is enabled. - default: false + - Defaults to C(false). type: bool deletion_protection: description: - Indicates whether deletion protection for the ELB is enabled. - default: false + - Defaults to C(false). type: bool listeners: description: diff --git a/elb_target.py b/elb_target.py index 06ff79e2653..b8cda233814 100644 --- a/elb_target.py +++ b/elb_target.py @@ -12,27 +12,30 @@ version_added: 1.0.0 short_description: Manage a target in a target group description: - - Used to register or deregister a target in a target group + - Used to register or deregister a target in a target group. author: "Rob White (@wimnat)" options: deregister_unused: description: - - The default behaviour for targets that are unused is to leave them registered. If instead you would like to remove them - set I(deregister_unused) to yes. + - The default behaviour for targets that are unused is to leave them registered. + - If instead you would like to remove them set I(deregister_unused=true). + default: false type: bool target_az: description: - - An Availability Zone or all. This determines whether the target receives traffic from the load balancer nodes in the specified + - An Availability Zone or C(all). This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target type of the target group is instance. 
type: str target_group_arn: description: - - The Amazon Resource Name (ARN) of the target group. Mutually exclusive of I(target_group_name). + - The Amazon Resource Name (ARN) of the target group. + - Mutually exclusive of I(target_group_name). type: str target_group_name: description: - - The name of the target group. Mutually exclusive of I(target_group_arn). + - The name of the target group. + - Mutually exclusive of I(target_group_arn). type: str target_id: description: @@ -55,7 +58,7 @@ type: str target_status_timeout: description: - - Maximum time in seconds to wait for target_status change + - Maximum time in seconds to wait for I(target_status) change. required: false default: 60 type: int diff --git a/iam_cert.py b/iam_cert.py index 8ad5bb88b33..96c9bccae7c 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -33,17 +33,17 @@ type: str new_name: description: - - When state is present, this will update the name of the cert. - - The cert, key and cert_chain parameters will be ignored if this is defined. + - When I(state=present), this will update the name of the cert. + - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined. type: str new_path: description: - - When state is present, this will update the path of the cert. + - When I(state=present), this will update the path of the cert. - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined. type: str state: description: - - Whether to create(or update) or delete the certificate. + - Whether to create (or update) or delete the certificate. - If I(new_path) or I(new_name) is defined, specifying present will attempt to make an update these. required: true choices: [ "present", "absent" ] @@ -72,7 +72,7 @@ description: - By default the module will not upload a certificate that is already uploaded into AWS. - If I(dup_ok=True), it will upload the certificate as long as the name is unique. - default: False + - Defaults to C(false). type: bool requirements: [ "boto" ] diff --git a/route53.py b/route53.py index 6b69363f2d0..6caf385002f 100644 --- a/route53.py +++ b/route53.py @@ -12,14 +12,14 @@ --- module: route53 version_added: 1.0.0 -short_description: add or delete entries in Amazons Route53 DNS service +short_description: add or delete entries in Amazons Route 53 DNS service description: - - Creates and deletes DNS records in Amazons Route53 service + - Creates and deletes DNS records in Amazons Route 53 service. options: state: description: - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option has been changed - to I(state) as default and the choices 'present' and 'absent' have been added, but I(command) still works as well. + to I(state) as default and the choices C(present) and C(absent) have been added, but I(command) still works as well. required: true aliases: [ 'command' ] choices: [ 'present', 'absent', 'get', 'create', 'delete' ] @@ -53,8 +53,8 @@ alias: description: - Indicates if this is an alias record. + - Defaults to C(false). type: bool - default: false alias_hosted_zone_id: description: - The hosted zone identifier. @@ -67,7 +67,7 @@ value: description: - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records. - - When deleting a record all values for the record must be specified or Route53 will not delete it. + - When deleting a record all values for the record must be specified or Route 53 will not delete it. 
type: list elements: str overwrite: @@ -76,14 +76,14 @@ type: bool retry_interval: description: - - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. - If you have many domain names, the default of 500 seconds may be too long. + - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds. + If you have many domain names, the default of C(500) seconds may be too long. default: 500 type: int private_zone: description: - - If set to C(yes), the private zone matching the requested name within the domain will be used if there are both public and private zones. - The default is to use the public zone. + - If set to C(true), the private zone matching the requested name within the domain will be used if there are both public and private zones. + - The default is to use the public zone. type: bool default: false identifier: diff --git a/sqs_queue.py b/sqs_queue.py index 40eda404b46..5d65967974a 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -10,7 +10,7 @@ --- module: sqs_queue version_added: 1.0.0 -short_description: Creates or deletes AWS SQS queues. +short_description: Creates or deletes AWS SQS queues description: - Create or delete AWS SQS queues. - Update attributes on existing queues. @@ -83,8 +83,9 @@ type: int content_based_deduplication: type: bool - description: Enables content-based deduplication. Used for FIFOs only. - default: false + description: + - Enables content-based deduplication. Used for FIFOs only. + - Defaults to C(false). tags: description: - Tag dict to apply to the queue (requires botocore 1.5.40 or above). From 9d32feaf463973e5b936691be0989567713369c6 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 9 Dec 2020 20:23:53 +0100 Subject: [PATCH 064/683] Cleanup IGW modules (#318) * import order * Add retry decorators * Switch tests to using module_defaults * module_defaults * Add initial _info tests * Handle Boto Errors with fail_json_aws * Test state=absent when IGW missing * Support not purging tags * Support converting Tags from boto to dict * Add tagging tests * Use random CIDR for VPC * Add check_mode tests * changelog --- ec2_vpc_igw.py | 65 +++++++++++++++++++++++++++------------------ ec2_vpc_igw_info.py | 45 ++++++++++++++++++++++++------- 2 files changed, 75 insertions(+), 35 deletions(-) diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index b920682b76c..3d8d9f3bf25 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -22,9 +22,16 @@ type: str tags: description: - - "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed." + - A dict of tags to apply to the internet gateway. + - To remove all tags set I(tags={}) and I(purge_tags=true). aliases: [ 'resource_tags' ] type: dict + purge_tags: + description: + - Remove tags not listed in I(tags). 
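The tags/purge_tags pair being added here follows the usual AWS-collection tag reconciliation semantics. A simplified stand-in for amazon.aws's compare_aws_tags(), shown only to illustrate those semantics (the real helper also handles reserved aws: keys and other edge cases):

def compare_tags(current, desired, purge_tags):
    # Keys whose values differ, or that are new, must be written back to AWS.
    to_update = {k: v for k, v in desired.items() if current.get(k) != v}
    # Keys absent from the desired set are deleted only when purging.
    to_delete = [k for k in current if k not in desired] if purge_tags else []
    return to_update, to_delete

print(compare_tags({'Name': 'igw', 'Owner': 'a'}, {'Name': 'igw', 'Env': 'p'}, True))
# -> ({'Env': 'p'}, ['Owner']); with purge_tags=False, 'Owner' would be kept.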
+ type: bool + default: true + version_added: 1.3.0 state: description: - Create or terminate the IGW @@ -85,17 +92,16 @@ except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.six import string_types + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_filter_list, - ansible_dict_to_boto3_tag_list, - compare_aws_tags -) -from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags class AnsibleEc2Igw(object): @@ -103,16 +109,17 @@ class AnsibleEc2Igw(object): def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client('ec2') + self._connection = self._module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) self._check_mode = self._module.check_mode def process(self): vpc_id = self._module.params.get('vpc_id') state = self._module.params.get('state', 'present') tags = self._module.params.get('tags') + purge_tags = self._module.params.get('purge_tags') if state == 'present': - self.ensure_igw_present(vpc_id, tags) + self.ensure_igw_present(vpc_id, tags, purge_tags) elif state == 'absent': self.ensure_igw_absent(vpc_id) @@ -120,7 +127,7 @@ def get_matching_igw(self, vpc_id): filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) igws = [] try: - response = self._connection.describe_internet_gateways(Filters=filters) + response = self._connection.describe_internet_gateways(aws_retry=True, Filters=filters) igws = response.get('InternetGateways', []) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e) @@ -135,21 +142,25 @@ def get_matching_igw(self, vpc_id): return igw def check_input_tags(self, tags): + if tags is None: + return nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)] if nonstring_tags: self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags)) - def ensure_tags(self, igw_id, tags, add_only): + def ensure_tags(self, igw_id, tags, purge_tags): final_tags = [] filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'}) cur_tags = None try: - cur_tags = self._connection.describe_tags(Filters=filters) + cur_tags = self._connection.describe_tags(aws_retry=True, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="Couldn't describe tags") - purge_tags = bool(not add_only) + if tags is None: + return boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) final_tags = 
boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) @@ -159,7 +170,8 @@ def ensure_tags(self, igw_id, tags, add_only): # update tags final_tags.update(to_update) else: - AWSRetry.exponential_backoff()(self._connection.create_tags)( + self._connection.create_tags( + aws_retry=True, Resources=[igw_id], Tags=ansible_dict_to_boto3_tag_list(to_update) ) @@ -179,7 +191,7 @@ def ensure_tags(self, igw_id, tags, add_only): for key in to_delete: tags_list.append({'Key': key}) - AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list) + self._connection.delete_tags(aws_retry=True, Resources=[igw_id], Tags=tags_list) self._results['changed'] = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -187,7 +199,7 @@ def ensure_tags(self, igw_id, tags, add_only): if not self._check_mode and (to_update or to_delete): try: - response = self._connection.describe_tags(Filters=filters) + response = self._connection.describe_tags(aws_retry=True, Filters=filters) final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="Couldn't describe tags") @@ -213,14 +225,14 @@ def ensure_igw_absent(self, vpc_id): try: self._results['changed'] = True - self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) - self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id']) + self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) + self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") return self._results - def ensure_igw_present(self, vpc_id, tags): + def ensure_igw_present(self, vpc_id, tags, purge_tags): self.check_input_tags(tags) igw = self.get_matching_igw(vpc_id) @@ -232,21 +244,21 @@ def ensure_igw_present(self, vpc_id, tags): return self._results try: - response = self._connection.create_internet_gateway() + response = self._connection.create_internet_gateway(aws_retry=True) # Ensure the gateway exists before trying to attach it or add tags waiter = get_waiter(self._connection, 'internet_gateway_exists') waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) igw = camel_dict_to_snake_dict(response['InternetGateway']) - self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) + self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) self._results['changed'] = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') igw['vpc_id'] = vpc_id - igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False) + igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, purge_tags=purge_tags) igw_info = self.get_igw_info(igw) self._results.update(igw_info) @@ -258,7 +270,8 @@ def main(): argument_spec = dict( vpc_id=dict(required=True), state=dict(default='present', choices=['present', 'absent']), - tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags']) + tags=dict(required=False, type='dict', 
aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), ) module = AnsibleAWSModule( diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 4719d495fd8..ab7d26a80b4 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -27,6 +27,12 @@ - Get details of specific Internet Gateway ID. Provide this value as a list. type: list elements: str + convert_tags: + description: + - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format. + - This currently defaults to C(False). The default will be changed to C(True) after 2022-06-22. + type: bool + version_added: 1.3.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -94,31 +100,45 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -def get_internet_gateway_info(internet_gateway): +def get_internet_gateway_info(internet_gateway, convert_tags): + if convert_tags: + tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags']) + ignore_list = ["Tags"] + else: + tags = internet_gateway['Tags'] + ignore_list = [] internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'], 'Attachments': internet_gateway['Attachments'], - 'Tags': internet_gateway['Tags']} + 'Tags': tags} + + internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list) return internet_gateway_info -def list_internet_gateways(client, module): +def list_internet_gateways(connection, module): params = dict() params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + convert_tags = module.params.get('convert_tags') if module.params.get("internet_gateway_ids"): params['InternetGatewayIds'] = module.params.get("internet_gateway_ids") try: - all_internet_gateways = client.describe_internet_gateways(**params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) + all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params) + except is_boto3_error_code('InvalidInternetGatewayID.NotFound'): + module.fail_json('InternetGateway not found') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, 'Unable to describe internet gateways') - return [camel_dict_to_snake_dict(get_internet_gateway_info(igw)) + return [get_internet_gateway_info(igw, convert_tags) for igw in all_internet_gateways['InternetGateways']] @@ -126,15 +146,22 @@ def main(): argument_spec = dict( filters=dict(type='dict', default=dict()), internet_gateway_ids=dict(type='list', default=None, elements='str'), + convert_tags=dict(type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'ec2_vpc_igw_facts': module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws') + if 
module.params.get('convert_tags') is None: + module.deprecate('This module currently returns boto3 style tags by default. ' + 'This default has been deprecated and the module will return a simple dictionary in future. ' + 'This behaviour can be controlled through the convert_tags parameter.', + date='2021-12-01', collection_name='community.aws') + # Validate Requirements try: - connection = module.client('ec2') + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From 25a47dc8974956b5e7e330d88bffeffb6895af80 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Thu, 10 Dec 2020 12:26:03 +0000 Subject: [PATCH 065/683] Add jittered_backoff to the iam_policy & iam_policy_info modules to handle AWS rate limiting (#324) * Add jittered_backoff to handle AWS rate limiting * Fix for failing test * Add changelog fragment --- iam_policy.py | 28 ++++++++++++++-------------- iam_policy_info.py | 15 ++++++++------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/iam_policy.py b/iam_policy.py index e4debd7f1ce..819ed369a31 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -120,7 +120,7 @@ pass from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry from ansible.module_utils.six import string_types @@ -236,16 +236,16 @@ def _iam_type(): return 'user' def _list(self, name): - return self.client.list_user_policies(UserName=name) + return self.client.list_user_policies(aws_retry=True, UserName=name) def _get(self, name, policy_name): - return self.client.get_user_policy(UserName=name, PolicyName=policy_name) + return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) def _delete(self, name, policy_name): - return self.client.delete_user_policy(UserName=name, PolicyName=policy_name) + return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) class RolePolicy(Policy): @@ -255,16 +255,16 @@ def _iam_type(): return 'role' def _list(self, name): - return self.client.list_role_policies(RoleName=name) + return self.client.list_role_policies(aws_retry=True, RoleName=name) def _get(self, name, policy_name): - return self.client.get_role_policy(RoleName=name, PolicyName=policy_name) + return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_role_policy(RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) def _delete(self, name, policy_name): - return self.client.delete_role_policy(RoleName=name, PolicyName=policy_name) + return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) class GroupPolicy(Policy): @@ -274,16 +274,16 @@ def _iam_type(): return 'group' def _list(self, name): - return self.client.list_group_policies(GroupName=name) + 
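The recurring pattern in these retry patches is always the same two pieces: the client is built once with a retry decorator, and each call opts in with aws_retry=True. A hedged sketch of how they fit together inside a module's main() (simplified, not any single module's literal code):

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

# Inside an AnsibleAWSModule-based module's main():
client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
# The decorator wraps client methods so throttling errors are retried with
# exponentially growing, jittered delays -- but only calls that pass
# aws_retry=True are retried; all other calls behave exactly as before.
policies = client.list_user_policies(aws_retry=True, UserName='example-user')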
return self.client.list_group_policies(aws_retry=True, GroupName=name) def _get(self, name, policy_name): - return self.client.get_group_policy(GroupName=name, PolicyName=policy_name) + return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) def _delete(self, name, policy_name): - return self.client.delete_group_policy(GroupName=name, PolicyName=policy_name) + return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) def main(): @@ -314,7 +314,7 @@ def main(): date='2022-06-01', collection_name='community.aws') args = dict( - client=module.client('iam'), + client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), name=module.params.get('iam_name'), policy_name=module.params.get('policy_name'), policy_document=module.params.get('policy_document'), diff --git a/iam_policy_info.py b/iam_policy_info.py index f9ea30b8cc4..c919caec816 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -85,6 +85,7 @@ pass from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.six import string_types @@ -147,10 +148,10 @@ def _iam_type(): return 'user' def _list(self, name): - return self.client.list_user_policies(UserName=name) + return self.client.list_user_policies(aws_retry=True, UserName=name) def _get(self, name, policy_name): - return self.client.get_user_policy(UserName=name, PolicyName=policy_name) + return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) class RolePolicy(Policy): @@ -160,10 +161,10 @@ def _iam_type(): return 'role' def _list(self, name): - return self.client.list_role_policies(RoleName=name) + return self.client.list_role_policies(aws_retry=True, RoleName=name) def _get(self, name, policy_name): - return self.client.get_role_policy(RoleName=name, PolicyName=policy_name) + return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) class GroupPolicy(Policy): @@ -173,10 +174,10 @@ def _iam_type(): return 'group' def _list(self, name): - return self.client.list_group_policies(GroupName=name) + return self.client.list_group_policies(aws_retry=True, GroupName=name) def _get(self, name, policy_name): - return self.client.get_group_policy(GroupName=name, PolicyName=policy_name) + return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) def main(): @@ -189,7 +190,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) args = dict( - client=module.client('iam'), + client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), name=module.params.get('iam_name'), policy_name=module.params.get('policy_name'), ) From 4f23ad600e9e878ed9e5c5392a2fb785956fe81e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 15 Dec 2020 01:29:41 +0100 Subject: [PATCH 066/683] Launch Template cleanup retries and add tests into CI (#326) * Mark ec2_launch_template tests as supported * Update launch template test to use permitted harmless managed policy * Make sure we delete the instance profile * Cleanup imports * use ansible_dict_to_boto3_tag_list * Make sure retries 
are enabled * Update role names in line with CI policy * Lookup AMI name rather than hardcoding IDs * Add changelog fragment --- ec2_launch_template.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 4553a8e794d..4a35812cfb4 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -367,13 +367,14 @@ from uuid import uuid4 from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, - AWSRetry, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - ) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict try: from botocore.exceptions import ClientError, BotoCoreError, WaiterError @@ -399,9 +400,9 @@ def existing_templates(module): matches = None try: if module.params.get('template_id'): - matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')]) + matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True) elif module.params.get('template_name'): - matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')]) + matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True) except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e: # no named template was found, return nothing/empty versions return None, [] @@ -418,20 +419,18 @@ def existing_templates(module): template = matches['LaunchTemplates'][0] template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber'] try: - return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions'] + return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions'] except (ClientError, BotoCoreError, WaiterError) as e: module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id)) def params_to_launch_data(module, template_params): if template_params.get('tags'): + tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags')) template_params['tag_specifications'] = [ { 'resource_type': r_type, - 'tags': [ - {'Key': k, 'Value': v} for k, v - in template_params['tags'].items() - ] + 'tags': tag_list } for r_type in ('instance', 'volume') ] @@ -456,6 +455,7 @@ def delete_template(module): v_resp = ec2.delete_launch_template_versions( 
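The switch to ansible_dict_to_boto3_tag_list() in params_to_launch_data above replaces an inline comprehension with the shared helper. A simplified equivalent, for illustration of the conversion it performs:

def to_boto3_tag_list(tags_dict):
    # boto3 APIs expect tags as a list of {'Key': ..., 'Value': ...} dicts.
    return [{'Key': k, 'Value': v} for k, v in tags_dict.items()]

print(to_boto3_tag_list({'Name': 'web', 'Env': 'prod'}))
# -> [{'Key': 'Name', 'Value': 'web'}, {'Key': 'Env', 'Value': 'prod'}]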
LaunchTemplateId=template['LaunchTemplateId'], Versions=non_default_versions, + aws_retry=True, ) if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']: module.warn('Failed to delete template versions {0} on launch template {1}'.format( @@ -468,6 +468,7 @@ def delete_template(module): try: resp = ec2.delete_launch_template( LaunchTemplateId=template['LaunchTemplateId'], + aws_retry=True, ) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId'])) From 0f83c7888747f7d692f3256540930e322627d087 Mon Sep 17 00:00:00 2001 From: Phoosha Date: Tue, 15 Dec 2020 21:21:39 +0100 Subject: [PATCH 067/683] Fix eip association when both instance id and private ip address are passed (#328) * Fix ec2_eip with both instance_id and private_ip_address * Add changelog fragment for the ec2_eip fix --- ec2_eip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_eip.py b/ec2_eip.py index 6aa2a531069..54624f59182 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -241,7 +241,7 @@ def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, AllowReassociation=allow_reassociation, ) if private_ip_address: - params['PrivateIPAddress'] = private_ip_address + params['PrivateIpAddress'] = private_ip_address if address['Domain'] == 'vpc': params['AllocationId'] = address['AllocationId'] else: From 671e1b06f29400a0a86ec7954176997ebb908e88 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 16 Dec 2020 21:06:15 +0100 Subject: [PATCH 068/683] Use botocore_at_least rather than LooseVersion/StrictVersion directly (#280) * Use botocore_at_least rather than LooseVersion/StrictVersion directly * changelog --- dynamodb_ttl.py | 5 ++--- ec2_vpc_peer.py | 3 +-- ecs_task.py | 9 +++------ 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index b23c0ab076e..e04dedfafbb 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -65,7 +65,6 @@ - { "AttributeName": "deploy_timestamp", "Enabled": true } ''' -import distutils.version import traceback try: @@ -126,8 +125,8 @@ def main(): argument_spec=argument_spec, ) - if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'): - # TTL was added in this version. 
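module.botocore_at_least() centralises the version check that modules previously did by hand with distutils, as the dynamodb_ttl and ecs_task hunks below show. Roughly equivalent logic, written out for illustration (the helper itself lives in amazon.aws's module_utils):

import botocore
from distutils.version import LooseVersion

def botocore_at_least(desired):
    # What the helper replaces: parse the installed version and compare it.
    return LooseVersion(botocore.__version__) >= LooseVersion(desired)

if not botocore_at_least('1.5.24'):
    raise SystemExit('botocore >= 1.5.24 is required for TTL support')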
+ if not module.botocore_at_least('1.5.24'): + # TTL was added in 1.5.24 module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24')) try: diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 31f6ea203a7..c7efeff3829 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -221,7 +221,6 @@ except ImportError: pass # Handled by AnsibleAWSModule -import distutils.version import traceback from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -279,7 +278,7 @@ def create_peer_connection(client, module): params['VpcId'] = module.params.get('vpc_id') params['PeerVpcId'] = module.params.get('peer_vpc_id') if module.params.get('peer_region'): - if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.8.6'): + if not module.botocore_at_least('1.8.6'): module.fail_json(msg="specifying peer_region parameter requires botocore >= 1.8.6") params['PeerRegion'] = module.params.get('peer_region') if module.params.get('peer_owner_id'): diff --git a/ecs_task.py b/ecs_task.py index f43cd700d27..e8eeb9c57ea 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -307,29 +307,26 @@ def stop_task(self, cluster, task): return response['task'] def ecs_api_handles_launch_type(self): - from distutils.version import LooseVersion # There doesn't seem to be a nice way to inspect botocore to look # for attributes (and networkConfiguration is not an explicit argument # to e.g. ecs.run_task, it's just passed as a keyword argument) - return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4') + return self.module.botocore_at_least('1.8.4') def ecs_task_long_format_enabled(self): account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True) return account_support['settings'][0]['value'] == 'enabled' def ecs_api_handles_tags(self): - from distutils.version import LooseVersion # There doesn't seem to be a nice way to inspect botocore to look # for attributes (and networkConfiguration is not an explicit argument # to e.g. ecs.run_task, it's just passed as a keyword argument) - return LooseVersion(botocore.__version__) >= LooseVersion('1.12.46') + return self.module.botocore_at_least('1.12.46') def ecs_api_handles_network_configuration(self): - from distutils.version import LooseVersion # There doesn't seem to be a nice way to inspect botocore to look # for attributes (and networkConfiguration is not an explicit argument # to e.g. 
ecs.run_task, it's just passed as a keyword argument) - return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44') + return self.module.botocore_at_least('1.7.44') def main(): From 9d3f94a4e4b8f5ba3884cba5682d44bedbb6420b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jan 2021 19:31:36 +0100 Subject: [PATCH 069/683] ec2_metric_alarm - tweak example to be more generic (#339) * ec2_metric_alarm - Update docs to remove reference to slave We don't need to specifically talk about bamboo, and they now use 'agent' * Tweak import order in line with PEP-0008 --- ec2_metric_alarm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index 09e95d2fd6c..c7b4c28a8ad 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -180,7 +180,7 @@ period: 300 evaluation_periods: 3 unit: "Percent" - description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes " + description: "This will alarm when an instance's CPU usage average is lower than 5% for 15 minutes" dimensions: {'InstanceId':'i-XXX'} alarm_actions: ["action1","action2"] @@ -203,13 +203,13 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - try: from botocore.exceptions import ClientError except ImportError: pass # protected by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + def create_metric_alarm(connection, module): From 572ac6c55c2691cb629203f1a53d4eb236503ed6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Tue, 12 Jan 2021 12:47:34 -0500 Subject: [PATCH 070/683] elb_application_lb: also clean up the associated listeners (#350) state=`absent`: Properly remove the associated listeners before the final removal of the object. See: https://github.com/ansible/ansible/issues/49291 --- elb_application_lb.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/elb_application_lb.py b/elb_application_lb.py index f18a00816b8..f154c8803ff 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -589,6 +589,11 @@ def create_or_update_elb(elb_obj): def delete_elb(elb_obj): if elb_obj.elb: + listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj.delete() + elb_obj.delete() elb_obj.module.exit_json(changed=elb_obj.changed) From 32977ccebf2b06966123eca2ab8a544e6f1b9307 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 13 Jan 2021 13:26:38 +0100 Subject: [PATCH 071/683] ec2_eip - Formally deprecate the 'instance_id' alias. (#349) * ec2_eip - Formally deprecate the 'instance_id' alias. We've been issuing a warning for a while; formally deprecate it so we can start cleaning up the code. --- ec2_eip.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ec2_eip.py b/ec2_eip.py index 54624f59182..adf6f0bda41 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -20,6 +20,7 @@ device_id: description: - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. + - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01.
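# [Editor's sketch] The deprecation documented above pairs with a deprecated_aliases
# entry in the module's argument spec, shown in the hunk further below. In general
# form (a hedged illustration; the names mirror this patch, nothing else is implied):
_EXAMPLE_ARGUMENT_SPEC = dict(
    device_id=dict(
        required=False,
        aliases=['instance_id'],
        deprecated_aliases=[
            dict(name='instance_id', date='2022-12-01', collection_name='community.aws'),
        ],
    ),
)
# AnsibleModule emits the dated deprecation warning itself, which is why the hunk can
# also drop the hand-rolled "instance_id is no longer used" warning.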
required: false aliases: [ instance_id ] type: str @@ -519,7 +520,10 @@ def generate_tag_dict(module, tag_name, tag_value): def main(): argument_spec = dict( - device_id=dict(required=False, aliases=['instance_id']), + device_id=dict(required=False, aliases=['instance_id'], + deprecated_aliases=[dict(name='instance_id', + date='2022-12-01', + collection_name='community.aws')]), public_ip=dict(required=False, aliases=['ip']), state=dict(required=False, default='present', choices=['present', 'absent']), @@ -560,7 +564,6 @@ def main(): public_ipv4_pool = module.params.get('public_ipv4_pool') if instance_id: - warnings = ["instance_id is no longer used, please use device_id going forward"] is_instance = True device_id = instance_id else: @@ -629,8 +632,6 @@ def main(): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(str(e)) - if instance_id: - result['warnings'] = warnings module.exit_json(**result) From 2dc4babc5d1d6ad0a9f9ff3312273257e880c172 Mon Sep 17 00:00:00 2001 From: Sean Cavanaugh Date: Wed, 13 Jan 2021 13:48:15 -0500 Subject: [PATCH 072/683] add uptime parameter for ec2_instance_info module in minutes (#356) * syncing module and tests for uptime with tons of help from Yanis, we now have uptime in there * updating pr with fixes from suggestions adding to https://github.com/ansible-collections/community.aws/pull/356 with comments from @tremble and @duderamos * Create 356_add_minimum_uptime_parameter.yaml adding changelog fragment per @gravesm suggestion * Update 356_add_minimum_uptime_parameter.yaml last comment from @tremble Co-authored-by: Sean Cavanaugh --- ec2_instance_info.py | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 1c4c1f0df33..e37f2cf9cd1 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -33,6 +33,13 @@ required: false default: {} type: dict + minimum_uptime: + description: + - Minimum running uptime in minutes of instances. For example if I(uptime) is C(60) return all instances that have run more than 60 minutes. 
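# [Editor's sketch] The arithmetic behind minimum_uptime, as implemented later in this
# patch: an instance qualifies when its LaunchTime falls before now minus the requested
# number of minutes. A hedged standalone version (function name is illustrative):
import datetime

def _launched_before_cutoff(launch_time, minimum_uptime_minutes):
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(minutes=int(minimum_uptime_minutes))
    # LaunchTime from boto3 is timezone-aware UTC; strip tzinfo to compare with utcnow().
    return launch_time.replace(tzinfo=None) < cutoff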
+ required: false + aliases: ['uptime'] + type: int + extends_documentation_fragment: - amazon.aws.aws @@ -66,6 +73,15 @@ filters: instance-state-name: [ "shutting-down", "stopping", "stopped" ] +- name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes + community.aws.ec2_instance_info: + region: "{{ ec2_region }}" + uptime: 60 + filters: + "tag:Name": "RHEL-*" + instance-state-name: [ "running"] + register: ec2_node_info + ''' RETURN = r''' @@ -492,6 +508,8 @@ ''' import traceback +import datetime + try: import boto3 @@ -509,6 +527,7 @@ def list_ec2_instances(connection, module): instance_ids = module.params.get("instance_ids") + uptime = module.params.get('minimum_uptime') filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: @@ -517,10 +536,17 @@ def list_ec2_instances(connection, module): except ClientError as e: module.fail_json_aws(e, msg="Failed to list ec2 instances") - # Get instances from reservations instances = [] - for reservation in reservations['Reservations']: - instances = instances + reservation['Instances'] + + if uptime: + timedelta = int(uptime) if uptime else 0 + oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta) + # Get instances from reservations + for reservation in reservations['Reservations']: + instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time] + else: + for reservation in reservations['Reservations']: + instances = instances + reservation['Instances'] # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] @@ -535,6 +561,7 @@ def list_ec2_instances(connection, module): def main(): argument_spec = dict( + minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']), instance_ids=dict(default=[], type='list', elements='str'), filters=dict(default={}, type='dict') ) From e3d0411ec5c39c10f24131338ae5efce835296f4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 16 Jan 2021 10:50:49 +0100 Subject: [PATCH 073/683] Bulk import cleanup (#360) * Split imports and reorder * Import camel_dict_to_snake_dict and snake_dict_to_camel_dict direct from ansible.module_utils.common.dict_transformations * Remove unused imports * Route53 Info was migrated to Boto3 drop the HAS_BOTO check and import * changelog --- aws_config_aggregation_authorization.py | 1 - aws_config_delivery_channel.py | 8 +++++--- aws_config_recorder.py | 8 +++++--- aws_config_rule.py | 8 +++++--- cloudformation_stack_set.py | 19 +++++++++---------- cloudfront_origin_access_identity.py | 13 +++++-------- cloudwatchlogs_log_group_metric_filter.py | 9 +++------ dynamodb_ttl.py | 2 -- ec2_ami_copy.py | 13 +++++++------ ec2_customer_gateway.py | 5 ++--- ec2_elb.py | 1 - ec2_instance.py | 16 +++++++--------- ec2_instance_info.py | 6 ++---- ec2_lc_info.py | 4 ++-- ec2_snapshot_copy.py | 4 +--- ec2_vpc_endpoint_info.py | 6 +++--- ec2_vpc_vgw.py | 1 - ecs_attribute.py | 1 - ecs_cluster.py | 2 +- ecs_taskdefinition.py | 4 ++-- elasticache_parameter_group.py | 4 +--- elb_application_lb_info.py | 4 ++-- elb_instance.py | 1 - elb_target.py | 5 ++--- elb_target_group_info.py | 4 ++-- iam_mfa_device_info.py | 4 ++-- iam_server_certificate_info.py | 1 - iam_user_info.py | 9 +++++---- lambda_event.py | 5 ++--- route53_health_check.py | 5 +---- route53_info.py | 11 ++--------- s3_metrics_configuration.py | 8 ++++---- 
s3_website.py | 4 ++-- sns.py | 1 - sts_session_token.py | 1 - 35 files changed, 84 insertions(+), 114 deletions(-) diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index 5a4ee38bf81..e0f4af6f5b4 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -55,7 +55,6 @@ try: import botocore - from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by AnsibleAWSModule diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index 6e7fe5b2fa9..117afe80c0b 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -68,12 +68,14 @@ try: import botocore - from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry # this waits for an IAM role to become fully available, at the cost of diff --git a/aws_config_recorder.py b/aws_config_recorder.py index 2d3bf003d3b..e740241c082 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -82,12 +82,14 @@ try: import botocore - from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def resource_exists(client, module, params): diff --git a/aws_config_rule.py b/aws_config_rule.py index 80550586aa8..0beae0b63a8 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -110,12 +110,14 @@ try: import botocore - from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def rule_exists(client, module, params): diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 148cbe61047..ac7b57bfe9d 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -296,26 +296,25 @@ ''' # NOQA -import time import datetime -import uuid import itertools 
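# [Editor's note] The recurring edit in this patch: dict-casing helpers are imported
# from core Ansible instead of the amazon.aws ec2 module_utils re-export. Illustrative
# before/after (the 'after' path is the canonical location of the helper):
#   before: from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
#   after:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
# e.g. camel_dict_to_snake_dict({'StackSetName': 'demo'}) -> {'stack_set_name': 'demo'}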
+import time +import uuid try: - import boto3 - import botocore.exceptions from botocore.exceptions import ClientError, BotoCoreError except ImportError: # handled by AnsibleAWSModule pass -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, - camel_dict_to_snake_dict, - ) -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def create_stack_set(module, stack_params, cfn): diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 00f188222c5..7370f98625c 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -120,21 +120,18 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule import datetime -from functools import partial -import json -import traceback try: - import botocore - from botocore.signers import CloudFrontSigner from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + class CloudFrontOriginAccessIdentityServiceManager(object): """ diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index a05c7fe2029..04d0219e48b 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -100,13 +100,10 @@ ] """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError -except ImportError: - pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule def metricTransformationHandler(metricTransformations, originMetricTransformations=None): diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index e04dedfafbb..95bf95ffe78 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -65,8 +65,6 @@ - { "AttributeName": "deploy_timestamp", "Enabled": true } ''' -import traceback - try: import botocore except ImportError: diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 38f1123168f..a3a23454b28 100644 --- 
a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -135,16 +135,17 @@ sample: ami-e689729e ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible.module_utils._text import to_native - try: - from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError + from botocore.exceptions import ClientError, WaiterError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list + def copy_image(module, ec2): """ diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 1e9fc1ded47..5343b316a4f 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -110,15 +110,14 @@ ''' try: - from botocore.exceptions import ClientError - import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class Ec2CustomerGatewayManager: diff --git a/ec2_elb.py b/ec2_elb.py index d9a6231f6b5..f2c124e6e00 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -84,7 +84,6 @@ import boto.ec2 import boto.ec2.autoscale import boto.ec2.elb - from boto.regioninfo import RegionInfo except ImportError: pass # Handled by HAS_BOTO diff --git a/ec2_instance.py b/ec2_instance.py index aba7ac26b10..a240a350d13 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -795,32 +795,30 @@ sample: vpc-0011223344 ''' +from collections import namedtuple import re -import uuid import string import textwrap import time -from collections import namedtuple +import uuid try: - import boto3 import botocore.exceptions except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib import parse as urlparse -from ansible.module_utils._text import to_bytes -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags module = None diff --git a/ec2_instance_info.py b/ec2_instance_info.py index e37f2cf9cd1..be5f1e68892 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -507,21 +507,19 @@ sample: vpc-0011223344 ''' -import traceback import datetime - try: - import boto3 import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def list_ec2_instances(connection, module): diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 1d680c37bc9..1a51eb580b3 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -152,14 +152,14 @@ ''' try: - import boto3 import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def list_launch_configs(connection, module): diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 85f44d60000..695d0027d12 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -112,16 +112,14 @@ import traceback try: - import boto3 import botocore from botocore.exceptions import ClientError, WaiterError except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def copy_snapshot(module, ec2): diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index e72b487db3d..7e259c6ca8e 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -113,11 +113,11 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list def date_handler(obj): diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 2f8702ecace..d54e7264103 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -116,7 +116,6 @@ try: import botocore - import boto3 except ImportError: pass # Handled by 
AnsibleAWSModule diff --git a/ecs_attribute.py b/ecs_attribute.py index 552747ba10c..be9210f3272 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -111,7 +111,6 @@ ''' try: - import boto3 import botocore from botocore.exceptions import ClientError, EndpointConnectionError except ImportError: diff --git a/ecs_cluster.py b/ecs_cluster.py index ed0dc1c78ff..87e0476be9b 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -103,10 +103,10 @@ type: str sample: ACTIVE ''' + import time try: - import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 6158fb4ec21..ed2825d0942 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -213,10 +213,10 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils._text import to_text +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + class EcsTaskManager: """Handles ECS Tasks""" diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 1e9c574178d..00992a91e51 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -108,17 +108,15 @@ import traceback try: - import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types -# import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def create(module, conn, name, group_family, description): diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 06e1f3ae229..14937befba8 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -165,17 +165,17 @@ import traceback try: - import boto3 import botocore from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def get_elb_listeners(connection, module, elb_arn): diff --git a/elb_instance.py b/elb_instance.py index fe10d6cd8dc..97682acb659 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -90,7 +90,6 @@ import boto.ec2 import boto.ec2.autoscale import boto.ec2.elb - from boto.regioninfo import RegionInfo except ImportError: pass # Handled by HAS_BOTO diff --git a/elb_target.py b/elb_target.py index b8cda233814..31761953b17 100644 --- a/elb_target.py +++ b/elb_target.py @@ -111,20 +111,19 @@ ''' -import traceback from time import time, sleep +import traceback try: - import boto3 import botocore from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # Handled by AnsibleAWSModule from 
ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/elb_target_group_info.py b/elb_target_group_info.py index a9694428872..00cc425e0de 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -210,17 +210,17 @@ import traceback try: - import boto3 import botocore from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def get_target_group_attributes(connection, module, target_group_arn): diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index c79afab095f..e86687134a9 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -61,14 +61,14 @@ ''' try: - import boto3 import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def list_mfa_devices(connection, module): diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 6e37185680f..994344147e4 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -80,7 +80,6 @@ try: - import boto3 import botocore import botocore.exceptions except ImportError: diff --git a/iam_user_info.py b/iam_user_info.py index 8e1856b1763..f6aaa842eef 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -101,15 +101,16 @@ sample: "test_user" ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry - try: - import botocore from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + @AWSRetry.exponential_backoff() def list_iam_users_with_backoff(client, operation, **kwargs): diff --git a/lambda_event.py b/lambda_event.py index e0009d13582..3906771255f 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -115,17 +115,16 @@ ''' import re -import sys try: - import boto3 from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from 
ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info diff --git a/route53_health_check.py b/route53_health_check.py index a1f9c9a268c..03ac8b09af0 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -118,18 +118,15 @@ import uuid try: - import boto import boto.ec2 - from boto import route53 from boto.route53 import Route53Connection, exception from boto.route53.healthcheck import HealthCheck except ImportError: pass # Handled by HAS_BOTO -# import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info # Things that can't get changed: diff --git a/route53_info.py b/route53_info.py index 38d0bc540f5..abdf7e44709 100644 --- a/route53_info.py +++ b/route53_info.py @@ -204,18 +204,15 @@ start_record_name: "host1.workshop.test.io" register: RECORDS ''' + try: - import boto import botocore - import boto3 except ImportError: - pass # Handled by HAS_BOTO and HAS_BOTO3 + pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 def get_hosted_zone(client, module): @@ -461,10 +458,6 @@ def main(): if module._name == 'route53_facts': module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws') - # Validate Requirements - if not (HAS_BOTO or HAS_BOTO3): - module.fail_json(msg='json and boto/boto3 is required.') - try: route53 = module.client('route53') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index 729503cf88b..2480d1d7560 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -95,14 +95,14 @@ ''' try: - import boto3 - import botocore from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): diff --git a/s3_website.py b/s3_website.py index 6f7aa898391..8b93edb5bb7 100644 --- a/s3_website.py +++ b/s3_website.py @@ -162,15 +162,15 @@ import time try: - import boto3 import botocore from botocore.exceptions import ClientError, 
ParamValidationError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def _create_redirect_dict(url): diff --git a/sns.py b/sns.py index 49b73aa68f4..2840ddd2ac9 100644 --- a/sns.py +++ b/sns.py @@ -131,7 +131,6 @@ """ import json -import traceback try: from botocore.exceptions import BotoCoreError, ClientError diff --git a/sts_session_token.py b/sts_session_token.py index 4183b976d15..7c8221a9c68 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -80,7 +80,6 @@ ''' try: - import boto3 import botocore from botocore.exceptions import ClientError except ImportError: From 1e23b1bf2865745b29415861dd649467d2499997 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 27 Jan 2021 09:17:44 +0100 Subject: [PATCH 074/683] Bulk migration to fail_json_aws (#361) * Split imports and sort * Move camel_dict_to_snake_dict imports to ansible.module_utils.common.dict_transformations * Cleanup unused imports * Bulk migration to fail_json_aws * Changelog --- aws_api_gateway.py | 5 +- aws_codepipeline.py | 36 ++---- aws_direct_connect_confirm_connection.py | 9 +- aws_direct_connect_connection.py | 17 +-- aws_direct_connect_gateway.py | 20 ++-- aws_direct_connect_link_aggregation_group.py | 3 +- aws_direct_connect_virtual_interface.py | 10 +- aws_kms_info.py | 37 ++---- aws_s3_bucket_info.py | 9 +- cloudfront_info.py | 103 ++++++----------- cloudwatchlogs_log_group.py | 45 ++------ cloudwatchlogs_log_group_info.py | 13 +-- data_pipeline.py | 16 ++- dms_endpoint.py | 33 ++---- dms_replication_subnet_group.py | 24 ++-- ec2_asg.py | 90 +++++---------- ec2_lc.py | 20 ++-- ec2_scaling_policy.py | 2 +- ec2_snapshot_copy.py | 11 +- ec2_vpc_endpoint.py | 18 +-- ec2_vpc_nacl_info.py | 1 - ec2_vpc_peer.py | 10 +- ec2_vpc_vgw.py | 58 +++++----- ec2_vpc_vgw_info.py | 8 +- ecs_ecr.py | 7 +- elasticache.py | 7 +- elasticache_parameter_group.py | 18 ++- elasticache_snapshot.py | 14 +-- elb_application_lb_info.py | 16 +-- elb_target.py | 35 ++---- elb_target_group_info.py | 14 +-- execute_lambda.py | 23 ++-- iam_managed_policy.py | 114 +++++++------------ iam_policy_info.py | 3 - iam_user.py | 52 ++++----- lambda.py | 15 +-- lambda_alias.py | 1 - rds_instance.py | 7 +- rds_param_group.py | 61 +++------- redshift.py | 2 +- s3_lifecycle.py | 9 -- s3_sync.py | 10 +- s3_website.py | 17 ++- 43 files changed, 360 insertions(+), 663 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index f7466b1d1e2..ccf7c097b57 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -172,16 +172,17 @@ ''' import json +import traceback try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -import traceback +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def main(): diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 8b44dc7614e..101ccaee4df 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -194,18 +194,18 @@ ''' import copy 
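# [Editor's sketch] The pattern this patch rolls out across the collection:
# AnsibleAWSModule.fail_json_aws() attaches the formatted traceback and the
# snake_cased botocore error response itself, replacing the hand-built
# to_native()/traceback.format_exc() boilerplate removed below. Minimal hedged
# form (the CodePipeline call mirrors this patch; the function name is illustrative):
import botocore

def _delete_pipeline_sketch(client, module, name):
    try:
        return client.delete_pipeline(name=name)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # One call now produces the same failure payload the old code assembled by hand.
        module.fail_json_aws(e, msg="Unable to delete pipeline {0}".format(name))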
-import traceback - -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies - try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + def create_pipeline(client, name, role_arn, artifact_store, stages, version, module): pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages} @@ -214,36 +214,24 @@ def create_pipeline(client, name, role_arn, artifact_store, stages, version, mod try: resp = client.create_pipeline(pipeline=pipeline_dict) return resp - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable create pipeline {0}: {1}".format(name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name'])) def update_pipeline(client, pipeline_dict, module): try: resp = client.update_pipeline(pipeline=pipeline_dict) return resp - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name'])) def delete_pipeline(client, name, module): try: resp = client.delete_pipeline(name=name) return resp - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable delete pipeline {0}: {1}".format(name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name)) def describe_pipeline(client, name, version, module): diff --git a/aws_direct_connect_confirm_connection.py b/aws_direct_connect_confirm_connection.py index 948aa63c81c..642c9c306ca 100644 --- a/aws_direct_connect_confirm_connection.py +++ b/aws_direct_connect_confirm_connection.py @@ -61,15 +61,18 @@ ''' import traceback -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index a84e5f98523..e2ea2d5e232 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -156,20 +156,21 @@ """ import traceback -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry) -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import ( - DirectConnectError, - delete_connection, - associate_connection_and_lag, - disassociate_connection_and_lag, -) try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index b34d6c52a15..e1e6ae093f5 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -97,17 +97,15 @@ ''' import time -import traceback try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def dx_gateway_info(client, gateway_id, module): @@ -115,7 +113,7 @@ def dx_gateway_info(client, gateway_id, module): resp = client.describe_direct_connect_gateways( directConnectGatewayId=gateway_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to fetch gateway information.") if resp['directConnectGateways']: return resp['directConnectGateways'][0] @@ -142,7 +140,7 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): status_achieved = True break except (botocore.exceptions.BotoCoreError, 
botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed while waiting for gateway association.") result = response return status_achieved, result @@ -156,7 +154,7 @@ def associate_direct_connect_gateway(client, module, gateway_id): directConnectGatewayId=gateway_id, virtualGatewayId=params['virtual_gateway_id']) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, 'Failed to associate gateway') status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating') if not status_achieved: @@ -172,7 +170,7 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): directConnectGatewayId=gateway_id, virtualGatewayId=virtual_gateway_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to delete gateway association.") status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating') if not status_achieved: @@ -191,7 +189,7 @@ def create_dx_gateway(client, module): directConnectGatewayName=params['name'], amazonSideAsn=int(params['amazon_asn'])) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to create direct connect gateway.") result = response return result @@ -206,7 +204,7 @@ def find_dx_gateway(client, module, gateway_id=None): try: resp = client.describe_direct_connect_gateways(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to describe gateways") gateways.extend(resp['directConnectGateways']) if 'nextToken' in resp: params['nextToken'] = resp['nextToken'] @@ -233,7 +231,7 @@ def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None): virtualGatewayId=virtual_gateway_id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to check gateway association") return resp @@ -330,7 +328,7 @@ def ensure_absent(client, module): directConnectGatewayId=dx_gateway_id ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to delete gateway") result = resp['directConnectGateway'] return changed diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 41c50134dab..65294317b01 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -172,9 +172,10 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from 
ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index ba8391a00a0..6c7720fbc54 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -248,9 +248,6 @@ ''' import traceback -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError, delete_virtual_interface -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError @@ -258,6 +255,13 @@ # handled by AnsibleAWSModule pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError +from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + def try_except_ClientError(failure_msg): ''' diff --git a/aws_kms_info.py b/aws_kms_info.py index 235b7bc5b1e..978ed804ec2 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -215,17 +215,16 @@ ''' -import traceback - try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict # Caching lookup for aliases @@ -309,9 +308,7 @@ def get_kms_tags(connection, module, key_id): tags.extend(tag_response['Tags']) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != 'AccessDeniedException': - module.fail_json(msg="Failed to obtain key tags", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to obtain key tags") else: tag_response = {} if tag_response.get('NextMarker'): @@ -328,9 +325,7 @@ def get_kms_policies(connection, module, key_id): policy in policies] except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != 'AccessDeniedException': - module.fail_json(msg="Failed to obtain key policies", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to obtain key policies") else: return [] @@ -360,18 +355,14 @@ def get_key_details(connection, module, key_id, tokens=None): tokens = [] try: result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to obtain key metadata", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to 
obtain key metadata") result['KeyArn'] = result.pop('Arn') try: aliases = get_kms_aliases_lookup(connection) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to obtain aliases", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain aliases") result['aliases'] = aliases.get(result['KeyId'], []) if result['Origin'] == 'AWS_KMS': @@ -384,10 +375,8 @@ def get_key_details(connection, module, key_id, tokens=None): try: result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to obtain key grants", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain key grants") tags = get_kms_tags(connection, module, key_id) result = camel_dict_to_snake_dict(result) @@ -399,10 +388,8 @@ def get_key_details(connection, module, key_id, tokens=None): def get_kms_info(connection, module): try: keys = get_kms_keys_with_backoff(connection)['Keys'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to obtain keys", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain keys") return [get_key_details(connection, module, key['KeyId']) for key in keys] diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index cd8b81f36c9..40de3650c9c 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -49,17 +49,14 @@ type: list ''' -import traceback - try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def get_bucket_list(module, connection): @@ -71,8 +68,8 @@ def get_bucket_list(module, connection): """ try: buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list buckets") return buckets diff --git a/cloudfront_info.py b/cloudfront_info.py index 293cd2f0aa6..2b0edcaf841 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -272,7 +272,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class CloudFrontServiceManager: @@ -290,64 +289,50 @@ def get_distribution(self, distribution_id): try: func = partial(self.client.get_distribution, Id=distribution_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing distribution - " + str(e), - 
exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing distribution") def get_distribution_config(self, distribution_id): try: func = partial(self.client.get_distribution_config, Id=distribution_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing distribution configuration - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing distribution configuration") def get_origin_access_identity(self, origin_access_identity_id): try: func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing origin access identity - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing origin access identity") def get_origin_access_identity_config(self, origin_access_identity_id): try: func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") def get_invalidation(self, distribution_id, invalidation_id): try: func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing invalidation - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing invalidation") def get_streaming_distribution(self, distribution_id): try: func = partial(self.client.get_streaming_distribution, Id=distribution_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing streaming distribution - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error describing streaming distribution") def get_streaming_distribution_config(self, distribution_id): try: func = partial(self.client.get_streaming_distribution_config, Id=distribution_id) return self.paginated_response(func) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error describing streaming distribution - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + 
self.module.fail_json_aws(e, msg="Error describing streaming distribution") def list_origin_access_identities(self): try: @@ -356,10 +341,8 @@ def list_origin_access_identities(self): if origin_access_identity_list['Quantity'] > 0: return origin_access_identity_list['Items'] return {} - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities") def list_distributions(self, keyed=True): try: @@ -372,10 +355,8 @@ def list_distributions(self, keyed=True): if not keyed: return distribution_list return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error listing distributions - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error listing distributions") def list_distributions_by_web_acl_id(self, web_acl_id): try: @@ -386,10 +367,8 @@ def list_distributions_by_web_acl_id(self, web_acl_id): else: distribution_list = distribution_list['Items'] return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") def list_invalidations(self, distribution_id): try: @@ -398,10 +377,8 @@ def list_invalidations(self, distribution_id): if invalidation_list['Quantity'] > 0: return invalidation_list['Items'] return {} - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error listing invalidations - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error listing invalidations") def list_streaming_distributions(self, keyed=True): try: @@ -414,10 +391,8 @@ def list_streaming_distributions(self, keyed=True): if not keyed: return streaming_distribution_list return self.keyed_list_helper(streaming_distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error listing streaming distributions - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error listing streaming distributions") def summary(self): summary_dict = {} @@ -436,10 +411,8 @@ def summary_get_origin_access_identity_list(self): oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} origin_access_identity_list['origin_access_identities'].append(oai_summary) return origin_access_identity_list - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") def summary_get_distribution_list(self, streaming=False): try: @@ -462,10 +435,8 @@ def summary_get_distribution_list(self, streaming=False): temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) distribution_list[list_name].append(temp_distribution) return distribution_list - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error generating summary of distributions - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error generating summary of distributions") except Exception as e: self.module.fail_json(msg="Error generating summary of distributions - " + str(e), exception=traceback.format_exc()) @@ -485,10 +456,8 @@ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): for invalidation in invalidations: invalidation_ids.append(invalidation['Id']) return invalidation_ids - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") def get_distribution_id_from_domain_name(self, domain_name): try: @@ -502,10 +471,8 @@ def get_distribution_id_from_domain_name(self, domain_name): distribution_id = dist['Id'] break return distribution_id - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") def get_aliases_from_distribution_id(self, distribution_id): aliases = [] @@ -517,10 +484,8 @@ def get_aliases_from_distribution_id(self, distribution_id): aliases.append(alias) break return aliases - except botocore.exceptions.ClientError as e: - self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") def paginated_response(self, func, result_key=""): ''' diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index e8890988509..a5e9ab3192d 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -128,17 +128,14 @@ type: str ''' -import traceback - try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): @@ -150,12 +147,8 @@ def create_log_group(client, log_group_name, 
kms_key_id, tags, retention, module try: client.create_log_group(**request) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create log group") if retention: input_retention_policy(client=client, @@ -183,23 +176,15 @@ def input_retention_policy(client, log_group_name, retention, module): else: delete_log_group(client=client, log_group_name=log_group_name, module=module) module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) def delete_retention_policy(client, log_group_name, module): try: client.delete_retention_policy(logGroupName=log_group_name) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) def delete_log_group(client, log_group_name, module): @@ -213,24 +198,16 @@ def delete_log_group(client, log_group_name, module): if log_group_name == i['logGroupName']: client.delete_log_group(logGroupName=log_group_name) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) def describe_log_group(client, log_group_name, module): try: desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) return desc_log_group - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except 
botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) def main(): diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index 153aac7baf0..a7f311826e9 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -71,17 +71,14 @@ type: str ''' -import traceback - try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def describe_log_group(client, log_group_name, module): @@ -92,12 +89,8 @@ def describe_log_group(client, log_group_name, module): paginator = client.get_paginator('describe_log_groups') desc_log_group = paginator.paginate(**params).build_full_result() return desc_log_group - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) def main(): diff --git a/data_pipeline.py b/data_pipeline.py index 2e49dcc6aaa..54a4cd6f39a 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -200,19 +200,17 @@ import hashlib import json import time -import traceback try: - import boto3 import botocore from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] @@ -546,10 +544,10 @@ def define_pipeline(client, module, objects, dp_id): parameterValues=values) msg = 'Data Pipeline {0} has been updated.'.format(dp_name) changed = True - except ClientError as e: - module.fail_json(msg="Failed to put the definition for pipeline {0}. Check that string/reference fields" - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" - "objects".format(dp_name), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. 
Check that string/reference fields" + "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" + "objects".format(dp_name)) else: changed = False msg = "" @@ -585,11 +583,11 @@ def create_pipeline(client, module): tags=tags) dp_id = dp['pipelineId'] pipeline_exists_timeout(client, dp_id, timeout) - except ClientError as e: - module.fail_json(msg="Failed to create the data pipeline {0}.".format(dp_name), exception=traceback.format_exc()) except TimeOutException: module.fail_json(msg=('Data Pipeline {0} failed to create' 'within timeout {1} seconds').format(dp_name, timeout)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name)) # Put pipeline definition changed, msg = define_pipeline(client, module, objects, dp_id) diff --git a/dms_endpoint.py b/dms_endpoint.py index 829aae2773d..d457a7c4208 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -167,14 +167,14 @@ RETURN = ''' # ''' -import traceback -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + backoff_params = dict(tries=5, delay=1, backoff=1.5) @@ -249,13 +249,8 @@ def delete_dms_endpoint(connection): return delete_output else: return connection.delete_endpoint(**delete_arn) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to delete the DMS endpoint.", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to delete the DMS endpoint.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete the DMS endpoint.") def create_module_params(): @@ -359,13 +354,8 @@ def modify_dms_endpoint(connection): try: params = create_module_params() return dms_modify_endpoint(connection, **params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to update DMS endpoint.", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to update DMS endpoint.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update DMS endpoint.") def create_dms_endpoint(connection): @@ -378,13 +368,8 @@ def create_dms_endpoint(connection): try: params = create_module_params() return dms_create_endpoint(connection, **params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to create DMS endpoint.", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to create DMS endpoint.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create DMS endpoint.") def main(): diff --git a/dms_replication_subnet_group.py 
b/dms_replication_subnet_group.py index 5aa633b44f3..305b6b5a85d 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -58,14 +58,14 @@ RETURN = ''' # ''' -import traceback -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + backoff_params = dict(tries=5, delay=1, backoff=1.5) @@ -156,26 +156,16 @@ def create_replication_subnet_group(module, connection): try: params = create_module_params(module) return replication_subnet_group_create(connection, **params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to create DMS replication subnet group.", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to create DMS replication subnet group.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create DMS replication subnet group.") def modify_replication_subnet_group(module, connection): try: modify_params = create_module_params(module) return replication_subnet_group_modify(connection, **modify_params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to Modify the DMS replication subnet group.", - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to Modify the DMS replication subnet group.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to modify the DMS replication subnet group.") def main(): diff --git a/ec2_asg.py b/ec2_asg.py index 568b0fca2ca..ee07b68f516 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -526,20 +526,17 @@ ''' import time -import traceback - -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - camel_dict_to_snake_dict ) try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize', @@ -780,8 +777,7 @@ def get_launch_object(connection, ec2_connection): try: launch_configs = describe_launch_configurations(connection, launch_config_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to describe launch configurations", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to describe launch configurations") if len(launch_configs['LaunchConfigurations']) == 0: module.fail_json(msg="No launch config found 
with name %s" % launch_config_name) launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} @@ -859,11 +855,9 @@ def elb_healthy(asg_connection, elb_connection, group_name): if e.response['Error']['Code'] == 'InvalidInstance': return None - module.fail_json(msg="Failed to get load balancer.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to get load balancer.") except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to get load balancer.", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to get load balancer.") for i in lb_instances.get('InstanceStates'): if i['State'] == "InService": @@ -893,11 +887,9 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): if e.response['Error']['Code'] == 'InvalidInstance': return None - module.fail_json(msg="Failed to get target group.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Failed to get target group.") except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to get target group.", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to get target group.") for i in tg_instances.get('TargetHealthDescriptions'): if i['TargetHealth']['State'] == "healthy": @@ -1006,8 +998,7 @@ def create_autoscaling_group(connection): try: as_groups = describe_autoscaling_groups(connection, group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to describe auto scaling groups.", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to describe auto scaling groups.") ec2_connection = module.client('ec2') @@ -1064,8 +1055,7 @@ def create_autoscaling_group(connection): else: ag['LaunchTemplate'] = launch_object['LaunchTemplate'] else: - module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Missing LaunchConfigurationName or LaunchTemplate") try: create_asg(connection, **ag) @@ -1090,12 +1080,8 @@ def create_autoscaling_group(connection): asg_properties = get_properties(as_group) changed = True return changed, asg_properties - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to create Autoscaling Group.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to create Autoscaling Group.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create Autoscaling Group.") else: as_group = as_groups[0] initial_asg_properties = get_properties(as_group) @@ -1135,12 +1121,8 @@ def create_autoscaling_group(connection): changed = True try: attach_load_balancers(connection, group_name, load_balancers) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to update Autoscaling Group.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to update Autoscaling Group.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update load 
balancers if they are specified and one or more already exists elif as_group['LoadBalancerNames']: @@ -1160,8 +1142,7 @@ def create_autoscaling_group(connection): try: detach_load_balancers(connection, group_name, list(elbs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)), - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach)) if wanted_elbs - has_elbs: # if has contains less than wanted, then we need to add some elbs_to_attach = wanted_elbs.difference(has_elbs) @@ -1170,8 +1151,7 @@ def create_autoscaling_group(connection): try: attach_load_balancers(connection, group_name, list(elbs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)), - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach)) # Handle target group attachments/detachments # Attach target groups if they are specified but none currently exist @@ -1179,12 +1159,8 @@ def create_autoscaling_group(connection): changed = True try: attach_lb_target_groups(connection, group_name, target_group_arns) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to update Autoscaling Group.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to update Autoscaling Group.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update target groups if they are specified and one or more already exists elif target_group_arns is not None and as_group['TargetGroupARNs']: # Get differences @@ -1199,8 +1175,7 @@ def create_autoscaling_group(connection): try: detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)), - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) if wanted_tgs.issuperset(has_tgs): # if has contains less than wanted, then we need to add some tgs_to_attach = wanted_tgs.difference(has_tgs) @@ -1209,8 +1184,7 @@ def create_autoscaling_group(connection): try: attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)), - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values @@ -1263,17 +1237,13 @@ def create_autoscaling_group(connection): connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e), - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to update autoscaling group") + if notification_topic: try: put_notification_config(connection, group_name, notification_topic, notification_types) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to update Autoscaling Group notifications.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to update Autoscaling Group notifications.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.") if wait_for_instances: wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') # Wait for ELB health if ELB(s)defined @@ -1291,12 +1261,8 @@ def create_autoscaling_group(connection): asg_properties = get_properties(as_group) if asg_properties != initial_asg_properties: changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to read existing Autoscaling Groups.", - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.BotoCoreError as e: - module.fail_json(msg="Failed to read existing Autoscaling Groups.", - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to read existing Autoscaling Groups.") return changed, asg_properties diff --git a/ec2_lc.py b/ec2_lc.py index 7555cf68a0c..1ba881dc245 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -456,11 +456,11 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict def create_block_device_meta(module, volume): @@ -555,8 +555,8 @@ def create_launch_config(connection, module): try: launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to describe launch configuration by name", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe launch configuration by name") changed = False result = {} @@ -597,8 +597,8 @@ def create_launch_config(connection, module): changed = True if launch_configs: launch_config = launch_configs[0] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create launch configuration") 
result = (dict((k, v) for k, v in launch_config.items() if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings'])) @@ -643,8 +643,8 @@ def delete_launch_config(connection, module): module.exit_json(changed=True) else: module.exit_json(changed=False) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Failed to delete launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete launch configuration") def main(): @@ -680,8 +680,8 @@ def main(): try: connection = module.client('autoscaling') - except botocore.exceptions.ClientError as e: - module.fail_json(msg="unable to establish connection - " + str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="unable to establish connection") state = module.params.get('state') diff --git a/ec2_scaling_policy.py b/ec2_scaling_policy.py index 656519b43cb..7aeabd1d7da 100644 --- a/ec2_scaling_policy.py +++ b/ec2_scaling_policy.py @@ -317,7 +317,7 @@ def create_scaling_policy(connection, module): AutoScalingGroupName=asg_name, PolicyNames=[policy_name])['ScalingPolicies'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 695d0027d12..2bf1d723b7e 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -109,16 +109,11 @@ sample: "snap-e9095e8c" ''' -import traceback - try: import botocore - from botocore.exceptions import ClientError, WaiterError except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -159,10 +154,8 @@ def copy_snapshot(module, ec2): Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()] ) - except WaiterError as we: - module.fail_json(msg='An error occurred waiting for the snapshot to become available. 
(%s)' % str(we), exception=traceback.format_exc()) - except ClientError as ce: - module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.') module.exit_json(changed=True, snapshot_id=snapshot_id) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 771ea52ba75..4daaaeaa23e 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -186,11 +186,10 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def date_handler(obj): @@ -210,9 +209,8 @@ def wait_for_status(client, module, resource_id, status): break else: time.sleep(polling_increment_secs) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failure while waiting for status') return status_achieved, resource @@ -296,9 +294,8 @@ def create_vpc_endpoint(client, module): module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to create VPC endpoint.") return changed, result @@ -318,11 +315,8 @@ def setup_removal(client, module): except is_boto3_error_code('DryRunOperation'): changed = True result = 'Would have deleted VPC Endpoint if not in check mode' - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "Failed to delete VPC endpoint") - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) return changed, result diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index aabe489c112..1e42e486cea 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -110,7 +110,6 @@ pass # caught by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index c7efeff3829..cea160d34ff 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -221,8 +221,6 @@ except ImportError: pass # Handled by AnsibleAWSModule -import 
traceback - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code @@ -333,10 +331,10 @@ def peer_status(client, module): try: vpc_peering_connection = client.describe_vpc_peering_connections(**params) return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code'] - except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: # pylint: disable=duplicate-except - module.fail_json(msg='Malformed connection ID: {0}'.format(e), traceback=traceback.format_exc()) - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e), traceback=traceback.format_exc()) + except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: + module.fail_json_aws(e, msg='Malformed connection ID') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Error while describing peering connection by peering_id') def accept_reject(state, client, module): diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index d54e7264103..ce68833bcfc 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -112,15 +112,12 @@ ''' import time -import traceback try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -162,8 +159,8 @@ def wait_for_status(client, module, vpn_gateway_id, status): break else: time.sleep(polling_increment_secs) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failure while waiting for status update') result = response return status_achieved, result @@ -181,8 +178,8 @@ def attach_vgw(client, module, vpn_gateway_id): catch_extra_error_codes=['InvalidParameterValue'] )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to attach VPN gateway to VPC') status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') if not status_achieved: @@ -199,13 +196,13 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): if vpc_id: try: response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to detach gateway') else: try: response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, 
msg='Failed to detach gateway') status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') if not status_achieved: @@ -229,12 +226,11 @@ def create_vgw(client, module): VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']] ) except botocore.exceptions.WaiterError as e: - module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']), - exception=traceback.format_exc()) - except is_boto3_error_code('VpnGatewayLimitExceeded'): - module.fail_json(msg="Too many VPN gateways exist in this account.", exception=traceback.format_exc()) - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId'])) + except is_boto3_error_code('VpnGatewayLimitExceeded') as e: + module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to create gateway') result = response return result @@ -244,8 +240,8 @@ def delete_vgw(client, module, vpn_gateway_id): try: response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to delete gateway') # return the deleted VpnGatewayId as this is not included in the above response result = vpn_gateway_id @@ -257,8 +253,8 @@ def create_tags(client, module, vpn_gateway_id): try: response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module)) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to add tags") result = response return result @@ -270,13 +266,13 @@ def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None): if tags_to_delete: try: response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to delete tags') else: try: response = client.delete_tags(Resources=[vpn_gateway_id]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to delete all tags') result = response return result @@ -301,8 +297,8 @@ def find_tags(client, module, resource_id=None): response = client.describe_tags(Filters=[ {'Name': 'resource-id', 'Values': [resource_id]} ]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe tags searching by resource') result = response return result @@ -348,8 +344,8 @@ def find_vpc(client, module): if 
params['vpc_id']: try: response = client.describe_vpcs(VpcIds=[params['vpc_id']]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe VPC') result = response return result @@ -368,8 +364,8 @@ def find_vgw(client, module, vpn_gateway_id=None): params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) try: response = client.describe_vpn_gateways(**params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe gateway using filters') return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId']) diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 692c291a87b..5a27f9d672f 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -89,15 +89,15 @@ type: bool sample: "false" ''' -import traceback try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list @@ -121,8 +121,8 @@ def list_virtual_gateways(client, module): try: all_virtual_gateways = client.describe_vpn_gateways(**params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list gateways") return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw)) for vgw in all_virtual_gateways['VpnGateways']] diff --git a/ecs_ecr.py b/ecs_ecr.py index 4ae7d40cd2a..5b7ddd261f4 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -192,10 +192,13 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict + def build_kwargs(registry_id): """ diff --git a/elasticache.py b/elasticache.py index d6a649ba17b..5fb45a8883b 100644 --- a/elasticache.py +++ b/elasticache.py @@ -127,17 +127,14 @@ """ from time import sleep -from traceback import format_exc try: - import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class ElastiCacheManager(object): @@ -225,7 +222,7 @@ def 
create(self): try: self.conn.create_cache_cluster(**kwargs) - except botocore.exceptions.ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to create cache cluster") self._refresh_data() @@ -252,7 +249,7 @@ def delete(self): try: response = self.conn.delete_cache_cluster(CacheClusterId=self.name) - except botocore.exceptions.ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to delete cache cluster") cache_cluster_data = response['CacheCluster'] diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 00992a91e51..dd5dffbc4e9 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -105,8 +105,6 @@ changed: true """ -import traceback - try: import botocore except ImportError: @@ -124,8 +122,8 @@ def create(module, conn, name, group_family, description): try: response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description) changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create cache parameter group.") return response, changed @@ -135,8 +133,8 @@ def delete(module, conn, name): conn.delete_cache_parameter_group(CacheParameterGroupName=name) response = {} changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete cache parameter group.") return response, changed @@ -230,8 +228,8 @@ def modify(module, conn, name, values): format_parameters.append({'ParameterName': key, 'ParameterValue': value}) try: response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to modify cache parameter group.") return response @@ -254,8 +252,8 @@ def reset(module, conn, name, values): try: response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to reset cache parameter group.") # determine changed new_parameters_dict = make_current_modifiable_param_dict(module, conn, name) diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index dc92df6b3c2..d07125023bd 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -111,16 +111,14 @@ changed: true """ -import traceback - try: - import boto3 
import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def create(module, connection, replication_id, cluster_id, name): @@ -135,7 +133,7 @@ def create(module, connection, replication_id, cluster_id, name): response = {} changed = False else: - module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Unable to create the snapshot.") return response, changed @@ -146,8 +144,8 @@ def copy(module, connection, name, target, bucket): TargetSnapshotName=target, TargetBucket=bucket) changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to copy the snapshot.") return response, changed @@ -164,7 +162,7 @@ def delete(module, connection, name): module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." "You may need to wait a few minutes.") else: - module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Unable to delete the snapshot.") return response, changed diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 14937befba8..e3003789911 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -162,15 +162,11 @@ sample: vpc-0011223344 ''' -import traceback - try: import botocore - from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -182,7 +178,7 @@ def get_elb_listeners(connection, module, elb_arn): try: return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners'] - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe elb listeners") @@ -190,7 +186,7 @@ def get_listener_rules(connection, module, listener_arn): try: return connection.describe_rules(ListenerArn=listener_arn)['Rules'] - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe listener rules") @@ -198,7 +194,7 @@ def get_load_balancer_attributes(connection, module, load_balancer_arn): try: load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley @@ -213,7 +209,7 @@ def get_load_balancer_tags(connection, module, load_balancer_arn): try: return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer tags") @@ -232,10 +228,8 @@ def list_load_balancers(connection, module): load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result() except is_boto3_error_code('LoadBalancerNotFound'): module.exit_json(load_balancers=[]) - except ClientError as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list load balancers") - except NoCredentialsError as e: - module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc()) for load_balancer in load_balancers['LoadBalancers']: # Get the attributes for each elb diff --git a/elb_target.py b/elb_target.py index 31761953b17..4e3601a70a2 100644 --- a/elb_target.py +++ b/elb_target.py @@ -112,15 +112,12 @@ ''' from time import time, sleep -import traceback try: import botocore - from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -136,12 +133,8 @@ def convert_tg_name_to_arn(connection, module, tg_name): try: response = describe_target_groups_with_backoff(connection, tg_name) - except ClientError as e: - module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except BotoCoreError as e: - module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) tg_arn = response['TargetGroups'][0]['TargetGroupArn'] @@ -175,12 +168,8 @@ def describe_targets(connection, module, tg_arn, target=None): if not targets: return {} return targets[0] - except ClientError as e: - module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except BotoCoreError as e: - module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target)) @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -224,12 +213,8 @@ def register_target(connection, module): changed = True if target_status: target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) - except ClientError as e: - module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc(), 
**camel_dict_to_snake_dict(e.response)) - except BotoCoreError as e: - module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target)) # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) @@ -283,12 +268,8 @@ def deregister_target(connection, module): try: deregister_target_with_backoff(connection, target_group_arn, target) changed = True - except ClientError as e: - module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except BotoCoreError as e: - module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target)) else: if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining': module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " + diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 00cc425e0de..973743766b1 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -207,15 +207,11 @@ sample: vpc-0123456 ''' -import traceback - try: import botocore - from botocore.exceptions import ClientError, NoCredentialsError except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -227,7 +223,7 @@ def get_target_group_attributes(connection, module, target_group_arn): try: target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe target group attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley @@ -239,7 +235,7 @@ def get_target_group_tags(connection, module, target_group_arn): try: return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe group tags") @@ -247,7 +243,7 @@ def get_target_group_targets_health(connection, module, target_group_arn): try: return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get target health") @@ -270,10 +266,8 @@ def list_target_groups(connection, module): target_groups = target_group_paginator.paginate(Names=names).build_full_result() except is_boto3_error_code('TargetGroupNotFound'): module.exit_json(target_groups=[]) - except ClientError as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list target groups") - except NoCredentialsError as e: - module.fail_json(msg="AWS authentication problem. " + to_native(e), exception=traceback.format_exc()) # Get the attributes and tags for each target group for target_group in target_groups['TargetGroups']: diff --git a/execute_lambda.py b/execute_lambda.py index ca97f6619c9..199a50fd0a7 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -129,15 +129,12 @@ import base64 import json -import traceback try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -211,18 +208,14 @@ def main(): response = client.invoke(**invoke_params) except botocore.exceptions.ClientError as ce: if ce.response['Error']['Code'] == 'ResourceNotFoundException': - module.fail_json(msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.", - exception=traceback.format_exc()) - module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error", - exception=traceback.format_exc()) + module.fail_json_aws(ce, msg="Could not find Lambda to execute. 
Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function.") + module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") except botocore.exceptions.ParamValidationError as ve: - module.fail_json(msg="Parameters to `invoke` failed to validate", - exception=traceback.format_exc()) + module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") except Exception as e: - module.fail_json(msg="Unexpected failure while invoking Lambda function", - exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") results = { 'logs': '', @@ -235,13 +228,13 @@ def main(): # logs are base64 encoded in the API response results['logs'] = base64.b64decode(response.get('LogResult', '')) except Exception as e: - module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed while decoding logs") if invoke_params['InvocationType'] == 'RequestResponse': try: results['output'] = json.loads(response['Payload'].read().decode('utf8')) except Exception as e: - module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc()) + module.fail_json_aws(e, msg="Failed while decoding function return value") if isinstance(results.get('output'), dict) and any( [results['output'].get('stackTrace'), results['output'].get('errorMessage')]): diff --git a/iam_managed_policy.py b/iam_managed_policy.py index a0b7c3c48af..aa668498ad1 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -130,7 +130,6 @@ ''' import json -import traceback try: import botocore @@ -138,10 +137,10 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies @@ -154,10 +153,8 @@ def list_policies_with_backoff(iam): def get_policy_by_name(module, iam, name): try: response = list_policies_with_backoff(iam) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't list policies: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policies") for policy in response['Policies']: if policy['PolicyName'] == name: return policy @@ -168,36 +165,28 @@ def delete_oldest_non_default_version(module, iam, policy): try: versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] if not v['IsDefaultVersion']] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't list policy versions: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") versions.sort(key=lambda v: v['CreateDate'], reverse=True) for v in versions[-1:]: try: iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) - except botocore.exceptions.ClientError as e: - 
module.fail_json(msg="Couldn't delete policy version: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete policy version") # This needs to return policy_version, changed def get_or_create_policy_version(module, iam, policy, policy_document): try: versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't list policy versions: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: document = iam.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) # If the current policy matches the existing one if not compare_policies(document, json.loads(to_native(policy_document))): return v, False @@ -217,23 +206,19 @@ def get_or_create_policy_version(module, iam, policy, policy_document): try: version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True - except botocore.exceptions.ClientError as second_e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: e = second_e # Handle both when the exception isn't LimitExceeded or # the second attempt still failed - module.fail_json(msg="Couldn't create policy version: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Couldn't create policy version") def set_if_default(module, iam, policy, policy_version, is_default): if is_default and not policy_version['IsDefaultVersion']: try: iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't set default policy version: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't set default policy version") return True return False @@ -243,17 +228,13 @@ def set_if_only(module, iam, policy, policy_version, is_only): try: versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[ 'Versions'] if not v['IsDefaultVersion']] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't list policy versions: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't delete policy version: %s" % str(e), 
- exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete policy version") return len(versions) > 0 return False @@ -261,32 +242,24 @@ def set_if_only(module, iam, policy, policy_version, is_only): def detach_all_entities(module, iam, policy, **kwargs): try: entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list entities for policy {0}".format(policy['PolicyName'])) for g in entities['PolicyGroups']: try: iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) for u in entities['PolicyUsers']: try: iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) for r in entities['PolicyRoles']: try: iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) if entities['IsTruncated']: detach_all_entities(module, iam, policy, marker=entities['Marker']) @@ -330,10 +303,8 @@ def main(): try: rvalue = iam.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) - except Exception as e: - module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) else: @@ -344,10 +315,8 @@ def main(): if changed: try: p = iam.get_policy(PolicyArn=p['Arn'])['Policy'] - except Exception as e: - module.fail_json(msg="Couldn't get policy: %s" % to_native(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get policy") module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p)) else: @@ -358,26 +327,21 
@@ def main(): # Delete Versions try: versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: if not v['IsDefaultVersion']: try: iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId']) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't delete policy version %s: %s" % - (v['VersionId'], to_native(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) # Delete policy try: iam.delete_policy(PolicyArn=p['Arn']) - except Exception as e: - module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(p['PolicyName'])) + # This is the one case where we will return the old policy module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p)) else: diff --git a/iam_policy_info.py b/iam_policy_info.py index c919caec816..e934e09a621 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -77,8 +77,6 @@ type: list ''' -import json - try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: @@ -86,7 +84,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible.module_utils.six import string_types class PolicyError(Exception): diff --git a/iam_user.py b/iam_user.py index 6b8efcda811..7bd8ebda423 100644 --- a/iam_user.py +++ b/iam_user.py @@ -106,17 +106,16 @@ sample: / ''' -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -import traceback - try: - from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + def compare_attached_policies(current_attached_policies, new_attached_policies): @@ -176,11 +175,8 @@ def create_or_update_user(connection, module): try: connection.create_user(**params) changed = True - except ClientError as e: - module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except ParamValidationError as e: - module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create user") # Manage managed policies 
current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) @@ -197,14 +193,9 @@ def create_or_update_user(connection, module): if not module.check_mode: try: connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except ClientError as e: - module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format( - policy_arn, params['UserName'], to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except ParamValidationError as e: - module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format( - policy_arn, params['UserName'], to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format( + policy_arn, params['UserName'])) # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above @@ -215,14 +206,9 @@ def create_or_update_user(connection, module): for policy_arn in managed_policies: try: connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except ClientError as e: - module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format( - policy_arn, params['UserName'], to_native(e)), - exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) - except ParamValidationError as e: - module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format( - policy_arn, params['UserName'], to_native(e)), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( + policy_arn, params['UserName'])) if module.check_mode: module.exit_json(changed=changed) @@ -249,7 +235,7 @@ def destroy_user(connection, module): try: for policy in get_attached_policy_list(connection, module, user_name): connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn']) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) try: @@ -298,7 +284,7 @@ def destroy_user(connection, module): connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) connection.delete_user(UserName=user_name) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) module.exit_json(changed=True) @@ -311,7 +297,7 @@ def get_user(connection, module, name): try: return connection.get_user(**params) - except ClientError as e: + except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'NoSuchEntity': return None else: @@ -323,7 +309,7 @@ def get_attached_policy_list(connection, module, name): try: return connection.list_attached_user_policies(UserName=name)['AttachedPolicies'] - except ClientError as e: + except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'NoSuchEntity': return None else: @@ -334,7 +320,7 @@ def delete_user_login_profile(connection, module, user_name): try: return connection.delete_login_profile(UserName=user_name) - except ClientError as e: + except botocore.exceptions.ClientError as e: if 
e.response["Error"]["Code"] == "NoSuchEntity": return None else: diff --git a/lambda.py b/lambda.py index 9cb2e0286cc..e559e181abe 100644 --- a/lambda.py +++ b/lambda.py @@ -211,13 +211,6 @@ } ''' -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - import base64 import hashlib import traceback @@ -228,6 +221,14 @@ except ImportError: pass # protected by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + def get_account_info(module): """return the account information (account id and partition) we are currently working on diff --git a/lambda_alias.py b/lambda_alias.py index bd547a41341..8cd8a891289 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -144,7 +144,6 @@ import re try: - import boto3 from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError except ImportError: pass # Handled by AnsibleAWSModule diff --git a/rds_instance.py b/rds_instance.py index 3aa9c7f67dc..169ace0e2fa 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -755,7 +755,6 @@ get_rds_method_attribute, get_tags, ) -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry from ansible.module_utils.six import string_types @@ -763,7 +762,7 @@ from time import sleep try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + import botocore except ImportError: pass # caught by AnsibleAWSModule @@ -807,7 +806,7 @@ def get_instance(client, module, db_instance_id): sleep(3) else: instance = {} - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg='Failed to describe DB instances') return instance @@ -820,7 +819,7 @@ def get_final_snapshot(client, module, snapshot_identifier): return {} except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True return {} - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') diff --git a/rds_param_group.py b/rds_param_group.py index 536698473e1..ff18fc98300 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -113,8 +113,6 @@ returned: when state is present ''' -import traceback - try: import botocore except ImportError: @@ -123,12 
+121,12 @@ from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags INT_MODIFIERS = { 'K': 1024, @@ -197,10 +195,8 @@ def update_parameters(module, connection): non_empty_slice = [item for item in modify_slice if item] try: connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't update parameters: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't update parameters") return True, errors return False, errors @@ -215,24 +211,15 @@ def update_tags(module, connection, group, tags): connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'], Tags=ansible_dict_to_boto3_tag_list(to_update)) changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't add tags to parameter group: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - except botocore.exceptions.ParamValidationError as e: - # Usually a tag value has been passed as an int or bool, needs to be a string - # The AWS exception message is reasonably ok for this purpose - module.fail_json(msg="Couldn't add tags to parameter group: %s." 
% str(e), - exception=traceback.format_exc()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't add tags to parameter group") if to_delete: try: connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'], TagKeys=to_delete) changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't remove tags from parameter group: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove tags from parameter group") return changed @@ -247,9 +234,7 @@ def ensure_present(module, connection): if e.response['Error']['Code'] == 'DBParameterGroupNotFound': response = None else: - module.fail_json(msg="Couldn't access parameter group information: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Couldn't access parameter group information") if not response: params = dict(DBParameterGroupName=groupname, DBParameterGroupFamily=module.params['engine'], @@ -259,10 +244,8 @@ def ensure_present(module, connection): try: response = connection.create_db_parameter_group(**params) changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't create parameter group: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create parameter group") else: group = response['DBParameterGroups'][0] if tags: @@ -275,16 +258,12 @@ def ensure_present(module, connection): try: response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't obtain parameter group information: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain parameter group information") try: tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList'] - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't obtain parameter group tags: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain parameter group tags") group['tags'] = boto3_tag_list_to_ansible_dict(tags) module.exit_json(changed=changed, errors=errors, **group) @@ -298,16 +277,12 @@ def ensure_absent(module, connection): if e.response['Error']['Code'] == 'DBParameterGroupNotFound': module.exit_json(changed=False) else: - module.fail_json(msg="Couldn't access parameter group information: %s" % str(e), - exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json_aws(e, msg="Couldn't access parameter group information") try: response = connection.delete_db_parameter_group(DBParameterGroupName=group) module.exit_json(changed=True) - except botocore.exceptions.ClientError as e: - module.fail_json(msg="Couldn't delete parameter group: %s" % str(e), - 
exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete parameter group") def main(): diff --git a/redshift.py b/redshift.py index 7c992685494..c409545e62b 100644 --- a/redshift.py +++ b/redshift.py @@ -599,7 +599,7 @@ def modify_cluster(module, redshift): try: resource = _describe_cluster(redshift, identifier) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) if _ensure_tags(redshift, identifier, resource['Tags'], module): resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 5edceea50bf..967be374219 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -195,12 +195,6 @@ from copy import deepcopy import datetime -try: - import dateutil.parser - HAS_DATEUTIL = True -except ImportError: - HAS_DATEUTIL = False - try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: @@ -469,9 +463,6 @@ def main(): ['noncurrent_version_transition_days', 'noncurrent_version_transitions'], ],) - if not HAS_DATEUTIL: - module.fail_json(msg='dateutil required for this module') - client = module.client('s3') expiration_date = module.params.get("expiration_date") diff --git a/s3_sync.py b/s3_sync.py index 78326587941..1222d98cfd6 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -227,7 +227,6 @@ import mimetypes import os import stat as osstat # os.stat constants -import traceback try: from dateutil import tz @@ -247,8 +246,6 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception # the following function, calculate_multipart_etag, is from tlastowka @@ -406,8 +403,6 @@ def head_s3(s3, bucket, s3keys): pass else: raise Exception(err) - # error_msg = boto_exception(err) - # return {'error': error_msg} retkeys.append(retentry) return retkeys @@ -546,9 +541,8 @@ def main(): if result.get('uploads') or result.get('removed'): result['changed'] = True # result.update(filelist=actionable_filelist) - except botocore.exceptions.ClientError as err: - error_msg = boto_exception(err) - module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to push file") module.exit_json(**result) diff --git a/s3_website.py b/s3_website.py index 8b93edb5bb7..24a7cdf7afa 100644 --- a/s3_website.py +++ b/s3_website.py @@ -163,7 +163,6 @@ try: import botocore - from botocore.exceptions import ClientError, ParamValidationError except ImportError: pass # Handled by AnsibleAWSModule @@ -220,21 +219,21 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: bucket_website = resource_connection.BucketWebsite(bucket_name) - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get bucket") try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) except 
is_boto3_error_code('NoSuchWebsiteConfiguration'): website_config = None - except ClientError as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get website configuration") if website_config is None: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True - except (ClientError, ParamValidationError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to set bucket website configuration") except ValueError as e: module.fail_json(msg=str(e)) @@ -247,14 +246,14 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True - except (ClientError, ParamValidationError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") except KeyError as e: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True - except (ClientError, ParamValidationError) as e: - module.fail_json(e, msg="Failed to update bucket website configuration") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update bucket website configuration") except ValueError as e: module.fail_json(msg=str(e)) @@ -274,13 +273,13 @@ def disable_bucket_as_website(client_connection, module): client_connection.get_bucket_website(Bucket=bucket_name) except is_boto3_error_code('NoSuchWebsiteConfiguration'): module.exit_json(changed=changed) - except ClientError as e: # pylint: disable=duplicate-except + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket website") try: client_connection.delete_bucket_website(Bucket=bucket_name) changed = True - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete bucket website") module.exit_json(changed=changed) From cd1c742262bbf07a37d8871db88e0d0149f05ab9 Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Mon, 1 Feb 2021 09:21:05 +0100 Subject: [PATCH 075/683] Tags feature: support tags specification for NAT gateway (#345) (#372) * implement tags feature for NAT gateway * add integration test tasks for tags feature * refactor integration tests (overall) removing hard-coded parameters * add missing integration test tasks without CHECK_MODE * include until loop for some tasks as they failed during integration testing * added code to support tags in ec2_vpc_nat_gateway - fixed the "'NoneType' object has no attribute 'get'" error caused by curr_tags remaining None * removed tests in check_mode because they do not work due to DRY_RUN_GATEWAY * Addressed reviewers' comments Signed-off-by: Alina Buzachis Co-authored-by: Alina Buzachis --- ec2_vpc_nat_gateway.py | 151 +++++++++++++++++++++++++++++++++------ 1 file changed, 131 insertions(+), 20 deletions(-) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 9072a8e32b6..c4e3cbfd797 100644 --- 
a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -48,6 +48,19 @@ required: false default: false type: bool + tags: + description: + - A dict of tags to apply to the NAT gateway. + - To remove all tags, set I(tags={}) and I(purge_tags=true). + aliases: [ 'resource_tags' ] + type: dict + version_added: 1.4.0 + purge_tags: + description: + - Remove tags not listed in I(tags). + type: bool + default: true + version_added: 1.4.0 release_eip: description: - Deallocate the EIP from the VPC. @@ -153,6 +166,17 @@ wait: yes wait_timeout: 300 region: ap-southeast-2 + +- name: Create new NAT gateway using an allocation-id and tags. + community.aws.ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + allocation_id: eipalloc-12345678 + region: ap-southeast-2 + tags: + Tag1: tag1 + Tag2: tag2 + register: new_nat_gateway ''' RETURN = ''' @@ -176,6 +200,13 @@ returned: In all cases. type: str sample: "available" +tags: + description: The tags associated with the VPC NAT Gateway. + type: dict + returned: When tags are present. + sample: + tags: + "Ansible": "Test" vpc_id: description: id of the VPC. returned: In all cases. @@ -204,11 +235,17 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native DRY_RUN_GATEWAYS = [ { @@ -451,6 +488,7 @@ def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, allocation_id_exists = False gateways = [] states = ['available', 'pending'] + gws_retrieved, err_msg, gws = ( get_nat_gateways( client, subnet_id, states=states, check_mode=check_mode ) ) @@ -609,7 +647,7 @@ def release_address(client, allocation_id, check_mode=False): return ip_released, err_msg -def create(client, subnet_id, allocation_id, client_token=None, +def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_token=None, wait=False, wait_timeout=0, if_exist_do_not_create=False, check_mode=False): """Create an Amazon NAT Gateway. 
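# Note on the new tags plumbing: create() and pre_create() now carry tags and
# purge_tags through to ensure_tags() (added in a later hunk), which asks
# amazon.aws's compare_aws_tags() which tags to create and which to delete.
# A minimal, self-contained sketch of those semantics - illustrative only;
# diff_tags() is a hypothetical stand-in, assuming compare_aws_tags() behaves
# roughly like this:
def diff_tags(current, desired, purge=True):
    # Tags that are new, or whose value changed, must be (re)created.
    to_set = {k: v for k, v in desired.items() if current.get(k) != v}
    # With purge=True, tags absent from the desired set are removed.
    to_delete = [k for k in current if purge and k not in desired]
    return to_set, to_delete

# Example: diff_tags({'Name': 'nat-a', 'Owner': 'infra'}, {'Name': 'nat-a', 'Env': 'prod'})
# returns ({'Env': 'prod'}, ['Owner']): 'Env' is created, 'Owner' is purged.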
@@ -680,7 +718,6 @@ def create(client, subnet_id, allocation_id, client_token=None, result['create_time'] = datetime.datetime.utcnow() result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id result['subnet_id'] = subnet_id - success = True changed = True create_time = result['create_time'].replace(tzinfo=None) @@ -689,15 +726,18 @@ def create(client, subnet_id, allocation_id, client_token=None, elif wait: success, err_msg, result = ( wait_for_status( - client, wait_timeout, result['nat_gateway_id'], 'available', - check_mode=check_mode + client, wait_timeout, result['nat_gateway_id'], + 'available', check_mode=check_mode ) ) if success: err_msg = ( 'NAT gateway {0} created'.format(result['nat_gateway_id']) ) - + result['tags'], tags_update_exists = ensure_tags( + client, module, nat_gw_id=result['nat_gateway_id'], tags=tags, + purge_tags=purge_tags, check_mode=check_mode + ) except is_boto3_error_code('IdempotentParameterMismatch'): err_msg = ( 'NAT Gateway does not support update and token has already been provided: ' + err_msg @@ -714,7 +754,7 @@ def create(client, subnet_id, allocation_id, client_token=None, return success, changed, err_msg, result -def pre_create(client, subnet_id, allocation_id=None, eip_address=None, +def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None, if_exist_do_not_create=False, wait=False, wait_timeout=0, client_token=None, check_mode=False): """Create an Amazon NAT Gateway. @@ -772,14 +812,18 @@ def pre_create(client, subnet_id, allocation_id=None, eip_address=None, results = list() if not allocation_id and not eip_address: - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode) - ) - + existing_gateways, allocation_id_exists = (gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)) if len(existing_gateways) > 0 and if_exist_do_not_create: + results = existing_gateways[0] + results['tags'], tags_update_exists = ensure_tags(client, module, results['nat_gateway_id'], tags, purge_tags, check_mode) + + if tags_update_exists: + success = True + changed = True + return success, changed, err_msg, results + success = True changed = False - results = existing_gateways[0] err_msg = ( 'NAT Gateway {0} already exists in subnet_id {1}' .format( @@ -805,16 +849,22 @@ def pre_create(client, subnet_id, allocation_id=None, eip_address=None, success = False changed = False return success, changed, err_msg, dict() - existing_gateways, allocation_id_exists = ( gateway_in_subnet_exists( client, subnet_id, allocation_id, check_mode=check_mode ) ) + if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): + results = existing_gateways[0] + results['tags'], tags_update_exists = ensure_tags(client, module, results['nat_gateway_id'], tags, purge_tags, check_mode) + if tags_update_exists: + success = True + changed = True + return success, changed, err_msg, results + success = True changed = False - results = existing_gateways[0] err_msg = ( 'NAT Gateway {0} already exists in subnet_id {1}' .format( @@ -824,7 +874,7 @@ def pre_create(client, subnet_id, allocation_id=None, eip_address=None, return success, changed, err_msg, results success, changed, err_msg, results = create( - client, subnet_id, allocation_id, client_token, + client, module, subnet_id, allocation_id, tags, purge_tags, client_token, wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode ) @@ -919,8 +969,7 @@ def remove(client, nat_gateway_id, wait=False, 
wait_timeout=0, if release_eip: eip_released, eip_err = ( - release_address(client, allocation_id, check_mode) - ) + release_address(client, allocation_id, check_mode)) if not eip_released: err_msg = ( "{0}: Failed to release EIP {1}: {2}" @@ -931,6 +980,64 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0, return success, changed, err_msg, results +def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode): + final_tags = [] + changed = False + + filters = ansible_dict_to_boto3_filter_list({'resource-id': nat_gw_id, 'resource-type': 'natgateway'}) + cur_tags = None + try: + cur_tags = client.describe_tags(aws_retry=True, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + if tags is None: + return boto3_tag_list_to_ansible_dict(cur_tags['Tags']), changed + + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) + final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) + + if to_update: + try: + if check_mode: + # update tags + final_tags.update(to_update) + else: + client.create_tags( + aws_retry=True, + Resources=[nat_gw_id], + Tags=ansible_dict_to_boto3_tag_list(to_update) + ) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create tags") + + if to_delete: + try: + if check_mode: + # remove tags + for key in to_delete: + del final_tags[key] + else: + tags_list = [] + for key in to_delete: + tags_list.append({'Key': key}) + + client.delete_tags(aws_retry=True, Resources=[nat_gw_id], Tags=tags_list) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete tags") + + if not check_mode and (to_update or to_delete): + try: + response = client.describe_tags(aws_retry=True, Filters=filters) + final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + return final_tags, changed + + def main(): argument_spec = dict( subnet_id=dict(type='str'), @@ -943,6 +1050,8 @@ def main(): release_eip=dict(type='bool', default=False), nat_gateway_id=dict(type='str'), client_token=dict(type='str'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -965,9 +1074,11 @@ def main(): release_eip = module.params.get('release_eip') client_token = module.params.get('client_token') if_exist_do_not_create = module.params.get('if_exist_do_not_create') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') try: - client = module.client('ec2') + client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') @@ -977,7 +1088,7 @@ def main(): if state == 'present': success, changed, err_msg, results = ( pre_create( - client, subnet_id, allocation_id, eip_address, + client, module, subnet_id, tags, purge_tags, allocation_id, eip_address, if_exist_do_not_create, wait, wait_timeout, client_token, check_mode=check_mode ) From 3883c7a81b2bcaab93ecace2d7915ca2249bb7dd Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 
3 Feb 2021 18:11:15 +0100 Subject: [PATCH 076/683] ec2_instance - Apply retry decorators more consistently. (#373) * ec2_instance: build results inside find_instances and add backoff * Add retry decorator to ec2 clients --- ec2_instance.py | 89 ++++++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/ec2_instance.py b/ec2_instance.py index a240a350d13..a13b00c680b 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -884,7 +884,6 @@ def tower_callback_script(tower_conf, windows=False, passwd=None): raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.") -@AWSRetry.jittered_backoff() def manage_tags(match, new_tags, purge_tags, ec2): changed = False old_tags = boto3_tag_list_to_ansible_dict(match['Tags']) @@ -896,12 +895,14 @@ def manage_tags(match, new_tags, purge_tags, ec2): return bool(tags_to_delete or tags_to_set) if tags_to_set: ec2.create_tags( + aws_retry=True, Resources=[match['InstanceId']], Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) changed |= True if tags_to_delete: delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) ec2.delete_tags( + aws_retry=True, Resources=[match['InstanceId']], Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) changed |= True @@ -929,14 +930,17 @@ def add_or_update_instance_profile(instance, desired_profile_name): if instance_profile_setting.get('Arn') == desired_arn: return False # update association - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) try: - association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) + association = ec2.describe_iam_instance_profile_associations( + aws_retry=True, + Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) except botocore.exceptions.ClientError as e: # check for InvalidAssociationID.NotFound module.fail_json_aws(e, "Could not find instance profile association") try: resp = ec2.replace_iam_instance_profile_association( + aws_retry=True, AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'], IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)} ) @@ -946,9 +950,10 @@ def add_or_update_instance_profile(instance, desired_profile_name): if not instance_profile_setting and desired_profile_name: # create association - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) try: resp = ec2.associate_iam_instance_profile( + aws_retry=True, IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}, InstanceId=instance['InstanceId'] ) @@ -989,7 +994,7 @@ def build_network_spec(params, ec2=None): }, """ if ec2 is None: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) interfaces = [] network = params.get('network') or {} @@ -1109,11 +1114,11 @@ def warn_if_cpu_options_changed(instance): def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None): if ec2 is None: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if subnet_id is not None: try: - sub = ec2.describe_subnets(SubnetIds=[subnet_id]) + sub = ec2.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidGroup.NotFound': module.fail_json( @@ -1168,14 
+1173,17 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, found_groups = [] for f_set in (id_filters, name_filters): if len(f_set) > 1: - found_groups.extend(ec2.get_paginator( - 'describe_security_groups' - ).paginate( - Filters=f_set - ).search('SecurityGroups[]')) + found_groups.extend(describe_security_groups(ec2, Filters=f_set)) return list(dict((g['GroupId'], g) for g in found_groups).values()) +@AWSRetry.jittered_backoff() +def describe_security_groups(ec2, **params): + paginator = ec2.get_paginator('describe_security_groups') + results = paginator.paginate(**params) + return list(results.search('SecurityGroups[]')) + + def build_top_level_options(params): spec = {} if params.get('image_id'): @@ -1257,7 +1265,7 @@ def build_instance_tags(params, propagate_tags_to_volumes=True): def build_run_instance_spec(params, ec2=None): if ec2 is None: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) spec = dict( ClientToken=uuid.uuid4().hex, @@ -1296,7 +1304,7 @@ def await_instances(ids, state='OK'): } if state not in state_opts: module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state)) - waiter = module.client('ec2').get_waiter(state_opts[state]) + waiter = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()).get_waiter(state_opts[state]) try: waiter.wait( InstanceIds=ids, @@ -1316,7 +1324,7 @@ def await_instances(ids, state='OK'): def diff_instance_and_params(instance, params, ec2=None, skip=None): """boto3 instance obj, module params""" if ec2 is None: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if skip is None: skip = [] @@ -1342,7 +1350,7 @@ def value_wrapper(v): if mapping.instance_key in skip: continue - value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_) + value = ec2.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): arguments = dict( InstanceId=instance['InstanceId'], @@ -1352,7 +1360,7 @@ def value_wrapper(v): changes_to_apply.append(arguments) if params.get('security_group') or params.get('security_groups'): - value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute="groupSet", InstanceId=id_) + value = ec2.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) # managing security groups if params.get('vpc_subnet_id'): subnet_id = params.get('vpc_subnet_id') @@ -1404,6 +1412,7 @@ def change_network_attachments(instance, params, ec2): to_attach = set(new_ids) - set(old_ids) for eni_id in to_attach: ec2.attach_network_interface( + aws_retry=True, DeviceIndex=new_ids.index(eni_id), InstanceId=instance['InstanceId'], NetworkInterfaceId=eni_id, @@ -1412,35 +1421,35 @@ def change_network_attachments(instance, params, ec2): return False +@AWSRetry.jittered_backoff() def find_instances(ec2, ids=None, filters=None): paginator = ec2.get_paginator('describe_instances') if ids: - return list(paginator.paginate( - InstanceIds=ids, - ).search('Reservations[].Instances[]')) + params = dict(InstanceIds=ids) elif filters is None: module.fail_json(msg="No filters provided when they were required") - elif filters is not None: + else: for key in list(filters.keys()): if not key.startswith("tag:"): filters[key.replace("_", "-")] = filters.pop(key) - return list(paginator.paginate( - 
Filters=ansible_dict_to_boto3_filter_list(filters) - ).search('Reservations[].Instances[]')) - return [] + params = dict(Filters=ansible_dict_to_boto3_filter_list(filters)) + + results = paginator.paginate(**params).search('Reservations[].Instances[]') + return list(results) -@AWSRetry.jittered_backoff() def get_default_vpc(ec2): - vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + vpcs = ec2.describe_vpcs( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) if len(vpcs.get('Vpcs', [])): return vpcs.get('Vpcs')[0] return None -@AWSRetry.jittered_backoff() def get_default_subnet(ec2, vpc, availability_zone=None): subnets = ec2.describe_subnets( + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list({ 'vpc-id': vpc['VpcId'], 'state': 'available', @@ -1462,7 +1471,7 @@ def get_default_subnet(ec2, vpc, availability_zone=None): def ensure_instance_state(state, ec2=None): if ec2 is None: - module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if state in ('running', 'started'): changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING') @@ -1537,11 +1546,10 @@ def ensure_instance_state(state, ec2=None): ) -@AWSRetry.jittered_backoff() def change_instance_state(filters, desired_state, ec2=None): """Takes STOPPED/RUNNING/TERMINATED""" if ec2 is None: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) changed = set() instances = find_instances(ec2, filters=filters) @@ -1558,7 +1566,7 @@ def change_instance_state(filters, desired_state, ec2=None): # TODO use a client-token to prevent double-sends of these start/stop/terminate commands # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html - resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']]) + resp = ec2.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']] if desired_state == 'STOPPED': if inst['State']['Name'] in ('stopping', 'stopped'): @@ -1569,14 +1577,14 @@ changed.add(inst['InstanceId']) continue - resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']]) + resp = ec2.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['StoppingInstances']] if desired_state == 'RUNNING': if module.check_mode: changed.add(inst['InstanceId']) continue - resp = ec2.start_instances(InstanceIds=[inst['InstanceId']]) + resp = ec2.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['StartingInstances']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: try: @@ -1625,7 +1633,7 @@ def handle_existing(existing_matches, changed, ec2, state): ) changes = diff_instance_and_params(existing_matches[0], module.params) for c in changes: - AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c) + ec2.modify_instance_attribute(aws_retry=True, **c) changed |= bool(changes) changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role')) changed |= change_network_attachments(existing_matches[0], module.params, ec2) @@ -1664,7 +1672,7 @@ def ensure_present(existing_matches, changed, ec2, state): changes = diff_instance_and_params(ins, module.params, skip=['UserData', 
'EbsOptimized']) for c in changes: try: - AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c) + ec2.modify_instance_attribute(aws_retry=True, **c) except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) @@ -1675,9 +1683,7 @@ def ensure_present(existing_matches, changed, ec2, state): spec=instance_spec, ) await_instances(instance_ids) - instances = ec2.get_paginator('describe_instances').paginate( - InstanceIds=instance_ids - ).search('Reservations[].Instances[]') + instances = find_instances(ec2, ids=instance_ids) module.exit_json( changed=True, @@ -1689,10 +1695,9 @@ def ensure_present(existing_matches, changed, ec2, state): module.fail_json_aws(e, msg="Failed to create new EC2 instance") -@AWSRetry.jittered_backoff() def run_instances(ec2, **instance_spec): try: - return ec2.run_instances(**instance_spec) + return ec2.run_instances(aws_retry=True, **instance_spec) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']: # If the instance profile has just been created, it takes some time to be visible by ec2 @@ -1762,7 +1767,7 @@ def main(): module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") state = module.params.get('state') - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if module.params.get('filters') is None: filters = { # all states except shutting-down and terminated From 838937024d4327c0ad5893199f3fe1ff5cb07a16 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 5 Feb 2021 09:43:09 +0100 Subject: [PATCH 077/683] Cleanup - use is_boto3_error_(message|code) (#268) * Reorder imports * Make use of is_boto3_error_message * Mass-migration over to is_boto3_error_code * Remove unused imports * unused vars in exception * Improve consistency around catching BotoCoreError and ClientError * Remove unused imports * Remove unused 'PolicyError' from iam_policy_info * Avoid catching botocore.exceptions.ClientError when we only want some error codes * Import camel_dict_to_snake_dict/snake_dict_to_camel_dict from ansible.module_utils.common.dict_transformations --- aws_elasticbeanstalk_app.py | 18 ++--- aws_glue_connection.py | 24 +++--- aws_glue_job.py | 27 +++---- aws_kms_info.py | 20 +++-- aws_ssm_parameter_store.py | 17 +++-- aws_step_functions_state_machine_execution.py | 20 ++--- aws_waf_condition.py | 19 +++-- cloudfront_invalidation.py | 36 ++++----- cloudwatchevent_rule.py | 22 +++--- data_pipeline.py | 9 +-- ec2_asg.py | 23 +++--- ec2_asg_info.py | 19 +++-- ec2_instance.py | 37 +++++---- ec2_placement_group.py | 26 ++++--- ec2_transit_gateway_info.py | 24 +++--- ec2_vpc_egress_igw.py | 33 ++++---- ec2_vpc_nacl_info.py | 21 +++-- ec2_vpc_route_table.py | 26 ++++--- ecs_ecr.py | 26 +++---- ecs_service_info.py | 8 +- efs.py | 25 +++--- elasticache.py | 14 ++-- elasticache_info.py | 9 +-- elasticache_snapshot.py | 29 ++++--- elb_target_group.py | 5 +- execute_lambda.py | 13 ++-- iam_group.py | 52 ++++++------- iam_managed_policy.py | 19 +++-- iam_password_policy.py | 13 ++-- iam_policy_info.py | 15 ++-- iam_role.py | 76 +++++++++---------- iam_role_info.py | 15 ++-- iam_user.py | 30 ++++---- lambda_alias.py | 26 +++---- lambda_facts.py | 65 ++++++++-------- lambda_info.py | 65 ++++++++-------- lambda_policy.py | 23 +++--- lightsail.py | 12 ++- rds_instance.py | 38 +++++----- 
rds_param_group.py | 19 +++-- s3_lifecycle.py | 31 +++----- sqs_queue.py | 28 +++---- 42 files changed, 510 insertions(+), 567 deletions(-) diff --git a/aws_elasticbeanstalk_app.py b/aws_elasticbeanstalk_app.py index bab889f0b07..19110282d0e 100644 --- a/aws_elasticbeanstalk_app.py +++ b/aws_elasticbeanstalk_app.py @@ -85,11 +85,12 @@ ''' try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass # handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message def describe_app(ebs, app_name, module): @@ -104,7 +105,7 @@ def list_apps(ebs, app_name, module): apps = ebs.describe_applications(ApplicationNames=[app_name]) else: apps = ebs.describe_applications() - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe application") return apps.get("Applications", []) @@ -175,7 +176,7 @@ def main(): try: create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not create application") app = describe_app(ebs, app_name, module) @@ -188,7 +189,7 @@ def main(): ebs.update_application(ApplicationName=app_name) else: ebs.update_application(ApplicationName=app_name, Description=description) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not update application") app = describe_app(ebs, app_name, module) @@ -208,13 +209,10 @@ def main(): else: ebs.delete_application(ApplicationName=app_name) changed = True - except BotoCoreError as e: + except is_boto3_error_message('It is currently pending deletion'): + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Cannot terminate app") - except ClientError as e: - if 'It is currently pending deletion.' 
not in e.response['Error']['Message']: - module.fail_json_aws(e, msg="Cannot terminate app") - else: - changed = False result = dict(changed=changed, app=app) diff --git a/aws_glue_connection.py b/aws_glue_connection.py index 0df4ab915d1..41bc99816a0 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -130,17 +130,20 @@ sample: {'subnet-id':'subnet-aabbccddee'} ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names - # Non-ansible imports import copy import time try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names + def _get_glue_connection(connection, module): """ @@ -160,11 +163,8 @@ def _get_glue_connection(connection, module): try: return connection.get_connection(**params)['Connection'] - except (BotoCoreError, ClientError) as e: - if e.response['Error']['Code'] == 'EntityNotFoundException': - return None - else: - raise e + except is_boto3_error_code('EntityNotFoundException'): + return None def _compare_glue_connection_params(user_params, current_params): @@ -251,13 +251,13 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co update_params['Name'] = update_params['ConnectionInput']['Name'] connection.update_connection(**update_params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) else: try: connection.create_connection(**params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) # If changed, get the Glue connection again @@ -292,7 +292,7 @@ def delete_glue_connection(connection, module, glue_connection): try: connection.delete_connection(**params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) module.exit_json(changed=changed) diff --git a/aws_glue_job.py b/aws_glue_job.py index 1d991f52f41..7f6af1f4d0c 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -185,15 +185,17 @@ sample: 300 ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - # Non-ansible imports import copy try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: - pass + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def _get_glue_job(connection, module, glue_job_name): @@ -208,11 +210,10 @@ def _get_glue_job(connection, module, glue_job_name): try: return connection.get_job(JobName=glue_job_name)['Job'] 
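# A minimal, self-contained sketch of the is_boto3_error_code /
# is_boto3_error_message pattern this commit migrates to. Both helpers come
# from ansible_collections.amazon.aws.plugins.module_utils.core and build an
# exception class on the fly, which is why they can sit directly in an
# `except` clause; the client, function name, error code and message below
# are illustrative only, not part of the patch itself.

try:
    import botocore
except ImportError:
    pass  # handled by AnsibleAWSModule

from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message


def get_job_if_exists(connection, module, name):
    try:
        return connection.get_job(JobName=name)['Job']
    except is_boto3_error_code('EntityNotFoundException'):
        return None  # the one expected error: report "absent" rather than fail
    except is_boto3_error_message('pending deletion'):
        return None  # same idea, matching on the error message instead of the code
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't read job %s" % name)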
- except (BotoCoreError, ClientError) as e: - if e.response['Error']['Code'] == 'EntityNotFoundException': - return None - else: - module.fail_json_aws(e) + except is_boto3_error_code('EntityNotFoundException'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) def _compare_glue_job_params(user_params, current_params): @@ -292,13 +293,13 @@ def create_or_update_glue_job(connection, module, glue_job): del update_params['JobUpdate']['Name'] connection.update_job(**update_params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) else: try: connection.create_job(**params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) # If changed, get the Glue job again @@ -324,7 +325,7 @@ def delete_glue_job(connection, module, glue_job): try: connection.delete_job(JobName=glue_job['Name']) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) module.exit_json(changed=changed) diff --git a/aws_kms_info.py b/aws_kms_info.py index 978ed804ec2..2366c5d0f45 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -290,7 +290,7 @@ def get_key_policy_with_backoff(connection, key_id, policy_name): def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - except is_boto3_error_code('AccessDeniedException') as e: + except is_boto3_error_code('AccessDeniedException'): return None return current_rotation_status.get('KeyRotationEnabled') @@ -306,11 +306,10 @@ def get_kms_tags(connection, module, key_id): try: tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) tags.extend(tag_response['Tags']) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'AccessDeniedException': - module.fail_json_aws(e, msg="Failed to obtain key tags") - else: - tag_response = {} + except is_boto3_error_code('AccessDeniedException'): + tag_response = {} + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key tags") if tag_response.get('NextMarker'): kwargs['Marker'] = tag_response['NextMarker'] else: @@ -323,11 +322,10 @@ def get_kms_policies(connection, module, key_id): policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for policy in policies] - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'AccessDeniedException': - module.fail_json_aws(e, msg="Failed to obtain key policies") - else: - return [] + except is_boto3_error_code('AccessDeniedException'): + return [] + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key policies") def key_matches_filter(key, filtr): diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 32c1df62536..d31a79b2bef 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -127,13 +127,14 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - try: - from 
botocore.exceptions import ClientError + import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def update_parameter(client, module, args): changed = False @@ -142,7 +143,7 @@ def update_parameter(client, module, args): try: response = client.put_parameter(**args) changed = True - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="setting parameter") return changed, response @@ -195,7 +196,7 @@ def create_update_parameter(client, module): describe_existing_parameter = describe_existing_parameter_paginator.paginate( Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result() - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']: @@ -213,9 +214,9 @@ def delete_parameter(client, module): response = client.delete_parameter( Name=module.params.get('name') ) - except ClientError as e: - if e.response['Error']['Code'] == 'ParameterNotFound': - return False, {} + except is_boto3_error_code('ParameterNotFound'): + return False, {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="deleting parameter") return True, response diff --git a/aws_step_functions_state_machine_execution.py b/aws_step_functions_state_machine_execution.py index f9e1d3fa44c..8ecc2a1272d 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/aws_step_functions_state_machine_execution.py @@ -89,14 +89,16 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - try: - from botocore.exceptions import ClientError, BotoCoreError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def start_execution(module, sfn_client): ''' @@ -123,10 +125,10 @@ def start_execution(module, sfn_client): name=name, input=execution_input ) - except (ClientError, BotoCoreError) as e: - if e.response['Error']['Code'] == 'ExecutionAlreadyExists': - # this will never be executed anymore - module.exit_json(changed=False) + except is_boto3_error_code('ExecutionAlreadyExists'): + # this will never be executed anymore + module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to start execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution)) @@ -151,7 +153,7 @@ def stop_execution(module, sfn_client): cause=cause, error=error ) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to stop execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res)) diff --git 
a/aws_waf_condition.py b/aws_waf_condition.py index 006caaad7cd..77f66f9f767 100644 --- a/aws_waf_condition.py +++ b/aws_waf_condition.py @@ -402,10 +402,17 @@ except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff, MATCH_LOOKUP -from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff class Condition(object): @@ -540,9 +547,9 @@ def delete_unused_regex_pattern(self, regex_pattern_set_id): run_func_with_change_token_backoff(self.client, self.module, {'RegexPatternSetId': regex_pattern_set_id}, self.client.delete_regex_pattern_set, wait=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if e.response['Error']['Code'] == 'WAFNonexistentItemException': - return + except is_boto3_error_code('WAFNonexistentItemException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg='Could not delete regex pattern') def get_condition_by_name(self, name): diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 52e3aea1873..974358f3967 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -136,17 +136,20 @@ sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 ''' -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager import datetime try: - from botocore.exceptions import ClientError, BotoCoreError + import botocore except ImportError: pass # caught by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager + class 
CloudFrontInvalidationServiceManager(object): """ @@ -166,16 +169,13 @@ def create_invalidation(self, distribution_id, invalidation_batch): return response, False else: return response, True - except BotoCoreError as e: + except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation ' + 'batch for the same distribution.'): + self.module.warn("InvalidationBatch target paths are not modifiable. " + "To make a new invalidation please update caller_reference.") + return current_invalidation_response, False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") - except ClientError as e: - if ('Your request contains a caller reference that was used for a previous invalidation batch ' - 'for the same distribution.' in e.response['Error']['Message']): - self.module.warn("InvalidationBatch target paths are not modifiable. " - "To make a new invalidation please update caller_reference.") - return current_invalidation_response, False - else: - self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): current_invalidation = {} @@ -184,7 +184,7 @@ def get_invalidation(self, distribution_id, caller_reference): paginator = self.client.get_paginator('list_invalidations') invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', []) invalidation_ids = [inv['Id'] for inv in invalidations] - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.") # check if there is an invalidation with the same caller reference @@ -192,7 +192,7 @@ def get_invalidation(self, distribution_id, caller_reference): try: invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation'] caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference') - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id)) if caller_ref == caller_reference: current_invalidation = invalidation @@ -218,7 +218,7 @@ def validate_distribution_id(self, distribution_id, alias): if distribution_id is None: distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias) return distribution_id - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating parameters.") def create_aws_list(self, invalidation_batch): @@ -238,7 +238,7 @@ def validate_invalidation_batch(self, invalidation_batch, caller_reference): 'caller_reference': valid_caller_reference } return valid_invalidation_batch - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating invalidation batch.") diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 0ba66909d25..8de7dc7d291 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -154,8 +154,10 @@ except ImportError: pass # handled by AnsibleAWSModule +from 
ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code class CloudWatchEventRule(object): @@ -174,12 +176,9 @@ def describe(self): """Returns the existing details of the rule in AWS""" try: rule_info = self.client.describe_rule(Name=self.name) - except botocore.exceptions.ClientError as e: - error_code = e.response.get('Error', {}).get('Code') - if error_code == 'ResourceNotFoundException': - return {} - self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('ResourceNotFoundException'): + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) return self._snakify(rule_info) @@ -237,12 +236,9 @@ def list_targets(self): """Lists the existing targets for the rule in AWS""" try: targets = self.client.list_targets_by_rule(Rule=self.name) - except botocore.exceptions.ClientError as e: - error_code = e.response.get('Error', {}).get('Code') - if error_code == 'ResourceNotFoundException': - return [] - self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('ResourceNotFoundException'): + return [] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) return self._snakify(targets)['targets'] diff --git a/data_pipeline.py index 54a4cd6f39a..4874388c733 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -203,7 +203,6 @@ try: import botocore - from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -211,6 +210,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] @@ -254,7 +254,7 @@ def pipeline_description(client, dp_id): """ try: return client.describe_pipelines(pipelineIds=[dp_id]) - except ClientError as e: + except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']): raise DataPipelineNotFound @@ -361,9 +361,8 @@ def activate_pipeline(client, module): else: try: client.activate_pipeline(pipelineId=dp_id) - except ClientError as e: - if e.response["Error"]["Code"] == "InvalidRequestException": - module.fail_json(msg="You need to populate your pipeline before activation.") + except is_boto3_error_code('InvalidRequestException'): + module.fail_json(msg="You need to populate your pipeline before activation.") try: pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout) diff --git a/ec2_asg.py index ee07b68f516..152918b6d6c 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -535,6 +535,7 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from
ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', @@ -594,13 +595,13 @@ def describe_launch_templates(connection, launch_template): try: lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) return lt - except (botocore.exceptions.ClientError) as e: + except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): module.fail_json(msg="No launch template found matching: %s" % launch_template) else: try: lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) return lt - except (botocore.exceptions.ClientError) as e: + except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): module.fail_json(msg="No launch template found matching: %s" % launch_template) @@ -851,12 +852,9 @@ def elb_healthy(asg_connection, elb_connection, group_name): # but has not yet show up in the ELB try: lb_instances = describe_instance_health(elb_connection, lb, instances) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'InvalidInstance': - return None - - module.fail_json_aws(e, msg="Failed to get load balancer.") - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('InvalidInstance'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get load balancer.") for i in lb_instances.get('InstanceStates'): @@ -883,12 +881,9 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): # but has not yet show up in the ELB try: tg_instances = describe_target_health(elbv2_connection, tg, instances) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'InvalidInstance': - return None - - module.fail_json_aws(e, msg="Failed to get target group.") - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('InvalidInstance'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get target group.") for i in tg_instances.get('TargetHealthDescriptions'): diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 07df498968a..819bf6e5ab3 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -219,12 +219,14 @@ import re try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def match_asg_tags(tags_to_match, asg): @@ -330,7 +332,7 @@ def find_asgs(conn, module, name=None, tags=None): try: asgs_paginator = conn.get_paginator('describe_auto_scaling_groups') asgs = asgs_paginator.paginate().build_full_result() - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups') if not asgs: @@ -338,7 +340,7 @@ def find_asgs(conn, 
module, name=None, tags=None): try: elbv2 = module.client('elbv2') - except ClientError as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # This is nice to have, not essential elbv2 = None matched_asgs = [] @@ -373,12 +375,9 @@ def find_asgs(conn, module, name=None, tags=None): tg_paginator = elbv2.get_paginator('describe_target_groups') tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result() asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']] - except ClientError as e: - if e.response['Error']['Code'] == 'TargetGroupNotFound': - asg['target_group_names'] = [] - else: - module.fail_json_aws(e, msg="Failed to describe Target Groups") - except BotoCoreError as e: + except is_boto3_error_code('TargetGroupNotFound'): + asg['target_group_names'] = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe Target Groups") else: asg['target_group_names'] = [] diff --git a/ec2_instance.py b/ec2_instance.py index a13b00c680b..06ebda60341 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -814,6 +814,8 @@ from ansible.module_utils.six.moves.urllib import parse as urlparse from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list @@ -1119,15 +1121,13 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, if subnet_id is not None: try: sub = ec2.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'InvalidGroup.NotFound': - module.fail_json( - "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format( - subnet_id - ) + except is_boto3_error_code('InvalidGroup.NotFound'): + module.fail_json( + "Could not find subnet {0} to associate security groups. 
Please check the vpc_subnet_id and security_groups parameters.".format( + subnet_id ) - module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) - except botocore.exceptions.BotoCoreError as e: + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) parent_vpc_id = sub['Subnets'][0]['VpcId'] @@ -1615,9 +1615,9 @@ def determine_iam_role(name_or_arn): try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return role['InstanceProfile']['Arn'] - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) + except is_boto3_error_code('NoSuchEntity') as e: + module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) @@ -1697,15 +1697,12 @@ def ensure_present(existing_matches, changed, ec2, state): def run_instances(ec2, **instance_spec): try: - return ec2.run_instances(aws_retry=True, **instance_spec) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']: - # If the instance profile has just been created, it takes some time to be visible by ec2 - # So we wait 10 second and retry the run_instances - time.sleep(10) - return ec2.run_instances(**instance_spec) - else: - raise e + return ec2.run_instances(**instance_spec) + except is_boto3_error_message('Invalid IAM Instance Profile ARN'): + # If the instance profile has just been created, it takes some time to be visible by ec2 + # So we wait 10 seconds and retry the run_instances + time.sleep(10) + return ec2.run_instances(**instance_spec) def main(): diff --git a/ec2_placement_group.py index 7d9a8004544..3ccb2c00802 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -87,13 +87,15 @@ ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: - from botocore.exceptions import (BotoCoreError, ClientError) + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + @AWSRetry.exponential_backoff() def get_placement_group_details(connection, module): @@ -104,7 +106,7 @@ def get_placement_group_details(connection, module): "Name": "group-name", "Values": [name] }]) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, msg="Couldn't find placement group named [%s]" % name) @@ -128,13 +130,13 @@ def create_placement_group(connection, module): try: connection.create_placement_group( GroupName=name, Strategy=strategy, DryRun=module.check_mode) - except (BotoCoreError, ClientError) as e: -
if e.response['Error']['Code'] == "DryRunOperation": - module.exit_json(changed=True, placement_group={ - "name": name, - "state": 'DryRun', - "strategy": strategy, - }) + except is_boto3_error_code('DryRunOperation'): + module.exit_json(changed=True, placement_group={ + "name": name, + "state": 'DryRun', + "strategy": strategy, + }) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws( e, msg="Couldn't create placement group [%s]" % name) @@ -152,7 +154,7 @@ def delete_placement_group(connection, module): try: connection.delete_placement_group( GroupName=name, DryRun=module.check_mode) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, msg="Couldn't delete placement group [%s]" % name) diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 2eacf01cd96..707e375a7ee 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -165,17 +165,17 @@ ''' try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass # handled by imported AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_filter_list -) +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict class AnsibleEc2TgwInfo(object): @@ -208,11 +208,9 @@ def describe_transit_gateways(self): try: response = self._connection.describe_transit_gateways( TransitGatewayIds=transit_gateway_ids, Filters=filters) - except ClientError as e: - if e.response['Error']['Code'] == 'InvalidTransitGatewayID.NotFound': - self._results['transit_gateways'] = [] - return - raise + except is_boto3_error_code('InvalidTransitGatewayID.NotFound'): + self._results['transit_gateways'] = [] + return for transit_gateway in response['TransitGateways']: transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags'])) @@ -253,7 +251,7 @@ def main(): tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) try: tgwf_manager.describe_transit_gateways() - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) module.exit_json(**results) diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index d462696d0af..0026ade65ad 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -57,15 +57,16 @@ sample: vpc-012345678 ''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from 
ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def delete_eigw(module, conn, eigw_id): """ @@ -79,14 +80,9 @@ def delete_eigw(module, conn, eigw_id): try: response = conn.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id) - except botocore.exceptions.ClientError as e: - # When boto3 method is run with DryRun=True it returns an error on success - # We need to catch the error and return something valid - if e.response.get('Error', {}).get('Code') == "DryRunOperation": - changed = True - else: - module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id)) - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('DryRunOperation'): + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id)) if not module.check_mode: @@ -108,16 +104,13 @@ def create_eigw(module, conn, vpc_id): try: response = conn.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id) - except botocore.exceptions.ClientError as e: + except is_boto3_error_code('DryRunOperation'): # When boto3 method is run with DryRun=True it returns an error on success # We need to catch the error and return something valid - if e.response.get('Error', {}).get('Code') == "DryRunOperation": - changed = True - elif e.response.get('Error', {}).get('Code') == "InvalidVpcID.NotFound": - module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) - else: - module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) - except botocore.exceptions.BotoCoreError as e: + changed = True + except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) if not module.check_mode: diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 1e42e486cea..8c905f67e58 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -105,16 +105,17 @@ ''' try: - from botocore.exceptions import ClientError, BotoCoreError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_filter_list, - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict # VPC-supported IANA protocol numbers @@ -132,11 +133,9 @@ def list_ec2_vpc_nacls(connection, module): try: nacls = connection.describe_network_acls(aws_retry=True, 
NetworkAclIds=nacl_ids, Filters=filters) - except ClientError as e: - if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound': - module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist') - module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) - except BotoCoreError as e: + except is_boto3_error_code('InvalidNetworkAclID.NotFound'): + module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) # Turn the boto3 result in to ansible_friendly_snaked_names diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index ca5d586b2e7..cebbed38f8b 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -226,19 +226,24 @@ import re from time import sleep -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, AWSRetry - try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') @@ -370,11 +375,8 @@ def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge def describe_route_tables_with_backoff(connection, **params): try: return connection.describe_route_tables(**params)['RouteTables'] - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound': - return None - else: - raise + except is_boto3_error_code('InvalidRouteTableID.NotFound'): + return None def get_route_table_by_id(connection, module, route_table_id): diff --git a/ecs_ecr.py b/ecs_ecr.py index 5b7ddd261f4..768589dbdff 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -188,13 +188,14 @@ import traceback try: - from botocore.exceptions import ClientError + import botocore except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.six import string_types from ansible_collections.amazon.aws.plugins.module_utils.core 
import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict @@ -227,11 +228,8 @@ def get_repository(self, registry_id, name): repositoryNames=[name], **build_kwargs(registry_id)) repos = res.get('repositories') return repos and repos[0] - except ClientError as err: - code = err.response['Error'].get('Code', 'Unknown') - if code == 'RepositoryNotFoundException': - return None - raise + except is_boto3_error_code('RepositoryNotFoundException'): + return None def get_repository_policy(self, registry_id, name): try: @@ -239,11 +237,8 @@ def get_repository_policy(self, registry_id, name): repositoryName=name, **build_kwargs(registry_id)) text = res.get('policyText') return text and json.loads(text) - except ClientError as err: - code = err.response['Error'].get('Code', 'Unknown') - if code == 'RepositoryPolicyNotFoundException': - return None - raise + except is_boto3_error_code('RepositoryPolicyNotFoundException'): + return None def create_repository(self, registry_id, name, image_tag_mutability): if registry_id: @@ -330,11 +325,8 @@ def get_lifecycle_policy(self, registry_id, name): repositoryName=name, **build_kwargs(registry_id)) text = res.get('lifecyclePolicyText') return text and json.loads(text) - except ClientError as err: - code = err.response['Error'].get('Code', 'Unknown') - if code == 'LifecyclePolicyNotFoundException': - return None - raise + except is_boto3_error_code('LifecyclePolicyNotFoundException'): + return None def put_lifecycle_policy(self, registry_id, name, policy_text): if not self.check_mode: @@ -521,7 +513,7 @@ def run(ecr, params): except Exception as err: msg = str(err) - if isinstance(err, ClientError): + if isinstance(err, botocore.exceptions.ClientError): msg = boto_exception(err) result['msg'] = msg result['exception'] = traceback.format_exc() diff --git a/ecs_service_info.py b/ecs_service_info.py index d428dde8835..2d64a89e6dd 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -138,6 +138,7 @@ pass # caught by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -153,11 +154,8 @@ def list_services_with_backoff(self, **kwargs): paginator = self.ecs.get_paginator('list_services') try: return paginator.paginate(**kwargs).build_full_result() - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'ClusterNotFoundException': - self.module.fail_json_aws(e, "Could not find cluster to list services") - else: - raise + except is_boto3_error_code('ClusterNotFoundException') as e: + self.module.fail_json_aws(e, "Could not find cluster to list services") @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def describe_services_with_backoff(self, **kwargs): diff --git a/efs.py b/efs.py index 56ec6980e56..f46c3f588c4 100644 --- a/efs.py +++ b/efs.py @@ -230,16 +230,17 @@ from time import time as timestamp try: - from botocore.exceptions import ClientError, BotoCoreError + import botocore except ImportError as e: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import 
camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (compare_aws_tags, - camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags def _index_by_key(key, items): @@ -431,7 +432,7 @@ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throug try: self.connection.create_file_system(**params) changed = True - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to create file system.") # we always wait for the state to be available when creating. @@ -469,7 +470,7 @@ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mi try: self.connection.update_file_system(FileSystemId=fs_id, **params) changed = True - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to update file system.") return changed @@ -489,7 +490,7 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, FileSystemId=fs_id, TagKeys=tags_to_delete ) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to delete tags.") result = True @@ -500,7 +501,7 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) ) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Unable to create tags.") result = True @@ -624,8 +625,8 @@ def iterate_all(attr, map_method, **kwargs): args['Marker'] = data['Nextmarker'] continue break - except ClientError as e: - if e.response['Error']['Code'] == "ThrottlingException" and wait < 600: + except is_boto3_error_code('ThrottlingException'): + if wait < 600: sleep(wait) wait = wait * 2 continue diff --git a/elasticache.py b/elasticache.py index 5fb45a8883b..93804562f2e 100644 --- a/elasticache.py +++ b/elasticache.py @@ -134,6 +134,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -439,13 +440,12 @@ def _refresh_data(self, cache_cluster_data=None): if cache_cluster_data is None: try: response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'CacheClusterNotFound': - self.data = None - self.status = 'gone' - return - else: - self.module.fail_json_aws(e, msg="Failed to describe cache clusters") + except is_boto3_error_code('CacheClusterNotFound'): + self.data = None + self.status = 
'gone' + return + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Failed to describe cache clusters") cache_cluster_data = response['CacheClusters'][0] self.data = cache_cluster_data self.status = self.data['CacheClusterStatus'] diff --git a/elasticache_info.py b/elasticache_info.py index 5b22c5cec1c..026337e3350 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -225,6 +225,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -243,12 +244,8 @@ def describe_cache_clusters_with_backoff(client, cluster_id=None): params['CacheClusterId'] = cluster_id try: response = paginator.paginate(**params).build_full_result() - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'CacheClusterNotFound': - return [] - raise - except botocore.exceptions.BotoCoreError: - raise + except is_boto3_error_code('CacheClusterNotFound'): + return [] return response['CacheClusters'] diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index d07125023bd..42920a3c2c4 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -119,6 +119,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def create(module, connection, replication_id, cluster_id, name): @@ -128,12 +129,11 @@ def create(module, connection, replication_id, cluster_id, name): CacheClusterId=cluster_id, SnapshotName=name) changed = True - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault": - response = {} - changed = False - else: - module.fail_json_aws(e, msg="Unable to create the snapshot.") + except is_boto3_error_code('SnapshotAlreadyExistsFault'): + response = {} + changed = False + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to create the snapshot.") return response, changed @@ -154,15 +154,14 @@ def delete(module, connection, name): try: response = connection.delete_snapshot(SnapshotName=name) changed = True - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == "SnapshotNotFoundFault": - response = {} - changed = False - elif e.response['Error']['Code'] == "InvalidSnapshotState": - module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." - "You may need to wait a few minutes.") - else: - module.fail_json_aws(e, msg="Unable to delete the snapshot.") + except is_boto3_error_code('SnapshotNotFoundFault'): + response = {} + changed = False + except is_boto3_error_code('InvalidSnapshotState'): # pylint: disable=duplicate-except + module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." 
+ "You may need to wait a few minutes.") + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete the snapshot.") return response, changed diff --git a/elb_target_group.py b/elb_target_group.py index e6c94f06286..4980fc797ad 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -379,13 +379,14 @@ except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list def get_tg_attributes(connection, module, tg_arn): diff --git a/execute_lambda.py b/execute_lambda.py index 199a50fd0a7..e5e21eacb61 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -136,6 +136,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def main(): @@ -206,13 +207,13 @@ def main(): try: response = client.invoke(**invoke_params) - except botocore.exceptions.ClientError as ce: - if ce.response['Error']['Code'] == 'ResourceNotFoundException': - module.fail_json_aws(ce, msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.") + except is_boto3_error_code('ResourceNotFoundException') as nfe: + module.fail_json_aws(nfe, msg="Could not find Lambda to execute. 
Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function.") + except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") - except botocore.exceptions.ParamValidationError as ve: + except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") except Exception as e: module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") diff --git a/iam_group.py b/iam_group.py index b55e32218a2..af9d781a92f 100644 --- a/iam_group.py +++ b/iam_group.py @@ -177,15 +177,17 @@ sample: / ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + def compare_attached_group_policies(current_attached_policies, new_attached_policies): @@ -246,7 +248,7 @@ def create_or_update_group(connection, module): # Get group try: group = get_group(connection, module, params['GroupName']) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get group") # If group is None, create it @@ -258,7 +260,7 @@ def create_or_update_group(connection, module): try: group = connection.create_group(**params) changed = True - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create group") # Manage managed policies @@ -276,7 +278,7 @@ def create_or_update_group(connection, module): if not module.check_mode: try: connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName']) # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above @@ -287,13 +289,13 @@ def create_or_update_group(connection, module): for policy_arn in managed_policies: try: connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName']) # Manage group memberships try: current_group_members = get_group(connection, module, params['GroupName'])['Users'] - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get group %s" % 
params['GroupName']) current_group_members_list = [] @@ -310,7 +312,7 @@ def create_or_update_group(connection, module): if not module.check_mode: try: connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName'])) # If there are users to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above @@ -321,7 +323,7 @@ def create_or_update_group(connection, module): for user in users: try: connection.add_user_to_group(GroupName=params['GroupName'], UserName=user) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName'])) if module.check_mode: module.exit_json(changed=changed) @@ -329,7 +331,7 @@ def create_or_update_group(connection, module): # Get the group again try: group = get_group(connection, module, params['GroupName']) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) @@ -342,7 +344,7 @@ def destroy_group(connection, module): try: group = get_group(connection, module, params['GroupName']) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) if group: # Check mode means we would remove this group @@ -353,26 +355,26 @@ def destroy_group(connection, module): try: for policy in get_attached_policy_list(connection, module, params['GroupName']): connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn']) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName']) # Remove any users in the group otherwise deletion fails current_group_members_list = [] try: current_group_members = get_group(connection, module, params['GroupName'])['Users'] - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) for member in current_group_members: current_group_members_list.append(member['UserName']) for user in current_group_members_list: try: connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName'])) try: connection.delete_group(**params) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName']) else: @@ -386,11 +388,8 @@ def get_group(connection, module, name): try: paginator = connection.get_paginator('get_group') return paginator.paginate(GroupName=name).build_full_result() - 
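
The iam_group cleanup converges on the idiom used throughout this series: `except is_boto3_error_code('NoSuchEntity'):` in place of catching ClientError and inspecting `e.response`. It works because Python evaluates the expression after `except` while the exception is in flight, so the helper can inspect `sys.exc_info()` and return either a class that matches the current exception or one that never will. A minimal sketch of the mechanics, assuming botocore is installed (the real implementation ships in amazon.aws's module_utils/core.py; the dummy class name below is illustrative):

```python
import sys

import botocore.exceptions


def is_boto3_error_code(code, e=None):
    """Return an exception class for use directly in an 'except' clause."""
    if e is None:
        # Inspect the exception currently being handled.
        dummy, e, dummy_tb = sys.exc_info()
    # Accept a single code or a list of codes.
    if not isinstance(code, list):
        code = [code]
    if isinstance(e, botocore.exceptions.ClientError) and e.response['Error']['Code'] in code:
        # Matching code: return the ClientError class so the clause catches it.
        return type(e)
    # No match: return a class that is never raised, so the clause is skipped
    # and the error propagates to later handlers (or the caller).
    return type('NeverRaisedException', (Exception,), {})
```

Accepting a list is what allows calls like `is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException'])` later in this series.
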
except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return None - else: - raise + except is_boto3_error_code('NoSuchEntity'): + return None @AWSRetry.exponential_backoff() @@ -399,11 +398,8 @@ def get_attached_policy_list(connection, module, name): try: paginator = connection.get_paginator('list_attached_group_policies') return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies'] - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return None - else: - raise + except is_boto3_error_code('NoSuchEntity'): + return None def main(): diff --git a/iam_managed_policy.py b/iam_managed_policy.py index aa668498ad1..f0fa588c44e 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -140,6 +140,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies @@ -200,16 +201,14 @@ def get_or_create_policy_version(module, iam, policy, policy_document): try: version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'LimitExceeded': - delete_oldest_non_default_version(module, iam, policy) - try: - version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] - return version, True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: - e = second_e - # Handle both when the exception isn't LimitExceeded or - # the second attempt still failed + except is_boto3_error_code('LimitExceeded'): + delete_oldest_non_default_version(module, iam, policy) + try: + version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + return version, True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: + module.fail_json_aws(second_e, msg="Couldn't create policy version") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create policy version") diff --git a/iam_password_policy.py b/iam_password_policy.py index d654a846cfd..852deb0d10b 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -103,8 +103,10 @@ except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code class IAMConnection(object): @@ -169,11 +171,10 @@ def update_password_policy(self, module, policy): def delete_password_policy(self, policy): try: results = policy.delete() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) - else: - 
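
The recurring `# pylint: disable=duplicate-except` comments exist because, on a match, the helper returns the ClientError class itself, so the specific clause and a later generic `except botocore.exceptions.ClientError` clause look identical to static analysis. At runtime the first matching clause wins, which is why the code-specific handler is always placed first. A representative fetch-or-None sketch using a hypothetical service call and error code, assuming the imports used throughout this patch:

```python
def describe_widget(connection, module, name):
    try:
        return connection.describe_widget(WidgetName=name)['Widget']
    except is_boto3_error_code('WidgetNotFound'):
        # Expected condition, not an error: the resource is simply absent.
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        # Any other AWS failure is fatal. This clause must come second, or it
        # would swallow the WidgetNotFound case above.
        module.fail_json_aws(e, msg="Couldn't describe widget %s" % name)
```

A companion helper, `is_boto3_error_message` (imported for rds_instance later in this series), applies the same mechanism to substrings of the error message rather than the error code.
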
self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") + except is_boto3_error_code('NoSuchEntity'): + self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") return camel_dict_to_snake_dict(results) diff --git a/iam_policy_info.py b/iam_policy_info.py index e934e09a621..19c5a01885b 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -78,18 +78,15 @@ ''' try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -class PolicyError(Exception): - pass - - class Policy: def __init__(self, client, name, policy_name): @@ -202,12 +199,10 @@ def main(): policy = GroupPolicy(**args) module.exit_json(**(policy.run())) - except (BotoCoreError, ClientError) as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - module.exit_json(changed=False, msg=e.response['Error']['Message']) + except is_boto3_error_code('NoSuchEntity') as e: + module.exit_json(changed=False, msg=e.response['Error']['Message']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - except PolicyError as e: - module.fail_json(msg=str(e)) if __name__ == '__main__': diff --git a/iam_role.py b/iam_role.py index 9a2eaca8cfe..ddc8ad23041 100644 --- a/iam_role.py +++ b/iam_role.py @@ -194,19 +194,21 @@ import json -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - compare_aws_tags, - ) - try: - from botocore.exceptions import ClientError, BotoCoreError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): if not compare_policies(current_policy_doc, json.loads(new_policy_doc)): @@ -242,7 +244,7 @@ def attach_policies(connection, module, policies_to_attach, params): try: if not module.check_mode: connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as 
e: module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName'])) changed = True return changed @@ -254,7 +256,7 @@ def remove_policies(connection, module, policies_to_remove, params): try: if not module.check_mode: connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName'])) changed = True return changed @@ -293,7 +295,7 @@ def create_basic_role(connection, module, params): else: role = {'MadeInCheckMode': True} role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument']) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create role") return role @@ -312,7 +314,7 @@ def update_role_assumed_policy(connection, module, params, role): RoleName=params['RoleName'], PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])), aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName'])) return True @@ -329,7 +331,7 @@ def update_role_description(connection, module, params, role): try: connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName'])) return True @@ -346,7 +348,7 @@ def update_role_max_session_duration(connection, module, params, role): try: connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName'])) return True @@ -364,12 +366,12 @@ def update_role_permissions_boundary(connection, module, params, role): if params.get('PermissionsBoundary') == '': try: connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName'])) else: try: connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName'])) return True @@ -457,7 +459,7 @@ def create_instance_profiles(connection, module, params, role): # Fetch existing Profiles try: instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles'] - except (BotoCoreError, ClientError) as e: + except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName'])) # Profile already exists @@ -470,20 +472,17 @@ def create_instance_profiles(connection, module, params, role): # Make sure an instance profile is created try: connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True) - except ClientError as e: + except is_boto3_error_code('EntityAlreadyExists'): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... - if e.response['Error']['Code'] == 'EntityAlreadyExists': - return False - else: - module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName'])) - except BotoCoreError as e: + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName'])) # And attach the role to the profile try: connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName'])) return True @@ -495,7 +494,7 @@ def remove_instance_profiles(connection, module, role_params, role): try: instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles'] - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Remove the role from the instance profile(s) @@ -508,9 +507,9 @@ def remove_instance_profiles(connection, module, role_params, role): if delete_profiles: try: connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) @@ -537,7 +536,7 @@ def destroy_role(connection, module): try: if not module.check_mode: connection.delete_role(aws_retry=True, **role_params) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to delete role") module.exit_json(changed=True) @@ -546,26 +545,23 @@ def destroy_role(connection, module): def get_role_with_backoff(connection, module, name): try: return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role'] - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_role(connection, module, name): try: return connection.get_role(RoleName=name, aws_retry=True)['Role'] - 
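
The iam_role hunks lean on two retry entry points from the shared module_utils: building the client with a retry decorator so individual calls can opt in via `aws_retry=True`, and wrapping a single call directly, optionally extending the retryable set to cover IAM's eventual consistency. A sketch under the assumption that this runs inside a module with the amazon.aws collection available; 'example-role' is a placeholder:

```python
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

module = AnsibleAWSModule(argument_spec=dict())

# Calls made with aws_retry=True on this client are retried with jittered
# exponential backoff on throttling-style error codes.
connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
role = connection.get_role(RoleName='example-role', aws_retry=True)['Role']

# The decorator can also wrap one call directly, here additionally treating
# NoSuchEntity as retryable while a freshly created role becomes visible.
role = AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName='example-role')['Role']
```
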
except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return None - else: - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) - except BotoCoreError as e: + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_attached_policy_list(connection, module, name): try: return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) @@ -575,7 +571,7 @@ def get_role_tags(connection, module): return {} try: return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) @@ -590,7 +586,7 @@ def update_role_tags(connection, module, params, role): try: existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) - except (ClientError, KeyError): + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) @@ -601,7 +597,7 @@ def update_role_tags(connection, module, params, role): connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name) changed = bool(tags_to_add) or bool(tags_to_remove) diff --git a/iam_role_info.py b/iam_role_info.py index 95eabdb95ab..132bdeedcc9 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -152,8 +152,12 @@ except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @AWSRetry.exponential_backoff() @@ -208,12 +212,9 @@ def describe_iam_roles(module, client): if name: try: roles = [client.get_role(RoleName=name)['Role']] - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return [] - else: - module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) - except botocore.exceptions.BotoCoreError as e: + except is_boto3_error_code('NoSuchEntity'): + return [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except 
module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) else: params = dict() diff --git a/iam_user.py b/iam_user.py index 7bd8ebda423..531ae6ba9ae 100644 --- a/iam_user.py +++ b/iam_user.py @@ -111,10 +111,10 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def compare_attached_policies(current_attached_policies, new_attached_policies): @@ -297,34 +297,30 @@ def get_user(connection, module, name): try: return connection.get_user(**params) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return None - else: - module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)), - **camel_dict_to_snake_dict(e.response)) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) def get_attached_policy_list(connection, module, name): try: return connection.list_attached_user_policies(UserName=name)['AttachedPolicies'] - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'NoSuchEntity': - return None - else: - module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) def delete_user_login_profile(connection, module, user_name): try: return connection.delete_login_profile(UserName=user_name) - except botocore.exceptions.ClientError as e: - if e.response["Error"]["Code"] == "NoSuchEntity": - return None - else: - module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) def main(): diff --git a/lambda_alias.py b/lambda_alias.py index 8cd8a891289..aeacb6e3b75 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -144,13 +144,15 @@ import re try: - from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError + import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info @@ -182,12 +184,12 @@ def __init__(self, ansible_obj, resources, boto3_=True): if not self.region: self.region = self.resource_client['lambda'].meta.region_name - except (ClientError, 
ParamValidationError, MissingParametersError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) try: self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] - except (ClientError, ValueError, KeyError, IndexError): + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ValueError, KeyError, IndexError): self.account_id = '' def client(self, resource='lambda'): @@ -269,12 +271,10 @@ def get_lambda_alias(module, aws): # check if alias exists and get facts try: results = client.get_alias(**api_params) - - except (ClientError, ParamValidationError, MissingParametersError) as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - results = None - else: - module.fail_json(msg='Error retrieving function alias: {0}'.format(e)) + except is_boto3_error_code('ResourceNotFoundException'): + results = None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Error retrieving function alias') return results @@ -314,7 +314,7 @@ def lambda_alias(module, aws): if not module.check_mode: try: results = client.update_alias(**api_params) - except (ClientError, ParamValidationError, MissingParametersError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg='Error updating function alias: {0}'.format(e)) else: @@ -325,7 +325,7 @@ def lambda_alias(module, aws): if not module.check_mode: results = client.create_alias(**api_params) changed = True - except (ClientError, ParamValidationError, MissingParametersError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg='Error creating function alias: {0}'.format(e)) else: # state = 'absent' @@ -337,7 +337,7 @@ def lambda_alias(module, aws): if not module.check_mode: results = client.delete_alias(**api_params) changed = True - except (ClientError, ParamValidationError, MissingParametersError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg='Error deleting function alias: {0}'.format(e)) return dict(changed=changed, **dict(results or facts)) diff --git a/lambda_facts.py b/lambda_facts.py index 4c02947c998..b1a223b61db 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -88,20 +88,21 @@ returned: success type: dict ''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime import sys import re - try: - from botocore.exceptions import ClientError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def fix_return(node): """ @@ -147,11 +148,10 @@ def alias_details(client, module): params['Marker'] = module.params.get('next_marker') try: lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(aliases=[]) - else: - 
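
Several modules in this series (elb_target_group, iam_group, iam_role_info, the lambda_* modules, and others) stop importing `camel_dict_to_snake_dict` from the collection's ec2 module_utils and take it from its canonical home in ansible-core, `ansible.module_utils.common.dict_transformations`. Behaviour is unchanged: keys are snake_cased recursively, values are left untouched. A small example, assuming ansible-core is importable:

```python
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

response = {'FunctionName': 'example', 'MemorySize': 128,
            'TracingConfig': {'Mode': 'PassThrough'}}
print(camel_dict_to_snake_dict(response))
# {'function_name': 'example', 'memory_size': 128,
#  'tracing_config': {'mode': 'PassThrough'}}
```
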
module.fail_json_aws(e, msg="Trying to get aliases") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(aliases=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get aliases") else: module.fail_json(msg='Parameter function_name required for query=aliases.') @@ -201,11 +201,10 @@ def config_details(client, module): if function_name: try: lambda_facts.update(client.get_function_configuration(FunctionName=function_name)) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(function={}) - else: - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) else: params = dict() if module.params.get('max_items'): @@ -216,11 +215,10 @@ def config_details(client, module): try: lambda_facts.update(function_list=client.list_functions(**params)['Functions']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(function_list=[]) - else: - module.fail_json_aws(e, msg="Trying to get function list") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(function_list=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get function list") functions = dict() for func in lambda_facts.pop('function_list', []): @@ -257,11 +255,10 @@ def mapping_details(client, module): try: lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(mappings=[]) - else: - module.fail_json_aws(e, msg="Trying to get source event mappings") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(mappings=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get source event mappings") if function_name: return {function_name: camel_dict_to_snake_dict(lambda_facts)} @@ -288,11 +285,10 @@ def policy_details(client, module): try: # get_policy returns a JSON string so must convert to dict before reassigning to its key lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(policy={}) - else: - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(policy={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=policy.') @@ -321,11 +317,10 @@ def version_details(client, module): try: lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, 
**params)['Versions']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_facts.update(versions=[]) - else: - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_facts.update(versions=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=versions.') diff --git a/lambda_info.py b/lambda_info.py index 1e40aec4ca1..725149d9c3b 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -78,19 +78,20 @@ returned: success type: dict ''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict import json import datetime import re - try: - from botocore.exceptions import ClientError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def fix_return(node): """ @@ -136,11 +137,10 @@ def alias_details(client, module): params['Marker'] = module.params.get('next_marker') try: lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(aliases=[]) - else: - module.fail_json_aws(e, msg="Trying to get aliases") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get aliases") else: module.fail_json(msg='Parameter function_name required for query=aliases.') @@ -190,11 +190,10 @@ def config_details(client, module): if function_name: try: lambda_info.update(client.get_function_configuration(FunctionName=function_name)) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(function={}) - else: - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) else: params = dict() if module.params.get('max_items'): @@ -205,11 +204,10 @@ def config_details(client, module): try: lambda_info.update(function_list=client.list_functions(**params)['Functions']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(function_list=[]) - else: - module.fail_json_aws(e, msg="Trying to get function list") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function_list=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get function list") functions = 
dict() for func in lambda_info.pop('function_list', []): @@ -246,11 +244,10 @@ def mapping_details(client, module): try: lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(mappings=[]) - else: - module.fail_json_aws(e, msg="Trying to get source event mappings") + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(mappings=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get source event mappings") if function_name: return {function_name: camel_dict_to_snake_dict(lambda_info)} @@ -277,11 +274,10 @@ def policy_details(client, module): try: # get_policy returns a JSON string so must convert to dict before reassigning to its key lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(policy={}) - else: - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=policy.') @@ -310,11 +306,10 @@ def version_details(client, module): try: lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) - except ClientError as e: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - lambda_info.update(versions=[]) - else: - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(versions=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) else: module.fail_json(msg='Parameter function_name required for query=versions.') diff --git a/lambda_policy.py b/lambda_policy.py index 2fb4b4ddead..ff091a8beaa 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -134,14 +134,16 @@ import json import re -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: - from botocore.exceptions import ClientError + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + def pc(key): """ @@ -285,14 +287,9 @@ def get_policy_statement(module, client): # check if function policy exists try: policy_results = client.get_policy(**api_params) - except ClientError as e: - try: - if e.response['Error']['Code'] == 'ResourceNotFoundException': - return {} - except AttributeError: # catches ClientErrors without response, e.g. 
fail before connect - pass - module.fail_json_aws(e, msg="retrieving function policy") - except Exception as e: + except is_boto3_error_code('ResourceNotFoundException'): + return {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="retrieving function policy") # get_policy returns a JSON string so must convert to dict before reassigning to its key @@ -328,7 +325,7 @@ def add_policy_permission(module, client): if not module.check_mode: try: client.add_permission(**api_params) - except Exception as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="adding permission to policy") changed = True @@ -356,7 +353,7 @@ def remove_policy_permission(module, client): if not module.check_mode: client.remove_permission(**api_params) changed = True - except Exception as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="removing permission from policy") return changed diff --git a/lightsail.py b/lightsail.py index 4be2fc3f458..a996edc5e85 100644 --- a/lightsail.py +++ b/lightsail.py @@ -160,17 +160,21 @@ # will be caught by AnsibleAWSModule pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def find_instance_info(module, client, instance_name, fail_if_not_found=False): try: res = client.get_instance(instanceName=instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'NotFoundException' and not fail_if_not_found: - return None + except is_boto3_error_code('NotFoundException') as e: + if fail_if_not_found: + module.fail_json_aws(e) + return None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) return res['instance'] diff --git a/rds_instance.py b/rds_instance.py index 169ace0e2fa..0dd763c369f 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -745,20 +745,6 @@ sample: sg-12345678 ''' -from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.rds import ( - arg_spec_to_rds_params, - call_method, - ensure_tags, - get_final_identifier, - get_rds_method_attribute, - get_tags, -) -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry -from ansible.module_utils.six import string_types - from time import sleep try: @@ -766,6 +752,23 @@ except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.six import string_types + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from 
ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params +from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_identifier +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + def get_rds_method_attribute_name(instance, state, creation_source, read_replica): method_name = None @@ -1034,11 +1037,8 @@ def promote_replication_instance(client, module, instance, read_replica): try: call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) changed = True - except is_boto3_error_code('InvalidDBInstanceState') as e: - if 'DB Instance is not a read replica' in e.response['Error']['Message']: - pass - else: - raise e + except is_boto3_error_message('DB Instance is not a read replica'): + pass return changed diff --git a/rds_param_group.py b/rds_param_group.py index ff18fc98300..30aa814de67 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -124,6 +124,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags @@ -230,11 +231,10 @@ def ensure_present(module, connection): errors = [] try: response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'DBParameterGroupNotFound': - response = None - else: - module.fail_json_aws(e, msg="Couldn't access parameter group information") + except is_boto3_error_code('DBParameterGroupNotFound'): + response = None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't access parameter group information") if not response: params = dict(DBParameterGroupName=groupname, DBParameterGroupFamily=module.params['engine'], @@ -273,11 +273,10 @@ def ensure_absent(module, connection): group = module.params['name'] try: response = connection.describe_db_parameter_groups(DBParameterGroupName=group) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'DBParameterGroupNotFound': - module.exit_json(changed=False) - else: - module.fail_json_aws(e, msg="Couldn't access parameter group information") + except is_boto3_error_code('DBParameterGroupNotFound'): + module.exit_json(changed=False) + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't access parameter group information") try: response = 
connection.delete_db_parameter_group(DBParameterGroupName=group) module.exit_json(changed=True) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 967be374219..0bc4a328680 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -17,8 +17,6 @@ notes: - If specifying expiration time as days then transition time must also be specified in days - If specifying expiration time as a date then transition time must also be specified as a date -requirements: - - python-dateutil options: name: description: @@ -196,11 +194,12 @@ import datetime try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore except ImportError: pass # handled by AnsibleAwsModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def create_lifecycle_rule(client, module): @@ -226,12 +225,9 @@ def create_lifecycle_rule(client, module): try: current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name) current_lifecycle_rules = current_lifecycle['Rules'] - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration': - current_lifecycle_rules = [] - else: - module.fail_json_aws(e) - except BotoCoreError as e: + except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) @@ -307,7 +303,7 @@ def create_lifecycle_rule(client, module): # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration) - except (BotoCoreError, ClientError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) module.exit_json(changed=changed) @@ -390,12 +386,9 @@ def destroy_lifecycle_rule(client, module): # Get the bucket's current lifecycle rules try: current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules'] - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration': - current_lifecycle_rules = [] - else: - module.fail_json_aws(e) - except BotoCoreError as e: + except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) # Create lifecycle @@ -425,7 +418,7 @@ def destroy_lifecycle_rule(client, module): elif current_lifecycle_rules: changed = True client.delete_bucket_lifecycle(Bucket=name) - except (ClientError, BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) module.exit_json(changed=changed) @@ -485,13 +478,13 @@ def main(): if expiration_date is not None: try: datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z") - except ValueError as e: + except ValueError: module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included") if transition_date is not None: try: datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z") - except ValueError as e: + except ValueError: module.fail_json(msg="expiration_date is not a valid ISO-8601 format. 
The time must be midnight and a timezone of GMT must be included") if state == 'present': diff --git a/sqs_queue.py b/sqs_queue.py index 5d65967974a..0a93909f021 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -220,19 +220,21 @@ ''' import json -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (AWSRetry, - camel_dict_to_snake_dict, - compare_aws_tags, - snake_dict_to_camel_dict, - compare_policies, - ) try: - from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError + import botocore except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + def get_queue_name(module, is_fifo=False): name = module.params.get('name') @@ -246,10 +248,8 @@ def get_queue_name(module, is_fifo=False): def get_queue_url(client, name): try: return client.get_queue_url(QueueName=name)['QueueUrl'] - except ClientError as e: - if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue': - return None - raise + except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'): + return None def describe_queue(client, queue_url): @@ -418,7 +418,7 @@ def update_tags(client, queue_url, module): try: existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags'] - except (ClientError, KeyError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: existing_tags = {} tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) @@ -465,7 +465,7 @@ def main(): result = create_or_update_sqs_queue(client, module) elif state == 'absent': result = delete_sqs_queue(client, module) - except (BotoCoreError, ClientError, ParamValidationError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to control sqs queue') else: module.exit_json(**result) From 1b4e498522d6d5a42a2a5b3f258a9b6ddf94de2d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 10 Feb 2021 17:30:55 +0100 Subject: [PATCH 078/683] ec2_instance - Use shared module implementation of get_ec2_security_group_ids_from_names (#214) * ec2_instance - Use shared module implementation of get_ec2_security_group_ids_from_names * changelog --- ec2_instance.py | 59 ++++++------------------------------------------- 1 file changed, 7 insertions(+), 52 deletions(-) diff --git a/ec2_instance.py b/ec2_instance.py index 06ebda60341..380e3527910 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -803,7 +803,7 @@ import uuid try: - import botocore.exceptions + import botocore except ImportError: pass # caught by AnsibleAWSModule @@ -821,6 +821,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names module = None @@ -1029,7 +1030,7 @@ def build_network_spec(params, ec2=None): subnet_id=spec['SubnetId'], ec2=ec2 ) - spec['Groups'] = [g['GroupId'] for g in groups] + spec['Groups'] = groups if network.get('description') is not None: spec['Description'] = network['description'] # TODO more special snowflake network things @@ -1131,57 +1132,11 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) parent_vpc_id = sub['Subnets'][0]['VpcId'] - vpc = { - 'Name': 'vpc-id', - 'Values': [parent_vpc_id] - } - - # because filter lists are AND in the security groups API, - # make two separate requests for groups by ID and by name - id_filters = [vpc] - name_filters = [vpc] - if group: - name_filters.append( - dict( - Name='group-name', - Values=[group] - ) - ) - if group.startswith('sg-'): - id_filters.append( - dict( - Name='group-id', - Values=[group] - ) - ) + return get_ec2_security_group_ids_from_names(group, ec2, vpc_id=parent_vpc_id) if groups: - name_filters.append( - dict( - Name='group-name', - Values=groups - ) - ) - if [g for g in groups if g.startswith('sg-')]: - id_filters.append( - dict( - Name='group-id', - Values=[g for g in groups if g.startswith('sg-')] - ) - ) - - found_groups = [] - for f_set in (id_filters, name_filters): - if len(f_set) > 1: - found_groups.extend(describe_security_groups(ec2, Filters=f_set)) - return list(dict((g['GroupId'], g) for g in found_groups).values()) - - -@AWSRetry.jittered_backoff() -def describe_security_groups(ec2, **params): - paginator = ec2.get_paginator('describe_security_groups') - results = paginator.paginate(**params) - return list(results.search('SecurityGroups[]')) + return get_ec2_security_group_ids_from_names(groups, ec2, vpc_id=parent_vpc_id) + return [] def build_top_level_options(params): @@ -1379,7 +1334,7 @@ def value_wrapper(v): subnet_id=subnet_id, ec2=ec2 ) - expected_groups = [g['GroupId'] for g in groups] + expected_groups = groups instance_groups = [g['GroupId'] for g in value['Groups']] if set(instance_groups) != set(expected_groups): changes_to_apply.append(dict( From b8ae87c0f3b8c54e4413ec95b7da260efe13d479 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 10 Feb 2021 19:47:02 +0100 Subject: [PATCH 079/683] aws_kms: Support setting PendingWindowInDays (Deletion Delay) and fix tests (#200) * Ensure we can still update / delete KMS keys when we can't access the key rotation status * Fix and enable KMS tests * Add support for setting the deletion schedule window * Ignore failures during cleanup * changelog * Change role name to match those permitted by CI policies * Split imports - easier to rebase * Make sure key rotation enable/disable errors don't drop through to main() * Allow STS principals as well as IAM principals * Add support for direct lookup by alias/id Use it in test suite (filters are done client side and are SLOW) * Ensure we don't throw an exception when a tag doesn't exist * Add docs * changelog * Flag aws_kms tests as unstable * lint fixups * Consistently handle 'UnsupportedOperationException' on key rotation * Update version added * Allow a little flexibility for deletion times * Update version_added --- aws_kms.py | 70 ++++++++++++++++++++++++++++++++++------------ aws_kms_info.py | 74 
+++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 112 insertions(+), 32 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index b86686cd264..10753f63584 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -117,6 +117,15 @@ tags: description: A dictionary of tags to apply to a key. type: dict + pending_window: + description: + - The number of days between requesting deletion of the CMK and when it will actually be deleted. + - Only used when I(state=absent) and the CMK has not yet been deleted. + - Valid values are between 7 and 30 (inclusive). + - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)' + type: int + aliases: ['deletion_delay'] + version_added: 1.4.0 purge_tags: description: Whether the I(tags) argument should cause tags not in the list to be removed @@ -405,19 +414,25 @@ 'admin': 'Allow access for Key Administrators' } -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, compare_policies -from ansible.module_utils.six import string_types - import json +import re try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.six import string_types + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def get_iam_roles_with_backoff(connection): @@ -533,8 +548,11 @@ def get_key_details(connection, module, key_id): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain aliases") - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') + except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: + result['enable_key_rotation'] = None result['aliases'] = aliases.get(result['KeyId'], []) result = camel_dict_to_snake_dict(result) @@ -622,8 +640,12 @@ def start_key_deletion(connection, module, key_metadata): if module.check_mode: return True + deletion_params = {'KeyId': key_metadata['Arn']} + if module.params.get('pending_window'): + deletion_params['PendingWindowInDays'] = module.params.get('pending_window') + try: - connection.schedule_key_deletion(KeyId=key_metadata['Arn']) + 
connection.schedule_key_deletion(**deletion_params) return True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to schedule key for deletion") @@ -767,14 +789,23 @@ def update_key_rotation(connection, module, key, enable_key_rotation): if enable_key_rotation is None: return False key_id = key['key_arn'] - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: - return False - if enable_key_rotation: - connection.enable_key_rotation(KeyId=key_id) - else: - connection.disable_key_rotation(KeyId=key_id) + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: + return False + except is_boto3_error_code('AccessDeniedException'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get current key rotation status") + + try: + if enable_key_rotation: + connection.enable_key_rotation(KeyId=key_id) + else: + connection.disable_key_rotation(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to enable/disable key rotation") return True @@ -881,8 +912,10 @@ def _clean_statement_principals(statement, clean_invalid_entries): if not isinstance(statement['Principal'].get('AWS'), list): statement['Principal']['AWS'] = list() - invalid_entries = [item for item in statement['Principal']['AWS'] if not item.startswith('arn:aws:iam::')] - valid_entries = [item for item in statement['Principal']['AWS'] if item.startswith('arn:aws:iam::')] + valid_princ = re.compile('^arn:aws:(iam|sts)::') + + invalid_entries = [item for item in statement['Principal']['AWS'] if not valid_princ.match(item)] + valid_entries = [item for item in statement['Principal']['AWS'] if valid_princ.match(item)] if bool(invalid_entries) and clean_invalid_entries: statement['Principal']['AWS'] = valid_entries @@ -1024,6 +1057,7 @@ def main(): policy_role_arn=dict(aliases=['role_arn']), policy_grant_types=dict(aliases=['grant_types'], type='list', elements='str'), policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True), + pending_window=dict(aliases=['deletion_delay'], type='int'), key_id=dict(aliases=['key_arn']), description=dict(), enabled=dict(type='bool', default=True), diff --git a/aws_kms_info.py b/aws_kms_info.py index 2366c5d0f45..879cf317497 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -16,12 +16,31 @@ - This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change. author: "Will Thames (@willthames)" options: + alias: + description: + - Alias for key. + - Mutually exclusive with I(key_id) and I(filters). + required: false + aliases: + - key_alias + type: str + version_added: 1.4.0 + key_id: + description: + - Key ID or ARN of the key. + - Mutually exclusive with I(alias) and I(filters). + required: false + aliases: + - key_arn + type: str + version_added: 1.4.0 filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filters aren't natively supported by boto3, but are supported to provide similar functionality to other modules. 
Standard tag filters (C(tag-key), C(tag-value) and C(tag:tagName)) are available, as are C(key-id) and C(alias) + - Mutually exclusive with I(alias) and I(key_id). type: dict pending_deletion: description: Whether to get full details (tags, grants etc.) of keys pending deletion @@ -290,12 +309,20 @@ def get_key_policy_with_backoff(connection, key_id, policy_name): def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: return None return current_rotation_status.get('KeyRotationEnabled') +def canonicalize_alias_name(alias): + if alias is None: + return None + if alias.startswith('alias/'): + return alias + return 'alias/' + alias + + def get_kms_tags(connection, module, key_id): # Handle pagination here as list_resource_tags does not have # a paginator @@ -338,7 +365,10 @@ def key_matches_filter(key, filtr): if filtr[0] == 'alias': return filtr[1] in key['aliases'] if filtr[0].startswith('tag:'): - return key['tags'][filtr[0][4:]] == filtr[1] + tag_key = filtr[0][4:] + if tag_key not in key['tags']: + return False + return key['tags'].get(tag_key) == filtr[1] def key_matches_filters(key, filters): @@ -353,7 +383,11 @@ def get_key_details(connection, module, key_id, tokens=None): tokens = [] try: result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # Make sure we have the canonical ARN, we might have been passed an alias + key_id = result['Arn'] + except is_boto3_error_code('NotFoundException'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key metadata") result['KeyArn'] = result.pop('Arn') @@ -361,12 +395,9 @@ def get_key_details(connection, module, key_id, tokens=None): aliases = get_kms_aliases_lookup(connection) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain aliases") + # We can only get aliases for our own account, so we don't need the full ARN result['aliases'] = aliases.get(result['KeyId'], []) - - if result['Origin'] == 'AWS_KMS': - result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) - else: - result['enable_key_rotation'] = None + result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) if module.params.get('pending_deletion'): return camel_dict_to_snake_dict(result) @@ -384,21 +415,36 @@ def get_key_details(connection, module, key_id, tokens=None): def get_kms_info(connection, module): - try: - keys = get_kms_keys_with_backoff(connection)['Keys'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain keys") - - return [get_key_details(connection, module, key['KeyId']) for key in keys] + if module.params.get('key_id'): + key_id = module.params.get('key_id') + details = get_key_details(connection, module, key_id) + if details: + return [details] + return [] + elif module.params.get('alias'): + alias = canonicalize_alias_name(module.params.get('alias')) + details = get_key_details(connection, module, alias) + if details: + return [details] + return [] + else: + try: + keys = 
get_kms_keys_with_backoff(connection)['Keys'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain keys") + return [get_key_details(connection, module, key['KeyId']) for key in keys] def main(): argument_spec = dict( + alias=dict(aliases=['key_alias']), + key_id=dict(aliases=['key_arn']), filters=dict(type='dict'), pending_deletion=dict(type='bool', default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, + mutually_exclusive=[['alias', 'filters', 'key_id']], supports_check_mode=True) if module._name == 'aws_kms_facts': module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws') From 0328133533bd4458f7380d4f259461e7b64f8f6b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 11 Feb 2021 18:48:21 +0100 Subject: [PATCH 080/683] Fix data_pipeline unit test - error code not properly caught (#406) --- data_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_pipeline.py b/data_pipeline.py index 4874388c733..a3821a068d8 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -254,7 +254,7 @@ def pipeline_description(client, dp_id): """ try: return client.describe_pipelines(pipelineIds=[dp_id]) - except is_boto3_error_code('PipelineNotFoundException', 'PipelineDeletedException'): + except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']): raise DataPipelineNotFound From ad54f207a6846d7d44ac8cb2f7db2f83787e75d3 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 11 Feb 2021 20:29:23 +0100 Subject: [PATCH 081/683] fix wrong element spec for rules param in aws_s3_cors (#408) * #404 fix wrong element spec for rules param * #404 documentation fix Co-authored-by: Markus Bergholz --- aws_s3_cors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_s3_cors.py b/aws_s3_cors.py index 820530dc08d..58e33cf1104 100644 --- a/aws_s3_cors.py +++ b/aws_s3_cors.py @@ -25,7 +25,7 @@ description: - Cors rules to put on the s3 bucket type: list - elements: str + elements: dict state: description: - Create or remove cors on the s3 bucket @@ -147,7 +147,7 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), - rules=dict(type='list', elements='str'), + rules=dict(type='list', elements='dict'), state=dict(type='str', choices=['present', 'absent'], required=True) ) From 9d51c4435a7d8c6accfd4e0b796a792d55960df8 Mon Sep 17 00:00:00 2001 From: Michael Mayer Date: Thu, 11 Feb 2021 12:41:05 -0800 Subject: [PATCH 082/683] Fix parameter validation in ecs_task (#402) * Fix parameter validation in ecs_task * Require cluster parameter in ecs_task module * Move parameter validation to AnsibleAWSModule * Fix pep8 formatting line too long * changelog --- ecs_task.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/ecs_task.py b/ecs_task.py index e8eeb9c57ea..90f9df43f01 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -19,17 +19,20 @@ operation: description: - Which task operation to execute. + - When I(operation=run) I(task_definition) must be set. + - When I(operation=start) both I(task_definition) and I(container_instances) must be set. + - When I(operation=stop) both I(task_definition) and I(task) must be set. required: True choices: ['run', 'start', 'stop'] type: str cluster: description: - The name of the cluster to run the task on. 
- required: False + required: True type: str task_definition: description: - - The task definition to start or run. + - The task definition to start, run or stop. required: False type: str overrides: @@ -44,7 +47,7 @@ type: int task: description: - - The task to stop. + - The ARN of the task to stop. required: False type: str container_instances: @@ -332,7 +335,7 @@ def ecs_api_handles_network_configuration(self): def main(): argument_spec = dict( operation=dict(required=True, choices=['run', 'start', 'stop']), - cluster=dict(required=False, type='str'), # R S P + cluster=dict(required=True, type='str'), # R S P task_definition=dict(required=False, type='str'), # R* S* overrides=dict(required=False, type='dict'), # R S count=dict(required=False, type='int'), # R @@ -345,28 +348,26 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[('launch_type', 'FARGATE', ['network_configuration'])]) + required_if=[ + ('launch_type', 'FARGATE', ['network_configuration']), + ('operation', 'run', ['task_definition']), + ('operation', 'start', [ + 'task_definition', + 'container_instances' + ]), + ('operation', 'stop', ['task_definition', 'task']), + ]) # Validate Inputs if module.params['operation'] == 'run': - if 'task_definition' not in module.params and module.params['task_definition'] is None: - module.fail_json(msg="To run a task, a task_definition must be specified") task_to_list = module.params['task_definition'] status_type = "RUNNING" if module.params['operation'] == 'start': - if 'task_definition' not in module.params and module.params['task_definition'] is None: - module.fail_json(msg="To start a task, a task_definition must be specified") - if 'container_instances' not in module.params and module.params['container_instances'] is None: - module.fail_json(msg="To start a task, container instances must be specified") task_to_list = module.params['task'] status_type = "RUNNING" if module.params['operation'] == 'stop': - if 'task' not in module.params and module.params['task'] is None: - module.fail_json(msg="To stop a task, a task must be specified") - if 'task_definition' not in module.params and module.params['task_definition'] is None: - module.fail_json(msg="To stop a task, a task definition must be specified") task_to_list = module.params['task_definition'] status_type = "STOPPED" From a20407989897eada381ba60f9452120b8c986c13 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 12 Feb 2021 01:23:16 +0100 Subject: [PATCH 083/683] ec2_vpc_endpoint - deprecate policy_file (#366) * deprecate policy_file * ignore file * changelog --- ec2_vpc_endpoint.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 4daaaeaa23e..8e2426a525a 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -44,6 +44,9 @@ on how to use it properly. Cannot be used with I(policy). - Option when creating an endpoint. If not provided AWS will utilise a default policy which provides full access to the service. + - This option has been deprecated and will be removed after 2022-12-01 + to maintain the existing functionality please use the I(policy) option + and a file lookup. 
required: false aliases: [ "policy_path" ] type: path @@ -346,6 +349,11 @@ def main(): # Validate Requirements state = module.params.get('state') + if module.params.get('policy_file'): + module.deprecate('The policy_file option has been deprecated and' + ' will be removed after 2022-12-01', + date='2022-12-01', collection_name='community.aws') + try: ec2 = module.client('ec2') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: From 56855a115951f93a051c81009635d9eb61dc8c5a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 12 Feb 2021 01:32:24 +0100 Subject: [PATCH 084/683] ec2_vpc_endpoint - fixup deletion 'changed' (#362) * Ensure ec2_vpc_endpoint returns True when deleting an Endpoint Return not changed when state=absent and endpoint has already been deleted * Add minimal endpoint tests --- ec2_vpc_endpoint.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 8e2426a525a..28d0fda0eba 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -313,8 +313,16 @@ def setup_removal(client, module): params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id') try: result = client.delete_vpc_endpoints(**params)['Unsuccessful'] - if not module.check_mode and (result != []): - module.fail_json(msg=result) + if len(result) < len(params['VpcEndpointIds']): + changed = True + # For some reason delete_vpc_endpoints doesn't throw exceptions it + # returns a list of failed 'results' instead. Throw these so we can + # catch them the way we expect + for r in result: + try: + raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') + except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): + continue except is_boto3_error_code('DryRunOperation'): changed = True result = 'Would have deleted VPC Endpoint if not in check mode' From 7bff40dd04fa6eb25b1732da011b3e8383da78c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pedro=20Magalh=C3=A3es?= <4622652+pjrm@users.noreply.github.com> Date: Fri, 12 Feb 2021 13:22:41 +0000 Subject: [PATCH 085/683] route53 - Refactor to use boto3 (#405) * route53 - Refactor to use boto3 * Changelog Co-authored-by: Mark Chappell --- route53.py | 321 ++++++++++++++++++++--------------------------------- 1 file changed, 119 insertions(+), 202 deletions(-) diff --git a/route53.py b/route53.py index 6caf385002f..495be280fc5 100644 --- a/route53.py +++ b/route53.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -135,7 +136,6 @@ - Mike Buzzetti (@jimbydamonk) extends_documentation_fragment: - amazon.aws.aws - ''' RETURN = r''' @@ -224,7 +224,6 @@ ttl: 7200 value: 1.1.1.1,2.2.2.2,3.3.3.3 wait: yes - - name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated community.aws.route53: state: present @@ -237,7 +236,6 @@ - 2.2.2.2 - 3.3.3.3 wait: yes - - name: Retrieve the details for new.foo.com community.aws.route53: state: get @@ -245,7 +243,6 @@ record: new.foo.com type: A register: rec - - name: Delete new.foo.com A record using the results from the get command community.aws.route53: state: absent @@ -254,7 +251,6 @@ ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" - # Add an AAAA record. Note that because there are colons in the value # that the IPv6 address must be quoted. Also shows using the old form command=create. 
- name: Add an AAAA record @@ -265,7 +261,6 @@ type: AAAA ttl: 7200 value: "::1" - # For more information on SRV records see: # https://en.wikipedia.org/wiki/SRV_record - name: Add a SRV record with multiple fields for a service on port 22222 @@ -275,7 +270,6 @@ record: "_example-service._tcp.foo.com" type: SRV value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com" - # Note that TXT and SPF records must be surrounded # by quotes when sent to Route 53: - name: Add a TXT record. @@ -286,7 +280,6 @@ type: TXT ttl: 7200 value: '"bar"' - - name: Add an alias record that points to an Amazon ELB community.aws.route53: state: present @@ -296,7 +289,6 @@ value: "{{ elb_dns_name }}" alias: True alias_hosted_zone_id: "{{ elb_zone_id }}" - - name: Retrieve the details for elb.foo.com community.aws.route53: state: get @@ -304,7 +296,6 @@ record: elb.foo.com type: A register: rec - - name: Delete an alias record using the results from the get command community.aws.route53: state: absent @@ -315,7 +306,6 @@ value: "{{ rec.set.value }}" alias: True alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}" - - name: Add an alias record that points to an Amazon ELB and evaluates it health community.aws.route53: state: present @@ -326,7 +316,6 @@ alias: True alias_hosted_zone_id: "{{ elb_zone_id }}" alias_evaluate_target_health: True - - name: Add an AAAA record with Hosted Zone ID community.aws.route53: state: present @@ -336,7 +325,6 @@ type: AAAA ttl: 7200 value: "::1" - - name: Use a routing policy to distribute traffic community.aws.route53: state: present @@ -349,7 +337,6 @@ identifier: "host1@www" weight: 100 health_check: "d994b780-3150-49fd-9205-356abdd42e75" - - name: Add a CAA record (RFC 6844) community.aws.route53: state: present @@ -360,136 +347,82 @@ - 0 issue "ca.example.net" - 0 issuewild ";" - 0 iodef "mailto:security@example.com" - ''' -import time -import distutils.version +from operator import itemgetter try: - import boto - import boto.ec2 - from boto.route53 import Route53Connection - from boto.route53.record import Record, ResourceRecordSets - from boto.route53.status import Status + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + +MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing +WAIT_RETRY = 5 # how many seconds to wait between propagation status polls + + +@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) +def _list_record_sets(route53, **kwargs): + paginator = route53.get_paginator('list_resource_record_sets') + return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets'] + + +@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) +def _list_hosted_zones(route53, **kwargs): + paginator = route53.get_paginator('list_hosted_zones') + return paginator.paginate(**kwargs).build_full_result()['HostedZones'] -MINIMUM_BOTO_VERSION = '2.28.0' -WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls 
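# The paginated helpers above, wrapped in AWSRetry.jittered_backoff, are the boto3 replacement for the
# hand-rolled invoke_with_throttling_retries() loop removed further down: throttled calls are retried with
# jittered exponential backoff and build_full_result() merges every page. As an illustration only (not part
# of this patch), the same sketch extends to any other paginated Route 53 call, e.g. health checks:
@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
def _list_health_checks(route53, **kwargs):
    # Retry throttling errors, then collect all pages into a single result
    paginator = route53.get_paginator('list_health_checks')
    return paginator.paginate(**kwargs).build_full_result()['HealthChecks']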
+def get_record(route53, zone_id, record_name, record_type, record_identifier): + record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id) + for record_set in record_sets_results: + # If the record name and type is not equal, move to the next record + if (record_name, record_type) != (record_set['Name'], record_set['Type']): + continue -class TimeoutError(Exception): - pass + if record_identifier and record_identifier != record_set.get("SetIdentifier"): + continue + return record_set -def get_zone_id_by_name(conn, module, zone_name, want_private, want_vpc_id): + return None + + +def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): """Finds a zone by name or zone_id""" - for zone in invoke_with_throttling_retries(conn.get_zones): + hosted_zones_results = _list_hosted_zones(route53) + + for zone in hosted_zones_results: # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params - private_zone = module.boolean(zone.config.get('PrivateZone', False)) - if private_zone == want_private and zone.name == zone_name: + private_zone = module.boolean(zone['Config'].get('PrivateZone', False)) + zone_id = zone['Id'].replace("/hostedzone/", "") + + if private_zone == want_private and zone['Name'] == zone_name: if want_vpc_id: # NOTE: These details aren't available in other boto methods, hence the necessary # extra API call - hosted_zone = invoke_with_throttling_retries(conn.get_hosted_zone, zone.id) - zone_details = hosted_zone['GetHostedZoneResponse'] + hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) + zone_details = hosted_zone['HostedZone'] # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 if isinstance(zone_details['VPCs'], dict): if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: - return zone.id + return zone_id else: # Forward compatibility for when boto fixes that bug if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: - return zone.id + return zone_id else: - return zone.id + return zone_id return None -def commit(changes, retry_interval, wait, wait_timeout): - """Commit changes, but retry PriorRequestNotComplete errors.""" - result = None - retry = 10 - while True: - try: - retry -= 1 - result = changes.commit() - break - except boto.route53.exception.DNSServerError as e: - code = e.body.split("")[1] - code = code.split("")[0] - if code != 'PriorRequestNotComplete' or retry < 0: - raise e - time.sleep(float(retry_interval)) - - if wait: - timeout_time = time.time() + wait_timeout - connection = changes.connection - change = result['ChangeResourceRecordSetsResponse']['ChangeInfo'] - status = Status(connection, change) - while status.status != 'INSYNC' and time.time() < timeout_time: - time.sleep(WAIT_RETRY_SLEEP) - status.update() - if time.time() >= timeout_time: - raise TimeoutError() - return result - - -# Shamelessly copied over from https://git.io/vgmDG IGNORE_CODE = 'Throttling' MAX_RETRIES = 5 - - -def invoke_with_throttling_retries(function_ref, *argv, **kwargs): - retries = 0 - while True: - try: - retval = function_ref(*argv, **kwargs) - return retval - except boto.exception.BotoServerError as e: - if e.code != IGNORE_CODE or retries == MAX_RETRIES: - raise e - time.sleep(5 * (2**retries)) - retries += 1 - - -def decode_name(name): - # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round - # tripping of things like * and @.
- return name.encode().decode('unicode_escape') - - -def to_dict(rset, zone_in, zone_id): - record = dict() - record['zone'] = zone_in - record['type'] = rset.type - record['record'] = decode_name(rset.name) - record['ttl'] = str(rset.ttl) - record['identifier'] = rset.identifier - record['weight'] = rset.weight - record['region'] = rset.region - record['failover'] = rset.failover - record['health_check'] = rset.health_check - record['hosted_zone_id'] = zone_id - if rset.alias_dns_name: - record['alias'] = True - record['value'] = rset.alias_dns_name - record['values'] = [rset.alias_dns_name] - record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id - record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health - else: - record['alias'] = False - record['value'] = ','.join(sorted(rset.resource_records)) - record['values'] = sorted(rset.resource_records) - return record - - def main(): argument_spec = dict( state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']), @@ -536,15 +469,8 @@ def main(): region=('identifier',), weight=('identifier',), ), - check_boto3=False, ) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION): - module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION)) - if module.params['state'] in ('present', 'create'): command_in = 'create' elif module.params['state'] in ('absent', 'delete'): @@ -577,8 +503,6 @@ def main(): wait_in = module.params.get('wait') wait_timeout_in = module.params.get('wait_timeout') - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - if zone_in[-1:] != '.': zone_in += "." 
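# main() below builds its ResourceRecordSet via scrub_none_parameters so that optional module parameters
# left unset never reach the API. A minimal sketch of its effect, with illustrative values:
from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
# None-valued keys are dropped, everything else passes through unchanged
assert scrub_none_parameters({'Name': 'new.foo.com.', 'TTL': 7200, 'Failover': None}) == {'Name': 'new.foo.com.', 'TTL': 7200}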
@@ -593,113 +517,106 @@ def main(): # connect to the route53 endpoint try: - conn = Route53Connection(**aws_connect_kwargs) - except boto.exception.BotoServerError as e: - module.fail_json(msg=e.error_message) + route53 = module.client( + 'route53', + retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=retry_interval_in) + ) + except botocore.exceptions.HTTPClientError as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') # Find the named zone ID - zone_id = hosted_zone_id_in or get_zone_id_by_name(conn, module, zone_in, private_zone_in, vpc_id_in) + zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in) # Verify that the requested zone is already defined in Route53 if zone_id is None: errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in) module.fail_json(msg=errmsg) - record = {} - - found_record = False - wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in, - identifier=identifier_in, weight=weight_in, - region=region_in, health_check=health_check_in, - failover=failover_in) - for v in value_in: - if alias_in: - wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in) - else: - wanted_rset.add_value(v) - - need_to_sort_records = (type_in == 'CAA') - - # Sort records for wanted_rset if necessary (keep original list) - unsorted_records = wanted_rset.resource_records - if need_to_sort_records: - wanted_rset.resource_records = sorted(unsorted_records) - - sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone_id, name=record_in, - type=type_in, identifier=identifier_in) - sets_iter = iter(sets) - while True: - try: - rset = invoke_with_throttling_retries(next, sets_iter) - except StopIteration: - break - # Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block - rset.name = decode_name(rset.name) - - if identifier_in is not None: - identifier_in = str(identifier_in) - - if rset.type == type_in and rset.name.lower() == record_in.lower() and rset.identifier == identifier_in: - if need_to_sort_records: - # Sort records - rset.resource_records = sorted(rset.resource_records) - found_record = True - record = to_dict(rset, zone_in, zone_id) - if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml(): - module.exit_json(changed=False) - - # We need to look only at the first rrset returned by the above call, - # so break here. The returned elements begin with the one matching our - # requested name, type, and identifier, if such an element exists, - # followed by all others that come after it in alphabetical order. - # Therefore, if the first set does not match, no subsequent set will - # match either. 
- break + aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in) + + resource_record_set = scrub_none_parameters({ + 'Name': record_in, + 'Type': type_in, + 'Weight': weight_in, + 'Region': region_in, + 'Failover': failover_in, + 'TTL': ttl_in, + 'ResourceRecords': [dict(Value=value) for value in value_in], + 'HealthCheckId': health_check_in, + }) + + if alias_in: + resource_record_set['AliasTarget'] = dict( + HostedZoneId=alias_hosted_zone_id_in, + DNSName=value_in[0], + EvaluateTargetHealth=alias_evaluate_target_health_in + ) + + # On CAA records order doesn't matter + if type_in == 'CAA': + resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value')) + + if command_in == 'create' and aws_record == resource_record_set: + module.exit_json(changed=False) if command_in == 'get': if type_in == 'NS': - ns = record.get('values', []) + ns = aws_record.get('values', []) else: # Retrieve name servers associated to the zone. - z = invoke_with_throttling_retries(conn.get_zone, zone_in) - ns = invoke_with_throttling_retries(z.get_nameservers) + ns = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['DelegationSet']['NameServers'] - module.exit_json(changed=False, set=record, nameservers=ns) + module.exit_json(changed=False, set=aws_record, nameservers=ns) - if command_in == 'delete' and not found_record: + if command_in == 'delete' and not aws_record: module.exit_json(changed=False) - changes = ResourceRecordSets(conn, zone_id) - if command_in == 'create' or command_in == 'delete': - if command_in == 'create' and found_record: + if command_in == 'create' and aws_record: if not module.params['overwrite']: module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it") command = 'UPSERT' else: command = command_in.upper() - # Restore original order of records - wanted_rset.resource_records = unsorted_records - changes.add_change_record(command, wanted_rset) if not module.check_mode: try: - invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in) - except boto.route53.exception.DNSServerError as e: - txt = e.body.split("")[1] - txt = txt.split("")[0] - if "but it already exists" in txt: - module.exit_json(changed=False) - else: - module.fail_json(msg=txt) - except TimeoutError: - module.fail_json(msg='Timeout waiting for changes to replicate') + change_resource_record_sets = route53.change_resource_record_sets( + aws_retry=True, + HostedZoneId=zone_id, + ChangeBatch=dict( + Changes=[ + dict( + Action=command, + ResourceRecordSet=resource_record_set + ) + ] + ) + ) + + if wait_in: + waiter = route53.get_waiter('resource_record_sets_changed') + waiter.wait( + Id=change_resource_record_sets['ChangeInfo']['Id'], + WaiterConfig=dict( + Delay=WAIT_RETRY, + MaxAttempts=wait_timeout_in // WAIT_RETRY, + ) + ) + except is_boto3_error_message('but it already exists'): + module.exit_json(changed=False) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to update records') + except Exception as e: + module.fail_json(msg='Unhandled exception.
(%s)' % to_native(e)) module.exit_json( changed=True, diff=dict( - before=record, - after=to_dict(wanted_rset, zone_in, zone_id) if command != 'delete' else {}, + before=aws_record, + after=resource_record_set if command != 'delete' else {}, ), ) From 690a12b492fbd68a89a3542752cb8f79018c06ca Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Mon, 15 Feb 2021 17:16:28 -0700 Subject: [PATCH 086/683] Handle new sanity checks (#416) Update code where it's easy to do so, add ignores where it will take more work --- ec2_vpc_route_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index cebbed38f8b..cdab10a9d79 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -441,7 +441,7 @@ def index_of_matching_route(route_spec, routes_to_match): def ensure_routes(connection=None, module=None, route_table=None, route_specs=None, propagating_vgw_ids=None, check_mode=None, purge_routes=None): - routes_to_match = [route for route in route_table['Routes']] + routes_to_match = list(route_table['Routes']) route_specs_to_create = [] route_specs_to_recreate = [] for route_spec in route_specs: From 042f808a047009bfdaf231ad8cbe5a57a0e0cdf6 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Tue, 16 Feb 2021 00:26:30 -0700 Subject: [PATCH 087/683] Add boto3 requirements to route53 module docs (#417) --- route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/route53.py b/route53.py index 495be280fc5..43b17a44f5b 100644 --- a/route53.py +++ b/route53.py @@ -13,6 +13,7 @@ --- module: route53 version_added: 1.0.0 +requirements: [ "boto3", "botocore" ] short_description: add or delete entries in Amazons Route 53 DNS service description: - Creates and deletes DNS records in Amazons Route 53 service. From 13f6a616cc1d73cb6f148a8a8a8302e536a7b256 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 19 Feb 2021 00:54:50 +0100 Subject: [PATCH 088/683] Add retries to ec2_vpc_egress_igw (#421) * ec2_vpc_egress_igw - use connection rather than 'conn' for consistency * ec2_vpc_egress_igw - enable retry decorator * changelog --- ec2_vpc_egress_igw.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 0026ade65ad..23c2f86abd0 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -66,20 +66,24 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def delete_eigw(module, conn, eigw_id): +def delete_eigw(module, connection, eigw_id): """ Delete EIGW. 
module : AnsibleAWSModule object - conn : boto3 client connection object + connection : boto3 client connection object eigw_id : ID of the EIGW to delete """ changed = False try: - response = conn.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id) + response = connection.delete_egress_only_internet_gateway( + aws_retry=True, + DryRun=module.check_mode, + EgressOnlyInternetGatewayId=eigw_id) except is_boto3_error_code('DryRunOperation'): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -91,19 +95,22 @@ def delete_eigw(module, conn, eigw_id): return changed -def create_eigw(module, conn, vpc_id): +def create_eigw(module, connection, vpc_id): """ Create EIGW. module : AnsibleAWSModule object - conn : boto3 client connection object + connection : boto3 client connection object vpc_id : ID of the VPC we are operating on """ gateway_id = None changed = False try: - response = conn.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id) + response = connection.create_egress_only_internet_gateway( + aws_retry=True, + DryRun=module.check_mode, + VpcId=vpc_id) except is_boto3_error_code('DryRunOperation'): # When boto3 method is run with DryRun=True it returns an error on success # We need to catch the error and return something valid @@ -128,18 +135,19 @@ def create_eigw(module, conn, vpc_id): return changed, gateway_id -def describe_eigws(module, conn, vpc_id): +def describe_eigws(module, connection, vpc_id): """ Describe EIGWs. module : AnsibleAWSModule object - conn : boto3 client connection object + connection : boto3 client connection object vpc_id : ID of the VPC we are operating on """ gateway_id = None try: - response = conn.describe_egress_only_internet_gateways() + response = connection.describe_egress_only_internet_gateways( + aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") @@ -159,7 +167,8 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + retry_decorator = AWSRetry.jittered_backoff(retries=10) + connection = module.client('ec2', retry_decorator=retry_decorator) vpc_id = module.params.get('vpc_id') state = module.params.get('state') From dddb8d96c1e0f29e85e8897c78b2ddd370f3d56f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 19 Feb 2021 00:55:39 +0100 Subject: [PATCH 089/683] Enable AWSRetry on aws_region_info (#422) * Enable AWSRetry on aws_region_info * changelog --- aws_region_info.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aws_region_info.py b/aws_region_info.py index d0b74e3f112..bedb8a5f1fa 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -58,7 +58,9 @@ ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import ClientError, BotoCoreError @@ -86,6 +88,7 @@ def main(): try: regions = 
connection.describe_regions( + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) ) except (BotoCoreError, ClientError) as e: From aa77e7fc68bc3e061837d91927273f5f0fba6145 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 19 Feb 2021 23:03:46 +0100 Subject: [PATCH 090/683] Cleanup lambda_alias (#396) * Update lambda_alias to use AnsibleAWSModule.client * Update lambda_alias to use fail_json_aws * Replace custom snake/camel conversion * lambda_alias replace use of AWSConnection with passing a standard (wrapped) boto3 connection * Enable Retries * Fix idempotency when description isn't set. * Don't throw an exception when attempting to create a new alias in check mode * Add revision_id to return docs * Add integration tests * add changelog --- lambda_alias.py | 116 ++++++++++++++---------------------------------- 1 file changed, 34 insertions(+), 82 deletions(-) diff --git a/lambda_alias.py b/lambda_alias.py index aeacb6e3b75..9ccfbef7ea6 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -139,6 +139,11 @@ returned: success type: str sample: dev +revision_id: + description: A unique identifier that changes when you update the alias. + returned: success + type: str + sample: 12345678-1234-1234-1234-123456789abc ''' import re @@ -149,67 +154,16 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - - -class AWSConnection: - """ - Create the connection object and client objects as required. - """ - - def __init__(self, ansible_obj, resources, boto3_=True): - - try: - self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3_) - - self.resource_client = dict() - if not resources: - resources = ['lambda'] - - resources.append('iam') - - for resource in resources: - aws_connect_kwargs.update(dict(region=self.region, - endpoint=self.endpoint, - conn_type='client', - resource=resource - )) - self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) - - # if region is not provided, then get default profile/session region - if not self.region: - self.region = self.resource_client['lambda'].meta.region_name - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) - - try: - self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ValueError, KeyError, IndexError): - self.account_id = '' - - def client(self, resource='lambda'): - return self.resource_client[resource] - - -def pc(key): - """ - Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. - - :param key: - :return: - """ - - return "".join([token.capitalize() for token in key.split('_')]) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def set_api_params(module, module_params): """ - Sets module parameters to those expected by the boto3 API. 
+ Sets non-None module parameters to those expected by the boto3 API. :param module: :param module_params: @@ -221,17 +175,16 @@ def set_api_params(module, module_params): for param in module_params: module_param = module.params.get(param, None) if module_param: - api_params[pc(param)] = module_param + api_params[param] = module_param - return api_params + return snake_dict_to_camel_dict(api_params, capitalize_first=True) -def validate_params(module, aws): +def validate_params(module): """ Performs basic parameter validation. - :param module: Ansible module reference - :param aws: AWS client connection + :param module: AnsibleAWSModule reference :return: """ @@ -254,23 +207,21 @@ def validate_params(module, aws): return -def get_lambda_alias(module, aws): +def get_lambda_alias(module, client): """ Returns the lambda function alias if it exists. - :param module: Ansible module reference - :param aws: AWS client connection + :param module: AnsibleAWSModule + :param client: (wrapped) boto3 lambda client :return: """ - client = aws.client('lambda') - # set API parameters api_params = set_api_params(module, ('function_name', 'name')) # check if alias exists and get facts try: - results = client.get_alias(**api_params) + results = client.get_alias(aws_retry=True, **api_params) except is_boto3_error_code('ResourceNotFoundException'): results = None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -279,31 +230,33 @@ def get_lambda_alias(module, aws): return results -def lambda_alias(module, aws): +def lambda_alias(module, client): """ Adds, updates or deletes lambda function aliases. - :param module: Ansible module reference - :param aws: AWS client connection + :param module: AnsibleAWSModule + :param client: (wrapped) boto3 lambda client :return dict: """ - client = aws.client('lambda') results = dict() changed = False current_state = 'absent' state = module.params['state'] - facts = get_lambda_alias(module, aws) + facts = get_lambda_alias(module, client) if facts: current_state = 'present' if state == 'present': if current_state == 'present': + snake_facts = camel_dict_to_snake_dict(facts) # check if alias has changed -- only version and description can change alias_params = ('function_version', 'description') for param in alias_params: - if module.params.get(param) != facts.get(pc(param)): + if module.params.get(param) is None: + continue + if module.params.get(param) != snake_facts.get(param): changed = True break @@ -313,9 +266,9 @@ def lambda_alias(module, aws): if not module.check_mode: try: - results = client.update_alias(**api_params) + results = client.update_alias(aws_retry=True, **api_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg='Error updating function alias: {0}'.format(e)) + module.fail_json_aws(e, msg='Error updating function alias') else: # create new function alias @@ -323,10 +276,10 @@ def lambda_alias(module, aws): try: if not module.check_mode: - results = client.create_alias(**api_params) + results = client.create_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg='Error creating function alias: {0}'.format(e)) + module.fail_json_aws(e, msg='Error creating function alias') else: # state = 'absent' if current_state == 'present': @@ -335,12 +288,12 @@ def lambda_alias(module, aws): try: if not module.check_mode: - results = 
client.delete_alias(**api_params) + results = client.delete_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg='Error deleting function alias: {0}'.format(e)) + module.fail_json_aws(e, msg='Error deleting function alias') - return dict(changed=changed, **dict(results or facts)) + return dict(changed=changed, **dict(results or facts or {})) def main(): @@ -364,11 +317,10 @@ def main(): required_together=[], ) - aws = AWSConnection(module, ['lambda']) - - validate_params(module, aws) + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - results = lambda_alias(module, aws) + validate_params(module) + results = lambda_alias(module, client) module.exit_json(**camel_dict_to_snake_dict(results)) From f2f20047b81f60ec06b0fdfe42e0b1f1ec8ae1b0 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 23 Feb 2021 14:35:02 +0100 Subject: [PATCH 091/683] improve ec2_vpc_nat_gateway stability (#427) * attempt to add more information when failures occur * Use pagination and add more retry wrappers * Mark ec2_vpc_nat_gateway stable again. Using paginator and retries seems to have fixed things * Add changelog --- ec2_vpc_nat_gateway.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index c4e3cbfd797..0fdd6c96b6d 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -278,6 +278,18 @@ DRY_RUN_MSGS = 'DryRun Mode:' +@AWSRetry.jittered_backoff(retries=10) +def _describe_addresses(client, **params): + paginator = client.get_paginator('describe_addresses') + return paginator.paginate(**params).build_full_result()['Addresses'] + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_nat_gateways(client, **params): + paginator = client.get_paginator('describe_nat_gateways') + return paginator.paginate(**params).build_full_result()['NatGateways'] + + def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, states=None, check_mode=False): """Retrieve a list of NAT Gateways @@ -339,7 +351,7 @@ def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, try: if not check_mode: - gateways = client.describe_nat_gateways(**params)['NatGateways'] + gateways = _describe_nat_gateways(client, **params) if gateways: for gw in gateways: existing_gateways.append(camel_dict_to_snake_dict(gw)) @@ -534,7 +546,7 @@ def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): err_msg = "" try: if not check_mode: - allocations = client.describe_addresses(**params)['Addresses'] + allocations = _describe_addresses(client, **params) if len(allocations) == 1: allocation = allocations[0] else: @@ -597,7 +609,7 @@ def allocate_eip_address(client, check_mode=False): ) new_eip = 'eipalloc-{0}'.format(random_numbers) else: - new_eip = client.allocate_address(**params)['AllocationId'] + new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] ip_allocated = True err_msg = 'eipalloc id {0} created'.format(new_eip) @@ -632,14 +644,14 @@ def release_address(client, allocation_id, check_mode=False): ip_released = False try: - client.describe_addresses(AllocationIds=[allocation_id]) + _describe_addresses(client, aws_retry=True, AllocationIds=[allocation_id]) except botocore.exceptions.ClientError as e: # IP address likely already released # Happens with gateway in 'deleted' state that # still lists associations return True, str(e) try: - 
client.release_address(AllocationId=allocation_id) + client.release_address(aws_retry=True, AllocationId=allocation_id) ip_released = True except botocore.exceptions.ClientError as e: err_msg = str(e) @@ -712,7 +724,7 @@ def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_to try: if not check_mode: - result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"]) + result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]) else: result = DRY_RUN_GATEWAYS[0] result['create_time'] = datetime.datetime.utcnow() @@ -939,7 +951,7 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0, if exist and len(gw) == 1: results = gw[0] if not check_mode: - client.delete_nat_gateway(**params) + client.delete_nat_gateway(aws_retry=True, **params) allocation_id = ( results['nat_gateway_addresses'][0]['allocation_id'] @@ -1102,8 +1114,9 @@ def main(): ) if not success: + results = results or {} module.fail_json( - msg=err_msg, success=success, changed=changed + msg=err_msg, success=success, changed=changed, **results ) else: module.exit_json( From 4f4cd1c87b2bf7b3c184f97a460780f1e2a54493 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 25 Feb 2021 01:21:24 +0100 Subject: [PATCH 092/683] move describe_addresses call back to non-paginated (pagination not supported) (#441) --- ec2_vpc_nat_gateway.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 0fdd6c96b6d..428f82b392b 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -278,12 +278,6 @@ DRY_RUN_MSGS = 'DryRun Mode:' -@AWSRetry.jittered_backoff(retries=10) -def _describe_addresses(client, **params): - paginator = client.get_paginator('describe_addresses') - return paginator.paginate(**params).build_full_result()['Addresses'] - - @AWSRetry.jittered_backoff(retries=10) def _describe_nat_gateways(client, **params): paginator = client.get_paginator('describe_nat_gateways') @@ -546,7 +540,7 @@ def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): err_msg = "" try: if not check_mode: - allocations = _describe_addresses(client, **params) + allocations = client.describe_addresses(aws_retry=True, **params) if len(allocations) == 1: allocation = allocations[0] else: @@ -644,7 +638,7 @@ def release_address(client, allocation_id, check_mode=False): ip_released = False try: - _describe_addresses(client, aws_retry=True, AllocationIds=[allocation_id]) + client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id]) except botocore.exceptions.ClientError as e: # IP address likely already released # Happens with gateway in 'deleted' state that From 35f54802c1f0f69f94a8ed93a828a680e3fe0b3a Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Thu, 25 Feb 2021 21:11:13 +0100 Subject: [PATCH 093/683] ec2_vpc_nat_gateway_info: add retry decorator (#446) * Solve RequestLimitExceeded error by adding the retry decorator Signed-off-by: Alina Buzachis Co-authored-by: Alina Buzachis --- ec2_vpc_nat_gateway_info.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 97816c72362..7d31eeac993 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -84,6 +84,7 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.ec2 
import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list @@ -102,7 +103,7 @@ def get_nat_gateways(client, module, nat_gateway_id=None): params['NatGatewayIds'] = module.params.get('nat_gateway_ids') try: - result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler)) + result = json.loads(json.dumps(client.describe_nat_gateways(aws_retry=True, **params), default=date_handler)) except Exception as e: module.fail_json(msg=to_native(e)) @@ -131,7 +132,7 @@ def main(): date='2021-12-01', collection_name='community.aws') try: - connection = module.client('ec2') + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From b2066542a697e5f7ea53d5b0ea439f47cdc9eee5 Mon Sep 17 00:00:00 2001 From: Damien Levac Date: Sun, 7 Mar 2021 10:22:58 -0500 Subject: [PATCH 094/683] Added support for 'vpc_endpoint_type'. (#460) * Added support for 'vpc_endpoint_type'. * Integration test for the 'vpc_endpoint_type' feature. * Added choices in documentation. * Added changelog. --- ec2_vpc_endpoint.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 28d0fda0eba..d7d10769f06 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -21,6 +21,13 @@ - Required when creating a VPC endpoint. required: false type: str + vpc_endpoint_type: + description: + - The type of endpoint. + required: false + default: Gateway + choices: [ "Interface", "Gateway", "GatewayLoadBalancer" ] + type: str service: description: - An AWS supported vpc endpoint service. Use the M(community.aws.ec2_vpc_endpoint_info) @@ -56,7 +63,7 @@ - absent to remove resource required: false default: present - choices: [ "present", "absent"] + choices: [ "present", "absent" ] type: str wait: description: @@ -251,6 +258,7 @@ def create_vpc_endpoint(client, module): changed = False token_provided = False params['VpcId'] = module.params.get('vpc_id') + params['VpcEndpointType'] = module.params.get('vpc_endpoint_type') params['ServiceName'] = module.params.get('service') params['DryRun'] = module.check_mode @@ -334,6 +342,7 @@ def setup_removal(client, module): def main(): argument_spec = dict( vpc_id=dict(), + vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']), service=dict(), policy=dict(type='json'), policy_file=dict(type='path', aliases=['policy_path']), From 80f66363766aa642a77937c83f26825226c2fbae Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 Mar 2021 09:14:27 +0100 Subject: [PATCH 095/683] Fix version_added and changelog from #460 (#465) --- ec2_vpc_endpoint.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index d7d10769f06..d15da3b2a79 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -28,6 +28,7 @@ default: Gateway choices: [ "Interface", "Gateway", "GatewayLoadBalancer" ] type: str + version_added: 1.5.0 service: description: - An AWS supported vpc endpoint service. 
Use the M(community.aws.ec2_vpc_endpoint_info) From d8b5e86725c0f8e81c51e5527f1694cd1904face Mon Sep 17 00:00:00 2001 From: Nicolas Boutet Date: Mon, 8 Mar 2021 13:47:03 +0100 Subject: [PATCH 096/683] ec2_vpc_nacl: add IPv6 support (#398) --- ec2_vpc_nacl.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 387ceb48f26..da053f55a46 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -43,8 +43,8 @@ egress: description: - A list of rules for outgoing traffic. Each rule must be specified as a list. - Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']), - the rule action ('allow' or 'deny') the CIDR of the IPv4 network range to allow or deny, + Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']), + the rule action ('allow' or 'deny') the CIDR of the IPv4 or IPv6 network range to allow or deny, the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the last port in the range for TCP or UDP protocols, and the first port in the range for TCP or UDP protocols. See examples. @@ -55,8 +55,8 @@ ingress: description: - List of rules for incoming traffic. Each rule must be specified as a list. - Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']), - the rule action ('allow' or 'deny') the CIDR of the IPv4 network range to allow or deny, + Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']), + the rule action ('allow' or 'deny') the CIDR of the IPv4 or IPv6 network range to allow or deny, the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the last port in the range for TCP or UDP protocols, and the first port in the range for TCP or UDP protocols. See examples. 
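# As an aside, the rule-entry-to-API mapping that this patch extends can be
# sketched roughly as follows (illustrative only, not part of the patch;
# rule_to_params is a hypothetical name, the rule layout follows the
# documentation above, and the port/ICMP fields are omitted for brevity):
#
#   PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58}
#
#   def rule_to_params(rule, egress):
#       # rule = [number, protocol, action, cidr, icmp_type, icmp_code, port_from, port_to]
#       params = {
#           'RuleNumber': rule[0],
#           'Protocol': str(PROTOCOL_NUMBERS[rule[1]]),
#           'RuleAction': rule[2],
#           'Egress': egress,
#       }
#       # an IPv6 CIDR contains ':' and is passed as Ipv6CidrBlock, not CidrBlock
#       if ':' in rule[3]:
#           params['Ipv6CidrBlock'] = rule[3]
#       else:
#           params['CidrBlock'] = rule[3]
#       return params
#
#   rule_to_params([205, 'tcp', 'allow', '::/0', None, None, 80, 80], False)
#   # -> {'RuleNumber': 205, 'Protocol': '6', 'RuleAction': 'allow',
#   #     'Egress': False, 'Ipv6CidrBlock': '::/0'}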
@@ -104,9 +104,12 @@ # port from, port to - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22] - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80] + - [205, 'tcp', 'allow', '::/0', null, null, 80, 80] - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8] + - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8] egress: - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null] + - [105, 'all', 'allow', '::/0', null, null, null, null] state: 'present' - name: "Remove the ingress and egress rules - defaults to deny all" @@ -163,12 +166,12 @@ # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, } +PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58} # Utility methods def icmp_present(entry): - if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1: + if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]: return True @@ -291,13 +294,20 @@ def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): return changed +def is_ipv6(cidr): + return ':' in cidr + + def process_rule_entry(entry, Egress): params = dict() params['RuleNumber'] = entry[0] params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) params['RuleAction'] = entry[2] params['Egress'] = Egress - params['CidrBlock'] = entry[3] + if is_ipv6(entry[3]): + params['Ipv6CidrBlock'] = entry[3] + else: + params['CidrBlock'] = entry[3] if icmp_present(entry): params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} else: From 13b896657393d27a20fd3938214e9a1c1eead625 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 Mar 2021 14:15:46 +0100 Subject: [PATCH 097/683] iam_saml_federation - return details of provider when no changes are made (#419) * iam_saml_federation - return details of provider when no changes are made. * iam_saml_federation - enable integration tests --- iam_saml_federation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 214cbe74179..895631b7e05 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -179,6 +179,8 @@ def create_or_update_saml_provider(self, name, metadata): res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name)) + else: + res['saml_provider'] = self._build_res(arn) else: # create res['changed'] = True From e680559a037317bf50c1c7305b6044afd2ab5b95 Mon Sep 17 00:00:00 2001 From: Ross Williams Date: Mon, 8 Mar 2021 15:41:45 -0500 Subject: [PATCH 098/683] sns_topic: Allow canonical international-format phone numbers in SMS subscriptions (#454) * Update sns_topic.py Adds `+` to the list of acceptable characters in an SMS endpoint. Closes #453. 
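For illustration, a minimal standalone sketch of the behaviour change (the
phone number below is made up):

```python
import re

number = '+1 (555) 010-0000'

# old pattern drops the leading '+', losing the international prefix
re.sub('[^0-9]*', '', number)   # -> '15550100000'

# new pattern keeps '+', preserving the E.164 form
re.sub('[^0-9+]*', '', number)  # -> '+15550100000'
```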
* Add changelog fragment for #454 * sns_topic: comment explaining SMS canonicalization Add comment documenting to what standard SMS endpoint addresses (phone numbers) are canonicalized * sns_topic: fix changelog * Get quoting correct * Simplify message to leave details in PR description Co-authored-by: Mark Chappell Co-authored-by: Mark Chappell --- sns_topic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sns_topic.py b/sns_topic.py index 79070cbabc5..1be60a38ec9 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -349,8 +349,11 @@ def _set_topic_attrs(self): return changed def _canonicalize_endpoint(self, protocol, endpoint): + # AWS SNS expects phone numbers in + # and canonicalizes to E.164 format + # See if protocol == 'sms': - return re.sub('[^0-9]*', '', endpoint) + return re.sub('[^0-9+]*', '', endpoint) return endpoint def _set_topic_subs(self): From 2c8b4d24c7763e81c194df1e9f8bfb6df956d87f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 10 Mar 2021 11:42:32 +0100 Subject: [PATCH 099/683] Cleanup ec2_vpc_route_table(_info) (#442) * Move ec2_vpc_route_table tests into group 4 * ec2_vpc_route_table - Use retries more consistently. * ec2_vpc_route_table_info - boto3 migration * changelog * Add return value documentation * catch WaiterError for cleaner error messages --- ec2_vpc_route_table.py | 103 +++++++++------ ec2_vpc_route_table_info.py | 243 +++++++++++++++++++++++++++++------- 2 files changed, 265 insertions(+), 81 deletions(-) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index cdab10a9d79..1ef10e89ceb 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -250,9 +250,33 @@ ROUTE_TABLE_RE = re.compile(r'^rtb-[A-z0-9]+$') -@AWSRetry.exponential_backoff() +@AWSRetry.jittered_backoff() def describe_subnets_with_backoff(connection, **params): - return connection.describe_subnets(**params)['Subnets'] + paginator = connection.get_paginator('describe_subnets') + return paginator.paginate(**params).build_full_result()['Subnets'] + + +@AWSRetry.jittered_backoff() +def describe_igws_with_backoff(connection, **params): + paginator = connection.get_paginator('describe_internet_gateways') + return paginator.paginate(**params).build_full_result()['InternetGateways'] + + +@AWSRetry.jittered_backoff() +def describe_tags_with_backoff(connection, resource_id): + filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id}) + paginator = connection.get_paginator('describe_tags') + tags = paginator.paginate(Filters=filters).build_full_result()['Tags'] + return boto3_tag_list_to_ansible_dict(tags) + + +@AWSRetry.jittered_backoff() +def describe_route_tables_with_backoff(connection, **params): + try: + paginator = connection.get_paginator('describe_route_tables') + return paginator.paginate(**params).build_full_result()['RouteTables'] + except is_boto3_error_code('InvalidRouteTableID.NotFound'): + return None def find_subnets(connection, module, vpc_id, identified_subnets): @@ -314,7 +338,7 @@ def find_igw(connection, module, vpc_id): """ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) try: - igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways'] + igw = describe_igws_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) if len(igw) == 1: @@ -325,14 +349,6 @@ def find_igw(connection, module, vpc_id): module.fail_json(msg='Multiple IGWs 
found for VPC {0}'.format(vpc_id)) -@AWSRetry.exponential_backoff() -def describe_tags_with_backoff(connection, resource_id): - filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id}) - paginator = connection.get_paginator('describe_tags') - tags = paginator.paginate(Filters=filters).build_full_result()['Tags'] - return boto3_tag_list_to_ansible_dict(tags) - - def tags_match(match_tags, candidate_tags): return all((k in candidate_tags and candidate_tags[k] == v for k, v in match_tags.items())) @@ -355,12 +371,18 @@ def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge if to_delete: try: - connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete]) + connection.delete_tags( + aws_retry=True, + Resources=[resource_id], + Tags=[{'Key': k} for k in to_delete]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags") if to_add: try: - connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add)) + connection.create_tags( + aws_retry=True, + Resources=[resource_id], + Tags=ansible_dict_to_boto3_tag_list(to_add)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create tags") @@ -371,14 +393,6 @@ def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge return {'changed': True, 'tags': latest_tags} -@AWSRetry.exponential_backoff() -def describe_route_tables_with_backoff(connection, **params): - try: - return connection.describe_route_tables(**params)['RouteTables'] - except is_boto3_error_code('InvalidRouteTableID.NotFound'): - return None - - def get_route_table_by_id(connection, module, route_table_id): route_table = None @@ -474,21 +488,28 @@ def ensure_routes(connection=None, module=None, route_table=None, route_specs=No if changed and not check_mode: for route in routes_to_delete: try: - connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock']) + connection.delete_route( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + DestinationCidrBlock=route['DestinationCidrBlock']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete route") for route_spec in route_specs_to_recreate: try: - connection.replace_route(RouteTableId=route_table['RouteTableId'], - **route_spec) + connection.replace_route( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + **route_spec) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't recreate route") for route_spec in route_specs_to_create: try: - connection.create_route(RouteTableId=route_table['RouteTableId'], - **route_spec) + connection.create_route( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + **route_spec) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create route") @@ -515,12 +536,15 @@ def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_t if check_mode: return {'changed': True} try: - connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId']) + connection.disassociate_route_table( + aws_retry=True, AssociationId=a['RouteTableAssociationId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 
module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") try: - association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id) + association_id = connection.associate_route_table(aws_retry=True, + RouteTableId=route_table_id, + SubnetId=subnet_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't associate subnet with route table") return {'changed': True, 'association_id': association_id} @@ -532,8 +556,10 @@ def ensure_subnet_associations(connection=None, module=None, route_table=None, s new_association_ids = [] changed = False for subnet in subnets: - result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'], - route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode) + result = ensure_subnet_association( + connection=connection, module=module, vpc_id=route_table['VpcId'], + route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], + check_mode=check_mode) changed = changed or result['changed'] if changed and check_mode: return {'changed': True} @@ -547,7 +573,7 @@ def ensure_subnet_associations(connection=None, module=None, route_table=None, s changed = True if not check_mode: try: - connection.disassociate_route_table(AssociationId=a_id) + connection.disassociate_route_table(aws_retry=True, AssociationId=a_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") @@ -564,8 +590,10 @@ def ensure_propagation(connection=None, module=None, route_table=None, propagati if not check_mode: for vgw_id in to_add: try: - connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'], - GatewayId=vgw_id) + connection.enable_vgw_route_propagation( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + GatewayId=vgw_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't enable route propagation") @@ -596,7 +624,7 @@ def ensure_route_table_absent(connection, module): ensure_subnet_associations(connection=connection, module=module, route_table=route_table, subnets=[], check_mode=False, purge_subnets=purge_subnets) try: - connection.delete_route_table(RouteTableId=route_table['RouteTableId']) + connection.delete_route_table(aws_retry=True, RouteTableId=route_table['RouteTableId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error deleting route table") @@ -665,13 +693,15 @@ def ensure_route_table_present(connection, module): changed = True if not module.check_mode: try: - route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable'] + route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable'] # try to wait for route table to be present before moving on get_waiter( connection, 'route_table_exists' ).wait( RouteTableIds=[route_table['RouteTableId']], ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout waiting for route table creation') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating route table") else: @@ -730,7 +760,8 @@ def main(): ['state', 'present', ['vpc_id']]], supports_check_mode=True) - connection = module.client('ec2') + retry_decorator = 
AWSRetry.jittered_backoff(retries=10)
+ connection = module.client('ec2', retry_decorator=retry_decorator)

 state = module.params.get('state')

diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py
index 9ff9959c271..2e4dd384930 100644
--- a/ec2_vpc_route_table_info.py
+++ b/ec2_vpc_route_table_info.py
@@ -6,7 +6,7 @@
 __metaclass__ = type

-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: ec2_vpc_route_table_info
version_added: 1.0.0
@@ -14,7 +14,9 @@
description:
 - Gather information about ec2 VPC route tables in AWS
 - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change.
-author: "Rob White (@wimnat)"
+author:
+- "Rob White (@wimnat)"
+- "Mark Chappell (@tremble)"
options:
 filters:
 description:
@@ -27,7 +29,7 @@
'''

-EXAMPLES = '''
+EXAMPLES = r'''
# Note: These examples do not set authentication details, see the AWS Guide for details.

- name: Gather information about all VPC route tables
@@ -47,56 +49,218 @@
 community.aws.ec2_vpc_route_table_info:
 filters:
 vpc-id: vpc-abcdef00
+'''

+RETURN = r'''
+route_tables:
+ description:
+ - A list of dictionaries describing route tables
+ - See also U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_route_tables)
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of subnets associated with the route table
+ returned: always
+ type: complex
+ contains:
+ main:
+ description: Whether this is the main route table
+ returned: always
+ type: bool
+ sample: false
+ id:
+ description: ID of association between route table and subnet
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_association_id:
+ description: ID of association between route table and subnet
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet
+ returned: always
+ type: str
+ sample: subnet-82055af9
+ association_state:
+ description: The state of the association
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the association
+ returned: always
+ type: str
+ sample: associated
+ state_message:
+ description: Additional information about the state of the association
+ returned: when available
+ type: str
+ sample: 'Creating association'
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility)
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ owner_id:
+ description: ID of the account which owns the route table
+ returned: always
+ type: str
+ sample: '012345678912'
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: CIDR block of destination
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ gateway_id:
+ description: ID of the gateway
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description:
+ - ID of a NAT instance.
+ - Empty unless the route is via an EC2 instance
+ returned: always
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description:
+ - AWS account owning the NAT instance
+ - Empty unless the route is via an EC2 instance
+ returned: always
+ type: str
+ sample: 123456789012
+ network_interface_id:
+ description:
+ - The ID of the network interface
+ - Empty unless the route is via an EC2 instance
+ returned: always
+ type: str
+ sample: 123456789012
+ nat_gateway_id:
+ description: ID of the NAT gateway
+ returned: when the route is via a NAT gateway
+ type: str
+ sample: local
+ origin:
+ description: mechanism through which the route is in the table
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+ description: state of the route
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
'''

try:
- import boto.vpc
- from boto.exception import BotoServerError
+ import botocore
except ImportError:
- pass # Handled by HAS_BOTO
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff()
+def describe_route_tables_with_backoff(connection, **params):
+ try:
+ paginator = connection.get_paginator('describe_route_tables')
+ return paginator.paginate(**params).build_full_result()
+ except is_boto3_error_code('InvalidRouteTableID.NotFound'):
+ return None
+
+def normalize_route(route):
+ # Historically these were all there, but set to null when empty
+ for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId',
+ 'Origin', 'State', 'NetworkInterfaceId']:
+ if legacy_key not in route:
+ route[legacy_key] = None
+ route['InterfaceId'] = route['NetworkInterfaceId']
+ return route

-def get_route_table_info(route_table):
- # Add any routes to array
- routes = []
- associations = []
- for route in route_table.routes:
- routes.append(route.__dict__)
- for association in route_table.associations:
- associations.append(association.__dict__)
+def normalize_association(assoc):
+ # Name change between boto v2 and boto v3, return both
+ assoc['Id'] = assoc['RouteTableAssociationId']
+ return assoc

- route_table_info = {'id': route_table.id,
- 'routes': routes,
- 'associations': associations,
- 'tags': route_table.tags,
- 'vpc_id': route_table.vpc_id
- }
- return route_table_info
+def normalize_route_table(table):
+ table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags'])
+ table['Associations'] = [normalize_association(assoc) for assoc in
table['Associations']] + table['Routes'] = [normalize_route(route) for route in table['Routes']] + table['Id'] = table['RouteTableId'] + del table['Tags'] + return camel_dict_to_snake_dict(table, ignore_list=['tags']) + + +def normalize_results(results): + """ + We used to be a boto v2 module, make sure that the old return values are + maintained and the shape of the return values are what people expect + """ + + routes = [normalize_route_table(route) for route in results['RouteTables']] + del results['RouteTables'] + results = camel_dict_to_snake_dict(results) + results['route_tables'] = routes + return results def list_ec2_vpc_route_tables(connection, module): - filters = module.params.get("filters") - route_table_dict_array = [] + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: - all_route_tables = connection.get_all_route_tables(filters=filters) - except BotoServerError as e: + results = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get route tables") - for route_table in all_route_tables: - route_table_dict_array.append(get_route_table_info(route_table)) - - module.exit_json(route_tables=route_table_dict_array) + results = normalize_results(results) + module.exit_json(changed=False, **results) def main(): @@ -110,18 +274,7 @@ def main(): module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", date='2021-12-01', collection_name='community.aws') - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if region: - try: - connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json(msg=str(e)) - else: - module.fail_json(msg="region must be specified") + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) list_ec2_vpc_route_tables(connection, module) From 4393181fc904cddc6e45c0617a9fd7757ed6e05c Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Wed, 10 Mar 2021 11:47:13 +0100 Subject: [PATCH 100/683] [ec2_vpc_nat_gateway] Fix broken check_mode (#436) * ec2_vpc_nat_gateway: fix broken check_mode * fix broken check_mode (remove hard coded values) Signed-off-by: Alina Buzachis --- ec2_vpc_nat_gateway.py | 127 +++++++++++++++-------------------------- 1 file changed, 46 insertions(+), 81 deletions(-) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 428f82b392b..11c271434d9 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -247,36 +247,6 @@ from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native -DRY_RUN_GATEWAYS = [ - { - "nat_gateway_id": "nat-123456789", - "subnet_id": "subnet-123456789", - "nat_gateway_addresses": [ - { - "public_ip": "55.55.55.55", - "network_interface_id": "eni-1234567", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-1234567" - } - ], - "state": "available", - "create_time": "2016-03-05T05:19:20.282000+00:00", - "vpc_id": "vpc-12345678" - } -] - -DRY_RUN_ALLOCATION_UNCONVERTED = { - 'Addresses': [ - { - 'PublicIp': '55.55.55.55', - 'Domain': 'vpc', - 'AllocationId': 'eipalloc-1234567' - } - ] -} - -DRY_RUN_MSGS = 'DryRun Mode:' - @AWSRetry.jittered_backoff(retries=10) def 
_describe_nat_gateways(client, **params): @@ -344,22 +314,11 @@ def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, ] try: - if not check_mode: - gateways = _describe_nat_gateways(client, **params) - if gateways: - for gw in gateways: - existing_gateways.append(camel_dict_to_snake_dict(gw)) - gateways_retrieved = True - else: - gateways_retrieved = True - if nat_gateway_id: - if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id: - existing_gateways = DRY_RUN_GATEWAYS - elif subnet_id: - if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id: - existing_gateways = DRY_RUN_GATEWAYS - err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS) - + gateways = _describe_nat_gateways(client, **params) + if gateways: + for gw in gateways: + existing_gateways.append(camel_dict_to_snake_dict(gw)) + gateways_retrieved = True except botocore.exceptions.ClientError as e: err_msg = str(e) @@ -422,8 +381,6 @@ def wait_for_status(client, wait_timeout, nat_gateway_id, status, ) if gws_retrieved and nat_gateways: nat_gateway = nat_gateways[0] - if check_mode: - nat_gateway['state'] = status if nat_gateway.get('state') == status: status_achieved = True @@ -500,6 +457,7 @@ def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, client, subnet_id, states=states, check_mode=check_mode ) ) + if not gws_retrieved: return gateways, allocation_id_exists for gw in gws: @@ -538,21 +496,14 @@ def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): } allocation_id = None err_msg = "" + try: - if not check_mode: - allocations = client.describe_addresses(aws_retry=True, **params) - if len(allocations) == 1: - allocation = allocations[0] - else: - allocation = None + allocations = client.describe_addresses(aws_retry=True, **params)['Addresses'] + if len(allocations) == 1: + allocation = allocations[0] else: - dry_run_eip = ( - DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp'] - ) - if dry_run_eip == eip_address: - allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0] - else: - allocation = None + allocation = None + if allocation: if allocation.get('Domain') != 'vpc': err_msg = ( @@ -595,16 +546,15 @@ def allocate_eip_address(client, check_mode=False): params = { 'Domain': 'vpc', } + + if check_mode: + ip_allocated = True + new_eip = None + return ip_allocated, err_msg, new_eip + try: - if check_mode: - ip_allocated = True - random_numbers = ( - ''.join(str(x) for x in random.sample(range(0, 9), 7)) - ) - new_eip = 'eipalloc-{0}'.format(random_numbers) - else: - new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] - ip_allocated = True + new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] + ip_allocated = True err_msg = 'eipalloc id {0} created'.format(new_eip) except botocore.exceptions.ClientError as e: @@ -633,6 +583,7 @@ def release_address(client, allocation_id, check_mode=False): Boolean, string """ err_msg = '' + if check_mode: return True, '' @@ -711,22 +662,24 @@ def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_to success = False token_provided = False err_msg = "" + result = {} if client_token: token_provided = True params['ClientToken'] = client_token + if check_mode: + success = True + changed = True + return success, changed, err_msg, result + try: - if not check_mode: - result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]) - else: - result = DRY_RUN_GATEWAYS[0] - result['create_time'] = datetime.datetime.utcnow() - 
result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id
- result['subnet_id'] = subnet_id
 success = True
 changed = True
+ create_time = result['create_time'].replace(tzinfo=None)
+
 if token_provided and (request_time > create_time):
 changed = False
 elif wait:
@@ -815,10 +768,11 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None,
 success = False
 changed = False
 err_msg = ""
- results = list()
+ results = {}

 if not allocation_id and not eip_address:
 existing_gateways, allocation_id_exists = (gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode))
+
 if len(existing_gateways) > 0 and if_exist_do_not_create:
 results = existing_gateways[0]
 results['tags'], tags_update_exists = ensure_tags(client, module, results['nat_gateway_id'], tags, purge_tags, check_mode)
@@ -855,6 +809,7 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None,
 success = False
 changed = False
 return success, changed, err_msg, dict()
+
 existing_gateways, allocation_id_exists = (
 gateway_in_subnet_exists(
 client, subnet_id, allocation_id, check_mode=check_mode
 )
 )
@@ -933,8 +888,14 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
 success = False
 changed = False
 err_msg = ""
- results = list()
+ results = {}
 states = ['pending', 'available']
+
+ if check_mode:
+ changed = True
+ success = True
+ return success, changed, err_msg, results
+
 try:
 exist, err_msg, gw = (
 get_nat_gateways(
@@ -944,8 +905,7 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
 )
 if exist and len(gw) == 1:
 results = gw[0]
- if not check_mode:
- client.delete_nat_gateway(aws_retry=True, **params)
+ client.delete_nat_gateway(aws_retry=True, **params)

 allocation_id = (
 results['nat_gateway_addresses'][0]['allocation_id']
@@ -990,6 +950,10 @@ def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode):
 final_tags = []
 changed = False

+ if check_mode and nat_gw_id is None:
+ # We can't describe tags without an EIP id, we might get here when creating a new EIP in check_mode
+ return final_tags, changed
+
 filters = ansible_dict_to_boto3_filter_list({'resource-id': nat_gw_id, 'resource-type': 'natgateway'})
 cur_tags = None
 try:
@@ -1041,6 +1005,7 @@ def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode):
 final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
 module.fail_json_aws(e, "Couldn't describe tags")
+
 return final_tags, changed

From 5a654c390bb4602111e905702827de2c1a41916d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pedro=20Magalh=C3=A3es?= <4622652+pjrm@users.noreply.github.com>
Date: Thu, 11 Mar 2021 13:15:18 +0000
Subject: [PATCH 101/683] Fix state=get on route53 module (Issue #423) (#424)

* Fix state=get on route53 module

This bug was introduced when refactoring from boto to the boto3 library. It happens because the method "get_hosted_zone" only returns the DelegationSet when the DNS zone is external, and therefore breaks when trying to get internal records. The solution is to look up the DNS records of type 'NS' with the same name as the hosted zone, as sketched below.
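A rough standalone equivalent of that lookup (illustrative only: the zone ID
is a placeholder, error handling is skipped, and pagination is elided where
the module itself uses a retrying, paginated helper):

```python
import boto3

client = boto3.client('route53')
zone_id = 'Z3M3LMPEXAMPLE'  # placeholder hosted zone ID

zone_name = client.get_hosted_zone(Id=zone_id)['HostedZone']['Name']
record_sets = client.list_resource_record_sets(HostedZoneId=zone_id)['ResourceRecordSets']
ns_record = [r for r in record_sets if r['Name'] == zone_name and r['Type'] == 'NS'][0]
nameservers = [rr['Value'] for rr in ns_record['ResourceRecords']]
```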
* Update changelogs/fragments/406-route53-state-get.yml Co-authored-by: Mark Chappell --- route53.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/route53.py b/route53.py index 43b17a44f5b..84a8dc997fb 100644 --- a/route53.py +++ b/route53.py @@ -424,6 +424,17 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): return None +def get_hosted_zone_nameservers(route53, zone_id): + hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name'] + resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id) + + nameservers_records = list( + filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', resource_records_sets) + )[0]['ResourceRecords'] + + return [ns_record['Value'] for ns_record in nameservers_records] + + def main(): argument_spec = dict( state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']), @@ -565,7 +576,7 @@ def main(): ns = aws_record.get('values', []) else: # Retrieve name servers associated to the zone. - ns = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['DelegationSet']['NameServers'] + ns = get_hosted_zone_nameservers(route53, zone_id) module.exit_json(changed=False, set=aws_record, nameservers=ns) From 4f366b5b3d4df2ec22e0d56b664e5491e15cce65 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 12 Mar 2021 09:14:56 +0100 Subject: [PATCH 102/683] More no_log changes (#471) * More no_log changes * changelog --- aws_secret.py | 2 +- s3_sync.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_secret.py b/aws_secret.py index 962501d5d02..22141ce24a6 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -334,7 +334,7 @@ def main(): 'description': dict(default=""), 'kms_key_id': dict(), 'secret_type': dict(choices=['binary', 'string'], default="string"), - 'secret': dict(default=""), + 'secret': dict(default="", no_log=True), 'tags': dict(type='dict', default={}), 'rotation_lambda': dict(), 'rotation_interval': dict(type='int', default=30), diff --git a/s3_sync.py b/s3_sync.py index 1222d98cfd6..e0edbea82b0 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -497,7 +497,7 @@ def main(): mode=dict(choices=['push'], default='push'), file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'), bucket=dict(required=True), - key_prefix=dict(required=False, default=''), + key_prefix=dict(required=False, default='', no_log=False), file_root=dict(required=True, type='path'), permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), From 3e93400f38e25594e657aada820dd4dee5fbc6a9 Mon Sep 17 00:00:00 2001 From: ichekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Fri, 12 Mar 2021 06:08:49 -0500 Subject: [PATCH 103/683] Make "unit" parameter optional and add support for check mode (#470) * Make "unit" parameter optional and add support for check mode boto3 documentation explicitly suggests omitting this parameter: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_metric_alarm Creating an alarm without the "unit" parameter specified fails: ``` - community.aws.ec2_metric_alarm: name: My alarm description: My description namespace: AWS/CertificateManager metric: DaysToExpiry statistic: Average comparison: LessThanOrEqualToThreshold threshold: 45 period: 86400 
evaluation_periods: 1
 dimensions:
 CertificateArn: "arn:aws:acm:us-east-1:123412341234:certificate/example"
 alarm_actions:
 - arn:aws:sns:us-east-1:123412341234:my-sns-topic
 ok_actions:
 - arn:aws:sns:us-east-1:123412341234:my-sns-topic
 treat_missing_data: ignore
 state: present
```

with the following error:

```
Invalid type for parameter Unit, value: None, type: <class 'NoneType'>, valid types: <class 'str'>
```

Apparently specifying `unit: None` in the example above is not the same as omitting the unit - it causes the alarm to be in "Insufficient data" state.

* Fix module output for tests
* Add tests for idempotency and check mode
* Fix an error when the module creates a new alarm in check mode

Alarm is not actually created in check mode, and therefore `describe_alarms` returns an empty list.

* Add tests for alarm creation with no unit attribute specified
* Fix typo - MetricAlarms vs MetricsAlarms
* Fix variable name - alarm_info_query_check vs alarm_info_check
* Fix variable names in tests
* Fix tests by ensuring that alarm doesn't exist before we begin
* Fix variable name
* Fix assertion
* Ensure check mode is enabled when it is supposed to be
* Enable check mode for alarm deletion
* Fix variable name - alarm_info_no_unit vs alarm_info
* Fix the test of creating the alarm without unit attribute
* Fix variable name - alarm_info_query_no_unit vs alarm_info
* Update changelogs/fragments/470-ec2_metric_alarm-unit-optional.yml
* Update changelogs/fragments/470-ec2_metric_alarm-unit-optional.yml

Co-authored-by: Mark Chappell
---
 ec2_metric_alarm.py | 190 +++++++++++++++++---------------------
 1 file changed, 75 insertions(+), 115 deletions(-)

diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py
index c7b4c28a8ad..effa9bd5c4e 100644
--- a/ec2_metric_alarm.py
+++ b/ec2_metric_alarm.py
@@ -211,63 +211,30 @@
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule

-def create_metric_alarm(connection, module):
-
- name = module.params.get('name')
- metric = module.params.get('metric')
- namespace = module.params.get('namespace')
- statistic = module.params.get('statistic')
- comparison = module.params.get('comparison')
- threshold = module.params.get('threshold')
- period = module.params.get('period')
- evaluation_periods = module.params.get('evaluation_periods')
- unit = module.params.get('unit')
- description = module.params.get('description')
- dimensions = module.params.get('dimensions')
- alarm_actions = module.params.get('alarm_actions')
- insufficient_data_actions = module.params.get('insufficient_data_actions')
- ok_actions = module.params.get('ok_actions')
- treat_missing_data = module.params.get('treat_missing_data')
-
- warnings = []
-
- alarms = connection.describe_alarms(AlarmNames=[name])
+def create_metric_alarm(connection, module, params):
+ alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])

 comparisons = {'<=': 'LessThanOrEqualToThreshold',
 '<': 'LessThanThreshold',
 '>=': 'GreaterThanOrEqualToThreshold',
 '>': 'GreaterThanThreshold'}
- if comparison in ('<=', '<', '>', '>='):
+ if params['ComparisonOperator'] in ('<=', '<', '>', '>='):
 module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated.
Please use LessThanOrEqualToThreshold, ' 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', date='2022-06-01', collection_name='community.aws') - comparison = comparisons[comparison] + params['ComparisonOperator'] = comparisons[params['ComparisonOperator']] - if not isinstance(dimensions, list): + if not isinstance(params['Dimensions'], list): fixed_dimensions = [] - for key, value in dimensions.items(): + for key, value in params['Dimensions'].items(): fixed_dimensions.append({'Name': key, 'Value': value}) - dimensions = fixed_dimensions + params['Dimensions'] = fixed_dimensions if not alarms['MetricAlarms']: try: - connection.put_metric_alarm(AlarmName=name, - MetricName=metric, - Namespace=namespace, - Statistic=statistic, - ComparisonOperator=comparison, - Threshold=threshold, - Period=period, - EvaluationPeriods=evaluation_periods, - Unit=unit, - AlarmDescription=description, - Dimensions=dimensions, - AlarmActions=alarm_actions, - InsufficientDataActions=insufficient_data_actions, - OKActions=ok_actions, - TreatMissingData=treat_missing_data) + if not module.check_mode: + connection.put_metric_alarm(**params) changed = True - alarms = connection.describe_alarms(AlarmNames=[name]) except ClientError as e: module.fail_json_aws(e) @@ -279,85 +246,60 @@ def create_metric_alarm(connection, module): if 'TreatMissingData' not in alarm.keys(): alarm['TreatMissingData'] = 'missing' - for key, value in {'MetricName': metric, - 'Namespace': namespace, - 'Statistic': statistic, - 'ComparisonOperator': comparison, - 'Threshold': threshold, - 'Period': period, - 'EvaluationPeriods': evaluation_periods, - 'Unit': unit, - 'AlarmDescription': description, - 'Dimensions': dimensions, - 'TreatMissingData': treat_missing_data}.items(): - try: - if alarm[key] != value: - changed = True - except KeyError: - if value is not None: - changed = True - - alarm[key] = value - - for key, value in {'AlarmActions': alarm_actions, - 'InsufficientDataActions': insufficient_data_actions, - 'OKActions': ok_actions}.items(): - action = value or [] - if alarm[key] != action: - changed = True - alarm[key] = value + for key in ['ActionsEnabled', 'StateValue', 'StateReason', + 'StateReasonData', 'StateUpdatedTimestamp', + 'AlarmArn', 'AlarmConfigurationUpdatedTimestamp']: + alarm.pop(key, None) + if alarm != params: + changed = True + alarm = params try: if changed: - connection.put_metric_alarm(AlarmName=alarm['AlarmName'], - MetricName=alarm['MetricName'], - Namespace=alarm['Namespace'], - Statistic=alarm['Statistic'], - ComparisonOperator=alarm['ComparisonOperator'], - Threshold=alarm['Threshold'], - Period=alarm['Period'], - EvaluationPeriods=alarm['EvaluationPeriods'], - Unit=alarm['Unit'], - AlarmDescription=alarm['AlarmDescription'], - Dimensions=alarm['Dimensions'], - AlarmActions=alarm['AlarmActions'], - InsufficientDataActions=alarm['InsufficientDataActions'], - OKActions=alarm['OKActions'], - TreatMissingData=alarm['TreatMissingData']) + if not module.check_mode: + connection.put_metric_alarm(**alarm) except ClientError as e: module.fail_json_aws(e) - result = alarms['MetricAlarms'][0] - module.exit_json(changed=changed, warnings=warnings, - name=result['AlarmName'], - actions_enabled=result['ActionsEnabled'], - alarm_actions=result['AlarmActions'], - alarm_arn=result['AlarmArn'], - comparison=result['ComparisonOperator'], - description=result['AlarmDescription'], - dimensions=result['Dimensions'], - evaluation_periods=result['EvaluationPeriods'], - 
insufficient_data_actions=result['InsufficientDataActions'], - last_updated=result['AlarmConfigurationUpdatedTimestamp'], - metric=result['MetricName'], - namespace=result['Namespace'], - ok_actions=result['OKActions'], - period=result['Period'], - state_reason=result['StateReason'], - state_value=result['StateValue'], - statistic=result['Statistic'], - threshold=result['Threshold'], - treat_missing_data=result['TreatMissingData'], - unit=result['Unit']) - - -def delete_metric_alarm(connection, module): - name = module.params.get('name') - alarms = connection.describe_alarms(AlarmNames=[name]) + try: + alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + except ClientError as e: + module.fail_json_aws(e) + + result = {} + if alarms['MetricAlarms']: + result = alarms['MetricAlarms'][0] + + module.exit_json(changed=changed, + name=result.get('AlarmName'), + actions_enabled=result.get('ActionsEnabled'), + alarm_actions=result.get('AlarmActions'), + alarm_arn=result.get('AlarmArn'), + comparison=result.get('ComparisonOperator'), + description=result.get('AlarmDescription'), + dimensions=result.get('Dimensions'), + evaluation_periods=result.get('EvaluationPeriods'), + insufficient_data_actions=result.get('InsufficientDataActions'), + last_updated=result.get('AlarmConfigurationUpdatedTimestamp'), + metric=result.get('MetricName'), + namespace=result.get('Namespace'), + ok_actions=result.get('OKActions'), + period=result.get('Period'), + state_reason=result.get('StateReason'), + state_value=result.get('StateValue'), + statistic=result.get('Statistic'), + threshold=result.get('Threshold'), + treat_missing_data=result.get('TreatMissingData'), + unit=result.get('Unit')) + + +def delete_metric_alarm(connection, module, params): + alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) if alarms['MetricAlarms']: try: - connection.delete_alarms(AlarmNames=[name]) + if not module.check_mode: + connection.delete_alarms(AlarmNames=[params['AlarmName']]) module.exit_json(changed=True) except (ClientError) as e: module.fail_json_aws(e) @@ -390,16 +332,34 @@ def main(): state=dict(default='present', choices=['present', 'absent']), ) - module = AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) state = module.params.get('state') + params = dict() + params['AlarmName'] = module.params.get('name') + params['MetricName'] = module.params.get('metric') + params['Namespace'] = module.params.get('namespace') + params['Statistic'] = module.params.get('statistic') + params['ComparisonOperator'] = module.params.get('comparison') + params['Threshold'] = module.params.get('threshold') + params['Period'] = module.params.get('period') + params['EvaluationPeriods'] = module.params.get('evaluation_periods') + if module.params.get('unit'): + params['Unit'] = module.params.get('unit') + params['AlarmDescription'] = module.params.get('description') + params['Dimensions'] = module.params.get('dimensions') + params['AlarmActions'] = module.params.get('alarm_actions', []) + params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', []) + params['OKActions'] = module.params.get('ok_actions', []) + params['TreatMissingData'] = module.params.get('treat_missing_data') + connection = module.client('cloudwatch') if state == 'present': - create_metric_alarm(connection, module) + create_metric_alarm(connection, module, params) elif state == 'absent': - delete_metric_alarm(connection, module) + 
delete_metric_alarm(connection, module, params) if __name__ == '__main__': From 90ab23a826529bc17449d434a3a39f16c39c555e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 13 Mar 2021 20:10:09 +0100 Subject: [PATCH 104/683] More no_log=False to fix sanity tests (#474) * Add no_log=False to mark some more false-positives of the no_log check. * More false-positives confirmed by tremble. --- aws_batch_compute_environment.py | 2 +- aws_codebuild.py | 2 +- cloudtrail.py | 2 +- dms_endpoint.py | 2 +- ec2_vpc_endpoint.py | 2 +- ec2_vpc_nat_gateway.py | 2 +- ecs_service.py | 2 +- iam.py | 2 +- lambda.py | 2 +- lambda_policy.py | 2 +- s3_website.py | 2 +- sqs_queue.py | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 39ff11e2576..68044a8d11e 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -460,7 +460,7 @@ def main(): image_id=dict(), subnets=dict(type='list', required=True, elements='str'), security_group_ids=dict(type='list', required=True, elements='str'), - ec2_key_pair=dict(), + ec2_key_pair=dict(no_log=False), instance_role=dict(required=True), tags=dict(type='dict'), bid_percentage=dict(type='int'), diff --git a/aws_codebuild.py b/aws_codebuild.py index 7c5e7500a50..e56b1a566b0 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -378,7 +378,7 @@ def main(): environment=dict(type='dict'), service_role=dict(), timeout_in_minutes=dict(type='int', default=60), - encryption_key=dict(), + encryption_key=dict(no_log=False), tags=dict(type='list', elements='dict'), vpc_config=dict(type='dict'), state=dict(choices=['present', 'absent'], default='present') diff --git a/cloudtrail.py b/cloudtrail.py index c0bf3f4db07..5f8aa5ae03f 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -438,7 +438,7 @@ def main(): name=dict(default='default'), enable_logging=dict(default=True, type='bool'), s3_bucket_name=dict(), - s3_key_prefix=dict(), + s3_key_prefix=dict(no_log=False), sns_topic_name=dict(), is_multi_region_trail=dict(default=False, type='bool'), enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']), diff --git a/dms_endpoint.py b/dms_endpoint.py index d457a7c4208..f4ab520903a 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -387,7 +387,7 @@ def main(): port=dict(type='int'), databasename=dict(), extraconnectionattributes=dict(), - kmskeyid=dict(), + kmskeyid=dict(no_log=False), tags=dict(type='dict'), certificatearn=dict(), sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'], diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index d15da3b2a79..2bfe89008e5 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -352,7 +352,7 @@ def main(): wait_timeout=dict(type='int', default=320, required=False), route_table_ids=dict(type='list', elements='str'), vpc_endpoint_id=dict(), - client_token=dict(), + client_token=dict(no_log=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 11c271434d9..b85d8ed97e8 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -1020,7 +1020,7 @@ def main(): wait_timeout=dict(type='int', default=320, required=False), release_eip=dict(type='bool', default=False), nat_gateway_id=dict(type='str'), - client_token=dict(type='str'), + client_token=dict(type='str', no_log=False), tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, type='bool'), ) 
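# For context, a sketch of the pattern at work in this commit (not part of
# the patch): ansible-core's no_log sanity check flags any option whose name
# looks secret-like ('key', 'token', 'secret', ...). Options that merely
# sound sensitive are marked no_log=False to silence the false positive:
#
#   argument_spec = dict(
#       client_token=dict(type='str', no_log=False),  # idempotency token, not a secret
#       s3_key=dict(no_log=False),                    # an S3 object key, not a credential
#   )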
diff --git a/ecs_service.py b/ecs_service.py index 7bc3d467df7..42a45bba064 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -652,7 +652,7 @@ def main(): task_definition=dict(required=False, type='str'), load_balancers=dict(required=False, default=[], type='list', elements='dict'), desired_count=dict(required=False, type='int'), - client_token=dict(required=False, default='', type='str'), + client_token=dict(required=False, default='', type='str', no_log=False), role=dict(required=False, default='', type='str'), delay=dict(required=False, type='int', default=10), repeat=dict(required=False, type='int', default=10), diff --git a/iam.py b/iam.py index 823bfb89925..4c774285f6f 100644 --- a/iam.py +++ b/iam.py @@ -627,7 +627,7 @@ def main(): access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', 'Active', 'Inactive', 'Create', 'Remove']), - access_key_ids=dict(type='list', default=None, required=False, elements='str'), + access_key_ids=dict(type='list', default=None, required=False, elements='str', no_log=False), key_count=dict(type='int', default=1, required=False), name=dict(required=True), trust_policy_filepath=dict(default=None, required=False), diff --git a/lambda.py b/lambda.py index e559e181abe..c6960d99c98 100644 --- a/lambda.py +++ b/lambda.py @@ -330,7 +330,7 @@ def main(): handler=dict(), zip_file=dict(aliases=['src']), s3_bucket=dict(), - s3_key=dict(), + s3_key=dict(no_log=False), s3_object_version=dict(), description=dict(default=''), timeout=dict(type='int', default=3), diff --git a/lambda_policy.py b/lambda_policy.py index ff091a8beaa..5c65b7969da 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -397,7 +397,7 @@ def setup_module_object(): principal=dict(required=True, ), source_arn=dict(), source_account=dict(), - event_source_token=dict(), + event_source_token=dict(no_log=False), ) return AnsibleAWSModule( diff --git a/s3_website.py b/s3_website.py index 24a7cdf7afa..57251826a04 100644 --- a/s3_website.py +++ b/s3_website.py @@ -291,7 +291,7 @@ def main(): name=dict(type='str', required=True), state=dict(type='str', required=True, choices=['present', 'absent']), suffix=dict(type='str', required=False, default='index.html'), - error_key=dict(type='str', required=False), + error_key=dict(type='str', required=False, no_log=False), redirect_all_requests=dict(type='str', required=False), ) diff --git a/sqs_queue.py b/sqs_queue.py index 0a93909f021..b0565c6c8d0 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -450,7 +450,7 @@ def main(): redrive_policy=dict(type='dict'), visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']), kms_master_key_id=dict(type='str'), - kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period']), + kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), content_based_deduplication=dict(type='bool'), tags=dict(type='dict'), purge_tags=dict(type='bool', default=False), From 8cab094073f252dca3a04b329ed27da674dd8f58 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 13 Mar 2021 17:32:35 +0100 Subject: [PATCH 105/683] Fix missing no_log=True. 
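For contrast with the previous commit, a short sketch of what no_log=True
does for genuinely sensitive options (illustrative only, not part of the
patch):

```python
argument_spec = dict(
    authentication_key=dict(no_log=True),  # BGP authentication key, a real secret
)
# With no_log=True, Ansible masks the value in logs and in the module's
# invocation output, e.g. as 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'.
```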
--- aws_direct_connect_virtual_interface.py | 2 +- sts_assume_role.py | 2 +- sts_session_token.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index 6c7720fbc54..eb4906cc730 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -484,7 +484,7 @@ def main(): name=dict(), vlan=dict(type='int', default=100), bgp_asn=dict(type='int', default=65000), - authentication_key=dict(), + authentication_key=dict(no_log=True), amazon_address=dict(), customer_address=dict(), address_type=dict(), diff --git a/sts_assume_role.py b/sts_assume_role.py index 378eb0031f8..d1203a3c5a5 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -162,7 +162,7 @@ def main(): external_id=dict(required=False, default=None), policy=dict(required=False, default=None), mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None) + mfa_token=dict(required=False, default=None, no_log=True) ) module = AnsibleAWSModule(argument_spec=argument_spec) diff --git a/sts_session_token.py b/sts_session_token.py index 7c8221a9c68..7e51fb08ac3 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -129,7 +129,7 @@ def main(): argument_spec = dict( duration_seconds=dict(required=False, default=None, type='int'), mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None), + mfa_token=dict(required=False, default=None, no_log=True), ) module = AnsibleAWSModule(argument_spec=argument_spec) From 99fae77375fc95bfc075082c4e81932d67fa5c0c Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Sun, 14 Mar 2021 06:08:58 -0400 Subject: [PATCH 106/683] add metadata options to ec2 template (#322) * add metadata options to ec2 template --- ec2_launch_template.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 4a35812cfb4..c1ce6d3dcb8 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -325,6 +325,32 @@ U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) documentation on user-data. type: str + metadata_options: + description: + - Configure EC2 Metadata options. + - For more information see the IMDS documentation + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html). + type: dict + version_added: 1.5.0 + suboptions: + http_endpoint: + type: str + description: > + This parameter enables or disables the HTTP metadata endpoint on your instances. + choices: [enabled, disabled] + default: 'enabled' + http_put_response_hop_limit: + type: int + description: > + The desired HTTP PUT response hop limit for instance metadata requests. + The larger the number, the further instance metadata requests can travel. + default: 1 + http_tokens: + type: str + description: > + The state of token usage for your instance metadata requests. 
+ choices: [optional, required] + default: 'optional' ''' EXAMPLES = ''' @@ -636,6 +662,14 @@ def main(): enabled=dict(type='bool') ), ), + metadata_options=dict( + type='dict', + options=dict( + http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), + http_put_response_hop_limit=dict(type='int', default=1), + http_tokens=dict(choices=['optional', 'required'], default='optional') + ) + ), network_interfaces=dict( type='list', elements='dict', From 4a3a8621e2657d3a56e663423a63ef6de9531b38 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Sun, 14 Mar 2021 12:34:12 +0100 Subject: [PATCH 107/683] fix KeyError: 'Tags' for ec2_instance (#476) * fix key error when ec2 instance has no tags * add changelog fragment --- ec2_instance.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ec2_instance.py b/ec2_instance.py index 380e3527910..18af847aed6 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -889,7 +889,7 @@ def tower_callback_script(tower_conf, windows=False, passwd=None): def manage_tags(match, new_tags, purge_tags, ec2): changed = False - old_tags = boto3_tag_list_to_ansible_dict(match['Tags']) + old_tags = boto3_tag_list_to_ansible_dict(match.get('Tags', {})) tags_to_set, tags_to_delete = compare_aws_tags( old_tags, new_tags, purge_tags=purge_tags, @@ -1559,7 +1559,7 @@ def change_instance_state(filters, desired_state, ec2=None): def pretty_instance(i): instance = camel_dict_to_snake_dict(i, ignore_list=['Tags']) - instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags']) + instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {})) return instance From d4f6e78308b30c9887dae91c363fc6108230c62b Mon Sep 17 00:00:00 2001 From: Milan Zink Date: Sun, 14 Mar 2021 13:10:45 +0100 Subject: [PATCH 108/683] Extending aws_s3_bucket_info module (#260) * WIP - adding new functionality to aws_s3_bucket_info module * Fixing exceptions adding integration tests * Code linting - shippable * Fixing option type * Fixing RETURN / Shippable * Sync with upstream + .gitignore VSCode * Extending integration test * Adding version_added to new module options * Prepare 1.3.0 changelog * Use boto3_tag_list_to_ansible_dict for bucket tags * Fixing integration test (delete buckets) * Adjust integration test (python 2.7) * fix integration test -2 * Adjusting changelogs/changelog.yaml * WIP - adding new functionality to aws_s3_bucket_info module * Fixing exceptions adding integration tests * Code linting - shippable * Fixing option type * Fixing RETURN / Shippable * Extending integration test * Adding version_added to new module options * Prepare 1.3.0 changelog * Use boto3_tag_list_to_ansible_dict for bucket tags * Fixing integration test (delete buckets) * Adjust integration test (python 2.7) * fix integration test -2 * Adjusting changelogs/changelog.yaml * WIP - adding new functionality to aws_s3_bucket_info module * Fixing exceptions adding integration tests * Code linting - shippable * Fixing option type * Fixing RETURN / Shippable * Extending integration test * Adding version_added to new module options * Use boto3_tag_list_to_ansible_dict for bucket tags * Fixing integration test (delete buckets) * Adjust integration test (python 2.7) * fix integration test -2 * WIP - adding new functionality to aws_s3_bucket_info module * Fixing exceptions adding integration tests * Code linting - shippable * Fixing option type * Fixing RETURN / Shippable * Extending integration test * Adding version_added to new module options * Use boto3_tag_list_to_ansible_dict for
bucket tags * Fixing integration test (delete buckets) * Adjust integration test (python 2.7) * fix integration test -2 * Merging requested PR changes #1 Co-authored-by: Mark Chappell * Bump version_added 1.3.0->1.4.0 Co-authored-by: Mark Chappell * Fix module docstring Co-authored-by: Mark Chappell * Fixing changelog * Code cleanup * Update integration tests * Add S3 bucket location check to integration test * Documentation update - fix wording and typos in DOCUMENTATION - complete rewrite of RETURN * Update plugins/modules/aws_s3_bucket_info.py * Update plugins/modules/aws_s3_bucket_info.py * Fixing RETURN indentation * Fixing missing type: in RETURN * Revert changes to changelogs/changelog.yaml * Revert changes to changelogs/changelog.yaml Co-authored-by: Mark Chappell --- aws_s3_bucket_info.py | 571 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 549 insertions(+), 22 deletions(-) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 40de3650c9c..05d92310013 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -1,6 +1,8 @@ #!/usr/bin/python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +Copyright (c) 2017 Ansible Project +GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" from __future__ import absolute_import, division, print_function __metaclass__ = type @@ -10,19 +12,111 @@ --- module: aws_s3_bucket_info version_added: 1.0.0 -short_description: Lists S3 buckets in AWS +author: "Gerben Geijteman (@hyperized)" +short_description: lists S3 buckets in AWS requirements: - boto3 >= 1.4.4 - python >= 2.6 description: - - Lists S3 buckets in AWS + - Lists S3 buckets and details about those buckets. - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts). Note that the M(community.aws.aws_s3_bucket_info) module no longer returns C(ansible_facts)! -author: "Gerben Geijteman (@hyperized)" +options: + name: + description: + - Name of bucket to query. + type: str + default: "" + version_added: 1.4.0 + name_filter: + description: + - Limits buckets to only buckets whose name contains the string in I(name_filter). + type: str + default: "" + version_added: 1.4.0 + bucket_facts: + description: + - Retrieve requested S3 bucket detailed information. + - Each bucket_X option executes one API call, so setting many options to C(true) will slow module execution. + - You can limit buckets by using the I(name) or I(name_filter) option. + suboptions: + bucket_accelerate_configuration: + description: Retrieve S3 accelerate configuration. + type: bool + default: False + bucket_location: + description: Retrieve S3 bucket location. + type: bool + default: False + bucket_replication: + description: Retrieve S3 bucket replication. + type: bool + default: False + bucket_acl: + description: Retrieve S3 bucket ACLs. + type: bool + default: False + bucket_logging: + description: Retrieve S3 bucket logging. + type: bool + default: False + bucket_request_payment: + description: Retrieve S3 bucket request payment. + type: bool + default: False + bucket_tagging: + description: Retrieve S3 bucket tagging. + type: bool + default: False + bucket_cors: + description: Retrieve S3 bucket CORS configuration. + type: bool + default: False + bucket_notification_configuration: + description: Retrieve S3 bucket notification configuration.
+ type: bool + default: False + bucket_encryption: + description: Retrieve S3 bucket encryption. + type: bool + default: False + bucket_ownership_controls: + description: Retrieve S3 ownership controls. + type: bool + default: False + bucket_website: + description: Retrieve S3 bucket website. + type: bool + default: False + bucket_policy: + description: Retrieve S3 bucket policy. + type: bool + default: False + bucket_policy_status: + description: Retrieve S3 bucket policy status. + type: bool + default: False + bucket_lifecycle_configuration: + description: Retrieve S3 bucket lifecycle configuration. + type: bool + default: False + public_access_block: + description: Retrieve S3 bucket public access block. + type: bool + default: False + type: dict + version_added: 1.4.0 + transform_location: + description: + - S3 bucket location for the default region us-east-1 is normally reported as C(null). + - Setting this option to C(true) will return C(us-east-1) instead. + - Affects only queries with I(bucket_facts=true) and I(bucket_location=true). + type: bool + default: False + version_added: 1.4.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = ''' @@ -34,19 +128,277 @@ - community.aws.aws_s3_bucket_info: register: result +# Retrieve detailed bucket information +- community.aws.aws_s3_bucket_info: + # Show only buckets with name matching + name_filter: your.testing + # Choose facts to retrieve + bucket_facts: + # bucket_accelerate_configuration: true + bucket_acl: true + bucket_cors: true + bucket_encryption: true + # bucket_lifecycle_configuration: true + bucket_location: true + # bucket_logging: true + # bucket_notification_configuration: true + # bucket_ownership_controls: true + # bucket_policy: true + # bucket_policy_status: true + # bucket_replication: true + # bucket_request_payment: true + # bucket_tagging: true + # bucket_website: true + # public_access_block: true + transform_location: true + register: result + +# Print out result - name: List buckets ansible.builtin.debug: msg: "{{ result['buckets'] }}" ''' RETURN = ''' -buckets: +bucket_list: description: "List of buckets" returned: always - sample: - - creation_date: '2017-07-06 15:05:12 +00:00' - name: my_bucket - type: list + type: complex + contains: + name: + description: Bucket name. + returned: always + type: str + sample: a-testing-bucket-name + creation_date: + description: Bucket creation date timestamp. + returned: always + type: str + sample: "2021-01-21T12:44:10+00:00" + public_access_block: + description: Bucket public access block configuration. + returned: when I(bucket_facts=true) and I(public_access_block=true) + type: complex + contains: + PublicAccessBlockConfiguration: + description: PublicAccessBlockConfiguration data. + returned: when PublicAccessBlockConfiguration is defined for the bucket + type: complex + contains: + BlockPublicAcls: + description: BlockPublicAcls setting value. + type: bool + sample: true + BlockPublicPolicy: + description: BlockPublicPolicy setting value. + type: bool + sample: true + IgnorePublicAcls: + description: IgnorePublicAcls setting value. + type: bool + sample: true + RestrictPublicBuckets: + description: RestrictPublicBuckets setting value. + type: bool + sample: true + bucket_name_filter: + description: String used to limit buckets. See I(name_filter). + returned: when I(name_filter) is defined + type: str + sample: filter-by-this-string + bucket_acl: + description: Bucket ACL configuration.
+ returned: when I(bucket_facts=true) and I(bucket_acl=true) + type: complex + contains: + Grants: + description: List of ACL grants. + type: list + sample: [] + Owner: + description: Bucket owner information. + type: complex + contains: + DisplayName: + description: Bucket owner user display name. + returned: always + type: str + sample: username + ID: + description: Bucket owner user ID. + returned: always + type: str + sample: 123894e509349etc + bucket_cors: + description: Bucket CORS configuration. + returned: when I(bucket_facts=true) and I(bucket_cors=true) + type: complex + contains: + CORSRules: + description: Bucket CORS configuration. + returned: when CORS rules are defined for the bucket + type: list + sample: [] + bucket_encryption: + description: Bucket encryption configuration. + returned: when I(bucket_facts=true) and I(bucket_encryption=true) + type: complex + contains: + ServerSideEncryptionConfiguration: + description: ServerSideEncryptionConfiguration configuration. + returned: when encryption is enabled on the bucket + type: complex + contains: + Rules: + description: List of applied encryption rules. + returned: when encryption is enabled on the bucket + type: list + sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False } + bucket_lifecycle_configuration: + description: Bucket lifecycle configuration settings. + returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true) + type: complex + contains: + Rules: + description: List of lifecycle management rules. + returned: when lifecycle configuration is present + type: list + sample: [{ "Status": "Enabled", "ID": "example-rule" }] + bucket_location: + description: Bucket location. + returned: when I(bucket_facts=true) and I(bucket_location=true) + type: complex + contains: + LocationConstraint: + description: AWS region. + returned: always + type: str + sample: us-east-2 + bucket_logging: + description: Server access logging configuration. + returned: when I(bucket_facts=true) and I(bucket_logging=true) + type: complex + contains: + LoggingEnabled: + description: Server access logging configuration. + returned: when server access logging is defined for the bucket + type: complex + contains: + TargetBucket: + description: Target bucket name. + returned: always + type: str + sample: logging-bucket-name + TargetPrefix: + description: Prefix in target bucket. + returned: always + type: str + sample: "" + bucket_notification_configuration: + description: Bucket notification settings. + returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true) + type: complex + contains: + TopicConfigurations: + description: List of notification event configurations. + returned: when at least one notification is configured + type: list + sample: [] + bucket_ownership_controls: + description: Preferred object ownership settings. + returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true) + type: complex + contains: + OwnershipControls: + description: Object ownership settings. + returned: when ownership controls are defined for the bucket + type: complex + contains: + Rules: + description: List of ownership rules. + returned: when ownership rule is defined + type: list + sample: [{ "ObjectOwnership": "ObjectWriter" }] + bucket_policy: + description: Bucket policy contents.
+ returned: when I(bucket_facts=true) and I(bucket_policy=true) + type: str + sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}}]}' + bucket_policy_status: + description: Status of bucket policy. + returned: when I(bucket_facts=true) and I(bucket_policy_status=true) + type: complex + contains: + PolicyStatus: + description: Status of bucket policy. + returned: when bucket policy is present + type: complex + contains: + IsPublic: + description: Report bucket policy public status. + returned: when bucket policy is present + type: bool + sample: True + bucket_replication: + description: Replication configuration settings. + returned: when I(bucket_facts=true) and I(bucket_replication=true) + type: complex + contains: + Role: + description: IAM role used for replication. + returned: when replication rule is defined + type: str + sample: "arn:aws:iam::123:role/example-role" + Rules: + description: List of replication rules. + returned: when replication rule is defined + type: list + sample: [{ "ID": "rule-1", "Filter": "{}" }] + bucket_request_payment: + description: Requester pays setting. + returned: when I(bucket_facts=true) and I(bucket_request_payment=true) + type: complex + contains: + Payer: + description: Current payer. + returned: always + type: str + sample: BucketOwner + bucket_tagging: + description: Bucket tags. + returned: when I(bucket_facts=true) and I(bucket_tagging=true) + type: dict + sample: { "Tag1": "Value1", "Tag2": "Value2" } + bucket_website: + description: Static website hosting. + returned: when I(bucket_facts=true) and I(bucket_website=true) + type: complex + contains: + ErrorDocument: + description: Object serving as HTTP error page. + returned: when static website hosting is enabled + type: dict + sample: { "Key": "error.html" } + IndexDocument: + description: Object serving as HTTP index page. + returned: when static website hosting is enabled + type: dict + sample: { "Suffix": "index.html" } + RedirectAllRequestsTo: + description: Website redirect settings. + returned: when redirecting all requests is configured + type: complex + contains: + HostName: + description: Hostname to redirect. + returned: always + type: str + sample: www.example.com + Protocol: + description: Protocol used for redirect.
+ returned: always + type: str + sample: https ''' try: @@ -54,24 +406,150 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -def get_bucket_list(module, connection): +def get_bucket_list(module, connection, name="", name_filter=""): """ Return result of list_buckets json encoded + Filter only buckets matching 'name' or 'name_filter' if defined :param module: :param connection: :return: """ + buckets = [] + filtered_buckets = [] + final_buckets = [] + + # Get all buckets try: buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to list buckets") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: + module.fail_json_aws(err_code, msg="Failed to list buckets") + + # Filter buckets if requested + if name_filter: + for bucket in buckets: + if name_filter in bucket['name']: + filtered_buckets.append(bucket) + elif name: + for bucket in buckets: + if name == bucket['name']: + filtered_buckets.append(bucket) + + # Return proper list (filtered or all) + if name or name_filter: + final_buckets = filtered_buckets + else: + final_buckets = buckets + return(final_buckets) + - return buckets +def get_buckets_facts(connection, buckets, requested_facts, transform_location): + """ + Retrieve additional information about S3 buckets + """ + full_bucket_list = [] + # Iterate over all buckets and append retrieved facts to bucket + for bucket in buckets: + bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location)) + full_bucket_list.append(bucket) + + return(full_bucket_list) + + +def get_bucket_details(connection, name, requested_facts, transform_location): + """ + Execute all enabled S3API get calls for selected bucket + """ + all_facts = {} + + for key in requested_facts: + if requested_facts[key]: + if key == 'bucket_location': + all_facts[key] = {} + try: + all_facts[key] = get_bucket_location(name, connection, transform_location) + # we just pass on the error - it means the resource is undefined + except botocore.exceptions.ClientError: + pass + elif key == 'bucket_tagging': + all_facts[key] = {} + try: + all_facts[key] = get_bucket_tagging(name, connection) + # we just pass on the error - it means the resource is undefined + except botocore.exceptions.ClientError: + pass + else: + all_facts[key] = {} + try: + all_facts[key] = get_bucket_property(name, connection, key) + # we just pass on the error - it means the resource is undefined + except botocore.exceptions.ClientError: + pass + + return(all_facts) + + +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_location(name, connection, transform_location=False): + """ + Get bucket location and optionally transform 'null' to 'us-east-1' + """ + data = connection.get_bucket_location(Bucket=name) + + # Replace 'null' with 'us-east-1'?
+ if transform_location: + try: + if not data['LocationConstraint']: + data['LocationConstraint'] = 'us-east-1' + except KeyError: + pass + # Strip response metadata (not needed) + try: + data.pop('ResponseMetadata') + return(data) + except KeyError: + return(data) + + +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_tagging(name, connection): + """ + Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function + """ + data = connection.get_bucket_tagging(Bucket=name) + + try: + bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet']) + return(bucket_tags) + except KeyError: + # Strip response metadata (not needed) + try: + data.pop('ResponseMetadata') + return(data) + except KeyError: + return(data) + + +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +def get_bucket_property(name, connection, get_api_name): + """ + Get bucket property + """ + api_call = "get_" + get_api_name + api_function = getattr(connection, api_call) + data = api_function(Bucket=name) + + # Strip response metadata (not needed) + try: + data.pop('ResponseMetadata') + return(data) + except KeyError: + return(data) def main(): @@ -79,25 +557,73 @@ def main(): Get list of S3 buckets :return: """ + argument_spec = dict( + name=dict(type='str', default=""), + name_filter=dict(type='str', default=""), + bucket_facts=dict(type='dict', options=dict( + bucket_accelerate_configuration=dict(type='bool', default=False), + bucket_acl=dict(type='bool', default=False), + bucket_cors=dict(type='bool', default=False), + bucket_encryption=dict(type='bool', default=False), + bucket_lifecycle_configuration=dict(type='bool', default=False), + bucket_location=dict(type='bool', default=False), + bucket_logging=dict(type='bool', default=False), + bucket_notification_configuration=dict(type='bool', default=False), + bucket_ownership_controls=dict(type='bool', default=False), + bucket_policy=dict(type='bool', default=False), + bucket_policy_status=dict(type='bool', default=False), + bucket_replication=dict(type='bool', default=False), + bucket_request_payment=dict(type='bool', default=False), + bucket_tagging=dict(type='bool', default=False), + bucket_website=dict(type='bool', default=False), + public_access_block=dict(type='bool', default=False), + )), + transform_location=dict(type='bool', default=False) + ) # Ensure we have an empty dict result = {} + # Define mutually exclusive options + mutually_exclusive = [ + ['name', 'name_filter'] + ] + # Including ec2 argument spec - module = AnsibleAWSModule(argument_spec={}, supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) is_old_facts = module._name == 'aws_s3_bucket_facts' if is_old_facts: module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') + # Get parameters + name = module.params.get("name") + name_filter = module.params.get("name_filter") + requested_facts = module.params.get("bucket_facts") + transform_location = module.params.get("transform_location") + # Set up connection + connection = {} try: connection = module.client('s3') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + except
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: + module.fail_json_aws(err_code, msg='Failed to connect to AWS') + + # Get basic bucket list (name + creation date) + bucket_list = get_bucket_list(module, connection, name, name_filter) - # Gather results - result['buckets'] = get_bucket_list(module, connection) + # Add information about name/name_filter to result + if name: + result['bucket_name'] = name + elif name_filter: + result['bucket_name_filter'] = name_filter + + # Gather detailed information about buckets if requested + bucket_facts = module.params.get("bucket_facts") + if bucket_facts: + result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) + else: + result['buckets'] = bucket_list # Send exit if is_old_facts: @@ -106,5 +632,6 @@ def main(): module.exit_json(msg="Retrieved s3 info.", **result) +# MAIN if __name__ == '__main__': main() From 8cd3a2d681cc76b9545eb6f7117704024418c8d4 Mon Sep 17 00:00:00 2001 From: "Saleh A. Saber" Date: Sun, 14 Mar 2021 14:26:11 +0000 Subject: [PATCH 109/683] Gather information about ASG lifecycle hooks (#233) * Gather information about ASG lifecycle hooks for community.aws.ec2_asg_info * add a changelog fragment to the change * use fail_json_aws Co-authored-by: Saleh Abbas Co-authored-by: Mark Chappell --- ec2_asg_info.py | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 819bf6e5ab3..3c809e069be 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -143,6 +143,28 @@ returned: success type: str sample: "public-webapp-production-1" +lifecycle_hooks: + description: List of lifecycle hooks for the ASG. + returned: success + type: list + sample: [ + { + "AutoScalingGroupName": "public-webapp-production-1", + "DefaultResult": "ABANDON", + "GlobalTimeout": 172800, + "HeartbeatTimeout": 3600, + "LifecycleHookName": "instance-launch", + "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING" + }, + { + "AutoScalingGroupName": "public-webapp-production-1", + "DefaultResult": "ABANDON", + "GlobalTimeout": 172800, + "HeartbeatTimeout": 3600, + "LifecycleHookName": "instance-terminate", + "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" + } + ] load_balancer_names: description: List of load balancer names attached to the ASG.
returned: success @@ -289,6 +311,25 @@ def find_asgs(conn, module, name=None, tags=None): ], "launch_config_name": "public-webapp-production-1", "launch_configuration_name": "public-webapp-production-1", + "lifecycle_hooks": + [ + { + "AutoScalingGroupName": "public-webapp-production-1", + "DefaultResult": "ABANDON", + "GlobalTimeout": 172800, + "HeartbeatTimeout": 3600, + "LifecycleHookName": "instance-launch", + "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING" + }, + { + "AutoScalingGroupName": "public-webapp-production-1", + "DefaultResult": "ABANDON", + "GlobalTimeout": 172800, + "HeartbeatTimeout": 3600, + "LifecycleHookName": "instance-terminate", + "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" + } + ], "load_balancer_names": ["public-webapp-production-lb"], "max_size": 4, "min_size": 2, @@ -381,6 +422,12 @@ def find_asgs(conn, module, name=None, tags=None): module.fail_json_aws(e, msg="Failed to describe Target Groups") else: asg['target_group_names'] = [] + # get asg lifecycle hooks if any + try: + asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name']) + asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks") matched_asgs.append(asg) return matched_asgs From 3e42eb8d063c782f1983209b4c49824223e0f33c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pedro=20Magalh=C3=A3es?= <4622652+pjrm@users.noreply.github.com> Date: Sun, 14 Mar 2021 14:27:15 +0000 Subject: [PATCH 110/683] AWS ELB: Return empty list when no load balancer name was found (#215) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When trying to describe a LoadBalancer that doesn't exist, the module crashes. Instead of that behavior, this commit returns an empty list when no load balancer is found, allowing subsequent tasks to act on the module's output.
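The shape of the fix, condensed (helper names follow the diff below): each requested name is looked up individually, and only the LoadBalancerNotFound error code is treated as absence, so a missing load balancer contributes nothing to the result instead of raising:

    from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code

    def get_lb(connection, load_balancer_name):
        # is_boto3_error_code() builds an exception class that matches only this
        # AWS error code; any other ClientError keeps propagating.
        try:
            return connection.describe_load_balancers(
                LoadBalancerNames=[load_balancer_name],
            )['LoadBalancerDescriptions'][0]
        except is_boto3_error_code('LoadBalancerNotFound'):
            return None

    def list_elbs(connection, names):
        results = []
        for name in names:
            lb = get_lb(connection, name)
            if lb:
                results.append(lb)  # absent names are skipped, not fatal
        return results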
Co-authored-by: Pedro Magalhães --- elb_classic_lb_info.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 12a6a43771a..a1a0c39e042 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -142,7 +142,7 @@ vpc_id: vpc-c248fda4 ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( AWSRetry, camel_dict_to_snake_dict, @@ -154,14 +154,17 @@ except ImportError: pass # caught by AnsibleAWSModule +MAX_AWS_RETRIES = 5 +MAX_AWS_DELAY = 5 -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) -def list_elbs(connection, names): - paginator = connection.get_paginator('describe_load_balancers') - load_balancers = paginator.paginate(LoadBalancerNames=names).build_full_result().get('LoadBalancerDescriptions', []) + +def list_elbs(connection, load_balancer_names): results = [] - for lb in load_balancers: + for load_balancer_name in load_balancer_names: + lb = get_lb(connection, load_balancer_name) + if not lb: + continue description = camel_dict_to_snake_dict(lb) name = lb['LoadBalancerName'] instances = lb.get('Instances', []) @@ -174,13 +177,20 @@ def list_elbs(connection, names): return results -def get_lb_attributes(connection, name): - attributes = connection.describe_load_balancer_attributes(LoadBalancerName=name).get('LoadBalancerAttributes', {}) +def get_lb(connection, load_balancer_name): + try: + return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0] + except is_boto3_error_code('LoadBalancerNotFound'): + return [] + + +def get_lb_attributes(connection, load_balancer_name): + attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {}) return camel_dict_to_snake_dict(attributes) def get_tags(connection, load_balancer_name): - tags = connection.describe_tags(LoadBalancerNames=[load_balancer_name])['TagDescriptions'] + tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions'] if not tags: return {} return boto3_tag_list_to_ansible_dict(tags[0]['Tags']) @@ -194,14 +204,14 @@ def lb_instance_health(connection, load_balancer_name, instances, state): def main(): argument_spec = dict( - names={'default': [], 'type': 'list', 'elements': 'str'} + names=dict(default=[], type='list', elements='str') ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) if module._name == 'elb_classic_lb_facts': module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", date='2021-12-01', collection_name='community.aws') - connection = module.client('elb') + connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)) try: elbs = list_elbs(connection, module.params.get('names')) From 0eae0794e41d3c83ea315896d0fa989f686ce83d Mon Sep 17 00:00:00 2001 From: DAVINDER PAL Date: Mon, 15 Mar 2021 16:07:14 +0530 Subject: [PATCH 111/683] Added Tier as option to aws_ssm_parameter_store module (#305) * Added Tier as option * * added better description * added changelog Co-authored-by: Markus Bergholz Co-authored-by: Mark Chappell --- aws_ssm_parameter_store.py | 28
++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index d31a79b2bef..3d49c133048 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -65,10 +65,21 @@ choices: ['never', 'changed', 'always'] default: changed type: str + tier: + description: + - Parameter store tier type. + required: false + choices: ['Standard', 'Advanced', 'Intelligent-Tiering'] + default: Standard + type: str + version_added: 1.5.0 + author: - - Nathan Webster (@nathanwebsterdotme) - - Bill Wang (@ozbillwang) - - Michael De La Rue (@mikedlr) + - "Davinder Pal (@116davinder) " + - "Nathan Webster (@nathanwebsterdotme)" + - "Bill Wang (@ozbillwang) " + - "Michael De La Rue (@mikedlr)" + extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -111,6 +122,13 @@ value: "Test1234" overwrite_value: "always" +- name: Create or update key/value pair in aws parameter store with tier + community.aws.aws_ssm_parameter_store: + name: "Hello" + description: "This is your first key" + value: "World" + tier: "Advanced" + - name: recommend to use with aws_ssm lookup plugin ansible.builtin.debug: msg: "{{ lookup('amazon.aws.aws_ssm', 'hello') }}" @@ -157,7 +175,8 @@ def create_update_parameter(client, module): args = dict( Name=module.params.get('name'), Value=module.params.get('value'), - Type=module.params.get('string_type') + Type=module.params.get('string_type'), + Tier=module.params.get('tier') ) if (module.params.get('overwrite_value') in ("always", "changed")): @@ -237,6 +256,7 @@ def setup_module_object(): decryption=dict(default=True, type='bool'), key_id=dict(default="alias/aws/ssm"), overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), + tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']), ) return AnsibleAWSModule( From 0847bf1524b6669fa0b2a5c9c6139baadceba032 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 15 Mar 2021 17:29:27 +0100 Subject: [PATCH 112/683] Attempt to improve the stability of ec2_vpc_vgw and ec2_vpc_vpn (#162) * Add a custom Retry so we can retry when we receive 'The maximum number of mutating objects has been reached' * Update ec2_vpc_vpn unit test to use a connection with an AWSRetry decorator * changelog --- ec2_vpc_vgw.py | 78 +++++++++++++++++++++++++++++--------------------- ec2_vpc_vpn.py | 57 ++++++++++++++++++++++++++---------- 2 files changed, 88 insertions(+), 47 deletions(-) diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index ce68833bcfc..4dd9a2cb456 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -124,6 +124,29 @@ from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes' +# we need to look at the mesage to tell the difference. 
+class VGWRetry(AWSRetry): + @staticmethod + def status_code_from_exception(error): + return (error.response['Error']['Code'], error.response['Error']['Message'],) + + @staticmethod + def found(response_codes, catch_extra_error_codes=None): + retry_on = ['The maximum number of mutating objects has been reached.'] + + if catch_extra_error_codes: + retry_on.extend(catch_extra_error_codes) + if not isinstance(response_codes, tuple): + response_codes = (response_codes,) + + for code in response_codes: + if super().found(response_codes, catch_extra_error_codes): + return True + + return False + + def get_vgw_info(vgws): if not isinstance(vgws, list): return @@ -174,7 +197,7 @@ def attach_vgw(client, module, vpn_gateway_id): # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State # as available several seconds before actually permitting a new attachment. # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185 - response = AWSRetry.jittered_backoff(retries=5, + response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=['InvalidParameterValue'] )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) @@ -193,16 +216,13 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): params = dict() params['VpcId'] = module.params.get('vpc_id') - if vpc_id: - try: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to detach gateway') - else: - try: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to detach gateway') + try: + if vpc_id: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True) + else: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, 'Failed to detach gateway') status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') if not status_achieved: @@ -219,7 +239,7 @@ def create_vgw(client, module): params['AmazonSideAsn'] = module.params.get('asn') try: - response = client.create_vpn_gateway(**params) + response = client.create_vpn_gateway(aws_retry=True, **params) get_waiter( client, 'vpn_gateway_exists' ).wait( @@ -239,7 +259,7 @@ def create_vgw(client, module): def delete_vgw(client, module, vpn_gateway_id): try: - response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id) + response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to delete gateway') @@ -252,7 +272,7 @@ def create_tags(client, module, vpn_gateway_id): params = dict() try: - response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module)) + response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to add tags") @@ -263,16 +283,13 @@ def create_tags(client, module, vpn_gateway_id): def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None): params 
= dict() - if tags_to_delete: - try: - response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to delete tags') - else: - try: - response = client.delete_tags(Resources=[vpn_gateway_id]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to delete all tags') + try: + if tags_to_delete: + response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete, aws_retry=True) + else: + response = client.delete_tags(Resources=[vpn_gateway_id], aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to remove tags from gateway') result = response return result @@ -294,8 +311,8 @@ def find_tags(client, module, resource_id=None): if resource_id: try: - response = client.describe_tags(Filters=[ - {'Name': 'resource-id', 'Values': [resource_id]} + response = client.describe_tags(aws_retry=True, Filters=[ + {'Name': 'resource-id', 'Values': [resource_id]}, ]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to describe tags searching by resource') @@ -343,7 +360,7 @@ def find_vpc(client, module): if params['vpc_id']: try: - response = client.describe_vpcs(VpcIds=[params['vpc_id']]) + response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to describe VPC') @@ -363,7 +380,7 @@ def find_vgw(client, module, vpn_gateway_id=None): if module.params.get('state') == 'present': params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) try: - response = client.describe_vpn_gateways(**params) + response = client.describe_vpn_gateways(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to describe gateway using filters') @@ -549,10 +566,7 @@ def main(): state = module.params.get('state').lower() - try: - client = module.client('ec2') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10)) if state == 'present': (changed, results) = ensure_vgw_present(client, module) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 6e18e724258..56bb4e9b8fd 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -298,14 +298,13 @@ vpn_connection_id: vpn-781e0e19 """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict, - compare_aws_tags, - ansible_dict_to_boto3_tag_list, -) +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags try: from botocore.exceptions import BotoCoreError, ClientError, WaiterError @@ -319,6 +318,29 @@ def __init__(self, msg, exception=None): self.exception = exception +# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes' +# we need to look at the mesage to tell the difference. +class VPNRetry(AWSRetry): + @staticmethod + def status_code_from_exception(error): + return (error.response['Error']['Code'], error.response['Error']['Message'],) + + @staticmethod + def found(response_codes, catch_extra_error_codes=None): + retry_on = ['The maximum number of mutating objects has been reached.'] + + if catch_extra_error_codes: + retry_on.extend(catch_extra_error_codes) + if not isinstance(response_codes, tuple): + response_codes = (response_codes,) + + for code in response_codes: + if super().found(response_codes, catch_extra_error_codes): + return True + + return False + + def find_connection(connection, module_params, vpn_connection_id=None): ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, or raise an error if there were multiple viable connections. ''' @@ -342,10 +364,11 @@ def find_connection(connection, module_params, vpn_connection_id=None): # see if there is a unique matching connection try: if vpn_connection_id: - existing_conn = connection.describe_vpn_connections(VpnConnectionIds=vpn_connection_id, + existing_conn = connection.describe_vpn_connections(aws_retry=True, + VpnConnectionIds=vpn_connection_id, Filters=formatted_filter) else: - existing_conn = connection.describe_vpn_connections(Filters=formatted_filter) + existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter) except (BotoCoreError, ClientError) as e: raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e) @@ -356,7 +379,8 @@ def find_connection(connection, module_params, vpn_connection_id=None): def add_routes(connection, vpn_connection_id, routes_to_add): for route in routes_to_add: try: - connection.create_vpn_connection_route(VpnConnectionId=vpn_connection_id, + connection.create_vpn_connection_route(aws_retry=True, + VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route) except (BotoCoreError, ClientError) as e: raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), @@ -366,7 +390,8 @@ def add_routes(connection, vpn_connection_id, routes_to_add): def remove_routes(connection, vpn_connection_id, routes_to_remove): for route in routes_to_remove: try: - connection.delete_vpn_connection_route(VpnConnectionId=vpn_connection_id, + connection.delete_vpn_connection_route(aws_retry=True, + VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route) except (BotoCoreError, ClientError) as e: raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), @@ -504,7 +529,7 @@ def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_ def delete_connection(connection, vpn_connection_id, delay, max_attempts): """ Deletes a VPN connection """ try: - connection.delete_vpn_connection(VpnConnectionId=vpn_connection_id) + connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id) connection.get_waiter('vpn_connection_deleted').wait( VpnConnectionIds=[vpn_connection_id], WaiterConfig={'Delay': delay, 
'MaxAttempts': max_attempts} @@ -519,7 +544,8 @@ def delete_connection(connection, vpn_connection_id, delay, max_attempts): def add_tags(connection, vpn_connection_id, add): try: - connection.create_tags(Resources=[vpn_connection_id], + connection.create_tags(aws_retry=True, + Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), @@ -530,7 +556,8 @@ def remove_tags(connection, vpn_connection_id, remove): # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] key_dict_list = [{'Key': tag} for tag in remove] try: - connection.delete_tags(Resources=[vpn_connection_id], + connection.delete_tags(aws_retry=True, + Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), @@ -755,7 +782,7 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) state = module.params.get('state') parameters = dict(module.params) From a6cd3955eec64bffb7fe7d47985def085b4de09e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 16 Mar 2021 06:35:55 +0100 Subject: [PATCH 113/683] ec2_launch_template - scrub None parameters from what we'll pass to create_launch_config (#413) * ec2_launch_template - scrub None parameters from what we'll pass to create_launch_config * tests * changelog --- ec2_launch_template.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index c1ce6d3dcb8..a3f203a48f4 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -398,6 +398,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -512,6 +513,7 @@ def create_or_update(module, template_options): template, template_versions = existing_templates(module) out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) + lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) if not (template or template_versions): # create a full new one try: From 6121c2a541e570b523644191aa12c251016cff5c Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Tue, 16 Mar 2021 06:45:22 +0100 Subject: [PATCH 114/683] ec2_vpc_igw: increase integration tests coverage and code cleaning (#386) * Internet gateway: add missing integration tests and code check * Add CamelCase tags integration test * Removed check_input_tags() because it is redundant: ansible_dict_to_boto3_tag_list() already performs a to_native() conversion to strings * Add additional integration tests for ec2_vpc_igw_info module Signed-off-by: Alina Buzachis * Internet Gateway - integration tests * Fix Internet Gateways search by tags * * Apply reviewer suggestions * Better error handling for the waiter --- ec2_vpc_igw.py | 15 ++------------
1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 3d8d9f3bf25..bef92a71fcf 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -92,8 +92,6 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils.six import string_types - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -141,13 +139,6 @@ def get_matching_igw(self, vpc_id): return igw - def check_input_tags(self, tags): - if tags is None: - return - nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)] - if nonstring_tags: - self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags)) - def ensure_tags(self, igw_id, tags, purge_tags): final_tags = [] @@ -167,7 +158,6 @@ def ensure_tags(self, igw_id, tags, purge_tags): if to_update: try: if self._check_mode: - # update tags final_tags.update(to_update) else: self._connection.create_tags( @@ -183,7 +173,6 @@ def ensure_tags(self, igw_id, tags, purge_tags): if to_delete: try: if self._check_mode: - # update tags for key in to_delete: del final_tags[key] else: @@ -233,8 +222,6 @@ def ensure_igw_absent(self, vpc_id): return self._results def ensure_igw_present(self, vpc_id, tags, purge_tags): - self.check_input_tags(tags) - igw = self.get_matching_igw(vpc_id) if igw is None: @@ -253,6 +240,8 @@ def ensure_igw_present(self, vpc_id, tags, purge_tags): igw = camel_dict_to_snake_dict(response['InternetGateway']) self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) self._results['changed'] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="No Internet Gateway exists.") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') From a49cca262c9a9a3d7f7657ccaeb6b1103425a7fb Mon Sep 17 00:00:00 2001 From: Matthew Hellmer Date: Tue, 16 Mar 2021 06:57:26 -0400 Subject: [PATCH 115/683] Added state_machine_arn when unchanged. (#302) * Added state_machine_arn when unchanged. * Add integration test * changelog * Shorten role/function names in integration tests Co-authored-by: Mark Chappell --- aws_step_functions_state_machine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index 5ab13baa76c..be9d594d7c2 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -123,7 +123,7 @@ def manage_state_machine(state, sfn_client, module): remove(state_machine_arn, sfn_client, module) check_mode(module, msg='State is up-to-date.') - module.exit_json(changed=False) + module.exit_json(changed=False, state_machine_arn=state_machine_arn) def create(sfn_client, module): From 3cea8059427fee945439e919b1eba56a76dc2c62 Mon Sep 17 00:00:00 2001 From: "David M. Lee" Date: Tue, 16 Mar 2021 08:20:57 -0500 Subject: [PATCH 116/683] Fix UnboundLocalError in sqs_queue (#389) * Fix UnboundLocalError in sqs_queue The variable `existing_value` is nowhere to be found, but looks like this might have been missed in a rename. Changing to `value`. 
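A condensed sketch of the corrected comparison (function name illustrative): SQS reports every attribute as a string, so a desired boolean has to be lowered to its string form before diffing — the old code lowered a variable (existing_value) that no longer existed, hence the UnboundLocalError:

    def attribute_needs_update(new_value, current_value):
        # SQS attribute values arrive as strings ('true', '300', ...), so
        # normalize booleans to their lowercase string form before comparing.
        if isinstance(new_value, bool):
            new_value = str(new_value).lower()
            current_value = str(current_value).lower()
        return new_value != current_value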
Fixes #172 * integration test * changelog Co-authored-by: Mark Chappell --- sqs_queue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqs_queue.py b/sqs_queue.py index b0565c6c8d0..b76cdb31410 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -375,7 +375,7 @@ def update_sqs_queue(module, client, queue_url): if isinstance(new_value, bool): new_value = str(new_value).lower() - existing_value = str(existing_value).lower() + value = str(value).lower() if new_value == value: continue From afa20aaf184dc951df9eedc8b54b2f21cfd72bdd Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Tue, 16 Mar 2021 23:28:33 +0100 Subject: [PATCH 117/683] ec2_vpc_nat_gateway cleanup (#445) * ec2_vpc_nat_gateway overall cleanup * use custom waiters to manage NAT gateway states (deleted and available) * improve error handling * improve documentation examples * code cleaning Signed-off-by: Alina Buzachis --- ec2_vpc_nat_gateway.py | 578 ++++++++++++++++++----------------- 1 file changed, 246 insertions(+), 332 deletions(-) diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index b85d8ed97e8..87511fa2582 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: ec2_vpc_nat_gateway version_added: 1.0.0 @@ -50,7 +50,7 @@ type: bool tags: description: - - A dict of tags to apply to the internet gateway. + - A dict of tags to apply to the NAT gateway. - To remove all tags set I(tags={}) and I(purge_tags=true). aliases: [ 'resource_tags' ] type: dict @@ -88,13 +88,13 @@ - Allen Sanabria (@linuxdynasty) - Jon Hadfield (@jonhadfield) - Karen Cheng (@Etherdaemon) + - Alina Buzachis (@alinabuzachis) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create new nat gateway with client token. @@ -167,19 +167,30 @@ wait_timeout: 300 region: ap-southeast-2 -- name: Create new nat gateway using an allocation-id and tags. +- name: Create new nat gateway using allocation-id and tags. community.aws.ec2_vpc_nat_gateway: state: present subnet_id: subnet-12345678 allocation_id: eipalloc-12345678 region: ap-southeast-2 tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: new_nat_gateway + +- name: Update tags without purge + community.aws.ec2_vpc_nat_gateway: + subnet_id: subnet-12345678 + allocation_id: eipalloc-12345678 + region: ap-southeast-2 + purge_tags: no + tags: + Tag3: tag3 + wait: yes + register: update_tags_nat_gateway ''' -RETURN = ''' +RETURN = r''' create_time: description: The ISO 8601 date time format in UTC. returned: In all cases.
@@ -218,64 +229,79 @@ type: str sample: [ { - 'public_ip': '52.52.52.52', - 'network_interface_id': 'eni-12345', - 'private_ip': '10.0.0.100', - 'allocation_id': 'eipalloc-12345' + 'public_ip': '52.52.52.52', + 'network_interface_id': 'eni-12345', + 'private_ip': '10.0.0.100', + 'allocation_id': 'eipalloc-12345' } ] ''' import datetime -import random -import time try: import botocore except ImportError: pass # Handled by AnsibleAWSModule - from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native @AWSRetry.jittered_backoff(retries=10) def _describe_nat_gateways(client, **params): - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] + try: + paginator = client.get_paginator('describe_nat_gateways') + return paginator.paginate(**params).build_full_result()['NatGateways'] + except is_boto3_error_code('InvalidNatGatewayID.NotFound'): + return None -def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, - states=None, check_mode=False): +def wait_for_status(client, module, waiter_name, nat_gateway_id): + wait_timeout = module.params.get('wait_timeout') + try: + waiter = get_waiter(client, waiter_name) + attempts = 1 + int(wait_timeout / waiter.config.delay) + waiter.wait( + NatGatewayIds=[nat_gateway_id], + WaiterConfig={'MaxAttempts': attempts} + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for NAT gateway state to update.") + + +def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states=None): """Retrieve a list of NAT Gateways Args: client (botocore.client.EC2): Boto3 client + module: AnsibleAWSModule class instance Kwargs: subnet_id (str): The subnet_id the nat resides in. - nat_gateway_id (str): The Amazon nat id. + nat_gateway_id (str): The Amazon NAT id. states (list): States available (pending, failed, available, deleting, and deleted) default=None Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) 
>>> subnet_id = 'subnet-12345678' - >>> get_nat_gateways(client, subnet_id) + >>> get_nat_gateways(client, module, subnet_id) [ true, "", { - "nat_gateway_id": "nat-123456789", - "subnet_id": "subnet-123456789", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", "nat_gateway_addresses": [ { "public_ip": "55.55.55.55", @@ -284,19 +310,20 @@ def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, "allocation_id": "eipalloc-1234567" } ], + "nat_gateway_id": "nat-123456789", "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", + "subnet_id": "subnet-123456789", + "tags": {}, "vpc_id": "vpc-12345678" } Returns: Tuple (bool, str, list) """ + params = dict() - err_msg = "" - gateways_retrieved = False existing_gateways = list() + if not states: states = ['available', 'pending'] if nat_gateway_id: @@ -318,100 +345,17 @@ def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, if gateways: for gw in gateways: existing_gateways.append(camel_dict_to_snake_dict(gw)) - gateways_retrieved = True - except botocore.exceptions.ClientError as e: - err_msg = str(e) - - return gateways_retrieved, err_msg, existing_gateways - - -def wait_for_status(client, wait_timeout, nat_gateway_id, status, - check_mode=False): - """Wait for the NAT Gateway to reach a status - Args: - client (botocore.client.EC2): Boto3 client - wait_timeout (int): Number of seconds to wait, until this timeout is reached. - nat_gateway_id (str): The Amazon nat id. - status (str): The status to wait for. - examples. status=available, status=deleted - - Basic Usage: - >>> client = boto3.client('ec2') - >>> subnet_id = 'subnet-12345678' - >>> allocation_id = 'eipalloc-12345678' - >>> wait_for_status(client, subnet_id, allocation_id) - [ - true, - "", - { - "nat_gateway_id": "nat-123456789", - "subnet_id": "subnet-1234567", - "nat_gateway_addresses": [ - { - "public_ip": "55.55.55.55", - "network_interface_id": "eni-1234567", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-12345678" - } - ], - "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "vpc_id": "vpc-12345677" - } - ] - - Returns: - Tuple (bool, str, dict) - """ - polling_increment_secs = 5 - wait_timeout = time.time() + wait_timeout - status_achieved = False - nat_gateway = dict() - states = ['pending', 'failed', 'available', 'deleting', 'deleted'] - err_msg = "" - - while wait_timeout > time.time(): - try: - gws_retrieved, err_msg, nat_gateways = ( - get_nat_gateways( - client, nat_gateway_id=nat_gateway_id, - states=states, check_mode=check_mode - ) - ) - if gws_retrieved and nat_gateways: - nat_gateway = nat_gateways[0] - - if nat_gateway.get('state') == status: - status_achieved = True - break - - elif nat_gateway.get('state') == 'failed': - err_msg = nat_gateway.get('failure_message') - break - - elif nat_gateway.get('state') == 'pending': - if 'failure_message' in nat_gateway: - err_msg = nat_gateway.get('failure_message') - status_achieved = False - break - - else: - time.sleep(polling_increment_secs) - - except botocore.exceptions.ClientError as e: - err_msg = str(e) - - if not status_achieved: - err_msg = "Wait time out reached, while waiting for results" + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) - return status_achieved, err_msg, nat_gateway + return existing_gateways -def 
gateway_in_subnet_exists(client, subnet_id, allocation_id=None, - check_mode=False): +def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None): """Retrieve all NAT Gateways for a subnet. Args: + client (botocore.client.EC2): Boto3 client + module: AnsibleAWSModule class instance subnet_id (str): The subnet_id the nat resides in. Kwargs: @@ -420,14 +364,15 @@ def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) >>> subnet_id = 'subnet-1234567' >>> allocation_id = 'eipalloc-1234567' - >>> gateway_in_subnet_exists(client, subnet_id, allocation_id) + >>> gateway_in_subnet_exists(client, module, subnet_id, allocation_id) ( [ { - "nat_gateway_id": "nat-123456789", - "subnet_id": "subnet-123456789", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", "nat_gateway_addresses": [ { "public_ip": "55.55.55.55", @@ -436,9 +381,10 @@ def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, "allocation_id": "eipalloc-1234567" } ], + "nat_gateway_id": "nat-123456789", "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", + "subnet_id": "subnet-123456789", + "tags": {}, "vpc_id": "vpc-1234567" } ], @@ -448,57 +394,53 @@ def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, Returns: Tuple (list, bool) """ + allocation_id_exists = False gateways = [] states = ['available', 'pending'] - gws_retrieved, err_msg, gws = ( - get_nat_gateways( - client, subnet_id, states=states, check_mode=check_mode - ) - ) + gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states)) - if not gws_retrieved: - return gateways, allocation_id_exists - for gw in gws: - for address in gw['nat_gateway_addresses']: - if allocation_id: - if address.get('allocation_id') == allocation_id: - allocation_id_exists = True + if gws_retrieved: + for gw in gws_retrieved: + for address in gw['nat_gateway_addresses']: + if allocation_id: + if address.get('allocation_id') == allocation_id: + allocation_id_exists = True + gateways.append(gw) + else: gateways.append(gw) - else: - gateways.append(gw) return gateways, allocation_id_exists -def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): +def get_eip_allocation_id_by_address(client, module, eip_address): """Release an EIP from your EIP Pool Args: client (botocore.client.EC2): Boto3 client + module: AnsibleAWSModule class instance eip_address (str): The Elastic IP Address of the EIP. - Kwargs: - check_mode (bool): if set to true, do not run anything and - falsify the results. - Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) 
>>> eip_address = '52.87.29.36' - >>> get_eip_allocation_id_by_address(client, eip_address) + >>> get_eip_allocation_id_by_address(client, module, eip_address) 'eipalloc-36014da3' Returns: Tuple (str, str) """ + params = { 'PublicIps': [eip_address], } allocation_id = None - err_msg = "" + msg = '' try: allocations = client.describe_addresses(aws_retry=True, **params)['Addresses'] + if len(allocations) == 1: allocation = allocations[0] else: @@ -506,135 +448,137 @@ def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): if allocation: if allocation.get('Domain') != 'vpc': - err_msg = ( + msg = ( "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP" .format(eip_address) ) else: allocation_id = allocation.get('AllocationId') - else: - err_msg = ( - "EIP {0} does not exist".format(eip_address) - ) - except botocore.exceptions.ClientError as e: - err_msg = str(e) + except is_boto3_error_code('InvalidAddress.Malformed') as e: + module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address)) + except is_boto3_error_code('InvalidAddress.NotFound') as e: # pylint: disable=duplicate-except + msg = ( + "EIP {0} does not exist".format(eip_address) + ) + allocation_id = None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) - return allocation_id, err_msg + return allocation_id, msg -def allocate_eip_address(client, check_mode=False): +def allocate_eip_address(client, module): """Release an EIP from your EIP Pool Args: client (botocore.client.EC2): Boto3 client - - Kwargs: - check_mode (bool): if set to true, do not run anything and - falsify the results. + module: AnsibleAWSModule class instance Basic Usage: >>> client = boto3.client('ec2') - >>> allocate_eip_address(client) + >>> module = AnsibleAWSModule(...) + >>> allocate_eip_address(client, module) True Returns: Tuple (bool, str) """ - ip_allocated = False + new_eip = None - err_msg = '' + msg = '' params = { 'Domain': 'vpc', } - if check_mode: + if module.check_mode: ip_allocated = True new_eip = None - return ip_allocated, err_msg, new_eip + return ip_allocated, msg, new_eip try: new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] ip_allocated = True - err_msg = 'eipalloc id {0} created'.format(new_eip) - - except botocore.exceptions.ClientError as e: - err_msg = str(e) + msg = 'eipalloc id {0} created'.format(new_eip) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) - return ip_allocated, err_msg, new_eip + return ip_allocated, msg, new_eip -def release_address(client, allocation_id, check_mode=False): +def release_address(client, module, allocation_id): """Release an EIP from your EIP Pool Args: client (botocore.client.EC2): Boto3 client + module: AnsibleAWSModule class instance allocation_id (str): The eip Amazon identifier. - Kwargs: - check_mode (bool): if set to true, do not run anything and - falsify the results. - Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) 
>>> allocation_id = "eipalloc-123456" - >>> release_address(client, allocation_id) + >>> release_address(client, module, allocation_id) True Returns: Boolean, string """ - err_msg = '' - if check_mode: + msg = '' + + if module.check_mode: return True, '' ip_released = False + try: client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id]) - except botocore.exceptions.ClientError as e: + except is_boto3_error_code('InvalidAllocationID.NotFound') as e: # IP address likely already released # Happens with gateway in 'deleted' state that # still lists associations - return True, str(e) + return True, e + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + try: client.release_address(aws_retry=True, AllocationId=allocation_id) ip_released = True - except botocore.exceptions.ClientError as e: - err_msg = str(e) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) - return ip_released, err_msg + return ip_released, msg def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_token=None, - wait=False, wait_timeout=0, if_exist_do_not_create=False, - check_mode=False): + wait=False): """Create an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client - subnet_id (str): The subnet_id the nat resides in. - allocation_id (str): The eip Amazon identifier. + module: AnsibleAWSModule class instance + subnet_id (str): The subnet_id the nat resides in + allocation_id (str): The eip Amazon identifier + tags (dict): Tags to associate to the NAT gateway + purge_tags (bool): If true, remove tags not listed in I(tags) + type: bool Kwargs: - if_exist_do_not_create (bool): if a nat gateway already exists in this - subnet, than do not create another one. - default = False wait (bool): Wait for the nat to be in the deleted state before returning. default = False - wait_timeout (int): Number of seconds to wait, until this timeout is reached. - default = 0 client_token (str): default = None Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) 
>>> subnet_id = 'subnet-1234567' >>> allocation_id = 'eipalloc-1234567' - >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + >>> create(client, module, subnet_id, allocation_id, wait=True) [ true, "", { - "nat_gateway_id": "nat-123456789", - "subnet_id": "subnet-1234567", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", "nat_gateway_addresses": [ { "public_ip": "55.55.55.55", @@ -643,9 +587,10 @@ def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_to "allocation_id": "eipalloc-1234567" } ], + "nat_gateway_id": "nat-123456789", "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", + "subnet_id": "subnet-1234567", + "tags": {}, "vpc_id": "vpc-1234567" } ] @@ -653,73 +598,67 @@ def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_to Returns: Tuple (bool, str, list) """ + params = { 'SubnetId': subnet_id, 'AllocationId': allocation_id } request_time = datetime.datetime.utcnow() changed = False - success = False token_provided = False - err_msg = "" result = {} + msg = '' if client_token: token_provided = True params['ClientToken'] = client_token - if check_mode: - success = True + if module.check_mode: changed = True - return success, changed, err_msg, result + return changed, result, msg try: result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]) - success = True changed = True create_time = result['create_time'].replace(tzinfo=None) if token_provided and (request_time > create_time): changed = False - elif wait: - success, err_msg, result = ( - wait_for_status( - client, wait_timeout, result['nat_gateway_id'], - 'available', check_mode=check_mode - ) + + elif wait and result.get('state') != 'available': + wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id']) + + # Get new result + result = camel_dict_to_snake_dict( + _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0] ) - if success: - err_msg = ( - 'NAT gateway {0} created'.format(result['nat_gateway_id']) - ) - result['tags'], tags_update_exists = ensure_tags( + + result['tags'], _tags_update_exists = ensure_tags( client, module, nat_gw_id=result['nat_gateway_id'], tags=tags, - purge_tags=purge_tags, check_mode=check_mode + purge_tags=purge_tags ) - except is_boto3_error_code('IdempotentParameterMismatch'): - err_msg = ( - 'NAT Gateway does not support update and token has already been provided: ' + err_msg + except is_boto3_error_code('IdempotentParameterMismatch') as e: + msg = ( + 'NAT Gateway does not support update and token has already been provided:' + e ) - success = False - changed = False - result = None - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - err_msg = to_native(e) - success = False changed = False result = None + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) - return success, changed, err_msg, result + return changed, result, msg def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None, - if_exist_do_not_create=False, wait=False, wait_timeout=0, - client_token=None, check_mode=False): + if_exist_do_not_create=False, wait=False, client_token=None): """Create an Amazon NAT Gateway. 
Args: client (botocore.client.EC2): Boto3 client - subnet_id (str): The subnet_id the nat resides in. + module: AnsibleAWSModule class instance + subnet_id (str): The subnet_id the nat resides in + tags (dict): Tags to associate to the NAT gateway + purge_tags (bool): If true, remove tags not listed in I(tags) Kwargs: allocation_id (str): The EIP Amazon identifier. @@ -731,22 +670,21 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, default = False wait (bool): Wait for the nat to be in the deleted state before returning. default = False - wait_timeout (int): Number of seconds to wait, until this timeout is reached. - default = 0 client_token (str): default = None Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) >>> subnet_id = 'subnet-w4t12897' >>> allocation_id = 'eipalloc-36014da3' - >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + >>> pre_create(client, module, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True) [ true, "", { - "nat_gateway_id": "nat-03835afb6e31df79b", - "subnet_id": "subnet-w4t12897", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", "nat_gateway_addresses": [ { "public_ip": "52.87.29.36", @@ -755,9 +693,10 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, "allocation_id": "eipalloc-36014da3" } ], + "nat_gateway_id": "nat-03835afb6e31df79b", "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", + "subnet_id": "subnet-w4t12897", + "tags": {}, "vpc_id": "vpc-w68571b5" } ] @@ -765,105 +704,104 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, Returns: Tuple (bool, bool, str, list) """ - success = False + changed = False - err_msg = "" + msg = '' results = {} if not allocation_id and not eip_address: - existing_gateways, allocation_id_exists = (gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)) + existing_gateways, allocation_id_exists = (gateway_in_subnet_exists(client, module, subnet_id)) if len(existing_gateways) > 0 and if_exist_do_not_create: results = existing_gateways[0] - results['tags'], tags_update_exists = ensure_tags(client, module, results['nat_gateway_id'], tags, purge_tags, check_mode) + results['tags'], tags_update_exists = ensure_tags( + client, module, results['nat_gateway_id'], tags, purge_tags + ) if tags_update_exists: - success = True changed = True - return success, changed, err_msg, results + return changed, msg, results - success = True changed = False - err_msg = ( + msg = ( 'NAT Gateway {0} already exists in subnet_id {1}' .format( existing_gateways[0]['nat_gateway_id'], subnet_id ) ) - return success, changed, err_msg, results + return changed, msg, results else: - success, err_msg, allocation_id = ( - allocate_eip_address(client, check_mode=check_mode) + changed, msg, allocation_id = ( + allocate_eip_address(client, module) ) - if not success: - return success, 'False', err_msg, dict() + if not changed: + return changed, msg, dict() elif eip_address or allocation_id: if eip_address and not allocation_id: - allocation_id, err_msg = ( + allocation_id, msg = ( get_eip_allocation_id_by_address( - client, eip_address, check_mode=check_mode + client, module, eip_address ) ) if not allocation_id: - success = False changed = False - return success, changed, err_msg, dict() + return changed, msg, dict() 
existing_gateways, allocation_id_exists = ( gateway_in_subnet_exists( - client, subnet_id, allocation_id, check_mode=check_mode + client, module, subnet_id, allocation_id ) ) if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): results = existing_gateways[0] - results['tags'], tags_update_exists = ensure_tags(client, module, results['nat_gateway_id'], tags, purge_tags, check_mode) + results['tags'], tags_update_exists = ensure_tags( + client, module, results['nat_gateway_id'], tags, purge_tags + ) + if tags_update_exists: - success = True changed = True - return success, changed, err_msg, results + return changed, msg, results - success = True changed = False - err_msg = ( + msg = ( 'NAT Gateway {0} already exists in subnet_id {1}' .format( existing_gateways[0]['nat_gateway_id'], subnet_id ) ) - return success, changed, err_msg, results + return changed, msg, results - success, changed, err_msg, results = create( - client, module, subnet_id, allocation_id, tags, purge_tags, client_token, - wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode + changed, results, msg = create( + client, module, subnet_id, allocation_id, tags, purge_tags, client_token, wait ) - return success, changed, err_msg, results + return changed, msg, results -def remove(client, nat_gateway_id, wait=False, wait_timeout=0, - release_eip=False, check_mode=False): +def remove(client, module, nat_gateway_id, wait=False, release_eip=False): """Delete an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client - nat_gateway_id (str): The Amazon nat id. + module: AnsibleAWSModule class instance + nat_gateway_id (str): The Amazon nat id Kwargs: wait (bool): Wait for the nat to be in the deleted state before returning. - wait_timeout (int): Number of seconds to wait, until this timeout is reached. release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc. Basic Usage: >>> client = boto3.client('ec2') + >>> module = AnsibleAWSModule(...) 
>>> nat_gw_id = 'nat-03835afb6e31df79b' - >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True) + >>> remove(client, module, nat_gw_id, wait=True, release_eip=True) [ true, "", { - "nat_gateway_id": "nat-03835afb6e31df79b", - "subnet_id": "subnet-w4t12897", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", "nat_gateway_addresses": [ { "public_ip": "52.87.29.36", @@ -872,9 +810,10 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0, "allocation_id": "eipalloc-36014da3" } ], + "nat_gateway_id": "nat-03835afb6e31df79b", "state": "deleted", - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", + "subnet_id": "subnet-w4t12897", + "tags": {}, "vpc_id": "vpc-w68571b5" } ] @@ -882,75 +821,65 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0, Returns: Tuple (bool, str, list) """ + params = { 'NatGatewayId': nat_gateway_id } - success = False changed = False - err_msg = "" results = {} states = ['pending', 'available'] + msg = '' - if check_mode: + if module.check_mode: changed = True - success = True - return success, changed, err_msg, results + return changed, msg, results try: - exist, err_msg, gw = ( + gw_list = ( get_nat_gateways( - client, nat_gateway_id=nat_gateway_id, - states=states, check_mode=check_mode + client, module, nat_gateway_id=nat_gateway_id, + states=states ) ) - if exist and len(gw) == 1: - results = gw[0] - client.delete_nat_gateway(aws_retry=True, **params) + if len(gw_list) == 1: + results = gw_list[0] + client.delete_nat_gateway(aws_retry=True, **params) allocation_id = ( results['nat_gateway_addresses'][0]['allocation_id'] ) changed = True - success = True - err_msg = ( + msg = ( 'NAT gateway {0} is in a deleting state. 
Delete was successful' .format(nat_gateway_id) ) - if wait: - status_achieved, err_msg, results = ( - wait_for_status( - client, wait_timeout, nat_gateway_id, 'deleted', - check_mode=check_mode - ) - ) - if status_achieved: - err_msg = ( - 'NAT gateway {0} was deleted successfully' - .format(nat_gateway_id) - ) + if wait and results.get('state') != 'deleted': + wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id) - except botocore.exceptions.ClientError as e: - err_msg = str(e) + # Get new results + results = camel_dict_to_snake_dict( + _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) if release_eip: - eip_released, eip_err = ( - release_address(client, allocation_id, check_mode)) + eip_released, msg = ( + release_address(client, module, allocation_id)) if not eip_released: - err_msg = ( - "{0}: Failed to release EIP {1}: {2}" - .format(err_msg, allocation_id, eip_err) + module.fail_json( + msg="Failed to release EIP {0}: {1}".format(allocation_id, msg) ) - success = False - return success, changed, err_msg, results + return changed, msg, results -def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode): +def ensure_tags(client, module, nat_gw_id, tags, purge_tags): final_tags = [] changed = False - if check_mode and nat_gw_id is None: + if module.check_mode and nat_gw_id is None: # We can't describe tags without an EIP id, we might get here when creating a new EIP in check_mode return final_tags, changed @@ -968,8 +897,7 @@ def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode): if to_update: try: - if check_mode: - # update tags + if module.check_mode: final_tags.update(to_update) else: client.create_tags( @@ -977,15 +905,13 @@ def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode): Resources=[nat_gw_id], Tags=ansible_dict_to_boto3_tag_list(to_update) ) - changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't create tags") if to_delete: try: - if check_mode: - # update tags + if module.check_mode: for key in to_delete: del final_tags[key] else: @@ -994,12 +920,11 @@ def ensure_tags(client, module, nat_gw_id, tags, purge_tags, check_mode): tags_list.append({'Key': key}) client.delete_tags(aws_retry=True, Resources=[nat_gw_id], Tags=tags_list) - changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't delete tags") - if not check_mode and (to_update or to_delete): + if not module.check_mode and (to_update or to_delete): try: response = client.describe_tags(aws_retry=True, Filters=filters) final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) @@ -1024,6 +949,7 @@ def main(): tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, type='bool'), ) + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, @@ -1035,13 +961,11 @@ def main(): ) state = module.params.get('state').lower() - check_mode = module.check_mode subnet_id = module.params.get('subnet_id') allocation_id = module.params.get('allocation_id') eip_address = module.params.get('eip_address') nat_gateway_id = module.params.get('nat_gateway_id') wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') release_eip = module.params.get('release_eip') client_token = 
module.params.get('client_token') if_exist_do_not_create = module.params.get('if_exist_do_not_create') @@ -1051,36 +975,26 @@ def main(): try: client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg='Failed to connect to AWS.') changed = False - err_msg = '' + msg = '' if state == 'present': - success, changed, err_msg, results = ( + changed, msg, results = ( pre_create( client, module, subnet_id, tags, purge_tags, allocation_id, eip_address, - if_exist_do_not_create, wait, wait_timeout, - client_token, check_mode=check_mode + if_exist_do_not_create, wait, client_token ) ) else: - success, changed, err_msg, results = ( + changed, msg, results = ( remove( - client, nat_gateway_id, wait, wait_timeout, release_eip, - check_mode=check_mode + client, module, nat_gateway_id, wait, release_eip ) ) - if not success: - results = results or {} - module.fail_json( - msg=err_msg, success=success, changed=changed, **results - ) - else: - module.exit_json( - msg=err_msg, success=success, changed=changed, **results - ) + module.exit_json(msg=msg, changed=changed, **results) if __name__ == '__main__': From 3a4b5178cad9fd3d10ced419394e8dc247433fd9 Mon Sep 17 00:00:00 2001 From: Matthew Davis <7035647+mdavis-xyz@users.noreply.github.com> Date: Thu, 18 Mar 2021 17:46:07 +1100 Subject: [PATCH 118/683] aws_acm - check mode (#477) * add integration tests for acm with check mode * add check mode to acm module --- aws_acm.py | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 25581db1a39..4f17e83e0be 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -207,7 +207,7 @@ arn: description: The ARN of the certificate in ACM type: str - returned: when I(state=present) + returned: when I(state=present) and not in check mode sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" domain_name: description: The domain name encoded within the public certificate @@ -362,30 +362,39 @@ def main(): else: module.debug("Existing certificate in ACM is different, overwriting") - # update cert in ACM - arn = acm.import_certificate(client, module, + if module.check_mode: + arn = old_cert['certificate_arn'] + # note: returned domain will be the domain of the previous cert + else: + # update cert in ACM + arn = acm.import_certificate(client, module, + certificate=module.params['certificate'], + private_key=module.params['private_key'], + certificate_chain=module.params['certificate_chain'], + arn=old_cert['certificate_arn'], + tags=tags) + domain = acm.get_domain_of_cert(client=client, module=module, arn=arn) + module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True) + else: # len(certificates) == 0 + module.debug("No certificate in ACM. 
Creating new one.") + if module.check_mode: + domain = 'example.com' + module.exit_json(certificate=dict(domain_name=domain), changed=True) + else: + arn = acm.import_certificate(client=client, + module=module, certificate=module.params['certificate'], private_key=module.params['private_key'], certificate_chain=module.params['certificate_chain'], - arn=old_cert['certificate_arn'], tags=tags) domain = acm.get_domain_of_cert(client=client, module=module, arn=arn) - module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True) + module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True) else: # state == absent for cert in certificates: - acm.delete_certificate(client, module, cert['certificate_arn']) + if not module.check_mode: + acm.delete_certificate(client, module, cert['certificate_arn']) module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0)) From ce1b7cd7536d29c7a667c477c25065cdc5e09cec Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Thu, 18 Mar 2021 17:59:02 +0100 Subject: [PATCH 119/683] ec2_vpc_nat_gateway_info: module stabilization (#472) * ec2_vpc_nat_gateway_info: stability * Catches and handles (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) on boto API calls * Add paginator * Document returned data Signed-off-by: Alina Buzachis --- ec2_vpc_nat_gateway_info.py | 102 +++++++++++++++++++++++++++++------ 1 file changed, 88 insertions(+), 14 deletions(-) diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index 7d31eeac993..a9337ecd9f8 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -30,7 +30,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = r''' @@ -69,33 +68,109 @@ ''' RETURN = r''' +changed: + description: True if listing the NAT gateways succeeds + type: bool + returned: always + sample: false result: - description: The result of the describe, converted to ansible snake case style. - See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response. - returned: success + description: + - The result of the describe, converted to ansible snake case style.
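The stabilization recipe listed in the commit message - retry with jittered backoff, paginate the describe call, and convert botocore failures into clean module errors - reduces to a few lines. A sketch using the same amazon.aws module_utils the patch imports (the gateway ID is a placeholder):

    @AWSRetry.jittered_backoff(retries=10)
    def _describe_all_nat_gateways(client, **params):
        # build_full_result() walks every page of the API response and merges
        # them, so gateways beyond the first page are not silently dropped.
        paginator = client.get_paginator('describe_nat_gateways')
        return paginator.paginate(**params).build_full_result()['NatGateways']

    try:
        gateways = _describe_all_nat_gateways(client, NatGatewayIds=['nat-0c242a2397acf6173'])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, 'Unable to describe NAT gateways.')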
+ - See also U(http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways) + returned: success type: list + contains: + create_time: + description: The date and time the NAT gateway was created + returned: always + type: str + sample: "2021-03-11T22:43:25+00:00" + delete_time: + description: The date and time the NAT gateway was deleted + returned: when the NAT gateway has been deleted + type: str + sample: "2021-03-11T22:43:25+00:00" + nat_gateway_addresses: + description: List containing a dictionary with the IP addresses and network interface associated with the NAT gateway + returned: always + type: dict + contains: + allocation_id: + description: The allocation ID of the Elastic IP address that's associated with the NAT gateway + returned: always + type: str + sample: eipalloc-0853e66a40803da76 + network_interface_id: + description: The ID of the network interface associated with the NAT gateway + returned: always + type: str + sample: eni-0a37acdbe306c661c + private_ip: + description: The private IP address associated with the Elastic IP address + returned: always + type: str + sample: 10.0.238.227 + public_ip: + description: The Elastic IP address associated with the NAT gateway + returned: always + type: str + sample: 34.204.123.52 + nat_gateway_id: + description: The ID of the NAT gateway + returned: always + type: str + sample: nat-0c242a2397acf6173 + state: + description: state of the NAT gateway + returned: always + type: str + sample: available + subnet_id: + description: The ID of the subnet in which the NAT gateway is located + returned: always + type: str + sample: subnet-098c447465d4344f9 + vpc_id: + description: The ID of the VPC in which the NAT gateway is located + returned: always + type: str + sample: vpc-02f37f48438ab7d4c + tags: + description: Tags applied to the NAT gateway + returned: always + type: dict + sample: + Tag1: tag1 + Tag_2: tag_2 ''' -import json try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj +@AWSRetry.jittered_backoff(retries=10) +def _describe_nat_gateways(client, module, **params): + try: + paginator = client.get_paginator('describe_nat_gateways') + return paginator.paginate(**params).build_full_result()['NatGateways'] + except is_boto3_error_code('InvalidNatGatewayID.NotFound'): + module.exit_json(msg="NAT gateway not found.") + except is_boto3_error_code('NatGatewayMalformed'): # pylint: disable=duplicate-except + module.fail_json_aws(msg="NAT gateway id is malformed.") -def get_nat_gateways(client, module, nat_gateway_id=None): +def get_nat_gateways(client, module): params = dict() nat_gateways = list() @@ -103,17 +178,16 @@ params['NatGatewayIds'] =
module.params.get('nat_gateway_ids') try: - result = json.loads(json.dumps(client.describe_nat_gateways(aws_retry=True, **params), default=date_handler)) - except Exception as e: - module.fail_json(msg=to_native(e)) + result = normalize_boto3_result(_describe_nat_gateways(client, module, **params)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, 'Unable to describe NAT gateways.') - for gateway in result['NatGateways']: + for gateway in result: # Turn the boto3 result into ansible_friendly_snaked_names converted_gateway = camel_dict_to_snake_dict(gateway) if 'tags' in converted_gateway: # Turn the boto3 result into ansible friendly tag dictionary converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags']) - nat_gateways.append(converted_gateway) return nat_gateways From d6d0b78d6e26ca27f6a17dba88c7f1d50cab88ae Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Fri, 19 Mar 2021 10:18:05 -0700 Subject: [PATCH 120/683] Cleanup ec2_vpc_route_table modules (#484) * Move regex definitions from global to calling function * Remove unused regex * Tidy up comments * Replace json_query uses in tests with jinja selectattr * Additional assertions in integration tests --- ec2_vpc_route_table.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 1ef10e89ceb..fc7c02c6706 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -245,11 +245,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') -SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') -ROUTE_TABLE_RE = re.compile(r'^rtb-[A-z0-9]+$') - - @AWSRetry.jittered_backoff() def describe_subnets_with_backoff(connection, **params): paginator = connection.get_paginator('describe_subnets') @@ -283,10 +278,10 @@ def find_subnets(connection, module, vpc_id, identified_subnets): """ Finds a list of subnets, each identified either by a raw ID, a unique 'Name' tag, or a CIDR such as 10.0.0.0/8. - - Note that this function is duplicated in other ec2 modules, and should - potentially be moved into a shared module_utils """ + CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') + SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') + subnet_ids = [] subnet_names = [] subnet_cidrs = [] From 07c23588403f3122701c7c62ab566f4f8d1a4a18 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 19 Mar 2021 22:29:49 +0100 Subject: [PATCH 121/683] New module - ec2_vpc_endpoint_service_info (#346) * New module - ec2_vpc_endpoint_service_info * Deprecate querying services through ec2_vpc_endpoint_info * Attempt to cope with some services which have two possible endpoints Co-authored-by: Jill R <4121322+jillr@users.noreply.github.com> --- ec2_vpc_endpoint_info.py | 36 +++++-- ec2_vpc_endpoint_service_info.py | 180 +++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+), 7 deletions(-) create mode 100644 ec2_vpc_endpoint_service_info.py diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 7e259c6ca8e..7706a00c915 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -10,22 +10,26 @@ short_description: Retrieves AWS VPC endpoints details using AWS methods. version_added: 1.0.0 description: - - Gets various details related to AWS VPC Endpoints. + - Gets various details related to AWS VPC endpoints.
- This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change. requirements: [ boto3 ] options: query: description: - - Specifies the query action to take. Services returns the supported - AWS services that can be specified when creating an endpoint. - required: True + - Defaults to C(endpoints). + - Specifies the query action to take. + - I(query=endpoints) returns information about AWS VPC endpoints. + - Retrieving information about services using I(query=services) has been + deprecated in favour of the M(ec2_vpc_endpoint_service_info) module. + - The I(query) option has been deprecated and will be removed after 2022-12-01. + required: False choices: - services - endpoints type: str vpc_endpoint_ids: description: - - Get details of specific endpoint IDs + - The IDs of specific endpoints to retrieve the details of. type: list elements: str filters: @@ -161,7 +165,7 @@ def get_endpoints(client, module): def main(): argument_spec = dict( - query=dict(choices=['services', 'endpoints'], required=True), + query=dict(choices=['services', 'endpoints'], required=False), filters=dict(default={}, type='dict'), vpc_endpoint_ids=dict(type='list', elements='str'), ) @@ -176,11 +180,29 @@ def main(): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') + query = module.params.get('query') + if query == 'endpoints': + module.deprecate('The query option has been deprecated and' + ' will be removed after 2022-12-01. Searching for' + ' `endpoints` is now the default and after' + ' 2022-12-01 this module will only support fetching' + ' endpoints.', + date='2022-12-01', collection_name='community.aws') + elif query == 'services': + module.deprecate('Support for fetching service information with this ' + 'module has been deprecated and will be removed after' + ' 2022-12-01. ' + 'Please use the ec2_vpc_endpoint_service_info module ' + 'instead.', date='2022-12-01', + collection_name='community.aws') + else: + query = 'endpoints' + invocations = { 'services': get_supported_services, 'endpoints': get_endpoints, } - results = invocations[module.params.get('query')](connection, module) + results = invocations[query](connection, module) module.exit_json(**results) diff --git a/ec2_vpc_endpoint_service_info.py b/ec2_vpc_endpoint_service_info.py new file mode 100644 index 00000000000..2afd0e5e906 --- /dev/null +++ b/ec2_vpc_endpoint_service_info.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: ec2_vpc_endpoint_service_info +short_description: retrieves AWS VPC endpoint service details +version_added: 1.5.0 +description: + - Gets details related to AWS VPC Endpoint Services. +requirements: [ boto3 ] +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpointServices.html) + for possible filters. + type: dict + service_names: + description: + - A list of service names which can be used to narrow the search results. 
+ type: list + elements: str +author: + - Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + +''' + +EXAMPLES = r''' +# Simple example of listing all supported AWS services for VPC endpoints +- name: List supported AWS endpoint services + community.aws.ec2_vpc_endpoint_service_info: + region: ap-southeast-2 + register: supported_endpoint_services +''' + +RETURN = r''' +service_names: + description: List of supported AWS VPC endpoint service names. + returned: success + type: list + sample: + service_names: + - com.amazonaws.ap-southeast-2.s3 +service_details: + description: Detailed information about the AWS VPC endpoint services. + returned: success + type: complex + contains: + service_name: + returned: success + description: The ARN of the endpoint service. + type: str + service_id: + returned: success + description: The ID of the endpoint service. + type: str + service_type: + returned: success + description: The type of the service + type: list + availability_zones: + returned: success + description: The Availability Zones in which the service is available. + type: list + owner: + returned: success + description: The AWS account ID of the service owner. + type: str + base_endpoint_dns_names: + returned: success + description: The DNS names for the service. + type: list + private_dns_name: + returned: success + description: The private DNS name for the service. + type: str + private_dns_names: + returned: success + description: The private DNS names assigned to the VPC endpoint service. + type: list + vpc_endpoint_policy_supported: + returned: success + description: Whether the service supports endpoint policies. + type: bool + acceptance_required: + returned: success + description: + Whether VPC endpoint connection requests to the service must be + accepted by the service owner. + type: bool + manages_vpc_endpoints: + returned: success + description: Whether the service manages its VPC endpoints. + type: bool + tags: + returned: success + description: A dict of tags associated with the service + type: dict + private_dns_name_verification_state: + returned: success + description: + - The verification state of the VPC endpoint service. + - Consumers of an endpoint service cannot use the private name when the state is not C(verified). 
+ type: str +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +# We're using a paginator so we can't use the client decorators +@AWSRetry.jittered_backoff() +def get_services(client, module): + paginator = client.get_paginator('describe_vpc_endpoint_services') + params = {} + if module.params.get("filters"): + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + if module.params.get("service_names"): + params['ServiceNames'] = module.params.get("service_names") + + results = paginator.paginate(**params).build_full_result() + return results + + +def normalize_service(service): + normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags']) + normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags')) + return normalized + + +def normalize_result(result): + normalized = {} + normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')] + normalized['service_names'] = result.get('ServiceNames', []) + return normalized + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict'), + service_names=dict(type='list', elements='str'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + # Validate Requirements + try: + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + try: + results = get_services(client, module) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to retrieve service details') + normalized_result = normalize_result(results) + + module.exit_json(changed=False, **normalized_result) + + +if __name__ == '__main__': + main() From b432cc612a480e9448400c0cda6147d1ae3505f2 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Wed, 24 Mar 2021 14:41:54 +0100 Subject: [PATCH 122/683] ec2_vpc_route_table: Don't fail if a route was already created. 
(#359) --- ec2_vpc_route_table.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index fc7c02c6706..6549f78881b 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -492,20 +492,16 @@ def ensure_routes(connection=None, module=None, route_table=None, route_specs=No for route_spec in route_specs_to_recreate: try: - connection.replace_route( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - **route_spec) + connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't recreate route") for route_spec in route_specs_to_create: try: - connection.create_route( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - **route_spec) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) + except is_boto3_error_code('RouteAlreadyExists'): + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create route") return {'changed': bool(changed)} From d802f7dc008682e8301c140840a9485be3bdb9f6 Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Wed, 24 Mar 2021 14:56:16 +0100 Subject: [PATCH 123/683] ec2_vpc_igw (_info) - documentation improvement (#495) * * ec2_vpc_igw (_info) - documentation improvement Signed-off-by: Alina Buzachis * Whitespace linting Co-authored-by: Mark Chappell --- ec2_vpc_igw.py | 17 +++++++++++++- ec2_vpc_igw_info.py | 54 +++++++++++++++++++++++++++------------------ 2 files changed, 48 insertions(+), 23 deletions(-) diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index bef92a71fcf..0d649d2131c 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -52,11 +52,26 @@ # Ensure that the VPC has an Internet Gateway. # The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc. -- community.aws.ec2_vpc_igw: +- name: Create Internet gateway + community.aws.ec2_vpc_igw: vpc_id: vpc-abcdefgh state: present register: igw +- name: Create Internet gateway with tags + community.aws.ec2_vpc_igw: + vpc_id: vpc-abcdefgh + state: present + tags: + Tag1: tag1 + Tag2: tag2 + register: igw + +- name: Delete Internet gateway + community.aws.ec2_vpc_igw: + state: absent + vpc_id: vpc-abcdefgh + register: vpc_igw_delete ''' RETURN = ''' diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index ab7d26a80b4..17e5cc805ae 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -65,33 +65,43 @@ ''' RETURN = r''' -internet_gateways: - description: The internet gateways for the account. - returned: always - type: list - sample: [ { "attachments": [ { "state": "available", "vpc_id": "vpc-02123b67" } ], "internet_gateway_id": "igw-2123634d", "tags": [ { "key": "Name", "value": "test-vpc-20-igw" } ] } ] - changed: description: True if listing the internet gateways succeeds. type: bool returned: always sample: "false" +internet_gateways:
+ returned: always + type: complex + contains: + attachments: + description: Any VPCs attached to the internet gateway + returned: I(state=present) + type: complex + contains: + state: + description: The current state of the attachment + returned: I(state=present) + type: str + sample: available + vpc_id: + description: The ID of the VPC. + returned: I(state=present) + type: str + sample: vpc-02123b67 + internet_gateway_id: + description: The ID of the internet gateway + returned: I(state=present) + type: str + sample: igw-2123634d + tags: + description: Any tags assigned to the internet gateway + returned: I(state=present) + type: dict + sample: + tags: + "Ansible": "Test" ''' try: From 19ad24f5d0727ebb849843e4805ca291205d2481 Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Wed, 24 Mar 2021 20:12:28 +0100 Subject: [PATCH 124/683] s3_sync - add storage_class feature (#497) * s3_sync - add support for choosing storage_class when objects are added to an S3 bucket. Signed-off-by: Alina Buzachis --- s3_sync.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/s3_sync.py b/s3_sync.py index e0edbea82b0..589dcd5ba3b 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -102,6 +102,22 @@ - Directives are separated by commas. required: false type: str + storage_class: + description: + - Storage class to be associated to each object added to the S3 bucket. + required: false + choices: + - 'STANDARD' + - 'REDUCED_REDUNDANCY' + - 'STANDARD_IA' + - 'ONEZONE_IA' + - 'INTELLIGENT_TIERING' + - 'GLACIER' + - 'DEEP_ARCHIVE' + - 'OUTPOSTS' + default: 'STANDARD' + type: str + version_added: 1.5.0 delete: description: - Remove remote files that exist in bucket but are not present in the file root. 
@@ -131,6 +147,12 @@ bucket: tedder file_root: roles/s3/files/ +- name: basic upload using the glacier storage class + community.aws.s3_sync: + bucket: tedder + file_root: roles/s3/files/ + storage_class: GLACIER + - name: all the options community.aws.s3_sync: bucket: tedder @@ -142,6 +164,7 @@ file_change_strategy: force permission: public-read cache_control: "public, max-age=31536000" + storage_class: "GLACIER" include: "*" exclude: "*.txt,.*" ''' @@ -470,6 +493,8 @@ def upload_files(s3, bucket, filelist, params): args['ACL'] = params['permission'] if params.get('cache_control'): args['CacheControl'] = params['cache_control'] + if params.get('storage_class'): + args['StorageClass'] = params['storage_class'] # if this fails exception is caught in main() s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None) ret.append(entry) @@ -507,7 +532,10 @@ def main(): include=dict(required=False, default="*"), cache_control=dict(required=False, default=''), delete=dict(required=False, type='bool', default=False), - # future options: encoding, metadata, storage_class, retries + storage_class=dict(required=False, default='STANDARD', + choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', + 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']), + # future options: encoding, metadata, retries ) module = AnsibleAWSModule( From 02dfc76e3a374b065aa930fdac88750cf7c82360 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Wed, 24 Mar 2021 20:28:01 +0100 Subject: [PATCH 125/683] =?UTF-8?q?Make=20iam=20module=20more=20predictabl?= =?UTF-8?q?e=20and=20idempotent=20=E2=80=A6=20(#369)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make iam module more predictable and idempotent on returning the user_name it creates/deletes * Add changelog fragment for iam module change --- iam.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/iam.py b/iam.py index 4c774285f6f..e65824a4862 100644 --- a/iam.py +++ b/iam.py @@ -742,7 +742,7 @@ def main(): (user_groups, changed) = set_users_groups( module, iam, name, groups, been_updated, new_name) module.exit_json( - user_meta=meta, groups=user_groups, keys=keys, changed=changed) + user_meta=meta, groups=user_groups, user_name=meta['created_user']['user_name'], keys=keys, changed=changed) elif state in ['present', 'update'] and user_exists: if update_pw == 'on_create': @@ -778,7 +778,7 @@ def main(): created_keys=new_key, user_meta=user_meta) elif new_name and not new_path and not been_updated: module.exit_json( - changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list, + changed=changed, groups=user_groups, old_user_name=orig_name, user_name=new_name, new_user_name=new_name, keys=key_list, created_keys=new_key, user_meta=user_meta) elif new_name and not new_path and been_updated: module.exit_json( @@ -802,7 +802,7 @@ def main(): try: set_users_groups(module, iam, name, '') name, changed = delete_user(module, iam, name) - module.exit_json(deleted_user=name, changed=changed) + module.exit_json(deleted_user=name, user_name=name, changed=changed) except Exception as ex: module.fail_json(changed=changed, msg=str(ex)) From 0f42758559b73247de26acab385f2a4314a76ab9 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Wed, 24 Mar 2021 13:35:52 -0700 Subject: [PATCH 126/683] Stabilize and improve ec2_vpc_endpoint modules (#473) * Stabilize and improve ec2_vpc_endpoint 
modules - Add tagging support - Make idempotent - Better exception handling - Better check_mode support - Use module_utils for common functions - Enable retries on common AWS failures * Make endpoint deletion idempotent in check_mode * Sanity fixes * Address review feedback --- ec2_vpc_endpoint.py | 190 +++++++++++++++++++++++++++++---------- ec2_vpc_endpoint_info.py | 22 ++--- 2 files changed, 150 insertions(+), 62 deletions(-) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 2bfe89008e5..2aa4441fac7 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -66,6 +66,19 @@ default: present choices: [ "present", "absent" ] type: str + tags: + description: + - A dict of tags to apply to the VPC endpoint. + - To remove all tags set I(tags={}) and I(purge_tags=true). + type: dict + version_added: 1.5.0 + purge_tags: + description: + - Delete any tags not specified in the task that are on the endpoint. This means you have to specify all the desired tags on each task affecting an endpoint. + default: false + type: bool + version_added: 1.5.0 wait: description: - When specified, will wait for either available status for state present. @@ -200,58 +213,112 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj +def get_endpoints(client, module, endpoint_id=None): + params = dict() + if endpoint_id: + params['VpcEndpointIds'] = [endpoint_id] + else: + filters = list() + if module.params.get('service'): + filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]}) + if module.params.get('vpc_id'): + filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]}) + params['Filters'] = filters + try: + result = client.describe_vpc_endpoints(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get endpoints") + # normalize iso datetime fields in result + normalized_result = normalize_boto3_result(result) + return normalized_result -def wait_for_status(client, module, resource_id, status): - polling_increment_secs = 15 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) - status_achieved = False - for x in range(0, max_retries): - try: - resource = get_endpoints(client, module, resource_id)['VpcEndpoints'][0] - if resource['State'] == status: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failure while waiting for status') + return status_achieved, resource -def get_endpoints(client, module, resource_id=None): - params = dict() - if resource_id: - params['VpcEndpointIds'] = [resource_id] +def
match_endpoints(route_table_ids, service_name, vpc_id, endpoint): + found = False + sorted_route_table_ids = [] - if route_table_ids: + sorted_route_table_ids = sorted(route_table_ids) + if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: + sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds']) + if sorted_endpoint_rt_ids == sorted_route_table_ids: -def get_endpoints(client, module, resource_id=None): - params = dict() - if resource_id: - params['VpcEndpointIds'] = [resource_id] + found = True + return found - result = json.loads(json.dumps(client.describe_vpc_endpoints(**params), default=date_handler)) - return result + +def ensure_tags(client, module, vpc_endpoint_id): + changed = False + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + filters = ansible_dict_to_boto3_filter_list({'resource-id': vpc_endpoint_id}) + try: + current_tags = client.describe_tags(aws_retry=True, Filters=filters) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to describe tags for VPC Endpoint: {0}".format(vpc_endpoint_id)) + + tags_to_set, tags_to_unset = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags.get('Tags')), tags, purge_tags=purge_tags) + if purge_tags and not tags: + tags_to_unset = list(boto3_tag_list_to_ansible_dict(current_tags.get('Tags'))) + + if tags_to_unset: + changed = True + if not module.check_mode: + try: + client.delete_tags(aws_retry=True, Resources=[vpc_endpoint_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_unset]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_unset)) + + if tags_to_set: + changed = True + if not module.check_mode: + try: + client.create_tags(aws_retry=True, Resources=[vpc_endpoint_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_to_set)) + return changed def setup_creation(client, module): - vpc_id = module.params.get('vpc_id') + endpoint_id = module.params.get('vpc_endpoint_id') + route_table_ids = module.params.get('route_table_ids') service_name = module.params.get('service') + vpc_id = module.params.get('vpc_id') + changed = False - if module.params.get('route_table_ids'): - route_table_ids = module.params.get('route_table_ids') - existing_endpoints = get_endpoints(client, module) - for endpoint in existing_endpoints['VpcEndpoints']: - if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: - sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds']) - sorted_route_table_ids = sorted(route_table_ids) - if sorted_endpoint_rt_ids == sorted_route_table_ids: - return False, camel_dict_to_snake_dict(endpoint) + if not endpoint_id: + # Try to use the module parameters to match any existing endpoints + all_endpoints = get_endpoints(client, module, endpoint_id) + if len(all_endpoints['VpcEndpoints']) > 0: + for endpoint in all_endpoints['VpcEndpoints']: + if match_endpoints(route_table_ids, service_name, vpc_id, endpoint): + endpoint_id = endpoint['VpcEndpointId'] + break + + if endpoint_id: + # If we have an endpoint now, just ensure tags and exit + if module.params.get('tags'): + changed = ensure_tags(client, module, endpoint_id) + normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0] + return changed, 
camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags']) changed, result = create_vpc_endpoint(client, module) - return changed, json.loads(json.dumps(result, default=date_handler)) + return changed, camel_dict_to_snake_dict(result, ignore_list=['Tags']) def create_vpc_endpoint(client, module): @@ -261,7 +328,11 @@ def create_vpc_endpoint(client, module): params['VpcId'] = module.params.get('vpc_id') params['VpcEndpointType'] = module.params.get('vpc_endpoint_type') params['ServiceName'] = module.params.get('service') - params['DryRun'] = module.check_mode + + if module.check_mode: + changed = True + result = 'Would have created VPC Endpoint if not in check mode' + module.exit_json(changed=changed, result=result) if module.params.get('route_table_ids'): params['RouteTableIds'] = module.params.get('route_table_ids') @@ -292,16 +363,18 @@ try: changed = True - result = camel_dict_to_snake_dict(client.create_vpc_endpoint(**params)['VpcEndpoint']) + result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint'] if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)): changed = False elif module.params.get('wait') and not module.check_mode: - status_achieved, result = wait_for_status(client, module, result['vpc_endpoint_id'], 'available') - if not status_achieved: - module.fail_json(msg='Error waiting for vpc endpoint to become available - please check the AWS console') - except is_boto3_error_code('DryRunOperation'): - changed = True - result = 'Would have created VPC Endpoint if not in check mode' + try: + waiter = get_waiter(client, 'vpc_endpoint_exists') + waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15)) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Error waiting for vpc endpoint to become available - please check the AWS console') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failure while waiting for status') + except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except @@ -309,19 +382,38 @@ def create_vpc_endpoint(client, module): module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to create VPC endpoint.") - return changed, result + if module.params.get('tags'): + ensure_tags(client, module, result['VpcEndpointId']) + + # describe and normalize iso datetime fields in result after adding tags + normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0] + return changed, normalized_result def setup_removal(client, module): params = dict() changed = False - params['DryRun'] = module.check_mode + + if module.check_mode: + try: + exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')]) + if exists: + result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'} + changed = True + except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): + result = {'msg': 'Endpoint does not exist, nothing to delete.'} + changed = False + except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get endpoints") + + return changed, result + if isinstance(module.params.get('vpc_endpoint_id'), string_types): params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')] else: params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id') try: - result = client.delete_vpc_endpoints(**params)['Unsuccessful'] + result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful'] if len(result) < len(params['VpcEndpointIds']): changed = True # For some reason delete_vpc_endpoints doesn't throw exceptions it @@ -332,9 +424,7 @@ def setup_removal(client, module): raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): continue - except is_boto3_error_code('DryRunOperation'): - changed = True - result = 'Would have deleted VPC Endpoint if not in check mode' + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "Failed to delete VPC endpoint") return changed, result @@ -353,6 +443,8 @@ def main(): route_table_ids=dict(type='list', elements='str'), vpc_endpoint_id=dict(), client_token=dict(no_log=False), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -373,7 +465,7 @@ def main(): date='2022-12-01', collection_name='community.aws') try: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 7706a00c915..425e0c63ec7 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -120,14 +120,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj - - @AWSRetry.exponential_backoff() def get_supported_services(client, module): results = list() @@ -149,16 +147,14 @@ def get_endpoints(client, module): params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) if module.params.get('vpc_endpoint_ids'): params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') - while True: - response = client.describe_vpc_endpoints(**params) - results.extend(response['VpcEndpoints']) - if 'NextToken' in response: - params['NextToken'] = response['NextToken'] - else: - break try: - results = json.loads(json.dumps(results, default=date_handler)) - except Exception as e: + paginator = client.get_paginator('describe_vpc_endpoints') + results = paginator.paginate(**params).build_full_result()['VpcEndpoints'] + + results = normalize_boto3_result(results) + except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): + module.exit_json(msg='VpcEndpoint {0} does not 
exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) From 30b3586179049c4ca475d4336b89621176393187 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 25 Mar 2021 15:50:16 +0100 Subject: [PATCH 127/683] Stability - s3_lifecycle (#448) * Add aws retry * re-enable lifecycle tests * changelog * Rename S3 bucket used by tests * Cleanup docs * Use datetime to be a little more flexible with the date formats * Rework and add support for waiting for changes to propagate * Wait for changes to propagate in tests s3 lifecycle events are especially susceptible to eventual consistency * Test examples are also idempotent... * More changelogs * Catch Amazon's "Nothing Changed" error (can happen when just trying to enable/disable a rule) --- s3_lifecycle.py | 303 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 201 insertions(+), 102 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 0bc4a328680..9cec1402eb1 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -10,105 +10,110 @@ --- module: s3_lifecycle version_added: 1.0.0 -short_description: Manage s3 bucket lifecycle rules in AWS +short_description: Manage S3 bucket lifecycle rules in AWS description: - - Manage s3 bucket lifecycle rules in AWS + - Manage S3 bucket lifecycle rules in AWS. author: "Rob White (@wimnat)" notes: - - If specifying expiration time as days then transition time must also be specified in days - - If specifying expiration time as a date then transition time must also be specified as a date + - If specifying expiration time as days then transition time must also be specified in days. + - If specifying expiration time as a date then transition time must also be specified as a date. options: name: description: - - "Name of the s3 bucket" + - Name of the S3 bucket. required: true type: str expiration_date: description: - - > - Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must - be midnight and a GMT timezone must be specified. + - Indicates the lifetime of the objects that are subject to the rule by the date they will expire. + - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. type: str expiration_days: description: - - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + - Indicates the lifetime, in days, of the objects that are subject to the rule. + - The value must be a non-zero positive integer. type: int prefix: description: - - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket." + - Prefix identifying one or more objects to which the rule applies. + - If no prefix is specified, the rule will apply to the whole bucket. type: str purge_transitions: description: - - > - "Whether to replace all the current transition(s) with the new transition(s). When false, the provided transition(s) - will be added, replacing transitions with the same storage_class. When true, existing transitions will be removed and - replaced with the new transition(s) + - Whether to replace all the current transition(s) with the new transition(s). 
+ - When C(false), the provided transition(s) will be added, replacing transitions + with the same storage_class. When C(true), existing transitions will be removed + and replaced with the new transition(s). default: true type: bool noncurrent_version_expiration_days: description: - - 'Delete noncurrent versions this many days after they become noncurrent' + - The number of days after which non-current versions should be deleted. required: false type: int noncurrent_version_storage_class: description: - - 'Transition noncurrent versions to this storage class' + - The storage class to which non-current versions are transitioned. default: glacier choices: ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] required: false type: str noncurrent_version_transition_days: description: - - 'Transition noncurrent versions this many days after they become noncurrent' + - The number of days after which non-current versions will be transitioned + to the storage class specified in I(noncurrent_version_storage_class). required: false type: int noncurrent_version_transitions: description: - - > - A list of transition behaviors to be applied to noncurrent versions for the rule. Each storage class may be used only once. Each transition - behavior contains these elements + - A list of transition behaviors to be applied to noncurrent versions for the rule. + - Each storage class may be used only once. Each transition behavior contains these elements I(transition_days) I(storage_class) type: list elements: dict rule_id: description: - - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided." + - Unique identifier for the rule. + - The value cannot be longer than 255 characters. + - A unique value for the rule will be generated if no value is provided. type: str state: description: - - "Create or remove the lifecycle rule" + - Create or remove the lifecycle rule. default: present choices: [ 'present', 'absent' ] type: str status: description: - - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied." + - If C(enabled), the rule is currently being applied. + - If C(disabled), the rule is not currently being applied. default: enabled choices: [ 'enabled', 'disabled' ] type: str storage_class: description: - - "The storage class to transition to." - - "The 'standard_ia' class is only being available from Ansible version 2.2." + - The storage class to transition to. default: glacier choices: [ 'glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] type: str transition_date: description: - - > - Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, - this parameter is required." + - Indicates the lifetime of the objects that are subject to the rule by the date they + will transition to a different storage class. + - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must + be specified. + - If I(transition_days) is not specified, this parameter is required. type: str transition_days: description: - - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required." 
+ - Indicates when, in days, an object transitions to a different storage class. + - If I(transition_date) is not specified, this parameter is required. type: int transitions: description: - - A list of transition behaviors to be applied to the rule. Each storage class may be used only once. Each transition - behavior may contain these elements + - A list of transition behaviors to be applied to the rule. + - Each storage class may be used only once. Each transition behavior may contain these elements I(transition_days) I(transition_date) I(storage_class) @@ -118,6 +123,12 @@ description: - The I(requester_pays) option does nothing and will be removed after 2022-06-01 type: bool + wait: + description: + - Wait for the configuration to complete before returning. + version_added: 1.5.0 + type: bool + default: no extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -192,6 +203,13 @@ from copy import deepcopy import datetime +import time + +try: + from dateutil import parser as date_parser + HAS_DATEUTIL = True +except ImportError: + HAS_DATEUTIL = False try: import botocore @@ -200,12 +218,39 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def create_lifecycle_rule(client, module): +def parse_date(date): + if date is None: + return None + try: + if HAS_DATEUTIL: + return date_parser.parse(date) + else: + # Very simplistic + return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z") + except ValueError: + return None + + +def fetch_rules(client, module, name): + # Get the bucket's current lifecycle rules + try: + current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name) + current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules']) + except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + return current_lifecycle_rules + +def build_rule(client, module): name = module.params.get("name") - expiration_date = module.params.get("expiration_date") + expiration_date = parse_date(module.params.get("expiration_date")) expiration_days = module.params.get("expiration_days") noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days") noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days") @@ -215,20 +260,10 @@ def create_lifecycle_rule(client, module): rule_id = module.params.get("rule_id") status = module.params.get("status") storage_class = module.params.get("storage_class") - transition_date = module.params.get("transition_date") + transition_date = parse_date(module.params.get("transition_date")) transition_days = module.params.get("transition_days") transitions = module.params.get("transitions") purge_transitions = module.params.get("purge_transitions") - changed = False - - # Get the bucket's current lifecycle rules - try: - current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name) - current_lifecycle_rules = current_lifecycle['Rules'] - except 
is_boto3_error_code('NoSuchLifecycleConfiguration'): - current_lifecycle_rules = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) if rule_id is not None: @@ -237,7 +272,7 @@ def create_lifecycle_rule(client, module): if expiration_days is not None: rule['Expiration'] = dict(Days=expiration_days) elif expiration_date is not None: - rule['Expiration'] = dict(Date=expiration_date) + rule['Expiration'] = dict(Date=expiration_date.isoformat()) if noncurrent_version_expiration_days is not None: rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days) @@ -246,7 +281,7 @@ def create_lifecycle_rule(client, module): rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] elif transition_date is not None: - rule['Transitions'] = [dict(Date=transition_date, StorageClass=storage_class.upper()), ] + rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ] if transitions is not None: if not rule.get('Transitions'): @@ -275,8 +310,17 @@ def create_lifecycle_rule(client, module): t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper() rule['NoncurrentVersionTransitions'].append(t_out) + return rule + + +def compare_and_update_configuration(client, module, current_lifecycle_rules, rule): + purge_transitions = module.params.get("purge_transitions") + rule_id = module.params.get("rule_id") + lifecycle_configuration = dict(Rules=[]) + changed = False appended = False + # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule if current_lifecycle_rules: # If rule ID exists, use that for comparison otherwise compare based on prefix @@ -300,13 +344,7 @@ def create_lifecycle_rule(client, module): lifecycle_configuration['Rules'].append(rule) changed = True - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed) + return changed, lifecycle_configuration def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): @@ -334,11 +372,35 @@ def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_ return changed, appended -def compare_rule(rule_a, rule_b, purge_transitions): +def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): + changed = False + lifecycle_configuration = dict(Rules=[]) + + # Check if rule exists + # If an ID exists, use that otherwise compare based on prefix + if rule_id is not None: + for existing_rule in current_lifecycle_rules: + if rule_id == existing_rule['ID']: + # We're not keeping the rule (i.e. deleting) so mark as changed + changed = True + else: + lifecycle_configuration['Rules'].append(existing_rule) + else: + for existing_rule in current_lifecycle_rules: + if prefix == existing_rule['Filter']['Prefix']: + # We're not keeping the rule (i.e. 
deleting) so mark as changed + changed = True + else: + lifecycle_configuration['Rules'].append(existing_rule) + + return changed, lifecycle_configuration + + +def compare_rule(new_rule, old_rule, purge_transitions): # Copy objects - rule1 = deepcopy(rule_a) - rule2 = deepcopy(rule_b) + rule1 = deepcopy(new_rule) + rule2 = deepcopy(old_rule) if purge_transitions: return rule1 == rule2 @@ -359,7 +421,7 @@ def merge_transitions(updated_rule, updating_rule): - # because of the legal s3 transitions, we know only one can exist for each storage class. + # because of the legal S3 transitions, we know only one can exist for each storage class. # So, our strategy is build some dicts, keyed on storage class and add the storage class transitions that are only # in updating_rule to updated_rule updated_transitions = {} @@ -373,54 +435,90 @@ updated_rule['Transitions'].append(transition) +def create_lifecycle_rule(client, module): + + name = module.params.get("name") + wait = module.params.get("wait") + changed = False + + old_lifecycle_rules = fetch_rules(client, module, name) + new_rule = build_rule(client, module) + (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + old_lifecycle_rules, + new_rule) + + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_configuration) + except is_boto3_error_message('At least one action needs to be specified in a rule'): + # Amazon interpreted this as not changing anything + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + + _changed = changed + _retries = 10 + while wait and _changed and _retries: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again, + time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + new_rules, + new_rule) + + new_rules = fetch_rules(client, module, name) + + module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, + old_rules=old_lifecycle_rules, _retries=_retries, + _config=lifecycle_configuration) + + def destroy_lifecycle_rule(client, module): name = module.params.get("name") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") + wait = module.params.get("wait") changed = False if prefix is None: prefix = "" - # Get the bucket's current lifecycle rules - try: - current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules'] - except is_boto3_error_code('NoSuchLifecycleConfiguration'): - current_lifecycle_rules = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - # Create lifecycle - lifecycle_obj = dict(Rules=[]) - - # Check if rule exists - # If an ID exists, use that otherwise compare based on prefix - if rule_id is not None: - for existing_rule in current_lifecycle_rules: - if rule_id == existing_rule['ID']: - # We're not keeping the rule (i.e. 
deleting) so mark as changed - changed = True - else: - lifecycle_obj['Rules'].append(existing_rule) - else: - for existing_rule in current_lifecycle_rules: - if prefix == existing_rule['Filter']['Prefix']: - # We're not keeping the rule (i.e. deleting) so mark as changed - changed = True - else: - lifecycle_obj['Rules'].append(existing_rule) + current_lifecycle_rules = fetch_rules(client, module, name) + changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: if lifecycle_obj['Rules']: - client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj) + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_obj) elif current_lifecycle_rules: changed = True - client.delete_bucket_lifecycle(Bucket=name) + client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - module.exit_json(changed=changed) + + _changed = changed + _retries = 10 + while wait and _changed and _retries: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again, + time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + + new_rules = fetch_rules(client, module, name) + + module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, + _retries=_retries) def main(): @@ -442,7 +540,8 @@ def main(): transition_days=dict(type='int'), transition_date=dict(), transitions=dict(type='list', elements='dict'), - purge_transitions=dict(default='yes', type='bool') + purge_transitions=dict(default='yes', type='bool'), + wait=dict(type='bool', default=False) ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -456,7 +555,7 @@ def main(): ['noncurrent_version_transition_days', 'noncurrent_version_transitions'], ],) - client = module.client('s3') + client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) expiration_date = module.params.get("expiration_date") transition_date = module.params.get("transition_date") @@ -474,18 +573,18 @@ def main(): else: msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present) module.fail_json(msg=msg) - # If expiration_date set, check string is valid - if expiration_date is not None: - try: - datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z") - except ValueError: - module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included") - - if transition_date is not None: - try: - datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z") - except ValueError: - module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included") + + # If dates have been set, make sure they're in a valid format + if expiration_date: + expiration_date = parse_date(expiration_date) + if expiration_date is None: + module.fail_json(msg="expiration_date is not a valid ISO-8601 format." 
+ " The time must be midnight and a timezone of GMT must be included") + if transition_date: + transition_date = parse_date(transition_date) + if transition_date is None: + module.fail_json(msg="transition_date is not a valid ISO-8601 format." + " The time must be midnight and a timezone of GMT must be included") if state == 'present': create_lifecycle_rule(client, module) From 783c31424cfcd14af0c749049c6b653e4e469435 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 27 Mar 2021 12:50:18 +0100 Subject: [PATCH 128/683] s3_logging - migrate to boto3 (#447) * Migrate s3_logging to boto3 * Re-enable s3_logging tests * Add support for check_mode * Catch and retry on "InvalidTargetBucketForLogging" - ACL updates occasionally take time to propogate * changelog --- s3_logging.py | 158 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 100 insertions(+), 58 deletions(-) diff --git a/s3_logging.py b/s3_logging.py index 24f4004eec7..a9359ca2d3b 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -59,27 +59,68 @@ ''' try: - import boto.ec2 - from boto.s3.connection import OrdinaryCallingFormat, Location - from boto.exception import S3ResponseError + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def compare_bucket_logging(bucket, target_bucket, target_prefix): +def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - bucket_log_obj = bucket.get_logging_status() - if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix: + if not bucket_logging.get('LoggingEnabled', False): + if target_bucket: + return True return False - else: + + logging = bucket_logging['LoggingEnabled'] + if logging['TargetBucket'] != target_bucket: + return True + if logging['TargetPrefix'] != target_prefix: + return True + return False + + +def verify_acls(connection, module, target_bucket): + try: + current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) + current_grants = current_acl['Grants'] + except is_boto3_error_code('NoSuchBucket'): + module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to fetch target bucket ACL") + + required_grant = { + 'Grantee': { + 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery", + 'Type': 'Group' + }, + 'Permission': 'FULL_CONTROL' + } + + for grant in current_grants: + if grant == required_grant: + return False + + if module.check_mode: return True + updated_acl = dict(current_acl) + updated_grants = list(current_grants) + updated_grants.append(required_grant) + updated_acl['Grants'] = updated_grants + del updated_acl['ResponseMetadata'] + try: + connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, 
AccessControlPolicy=updated_acl) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to update target bucket ACL to allow log delivery") + + return True + def enable_bucket_logging(connection, module): @@ -89,29 +130,37 @@ def enable_bucket_logging(connection, module): changed = False try: - bucket = connection.get_bucket(bucket_name) - except S3ResponseError as e: - module.fail_json(msg=to_native(e)) + bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) + except is_boto3_error_code('NoSuchBucket'): + module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to fetch current logging status") try: - if not compare_bucket_logging(bucket, target_bucket, target_prefix): - # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket - try: - target_bucket_obj = connection.get_bucket(target_bucket) - except S3ResponseError as e: - if e.status == 301: - module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged") - else: - module.fail_json(msg=to_native(e)) - target_bucket_obj.set_as_logging_target() + changed |= verify_acls(connection, module, target_bucket) - bucket.enable_logging(target_bucket, target_prefix) - changed = True + if not compare_bucket_logging(bucket_logging, target_bucket, target_prefix): + bucket_logging = camel_dict_to_snake_dict(bucket_logging) + module.exit_json(changed=changed, **bucket_logging) - except S3ResponseError as e: - module.fail_json(msg=to_native(e)) + if module.check_mode: + module.exit_json(changed=True) - module.exit_json(changed=changed) + result = connection.put_bucket_logging( + aws_retry=True, + Bucket=bucket_name, + BucketLoggingStatus={ + 'LoggingEnabled': { + 'TargetBucket': target_bucket, + 'TargetPrefix': target_prefix, + } + }) + + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to enable bucket logging") + + result = camel_dict_to_snake_dict(result) + module.exit_json(changed=True, **result) def disable_bucket_logging(connection, module): @@ -120,14 +169,26 @@ def disable_bucket_logging(connection, module): changed = False try: - bucket = connection.get_bucket(bucket_name) - if not compare_bucket_logging(bucket, None, None): - bucket.disable_logging() - changed = True - except S3ResponseError as e: - module.fail_json(msg=to_native(e)) + bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to fetch current logging status") + + if not compare_bucket_logging(bucket_logging, None, None): + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + try: + response = AWSRetry.jittered_backoff( + catch_extra_error_codes=['InvalidTargetBucketForLogging'] + )(connection.put_bucket_logging)( + Bucket=bucket_name, BucketLoggingStatus={} + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to disable bucket logging") - module.exit_json(changed=changed) + module.exit_json(changed=True) def main(): @@ -139,28 +200,9 @@ def main(): state=dict(required=False, 
default='present', choices=['present', 'absent']), ) - module = AnsibleAWSModule(argument_spec=argument_spec) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if region in ('us-east-1', '', None): - # S3ism for the US Standard region - location = Location.DEFAULT - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - try: - connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params) - # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases - if connection is None: - connection = boto.connect_s3(**aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json(msg=str(e)) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") if state == 'present': From 1915f2a5574cbdc765cba067d7affdfad814f692 Mon Sep 17 00:00:00 2001 From: Vijayanand Sharma Date: Mon, 29 Mar 2021 19:38:47 +1100 Subject: [PATCH 129/683] Added 'worker_type' and 'glue_version' attributes to the module (#370) * Added 'worker_type' and 'glue_version' attributes to the module * fix: fixed compare_glue_job_params to compare the new attributes * Added changelog - fragments * Minor docs tweaks --- aws_glue_job.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/aws_glue_job.py b/aws_glue_job.py index 7f6af1f4d0c..d1f249aaefc 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -14,7 +14,9 @@ description: - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. requirements: [ boto3 ] -author: "Rob White (@wimnat)" +author: + - "Rob White (@wimnat)" + - "Vijayanand Sharma (@vijayanandsharma)" options: allocated_capacity: description: @@ -75,6 +77,22 @@ description: - The job timeout in minutes. type: int + glue_version: + description: + - Glue version determines the versions of Apache Spark and Python that AWS Glue supports. + type: str + version_added: 1.5.0 + worker_type: + description: + - The type of predefined worker that is allocated when a job runs. + choices: [ 'Standard', 'G.1X', 'G.2X' ] + type: str + version_added: 1.5.0 + number_of_workers: + description: + - The number of workers of a defined workerType that are allocated when a job runs. 
+ type: int + version_added: 1.5.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -249,6 +267,12 @@ def _compare_glue_job_params(user_params, current_params): return True if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: return True + if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + return True + if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']: + return True + if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']: + return True return False @@ -283,6 +307,12 @@ def create_or_update_glue_job(connection, module, glue_job): params['MaxRetries'] = module.params.get("max_retries") if module.params.get("timeout") is not None: params['Timeout'] = module.params.get("timeout") + if module.params.get("glue_version") is not None: + params['GlueVersion'] = module.params.get("glue_version") + if module.params.get("worker_type") is not None: + params['WorkerType'] = module.params.get("worker_type") + if module.params.get("number_of_workers") is not None: + params['NumberOfWorkers'] = module.params.get("number_of_workers") # If glue_job is not None then check if it needs to be modified, else create it if glue_job: @@ -346,7 +376,10 @@ def main(): name=dict(required=True, type='str'), role=dict(type='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), - timeout=dict(type='int') + timeout=dict(type='int'), + glue_version=dict(type='str'), + worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), + number_of_workers=dict(type='int'), ) ) From 7039a65019952744856abc4258927c16cc4eb9c4 Mon Sep 17 00:00:00 2001 From: ichekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Mon, 29 Mar 2021 06:44:41 -0400 Subject: [PATCH 130/683] aws_glue_connection: Add multiple connection types, add check mode (#503) * Add multiple connection types and support for check mode Examples: ``` - community.aws.aws_glue_connection: name: My connection availability_zone: us-east-1a connection_properties: JDBC_ENFORCE_SSL: "false" connection_type: NETWORK description: My test connection security_groups: - test subnet_id: subnet-123abc state: present ``` * Add retries. * Add description of how to create a Glue network connection Co-authored-by: Mark Chappell --- aws_glue_connection.py | 86 +++++++++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 21 deletions(-) diff --git a/aws_glue_connection.py b/aws_glue_connection.py index 41bc99816a0..b279509be18 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -16,6 +16,12 @@ requirements: [ boto3 ] author: "Rob White (@wimnat)" options: + availability_zone: + description: + - Availability Zone used by the connection + - Required when I(connection_type=NETWORK). + type: str + version_added: 1.5.0 catalog_id: description: - The ID of the Data Catalog in which to create the connection. If none is supplied, @@ -28,9 +34,9 @@ type: dict connection_type: description: - - The type of the connection. Currently, only JDBC is supported; SFTP is not supported. + - The type of the connection. Currently, SFTP is not supported. default: JDBC - choices: [ 'JDBC', 'SFTP' ] + choices: [ 'CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK' ] type: str description: description: @@ -49,6 +55,7 @@ security_groups: description: - A list of security groups to be used by the connection. Use either security group name or ID. 
+ - Required when I(connection_type=NETWORK). type: list elements: str state: @@ -60,6 +67,7 @@ subnet_id: description: - The subnet ID used by the connection. + - Required when I(connection_type=NETWORK). type: str extends_documentation_fragment: - amazon.aws.aws @@ -79,6 +87,19 @@ PASSWORD: my-password state: present +# Create an AWS Glue network connection +- community.aws.aws_glue_connection: + name: my-glue-network-connection + availability_zone: us-east-1a + connection_properties: + JDBC_ENFORCE_SSL: "false" + connection_type: NETWORK + description: Test connection + security_groups: + - sg-glue + subnet_id: subnet-123abc + state: present + # Delete an AWS Glue connection - community.aws.aws_glue_connection: name: my-glue-connection @@ -142,6 +163,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names @@ -162,7 +184,7 @@ def _get_glue_connection(connection, module): params['CatalogId'] = connection_catalog_id try: - return connection.get_connection(**params)['Connection'] + return connection.get_connection(aws_retry=True, **params)['Connection'] except is_boto3_error_code('EntityNotFoundException'): return None @@ -207,10 +229,29 @@ def _compare_glue_connection_params(user_params, current_params): user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \ != current_params['PhysicalConnectionRequirements']['SubnetId']: return True + if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ + user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \ + != current_params['PhysicalConnectionRequirements']['AvailabilityZone']: + return True return False +# Glue module doesn't appear to have any waiters, unlike EC2 or RDS +def _await_glue_connection(connection, module): + start_time = time.time() + wait_timeout = start_time + 30 + check_interval = 5 + + while wait_timeout > time.time(): + glue_connection = _get_glue_connection(connection, module) + if glue_connection and glue_connection.get('Name'): + return glue_connection + time.sleep(check_interval) + + module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name')) + + def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): """ Create or update an AWS Glue connection @@ -220,8 +261,8 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co :param glue_connection: a dict of AWS Glue connection parameters or None :return: """ - changed = False + params = dict() params['ConnectionInput'] = dict() params['ConnectionInput']['Name'] = module.params.get("name") @@ -241,6 +282,8 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids if module.params.get("subnet_id") is not None: params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id") + if module.params.get("availability_zone") is not None: + params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = module.params.get("availability_zone") # If glue_connection is not None then check if it needs to be modified, else 
create it if glue_connection: @@ -249,27 +292,24 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co # We need to slightly modify the params for an update update_params = copy.deepcopy(params) update_params['Name'] = update_params['ConnectionInput']['Name'] - connection.update_connection(**update_params) + if not module.check_mode: + connection.update_connection(aws_retry=True, **update_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) else: try: - connection.create_connection(**params) + if not module.check_mode: + connection.create_connection(aws_retry=True, **params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) # If changed, get the Glue connection again - if changed: - glue_connection = None - for i in range(10): - glue_connection = _get_glue_connection(connection, module) - if glue_connection is not None: - break - time.sleep(10) + if changed and not module.check_mode: + glue_connection = _await_glue_connection(connection, module) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection)) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {})) def delete_glue_connection(connection, module, glue_connection): @@ -281,7 +321,6 @@ def delete_glue_connection(connection, module, glue_connection): :param glue_connection: a dict of AWS Glue connection parameters or None :return: """ - changed = False params = {'ConnectionName': module.params.get("name")} @@ -290,7 +329,8 @@ def delete_glue_connection(connection, module, glue_connection): if glue_connection: try: - connection.delete_connection(**params) + if not module.check_mode: + connection.delete_connection(aws_retry=True, **params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -302,9 +342,10 @@ def main(): argument_spec = ( dict( + availability_zone=dict(type='str'), catalog_id=dict(type='str'), connection_properties=dict(type='dict'), - connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']), + connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']), description=dict(type='str'), match_criteria=dict(type='list', elements='str'), name=dict(required=True, type='str'), @@ -316,12 +357,15 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[ - ('state', 'present', ['connection_properties']) - ] + ('state', 'present', ['connection_properties']), + ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id']) + ], + supports_check_mode=True ) - connection_glue = module.client('glue') - connection_ec2 = module.client('ec2') + retry_decorator = AWSRetry.jittered_backoff(retries=10) + connection_glue = module.client('glue', retry_decorator=retry_decorator) + connection_ec2 = module.client('ec2', retry_decorator=retry_decorator) glue_connection = _get_glue_connection(connection_glue, module) From fb2ea600677fe3bccc73bf2eef33509eeb8abb0b Mon Sep 17 00:00:00 2001 From: Sakar Date: Mon, 29 Mar 2021 16:26:04 +0530 Subject: [PATCH 131/683] Added assignPublicIp param in network_configuration (#395) * added assign_public_ip feature * fix sanity issues and added changelog Co-authored-by: Mark Chappell --- ecs_task.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 
deletions(-) diff --git a/ecs_task.py b/ecs_task.py index 90f9df43f01..03295c16eac 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -64,8 +64,13 @@ network_configuration: description: - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). + - I(assign_public_ip) requires botocore >= 1.8.4 type: dict suboptions: + assign_public_ip: + description: Whether the task's elastic network interface receives a public IP address. + type: bool + version_added: 1.5.0 subnets: description: A list of subnet IDs to which the task is attached. type: list @@ -142,6 +147,21 @@ - my_security_group register: task_output +- name: RUN a task on Fargate with public ip assigned + community.aws.ecs_task: + operation: run + count: 2 + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + started_by: ansible_user + launch_type: FARGATE + network_configuration: + assign_public_ip: yes + subnets: + - subnet-abcd1234 + register: task_output + - name: Stop a task community.aws.ecs_task: operation: stop @@ -248,6 +268,12 @@ def format_network_configuration(self, network_config): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't look up security groups") result['securityGroups'] = groups + if 'assign_public_ip' in network_config: + if network_config['assign_public_ip'] is True: + result['assignPublicIp'] = "ENABLED" + else: + result['assignPublicIp'] = "DISABLED" + return dict(awsvpcConfiguration=result) def list_tasks(self, cluster_name, service_name, status): @@ -331,6 +357,12 @@ def ecs_api_handles_network_configuration(self): # to e.g. ecs.run_task, it's just passed as a keyword argument) return self.module.botocore_at_least('1.7.44') + def ecs_api_handles_network_configuration_assignIp(self): + # There doesn't seem to be a nice way to inspect botocore to look + # for attributes (and networkConfiguration is not an explicit argument + # to e.g. 
ecs.run_task, it's just passed as a keyword argument) + return self.module.botocore_at_least('1.8.4') + def main(): argument_spec = dict( @@ -373,8 +405,11 @@ def main(): service_mgr = EcsExecManager(module) - if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration(): - module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') + if module.params['network_configuration']: + if 'assign_public_ip' in module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration_assignIp(): + module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration') + elif not service_mgr.ecs_api_handles_network_configuration(): + module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type(): module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type') From 928ee58d587d3fb679089520018dba8a6d5e7d3d Mon Sep 17 00:00:00 2001 From: Eduardo Ramos Date: Tue, 30 Mar 2021 04:19:59 +1000 Subject: [PATCH 132/683] Add platform to ecs service (#353) * Add platformVersion params to ecs_service * Add changelog fragment * Add version_added Co-authored-by: Mark Chappell --- ecs_service.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/ecs_service.py b/ecs_service.py index 42a45bba064..e498bb6d827 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -156,6 +156,13 @@ required: false choices: ["EC2", "FARGATE"] type: str + platform_version: + type: str + description: + - Numeric part of platform version or LATEST + - See U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) for more details. + required: false + version_added: 1.5.0 health_check_grace_period_seconds: description: - Seconds to wait before health checking the freshly added/updated services. 
@@ -561,7 +568,8 @@ def is_matching_service(self, expected, existing): def create_service(self, service_name, cluster_name, task_definition, load_balancers, desired_count, client_token, role, deployment_configuration, placement_constraints, placement_strategy, health_check_grace_period_seconds, - network_configuration, service_registries, launch_type, scheduling_strategy): + network_configuration, service_registries, launch_type, platform_version, + scheduling_strategy): params = dict( cluster=cluster_name, @@ -578,6 +586,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan params['networkConfiguration'] = network_configuration if launch_type: params['launchType'] = launch_type + if platform_version: + params['platformVersion'] = platform_version if self.health_check_setable(params) and health_check_grace_period_seconds is not None: params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds if service_registries: @@ -685,6 +695,7 @@ def main(): assign_public_ip=dict(type='bool') )), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), + platform_version=dict(required=False, type='str'), service_registries=dict(required=False, type='list', default=[], elements='dict'), scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']) ) @@ -804,6 +815,7 @@ def main(): network_configuration, serviceRegistries, module.params['launch_type'], + module.params['platform_version'], module.params['scheduling_strategy'] ) except botocore.exceptions.ClientError as e: From 70e5f4a0b0f7f3c8b22ebeb1747042ff507f7b75 Mon Sep 17 00:00:00 2001 From: Nicolas Boutet Date: Tue, 30 Mar 2021 12:40:51 +0200 Subject: [PATCH 133/683] route53: fix getting private zones by name when a vpc_id is provided (#510) * Fix route53 get_zone_id_by_name for private zones * Add tests for route53 modules with private zone * Add tests for route53 with vpc_id * Add changelog fragment * Remove dead code * Use private IPs in tests --- route53.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/route53.py b/route53.py index 84a8dc997fb..78e17437d74 100644 --- a/route53.py +++ b/route53.py @@ -410,15 +410,9 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): if want_vpc_id: # NOTE: These details aren't available in other boto methods, hence the necessary # extra API call - hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone.id) - zone_details = hosted_zone['HostedZone'] - # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 - if isinstance(zone_details['VPCs'], dict): - if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: - return zone_id - else: # Forward compatibility for when boto fixes that bug - if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: - return zone_id + hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) + if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]: + return zone_id else: return zone_id return None From 4f0d4996defc23b84b7e106aaaef0cd27a594294 Mon Sep 17 00:00:00 2001 From: Tyler Schwend Date: Wed, 31 Mar 2021 12:16:16 -0400 Subject: [PATCH 134/683] kinesis_stream: Don't mark kstreams `changed` when no encryption actions taken (#27) * fix: Don't mark kstreams `changed` when no encryption actions taken Fixes https://github.com/ansible/ansible/issues/65928 * doc: add changelog fragment * Move disable_stream_encryption test to integration test * Update descriptions/fetch calls to still run on check_mode * use standard 
helpers to convert tags to/from boto3 format * use camel_dict_to_snake_dict helper * use standard compare_aws_tags helper * Fix tag handling and use standard helpers * Format results and add tags when manipulating encryption settings * Move kinesis_stream tests over to just integration tests * changelog * lint Co-authored-by: Tyler Schwend Co-authored-by: Mark Chappell --- kinesis_stream.py | 371 ++++++++++++++-------------------------------- 1 file changed, 115 insertions(+), 256 deletions(-) diff --git a/kinesis_stream.py b/kinesis_stream.py index d9b3cc0d938..755cfa096d4 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -192,112 +192,21 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -def convert_to_lower(data): - """Convert all uppercase keys in dict with lowercase_ - Args: - data (dict): Dictionary with keys that have upper cases in them - Example.. FooBar == foo_bar - if a val is of type datetime.datetime, it will be converted to - the ISO 8601 - - Basic Usage: - >>> test = {'FooBar': []} - >>> test = convert_to_lower(test) - { - 'foo_bar': [] - } - - Returns: - Dictionary - """ - results = dict() - if isinstance(data, dict): - for key, val in data.items(): - key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower() - if key[0] == '_': - key = key[1:] - if isinstance(val, datetime.datetime): - results[key] = val.isoformat() - elif isinstance(val, dict): - results[key] = convert_to_lower(val) - elif isinstance(val, list): - converted = list() - for item in val: - converted.append(convert_to_lower(item)) - results[key] = converted - else: - results[key] = val - return results - - -def make_tags_in_proper_format(tags): - """Take a dictionary of tags and convert them into the AWS Tags format. - Args: - tags (list): The tags you want applied. - - Basic Usage: - >>> tags = [{'Key': 'env', 'Value': 'development'}] - >>> make_tags_in_proper_format(tags) - { - "env": "development", - } - - Returns: - Dict - """ - formatted_tags = dict() - for tag in tags: - formatted_tags[tag.get('Key')] = tag.get('Value') - - return formatted_tags - - -def make_tags_in_aws_format(tags): - """Take a dictionary of tags and convert them into the AWS Tags format. - Args: - tags (dict): The tags you want applied. - - Basic Usage: - >>> tags = {'env': 'development', 'service': 'web'} - >>> make_tags_in_proper_format(tags) - [ - { - "Value": "web", - "Key": "service" - }, - { - "Value": "development", - "key": "env" - } - ] - - Returns: - List - """ - formatted_tags = list() - for key, val in tags.items(): - formatted_tags.append({ - 'Key': key, - 'Value': val - }) - - return formatted_tags - - -def get_tags(client, stream_name, check_mode=False): +def get_tags(client, stream_name): """Retrieve the tags for a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. stream_name (str): Name of the Kinesis stream. - Kwargs: - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
- default=False - Basic Usage: >>> client = boto3.client('kinesis') >>> stream_name = 'test-stream' @@ -313,34 +222,22 @@ def get_tags(client, stream_name, check_mode=False): } results = dict() try: - if not check_mode: - results = ( - client.list_tags_for_stream(**params)['Tags'] - ) - else: - results = [ - { - 'Key': 'DryRunMode', - 'Value': 'true' - }, - ] + results = ( + client.list_tags_for_stream(**params)['Tags'] + ) success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) - return success, err_msg, results + return success, err_msg, boto3_tag_list_to_ansible_dict(results) -def find_stream(client, stream_name, check_mode=False): +def find_stream(client, stream_name): """Retrieve a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. stream_name (str): Name of the Kinesis stream. - Kwargs: - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - Basic Usage: >>> client = boto3.client('kinesis') >>> stream_name = 'test-stream' @@ -357,32 +254,19 @@ def find_stream(client, stream_name, check_mode=False): has_more_shards = True shards = list() try: - if not check_mode: - while has_more_shards: - results = ( - client.describe_stream(**params)['StreamDescription'] - ) - shards.extend(results.pop('Shards')) - has_more_shards = results['HasMoreShards'] - if has_more_shards: - params['ExclusiveStartShardId'] = shards[-1]['ShardId'] - results['Shards'] = shards - num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']]) - results['OpenShardsCount'] = len(shards) - num_closed_shards - results['ClosedShardsCount'] = num_closed_shards - results['ShardsCount'] = len(shards) - else: - results = { - 'OpenShardsCount': 5, - 'ClosedShardsCount': 0, - 'ShardsCount': 5, - 'HasMoreShards': True, - 'RetentionPeriodHours': 24, - 'StreamName': stream_name, - 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name), - 'StreamStatus': 'ACTIVE', - 'EncryptionType': 'NONE' - } + while has_more_shards: + results = ( + client.describe_stream(**params)['StreamDescription'] + ) + shards.extend(results.pop('Shards')) + has_more_shards = results['HasMoreShards'] + if has_more_shards: + params['ExclusiveStartShardId'] = shards[-1]['ShardId'] + results['Shards'] = shards + num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']]) + results['OpenShardsCount'] = len(shards) - num_closed_shards + results['ClosedShardsCount'] = num_closed_shards + results['ShardsCount'] = len(shards) success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -421,7 +305,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, while wait_timeout > time.time(): try: find_success, find_msg, stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if check_mode: status_achieved = True @@ -486,7 +370,7 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False): client.add_tags_to_stream(**params) success = True elif action == 'delete': - params['TagKeys'] = list(tags) + params['TagKeys'] = tags client.remove_tags_from_stream(**params) success = True else: @@ -505,38 +389,6 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False): return success, err_msg -def recreate_tags_from_list(list_of_tags): - """Recreate tags from a list of tuples into the Amazon Tag format. - Args: - list_of_tags (list): List of tuples. 
- - Basic Usage: - >>> list_of_tags = [('Env', 'Development')] - >>> recreate_tags_from_list(list_of_tags) - [ - { - "Value": "Development", - "Key": "Env" - } - ] - - Returns: - List - """ - tags = list() - i = 0 - for i in range(len(list_of_tags)): - key_name = list_of_tags[i][0] - key_val = list_of_tags[i][1] - tags.append( - { - 'Key': key_name, - 'Value': key_val - } - ) - return tags - - def update_tags(client, stream_name, tags, check_mode=False): """Update tags for an amazon resource. Args: @@ -561,52 +413,28 @@ def update_tags(client, stream_name, tags, check_mode=False): changed = False err_msg = '' tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name, check_mode=check_mode) + get_tags(client, stream_name) ) - if current_tags: - tags = make_tags_in_aws_format(tags) - current_tags_set = ( - set( - reduce( - lambda x, y: x + y, - [make_tags_in_proper_format(current_tags).items()] - ) - ) - ) - new_tags_set = ( - set( - reduce( - lambda x, y: x + y, - [make_tags_in_proper_format(tags).items()] - ) + tags_to_set, tags_to_delete = compare_aws_tags( + current_tags, tags, + purge_tags=True, + ) + if tags_to_delete: + delete_success, delete_msg = ( + tags_action( + client, stream_name, tags_to_delete, action='delete', + check_mode=check_mode ) ) - tags_to_delete = list(current_tags_set.difference(new_tags_set)) - tags_to_update = list(new_tags_set.difference(current_tags_set)) - if tags_to_delete: - tags_to_delete = make_tags_in_proper_format( - recreate_tags_from_list(tags_to_delete) - ) - delete_success, delete_msg = ( - tags_action( - client, stream_name, tags_to_delete, action='delete', - check_mode=check_mode - ) - ) - if not delete_success: - return delete_success, changed, delete_msg - if tags_to_update: - tags = make_tags_in_proper_format( - recreate_tags_from_list(tags_to_update) - ) - else: - return True, changed, 'Tags do not need to be updated' + if not delete_success: + return delete_success, changed, delete_msg + tag_msg = 'Tags removed' - if tags: + if tags_to_set: create_success, create_msg = ( tags_action( - client, stream_name, tags, action='create', + client, stream_name, tags_to_set, action='create', check_mode=check_mode ) ) @@ -926,7 +754,7 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe return wait_success, False, wait_msg elif changed and not wait: stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found: if current_stream['StreamStatus'] != 'ACTIVE': @@ -963,7 +791,7 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe return wait_success, changed, wait_msg else: stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found and current_stream['StreamStatus'] != 'ACTIVE': err_msg = ( @@ -976,6 +804,7 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe tag_success, tag_changed, err_msg = ( update_tags(client, stream_name, tags, check_mode=check_mode) ) + changed |= tag_changed if wait: success, err_msg, status_stream = ( wait_for_status( @@ -1028,7 +857,7 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None results = dict() stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found and current_stream.get('StreamStatus') == 'DELETING' 
and wait: @@ -1089,7 +918,7 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None return success, changed, err_msg, results stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if retention_period and current_stream.get('StreamStatus') == 'ACTIVE': changed, err_msg = ( @@ -1112,19 +941,19 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None if success: stream_found, stream_msg, results = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name, check_mode=check_mode) + get_tags(client, stream_name) ) - if current_tags and not check_mode: - current_tags = make_tags_in_proper_format(current_tags) - results['Tags'] = current_tags - elif check_mode and tags: - results['Tags'] = tags - else: - results['Tags'] = dict() - results = convert_to_lower(results) + if check_mode: + current_tags = tags + + if not current_tags: + current_tags = dict() + + results = camel_dict_to_snake_dict(results) + results['tags'] = current_tags return success, changed, err_msg, results @@ -1157,7 +986,7 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, err_msg = '' results = dict() stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found: success, err_msg = ( @@ -1226,35 +1055,53 @@ def start_stream_encryption(client, stream_name, encryption_type='', key_id='', results = dict() stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found: - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode + if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id): + changed = False + success = True + err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name) + else: + success, err_msg = ( + stream_encryption_action( + client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode + ) ) - ) - if success: - changed = True - if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode + if success: + changed = True + if wait: + success, err_msg, results = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) + if not success: + return success, True, err_msg, results + else: + err_msg = ( + 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) ) - ) - err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) - if not success: - return success, True, err_msg, results - else: - err_msg = ( - 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) - ) else: success = True changed = False err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name) + if success: + stream_found, stream_msg, results = ( + find_stream(client, stream_name) + ) + tag_success, tag_msg, current_tags = ( + get_tags(client, stream_name) + ) + if not current_tags: + current_tags = 
dict() + + results = camel_dict_to_snake_dict(results) + results['tags'] = current_tags + return success, changed, err_msg, results @@ -1278,7 +1125,7 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', Basic Usage: >>> client = boto3.client('kinesis') >>> stream_name = 'test-stream' - >>> start_stream_encryption(client, stream_name,encryption_type, key_id) + >>> stop_stream_encryption(client, stream_name,encryption_type, key_id) Returns: Tuple (bool, bool, str, dict) @@ -1292,7 +1139,7 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', results = dict() stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name, check_mode=check_mode) + find_stream(client, stream_name) ) if stream_found: if current_stream.get('EncryptionType') == 'KMS': @@ -1301,11 +1148,7 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode ) ) - elif current_stream.get('EncryptionType') == 'NONE': - success = True - - if success: - changed = True + changed = success if wait: success, err_msg, results = ( wait_for_status( @@ -1313,18 +1156,34 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', check_mode=check_mode ) ) - err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) if not success: return success, True, err_msg, results + err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) else: err_msg = ( 'Stream {0} is in the process of stopping encryption.'.format(stream_name) ) + elif current_stream.get('EncryptionType') == 'NONE': + success = True + err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name) else: success = True changed = False err_msg = 'Stream {0} does not exist.'.format(stream_name) + if success: + stream_found, stream_msg, results = ( + find_stream(client, stream_name) + ) + tag_success, tag_msg, current_tags = ( + get_tags(client, stream_name) + ) + if not current_tags: + current_tags = dict() + + results = camel_dict_to_snake_dict(results) + results['tags'] = current_tags + return success, changed, err_msg, results From 2a991ab6cee4144135cdf99fbba618c19fe6a598 Mon Sep 17 00:00:00 2001 From: msven Date: Wed, 31 Mar 2021 14:30:50 -0500 Subject: [PATCH 135/683] Fix changed always true when kms key alias used (#506) Fix output.exists value on create Fix tags being changed to lower case add changelog --- cloudtrail.py | 50 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/cloudtrail.py b/cloudtrail.py index 5f8aa5ae03f..a2f2076993f 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -261,6 +261,24 @@ ) +def get_kms_key_aliases(module, client, keyId): + """ + get list of key aliases + + module : AnsibleAWSModule object + client : boto3 client connection object for kms + keyId : keyId to get aliases for + """ + try: + key_resp = client.list_aliases(KeyId=keyId) + except (BotoCoreError, ClientError) as err: + # Don't fail here, just return [] to maintain backwards compat + # in case user doesn't have kms:ListAliases permissions + return [] + + return key_resp['Aliases'] + + def create_trail(module, client, ct_params): """ Creates a CloudTrail @@ -500,6 +518,7 @@ def main(): # If the trail exists set the result exists variable if trail is not None: results['exists'] = True + initial_kms_key_id = trail.get('KmsKeyId') if 
state == 'absent' and results['exists']:
         # If Trail exists go ahead and delete
@@ -524,7 +543,11 @@ def main():
                 val = ct_params.get(key)
                 if val != trail.get(tkey):
                     do_update = True
-                    results['changed'] = True
+                    if tkey != 'KmsKeyId':
+                        # We'll check if the KmsKeyId causes changes later since
+                        # user could've provided a key alias, alias arn, or key id
+                        # and trail['KmsKeyId'] is always a key arn
+                        results['changed'] = True
                     # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
                     if module.check_mode:
                         trail.update({tkey: ct_params.get(key)})
@@ -533,6 +556,26 @@ def main():
             update_trail(module, client, ct_params)
             trail = get_trail_facts(module, client, ct_params['Name'])

+        # Determine if KmsKeyId changed
+        if not module.check_mode:
+            if initial_kms_key_id != trail.get('KmsKeyId'):
+                results['changed'] = True
+        else:
+            new_key = ct_params.get('KmsKeyId')
+            if initial_kms_key_id != new_key:
+                # Assume changed for a moment
+                results['changed'] = True
+
+                # However, new_key could be a key id, alias arn, or alias name
+                # that maps back to the key arn in initial_kms_key_id. So check
+                # all aliases for a match.
+                initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
+                for a in initial_aliases:
+                    if (a['AliasName'] == new_key or
+                            a['AliasArn'] == new_key or
+                            a['TargetKeyId'] == new_key):
+                        results['changed'] = False
+
         # Check if we need to start/stop logging
         if enable_logging and not trail['IsLogging']:
             results['changed'] = True
@@ -554,11 +597,12 @@ def main():
                 results['changed'] = True
             trail['tags'] = tags
         # Populate trail facts in output
-        results['trail'] = camel_dict_to_snake_dict(trail)
+        results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])

     elif state == 'present' and not results['exists']:
         # Trail doesn't exist just go create it
         results['changed'] = True
+        results['exists'] = True
         if not module.check_mode:
             # If we aren't in check_mode then actually create it
             created_trail = create_trail(module, client, ct_params)
@@ -598,7 +642,7 @@ def main():
             trail['IsLogging'] = enable_logging
             trail['tags'] = tags
         # Populate trail facts in output
-        results['trail'] = camel_dict_to_snake_dict(trail)
+        results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])

     module.exit_json(**results)

From 0ffee90d567f3f86ad3ab02e554aadb405c7816c Mon Sep 17 00:00:00 2001
From: Jacob Henner
Date: Wed, 31 Mar 2021 15:33:54 -0400
Subject: [PATCH 136/683] Fully support mixed instance policy (#232)

* Fully support mixed instance policy

Previously, setting instances_distribution was not supported.
instances_distribution should be supported, to allow users to enable
spot instances within their mixed instance ASGs.

Note: The type and significance of the mixed_instance_policy has
changed. It now captures all of the mixed_instance_policy configuration
parameters, rather than just a list of instance types.

Fixes #231

* Restore mixed_instances_policy backwards-compat

Restore mixed_instances_policy backwards compatibility by using
mixed_instances_policy_full to return full dictionary.

Also, fix some doc typos, add CHANGELOG fragment, and split into
separate test case.
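As an aside, and purely as a hedged sketch (the inlined scrub step stands in for the module's scrub_none_parameters helper; an installed ansible-core provides the import), this is roughly how the snake_case instances_distribution suboptions become the CamelCase InstancesDistribution structure sent to AWS:

    from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

    distribution = {
        'on_demand_percentage_above_base_capacity': 0,
        'spot_allocation_strategy': 'capacity-optimized',
        'spot_max_price': None,  # suboptions the user did not set arrive as None
    }
    # Drop unset keys before camelizing, mirroring scrub_none_parameters.
    scrubbed = {key: value for key, value in distribution.items() if value is not None}
    policy = {'InstancesDistribution': snake_dict_to_camel_dict(scrubbed, capitalize_first=True)}
    # policy == {'InstancesDistribution': {'OnDemandPercentageAboveBaseCapacity': 0,
    #                                      'SpotAllocationStrategy': 'capacity-optimized'}}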
Addresses feedback in #232 * Only return mixed_instances_policy_full if set --- ec2_asg.py | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 113 insertions(+), 2 deletions(-) diff --git a/ec2_asg.py b/ec2_asg.py index 152918b6d6c..a87ce7f9681 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -93,6 +93,67 @@ - A list of instance_types. type: list elements: str + required: false + instances_distribution: + description: + - >- + Specifies the distribution of On-Demand Instances and Spot Instances, the maximum price + to pay for Spot Instances, and how the Auto Scaling group allocates instance types + to fulfill On-Demand and Spot capacity. + - 'See also U(https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_InstancesDistribution.html)' + required: false + type: dict + version_added: 1.5.0 + suboptions: + on_demand_allocation_strategy: + description: + - Indicates how to allocate instance types to fulfill On-Demand capacity. + type: str + required: false + version_added: 1.5.0 + on_demand_base_capacity: + description: + - >- + The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand + Instances. This base portion is provisioned first as your group scales. + - >- + Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a + percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting. + type: int + required: false + version_added: 1.5.0 + on_demand_percentage_above_base_capacity: + description: + - Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. + - Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances. + - 'Valid range: 0 to 100' + type: int + required: false + version_added: 1.5.0 + spot_allocation_strategy: + description: + - Indicates how to allocate instances across Spot Instance pools. + type: str + required: false + version_added: 1.5.0 + spot_instance_pools: + description: + - >- + The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from + the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2. + - Used only when the Spot allocation strategy is lowest-price. + - 'Valid Range: Minimum value of 1. Maximum value of 20.' + type: int + required: false + version_added: 1.5.0 + spot_max_price: + description: + - The maximum price per unit hour that you are willing to pay for a Spot Instance. + - If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price. + - To remove a value that you previously set, include the parameter but leave the value blank. + type: str + required: false + version_added: 1.5.0 type: dict placement_group: description: @@ -339,6 +400,9 @@ - t3a.large - t3.large - t2.large + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized min_size: 1 max_size: 10 desired_capacity: 5 @@ -447,11 +511,38 @@ returned: success type: int sample: 1 -mixed_instance_policy: - description: Returns the list of instance types if a mixed instance policy is set. +mixed_instances_policy: + description: Returns the list of instance types if a mixed instances policy is set. 
returned: success type: list sample: ["t3.micro", "t3a.micro"] +mixed_instances_policy_full: + description: Returns the full dictionary representation of the mixed instances policy if a mixed instances policy is set. + returned: success + type: dict + sample: { + "instances_distribution": { + "on_demand_allocation_strategy": "prioritized", + "on_demand_base_capacity": 0, + "on_demand_percentage_above_base_capacity": 0, + "spot_allocation_strategy": "capacity-optimized" + }, + "launch_template": { + "launch_template_specification": { + "launch_template_id": "lt-53c2425cffa544c23", + "launch_template_name": "random-LaunchTemplate", + "version": "2" + }, + "overrides": [ + { + "instance_type": "m5.xlarge" + }, + { + "instance_type": "m5a.xlarge" + }, + ] + } + } pending_instances: description: Number of instances in pending state returned: success @@ -536,7 +627,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', @@ -742,6 +836,7 @@ def get_properties(autoscaling_group): properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy') if raw_mixed_instance_object: + properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object) properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')] metrics = autoscaling_group.get('EnabledMetrics') @@ -792,6 +887,7 @@ def get_launch_object(connection, ec2_connection): if mixed_instances_policy: instance_types = mixed_instances_policy.get('instance_types', []) + instances_distribution = mixed_instances_policy.get('instances_distribution', {}) policy = { 'LaunchTemplate': { 'LaunchTemplateSpecification': launch_object['LaunchTemplate'] @@ -802,6 +898,9 @@ def get_launch_object(connection, ec2_connection): for instance_type in instance_types: instance_type_dict = {'InstanceType': instance_type} policy['LaunchTemplate']['Overrides'].append(instance_type_dict) + if instances_distribution: + instances_distribution_params = scrub_none_parameters(instances_distribution) + policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True) launch_object['MixedInstancesPolicy'] = policy return launch_object @@ -1661,6 +1760,18 @@ def main(): type='list', elements='str' ), + instances_distribution=dict( + type='dict', + default=None, + options=dict( + on_demand_allocation_strategy=dict(type='str'), + on_demand_base_capacity=dict(type='int'), + on_demand_percentage_above_base_capacity=dict(type='int'), + spot_allocation_strategy=dict(type='str'), + spot_instance_pools=dict(type='int'), + spot_max_price=dict(type='str'), + ) + ) ) ), placement_group=dict(type='str'), From 2913b14d5c7c3d4c7c4738a1c434c8520cb49809 Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Sat, 3 Apr 2021 
07:12:18 -0700
Subject: [PATCH 137/683] ec2_instance don't change termination protection in check mode (#505)

* ec2_instance don't change termination protection in check mode

Fixes: ansible/ansible/issues/67716
Extend termination protection tests

* Set the path for the aws CLI tool
  - setting ansible_python_interpreter updates the python search path but not
    the shell search path

* changelog

Co-authored-by: Mark Chappell
---
 ec2_instance.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ec2_instance.py b/ec2_instance.py
index 18af847aed6..9f61882491b 100644
--- a/ec2_instance.py
+++ b/ec2_instance.py
@@ -1512,6 +1512,7 @@ def change_instance_state(filters, desired_state, ec2=None):
     unchanged = set()
     failure_reason = ""

+    # TODO: better check_moding in here https://github.com/ansible-collections/community.aws/issues/16
     for inst in instances:
         try:
             if desired_state == 'TERMINATED':
@@ -1588,7 +1589,8 @@ def handle_existing(existing_matches, changed, ec2, state):
         )
     changes = diff_instance_and_params(existing_matches[0], module.params)
     for c in changes:
-        ec2.modify_instance_attribute(aws_retry=True, **c)
+        if not module.check_mode:
+            ec2.modify_instance_attribute(aws_retry=True, **c)
     changed |= bool(changes)
     changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
     changed |= change_network_attachments(existing_matches[0], module.params, ec2)

From c679569474d523391deebc15e81e2ef5799576c9 Mon Sep 17 00:00:00 2001
From: Kyle
Date: Mon, 5 Apr 2021 11:59:45 -0600
Subject: [PATCH 138/683] Remove 'ResourceRecords' when 'AliasTarget' (#502)

* Remove 'ResourceRecords' when 'AliasTarget'

Sending a change to the Route 53 API that includes both an AliasTarget
and a ResourceRecord causes the API to return an error. Removing the
ResourceRecord when an AliasTarget is present allows this module to
continue without error.

* Cleanup tests and use RFC2602 Domains and RFC5737 CIDRs
* Add integration test for aliases
* Make Alias and TTL mutually exclusive
* Update docs to list region/failover/weight as mutually exclusive.
* changelog
---
 route53.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/route53.py b/route53.py
index 78e17437d74..2168a0b11b6 100644
--- a/route53.py
+++ b/route53.py
@@ -44,6 +44,7 @@
   ttl:
     description:
       - The TTL, in seconds, to give the new record.
+      - Mutually exclusive with I(alias).
     default: 3600
     type: int
   type:
@@ -55,6 +56,7 @@
   alias:
     description:
       - Indicates if this is an alias record.
+      - Mutually exclusive with I(ttl).
      - Defaults to C(false).
     type: bool
   alias_hosted_zone_id:
@@ -99,6 +101,7 @@
       have the same combination of DNS name and type, a value that determines
       what portion of traffic for the current resource record set is routed
       to the associated location.
+      - Mutually exclusive with I(region) and I(failover).
     type: int
   region:
     description:
@@ -106,6 +109,7 @@
       that have the same combination of DNS name and type, a value that
       determines which region this should be associated with for the
       latency-based routing
+      - Mutually exclusive with I(weight) and I(failover).
     type: str
   health_check:
     description:
@@ -115,6 +119,7 @@
     description:
       - Failover resource record sets only. Whether this is the primary or
         secondary resource record set. Allowed values are PRIMARY and SECONDARY
+      - Mutually exclusive with I(weight) and I(region).
type: str choices: ['SECONDARY', 'PRIMARY'] vpc_id: @@ -468,7 +473,10 @@ def main(): ('state', 'delete', ['value']), ), # failover, region and weight are mutually exclusive - mutually_exclusive=[('failover', 'region', 'weight')], + mutually_exclusive=[ + ('failover', 'region', 'weight'), + ('alias', 'ttl'), + ], # failover, region and weight require identifier required_by=dict( failover=('identifier',), @@ -557,6 +565,10 @@ def main(): DNSName=value_in[0], EvaluateTargetHealth=alias_evaluate_target_health_in ) + if 'ResourceRecords' in resource_record_set: + del resource_record_set['ResourceRecords'] + if 'TTL' in resource_record_set: + del resource_record_set['TTL'] # On CAA records order doesn't matter if type_in == 'CAA': From dbdc43196b0d3a7c057ba1cf3c3a8b0842fedb8f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 6 Apr 2021 09:22:36 +0200 Subject: [PATCH 139/683] Fix route53 idempotency issues (#525) * Fix name comparison: AWS uses octal encoding for characters like '@' and '*'. * Fix CAA record ordering. * Add changelog fragment. * Add wildcard record test. --- route53.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/route53.py b/route53.py index 2168a0b11b6..945d5e8b679 100644 --- a/route53.py +++ b/route53.py @@ -389,8 +389,9 @@ def get_record(route53, zone_id, record_name, record_type, record_identifier): record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id) for record_set in record_sets_results: + record_set['Name'] = record_set['Name'].encode().decode('unicode_escape') # If the record name and type is not equal, move to the next record - if (record_name, record_type) != (record_set['Name'], record_set['Type']): + if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']): continue if record_identifier and record_identifier != record_set.get("SetIdentifier"): @@ -573,6 +574,8 @@ def main(): # On CAA records order doesn't matter if type_in == 'CAA': resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value')) + if aws_record: + aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value')) if command_in == 'create' and aws_record == resource_record_set: module.exit_json(changed=False) From c1cde1527e029ed44eb67a8fb95b258aba70248d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 6 Apr 2021 16:50:56 +0200 Subject: [PATCH 140/683] ec2_vpc_peer - idempotency + integration tests (#501) * Add minimal integration tests for ec2_vpc_peer * Add Retries to VPC Peering * Fix idempotency when deleting connections * Fix idempotency when after rejecting peering connections * Test for updating tags * Add first round of assertions (changed/successful) * Add docs about ec2_vpc_peering_info return values * Make sure Peering IDs are consistent * docs update * Initial tests for ec2_vpc_peering_info results * Use ansible_dict_to_boto3_filter_list * Add support for waiting on state changes * Assert shape of results when searching based on status code * changelog --- ec2_vpc_peer.py | 91 ++++++++++++++++++++------- ec2_vpc_peering_info.py | 136 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 202 insertions(+), 25 deletions(-) diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index cea160d34ff..9c5d35349eb 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -51,6 +51,12 @@ default: present choices: ['present', 'absent', 'accept', 'reject'] type: str + wait: + description: + - Wait for peering state changes to complete. 
+      required: false
+      default: false
+      type: bool
 author: Mike Mochan (@mmochan)
 extends_documentation_fragment:
 - amazon.aws.aws
@@ -223,6 +229,24 @@

 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def wait_for_state(client, module, state, pcx_id):
+    waiter = client.get_waiter('vpc_peering_connection_exists')
+    peer_filter = {
+        'vpc-peering-connection-id': pcx_id,
+        'status-code': state,
+    }
+    try:
+        waiter.wait(
+            Filters=ansible_dict_to_boto3_filter_list(peer_filter)
+        )
+    except botocore.exceptions.WaiterError as e:
+        module.fail_json_aws(e, "Failed to wait for state change")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Unable to describe Peering Connection while waiting for state to change")


 def tags_changed(pcx_id, client, module):
@@ -246,18 +270,18 @@ def tags_changed(pcx_id, client, module):


 def describe_peering_connections(params, client):
+    peer_filter = {
+        'requester-vpc-info.vpc-id': params['VpcId'],
+        'accepter-vpc-info.vpc-id': params['PeerVpcId'],
+    }
     result = client.describe_vpc_peering_connections(
-        Filters=[
-            {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]},
-            {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}
-        ]
+        aws_retry=True,
+        Filters=ansible_dict_to_boto3_filter_list(peer_filter),
     )
     if result['VpcPeeringConnections'] == []:
         result = client.describe_vpc_peering_connections(
-            Filters=[
-                {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]},
-                {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]}
-            ]
+            aws_retry=True,
+            Filters=ansible_dict_to_boto3_filter_list(peer_filter),
         )
     return result
@@ -291,8 +315,10 @@ def create_peer_connection(client, module):
     if is_pending(peering_conn):
         return (changed, peering_conn['VpcPeeringConnectionId'])
     try:
-        peering_conn = client.create_vpc_peering_connection(**params)
+        peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params)
         pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
+        if module.params.get('wait'):
+            wait_for_state(client, module, 'pending-acceptance', pcx_id)
         if module.params.get('tags'):
             create_tags(pcx_id, client, module)
         changed = True
@@ -303,7 +329,9 @@ def remove_peer_connection(client, module):
     pcx_id = module.params.get('peering_id')
-    if not pcx_id:
+    if pcx_id:
+        peering_conns = client.describe_vpc_peering_connections(aws_retry=True, VpcPeeringConnectionIds=[pcx_id])
+    else:
         params = dict()
         params['VpcId'] = module.params.get('vpc_id')
         params['PeerVpcId'] = module.params.get('peer_vpc_id')
@@ -311,15 +339,23 @@
         if module.params.get('peer_owner_id'):
             params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
         peering_conns = describe_peering_connections(params, client)
-    if not peering_conns:
-        module.exit_json(changed=False)
-    else:
-        pcx_id = peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId']
+
+    if not peering_conns:
+        module.exit_json(changed=False)
+    else:
+        pcx_id = pcx_id or peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId']
+
+        if peering_conns['VpcPeeringConnections'][0]['Status']['Code'] ==
'deleted': + module.exit_json(msg='Connection in deleted state.', changed=False) + if peering_conns['VpcPeeringConnections'][0]['Status']['Code'] == 'rejected': + module.exit_json(msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS', changed=False) try: params = dict() params['VpcPeeringConnectionId'] = pcx_id - client.delete_vpc_peering_connection(**params) + client.delete_vpc_peering_connection(aws_retry=True, **params) + if module.params.get('wait'): + wait_for_state(client, module, 'deleted', pcx_id) module.exit_json(changed=True) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) @@ -329,7 +365,7 @@ def peer_status(client, module): params = dict() params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')] try: - vpc_peering_connection = client.describe_vpc_peering_connections(**params) + vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code'] except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: module.fail_json_aws(e, msg='Malformed connection ID') @@ -340,16 +376,22 @@ def peer_status(client, module): def accept_reject(state, client, module): changed = False params = dict() - params['VpcPeeringConnectionId'] = module.params.get('peering_id') - if peer_status(client, module) != 'active': + pcx_id = module.params.get('peering_id') + params['VpcPeeringConnectionId'] = pcx_id + current_state = peer_status(client, module) + if current_state not in ['active', 'rejected']: try: if state == 'accept': - client.accept_vpc_peering_connection(**params) + client.accept_vpc_peering_connection(aws_retry=True, **params) + target_state = 'active' else: - client.reject_vpc_peering_connection(**params) + client.reject_vpc_peering_connection(aws_retry=True, **params) + target_state = 'rejected' if module.params.get('tags'): create_tags(params['VpcPeeringConnectionId'], client, module) changed = True + if module.params.get('wait'): + wait_for_state(client, module, target_state, pcx_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) if tags_changed(params['VpcPeeringConnectionId'], client, module): @@ -368,21 +410,21 @@ def load_tags(module): def create_tags(pcx_id, client, module): try: delete_tags(pcx_id, client, module) - client.create_tags(Resources=[pcx_id], Tags=load_tags(module)) + client.create_tags(aws_retry=True, Resources=[pcx_id], Tags=load_tags(module)) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) def delete_tags(pcx_id, client, module): try: - client.delete_tags(Resources=[pcx_id]) + client.delete_tags(aws_retry=True, Resources=[pcx_id]) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) def find_pcx_by_id(pcx_id, client, module): try: - return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id]) + return client.describe_vpc_peering_connections(aws_retry=True, VpcPeeringConnectionIds=[pcx_id]) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) @@ -396,6 +438,7 @@ def main(): peer_owner_id=dict(), tags=dict(required=False, type='dict'), state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), + wait=dict(default=False, type='bool'), ) required_if = [ ('state', 'present', ['vpc_id', 'peer_vpc_id']), @@ -411,7 +454,7 @@ def main(): peer_vpc_id = module.params.get('peer_vpc_id') try: - client = module.client('ec2') + client = 
module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 117992e76c6..a086fde3639 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -61,6 +61,140 @@ ''' RETURN = r''' +vpc_peering_connections: + description: Details of the matching VPC peering connections. + returned: success + type: list + contains: + accepter_vpc_info: + description: Information about the VPC which accepted the connection. + returned: success + type: complex + contains: + cidr_block: + description: The primary CIDR for the VPC. + returned: when connection is in the accepted state. + type: str + example: '10.10.10.0/23' + cidr_block_set: + description: A list of all CIDRs for the VPC. + returned: when connection is in the accepted state. + type: complex + contains: + cidr_block: + description: A CIDR block used by the VPC. + returned: success + type: str + example: '10.10.10.0/23' + owner_id: + description: The AWS account that owns the VPC. + returned: success + type: str + example: 012345678901 + peering_options: + description: Additional peering configuration. + returned: when connection is in the accepted state. + type: dict + contains: + allow_dns_resolution_from_remote_vpc: + description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. + returned: success + type: bool + allow_egress_from_local_classic_link_to_remote_vpc: + description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. + returned: success + type: bool + allow_egress_from_local_vpc_to_remote_classic_link: + description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. + returned: success + type: bool + region: + description: The AWS region that the VPC is in. + returned: success + type: str + example: us-east-1 + vpc_id: + description: The ID of the VPC + returned: success + type: str + example: vpc-0123456789abcdef0 + requester_vpc_info: + description: Information about the VPC which requested the connection. + returned: success + type: complex + contains: + cidr_block: + description: The primary CIDR for the VPC. + returned: when connection is not in the deleted state. + type: str + example: '10.10.10.0/23' + cidr_block_set: + description: A list of all CIDRs for the VPC. + returned: when connection is not in the deleted state. + type: complex + contains: + cidr_block: + description: A CIDR block used by the VPC + returned: success + type: str + example: '10.10.10.0/23' + owner_id: + description: The AWS account that owns the VPC. + returned: success + type: str + example: 012345678901 + peering_options: + description: Additional peering configuration. + returned: when connection is not in the deleted state. + type: dict + contains: + allow_dns_resolution_from_remote_vpc: + description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. + returned: success + type: bool + allow_egress_from_local_classic_link_to_remote_vpc: + description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. 
+ returned: success + type: bool + allow_egress_from_local_vpc_to_remote_classic_link: + description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. + returned: success + type: bool + region: + description: The AWS region that the VPC is in. + returned: success + type: str + example: us-east-1 + vpc_id: + description: The ID of the VPC + returned: success + type: str + example: vpc-0123456789abcdef0 + status: + description: Details of the current status of the connection. + returned: success + type: complex + contains: + code: + description: A short code describing the status of the connection. + returned: success + type: str + example: active + message: + description: Additional information about the status of the connection. + returned: success + type: str + example: Pending Acceptance by 012345678901 + tags: + description: Tags applied to the connection. + returned: success + type: dict + vpc_peering_connection_id: + description: The ID of the VPC peering connection. + returned: success + type: str + example: "pcx-0123456789abcdef0" + result: description: The result of the describe. returned: success @@ -121,7 +255,7 @@ def main(): for peer in results: peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', [])) - module.exit_json(result=results) + module.exit_json(result=results, vpc_peering_connections=results) if __name__ == '__main__': From 220791d736b8a3008f0bfba6fefa14c2ec3ebd4a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 6 Apr 2021 16:51:10 +0200 Subject: [PATCH 141/683] ec2_instance_info - Add AWS Retries (#521) * Add retries to ec2_instance_info * changelog --- ec2_instance_info.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ec2_instance_info.py b/ec2_instance_info.py index be5f1e68892..4c743688627 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -511,17 +511,23 @@ try: import botocore - from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +@AWSRetry.jittered_backoff() +def _describe_instances(connection, **params): + paginator = connection.get_paginator('describe_instances') + return paginator.paginate(**params).build_full_result() + + def list_ec2_instances(connection, module): instance_ids = module.params.get("instance_ids") @@ -529,9 +535,8 @@ def list_ec2_instances(connection, module): filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: - reservations_paginator = connection.get_paginator('describe_instances') - reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result() - except ClientError as e: + reservations = _describe_instances(connection, InstanceIds=instance_ids, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to list ec2 instances") instances = [] From 879220d70be61dbb570e30a2c5a4668684edcc2f Mon Sep 17 00:00:00 2001 From: Alina Buzachis 
<49211501+alinabuzachis@users.noreply.github.com> Date: Wed, 7 Apr 2021 13:43:25 +0200 Subject: [PATCH 142/683] rds_instance: Add purge_security_groups (#500) * rds_instance: Add purge_security_groups feature for vpc_security_groups_ids. * Fixes: #385 --- rds_instance.py | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index 0dd763c369f..0dae125889b 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -416,6 +416,13 @@ - A list of EC2 VPC security groups to associate with the DB cluster. type: list elements: str + purge_security_groups: + description: + - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance. + - Can be applied to I(vpc_security_group_ids) and I(db_security_groups) + type: bool + default: True + version_added: 1.5.0 ''' EXAMPLES = r''' @@ -451,6 +458,15 @@ id: "{{ instance_id }}" state: absent final_snapshot_identifier: "{{ snapshot_id }}" + +- name: Add a new security group without purge + community.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - sg-0be17ba10c9286b0b + purge_security_groups: false + register: result ''' RETURN = r''' @@ -752,6 +768,7 @@ except ImportError: pass # caught by AnsibleAWSModule + from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types @@ -861,6 +878,7 @@ def get_options_with_changing_values(client, module, parameters): port = module.params['port'] apply_immediately = parameters.pop('ApplyImmediately', None) cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] + purge_security_groups = module.params['purge_security_groups'] if port: parameters['DBPortNumber'] = port @@ -872,7 +890,7 @@ def get_options_with_changing_values(client, module, parameters): parameters.pop('Iops', None) instance = get_instance(client, module, instance_id) - updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs) + updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups) updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) parameters = updated_parameters @@ -922,7 +940,7 @@ def get_current_attributes_with_inconsistent_keys(instance): return options -def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs): +def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups): changing_params = {} current_options = get_current_attributes_with_inconsistent_keys(instance) @@ -938,7 +956,12 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c # TODO: allow other purge_option module parameters rather than just checking for things to add if isinstance(current_option, list): if isinstance(desired_option, list): - if set(desired_option) <= set(current_option): + if ( + set(desired_option) < set(current_option) and + option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups + ): + changing_params[option] = desired_option + elif set(desired_option) <= set(current_option): continue elif isinstance(desired_option, string_types): if desired_option in current_option: @@ -958,6 +981,11 @@ def 
get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: changing_params[option] = format_option + elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',): + if purge_security_groups: + changing_params[option] = desired_option + else: + changing_params[option] = list(set(current_option) | set(desired_option)) else: changing_params[option] = desired_option @@ -1082,6 +1110,7 @@ def main(): purge_tags=dict(type='bool', default=True), read_replica=dict(type='bool'), wait=dict(type='bool', default=True), + purge_security_groups=dict(type='bool', default=True), ) parameter_options = dict( From 9f4f7b0c86ecaae81753b1797b20d691acbb9b27 Mon Sep 17 00:00:00 2001 From: Alina Buzachis <49211501+alinabuzachis@users.noreply.github.com> Date: Wed, 7 Apr 2021 16:36:02 +0200 Subject: [PATCH 143/683] rds_param_group: fix tests, check_mode, AWSRetry (#532) * * Fix integration tests * Add AWSRetry * Add check_mode * Add changelog fragment Signed-off-by: Alina Buzachis --- rds_param_group.py | 69 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/rds_param_group.py b/rds_param_group.py index 30aa814de67..ab0718e4b04 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -120,11 +120,11 @@ from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags @@ -137,6 +137,15 @@ } +@AWSRetry.jittered_backoff() +def _describe_db_parameters(connection, **params): + try: + paginator = connection.get_paginator('describe_db_parameters') + return paginator.paginate(**params).build_full_result() + except is_boto3_error_code('DBParameterGroupNotFound'): + return None + + def convert_parameter(param, value): """ Allows setting parameters with 10M = 10* 1024 * 1024 and so on. 
@@ -158,7 +167,7 @@ def convert_parameter(param, value): elif param['DataType'] == 'boolean': if isinstance(value, string_types): - converted_value = to_native(value) in BOOLEANS_TRUE + converted_value = value in BOOLEANS_TRUE # convert True/False to 1/0 converted_value = 1 if converted_value else 0 return str(converted_value) @@ -170,8 +179,13 @@ def update_parameters(module, connection): apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot' errors = [] modify_list = [] - parameters_paginator = connection.get_paginator('describe_db_parameters') - existing = parameters_paginator.paginate(DBParameterGroupName=groupname).build_full_result()['Parameters'] + existing = {} + try: + _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname) + if _existing: + existing = _existing['Parameters'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe existing parameter groups") lookup = dict((param['ParameterName'], param) for param in existing) for param_key, param_value in desired.items(): if param_key not in lookup: @@ -187,7 +201,7 @@ def update_parameters(module, connection): errors.append("Parameter %s is not modifiable" % param_key) # modify_db_parameters takes at most 20 parameters - if modify_list: + if modify_list and not module.check_mode: try: from itertools import izip_longest as zip_longest # python 2 except ImportError: @@ -195,7 +209,7 @@ def update_parameters(module, connection): for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None): non_empty_slice = [item for item in modify_slice if item] try: - connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice) + connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update parameters") return True, errors @@ -204,19 +218,26 @@ def update_parameters(module, connection): def update_tags(module, connection, group, tags): changed = False - existing_tags = connection.list_tags_for_resource(ResourceName=group['DBParameterGroupArn'])['TagList'] + existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList'] to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, module.params['purge_tags']) + + if module.check_mode: + if not to_update and not to_delete: + return False + else: + return True + if to_update: try: - connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'], + connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], Tags=ansible_dict_to_boto3_tag_list(to_update)) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to parameter group") if to_delete: try: - connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'], + connection.remove_tags_from_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], TagKeys=to_delete) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -230,7 +251,7 @@ def ensure_present(module, connection): changed = False errors = [] try: - response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) + response = 
connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) except is_boto3_error_code('DBParameterGroupNotFound'): response = None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except @@ -241,11 +262,12 @@ def ensure_present(module, connection): Description=module.params['description']) if tags: params['Tags'] = ansible_dict_to_boto3_tag_list(tags) - try: - response = connection.create_db_parameter_group(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create parameter group") + if not module.check_mode: + try: + response = connection.create_db_parameter_group(aws_retry=True, **params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create parameter group") else: group = response['DBParameterGroups'][0] if tags: @@ -256,12 +278,14 @@ def ensure_present(module, connection): changed = changed or params_changed try: - response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname) + response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except is_boto3_error_code('DBParameterGroupNotFound'): + module.exit_json(changed=True, errors=errors) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't obtain parameter group information") try: - tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList'] + tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain parameter group tags") group['tags'] = boto3_tag_list_to_ansible_dict(tags) @@ -277,8 +301,12 @@ def ensure_absent(module, connection): module.exit_json(changed=False) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't access parameter group information") + + if response and module.check_mode: + module.exit_json(changed=True) + try: - response = connection.delete_db_parameter_group(DBParameterGroupName=group) + response = connection.delete_db_parameter_group(aws_retry=True, DBParameterGroupName=group) module.exit_json(changed=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete parameter group") @@ -298,10 +326,11 @@ def main(): module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[['state', 'present', ['description', 'engine']]], + supports_check_mode=True ) try: - conn = module.client('rds') + conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From 4fc736da3307a12795715308cf47fdacf66dfea7 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 8 Apr 2021 10:39:17 +0200 Subject: [PATCH 144/683] add wafv2_ip_set module (#449) * add wafv2 ip set module * expand meta/runtime with wafv2_ip_set modules --- wafv2_ip_set.py | 345 
+++++++++++++++++++++++++++++++++++++++++++ wafv2_ip_set_info.py | 150 +++++++++++++++++++ 2 files changed, 495 insertions(+) create mode 100644 wafv2_ip_set.py create mode 100644 wafv2_ip_set_info.py diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py new file mode 100644 index 00000000000..f183211bcf8 --- /dev/null +++ b/wafv2_ip_set.py @@ -0,0 +1,345 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_ip_set +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_ip_set +description: + - Create, modify and delete IP sets for WAFv2. +requirements: + - boto3 + - botocore +options: + state: + description: + - Whether the rule is present or absent. + choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name of the IP set. + required: true + type: str + description: + description: + - Description of the IP set. + required: false + type: str + scope: + description: + - Specifies whether this is for an AWS CloudFront distribution or for a regional application, + such as API Gateway or Application LoadBalancer. + choices: ["CLOUDFRONT","REGIONAL"] + required: true + type: str + ip_address_version: + description: + - Specifies whether this is an IPv4 or an IPv6 IP set. + - Required when I(state=present). + choices: ["IPV4","IPV6"] + type: str + addresses: + description: + - Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in + Classless Inter-Domain Routing (CIDR) notation. + - Required when I(state=present). + - When I(state=absent) and I(addresses) is defined, only the given IP addresses will be removed + from the IP set. The entire IP set itself will stay present. + type: list + elements: str + tags: + description: + - Key value pairs to associate with the resource. + - Currently tags are not visible. Nor in the web ui, nor via cli and nor in boto3. + required: false + type: dict + purge_addresses: + description: + - When set to C(no), keep the existing addresses in place. Will modify and add, but will not delete. 
+ default: yes + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: test ip set + wafv2_ip_set: + name: test02 + state: present + description: hallo eins + scope: REGIONAL + ip_address_version: IPV4 + addresses: + - 8.8.8.8/32 + - 8.8.4.4/32 + tags: + A: B + C: D +''' + +RETURN = """ +addresses: + description: Current addresses of the ip set + sample: + - 8.8.8.8/32 + - 8.8.4.4/32 + returned: Always, as long as the ip set exists + type: list +arn: + description: IP set arn + sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127" + type: str + returned: Always, as long as the ip set exists +description: + description: Description of the ip set + sample: Some IP set description + returned: Always, as long as the ip set exists + type: str +ip_address_version: + description: IP version of the ip set + sample: IPV4 + type: str + returned: Always, as long as the ip set exists +name: + description: IP set name + sample: test02 + returned: Always, as long as the ip set exists + type: str +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + + +class IpSet: + def __init__(self, wafv2, name, scope, fail_json_aws): + self.wafv2 = wafv2 + self.name = name + self.scope = scope + self.fail_json_aws = fail_json_aws + self.existing_set, self.id, self.locktoken = self.get_set() + + def description(self): + return self.existing_set.get('Description') + + def get(self): + if self.existing_set: + return camel_dict_to_snake_dict(self.existing_set) + return None + + def remove(self): + try: + response = self.wafv2.delete_ip_set( + Name=self.name, + Scope=self.scope, + Id=self.id, + LockToken=self.locktoken + ) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.") + return {} + + def create(self, description, ip_address_version, addresses, tags): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'IPAddressVersion': ip_address_version, + 'Addresses': addresses, + } + + if description: + req_obj['Description'] = description + + if tags: + req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + + try: + response = self.wafv2.create_ip_set(**req_obj) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to create wafv2 ip set.") + + self.existing_set, self.id, self.locktoken = self.get_set() + return camel_dict_to_snake_dict(self.existing_set) + + def update(self, description, addresses): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'Id': self.id, + 'Addresses': addresses, + 'LockToken': self.locktoken + } + + if description: + req_obj['Description'] = description + + try: + response = self.wafv2.update_ip_set(**req_obj) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to update wafv2 ip set.") + + self.existing_set, self.id, self.locktoken = self.get_set() + return camel_dict_to_snake_dict(self.existing_set) + + def get_set(self): + response = self.list() + existing_set = None + id = None + locktoken = None + for item in response.get('IPSets'): + if item.get('Name') == self.name: + id = item.get('Id') + locktoken = 
item.get('LockToken') + arn = item.get('ARN') + if id: + try: + existing_set = self.wafv2.get_ip_set( + Name=self.name, + Scope=self.scope, + Id=id + ).get('IPSet') + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") + + return existing_set, id, locktoken + + def list(self, Nextmarker=None): + # there is currently no paginator for wafv2 + req_obj = { + 'Scope': self.scope, + 'Limit': 100 + } + if Nextmarker: + req_obj['NextMarker'] = Nextmarker + + try: + response = self.wafv2.list_ip_sets(**req_obj) + if response.get('NextMarker'): + response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets') + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to list wafv2 ip set.") + + return response + + +def compare(existing_set, addresses, purge_addresses, state): + diff = False + new_rules = [] + existing_rules = existing_set.get('addresses') + if state == 'present': + if purge_addresses: + new_rules = addresses + if sorted(addresses) != sorted(existing_set.get('addresses')): + diff = True + + else: + for requested_rule in addresses: + if requested_rule not in existing_rules: + diff = True + new_rules.append(requested_rule) + + new_rules += existing_rules + else: + if purge_addresses and addresses: + for requested_rule in addresses: + if requested_rule in existing_rules: + diff = True + existing_rules.pop(existing_rules.index(requested_rule)) + new_rules = existing_rules + + return diff, new_rules + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), + description=dict(type='str'), + ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']), + addresses=dict(type='list', elements='str'), + tags=dict(type='dict'), + purge_addresses=dict(type='bool', default=True) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['ip_address_version', 'addresses']]] + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + description = module.params.get("description") + ip_address_version = module.params.get("ip_address_version") + addresses = module.params.get("addresses") + tags = module.params.get("tags") + purge_addresses = module.params.get("purge_addresses") + check_mode = module.check_mode + + wafv2 = module.client('wafv2') + + change = False + retval = {} + + ip_set = IpSet(wafv2, name, scope, module.fail_json_aws) + + if state == 'present': + if ip_set.get(): + change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) + if (change or ip_set.description() != description) and not check_mode: + retval = ip_set.update( + description=description, + addresses=addresses + ) + else: + retval = ip_set.get() + else: + if not check_mode: + retval = ip_set.create( + description=description, + ip_address_version=ip_address_version, + addresses=addresses, + tags=tags + ) + change = True + + if state == 'absent': + if ip_set.get(): + if addresses: + if len(addresses) > 0: + change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) + if change and not check_mode: + retval = ip_set.update( + description=description, + addresses=addresses + ) + else: + if not check_mode: + retval = ip_set.remove() + change = True + + module.exit_json(changed=change, **retval) + + +if __name__ == 
'__main__': + main() diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py new file mode 100644 index 00000000000..23b3abed4ec --- /dev/null +++ b/wafv2_ip_set_info.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_ip_set_info +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: Get information about wafv2 ip sets +description: + - Get information about existing wafv2 ip sets. +requirements: + - boto3 + - botocore +options: + name: + description: + - The name of the IP set. + required: true + type: str + scope: + description: + - Specifies whether this is for an AWS CloudFront distribution or for a regional application. + choices: ["CLOUDFRONT","REGIONAL"] + required: true + type: str + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: test ip set + wafv2_ip_set_info: + name: test02 + scope: REGIONAL +''' + +RETURN = """ +addresses: + description: Current addresses of the ip set + sample: + - 8.8.8.8/32 + - 8.8.4.4/32 + returned: Always, as long as the ip set exists + type: list +arn: + description: IP set arn + sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127" + type: str + returned: Always, as long as the ip set exists +description: + description: Description of the ip set + sample: Some IP set description + returned: Always, as long as the ip set exists + type: str +ip_address_version: + description: IP version of the ip set + sample: IPV4 + type: str + returned: Always, as long as the ip set exists +name: + description: IP set name + sample: test02 + returned: Always, as long as the ip set exists + type: str +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + + +def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): + # there is currently no paginator for wafv2 + req_obj = { + 'Scope': scope, + 'Limit': 100 + } + if Nextmarker: + req_obj['NextMarker'] = Nextmarker + + try: + response = wafv2.list_ip_sets(**req_obj) + if response.get('NextMarker'): + response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets') + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to list wafv2 ip set.") + return response + + +def get_ip_set(wafv2, name, scope, id, fail_json_aws): + try: + response = wafv2.get_ip_set( + Name=name, + Scope=scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to get wafv2 ip set.") + return response + + +def main(): + + arg_spec = dict( + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + ) + + name = module.params.get("name") + scope = module.params.get("scope") + + wafv2 = module.client('wafv2') + + # check if ip set exist + response = list_ip_sets(wafv2, scope, module.fail_json_aws) + + id = None + 
+ for item in response.get('IPSets'): + if item.get('Name') == name: + id = item.get('Id') + + retval = {} + existing_set = None + if id: + existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) + retval = camel_dict_to_snake_dict(existing_set.get('IPSet')) + + module.exit_json(**retval) + + +if __name__ == '__main__': + main() From 39218ce15ed9c16db01b18521677663af8f0bf3f Mon Sep 17 00:00:00 2001 From: Calvin Walton Date: Thu, 8 Apr 2021 09:55:58 -0400 Subject: [PATCH 145/683] Fix passing an instance profile arn to ec2_launch_template (#371) * Fix passing an instance profile arn to ec2_launch_template If the string given on the iam_instance_profile looked like an arn, the wrong structure format was being returned. boto3 expects a dict of the format { arn: "{{ arn }}" } but the string was returned instead. This fixes this error: Couldn't create subsequent launch template version: Parameter validation failed: Invalid type for parameter LaunchTemplateData.IamInstanceProfile, value: arn:aws:iam::[...]:instance-profile/[...], type: , valid types: * integration test * changelog Co-authored-by: Mark Chappell --- ec2_launch_template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index a3f203a48f4..c2189081a68 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -411,7 +411,7 @@ def determine_iam_role(module, name_or_arn): if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): - return name_or_arn + return {'arn': name_or_arn} iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) From caa8c9c80b48692157581335e5272cc48f9be82b Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Thu, 8 Apr 2021 07:54:50 -0700 Subject: [PATCH 146/683] ec2_instance exception handling and client cleanup (#526) * ec2_instance exception handling and client cleanup Catch botocore and client errors on all API calls Pass boto client to functions, rather than creating new clients throughout the code * Add review suggestion to plugins/modules/ec2_instance.py Co-authored-by: Mark Chappell --- ec2_instance.py | 155 ++++++++++++++++++++++++------------------- ec2_instance_info.py | 2 +- 2 files changed, 89 insertions(+), 68 deletions(-) diff --git a/ec2_instance.py b/ec2_instance.py index 9f61882491b..22db3c88f79 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -896,19 +896,22 @@ def manage_tags(match, new_tags, purge_tags, ec2): ) if module.check_mode: return bool(tags_to_delete or tags_to_set) - if tags_to_set: - ec2.create_tags( - aws_retry=True, - Resources=[match['InstanceId']], - Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) - changed |= True - if tags_to_delete: - delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) - ec2.delete_tags( - aws_retry=True, - Resources=[match['InstanceId']], - Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) - changed |= True + try: + if tags_to_set: + ec2.create_tags( + aws_retry=True, + Resources=[match['InstanceId']], + Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) + changed |= True + if tags_to_delete: + delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) + ec2.delete_tags( + aws_retry=True, + Resources=[match['InstanceId']], + Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) + changed |= True + except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not update tags for instance {0}".format(match['InstanceId'])) return changed @@ -922,7 +925,7 @@ def build_volume_spec(params): return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] -def add_or_update_instance_profile(instance, desired_profile_name): +def add_or_update_instance_profile(instance, desired_profile_name, ec2): instance_profile_setting = instance.get('IamInstanceProfile') if instance_profile_setting and desired_profile_name: if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')): @@ -932,13 +935,13 @@ def add_or_update_instance_profile(instance, desired_profile_name): desired_arn = determine_iam_role(desired_profile_name) if instance_profile_setting.get('Arn') == desired_arn: return False + # update association - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) try: association = ec2.describe_iam_instance_profile_associations( aws_retry=True, Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) - except botocore.exceptions.ClientError as e: + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # check for InvalidAssociationID.NotFound module.fail_json_aws(e, "Could not find instance profile association") try: @@ -953,7 +956,6 @@ def add_or_update_instance_profile(instance, desired_profile_name): if not instance_profile_setting and desired_profile_name: # create association - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) try: resp = ec2.associate_iam_instance_profile( aws_retry=True, @@ -961,13 +963,13 @@ def add_or_update_instance_profile(instance, desired_profile_name): InstanceId=instance['InstanceId'] ) return True - except botocore.exceptions.ClientError as e: + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, "Could not associate new instance profile") return False -def build_network_spec(params, ec2=None): +def build_network_spec(params, ec2): """ Returns list of interfaces [complex] Interface type: { @@ -996,8 +998,6 @@ def build_network_spec(params, ec2=None): 'SubnetId': 'string' }, """ - if ec2 is None: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) interfaces = [] network = params.get('network') or {} @@ -1116,8 +1116,6 @@ def warn_if_cpu_options_changed(instance): def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None): - if ec2 is None: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if subnet_id is not None: try: @@ -1218,9 +1216,7 @@ def build_instance_tags(params, propagate_tags_to_volumes=True): ] -def build_run_instance_spec(params, ec2=None): - if ec2 is None: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) +def build_run_instance_spec(params, ec2): spec = dict( ClientToken=uuid.uuid4().hex, @@ -1276,10 +1272,8 @@ def await_instances(ids, state='OK'): ', '.join(ids), state, to_native(e))) -def diff_instance_and_params(instance, params, ec2=None, skip=None): +def diff_instance_and_params(instance, params, ec2, skip=None): """boto3 instance obj, module params""" - if ec2 is None: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if skip is None: skip = [] @@ -1305,7 +1299,10 @@ def value_wrapper(v): if mapping.instance_key in skip: continue - value = 
ec2.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) + try: + value = ec2.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_)) if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): arguments = dict( InstanceId=instance['InstanceId'], @@ -1315,7 +1312,10 @@ def value_wrapper(v): changes_to_apply.append(arguments) if params.get('security_group') or params.get('security_groups'): - value = ec2.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) + try: + value = ec2.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) # managing security groups if params.get('vpc_subnet_id'): subnet_id = params.get('vpc_subnet_id') @@ -1366,12 +1366,15 @@ def change_network_attachments(instance, params, ec2): old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] to_attach = set(new_ids) - set(old_ids) for eni_id in to_attach: - ec2.attach_network_interface( - aws_retry=True, - DeviceIndex=new_ids.index(eni_id), - InstanceId=instance['InstanceId'], - NetworkInterfaceId=eni_id, - ) + try: + ec2.attach_network_interface( + aws_retry=True, + DeviceIndex=new_ids.index(eni_id), + InstanceId=instance['InstanceId'], + NetworkInterfaceId=eni_id, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not attach interface {0} to instance {1}".format(eni_id, instance['InstanceId'])) return bool(len(to_attach)) return False @@ -1389,28 +1392,43 @@ def find_instances(ec2, ids=None, filters=None): filters[key.replace("_", "-")] = filters.pop(key) params = dict(Filters=ansible_dict_to_boto3_filter_list(filters)) - results = paginator.paginate(**params).search('Reservations[].Instances[]') + try: + results = _describe_instances(ec2, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe instances") return list(results) +@AWSRetry.jittered_backoff() +def _describe_instances(ec2, **params): + paginator = ec2.get_paginator('describe_instances') + return paginator.paginate(**params).search('Reservations[].Instances[]') + + def get_default_vpc(ec2): - vpcs = ec2.describe_vpcs( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + try: + vpcs = ec2.describe_vpcs( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe default VPC") if len(vpcs.get('Vpcs', [])): return vpcs.get('Vpcs')[0] return None def get_default_subnet(ec2, vpc, availability_zone=None): - subnets = ec2.describe_subnets( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({ - 'vpc-id': vpc['VpcId'], - 'state': 'available', - 'default-for-az': 'true', - }) - ) + try: + subnets = ec2.describe_subnets( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list({ + 'vpc-id': vpc['VpcId'], + 'state': 'available', + 'default-for-az': 
'true', + }) + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId'])) if len(subnets.get('Subnets', [])): if availability_zone is not None: subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) @@ -1424,11 +1442,9 @@ def get_default_subnet(ec2, vpc, availability_zone=None): return None -def ensure_instance_state(state, ec2=None): - if ec2 is None: - module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) +def ensure_instance_state(state, ec2): if state in ('running', 'started'): - changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING') + changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING', ec2=ec2) if failed: module.fail_json( @@ -1446,10 +1462,12 @@ def ensure_instance_state(state, ec2=None): elif state in ('restarted', 'rebooted'): changed, failed, instances, failure_reason = change_instance_state( filters=module.params.get('filters'), - desired_state='STOPPED') + desired_state='STOPPED', + ec2=ec2) changed, failed, instances, failure_reason = change_instance_state( filters=module.params.get('filters'), - desired_state='RUNNING') + desired_state='RUNNING', + ec2=ec2) if failed: module.fail_json( @@ -1467,7 +1485,8 @@ def ensure_instance_state(state, ec2=None): elif state in ('stopped',): changed, failed, instances, failure_reason = change_instance_state( filters=module.params.get('filters'), - desired_state='STOPPED') + desired_state='STOPPED', + ec2=ec2) if failed: module.fail_json( @@ -1485,7 +1504,8 @@ def ensure_instance_state(state, ec2=None): elif state in ('absent', 'terminated'): terminated, terminate_failed, instances, failure_reason = change_instance_state( filters=module.params.get('filters'), - desired_state='TERMINATED') + desired_state='TERMINATED', + ec2=ec2) if terminate_failed: module.fail_json( @@ -1501,10 +1521,8 @@ def ensure_instance_state(state, ec2=None): ) -def change_instance_state(filters, desired_state, ec2=None): +def change_instance_state(filters, desired_state, ec2): """Takes STOPPED/RUNNING/TERMINATED""" - if ec2 is None: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) changed = set() instances = find_instances(ec2, filters=filters) @@ -1571,7 +1589,7 @@ def determine_iam_role(name_or_arn): try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return role['InstanceProfile']['Arn'] - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code('NoSuchEntity') as e: module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. 
Please try supplying the full ARN.".format(name_or_arn)) @@ -1579,7 +1597,7 @@ def handle_existing(existing_matches, changed, ec2, state): if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']: - ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING') + ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING', ec2=ec2) if failed: module.fail_json(msg="Couldn't start instances: {0}. Failure reason: {1}".format(instances, failure_reason)) module.exit_json( @@ -1587,12 +1605,15 @@ def handle_existing(existing_matches, changed, ec2, state): instances=[pretty_instance(i) for i in instances], instance_ids=[i['InstanceId'] for i in instances], ) - changes = diff_instance_and_params(existing_matches[0], module.params) + changes = diff_instance_and_params(existing_matches[0], module.params, ec2) for c in changes: if not module.check_mode: - ec2.modify_instance_attribute(aws_retry=True, **c) + try: + ec2.modify_instance_attribute(aws_retry=True, **c) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c))) changed |= bool(changes) - changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role')) + changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'), ec2) changed |= change_network_attachments(existing_matches[0], module.params, ec2) altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches]) module.exit_json( @@ -1614,7 +1635,7 @@ def ensure_present(existing_matches, changed, ec2, state): # instance_ids=[i['InstanceId'] for i in existing_matches], ) try: - instance_spec = build_run_instance_spec(module.params) + instance_spec = build_run_instance_spec(module.params, ec2) # If check mode is enabled, suspend 'ensure function'.
if module.check_mode: module.exit_json( @@ -1626,7 +1647,7 @@ def ensure_present(existing_matches, changed, ec2, state): instance_ids = [i['InstanceId'] for i in instances] for ins in instances: - changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized']) + changes = diff_instance_and_params(ins, module.params, ec2, skip=['UserData', 'EbsOptimized']) for c in changes: try: ec2.modify_instance_attribute(aws_retry=True, **c) diff --git a/ec2_instance_info.py b/ec2_instance_info.py index 4c743688627..dafe60ea4dd 100644 --- a/ec2_instance_info.py +++ b/ec2_instance_info.py @@ -512,7 +512,7 @@ try: import botocore except ImportError: - pass # Handled by AnsibleAWSModule + pass # caught by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict From 0983d0858a02e22f553ee7dd849749d54cadd6ae Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 9 Apr 2021 14:44:03 +0200 Subject: [PATCH 147/683] rds_instance - fix idempotency when using non-lowercase for preferred_maintenance_window (#516) * rds_instance - preferred_maintenance_window - force day of week into lowercase * add integration test * changelog --- rds_instance.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rds_instance.py b/rds_instance.py index 0dae125889b..6e1312b739b 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -1214,6 +1214,10 @@ def main(): if module.params['processor_features'] is not None: module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items()) + # Ensure dates are in lowercase + if module.params['preferred_maintenance_window']: + module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower() + client = module.client('rds') changed = False state = module.params['state'] From 60e3caa2e819ba31913438a383338745ad4b4fab Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 9 Apr 2021 14:48:31 +0200 Subject: [PATCH 148/683] ec2_instance - fetch status of instance before attempting to set additional parameters (#533) --- ec2_instance.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ec2_instance.py b/ec2_instance.py index 22db3c88f79..5138fd7647a 100644 --- a/ec2_instance.py +++ b/ec2_instance.py @@ -1647,6 +1647,18 @@ def ensure_present(existing_matches, changed, ec2, state): instance_ids = [i['InstanceId'] for i in instances] for ins in instances: + # Wait for instances to exist (don't check state) + try: + AWSRetry.jittered_backoff( + catch_extra_error_codes=['InvalidInstanceID.NotFound'], + )( + ec2.describe_instance_status + )( + InstanceIds=[ins['InstanceId']], + IncludeAllInstances=True, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance") changes = diff_instance_and_params(ins, module.params, ec2, skip=['UserData', 'EbsOptimized']) for c in changes: try: From f62ce3c85923ba35fc69f98cc6bf44ba2d6fb983 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Fri, 9 Apr 2021 17:45:09 +0200 Subject: [PATCH 149/683] Return all infos of a VPC peering connection in ec2_vpc_peer module (#355) * Return all infos of a VPC peering connection in ec2_vpc_peer module. * More extensive tests for vpc_peer module. 
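The new return value is built by snake-casing the raw boto3 peering dict and flattening its tag list with the amazon.aws helpers the module already imports. Roughly (a sketch of just the formatting step; the function name here is illustrative):

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict

def format_peering_result(vpc_peering_connection):
    # CamelCase boto3 keys become snake_case (accepter_vpc_info, status, ...)
    formatted = camel_dict_to_snake_dict(vpc_peering_connection)
    # The boto3-style [{'Key': ..., 'Value': ...}] tag list becomes a plain dict
    formatted['tags'] = boto3_tag_list_to_ansible_dict(formatted.get('tags', []))
    return formatted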
Also got rid of redundant helper method in vpc_peer module --- ec2_vpc_peer.py | 221 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 182 insertions(+), 39 deletions(-) diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 9c5d35349eb..29011094766 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -216,10 +216,144 @@ ''' RETURN = ''' -task: - description: The result of the create, accept, reject or delete action. +peering_id: + description: The id of the VPC peering connection created/deleted. + returned: always + type: str + sample: pcx-034223d7c0aec3cde +vpc_peering_connection: + description: The details of the VPC peering connection as returned by Boto3 (snake cased). returned: success - type: dict + type: complex + contains: + accepter_vpc_info: + description: Information about the VPC which accepted the connection. + returned: success + type: complex + contains: + cidr_block: + description: The primary CIDR for the VPC. + returned: when connection is in the accepted state. + type: str + example: '10.10.10.0/23' + cidr_block_set: + description: A list of all CIDRs for the VPC. + returned: when connection is in the accepted state. + type: complex + contains: + cidr_block: + description: A CIDR block used by the VPC. + returned: success + type: str + example: '10.10.10.0/23' + owner_id: + description: The AWS account that owns the VPC. + returned: success + type: str + example: 012345678901 + peering_options: + description: Additional peering configuration. + returned: when connection is in the accepted state. + type: dict + contains: + allow_dns_resolution_from_remote_vpc: + description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. + returned: success + type: bool + allow_egress_from_local_classic_link_to_remote_vpc: + description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. + returned: success + type: bool + allow_egress_from_local_vpc_to_remote_classic_link: + description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. + returned: success + type: bool + region: + description: The AWS region that the VPC is in. + returned: success + type: str + example: us-east-1 + vpc_id: + description: The ID of the VPC + returned: success + type: str + example: vpc-0123456789abcdef0 + requester_vpc_info: + description: Information about the VPC which requested the connection. + returned: success + type: complex + contains: + cidr_block: + description: The primary CIDR for the VPC. + returned: when connection is not in the deleted state. + type: str + example: '10.10.10.0/23' + cidr_block_set: + description: A list of all CIDRs for the VPC. + returned: when connection is not in the deleted state. + type: complex + contains: + cidr_block: + description: A CIDR block used by the VPC + returned: success + type: str + example: '10.10.10.0/23' + owner_id: + description: The AWS account that owns the VPC. + returned: success + type: str + example: 012345678901 + peering_options: + description: Additional peering configuration. + returned: when connection is not in the deleted state. + type: dict + contains: + allow_dns_resolution_from_remote_vpc: + description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. 
+ returned: success + type: bool + allow_egress_from_local_classic_link_to_remote_vpc: + description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. + returned: success + type: bool + allow_egress_from_local_vpc_to_remote_classic_link: + description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. + returned: success + type: bool + region: + description: The AWS region that the VPC is in. + returned: success + type: str + example: us-east-1 + vpc_id: + description: The ID of the VPC + returned: success + type: str + example: vpc-0123456789abcdef0 + status: + description: Details of the current status of the connection. + returned: success + type: complex + contains: + code: + description: A short code describing the status of the connection. + returned: success + type: str + example: active + message: + description: Additional information about the status of the connection. + returned: success + type: str + example: Pending Acceptance by 012345678901 + tags: + description: Tags applied to the connection. + returned: success + type: dict + vpc_peering_connection_id: + description: The ID of the VPC peering connection. + returned: success + type: str + example: "pcx-0123456789abcdef0" ''' try: @@ -231,6 +365,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def wait_for_state(client, module, state, pcx_id): @@ -254,9 +390,9 @@ def tags_changed(pcx_id, client, module): tags = dict() if module.params.get('tags'): tags = module.params.get('tags') - pcx = find_pcx_by_id(pcx_id, client, module) - if pcx['VpcPeeringConnections']: - pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']] + peering_connection = get_peering_connection_by_id(pcx_id, client, module) + if peering_connection['Tags']: + pcx_values = [t.values() for t in peering_connection['Tags']] pcx_tags = [item for sublist in pcx_values for item in sublist] tag_values = [[key, str(value)] for key, value in tags.items()] tags = [item for sublist in tag_values for item in sublist] @@ -283,6 +419,7 @@ def describe_peering_connections(params, client): aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(peer_filter), ) + return result @@ -311,9 +448,9 @@ def create_peer_connection(client, module): if tags_changed(pcx_id, client, module): changed = True if is_active(peering_conn): - return (changed, peering_conn['VpcPeeringConnectionId']) + return (changed, peering_conn) if is_pending(peering_conn): - return (changed, peering_conn['VpcPeeringConnectionId']) + return (changed, peering_conn) try: peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params) pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] @@ -322,7 +459,7 @@ def create_peer_connection(client, module): if module.params.get('tags'): create_tags(pcx_id, client, module) changed = True - return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']) + return (changed, peering_conn['VpcPeeringConnection']) except botocore.exceptions.ClientError as e: 
module.fail_json(msg=str(e)) @@ -330,7 +467,7 @@ def create_peer_connection(client, module): def remove_peer_connection(client, module): pcx_id = module.params.get('peering_id') if pcx_id: - peering_conns = client.describe_vpc_peering_connections(aws_retry=True, VpcPeeringConnectionIds=[pcx_id]) + peering_conn = get_peering_connection_by_id(pcx_id, client, module) else: params = dict() params['VpcId'] = module.params.get('vpc_id') @@ -338,17 +475,21 @@ def remove_peer_connection(client, module): params['PeerRegion'] = module.params.get('peer_region') if module.params.get('peer_owner_id'): params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) - peering_conns = describe_peering_connections(params, client) + peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0] - if not peering_conns: + if not peering_conn: module.exit_json(changed=False) else: - pcx_id = pcx_id or peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId'] - - if peering_conns['VpcPeeringConnections'][0]['Status']['Code'] == 'deleted': - module.exit_json(msg='Connection in deleted state.', changed=False) - if peering_conns['VpcPeeringConnections'][0]['Status']['Code'] == 'rejected': - module.exit_json(msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS', changed=False) + pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId'] + + if peering_conn['Status']['Code'] == 'deleted': + module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id) + if peering_conn['Status']['Code'] == 'rejected': + module.exit_json( + msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS', + changed=False, + peering_id=pcx_id + ) try: params = dict() @@ -356,17 +497,17 @@ def remove_peer_connection(client, module): client.delete_vpc_peering_connection(aws_retry=True, **params) if module.params.get('wait'): wait_for_state(client, module, 'deleted', pcx_id) - module.exit_json(changed=True) + module.exit_json(changed=True, peering_id=pcx_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) -def peer_status(client, module): +def get_peering_connection_by_id(peering_id, client, module): params = dict() - params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')] + params['VpcPeeringConnectionIds'] = [peering_id] try: vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) - return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code'] + return vpc_peering_connection['VpcPeeringConnections'][0] except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: module.fail_json_aws(e, msg='Malformed connection ID') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -376,10 +517,12 @@ def peer_status(client, module): def accept_reject(state, client, module): changed = False params = dict() - pcx_id = module.params.get('peering_id') - params['VpcPeeringConnectionId'] = pcx_id - current_state = peer_status(client, module) - if current_state not in ['active', 'rejected']: + peering_id = module.params.get('peering_id') + params['VpcPeeringConnectionId'] = peering_id + vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) + peering_status = vpc_peering_connection['Status']['Code'] + + if peering_status not in ['active', 'rejected']: try: if state == 'accept': 
client.accept_vpc_peering_connection(aws_retry=True, **params) @@ -388,15 +531,18 @@ client.reject_vpc_peering_connection(aws_retry=True, **params) target_state = 'rejected' if module.params.get('tags'): - create_tags(params['VpcPeeringConnectionId'], client, module) + create_tags(peering_id, client, module) changed = True if module.params.get('wait'): - wait_for_state(client, module, target_state, pcx_id) + wait_for_state(client, module, target_state, peering_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) - if tags_changed(params['VpcPeeringConnectionId'], client, module): + if tags_changed(peering_id, client, module): changed = True - return changed, params['VpcPeeringConnectionId'] + + # Reload peering connection info to return latest state/params + vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) + return (changed, vpc_peering_connection) def load_tags(module): @@ -422,13 +568,6 @@ def delete_tags(pcx_id, client, module): module.fail_json(msg=str(e)) -def find_pcx_by_id(pcx_id, client, module): - try: - return client.describe_vpc_peering_connections(aws_retry=True, VpcPeeringConnectionIds=[pcx_id]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - def main(): argument_spec = dict( vpc_id=dict(), @@ -460,7 +599,6 @@ def main(): if state == 'present': (changed, results) = create_peer_connection(client, module) - module.exit_json(changed=changed, peering_id=results) elif state == 'absent': if not peering_id and (not vpc_id or not peer_vpc_id): module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]') @@ -468,7 +606,12 @@ def main(): remove_peer_connection(client, module) else: (changed, results) = accept_reject(state, client, module) - module.exit_json(changed=changed, peering_id=results) + + formatted_results = camel_dict_to_snake_dict(results) + # Turn the resource tags from boto3 into an Ansible-friendly tag dictionary + formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', [])) + + module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId']) if __name__ == '__main__': From 4c0e6818a9e07c52ab5e96a1d6ce607dc9de846f Mon Sep 17 00:00:00 2001 From: Mike Svendsen Date: Wed, 31 Mar 2021 20:30:05 -0500 Subject: [PATCH 150/683] Fix target group remove and add logic Add additional integration tests to test linking target groups to autoscaling groups.
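The bug is easier to see with concrete values than in the diff below: the old code only acted when one set of target group ARNs fully contained the other, so a request that both adds and removes groups in a single run matched neither branch and was silently skipped. Computing both set differences unconditionally covers every case (ARNs shortened for readability):

wanted_tgs = {'arn:tg/a', 'arn:tg/b'}  # requested in the task
has_tgs = {'arn:tg/b', 'arn:tg/c'}     # currently attached to the ASG

# Neither set is a superset of the other, so the old checks did nothing.
tgs_to_detach = has_tgs.difference(wanted_tgs)  # {'arn:tg/c'}
tgs_to_attach = wanted_tgs.difference(has_tgs)  # {'arn:tg/a'}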
Add changelog --- ec2_asg.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/ec2_asg.py b/ec2_asg.py index a87ce7f9681..68a0f9ec77c 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -1260,25 +1260,22 @@ def create_autoscaling_group(connection): # Get differences wanted_tgs = set(target_group_arns) has_tgs = set(as_group['TargetGroupARNs']) - # check if all requested are already existing - if has_tgs.issuperset(wanted_tgs): - # if wanted contains less than existing, then we need to delete some - tgs_to_detach = has_tgs.difference(wanted_tgs) - if tgs_to_detach: - changed = True - try: - detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) - if wanted_tgs.issuperset(has_tgs): - # if has contains less than wanted, then we need to add some - tgs_to_attach = wanted_tgs.difference(has_tgs) - if tgs_to_attach: - changed = True - try: - attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) + + tgs_to_detach = has_tgs.difference(wanted_tgs) + if tgs_to_detach: + changed = True + try: + detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) + + tgs_to_attach = wanted_tgs.difference(has_tgs) + if tgs_to_attach: + changed = True + try: + attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values From 9765613e0ebb79e47c3ab00b4dbf37a3356e9250 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 10 Apr 2021 09:55:59 +0200 Subject: [PATCH 151/683] Use shared normalize_boto3_result code --- ec2_vpc_peering_info.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index a086fde3639..92b2e1e8bd9 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -208,26 +208,23 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj - - def get_vpc_peers(client, module): params = dict() params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) if module.params.get('peer_connection_ids'): 
params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids') try: - result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler)) - except Exception as e: - module.fail_json(msg=to_native(e)) + result = client.describe_vpc_peering_connections(**params) + result = normalize_boto3_result(result) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe peering connections") return result['VpcPeeringConnections'] From 94367e81a4f76401056c5cb0c08c6773b81b6c6d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 10 Apr 2021 09:57:45 +0200 Subject: [PATCH 152/683] Add Retries --- ec2_vpc_peering_info.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 92b2e1e8bd9..5f3cb435de3 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -210,6 +210,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict @@ -221,7 +222,7 @@ def get_vpc_peers(client, module): if module.params.get('peer_connection_ids'): params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids') try: - result = client.describe_vpc_peering_connections(**params) + result = client.describe_vpc_peering_connections(aws_retry=True, **params) result = normalize_boto3_result(result) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe peering connections") @@ -241,7 +242,7 @@ def main(): module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws') try: - ec2 = module.client('ec2') + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From c716b6ec0afa3e4396bffa951befea4cf335063d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 10 Apr 2021 12:13:51 +0200 Subject: [PATCH 153/683] Update ec2_vpc_endpoint_info AWS retries --- ec2_vpc_endpoint_info.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index 425e0c63ec7..d990a908943 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -126,21 +126,28 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -@AWSRetry.exponential_backoff() +@AWSRetry.jittered_backoff() +def _describe_endpoints(client, **params): + paginator = client.get_paginator('describe_vpc_endpoints') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.jittered_backoff() +def _describe_endpoint_services(client, **params): + paginator = client.get_paginator('describe_vpc_endpoint_services') + return paginator.paginate(**params).build_full_result() + + def get_supported_services(client, 
module): - results = list() - params = dict() - while True: - response = client.describe_vpc_endpoint_services(**params) - results.extend(response['ServiceNames']) - if 'NextToken' in response: - params['NextToken'] = response['NextToken'] - else: - break + try: + services = _describe_endpoint_services(client) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get endpoint services") + + results = list(services['ServiceNames']) return dict(service_names=results) -@AWSRetry.exponential_backoff() def get_endpoints(client, module): results = list() params = dict() @@ -148,14 +155,13 @@ def get_endpoints(client, module): if module.params.get('vpc_endpoint_ids'): params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') try: - paginator = client.get_paginator('describe_vpc_endpoints') - results = paginator.paginate(**params).build_full_result()['VpcEndpoints'] - + results = _describe_endpoints(client, **params)['VpcEndpoints'] results = normalize_boto3_result(results) except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") + return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) From 1a6d44ee562a517cfffa34e7a40385d50871714f Mon Sep 17 00:00:00 2001 From: Dmytro Vorotyntsev <2937451+vorotech@users.noreply.github.com> Date: Mon, 15 Feb 2021 10:35:32 +0200 Subject: [PATCH 154/683] dict Tags to list on restore_db_instance_from_db_snapshot Also map the Tags parameter from dict to list in the `restore_db_instance_from_db_snapshot` method, in addition to the existing `create_db_instance` and `create_db_instance_read_replica` --- rds_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rds_instance.py b/rds_instance.py index 6e1312b739b..69d5836004f 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -861,7 +861,7 @@ def get_parameters(client, module, parameters, method_name): if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': parameters.pop('ProcessorFeatures') - if method_name == 'create_db_instance' or method_name == 'create_db_instance_read_replica': + if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']: if parameters.get('Tags'): parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) From e10a2e921db6ade860214ddf64ddfc74086cdc60 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 6 Apr 2021 13:10:43 +0200 Subject: [PATCH 155/683] Add compatibility shim so old values are still returned --- route53.py | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/route53.py b/route53.py index 945d5e8b679..d6f35b12c35 100644 --- a/route53.py +++ b/route53.py @@ -424,6 +424,49 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): return None +def format_record(record_in, zone_in, zone_id): + """ + Formats a record in a way that's consistent with the pre-boto3 migration values + as well as returning the 'normal' boto3 style values + """ + if not record_in: + return None + + record = dict(record_in) + record['zone'] = zone_in + record['hosted_zone_id'] = zone_id +
record['type'] = record_in.get('Type', None) + record['record'] = record_in.get('Name').encode().decode('unicode_escape') + record['ttl'] = record_in.get('TTL', None) + record['identifier'] = record_in.get('SetIdentifier', None) + record['weight'] = record_in.get('Weight', None) + record['region'] = record_in.get('Region', None) + record['failover'] = record_in.get('Failover', None) + record['health_check'] = record_in.get('HealthCheckId', None) + + if record['ttl']: + record['ttl'] = str(record['ttl']) + if record['weight']: + record['weight'] = str(record['weight']) + if record['region']: + record['region'] = str(record['region']) + + if record_in.get('AliasTarget'): + record['alias'] = True + record['value'] = record_in['AliasTarget'].get('DNSName') + record['values'] = [record_in['AliasTarget'].get('DNSName')] + record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId') + record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth') + else: + record['alias'] = False + records = [r.get('Value') for r in record_in.get('ResourceRecords')] + record['value'] = ','.join(sorted(records)) + record['values'] = sorted(records) + + return record + + def get_hosted_zone_nameservers(route53, zone_id): hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name'] resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id) @@ -587,7 +630,8 @@ def main(): # Retrieve name servers associated to the zone. ns = get_hosted_zone_nameservers(route53, zone_id) - module.exit_json(changed=False, set=aws_record, nameservers=ns) + formatted_aws = format_record(aws_record, zone_in, zone_id) + module.exit_json(changed=False, set=formatted_aws, nameservers=ns) if command_in == 'delete' and not aws_record: module.exit_json(changed=False) @@ -633,11 +677,14 @@ def main(): except Exception as e: module.fail_json(msg='Unhandled exception. 
(%s)' % to_native(e)) + formatted_aws = format_record(aws_record, zone_in, zone_id) + formatted_record = format_record(resource_record_set, zone_in, zone_id) + module.exit_json( changed=True, diff=dict( - before=aws_record, - after=resource_record_set if command != 'delete' else {}, + before=formatted_aws, + after=formatted_record if command != 'delete' else {}, ), ) From 00007c3a34cbd3f38e7451a4dcc57aef472c70cd Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 6 Apr 2021 13:11:57 +0200 Subject: [PATCH 156/683] Add (snake-cased) boto3 format exit values --- route53.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/route53.py b/route53.py index d6f35b12c35..8cbb2647e4a 100644 --- a/route53.py +++ b/route53.py @@ -363,6 +363,7 @@ pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message @@ -621,7 +622,8 @@ def main(): aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value')) if command_in == 'create' and aws_record == resource_record_set: - module.exit_json(changed=False) + rr_sets = [camel_dict_to_snake_dict(resource_record_set)] + module.exit_json(changed=False, resource_records_sets=rr_sets) if command_in == 'get': if type_in == 'NS': @@ -631,7 +633,8 @@ def main(): ns = get_hosted_zone_nameservers(route53, zone_id) formatted_aws = format_record(aws_record, zone_in, zone_id) - module.exit_json(changed=False, set=formatted_aws, nameservers=ns) + rr_sets = [camel_dict_to_snake_dict(aws_record)] + module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets) if command_in == 'delete' and not aws_record: module.exit_json(changed=False) @@ -677,6 +680,7 @@ def main(): except Exception as e: module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + rr_sets = [camel_dict_to_snake_dict(resource_record_set)] formatted_aws = format_record(aws_record, zone_in, zone_id) formatted_record = format_record(resource_record_set, zone_in, zone_id) @@ -685,6 +689,7 @@ def main(): diff=dict( before=formatted_aws, after=formatted_record if command != 'delete' else {}, + resource_record_sets=rr_sets, ), ) From ecd7cb8f3cfb21c5e63ec4599aa567d4d3e87277 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Fri, 12 Feb 2021 15:20:54 +0100 Subject: [PATCH 157/683] Fix KeyError: SecurityGroup in elasticache module. Also improve docs a little --- elasticache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/elasticache.py b/elasticache.py index 93804562f2e..c292dbdc524 100644 --- a/elasticache.py +++ b/elasticache.py @@ -74,7 +74,7 @@ elements: str cache_security_groups: description: - - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a VPC. + - A list of cache security group names to associate with this cache cluster. Don't use if your Cache is inside a VPC. In that case use I(security_group_ids) instead! 
type: list elements: str zone: @@ -393,7 +393,7 @@ def _requires_modification(self): # check vpc security groups if self.security_group_ids: vpc_security_groups = [] - security_groups = self.data['SecurityGroups'] or [] + security_groups = self.data.get('SecurityGroups', []) for sg in security_groups: vpc_security_groups.append(sg['SecurityGroupId']) if set(vpc_security_groups) != set(self.security_group_ids): From aa8d21dff03d2b7f73db409e39a65a350d606359 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Fri, 12 Feb 2021 16:39:33 +0100 Subject: [PATCH 158/683] Shorter line --- elasticache.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/elasticache.py b/elasticache.py index c292dbdc524..53e72f664b4 100644 --- a/elasticache.py +++ b/elasticache.py @@ -74,7 +74,8 @@ elements: str cache_security_groups: description: - - A list of cache security group names to associate with this cache cluster. Don't use if your Cache is inside a VPC. In that case use I(security_group_ids) instead! + - A list of cache security group names to associate with this cache cluster. + - Don't use if your Cache is inside a VPC. In that case use I(security_group_ids) instead! type: list elements: str zone: From e99aefe6dc6b22326fdb67d86775f48117c2d7c5 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Mon, 1 Mar 2021 14:36:42 +0100 Subject: [PATCH 159/683] add wafv2 modules --- wafv2_resources.py | 166 ++++++++++++++++ wafv2_resources_info.py | 122 ++++++++++++ wafv2_rule_group.py | 407 +++++++++++++++++++++++++++++++++++++++ wafv2_rule_group_info.py | 155 +++++++++++++++ wafv2_web_acl.py | 364 ++++++++++++++++++++++++++++++++++ wafv2_web_acl_info.py | 134 +++++++++++++ 6 files changed, 1348 insertions(+) create mode 100644 wafv2_resources.py create mode 100644 wafv2_resources_info.py create mode 100644 wafv2_rule_group.py create mode 100644 wafv2_rule_group_info.py create mode 100644 wafv2_web_acl.py create mode 100644 wafv2_web_acl_info.py diff --git a/wafv2_resources.py b/wafv2_resources.py new file mode 100644 index 00000000000..36017114cd3 --- /dev/null +++ b/wafv2_resources.py @@ -0,0 +1,166 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_resources +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_web_acl +description: + - Create, modify and delete CloudWatch log group metric filter. + - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). +requirements: + - boto3 + - botocore +options: + state: + description: + - Whether the rule is present or absent. + choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name web acl. 
+ type: str + scope: + description: + - Scope of waf + choices: ["CLOUDFRONT","REGIONAL"] + type: str + arn: + description: + - AWS resources (ALB, API Gateway or AppSync GraphQL API) ARN + type: str + required: true + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: add test alb to waf string03 + community.aws.wafv2_resources: + name: string03 + scope: REGIONAL + state: present + arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" +''' + +RETURN = """ +resource_arns: + description: Current resources where the wafv2 is applied on + sample: + - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" + returned: Always, as long as the wafv2 exists + type: list +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_web_acl(wafv2, name, scope, id): + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + return response + + +def list_wafv2_resources(wafv2, arn): + response = wafv2.list_resources_for_web_acl( + WebACLArn=arn + ) + return response + + +def add_wafv2_resources(wafv2, waf_arn, arn): + response = wafv2.associate_web_acl( + WebACLArn=waf_arn, + ResourceArn=arn + ) + return response + + +def remove_resources(wafv2, arn): + response = wafv2.disassociate_web_acl( + ResourceArn=arn + ) + return response + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + name=dict(type='str'), + scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']), + arn=dict(type='str', required=True) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['name', 'scope']]] + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + arn = module.params.get("arn") + check_mode = module.check_mode + + wafv2 = module.client('wafv2') + + # check if web acl exists + + response = wafv2_list_web_acls(wafv2, scope) + + id = None + retval = {} + change = False + + for item in response.get('WebACLs'): + if item.get('Name') == name: + id = item.get('Id') + + if id: + existing_acl = get_web_acl(wafv2, name, scope, id) + waf_arn = existing_acl.get('WebACL').get('ARN') + + retval = list_wafv2_resources(wafv2, waf_arn) + + if state == 'present': + if retval: + if arn not in retval.get('ResourceArns'): + change = True + if not check_mode: + retval = add_wafv2_resources(wafv2, waf_arn, arn) + + elif state == 'absent': + if retval: + if arn in retval.get('ResourceArns'): + change = True + if not check_mode: + retval = remove_resources(wafv2, arn) + + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) + + +if __name__ == '__main__': + main() diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py new file mode 100644 index 00000000000..b039c4feff3 --- /dev/null +++ b/wafv2_resources_info.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_resources_info +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_resources_info +description: + - List web acl resources. +requirements: + - boto3 + - botocore +options: + name: + description: + - The name wafv2 acl of interest. + type: str + required: true + scope: + description: + - Scope of wafv2 web acl. + required: true + choices: ["CLOUDFRONT","REGIONAL"] + type: str + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: get web acl + community.aws.wafv2_resources_info: + name: string03 + scope: REGIONAL +''' + +RETURN = """ +resource_arns: + description: Current resources where the wafv2 is applied on + sample: + - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" + returned: Always, as long as the wafv2 exists + type: list +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_web_acl(wafv2, name, scope, id): + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + return response + + +def list_web_acls(wafv2, scope): + response = wafv2.list_web_acls( + Scope=scope, + Limit=100 + ) + return response + + +def list_wafv2_resources(wafv2, arn): + response = wafv2.list_resources_for_web_acl( + WebACLArn=arn + ) + return response + + +def main(): + + arg_spec = dict( + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec + ) + + name = module.params.get("name") + scope = module.params.get("scope") + + wafv2 = module.client('wafv2') + # check if web acl exists + response = list_web_acls(wafv2, scope) + + id = None + retval = {} + + for item in response.get('WebACLs'): + if item.get('Name') == name: + id = item.get('Id') + + if id: + existing_acl = get_web_acl(wafv2, name, scope, id) + arn = existing_acl.get('WebACL').get('ARN') + + retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn)) + + module.exit_json(**retval) + + +if __name__ == '__main__': + main() diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py new file mode 100644 index 00000000000..6f87bb0488a --- /dev/null +++ b/wafv2_rule_group.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_rule_group +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_web_acl +description: + - Create, modify and delete CloudWatch log group metric filter. + - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). +requirements: + - boto3 + - botocore +options: + state: + description: + - Whether the rule is present or absent. 
+ choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name of the rule group. + required: true + type: str + rules: + description: + - The Rule statements used to identify the web requests that you want to allow, block, or count. + type: list + elements: dict + scope: + description: + - Scope of wafv2 rule group. + required: true + choices: ["CLOUDFRONT","REGIONAL"] + type: str + description: + description: + - Description of wafv2 rule group. + type: str + sampled_requests: + description: + - Sampled requests, true or false. + type: bool + default: false + cloudwatch_metrics: + description: + - Enable cloudwatch metric for wafv2 rule group + type: bool + default: true + metric_name: + description: + - Name of cloudwatch metrics. + - If not given and cloudwatch_metrics is enabled, the name of the rule group itself will be taken. + type: str + capacity: + description: + - capacity of wafv2 rule group. + type: int + tags: + description: + - tags for wafv2 rule group. + type: dict + purge_rules: + description: + - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. + default: yes + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: change description + community.aws.wafv2_rule_group: + name: test02 + state: present + description: hallo eins zwei + scope: REGIONAL + capacity: 500 + rules: + - name: eins + priority: 1 + action: + allow: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + ip_set_reference_statement: + arn: "{{ IPSET.arn }}" + cloudwatch_metrics: yes + tags: + A: B + C: D + register: out + +- name: add rule + community.aws.wafv2_rule_group: + name: test02 + state: present + description: hallo eins zwei + scope: REGIONAL + capacity: 500 + rules: + - name: eins + priority: 1 + action: + allow: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + ip_set_reference_statement: + arn: "{{ IPSET.arn }}" + - name: zwei + priority: 2 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: ddos + statement: + or_statement: + statements: + - byte_match_statement: + search_string: ansible.com + positional_constraint: CONTAINS + field_to_match: + single_header: + name: host + text_transformations: + - type: LOWERCASE + priority: 0 + - xss_match_statement: + field_to_match: + body: {} + text_transformations: + - type: NONE + priority: 0 + cloudwatch_metrics: yes + tags: + A: B + C: D + register: out +''' + +RETURN = """ +arn: + description: Rule group arn + sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 + type: str + returned: Always, as long as the web acl exists +description: + description: Description of the rule group + sample: Some rule group description + returned: Always, as long as the web acl exists + type: str +capacity: + description: Current capacity of the rule group + sample: 500 + returned: Always, as long as the rule group exists + type: int +name: + description: Rule group name + sample: test02 + returned: Always, as long as the rule group exists + type: str +rules: + description: Current rules of the rule group + returned: Always, as long as the rule group exists + type: list + sample: + - action: + allow: {} + name: eins + priority: 1 + statement: + 
ip_set_reference_statement: + arn: arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a + visibility_config: + cloud_watch_metrics_enabled: True + metric_name: fsd + sampled_requests_enabled: True +visibility_config: + description: Visibility config of the rule group + returned: Always, as long as the rule group exists + type: dict + sample: + cloud_watch_metrics_enabled: True + metric_name: blub + sampled_requests_enabled: False +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups, compare_priority_rules, wafv2_snake_dict_to_camel_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +class RuleGroup: + def __init__(self, wafv2, name, scope): + self.wafv2 = wafv2 + self.name = name + self.scope = scope + self.existing_group, self.id, self.locktoken = self.get_group() + + def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'Id': self.id, + 'Rules': rules, + 'LockToken': self.locktoken, + 'VisibilityConfig': { + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + } + } + + if description: + req_obj['Description'] = description + + response = self.wafv2.update_rule_group(**req_obj) + return response + + def get_group(self): + response = self.list() + id = None + locktoken = None + arn = None + + for item in response.get('RuleGroups'): + if item.get('Name') == self.name: + id = item.get('Id') + locktoken = item.get('LockToken') + arn = item.get('ARN') + + existing_group = None + if id: + existing_group = self.wafv2.get_rule_group( + Name=self.name, + Scope=self.scope, + Id=id + ) + + return existing_group, id, locktoken + + def list(self): + return wafv2_list_rule_groups(self.wafv2, self.scope) + + def get(self): + if self.existing_group: + return self.existing_group + return None + + def remove(self): + response = self.wafv2.delete_rule_group( + Name=self.name, + Scope=self.scope, + Id=self.id, + LockToken=self.locktoken + ) + return response + + def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'Capacity': capacity, + 'Rules': rules, + 'VisibilityConfig': { + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + } + } + + if description: + req_obj['Description'] = description + + if tags: + req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + + response = self.wafv2.create_rule_group(**req_obj) + self.existing_group, self.id, self.locktoken = self.get_group() + + return self.existing_group + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), + capacity=dict(type='int'), + description=dict(type='str'), + rules=dict(type='list', elements='dict'), + sampled_requests=dict(type='bool', default=False), 
+ cloudwatch_metrics=dict(type='bool', default=True), + metric_name=dict(type='str'), + tags=dict(type='dict'), + purge_rules=dict(default=True, type='bool') + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['capacity', 'rules']]] + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + capacity = module.params.get("capacity") + description = module.params.get("description") + rules = module.params.get("rules") + sampled_requests = module.params.get("sampled_requests") + cloudwatch_metrics = module.params.get("cloudwatch_metrics") + metric_name = module.params.get("metric_name") + tags = module.params.get("tags") + purge_rules = module.params.get("purge_rules") + check_mode = module.check_mode + + if rules: + rules = [] + for rule in module.params.get("rules"): + rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True))) + + if not metric_name: + metric_name = name + + rule_group = RuleGroup(module.client('wafv2'), name, scope) + + change = False + retval = {} + + if state == 'present': + if rule_group.get(): + change, rules = compare_priority_rules(rule_group.get().get('RuleGroup').get('Rules'), rules, purge_rules, state) + change = change or rule_group.get().get('RuleGroup').get('Description') != description + + if change and not check_mode: + retval = rule_group.update( + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name + ) + else: + retval = rule_group.get().get('RuleGroup') + + else: + change = True + if not check_mode: + retval = rule_group.create( + capacity, + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + tags + ) + + elif state == 'absent': + if rule_group.get(): + if rules: + if len(rules) > 0: + change, rules = compare_priority_rules(rule_group.get().get('RuleGroup').get('Rules'), rules, purge_rules, state) + if change and not check_mode: + retval = rule_group.update( + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name + ) + else: + change = True + if not check_mode: + retval = rule_group.remove() + + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) + + +if __name__ == '__main__': + main() diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py new file mode 100644 index 00000000000..c8192108e6b --- /dev/null +++ b/wafv2_rule_group_info.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_rule_group_info +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_web_acl_info +description: + - Create, modify and delete CloudWatch log group metric filter. + - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). +requirements: + - boto3 + - botocore +options: + state: + description: + - Whether the rule is present or absent. + choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name of the rule group. + required: true + type: str + scope: + description: + - Scope of wafv2 rule group. 
+ required: true + choices: ["CLOUDFRONT","REGIONAL"] + type: str + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: rule group info + community.aws.wafv2_rule_group_info: + name: test02 + state: present + scope: REGIONAL +''' + +RETURN = """ +arn: + description: Rule group arn + sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 + type: str + returned: Always, as long as the web acl exists +description: + description: Description of the rule group + sample: Some rule group description + returned: Always, as long as the web acl exists + type: str +capacity: + description: Current capacity of the rule group + sample: 500 + returned: Always, as long as the rule group exists + type: int +name: + description: Rule group name + sample: test02 + returned: Always, as long as the rule group exists + type: str +rules: + description: Current rules of the rule group + returned: Always, as long as the rule group exists + type: list + sample: + - action: + allow: {} + name: eins + priority: 1 + statement: + ip_set_reference_statement: + arn: arn:aws:wafv2:eu-central-1:111111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a + visibility_config: + cloud_watch_metrics_enabled: True + metric_name: fsd + sampled_requests_enabled: True +visibility_config: + description: Visibility config of the rule group + returned: Always, as long as the rule group exists + type: dict + sample: + cloud_watch_metrics_enabled: True + metric_name: blub + sampled_requests_enabled: False +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_rule_group(wafv2, name, scope, id): + response = wafv2.get_rule_group( + Name=name, + Scope=scope, + Id=id + ) + return response + + +def main(): + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + + wafv2 = module.client('wafv2') + + # check if rule group exists + response = wafv2_list_rule_groups(wafv2, scope) + id = None + retval = {} + + for item in response.get('RuleGroups'): + if item.get('Name') == name: + id = item.get('Id') + + existing_group = None + if id: + existing_group = get_rule_group(wafv2, name, scope, id) + retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) + + module.exit_json(**retval) + + +if __name__ == '__main__': + main() diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py new file mode 100644 index 00000000000..43d9f8adb86 --- /dev/null +++ b/wafv2_web_acl.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
+DOCUMENTATION = ''' +--- +module: wafv2_web_acl +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_web_acl +description: + - Create, modify or delete a wafv2 web acl. +requirements: + - boto3 + - botocore +options: + state: + description: + - Whether the rule is present or absent. + choices: ["present", "absent"] + required: true + type: str + name: + description: + - The name of the web acl. + required: true + type: str + scope: + description: + - Scope of wafv2 web acl. + required: true + choices: ["CLOUDFRONT","REGIONAL"] + type: str + description: + description: + - Description of wafv2 web acl. + type: str + default_action: + description: + - Default action of the wafv2 web acl. + choices: ["Block","Allow"] + type: str + sampled_requests: + description: + - Sampled requests, true or false. + type: bool + default: false + cloudwatch_metrics: + description: + - Enable cloudwatch metric for wafv2 web acl. + type: bool + default: true + metric_name: + description: + - Name of cloudwatch metrics. + - If not given and cloudwatch_metrics is enabled, the name of the web acl itself will be taken. + type: str + tags: + description: + - tags for wafv2 web acl. + type: dict + rules: + description: + - The Rule statements used to identify the web requests that you want to allow, block, or count. + type: list + elements: dict + purge_rules: + description: + - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. + default: yes + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: create web acl + community.aws.wafv2_web_acl: + name: test05 + state: present + description: hallo eins + scope: REGIONAL + default_action: Allow + sampled_requests: no + cloudwatch_metrics: yes + metric_name: blub + rules: + - name: zwei + priority: 2 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: ddos + statement: + xss_match_statement: + field_to_match: + body: {} + text_transformations: + - type: NONE + priority: 0 + - name: admin_protect + priority: 1 + override_action: + none: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + managed_rule_group_statement: + vendor_name: AWS + name: AWSManagedRulesAdminProtectionRuleSet + tags: + A: B + C: D + register: out +''' + +RETURN = """ +arn: + description: web acl arn + sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 + type: str + returned: Always, as long as the web acl exists +description: + description: Description of the web acl + sample: Some web acl description + returned: Always, as long as the web acl exists + type: str +capacity: + description: Current capacity of the web acl + sample: 140 + returned: Always, as long as the web acl exists + type: int +name: + description: Web acl name + sample: test02 + returned: Always, as long as the web acl exists + type: str +rules: + description: Current rules of the web acl + returned: Always, as long as the web acl exists + type: list +visibility_config: + description: Visibility config of the web acl + returned: Always, as long as the web acl exists + type: dict + sample: + cloud_watch_metrics_enabled: true + metric_name: blub + sampled_requests_enabled: false +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, 
is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls, compare_priority_rules, wafv2_snake_dict_to_camel_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +class WebACL: + def __init__(self, wafv2, name, scope): + self.wafv2 = wafv2 + self.name = name + self.scope = scope + self.existing_acl, self.id, self.locktoken = self.get_web_acl() + + def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name): + response = self.wafv2.update_web_acl( + Name=self.name, + Scope=self.scope, + Id=self.id, + DefaultAction=default_action, + Description=description, + Rules=rules, + VisibilityConfig={ + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + }, + LockToken=self.locktoken + ) + return response + + def remove(self): + response = self.wafv2.delete_web_acl( + Name=self.name, + Scope=self.scope, + Id=self.id, + LockToken=self.locktoken + ) + return response + + def get(self): + if self.existing_acl: + return self.existing_acl + return None + + def get_web_acl(self): + id = None + locktoken = None + arn = None + existing_acl = None + response = self.list() + + for item in response.get('WebACLs'): + if item.get('Name') == self.name: + id = item.get('Id') + locktoken = item.get('LockToken') + arn = item.get('ARN') + + if id: + existing_acl = self.wafv2.get_web_acl( + Name=self.name, + Scope=self.scope, + Id=id + ) + return existing_acl, id, locktoken + + def list(self): + return wafv2_list_web_acls(self.wafv2, self.scope) + + def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'DefaultAction': default_action, + 'Rules': rules, + 'VisibilityConfig': { + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + } + } + if description: + req_obj['Description'] = description + if tags: + req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + + response = self.wafv2.create_web_acl(**req_obj) + self.existing_acl, self.id, self.locktoken = self.get_web_acl() + + return self.existing_acl + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), + description=dict(type='str'), + default_action=dict(type='str', choices=['Block', 'Allow']), + rules=dict(type='list', elements='dict'), + sampled_requests=dict(type='bool', default=False), + cloudwatch_metrics=dict(type='bool', default=True), + metric_name=dict(type='str'), + tags=dict(type='dict'), + purge_rules=dict(default=True, type='bool') + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['default_action', 'rules']]] + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + description = module.params.get("description") + default_action = module.params.get("default_action") + rules = module.params.get("rules") + 
sampled_requests = module.params.get("sampled_requests") + cloudwatch_metrics = module.params.get("cloudwatch_metrics") + metric_name = module.params.get("metric_name") + tags = module.params.get("tags") + purge_rules = module.params.get("purge_rules") + check_mode = module.check_mode + + if default_action == 'Block': + default_action = {'Block': {}} + elif default_action == 'Allow': + default_action = {'Allow': {}} + + if rules: + rules = [] + for rule in module.params.get("rules"): + rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True))) + + if not metric_name: + metric_name = name + + web_acl = WebACL(module.client('wafv2'), name, scope) + change = False + retval = {} + + if state == 'present': + if web_acl.get(): + change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) + change = change or web_acl.get().get('WebACL').get('Description') != description + change = change or web_acl.get().get('WebACL').get('DefaultAction') != default_action + + if change and not check_mode: + retval = web_acl.update( + default_action, + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name + ) + + else: + retval = web_acl.get().get('WebACL') + + else: + change = True + if not check_mode: + retval = web_acl.create( + default_action, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name, + tags, + description + ) + + elif state == 'absent': + if web_acl.get(): + if rules: + if len(rules) > 0: + change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) + if change and not check_mode: + retval = web_acl.update( + default_action, + description, + rules, + sampled_requests, + cloudwatch_metrics, + metric_name + ) + else: + change = True + if not check_mode: + retval = web_acl.remove() + + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) + + +if __name__ == '__main__': + main() diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py new file mode 100644 index 00000000000..1493947b259 --- /dev/null +++ b/wafv2_web_acl_info.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: wafv2_web_acl_info +version_added: 1.5.0 +author: + - "Markus Bergholz (@markuman)" +short_description: wafv2_web_acl +description: + - Info about web acl +requirements: + - boto3 + - botocore +options: + name: + description: + - The name of the web acl. + required: true + type: str + scope: + description: + - Scope of wafv2 web acl. 
+ required: true + choices: ["CLOUDFRONT","REGIONAL"] + type: str + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = ''' +- name: get web acl + community.aws.wafv2_web_acl_info: + name: test05 + scope: REGIONAL + register: out +''' + +RETURN = """ +arn: + description: web acl arn + sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 + type: str + returned: Always, as long as the web acl exists +description: + description: Description of the web acl + sample: Some web acl description + returned: Always, as long as the web acl exists + type: str +capacity: + description: Current capacity of the web acl + sample: 140 + returned: Always, as long as the web acl exists + type: int +name: + description: Web acl name + sample: test02 + returned: Always, as long as the web acl exists + type: str +rules: + description: Current rules of the web acl + returned: Always, as long as the web acl exists + type: list +visibility_config: + description: Visibility config of the web acl + returned: Always, as long as the web acl exists + type: dict + sample: + cloud_watch_metrics_enabled: true + metric_name: blub + sampled_requests_enabled: false +""" +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_web_acl(wafv2, name, scope, id): + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + return response + + +def main(): + + arg_spec = dict( + name=dict(type='str', required=True), + scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec + ) + + state = module.params.get("state") + name = module.params.get("name") + scope = module.params.get("scope") + + wafv2 = module.client('wafv2') + # check if web acl exists + response = wafv2_list_web_acls(wafv2, scope) + + id = None + retval = {} + + for item in response.get('WebACLs'): + if item.get('Name') == name: + id = item.get('Id') + + if id: + existing_acl = get_web_acl(wafv2, name, scope, id) + retval = camel_dict_to_snake_dict(existing_acl.get('WebACL')) + + module.exit_json(**retval) + + +if __name__ == '__main__': + main() From 7089072cddf26bc1d0926e52671649440c971587 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 8 Apr 2021 11:56:11 +0200 Subject: [PATCH 160/683] add exceptions --- wafv2_resources.py | 65 +++++++++++++++++++--------------- wafv2_resources_info.py | 47 +++++++++++++++---------- wafv2_rule_group.py | 51 +++++++++++++++++---------- wafv2_rule_group_info.py | 24 +++++++------ wafv2_web_acl.py | 75 +++++++++++++++++++++++----------------- wafv2_web_acl_info.py | 21 ++++++----- 6 files changed, 167 insertions(+), 116 deletions(-) diff --git a/wafv2_resources.py b/wafv2_resources.py index 36017114cd3..38757c1a18b 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -13,8 +13,7 @@ - "Markus Bergholz (@markuman)" short_description: wafv2_web_acl description: - - Create, modify and delete CloudWatch log group metric filter. 
- - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). + - Apply or remove wafv2 to other aws resources. requirements: - boto3 - botocore @@ -68,39 +67,51 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule -def get_web_acl(wafv2, name, scope, id): - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) +def get_web_acl(wafv2, name, scope, id, fail_json_aws): + try: + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response -def list_wafv2_resources(wafv2, arn): - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) +def list_wafv2_resources(wafv2, arn, fail_json_aws): + try: + response = wafv2.list_resources_for_web_acl( + WebACLArn=arn + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to list wafv2 web acl.") return response -def add_wafv2_resources(wafv2, waf_arn, arn): - response = wafv2.associate_web_acl( - WebACLArn=waf_arn, - ResourceArn=arn - ) +def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): + try: + response = wafv2.associate_web_acl( + WebACLArn=waf_arn, + ResourceArn=arn + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to add wafv2 web acl.") return response -def remove_resources(wafv2, arn): - response = wafv2.disassociate_web_acl( - ResourceArn=arn - ) +def remove_resources(wafv2, arn, fail_json_aws): + try: + response = wafv2.disassociate_web_acl( + ResourceArn=arn + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response @@ -129,7 +140,7 @@ def main(): # check if web acl exists - response = wafv2_list_web_acls(wafv2, scope) + response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} @@ -140,24 +151,24 @@ def main(): id = item.get('Id') if id: - existing_acl = get_web_acl(wafv2, name, scope, id) + existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) waf_arn = existing_acl.get('WebACL').get('ARN') - retval = list_wafv2_resources(wafv2, waf_arn) + retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws) if state == 'present': if retval: if arn not in retval.get('ResourceArns'): change = True if not check_mode: - retval = add_wafv2_resources(wafv2, waf_arn, arn) + retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws) elif state == 'absent': if retval: if arn in retval.get('ResourceArns'): change = True if not check_mode: - retval = remove_resources(wafv2, arn) + retval = remove_resources(wafv2, arn, module.fail_json_aws) module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index b039c4feff3..2829cfa1829 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -55,32 +55,41 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule -def get_web_acl(wafv2, name, scope, id): - response = 
wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) +def get_web_acl(wafv2, name, scope, id, fail_json_aws): + try: + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response -def list_web_acls(wafv2, scope): - response = wafv2.list_web_acls( - Scope=scope, - Limit=100 - ) +def list_web_acls(wafv2, scope, fail_json_aws): + try: + response = wafv2.list_web_acls( + Scope=scope, + Limit=100 + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to list wafv2 web acl.") return response -def list_wafv2_resources(wafv2, arn): - response = wafv2.list_resources_for_web_acl( - WebACLArn=arn - ) +def list_wafv2_resources(wafv2, arn, fail_json_aws): + try: + response = wafv2.list_resources_for_web_acl( + WebACLArn=arn + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to list wafv2 resources.") return response @@ -100,7 +109,7 @@ def main(): wafv2 = module.client('wafv2') # check if web acl exists - response = list_web_acls(wafv2, scope) + response = list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} @@ -110,10 +119,10 @@ def main(): id = item.get('Id') if id: - existing_acl = get_web_acl(wafv2, name, scope, id) + existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) arn = existing_acl.get('WebACL').get('ARN') - retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn)) + retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) module.exit_json(**retval) diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 6f87bb0488a..d66d4864bb5 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -13,8 +13,7 @@ - "Markus Bergholz (@markuman)" short_description: wafv2_web_acl description: - - Create, modify and delete CloudWatch log group metric filter. - - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). + - Create, modify and delete wafv2 rule groups. 
requirements:
   - boto3
   - botocore
@@ -209,16 +208,17 @@
 from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups, compare_priority_rules, wafv2_snake_dict_to_camel_dict
 
 try:
-    from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+    from botocore.exceptions import ClientError, BotoCoreError
 except ImportError:
     pass  # caught by AnsibleAWSModule
 
 
 class RuleGroup:
-    def __init__(self, wafv2, name, scope):
+    def __init__(self, wafv2, name, scope, fail_json_aws):
         self.wafv2 = wafv2
         self.name = name
         self.scope = scope
+        self.fail_json_aws = fail_json_aws
         self.existing_group, self.id, self.locktoken = self.get_group()
 
     def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name):
@@ -238,7 +238,10 @@ def update(self, description, rules, sampled_requests, cloudwatch_metrics, metri
         if description:
             req_obj['Description'] = description
 
-        response = self.wafv2.update_rule_group(**req_obj)
+        try:
+            response = self.wafv2.update_rule_group(**req_obj)
+        except (BotoCoreError, ClientError) as e:
+            self.fail_json_aws(e, msg="Failed to update wafv2 rule group.")
         return response
 
     def get_group(self):
@@ -255,16 +258,19 @@ def get_group(self):
 
         existing_group = None
         if id:
-            existing_group = self.wafv2.get_rule_group(
-                Name=self.name,
-                Scope=self.scope,
-                Id=id
-            )
+            try:
+                existing_group = self.wafv2.get_rule_group(
+                    Name=self.name,
+                    Scope=self.scope,
+                    Id=id
+                )
+            except (BotoCoreError, ClientError) as e:
+                self.fail_json_aws(e, msg="Failed to get wafv2 rule group.")
 
         return existing_group, id, locktoken
 
     def list(self):
-        return wafv2_list_rule_groups(self.wafv2, self.scope)
+        return wafv2_list_rule_groups(self.wafv2, self.scope, self.fail_json_aws)
 
     def get(self):
@@ -272,12 +278,15 @@ def get(self):
         return None
 
     def remove(self):
-        response = self.wafv2.delete_rule_group(
-            Name=self.name,
-            Scope=self.scope,
-            Id=self.id,
-            LockToken=self.locktoken
-        )
+        try:
+            response = self.wafv2.delete_rule_group(
+                Name=self.name,
+                Scope=self.scope,
+                Id=self.id,
+                LockToken=self.locktoken
+            )
+        except (BotoCoreError, ClientError) as e:
+            self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.")
         return response
 
     def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags):
@@ -299,7 +308,11 @@ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metr
         if tags:
             req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
 
-        response = self.wafv2.create_rule_group(**req_obj)
+        try:
+            response = self.wafv2.create_rule_group(**req_obj)
+        except (BotoCoreError, ClientError) as e:
+            self.fail_json_aws(e, msg="Failed to create wafv2 rule group.")
+
         self.existing_group, self.id, self.locktoken = self.get_group()
 
         return self.existing_group
@@ -348,7 +361,7 @@ def main():
     if not metric_name:
         metric_name = name
 
-    rule_group = RuleGroup(module.client('wafv2'), name, scope)
+    rule_group = RuleGroup(module.client('wafv2'), name, scope, module.fail_json_aws)
 
     change = False
     retval = {}
diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py
index c8192108e6b..afd399574ff 100644
--- a/wafv2_rule_group_info.py
+++ b/wafv2_rule_group_info.py
@@ -13,8 +13,7 @@
   - "Markus Bergholz (@markuman)"
 short_description: wafv2_web_acl_info
 description:
-  - Create, modify and delete CloudWatch log group metric filter.
-  - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm).
+  - Get information about existing wafv2 rule groups.
requirements: - boto3 - botocore @@ -102,17 +101,20 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule -def get_rule_group(wafv2, name, scope, id): - response = wafv2.get_rule_group( - Name=name, - Scope=scope, - Id=id - ) +def get_rule_group(wafv2, name, scope, id, fail_json_aws): + try: + response = wafv2.get_rule_group( + Name=name, + Scope=scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to get wafv2 rule group.") return response @@ -135,7 +137,7 @@ def main(): wafv2 = module.client('wafv2') # check if rule group exists - response = wafv2_list_rule_groups(wafv2, scope) + response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None retval = {} @@ -145,7 +147,7 @@ def main(): existing_group = None if id: - existing_group = get_rule_group(wafv2, name, scope, id) + existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) module.exit_json(**retval) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 43d9f8adb86..cc2aba98a8a 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -164,42 +164,49 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls, compare_priority_rules, wafv2_snake_dict_to_camel_dict try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule class WebACL: - def __init__(self, wafv2, name, scope): + def __init__(self, wafv2, name, scope, fail_json_aws): self.wafv2 = wafv2 self.name = name self.scope = scope + self.fail_json_aws = fail_json_aws self.existing_acl, self.id, self.locktoken = self.get_web_acl() def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name): - response = self.wafv2.update_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - DefaultAction=default_action, - Description=description, - Rules=rules, - VisibilityConfig={ - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - }, - LockToken=self.locktoken - ) + try: + response = self.wafv2.update_web_acl( + Name=self.name, + Scope=self.scope, + Id=self.id, + DefaultAction=default_action, + Description=description, + Rules=rules, + VisibilityConfig={ + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + }, + LockToken=self.locktoken + ) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") return response def remove(self): - response = self.wafv2.delete_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken - ) + try: + response = self.wafv2.delete_web_acl( + Name=self.name, + Scope=self.scope, + Id=self.id, + LockToken=self.locktoken + ) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response def get(self): @@ -221,15 +228,18 @@ def get_web_acl(self): arn = item.get('ARN') if id: - existing_acl = self.wafv2.get_web_acl( - Name=self.name, - Scope=self.scope, - Id=id - ) + try: + existing_acl = 
self.wafv2.get_web_acl( + Name=self.name, + Scope=self.scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") return existing_acl, id, locktoken def list(self): - return wafv2_list_web_acls(self.wafv2, self.scope) + return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description): req_obj = { @@ -248,9 +258,12 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me if tags: req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) - response = self.wafv2.create_web_acl(**req_obj) - self.existing_acl, self.id, self.locktoken = self.get_web_acl() + try: + response = self.wafv2.create_web_acl(**req_obj) + except (BotoCoreError, ClientError) as e: + self.fail_json_aws(e, msg="Failed to create wafv2 web acl.") + self.existing_acl, self.id, self.locktoken = self.get_web_acl() return self.existing_acl @@ -302,7 +315,7 @@ def main(): if not metric_name: metric_name = name - web_acl = WebACL(module.client('wafv2'), name, scope) + web_acl = WebACL(module.client('wafv2'), name, scope, module.fail_json_aws) change = False retval = {} diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 1493947b259..9368853017c 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -83,17 +83,20 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule -def get_web_acl(wafv2, name, scope, id): - response = wafv2.get_web_acl( - Name=name, - Scope=scope, - Id=id - ) +def get_web_acl(wafv2, name, scope, id, fail_json_aws): + try: + response = wafv2.get_web_acl( + Name=name, + Scope=scope, + Id=id + ) + except (BotoCoreError, ClientError) as e: + fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -114,7 +117,7 @@ def main(): wafv2 = module.client('wafv2') # check if web acl exists - response = wafv2_list_web_acls(wafv2, scope) + response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} @@ -124,7 +127,7 @@ def main(): id = item.get('Id') if id: - existing_acl = get_web_acl(wafv2, name, scope, id) + existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_acl.get('WebACL')) module.exit_json(**retval) From e4f4a2ae2266d0834b08fa095733f80bcdc7f396 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Tue, 13 Apr 2021 10:57:11 +0200 Subject: [PATCH 161/683] fix function name, fix missing NextMarker usage --- wafv2_resources.py | 2 +- wafv2_resources_info.py | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/wafv2_resources.py b/wafv2_resources.py index 38757c1a18b..4bf5f1dcca3 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -26,7 +26,7 @@ type: str name: description: - - The name web acl. + - The name of the web acl. 
type: str scope: description: diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 2829cfa1829..469fc3b7184 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -53,6 +53,7 @@ """ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: from botocore.exceptions import ClientError, BotoCoreError @@ -73,14 +74,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws): def list_web_acls(wafv2, scope, fail_json_aws): - try: - response = wafv2.list_web_acls( - Scope=scope, - Limit=100 - ) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to list wafv2 web acl.") - return response + return wafv2_list_web_acls(wafv2, scope, fail_json_aws) def list_wafv2_resources(wafv2, arn, fail_json_aws): From 07cdf51254a146ed886eaf345e10882d8bfa2f33 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 16 Apr 2021 11:02:13 +0200 Subject: [PATCH 162/683] fix documentation and integrationtest --- wafv2_web_acl.py | 36 +++++++++++++++++++++++++++++++++++- wafv2_web_acl_info.py | 13 +++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index cc2aba98a8a..ee70c65e54c 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -46,7 +46,7 @@ type: str sampled_requests: description: - - Sampled requests, true or false. + - Whether to store a sample of the web requests, true or false. type: bool default: false cloudwatch_metrics: @@ -68,6 +68,27 @@ - The Rule statements used to identify the web requests that you want to allow, block, or count. type: list elements: dict + suboptions: + name: + description: + - The name of the wafv2 rule. + type: str + priority: + description: + - The rule priority. + type: int + action: + description: + - Whether a rule is blocked, allowed or counted. + type: dict + visibility_config: + description: + - Visibility of a single wafv2 rule. + type: dict + statement: + description: + - Rule configuration. + type: dict purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete.
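The thread running through these wafv2 patches: helper functions no longer let botocore exceptions escape, and the module's `fail_json_aws` is threaded through as a callback rather than having helpers import the module object. A minimal sketch of the pattern, assuming an `AnsibleAWSModule` instance named `module`; the helper mirrors the shape of the diffs above rather than quoting any one of them:

```python
try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # caught by AnsibleAWSModule


def get_rule_group(wafv2, name, scope, id, fail_json_aws):
    # fail_json_aws is the bound module.fail_json_aws; on failure it reports
    # the exception and exits, so the return below is only reached on success.
    try:
        response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id)
    except (BotoCoreError, ClientError) as e:
        fail_json_aws(e, msg="Failed to get wafv2 rule group.")
    return response


# Call site inside main(): the client and the callback travel together.
# wafv2 = module.client('wafv2')
# group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws)
```

Passing the callback keeps the shared helpers in module_utils free of any direct reference to the module object.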
@@ -150,6 +171,19 @@ description: Current rules of the web acl returned: Always, as long as the web acl exists type: list + sample: + - name: admin_protect + override_action: + none: {} + priority: 1 + statement: + managed_rule_group_statement: + name: AWSManagedRulesAdminProtectionRuleSet + vendor_name: AWS + visibility_config: + cloud_watch_metrics_enabled: true + metric_name: admin_protect + sampled_requests_enabled: true visibility_config: description: Visibility config of the web acl returned: Always, as long as the web acl exists diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 9368853017c..8b8c79ef75a 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -69,6 +69,19 @@ description: Current rules of the web acl returned: Always, as long as the web acl exists type: list + sample: + - name: admin_protect + override_action: + none: {} + priority: 1 + statement: + managed_rule_group_statement: + name: AWSManagedRulesAdminProtectionRuleSet + vendor_name: AWS + visibility_config: + cloud_watch_metrics_enabled: true + metric_name: admin_protect + sampled_requests_enabled: true visibility_config: description: Visibility config of the web acl returned: Always, as long as the web acl exists From ce40a12d03ce82625c986526846adb6d58aaf907 Mon Sep 17 00:00:00 2001 From: ichekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Wed, 21 Apr 2021 19:04:31 +0000 Subject: [PATCH 163/683] Fix KeyError in aws_config_aggregator module If `organization_source` attribute is specified, module fails with the following error on line 206: ``` KeyError: 'OrganizationAggregationSourcep' ``` If `organization_source` attribute is specified and `account_sources` attribute is empty, module fails with the following error on line 119: ``` KeyError: 'AccountAggregationSources' ``` This PR fixes both issues. 
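For illustration, a condensed sketch of the corrected parameter assembly (variable handling simplified; only the parts relevant to the two KeyErrors are shown):

```python
params = {'ConfigurationAggregatorName': module.params.get('name')}

# Seed the list unconditionally so describe/update code can rely on the
# key existing even when no account_sources were supplied.
params['AccountAggregationSources'] = []
for source in module.params.get('account_sources') or []:
    entry = {}
    if source.get('account_ids'):
        entry['AccountIds'] = source['account_ids']
    params['AccountAggregationSources'].append(entry)

org_source = module.params.get('organization_source')
if org_source:
    params['OrganizationAggregationSource'] = {}  # correctly spelled key, no stray 'p'
    if org_source.get('all_aws_regions') is not None:
        params['OrganizationAggregationSource'].update(
            {'AllAwsRegions': org_source['all_aws_regions']}
        )
```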
--- aws_config_aggregator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 250f004a0f7..16f6ff5152a 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -103,7 +103,7 @@ def resource_exists(client, module, params): try: aggregator = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['name']] + ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] ) return aggregator['ConfigurationAggregators'][0] except is_boto3_error_code('NoSuchConfigurationAggregatorException'): @@ -128,7 +128,7 @@ def create_resource(client, module, params, result): def update_resource(client, module, params, result): current_params = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['name']] + ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] ) del current_params['ConfigurationAggregatorArn'] @@ -181,8 +181,8 @@ def main(): params = {} if name: params['ConfigurationAggregatorName'] = name + params['AccountAggregationSources'] = [] if module.params.get('account_sources'): - params['AccountAggregationSources'] = [] for i in module.params.get('account_sources'): tmp_dict = {} if i.get('account_ids'): @@ -203,7 +203,7 @@ def main(): 'AwsRegions': module.params.get('organization_source').get('aws_regions') }) if module.params.get('organization_source').get('all_aws_regions') is not None: - params['OrganizationAggregationSourcep'].update({ + params['OrganizationAggregationSource'].update({ 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions') }) From f1e78a2b08eee990e95633183c7fd36432c1abca Mon Sep 17 00:00:00 2001 From: travis Date: Mon, 19 Apr 2021 06:41:45 +1200 Subject: [PATCH 164/683] Collect arguments for application session stickiness and update docs --- elb_target_group.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/elb_target_group.py b/elb_target_group.py index 4980fc797ad..8d39fb3eae1 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -102,9 +102,21 @@ - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). type: int + stickiness_app_cookie_duration: + description: + - The time period, in seconds, during which requests from a client + should be routed to the same target. After this time period expires, + the application-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). + type: int + stickiness_app_cookie_name: + description: + - The name of the application cookie. Required if stickiness_type is + C(app_cookie). + type: string stickiness_type: description: - The type of sticky sessions. + - C(lb_cookie), C(app_cookie) or C(source_ip) - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers. 
type: str successful_response_codes: @@ -466,6 +478,8 @@ def create_or_update_target_group(connection, module): stickiness_enabled = module.params.get("stickiness_enabled") stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") stickiness_type = module.params.get("stickiness_type") + stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration") + stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name") health_option_keys = [ "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", @@ -753,6 +767,12 @@ def create_or_update_target_group(connection, module): if stickiness_type is not None: if stickiness_type != current_tg_attributes.get('stickiness_type'): update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + if stickiness_app_cookie_name is not None: + if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): + update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) + if stickiness_app_cookie_duration is not None: + if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: + update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) if update_attributes: try: @@ -833,6 +853,8 @@ def main(): stickiness_enabled=dict(type='bool'), stickiness_type=dict(), stickiness_lb_cookie_duration=dict(type='int'), + stickiness_app_cookie_duration=dict(type='int'), + stickiness_app_cookie_name=dict(), state=dict(required=True, choices=['present', 'absent']), successful_response_codes=dict(), tags=dict(default={}, type='dict'), From 8f7f380a14d4bcd2bbc70c5982ff8c861c397011 Mon Sep 17 00:00:00 2001 From: Travis Holton Date: Mon, 19 Apr 2021 22:09:48 +1200 Subject: [PATCH 165/683] Fix datatype of docs --- elb_target_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 8d39fb3eae1..7cf1cef2a7e 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -110,9 +110,9 @@ type: int stickiness_app_cookie_name: description: - - The name of the application cookie. Required if stickiness_type is + - The name of the application cookie. Required if C(stickiness_type) is C(app_cookie). - type: string + type: str stickiness_type: description: - The type of sticky sessions. From a093ce0e976dda3bbc29610a84b8ef3095bf1007 Mon Sep 17 00:00:00 2001 From: Travis Holton Date: Mon, 26 Apr 2021 12:28:13 +1200 Subject: [PATCH 166/683] Add version_added to docs and changelog entry --- elb_target_group.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/elb_target_group.py b/elb_target_group.py index 7cf1cef2a7e..42367323638 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -108,11 +108,13 @@ should be routed to the same target. After this time period expires, the application-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). type: int + version_added: 1.5.0 stickiness_app_cookie_name: description: - The name of the application cookie. Required if C(stickiness_type) is C(app_cookie). type: str + version_added: 1.5.0 stickiness_type: description: - The type of sticky sessions. 
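For context on how these options land in AWS: the module translates the stickiness_* parameters into plain ELBv2 target-group attributes. A minimal boto3 sketch of the equivalent direct call; the ARN is a placeholder and the attribute values are illustrative:

```python
import boto3

elbv2 = boto3.client('elbv2')

# Application-cookie stickiness expressed as raw target-group attributes;
# note that the API expects all attribute values as strings.
elbv2.modify_target_group_attributes(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/0123456789abcdef',  # placeholder
    Attributes=[
        {'Key': 'stickiness.enabled', 'Value': 'true'},
        {'Key': 'stickiness.type', 'Value': 'app_cookie'},
        {'Key': 'stickiness.app_cookie.cookie_name', 'Value': 'MYAPPCOOKIE'},
        {'Key': 'stickiness.app_cookie.duration_seconds', 'Value': '86400'},
    ],
)
```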
From d97c25f3055946b8a9048cd6a9b1b45cc71b9230 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 26 Apr 2021 09:14:04 +0200 Subject: [PATCH 167/683] Minor docs tweaks --- elb_target_group.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 42367323638..53a25fa4419 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -111,14 +111,13 @@ version_added: 1.5.0 stickiness_app_cookie_name: description: - - The name of the application cookie. Required if C(stickiness_type) is - C(app_cookie). + - The name of the application cookie. Required if I(stickiness_type=app_cookie). type: str version_added: 1.5.0 stickiness_type: description: - The type of sticky sessions. - - C(lb_cookie), C(app_cookie) or C(source_ip) + - Valid values are C(lb_cookie), C(app_cookie) or C(source_ip). - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers. type: str successful_response_codes: From f49d0cf05ae1f6d0a3c2e4fc68db6433c080d10f Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 8 Apr 2021 17:35:46 +0200 Subject: [PATCH 168/683] ecs_taskdefinition: Add depends_on feature Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index ed2825d0942..74ae0523f80 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -99,6 +99,24 @@ - If using the Fargate launch type, this field is required and is limited by the CPU. required: false type: str + depends_on: + version_added: 1.5.0 + description: + - The dependencies defined for container startup and shutdown. + - When a dependency is defined for container startup, for container shutdown it is reversed. + required: false + type: list + elements: dict + suboptions: + container_name: + description: The name of a container. + type: str + required: true + condition: + description: The dependency condition of the container. 
+ type: str + required: true + choices: ["start", "complete", "success", "healthy"] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -233,7 +251,7 @@ def describe_task(self, task_name): except botocore.exceptions.ClientError: return None - def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory): + def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory, depends_on): validated_containers = [] # Ensures the number parameters are int as required by boto @@ -258,7 +276,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, family=family, taskRoleArn=task_role_arn, containerDefinitions=container_definitions, - volumes=volumes + volumes=volumes, ) if network_mode != 'default': params['networkMode'] = network_mode @@ -270,6 +288,8 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, params['requiresCompatibilities'] = [launch_type] if execution_role_arn: params['executionRoleArn'] = execution_role_arn + if depends_on: + pass['dependsOn'] = depends_on try: response = self.ecs.register_task_definition(**params) @@ -329,7 +349,8 @@ def main(): volumes=dict(required=False, type='list', elements='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), - memory=dict(required=False, type='str') + memory=dict(required=False, type='str'), + depends_on=dict(required=False, type='list', elements='dict'), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -483,7 +504,8 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ volumes, module.params['launch_type'], module.params['cpu'], - module.params['memory']) + module.params['memory'], + module.params['depends_on']) results['changed'] = True elif module.params['state'] == 'absent': From 8aae87ab47eb717bdf3c1c6df302304ba068f815 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 13 Apr 2021 21:48:49 +0200 Subject: [PATCH 169/683] * Fix documentation and imports Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 74ae0523f80..a51926f1f3a 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -113,14 +113,13 @@ type: str required: true condition: - description: The dependency condition of the container. + description: The dependency condition of the container. 
type: str required: true choices: ["start", "complete", "success", "healthy"] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = r''' @@ -350,7 +349,7 @@ def main(): launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), memory=dict(required=False, type='str'), - depends_on=dict(required=False, type='list', elements='dict'), + depends_on=dict(required=False, type='list', elements='dict'), ) module = AnsibleAWSModule(argument_spec=argument_spec, From 769be5a53444b85ec32efc00bcd9e7cdc09de3c3 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 16 Apr 2021 15:33:28 +0200 Subject: [PATCH 170/683] * Fix syntax errors Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index a51926f1f3a..4e06688e7b3 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -108,7 +108,7 @@ type: list elements: dict suboptions: - container_name: + containerName: description: The name of a container. type: str required: true @@ -194,6 +194,25 @@ state: present network_mode: awsvpc +- name: Create task definition + community.aws.ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + launch_type: FARGATE + cpu: 512 + memory: 1024 + state: present + network_mode: awsvpc + depends_on: + - containerName: "simple-container" + condition: "start" + # Create Task Definition with Environment Variables and Secrets - name: Create task definition community.aws.ecs_taskdefinition: @@ -288,7 +307,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, if execution_role_arn: params['executionRoleArn'] = execution_role_arn if depends_on: - pass['dependsOn'] = depends_on + params['dependsOn'] = depends_on try: response = self.ecs.register_task_definition(**params) @@ -386,6 +405,11 @@ def main(): if launch_type == 'FARGATE' and network_mode != 'awsvpc': module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") + depends_on = module.params['depends_on'] + if launch_type == 'FARGATE' and depends_on: + if not module.botocore_at_least('1.3.0'): + module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch_type') + family = module.params['family'] existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) @@ -504,7 +528,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ module.params['launch_type'], module.params['cpu'], module.params['memory'], - module.params['depends_on']) + depends_on) results['changed'] = True elif module.params['state'] == 'absent': From 101272d7080881d4a3fb48575390a1a1288bbe74 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 20 Apr 2021 14:55:33 +0200 Subject: [PATCH 171/683] * Update documentation Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 235 +++++++++++++++++++++++++++++++++++------- 1 file changed, 197 insertions(+), 38 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 4e06688e7b3..54142392d5b 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -46,9 +46,187 @@ containers: description: - A list of containers definitions. + - See U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html) for a complete list of parameters. 
required: False type: list elements: dict + contains: + name: + description: The name of a container. + required: false + type: str + image: + description: The image used to start a container. + required: false + type: str + repositoryCredentials: + description: The private repository authentication credentials to use. + required: false + type: dict + cpu: + description: The number of cpu units reserved for the container. + required: false + type: int + memory: + description: The amount (in MiB) of memory to present to the container. + required: false + type: int + memoryReservation: + description: The soft limit (in MiB) of memory to reserve for the container. + required: false + type: int + links: + description: + - Allows containers to communicate with each other without the need for port mappings. + - This parameter is only supported if the network mode of a task definition is bridge. + required: false + type: list + portMappings: + description: The list of port mappings for the container. + required: false + type: list + elements: dict + contains: + containerPort: + description: The port number on the container that is bound to the user-specified or automatically assigned host port. + required: false + type: int + hostPort: + description: The port number on the container instance to reserve for your container. + required: false + type: int + protocol: + description: The protocol used for the port mapping. Valid values are tcp and udp. + required: false + type: str + essential: + description: + - If essential is true, and the fails or stops for any reason, all other containers that are part of the task are stopped. + required: false + type: bool + entryPoint: + description: The entry point that is passed to the container. + required: false + type: str + command: + description: The command that is passed to the container. + required: false + type: list + environment: + description: The environment variables to pass to a container. + required: false + type: list + elements: dict + contains: + name: + description: The name of the key-value pair. + required: false + type: str + value: + description: The value of the key-value pair. + required: false + type: str + environmentFiles: + description: A list of files containing the environment variables to pass to a container. + required: false + type: list + elements: dict + contains: + value: + description: The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. + required: false + type: str + type: + description: The file type to use. The only supported value is s3. + required: false + type: str + volumesFrom: + description: Data volumes to mount from another container. + required: false + type: list + linuxParameters: + description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. + required: false + type: list + devices: + description: Any host devices to expose to the container. + required: false + type: list + initProcessEnabled: + description: Run an init process inside the container that forwards signals and reaps processes. + required: false + type: bool + sharedMemorySize: + description: The value for the size (in MiB) of the /dev/shm volume. + required: false + type: int + tmpfs: + description: The container path, mount options, and size (in MiB) of the tmpfs mount. + required: false + type: list + maxSwap: + description: The total amount of swap memory (in MiB) a container can use. 
+ required: false + type: int + swappiness: + description: + - This allows you to tune a container's memory swappiness behavior. + - Accepted values are whole numbers between 0 and 100. + required: false + type: int + secrets: + description: The secrets to pass to the container. + required: false + type: list + dependsOn: + description: + - The dependencies defined for container startup and shutdown. + - When a dependency is defined for container startup, for container shutdown it is reversed. + required: false + type: list + elements: dict + suboptions: + containerName: + description: The name of a container. + type: str + required: true + condition: + description: The dependency condition of the container. + type: str + required: true + choices: ["start", "complete", "success", "healthy"] + startTimeout: + description: Time duration (in seconds) to wait before giving up on resolving dependencies for a container. + required: false + type: int + stopTimeout: + description: Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. + required: false + type: int + hostname: + description: The hostname to use for your container. + required: false + type: str + user: + description: The user to use inside the container. + required: false + type: str + workingDirectory: + description: The working directory in which to run commands inside the container. + required: false + type: str + disableNetworking: + description: When this parameter is true, networking is disabled within the container. + required: false + type: bool + privileged: + description: When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). + required: false + type: bool + readonlyRootFilesystem: + description: When this parameter is true, the container is given read-only access to its root file system. + required: false + type: bool network_mode: description: - The Docker networking mode to use for the containers in the task. @@ -99,24 +277,6 @@ - If using the Fargate launch type, this field is required and is limited by the CPU. required: false type: str - depends_on: - version_added: 1.5.0 - description: - - The dependencies defined for container startup and shutdown. - - When a dependency is defined for container startup, for container shutdown it is reversed. - required: false - type: list - elements: dict - suboptions: - containerName: - description: The name of a container. - type: str - required: true - condition: - description: The dependency condition of the container. 
- type: str - required: true - choices: ["start", "complete", "success", "healthy"] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -204,14 +364,11 @@ portMappings: - containerPort: 8080 hostPort: 8080 - launch_type: FARGATE - cpu: 512 - memory: 1024 - state: present - network_mode: awsvpc - depends_on: - - containerName: "simple-container" - condition: "start" + cpu: 512 + memory: 1024 + depends_on: + - containerName: "simple-app" + condition: "start" # Create Task Definition with Environment Variables and Secrets - name: Create task definition @@ -269,7 +426,7 @@ def describe_task(self, task_name): except botocore.exceptions.ClientError: return None - def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory, depends_on): + def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory): validated_containers = [] # Ensures the number parameters are int as required by boto @@ -294,7 +451,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, family=family, taskRoleArn=task_role_arn, containerDefinitions=container_definitions, - volumes=volumes, + volumes=volumes ) if network_mode != 'default': params['networkMode'] = network_mode @@ -306,8 +463,6 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, params['requiresCompatibilities'] = [launch_type] if execution_role_arn: params['executionRoleArn'] = execution_role_arn - if depends_on: - params['dependsOn'] = depends_on try: response = self.ecs.register_task_definition(**params) @@ -367,8 +522,7 @@ def main(): volumes=dict(required=False, type='list', elements='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), - memory=dict(required=False, type='str'), - depends_on=dict(required=False, type='list', elements='dict'), + memory=dict(required=False, type='str') ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -405,10 +559,16 @@ def main(): if launch_type == 'FARGATE' and network_mode != 'awsvpc': module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") - depends_on = module.params['depends_on'] - if launch_type == 'FARGATE' and depends_on: - if not module.botocore_at_least('1.3.0'): - module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch_type') + for container in module.params['containers']: + if container.get('links') and network_mode != 'bridge': + module.fail_json(msg='links parameter is only supported if the network mode of a task definition is bridge.') + + if container.get('swappiness') and (container.get('swappiness') < 0 or container.get('swappiness') > 100): + module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') + + if container.get('dependsOn') and launch_type == 'FARGATE': + if not module.botocore_at_least('1.3.0'): + module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch_type') family = module.params['family'] existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) @@ -527,8 +687,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ volumes, module.params['launch_type'], module.params['cpu'], - module.params['memory'], - depends_on) + module.params['memory']) results['changed'] = True elif module.params['state'] == 'absent': From 
7598d907aa6b097f8758d78877afc6d2a0089c51 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 21 Apr 2021 17:21:01 +0200 Subject: [PATCH 172/683] * Doc fix Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 288 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 268 insertions(+), 20 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 54142392d5b..6d858b24c91 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -50,7 +50,7 @@ required: False type: list elements: dict - contains: + suboptions: name: description: The name of a container. required: false @@ -63,6 +63,11 @@ description: The private repository authentication credentials to use. required: false type: dict + suboptions: + description: + - The Amazon Resource Name (ARN) of the secret containing the private repository credentials. + required: if C(repositoryCredentials) specified + type: str cpu: description: The number of cpu units reserved for the container. required: false @@ -78,7 +83,7 @@ links: description: - Allows containers to communicate with each other without the need for port mappings. - - This parameter is only supported if the network mode of a task definition is bridge. + - This parameter is only supported if I(network_mode=bridge). required: false type: list portMappings: @@ -86,7 +91,7 @@ required: false type: list elements: dict - contains: + suboptions: containerPort: description: The port number on the container that is bound to the user-specified or automatically assigned host port. required: false @@ -99,6 +104,8 @@ description: The protocol used for the port mapping. Valid values are tcp and udp. required: false type: str + default: tcp + choices: ['tcp', 'udp'] essential: description: - If essential is true, and the fails or stops for any reason, all other containers that are part of the task are stopped. @@ -117,7 +124,7 @@ required: false type: list elements: dict - contains: + suboptions: name: description: The name of the key-value pair. required: false @@ -131,7 +138,7 @@ required: false type: list elements: dict - contains: + suboptions: value: description: The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. required: false @@ -140,44 +147,159 @@ description: The file type to use. The only supported value is s3. required: false type: str + mountPoints: + description: The mount points for data volumes in your container. + required: false + type: list + elements: dict + suboptions: + sourceVolume: + description: The name of the volume to mount. + required: false + type: str + containerPath: + description: The path on the container to mount the host volume at. + required: false + type: str + readOnly: + description: + - If this value is true, the container has read-only access to the volume. + - If this value is false, then the container can write to the volume. + - The default value is false. + required: false + default: false + type: bool volumesFrom: description: Data volumes to mount from another container. required: false type: list + elements: dict + suboptions: + sourceContainer: + description: + - The name of another container within the same task definition from which to mount volumes. + required: false + type: str + readOnly: + description: + - If this value is true, the container has read-only access to the volume. + - If this value is false, then the container can write to the volume. + - The default value is false. 
+ required: false + default: false + type: bool linuxParameters: description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. required: false type: list - devices: - description: Any host devices to expose to the container. - required: false - type: list + suboptions: + capabilities: + description: + - The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. + required: false + type: dict + suboptions: + add: + description: + - The Linux capabilities for the container that have been added to the default configuration provided by Docker. + - If I(launch_type=FARGATE), this parameter is not supported. + required: false + type: list + choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", + "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", + "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", + "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", + "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] + drop: + description: + - The Linux capabilities for the container that have been removed from the default configuration provided by Docker. + required: false + type: list + choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", + "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", + "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", + "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", + "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] + devices: + description: + - Any host devices to expose to the container. + - If I(launch_type=FARGATE), this parameter is not supported. + required: false + type: list + elements: dict + suboptions: + hostPath: + description: The path for the device on the host container instance. + required: if C(devices) specified + type: str + containerPath: + description: The path inside the container at which to expose the host device. + required: false + type: str + permissions: + description: The explicit permissions to provide to the container for the device. + required: false + type: list initProcessEnabled: description: Run an init process inside the container that forwards signals and reaps processes. required: false type: bool sharedMemorySize: - description: The value for the size (in MiB) of the /dev/shm volume. + description: + - The value for the size (in MiB) of the /dev/shm volume. + - If I(launch_type=FARGATE), this parameter is not supported. required: false type: int tmpfs: - description: The container path, mount options, and size (in MiB) of the tmpfs mount. + description: + - The container path, mount options, and size (in MiB) of the tmpfs mount. + - If Fargate launch type is used, this parameter is not supported. required: false type: list + elements: dict + suboptions: + containerPath: + description: The absolute file path where the tmpfs volume is to be mounted. + required: if C(tmpfs) specified + type: str + size: + description: The size (in MiB) of the tmpfs volume. 
+ required: if C(tmpfs) specified + type: int + mountOptions: + description: The list of tmpfs volume mount options. + required: false + type: list + choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync", + "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", + "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime", + "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"] maxSwap: - description: The total amount of swap memory (in MiB) a container can use. + description: + - The total amount of swap memory (in MiB) a container can use. + - If Fargate launch type is used, this parameter is not supported. required: false type: int swappiness: description: - This allows you to tune a container's memory swappiness behavior. - - Accepted values are whole numbers between 0 and 100. + - If Fargate launch type is used, this parameter is not supported. required: false type: int secrets: description: The secrets to pass to the container. required: false type: list + elements: dict + subpotions: + name: + description: The value to set as the environment variable on the container. + required: if C(secrets) specified + type: str + size: + description: The secret to expose to the container. + required: if C(secrets) specified + type: str dependsOn: description: - The dependencies defined for container startup and shutdown. @@ -204,11 +326,15 @@ required: false type: int hostname: - description: The hostname to use for your container. + description: + - The hostname to use for your container. + - This parameter is not supported if I(network_mode=awsvpc). required: false type: str user: - description: The user to use inside the container. + description: + - The user to use inside the container. + - This parameter is not supported for Windows containers. required: false type: str workingDirectory: @@ -227,6 +353,116 @@ description: When this parameter is true, the container is given read-only access to its root file system. required: false type: bool + dnsServers: + description: + - A list of DNS servers that are presented to the container. + - This parameter is not supported for Windows containers. + required: false + type: list + dnsSearchDomains: + description: + - A list of DNS search domains that are presented to the container. + - This parameter is not supported for Windows containers. + required: false + type: list + extraHosts: + description: + - A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. + - This parameter is not supported for Windows containers or tasks that use I(network_mode=awsvpc). + required: false + type: list + elements: dict + suboptions: + hostname: + description: The hostname to use in the /etc/hosts entry. + type: str + required: false + ipAddress: + description: The IP address to use in the /etc/hosts entry. + type: str + required: false + dockerSecurityOptions: + description: + - A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. + - This parameter is not supported for Windows containers. + required: false + type: list + interactive: + description: + - When it is true, it allows to deploy containerized applications that require stdin or a tty to be allocated. + required: false + type: bool + pseudoTerminal: + description: When this parameter is true, a TTY is allocated. 
+ required: false + type: bool + dockerLabels: + description: A key/value map of labels to add to the container. + required: false + type: dict + ulimits: + description: + - A list of ulimits to set in the container. + - This parameter is not supported for Windows containers. + required: false + type: list + elements: dict + suboptions: + name: + description: The type of the ulimit . + type: str + required: false + softLimit: + description: The soft limit for the ulimit type. + type: int + required: false + hardLimit: + description: The hard limit for the ulimit type. + type: int + required: false + logConfiguration: + description: The log configuration specification for the container. + required: false + type: dict + suboptions: + logDriver: + description: + - The log driver to use for the container. + - For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens. + - For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. + type: str + required: false + options: + description: The configuration options to send to the log driver. + required: false + type: str + secretOptions: + description: The secrets to pass to the log configuration. + required: false + type: list + elements: dict + suboptions: + name: + description: The name of the secret. + type: str + required: false + valueFrom: + description: The secret to expose to the container. + type: str + required: false + healthCheck: + description: The health check command and associated configuration parameters for the container. + required: false + type: dict + systemControls: + description: A list of namespaced kernel parameters to set in the container. + required: false + type: list + resourceRequirements: + description: + - The type and amount of a resource to assign to a container. The only supported resource is a GPU. + required: false + type: list network_mode: description: - The Docker networking mode to use for the containers in the task. 
@@ -333,7 +569,7 @@ image: "nginx" portMappings: - containerPort: 8080 - hostPort: 8080 + hostPort: 8080 cpu: 512 memory: 1024 state: present @@ -347,7 +583,7 @@ image: "nginx" portMappings: - containerPort: 8080 - hostPort: 8080 + hostPort: 8080 launch_type: FARGATE cpu: 512 memory: 1024 @@ -363,10 +599,10 @@ image: "nginx" portMappings: - containerPort: 8080 - hostPort: 8080 + hostPort: 8080 cpu: 512 memory: 1024 - depends_on: + dependsOn: - containerName: "simple-app" condition: "start" @@ -568,7 +804,19 @@ def main(): if container.get('dependsOn') and launch_type == 'FARGATE': if not module.botocore_at_least('1.3.0'): - module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch_type') + module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch type') + + if container.get('sharedMemorySize') and launch_type == 'FARGATE': + module.fail_json(msg='sharedMemorySize parameter is only supported withFargate launch type.') + + if container.get('tmpfs') and launch_type == 'FARGATE': + module.fail_json(msg='tmpfs parameter is only supported with Fargate launch type.') + + if container.get('hostname') and network_mode == 'awsvpc': + module.fail_json(msg='hostname parameter is only supported with awsvpc network mode.') + + if container.get('extraHosts') and network_mode == 'awsvpc': + module.fail_json(msg='extraHosts parameter is only supported with awsvpc network mode.') family = module.params['family'] existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) From 8d913ba6c6595ac5c08c5ce928c7b020ea5f5bbb Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Apr 2021 12:19:31 +0200 Subject: [PATCH 173/683] * Doc fix Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 191 +++++++++++++++++++++--------------------- 1 file changed, 96 insertions(+), 95 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 6d858b24c91..8ea97a2f21e 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -53,157 +53,157 @@ suboptions: name: description: The name of a container. - required: false + required: False type: str image: description: The image used to start a container. - required: false + required: False type: str repositoryCredentials: description: The private repository authentication credentials to use. - required: false + required: False type: dict suboptions: description: - The Amazon Resource Name (ARN) of the secret containing the private repository credentials. - required: if C(repositoryCredentials) specified + required: True type: str cpu: description: The number of cpu units reserved for the container. - required: false + required: False type: int memory: description: The amount (in MiB) of memory to present to the container. - required: false + required: False type: int memoryReservation: description: The soft limit (in MiB) of memory to reserve for the container. - required: false + required: False type: int links: description: - Allows containers to communicate with each other without the need for port mappings. - This parameter is only supported if I(network_mode=bridge). - required: false + required: False type: list portMappings: description: The list of port mappings for the container. - required: false + required: False type: list elements: dict suboptions: containerPort: description: The port number on the container that is bound to the user-specified or automatically assigned host port. 
- required: false + required: False type: int hostPort: description: The port number on the container instance to reserve for your container. - required: false + required: False type: int protocol: description: The protocol used for the port mapping. Valid values are tcp and udp. - required: false + required: False type: str default: tcp choices: ['tcp', 'udp'] essential: description: - - If essential is true, and the fails or stops for any reason, all other containers that are part of the task are stopped. - required: false + - If C(essential) is True, and the fails or stops for any reason, all other containers that are part of the task are stopped. + required: False type: bool entryPoint: description: The entry point that is passed to the container. - required: false + required: False type: str command: description: The command that is passed to the container. - required: false + required: False type: list environment: description: The environment variables to pass to a container. - required: false + required: False type: list elements: dict suboptions: name: description: The name of the key-value pair. - required: false + required: False type: str value: description: The value of the key-value pair. - required: false + required: False type: str environmentFiles: description: A list of files containing the environment variables to pass to a container. - required: false + required: False type: list elements: dict suboptions: value: description: The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. - required: false + required: False type: str type: description: The file type to use. The only supported value is s3. - required: false + required: False type: str mountPoints: description: The mount points for data volumes in your container. - required: false + required: False type: list elements: dict suboptions: sourceVolume: description: The name of the volume to mount. - required: false + required: False type: str containerPath: description: The path on the container to mount the host volume at. - required: false + required: False type: str readOnly: description: - - If this value is true, the container has read-only access to the volume. - - If this value is false, then the container can write to the volume. - - The default value is false. - required: false - default: false + - If this value is True, the container has read-only access to the volume. + - If this value is False, then the container can write to the volume. + - The default value is False. + required: False + default: False type: bool volumesFrom: description: Data volumes to mount from another container. - required: false + required: False type: list elements: dict suboptions: sourceContainer: description: - The name of another container within the same task definition from which to mount volumes. - required: false + required: False type: str readOnly: description: - - If this value is true, the container has read-only access to the volume. - - If this value is false, then the container can write to the volume. - - The default value is false. - required: false - default: false + - If this value is True, the container has read-only access to the volume. + - If this value is False, then the container can write to the volume. + - The default value is False. + required: False + default: False type: bool linuxParameters: description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. 
- required: false + required: False type: list suboptions: capabilities: description: - The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. - required: false + required: False type: dict suboptions: add: description: - The Linux capabilities for the container that have been added to the default configuration provided by Docker. - If I(launch_type=FARGATE), this parameter is not supported. - required: false + required: False type: list choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", @@ -213,7 +213,7 @@ drop: description: - The Linux capabilities for the container that have been removed from the default configuration provided by Docker. - required: false + required: False type: list choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", @@ -224,51 +224,51 @@ description: - Any host devices to expose to the container. - If I(launch_type=FARGATE), this parameter is not supported. - required: false + required: False type: list elements: dict suboptions: hostPath: description: The path for the device on the host container instance. - required: if C(devices) specified + required: True type: str containerPath: description: The path inside the container at which to expose the host device. - required: false + required: False type: str permissions: description: The explicit permissions to provide to the container for the device. - required: false + required: False type: list initProcessEnabled: description: Run an init process inside the container that forwards signals and reaps processes. - required: false + required: False type: bool sharedMemorySize: description: - The value for the size (in MiB) of the /dev/shm volume. - If I(launch_type=FARGATE), this parameter is not supported. - required: false + required: False type: int tmpfs: description: - The container path, mount options, and size (in MiB) of the tmpfs mount. - If Fargate launch type is used, this parameter is not supported. - required: false + required: False type: list elements: dict suboptions: containerPath: description: The absolute file path where the tmpfs volume is to be mounted. - required: if C(tmpfs) specified + required: True type: str size: description: The size (in MiB) of the tmpfs volume. - required: if C(tmpfs) specified + required: True type: int mountOptions: description: The list of tmpfs volume mount options. - required: false + required: False type: list choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync", "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", @@ -278,190 +278,191 @@ description: - The total amount of swap memory (in MiB) a container can use. - If Fargate launch type is used, this parameter is not supported. - required: false + required: False type: int swappiness: description: - This allows you to tune a container's memory swappiness behavior. - If Fargate launch type is used, this parameter is not supported. - required: false + required: False type: int secrets: description: The secrets to pass to the container. 
- required: false + required: False type: list elements: dict subpotions: name: description: The value to set as the environment variable on the container. - required: if C(secrets) specified + required: True type: str size: description: The secret to expose to the container. - required: if C(secrets) specified + required: True type: str dependsOn: description: - The dependencies defined for container startup and shutdown. - When a dependency is defined for container startup, for container shutdown it is reversed. - required: false + required: False type: list elements: dict suboptions: containerName: description: The name of a container. type: str - required: true + required: True condition: description: The dependency condition of the container. type: str - required: true + required: True choices: ["start", "complete", "success", "healthy"] startTimeout: description: Time duration (in seconds) to wait before giving up on resolving dependencies for a container. - required: false + required: False type: int stopTimeout: description: Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. - required: false + required: False type: int hostname: description: - The hostname to use for your container. - This parameter is not supported if I(network_mode=awsvpc). - required: false + required: False type: str user: description: - The user to use inside the container. - This parameter is not supported for Windows containers. - required: false + required: False type: str workingDirectory: description: The working directory in which to run commands inside the container. - required: false + required: False type: str disableNetworking: - description: When this parameter is true, networking is disabled within the container. - required: false + description: When this parameter is True, networking is disabled within the container. + required: False type: bool privileged: - description: When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). - required: false + description: When this parameter is True, the container is given elevated privileges on the host container instance (similar to the root user). + required: False type: bool readonlyRootFilesystem: - description: When this parameter is true, the container is given read-only access to its root file system. + description: When this parameter is True, the container is given read-only access to its root file system. required: false type: bool dnsServers: description: - A list of DNS servers that are presented to the container. - This parameter is not supported for Windows containers. - required: false + required: False type: list dnsSearchDomains: description: - A list of DNS search domains that are presented to the container. - This parameter is not supported for Windows containers. - required: false + required: False type: list extraHosts: description: - A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. - This parameter is not supported for Windows containers or tasks that use I(network_mode=awsvpc). - required: false + required: False type: list elements: dict suboptions: hostname: description: The hostname to use in the /etc/hosts entry. type: str - required: false + required: False ipAddress: description: The IP address to use in the /etc/hosts entry. 
type: str - required: false + required: False dockerSecurityOptions: description: - A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. - This parameter is not supported for Windows containers. - required: false + required: False type: list interactive: description: - When it is true, it allows to deploy containerized applications that require stdin or a tty to be allocated. - required: false + required: False type: bool pseudoTerminal: - description: When this parameter is true, a TTY is allocated. - required: false + description: When this parameter is True, a TTY is allocated. + required: False type: bool dockerLabels: description: A key/value map of labels to add to the container. - required: false + required: False type: dict ulimits: description: - A list of ulimits to set in the container. - This parameter is not supported for Windows containers. - required: false + required: False type: list elements: dict suboptions: name: description: The type of the ulimit . type: str - required: false + required: False softLimit: description: The soft limit for the ulimit type. type: int - required: false + required: False hardLimit: description: The hard limit for the ulimit type. type: int - required: false + required: False logConfiguration: description: The log configuration specification for the container. - required: false + required: False type: dict suboptions: logDriver: description: - The log driver to use for the container. - For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens. - - For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. + - For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, + gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. type: str - required: false + required: False options: - description: The configuration options to send to the log driver. - required: false + description: The configuration options to send to the log driver. + required: False type: str secretOptions: description: The secrets to pass to the log configuration. - required: false + required: False type: list elements: dict suboptions: name: description: The name of the secret. type: str - required: false + required: False valueFrom: description: The secret to expose to the container. type: str - required: false + required: False healthCheck: description: The health check command and associated configuration parameters for the container. - required: false + required: False type: dict systemControls: description: A list of namespaced kernel parameters to set in the container. - required: false + required: False type: list resourceRequirements: description: - The type and amount of a resource to assign to a container. The only supported resource is a GPU. - required: false + required: False type: list network_mode: description: @@ -475,7 +476,7 @@ type: str task_role_arn: description: - - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted + - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume.All containers in this task are granted the permissions that are specified in this role. 
required: false type: str From 857db6fc69776047014e7fbaae366387ae4779de Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Apr 2021 13:18:43 +0200 Subject: [PATCH 174/683] * Documentation fix: add missing key Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 8ea97a2f21e..4341d656018 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -47,7 +47,7 @@ description: - A list of containers definitions. - See U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html) for a complete list of parameters. - required: False + required: True type: list elements: dict suboptions: @@ -64,10 +64,11 @@ required: False type: dict suboptions: - description: - - The Amazon Resource Name (ARN) of the secret containing the private repository credentials. - required: True - type: str + credentialsParameter: + description: + - The Amazon Resource Name (ARN) of the secret containing the private repository credentials. + required: True + type: str cpu: description: The number of cpu units reserved for the container. required: False @@ -291,7 +292,7 @@ required: False type: list elements: dict - subpotions: + suboptions: name: description: The value to set as the environment variable on the container. required: True @@ -752,7 +753,7 @@ def main(): family=dict(required=False, type='str'), revision=dict(required=False, type='int'), force_create=dict(required=False, default=False, type='bool'), - containers=dict(required=False, type='list', elements='dict'), + containers=dict(required=True, type='list', elements='dict'), network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'), task_role_arn=dict(required=False, default='', type='str'), execution_role_arn=dict(required=False, default='', type='str'), @@ -779,11 +780,6 @@ def main(): if not module.botocore_at_least('1.10.44'): module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn') - if module.params['containers']: - for container in module.params['containers']: - for environment in container.get('environment', []): - environment['value'] = to_text(environment['value']) - if module.params['state'] == 'present': if 'containers' not in module.params or not module.params['containers']: module.fail_json(msg="To use task definitions, a list of containers must be specified") @@ -800,13 +796,21 @@ def main(): if container.get('links') and network_mode != 'bridge': module.fail_json(msg='links parameter is only supported if the network mode of a task definition is bridge.') + for environment in container.get('environment', []): + environment['value'] = to_text(environment['value']) + + for environment_file in container.get('environmentFiles', []): + if environment_file['value'] != 's3': + module.fail_json(msg='The only supported value for the file type is s3.') + + if container.get('maxSwap') and launch_type == 'FARGATE': + module.fail_json(msg='maxSwap parameter is only supported withFargate launch type.') + elif container.get('maxSwap') < 0: + module.fail_json(msg='Accepted values are 0 or any positive integer.') + if container.get('swappiness') and (container.get('swappiness') < 0 or container.get('swappiness') > 100): module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') - if container.get('dependsOn') and launch_type == 'FARGATE': - if 
not module.botocore_at_least('1.3.0'): - module.fail_json(msg='botocore needs to be version 1.3.0 or higher to use depends_on on Fargate launch type') - if container.get('sharedMemorySize') and launch_type == 'FARGATE': module.fail_json(msg='sharedMemorySize parameter is only supported withFargate launch type.') From 33ef63f592f55cffa387ff4389107262adaca3dd Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Apr 2021 15:46:13 +0200 Subject: [PATCH 175/683] * Fix sanity Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 4341d656018..1703550c431 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -798,16 +798,16 @@ def main(): for environment in container.get('environment', []): environment['value'] = to_text(environment['value']) - + for environment_file in container.get('environmentFiles', []): if environment_file['value'] != 's3': module.fail_json(msg='The only supported value for the file type is s3.') - + if container.get('maxSwap') and launch_type == 'FARGATE': module.fail_json(msg='maxSwap parameter is only supported withFargate launch type.') elif container.get('maxSwap') < 0: module.fail_json(msg='Accepted values are 0 or any positive integer.') - + if container.get('swappiness') and (container.get('swappiness') < 0 or container.get('swappiness') > 100): module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') From 47db1ed19632b6affb11dcda8a587c0e626d8c12 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 26 Apr 2021 11:44:58 +0200 Subject: [PATCH 176/683] * Address reviewer's comments Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 155 +++++++++++++++++++++--------------------- 1 file changed, 79 insertions(+), 76 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 1703550c431..d47c50fb8c3 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -102,14 +102,14 @@ required: False type: int protocol: - description: The protocol used for the port mapping. Valid values are tcp and udp. + description: The protocol used for the port mapping. required: False type: str default: tcp choices: ['tcp', 'udp'] essential: description: - - If C(essential) is True, and the fails or stops for any reason, all other containers that are part of the task are stopped. + - If I(essential=True), and the container fails or stops for any reason, all other containers that are part of the task are stopped. required: False type: bool entryPoint: @@ -145,7 +145,7 @@ required: False type: str type: - description: The file type to use. The only supported value is s3. + description: The file type to use. The only supported value is C(s3). required: False type: str mountPoints: @@ -164,9 +164,8 @@ type: str readOnly: description: - - If this value is True, the container has read-only access to the volume. - - If this value is False, then the container can write to the volume. - - The default value is False. + - If this value is C(True), the container has read-only access to the volume. + - If this value is C(False), then the container can write to the volume. required: False default: False type: bool @@ -183,9 +182,8 @@ type: str readOnly: description: - - If this value is True, the container has read-only access to the volume. - - If this value is False, then the container can write to the volume. - - The default value is False. 
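The environment handling that PATCH 174 moves into the validation loop exists because RegisterTaskDefinition only accepts string environment values; a play that writes value: 8080 or value: true would otherwise send an integer or boolean. A standalone sketch of the same coercion, using the to_text helper the module already imports:

from ansible.module_utils._text import to_text

def stringify_environment(container):
    # Mirror the module's loop: coerce every environment value to text so
    # booleans and integers coming from YAML survive the ECS API's type check.
    for environment in container.get('environment', []):
        environment['value'] = to_text(environment['value'])
    return container

# stringify_environment({'environment': [{'name': 'PORT', 'value': 8080}]})
# returns {'environment': [{'name': 'PORT', 'value': '8080'}]}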
+ - If this value is C(True), the container has read-only access to the volume. + - If this value is C(False), then the container can write to the volume. required: False default: False type: bool @@ -241,52 +239,52 @@ description: The explicit permissions to provide to the container for the device. required: False type: list - initProcessEnabled: - description: Run an init process inside the container that forwards signals and reaps processes. - required: False - type: bool - sharedMemorySize: - description: - - The value for the size (in MiB) of the /dev/shm volume. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: int - tmpfs: - description: - - The container path, mount options, and size (in MiB) of the tmpfs mount. - - If Fargate launch type is used, this parameter is not supported. - required: False - type: list - elements: dict - suboptions: - containerPath: - description: The absolute file path where the tmpfs volume is to be mounted. - required: True - type: str - size: - description: The size (in MiB) of the tmpfs volume. - required: True + initProcessEnabled: + description: Run an init process inside the container that forwards signals and reaps processes. + required: False + type: bool + sharedMemorySize: + description: + - The value for the size (in MiB) of the /dev/shm volume. + - If I(launch_type=FARGATE), this parameter is not supported. + required: False type: int - mountOptions: - description: The list of tmpfs volume mount options. + tmpfs: + description: + - The container path, mount options, and size (in MiB) of the tmpfs mount. + - If I(launch_type=FARGATE), this parameter is not supported. required: False type: list - choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync", - "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", - "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime", - "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"] - maxSwap: - description: - - The total amount of swap memory (in MiB) a container can use. - - If Fargate launch type is used, this parameter is not supported. - required: False - type: int - swappiness: - description: - - This allows you to tune a container's memory swappiness behavior. - - If Fargate launch type is used, this parameter is not supported. - required: False - type: int + elements: dict + suboptions: + containerPath: + description: The absolute file path where the tmpfs volume is to be mounted. + required: True + type: str + size: + description: The size (in MiB) of the tmpfs volume. + required: True + type: int + mountOptions: + description: The list of tmpfs volume mount options. + required: False + type: list + choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync", + "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", + "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime", + "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"] + maxSwap: + description: + - The total amount of swap memory (in MiB) a container can use. + - If I(launch_type=FARGATE), this parameter is not supported. 
+ required: False + type: int + swappiness: + description: + - This allows you to tune a container's memory swappiness behavior. + - If I(launch_type=FARGATE), this parameter is not supported. + required: False + type: int secrets: description: The secrets to pass to the container. required: False @@ -343,15 +341,15 @@ required: False type: str disableNetworking: - description: When this parameter is True, networking is disabled within the container. + description: When this parameter is C(True), networking is disabled within the container. required: False type: bool privileged: - description: When this parameter is True, the container is given elevated privileges on the host container instance (similar to the root user). + description: When this parameter is C(True), the container is given elevated privileges on the host container instance. required: False type: bool readonlyRootFilesystem: - description: When this parameter is True, the container is given read-only access to its root file system. + description: When this parameter is C(True), the container is given read-only access to its root file system. required: false type: bool dnsServers: @@ -390,11 +388,11 @@ type: list interactive: description: - - When it is true, it allows to deploy containerized applications that require stdin or a tty to be allocated. + - When I(interactive=True), it allows to deploy containerized applications that require stdin or a tty to be allocated. required: False type: bool pseudoTerminal: - description: When this parameter is True, a TTY is allocated. + description: When this parameter is C(True), a TTY is allocated. required: False type: bool dockerLabels: @@ -410,7 +408,7 @@ elements: dict suboptions: name: - description: The type of the ulimit . + description: The type of the ulimit. type: str required: False softLimit: @@ -429,9 +427,9 @@ logDriver: description: - The log driver to use for the container. - - For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens. - - For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, - gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. + - For tasks on AWS Fargate, the supported log drivers are C(awslogs), C(splunk), and C(awsfirelens). + - For tasks hosted on Amazon EC2 instances, the supported log drivers are C(awslogs), C(fluentd), + C(gelf), C(json-file), C(journald), C(logentries), C(syslog), C(splunk), and C(awsfirelens). type: str required: False options: @@ -462,7 +460,8 @@ type: list resourceRequirements: description: - - The type and amount of a resource to assign to a container. The only supported resource is a GPU. + - The type and amount of a resource to assign to a container. + - The only supported resource is a C(GPU). 
required: False type: list network_mode: @@ -793,8 +792,8 @@ def main(): module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") for container in module.params['containers']: - if container.get('links') and network_mode != 'bridge': - module.fail_json(msg='links parameter is only supported if the network mode of a task definition is bridge.') + if container.get('links') and network_mode == 'awsvpc': + module.fail_json(msg='links parameter is not supported if network mode is awsvpc.') for environment in container.get('environment', []): environment['value'] = to_text(environment['value']) @@ -803,25 +802,29 @@ def main(): if environment_file['value'] != 's3': module.fail_json(msg='The only supported value for the file type is s3.') - if container.get('maxSwap') and launch_type == 'FARGATE': - module.fail_json(msg='maxSwap parameter is only supported withFargate launch type.') - elif container.get('maxSwap') < 0: - module.fail_json(msg='Accepted values are 0 or any positive integer.') + for linux_param in container.get('linuxParameters', {}): + if linux_param.get('devices') and launch_type == 'FARGATE': + module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.') + + if linux_param.get('maxSwap') and launch_type == 'FARGATE': + module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.') + elif linux_param.get('maxSwap') and linux_param['maxSwap'] < 0: + module.fail_json(msg='Accepted values are 0 or any positive integer.') - if container.get('swappiness') and (container.get('swappiness') < 0 or container.get('swappiness') > 100): - module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') + if linux_param.get('swappiness') and (linux_param['swappiness'] < 0 or linux_param['swappiness'] > 100): + module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') - if container.get('sharedMemorySize') and launch_type == 'FARGATE': - module.fail_json(msg='sharedMemorySize parameter is only supported withFargate launch type.') + if linux_param.get('sharedMemorySize') and launch_type == 'FARGATE': + module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.') - if container.get('tmpfs') and launch_type == 'FARGATE': - module.fail_json(msg='tmpfs parameter is only supported with Fargate launch type.') + if linux_param.get('tmpfs') and launch_type == 'FARGATE': + module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.') if container.get('hostname') and network_mode == 'awsvpc': - module.fail_json(msg='hostname parameter is only supported with awsvpc network mode.') + module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.') if container.get('extraHosts') and network_mode == 'awsvpc': - module.fail_json(msg='extraHosts parameter is only supported with awsvpc network mode.') + module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.') family = module.params['family'] existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) From 37d4fbd32e1b0b1c7f58035d6ad4b1aeb6dc1e0b Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 26 Apr 2021 15:43:37 +0200 Subject: [PATCH 177/683] * Doc fixing Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index d47c50fb8c3..7803b117891 100644 
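For reference, the launch-type guards PATCH 176 introduces condense to the sketch below. This is not the module's code: it assumes the whole linuxParameters mapping is passed in (note that the patch itself iterates container.get('linuxParameters', {}), which yields the mapping's keys rather than the mapping), and fail stands in for module.fail_json:

def validate_linux_parameters(linux_params, launch_type, fail):
    # Fargate tasks cannot use host devices, swap controls, shared memory or tmpfs.
    if launch_type == 'FARGATE':
        for unsupported in ('devices', 'maxSwap', 'sharedMemorySize', 'tmpfs'):
            if linux_params.get(unsupported):
                fail('%s parameter is not supported with the FARGATE launch type.' % unsupported)
    max_swap = linux_params.get('maxSwap')
    if max_swap is not None and max_swap < 0:
        fail('Accepted values for maxSwap are 0 or any positive integer.')
    swappiness = linux_params.get('swappiness')
    if swappiness is not None and not 0 <= swappiness <= 100:
        fail('Accepted values for swappiness are whole numbers between 0 and 100.')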
--- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -469,14 +469,14 @@ - The Docker networking mode to use for the containers in the task. - C(awsvpc) mode was added in Ansible 2.5 - Windows containers must use I(network_mode=default), which will utilize docker NAT networking. - - Setting I(network_mode=default) for a Linux container will use bridge mode. + - Setting I(network_mode=default) for a Linux container will use C(bridge) mode. required: false default: bridge choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ] type: str task_role_arn: description: - - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume.All containers in this task are granted + - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. required: false type: str @@ -504,14 +504,14 @@ choices: ["EC2", "FARGATE"] cpu: description: - - The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. - - If using the Fargate launch type, this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096). + - The number of cpu units used by the task. If I(launch_type=EC2), this field is optional and any value can be used. + - If I(launch_type=FARGATE), this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096). required: false type: str memory: description: - - The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. - - If using the Fargate launch type, this field is required and is limited by the CPU. + - The amount (in MiB) of memory used by the task. If I(launch_type=EC2), this field is optional and any value can be used. + - If I(launch_type=FARGATE), this field is required and is limited by the CPU. 
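The cpu/memory wording fixed above hides a real constraint: with I(launch_type=FARGATE) the two fields must form a valid pairing from AWS's Fargate task-size table. A few allowed combinations, taken from AWS documentation rather than from this patch:

# cpu 256  -> memory 512, 1024 or 2048
# cpu 512  -> memory 1024 to 4096, in 1024-MiB steps
# cpu 1024 -> memory 2048 to 8192, in 1024-MiB steps
fargate_task_size = {'cpu': '256', 'memory': '512'}  # both strings, as the module expects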
required: false type: str extends_documentation_fragment: @@ -800,7 +800,7 @@ def main(): for environment_file in container.get('environmentFiles', []): if environment_file['value'] != 's3': - module.fail_json(msg='The only supported value for the file type is s3.') + module.fail_json(msg='The only supported value for environmentFiles is s3.') for linux_param in container.get('linuxParameters', {}): if linux_param.get('devices') and launch_type == 'FARGATE': @@ -809,10 +809,10 @@ def main(): if linux_param.get('maxSwap') and launch_type == 'FARGATE': module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.') elif linux_param.get('maxSwap') and linux_param['maxSwap'] < 0: - module.fail_json(msg='Accepted values are 0 or any positive integer.') + module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.') if linux_param.get('swappiness') and (linux_param['swappiness'] < 0 or linux_param['swappiness'] > 100): - module.fail_json(msg='Accepted values are whole numbers between 0 and 100.') + module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.') if linux_param.get('sharedMemorySize') and launch_type == 'FARGATE': module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.') From 5164ffc77606390ffb0e3ec8057e3f4d87afd686 Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 22 Mar 2021 16:10:05 +0100 Subject: [PATCH 178/683] test --- cloudfront_distribution.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index a48f687a890..e62fc89bfa4 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1975,7 +1975,7 @@ def validate_restrictions(self, config, restrictions, purge_restrictions=False): def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id): try: config['default_root_object'] = default_root_object or config.get('default_root_object', '') - config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled) + config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled) if http_version is not None or config.get('http_version'): self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions) config['http_version'] = http_version or config.get('http_version') @@ -2056,7 +2056,7 @@ def validate_distribution_from_aliases_caller_reference(self, distribution_id, a if caller_reference is not None: return self.validate_distribution_from_caller_reference(caller_reference) else: - if aliases: + if aliases and distribution_id is None: distribution_id = self.validate_distribution_id_from_alias(aliases) if distribution_id: return self.__cloudfront_facts_mgr.get_distribution(distribution_id) From 5944f5eadd58e835079ba069bf9e435d4836cfe2 Mon Sep 17 00:00:00 2001 From: abikouo Date: Thu, 25 Mar 2021 14:23:46 +0100 Subject: [PATCH 179/683] update elb modules adding ip_address_type parameters --- elb_application_lb.py | 16 ++++++++++++++-- elb_application_lb_info.py | 31 +++++++++++++------------------ elb_network_lb.py | 19 ++++++++++++++++--- 3 files changed, 43 insertions(+), 23 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index f154c8803ff..9dcab533c9c 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -179,6 +179,12 @@ - When set to C(no), 
keep the existing load balancer rules in place. Will modify and add, but will not delete. default: yes type: bool + ip_address_type: + description: + - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. + default: "ipv4" + choices: [ 'ipv4', 'dualstack' ] + type: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -476,7 +482,6 @@ def create_or_update_elb(elb_obj): """Create ELB or modify main attributes. json_exit here""" - if elb_obj.elb: # ELB exists so check subnets, security groups and tags match what has been passed @@ -562,6 +567,9 @@ def create_or_update_elb(elb_obj): rule_obj.modify() elb_obj.changed = True + # Update ELB ip address type only if option has been provided + if elb_obj.module.params.get('ip_address_type') is not None : + elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) # Get the ELB again elb_obj.update() @@ -583,6 +591,9 @@ def create_or_update_elb(elb_obj): # Change tags to ansible friendly dict snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + # ip address type + snaked_elb['ip_address_type']=elb_obj.get_elb_ip_address_type() + elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) @@ -629,7 +640,8 @@ def main(): tags=dict(type='dict'), wait_timeout=dict(type='int'), wait=dict(default=False, type='bool'), - purge_rules=dict(default=True, type='bool') + purge_rules=dict(default=True, type='bool'), + ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) ) module = AnsibleAWSModule(argument_spec=argument_spec, diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index e3003789911..9d3ad385556 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -70,96 +70,82 @@ contains: access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. - returned: when status is present type: str sample: mys3bucket access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. - returned: when status is present type: str sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. - returned: when status is present type: str sample: /my/logs availability_zones: description: The Availability Zones for the load balancer. - returned: when status is present type: list sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. - returned: when status is present type: str sample: ABCDEF12345678 created_time: description: The date and time the load balancer was created. - returned: when status is present type: str sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. - returned: when status is present type: str sample: true dns_name: description: The public DNS name of the load balancer. - returned: when status is present type: str sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. - returned: when status is present type: str sample: 60 ip_address_type: description: The type of IP addresses used by the subnets for the load balancer. - returned: when status is present type: str sample: ipv4 load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. 
- returned: when status is present type: str sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 load_balancer_name: description: The name of the load balancer. - returned: when status is present type: str sample: my-elb scheme: description: Internet-facing or internal load balancer. - returned: when status is present type: str sample: internal security_groups: description: The IDs of the security groups for the load balancer. - returned: when status is present type: list sample: ['sg-0011223344'] state: description: The state of the load balancer. - returned: when status is present type: dict sample: "{'code': 'active'}" tags: description: The tags attached to the load balancer. - returned: when status is present type: dict sample: "{ 'Tag': 'Example' }" type: description: The type of load balancer. - returned: when status is present type: str sample: application vpc_id: description: The ID of the VPC for the load balancer. - returned: when status is present type: str sample: vpc-0011223344 + ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + type: str + sample: ipv4 ''' try: @@ -213,6 +199,12 @@ def get_load_balancer_tags(connection, module, load_balancer_arn): module.fail_json_aws(e, msg="Failed to describe load balancer tags") +def get_load_balancer_ipaddresstype(connection, load_balancer_arn) : + try: + return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe load balancer tags") + def list_load_balancers(connection, module): load_balancer_arns = module.params.get("load_balancer_arns") @@ -242,6 +234,9 @@ def list_load_balancers(connection, module): for listener in load_balancer['listeners']: listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) + # Get ELB ip address type + load_balancer['IpAddressType']=get_load_balancer_ipaddresstype(connection, load_balancer['LoadBalancerArn']) + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] diff --git a/elb_network_lb.py b/elb_network_lb.py index 5e34c527276..d0d2d82f14f 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -125,6 +125,12 @@ description: - The duration in seconds to wait, used in conjunction with I(wait). type: int + ip_address_type: + description: + - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. + default: "ipv4" + choices: [ 'ipv4', 'dualstack' ] + type: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -311,10 +317,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener - def create_or_update_elb(elb_obj): """Create ELB or modify main attributes. 
json_exit here""" - if elb_obj.elb: # ELB exists so check subnets, security groups and tags match what has been passed @@ -339,6 +343,7 @@ def create_or_update_elb(elb_obj): # Create load balancer elb_obj.create_elb() + # ELB attributes elb_obj.update_elb_attributes() elb_obj.modify_elb_attributes() @@ -379,6 +384,10 @@ def create_or_update_elb(elb_obj): # Update the ELB attributes elb_obj.update_elb_attributes() + # Update ELB ip address type only if option has been provided + if elb_obj.module.params.get('ip_address_type') is not None : + elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) @@ -389,6 +398,9 @@ def create_or_update_elb(elb_obj): # Change tags to ansible friendly dict snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + # ip address type + snaked_elb['ip_address_type']=elb_obj.get_elb_ip_address_type() + elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) @@ -425,7 +437,8 @@ def main(): state=dict(choices=['present', 'absent'], type='str'), tags=dict(type='dict'), wait_timeout=dict(type='int'), - wait=dict(type='bool') + wait=dict(type='bool'), + ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) ) ) From c074acf1613243ee0e0aaafacaa7c27b7eaceec1 Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 29 Mar 2021 15:04:09 +0200 Subject: [PATCH 180/683] lint validation --- elb_application_lb.py | 1 - elb_application_lb_info.py | 8 ++------ elb_network_lb.py | 5 ++--- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index 9dcab533c9c..0d0695a0352 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -182,7 +182,6 @@ ip_address_type: description: - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. - default: "ipv4" choices: [ 'ipv4', 'dualstack' ] type: str extends_documentation_fragment: diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 9d3ad385556..17fd1324683 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -142,10 +142,6 @@ description: The ID of the VPC for the load balancer. type: str sample: vpc-0011223344 - ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. 
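The modify_ip_address_type and get_elb_ip_address_type helpers that PATCH 179 calls live in the shared elbv2 module_utils; the boto3 elbv2 operations that correspond to them are set_ip_address_type and describe_load_balancers. A hedged standalone sketch (the helper functions below are illustrative, not the collection's code):

import boto3

def set_lb_ip_address_type(client, lb_arn, ip_address_type):
    # Valid values for ALBs and NLBs here are 'ipv4' and 'dualstack'.
    client.set_ip_address_type(LoadBalancerArn=lb_arn, IpAddressType=ip_address_type)

def get_lb_ip_address_type(client, lb_arn):
    lbs = client.describe_load_balancers(LoadBalancerArns=[lb_arn])
    return lbs['LoadBalancers'][0]['IpAddressType']

# client = boto3.client('elbv2')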
- type: str - sample: ipv4 ''' try: @@ -199,7 +195,7 @@ def get_load_balancer_tags(connection, module, load_balancer_arn): module.fail_json_aws(e, msg="Failed to describe load balancer tags") -def get_load_balancer_ipaddresstype(connection, load_balancer_arn) : +def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn) : try: return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -235,7 +231,7 @@ def list_load_balancers(connection, module): listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) # Get ELB ip address type - load_balancer['IpAddressType']=get_load_balancer_ipaddresstype(connection, load_balancer['LoadBalancerArn']) + load_balancer['IpAddressType']=get_load_balancer_ipaddresstype(connection,module,load_balancer['LoadBalancerArn']) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] diff --git a/elb_network_lb.py b/elb_network_lb.py index d0d2d82f14f..70942bbf8ee 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -128,7 +128,6 @@ ip_address_type: description: - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. - default: "ipv4" choices: [ 'ipv4', 'dualstack' ] type: str extends_documentation_fragment: @@ -385,7 +384,7 @@ def create_or_update_elb(elb_obj): elb_obj.update_elb_attributes() # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None : + if elb_obj.module.params.get('ip_address_type') is not None: elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) # Convert to snake_case and merge in everything we want to return to the user @@ -399,7 +398,7 @@ def create_or_update_elb(elb_obj): snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) # ip address type - snaked_elb['ip_address_type']=elb_obj.get_elb_ip_address_type() + snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) From 17e9eee5bea8d978a4ad8ffafee0db9778e65e5e Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 29 Mar 2021 15:58:27 +0200 Subject: [PATCH 181/683] pep8 linting --- elb_application_lb.py | 6 +++--- elb_application_lb_info.py | 6 +++--- elb_network_lb.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index 0d0695a0352..284d392891f 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -567,8 +567,8 @@ def create_or_update_elb(elb_obj): elb_obj.changed = True # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None : - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + if elb_obj.module.params.get('ip_address_type') is not None: + elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) # Get the ELB again elb_obj.update() @@ -591,7 +591,7 @@ def create_or_update_elb(elb_obj): snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) # ip address type - snaked_elb['ip_address_type']=elb_obj.get_elb_ip_address_type() + snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) diff 
--git a/elb_application_lb_info.py b/elb_application_lb_info.py index 17fd1324683..fcf06e1473c 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -195,14 +195,14 @@ def get_load_balancer_tags(connection, module, load_balancer_arn): module.fail_json_aws(e, msg="Failed to describe load balancer tags") -def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn) : +def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): try: return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer tags") -def list_load_balancers(connection, module): +def list_load_balancers(connection, module): load_balancer_arns = module.params.get("load_balancer_arns") names = module.params.get("names") @@ -231,7 +231,7 @@ def list_load_balancers(connection, module): listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) # Get ELB ip address type - load_balancer['IpAddressType']=get_load_balancer_ipaddresstype(connection,module,load_balancer['LoadBalancerArn']) + load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] diff --git a/elb_network_lb.py b/elb_network_lb.py index 70942bbf8ee..5eeb2ec6220 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -316,6 +316,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener + def create_or_update_elb(elb_obj): """Create ELB or modify main attributes. 
json_exit here""" if elb_obj.elb: @@ -342,7 +343,6 @@ def create_or_update_elb(elb_obj): # Create load balancer elb_obj.create_elb() - # ELB attributes elb_obj.update_elb_attributes() elb_obj.modify_elb_attributes() @@ -385,7 +385,7 @@ def create_or_update_elb(elb_obj): # Update ELB ip address type only if option has been provided if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) From 1d39a46173e285f338fe60477225fb05b849306f Mon Sep 17 00:00:00 2001 From: abikouo <79859644+abikouo@users.noreply.github.com> Date: Fri, 23 Apr 2021 08:29:47 +0200 Subject: [PATCH 182/683] Update plugins/modules/elb_application_lb_info.py Co-authored-by: Jill R <4121322+jillr@users.noreply.github.com> --- elb_application_lb_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index fcf06e1473c..3848bc4766b 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -199,7 +199,7 @@ def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): try: return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer tags") + module.fail_json_aws(e, msg="Failed to describe load balancer ip address type") def list_load_balancers(connection, module): From dd753f26d580585bf7c4cf4fae86a23272a1c273 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 1 May 2021 14:05:56 +0200 Subject: [PATCH 183/683] route53 - Fix typo in WaiterConfig --- route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route53.py b/route53.py index 8cbb2647e4a..eb8842951d5 100644 --- a/route53.py +++ b/route53.py @@ -668,7 +668,7 @@ def main(): Id=change_resource_record_sets['ChangeInfo']['Id'], WaiterConfig=dict( Delay=WAIT_RETRY, - MaxAttemps=wait_timeout_in // WAIT_RETRY, + MaxAttempts=wait_timeout_in // WAIT_RETRY, ) ) except is_boto3_error_message('but it already exists'): From e230641505650a80cd71a84d4318faf7ded1e00a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 1 May 2021 14:09:36 +0200 Subject: [PATCH 184/683] Add retries on PriorRequestNotComplete --- route53.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/route53.py b/route53.py index eb8842951d5..7c9d88c159e 100644 --- a/route53.py +++ b/route53.py @@ -574,12 +574,15 @@ def main(): if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None: module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.") + retry_decorator = AWSRetry.jittered_backoff( + retries=MAX_AWS_RETRIES, + delay=retry_interval_in, + catch_extra_error_codes=['PriorRequestNotComplete'], + ) + # connect to the route53 endpoint try: - route53 = module.client( - 'route53', - retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=retry_interval_in) - ) + route53 = module.client('route53', retry_decorator=retry_decorator) except botocore.exceptions.HTTPClientError as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From 
486e4c4b1e7bdfa9a05889e9fcdd49b6275e0577 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Sat, 1 May 2021 14:10:25 +0200
Subject: [PATCH 185/683] Bump max_delay for route53 retries based on retry_interval

---
 route53.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/route53.py b/route53.py
index 7c9d88c159e..2bf956e5c60 100644
--- a/route53.py
+++ b/route53.py
@@ -578,6 +578,7 @@ def main():
         retries=MAX_AWS_RETRIES,
         delay=retry_interval_in,
         catch_extra_error_codes=['PriorRequestNotComplete'],
+        max_delay=max(60, retry_interval_in),
     )

     # connect to the route53 endpoint

From 29625ee0eac2e4274b3e04e686fa1778a981dc1e Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Sat, 1 May 2021 14:25:12 +0200
Subject: [PATCH 186/683] Use waiter with retries

---
 route53.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/route53.py b/route53.py
index 2bf956e5c60..ffbfdf4fc91 100644
--- a/route53.py
+++ b/route53.py
@@ -369,6 +369,7 @@
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
 from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter

 MAX_AWS_RETRIES = 10  # How many retries to perform when an API call is failing
 WAIT_RETRY = 5  # how many seconds to wait between propagation status polls
@@ -667,7 +668,7 @@ def main():
     )

     if wait_in:
-        waiter = route53.get_waiter('resource_record_sets_changed')
+        waiter = get_waiter(route53, 'resource_record_sets_changed')
         waiter.wait(
             Id=change_resource_record_sets['ChangeInfo']['Id'],
             WaiterConfig=dict(
                 Delay=WAIT_RETRY,

From 30ec4b1e34fe4530781d635ee9914ba43a9218ef Mon Sep 17 00:00:00 2001
From: Jill Rouleau
Date: Thu, 13 May 2021 10:26:42 -0700
Subject: [PATCH 187/683] Remove ec2_instance modules and tests

These modules have been migrated to amazon.aws in amazon.aws/pull/354
Update runtime.yml with redirects to that collection

---
 ec2_instance.py        | 1830 ----------------------------------------
 ec2_instance_facts.py  |    1 -
 ec2_instance_info.py   |  591 -------------
 ec2_launch_template.py |    2 +-
 4 files changed, 1 insertion(+), 2423 deletions(-)
 delete mode 100644 ec2_instance.py
 delete mode 120000 ec2_instance_facts.py
 delete mode 100644 ec2_instance_info.py

diff --git a/ec2_instance.py b/ec2_instance.py
deleted file mode 100644
index 5138fd7647a..00000000000
--- a/ec2_instance.py
+++ /dev/null
@@ -1,1830 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: ec2_instance
-version_added: 1.0.0
-short_description: Create & manage EC2 instances
-description:
-  - Create and manage AWS EC2 instances.
-  - >
-    Note: This module does not support creating
-    L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(amazon.aws.ec2) module
-    can create and manage spot instances.
-author:
-  - Ryan Scott Brown (@ryansb)
-requirements: [ "boto3", "botocore" ]
-options:
-  instance_ids:
-    description:
-      - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
-    type: list
-    elements: str
-  state:
-    description:
-      - Goal state for the instances.
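Taken together, PATCHES 183 to 186 give the route53 module a coherent retry story: a jittered-backoff decorator that also treats PriorRequestNotComplete as retryable (Route 53 rejects a change batch while a prior one is still being processed), a max_delay floor of 60 seconds, and a waiter obtained through amazon.aws's get_waiter so that waiting goes through the retry-wrapped client. Condensed from the hunks above:

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter

MAX_AWS_RETRIES = 10
WAIT_RETRY = 5  # seconds between propagation status polls

def connect_route53(module, retry_interval):
    retry_decorator = AWSRetry.jittered_backoff(
        retries=MAX_AWS_RETRIES,
        delay=retry_interval,
        max_delay=max(60, retry_interval),
        # Route 53 serialises change batches; retry instead of failing outright.
        catch_extra_error_codes=['PriorRequestNotComplete'],
    )
    return module.client('route53', retry_decorator=retry_decorator)

def wait_for_propagation(route53, change_id, wait_timeout):
    waiter = get_waiter(route53, 'resource_record_sets_changed')
    waiter.wait(
        Id=change_id,
        WaiterConfig=dict(Delay=WAIT_RETRY, MaxAttempts=wait_timeout // WAIT_RETRY),
    )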
- choices: [present, terminated, running, started, stopped, restarted, rebooted, absent] - default: present - type: str - wait: - description: - - Whether or not to wait for the desired state (use wait_timeout to customize this). - default: true - type: bool - wait_timeout: - description: - - How long to wait (in seconds) for the instance to finish booting/terminating. - default: 600 - type: int - instance_type: - description: - - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) - Only required when instance is not already present. - default: t2.micro - type: str - user_data: - description: - - Opaque blob of data which is made available to the ec2 instance - type: str - tower_callback: - description: - - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only). - - Mutually exclusive with I(user_data). - - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password. - - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible. - type: dict - suboptions: - tower_address: - description: - - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in. - type: str - job_template_id: - description: - - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+). - type: str - host_config_key: - description: - - Host configuration secret key generated by the Tower job template. - type: str - tags: - description: - - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one. - type: dict - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - This means you have to specify all the desired tags on each task affecting an instance. - default: false - type: bool - image: - description: - - An image to use for the instance. The M(amazon.aws.ec2_ami_info) module may be used to retrieve images. - One of I(image) or I(image_id) are required when instance is not already present. - type: dict - suboptions: - id: - description: - - The AMI ID. - type: str - ramdisk: - description: - - Overrides the AMI's default ramdisk ID. - type: str - kernel: - description: - - a string AKI to override the AMI kernel. - image_id: - description: - - I(ami) ID to use for the instance. One of I(image) or I(image_id) are required when instance is not already present. - - This is an alias for I(image.id). - type: str - security_groups: - description: - - A list of security group IDs or names (strings). Mutually exclusive with I(security_group). - type: list - elements: str - security_group: - description: - - A security group ID or name. Mutually exclusive with I(security_groups). - type: str - name: - description: - - The Name tag for the instance. - type: str - vpc_subnet_id: - description: - - The subnet ID in which to launch the instance (VPC) - If none is provided, M(community.aws.ec2_instance) will chose the default zone of the default VPC. - aliases: ['subnet_id'] - type: str - network: - description: - - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or - containing specifications for a single network interface. - - Use the M(amazon.aws.ec2_eni) module to create ENIs with special settings. 
- type: dict - suboptions: - interfaces: - description: - - a list of ENI IDs (strings) or a list of objects containing the key I(id). - type: list - assign_public_ip: - description: - - when true assigns a public IP address to the interface - type: bool - private_ip_address: - description: - - an IPv4 address to assign to the interface - type: str - ipv6_addresses: - description: - - a list of IPv6 addresses to assign to the network interface - type: list - source_dest_check: - description: - - controls whether source/destination checking is enabled on the interface - type: bool - description: - description: - - a description for the network interface - type: str - private_ip_addresses: - description: - - a list of IPv4 addresses to assign to the network interface - type: list - subnet_id: - description: - - the subnet to connect the network interface to - type: str - delete_on_termination: - description: - - Delete the interface when the instance it is attached to is - terminated. - type: bool - device_index: - description: - - The index of the interface to modify - type: int - groups: - description: - - a list of security group IDs to attach to the interface - type: list - volumes: - description: - - A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage. - - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id, - ebs.iops, and ebs.delete_on_termination. - - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). - type: list - elements: dict - launch_template: - description: - - The EC2 launch template to base instance configuration on. - type: dict - suboptions: - id: - description: - - the ID of the launch template (optional if name is specified). - type: str - name: - description: - - the pretty name of the launch template (optional if id is specified). - type: str - version: - description: - - the specific version of the launch template to use. If unspecified, the template default is chosen. - key_name: - description: - - Name of the SSH access key to assign to the instance - must exist in the region the instance is created. - type: str - availability_zone: - description: - - Specify an availability zone to use the default subnet it. Useful if not specifying the I(vpc_subnet_id) parameter. - - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted). - type: str - instance_initiated_shutdown_behavior: - description: - - Whether to stop or terminate an instance upon shutdown. - choices: ['stop', 'terminate'] - type: str - tenancy: - description: - - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges. - choices: ['dedicated', 'default'] - type: str - termination_protection: - description: - - Whether to enable termination protection. - This module will not terminate an instance with termination protection active, it must be turned off first. - type: bool - cpu_credit_specification: - description: - - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted. - - Choose I(unlimited) to enable buying additional CPU credits. 
- choices: ['unlimited', 'standard'] - type: str - cpu_options: - description: - - Reduce the number of vCPU exposed to the instance. - - Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory. - - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available. - - Requires botocore >= 1.10.16 - type: dict - suboptions: - threads_per_core: - description: - - Select the number of threads per core to enable. Disable or Enable Intel HT. - choices: [1, 2] - required: true - type: int - core_count: - description: - - Set the number of core to enable. - required: true - type: int - detailed_monitoring: - description: - - Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting. - type: bool - ebs_optimized: - description: - - Whether instance is should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). - type: bool - filters: - description: - - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item - consists of a filter key and a filter value. See - U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html). - for possible filters. Filter names and values are case sensitive. - - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and - subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups. - type: dict - instance_role: - description: - - The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in arn format - then the ListInstanceProfiles permission must also be granted. - U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided, - the role with a matching name will be used from the active AWS account. - type: str - placement_group: - description: - - The placement group that needs to be assigned to the instance - type: str - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Terminate every running instance in a region. Use with EXTREME caution. 
- community.aws.ec2_instance: - state: absent - filters: - instance-state-name: running - -- name: restart a particular instance by its ID - community.aws.ec2_instance: - state: restarted - instance_ids: - - i-12345678 - -- name: start an instance with a public IP address - community.aws.ec2_instance: - name: "public-compute-instance" - key_name: "prod-ssh-key" - vpc_subnet_id: subnet-5ca1ab1e - instance_type: c5.large - security_group: default - network: - assign_public_ip: true - image_id: ami-123456 - tags: - Environment: Testing - -- name: start an instance and Add EBS - community.aws.ec2_instance: - name: "public-withebs-instance" - vpc_subnet_id: subnet-5ca1ab1e - instance_type: t2.micro - key_name: "prod-ssh-key" - security_group: default - volumes: - - device_name: /dev/sda1 - ebs: - volume_size: 16 - delete_on_termination: true - -- name: start an instance with a cpu_options - community.aws.ec2_instance: - name: "public-cpuoption-instance" - vpc_subnet_id: subnet-5ca1ab1e - tags: - Environment: Testing - instance_type: c4.large - volumes: - - device_name: /dev/sda1 - ebs: - delete_on_termination: true - cpu_options: - core_count: 1 - threads_per_core: 1 - -- name: start an instance and have it begin a Tower callback on boot - community.aws.ec2_instance: - name: "tower-callback-test" - key_name: "prod-ssh-key" - vpc_subnet_id: subnet-5ca1ab1e - security_group: default - tower_callback: - # IP or hostname of tower server - tower_address: 1.2.3.4 - job_template_id: 876 - host_config_key: '[secret config key goes here]' - network: - assign_public_ip: true - image_id: ami-123456 - cpu_credit_specification: unlimited - tags: - SomeThing: "A value" - -- name: start an instance with ENI (An existing ENI ID is required) - community.aws.ec2_instance: - name: "public-eni-instance" - key_name: "prod-ssh-key" - vpc_subnet_id: subnet-5ca1ab1e - network: - interfaces: - - id: "eni-12345" - tags: - Env: "eni_on" - volumes: - - device_name: /dev/sda1 - ebs: - delete_on_termination: true - instance_type: t2.micro - image_id: ami-123456 - -- name: add second ENI interface - community.aws.ec2_instance: - name: "public-eni-instance" - network: - interfaces: - - id: "eni-12345" - - id: "eni-67890" - image_id: ami-123456 - tags: - Env: "eni_on" - instance_type: t2.micro -''' - -RETURN = ''' -instances: - description: a list of ec2 instances - returned: when wait == true - type: complex - contains: - ami_launch_index: - description: The AMI launch index, which can be used to find this instance in the launch group. - returned: always - type: int - sample: 0 - architecture: - description: The architecture of the image - returned: always - type: str - sample: x86_64 - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/sdh or xvdh). - returned: always - type: str - sample: /dev/sdh - ebs: - description: Parameters used to automatically set up EBS volumes when the instance is launched. - returned: always - type: complex - contains: - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. 
- returned: always - type: str - sample: attached - volume_id: - description: The ID of the EBS volume - returned: always - type: str - sample: vol-12345678 - client_token: - description: The idempotency token you provided when you launched the instance, if applicable. - returned: always - type: str - sample: mytoken - ebs_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - hypervisor: - description: The hypervisor type of the instance. - returned: always - type: str - sample: xen - iam_instance_profile: - description: The IAM instance profile associated with the instance, if applicable. - returned: always - type: complex - contains: - arn: - description: The Amazon Resource Name (ARN) of the instance profile. - returned: always - type: str - sample: "arn:aws:iam::000012345678:instance-profile/myprofile" - id: - description: The ID of the instance profile - returned: always - type: str - sample: JFJ397FDG400FG9FD1N - image_id: - description: The ID of the AMI used to launch the instance. - returned: always - type: str - sample: ami-0011223344 - instance_id: - description: The ID of the instance. - returned: always - type: str - sample: i-012345678 - instance_type: - description: The instance type size of the running instance. - returned: always - type: str - sample: t2.micro - key_name: - description: The name of the key pair, if this instance was launched with an associated key pair. - returned: always - type: str - sample: my-key - launch_time: - description: The time the instance was launched. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - monitoring: - description: The monitoring for the instance. - returned: always - type: complex - contains: - state: - description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. - returned: always - type: str - sample: disabled - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - association: - description: The association information for an Elastic IPv4 associated with the network interface. - returned: always - type: complex - contains: - ip_owner_id: - description: The ID of the owner of the Elastic IP address. - returned: always - type: str - sample: amazon - public_dns_name: - description: The public DNS name. - returned: always - type: str - sample: "" - public_ip: - description: The public IP address or Elastic IP address bound to the network interface. - returned: always - type: str - sample: 1.2.3.4 - attachment: - description: The network interface attachment. - returned: always - type: complex - contains: - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - attachment_id: - description: The ID of the network interface attachment. - returned: always - type: str - sample: eni-attach-3aff3f - delete_on_termination: - description: Indicates whether the network interface is deleted when the instance is terminated. - returned: always - type: bool - sample: true - device_index: - description: The index of the device on the instance for the network interface attachment. - returned: always - type: int - sample: 0 - status: - description: The attachment state. - returned: always - type: str - sample: attached - description: - description: The description. 
- returned: always - type: str - sample: My interface - groups: - description: One or more security groups. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-abcdef12 - group_name: - description: The name of the security group. - returned: always - type: str - sample: mygroup - ipv6_addresses: - description: One or more IPv6 addresses associated with the network interface. - returned: always - type: list - elements: dict - contains: - ipv6_address: - description: The IPv6 address. - returned: always - type: str - sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - owner_id: - description: The AWS account ID of the owner of the network interface. - returned: always - type: str - sample: 01234567890 - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - private_ip_addresses: - description: The private IPv4 addresses associated with the network interface. - returned: always - type: list - elements: dict - contains: - association: - description: The association information for an Elastic IP address (IPv4) associated with the network interface. - returned: always - type: complex - contains: - ip_owner_id: - description: The ID of the owner of the Elastic IP address. - returned: always - type: str - sample: amazon - public_dns_name: - description: The public DNS name. - returned: always - type: str - sample: "" - public_ip: - description: The public IP address or Elastic IP address bound to the network interface. - returned: always - type: str - sample: 1.2.3.4 - primary: - description: Indicates whether this IPv4 address is the primary private IP address of the network interface. - returned: always - type: bool - sample: true - private_ip_address: - description: The private IPv4 address of the network interface. - returned: always - type: str - sample: 10.0.0.1 - source_dest_check: - description: Indicates whether source/destination checking is enabled. - returned: always - type: bool - sample: true - status: - description: The status of the network interface. - returned: always - type: str - sample: in-use - subnet_id: - description: The ID of the subnet for the network interface. - returned: always - type: str - sample: subnet-0123456 - vpc_id: - description: The ID of the VPC for the network interface. - returned: always - type: str - sample: vpc-0123456 - placement: - description: The location where the instance launched, if applicable. - returned: always - type: complex - contains: - availability_zone: - description: The Availability Zone of the instance. - returned: always - type: str - sample: ap-southeast-2a - group_name: - description: The name of the placement group the instance is in (for cluster compute instances). - returned: always - type: str - sample: "" - tenancy: - description: The tenancy of the instance (if the instance is running in a VPC). - returned: always - type: str - sample: default - private_dns_name: - description: The private DNS name. - returned: always - type: str - sample: ip-10-0-0-1.ap-southeast-2.compute.internal - private_ip_address: - description: The IPv4 address of the network interface within the subnet. 
- returned: always - type: str - sample: 10.0.0.1 - product_codes: - description: One or more product codes. - returned: always - type: list - elements: dict - contains: - product_code_id: - description: The product code. - returned: always - type: str - sample: aw0evgkw8ef3n2498gndfgasdfsd5cce - product_code_type: - description: The type of product code. - returned: always - type: str - sample: marketplace - public_dns_name: - description: The public DNS name assigned to the instance. - returned: always - type: str - sample: - public_ip_address: - description: The public IPv4 address assigned to the instance - returned: always - type: str - sample: 52.0.0.1 - root_device_name: - description: The device name of the root device - returned: always - type: str - sample: /dev/sda1 - root_device_type: - description: The type of root device used by the AMI. - returned: always - type: str - sample: ebs - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - network.source_dest_check: - description: Indicates whether source/destination checking is enabled. - returned: always - type: bool - sample: true - state: - description: The current state of the instance. - returned: always - type: complex - contains: - code: - description: The low byte represents the state. - returned: always - type: int - sample: 16 - name: - description: The name of the state. - returned: always - type: str - sample: running - state_transition_reason: - description: The reason for the most recent state transition. - returned: always - type: str - sample: - subnet_id: - description: The ID of the subnet in which the instance is running. - returned: always - type: str - sample: subnet-00abcdef - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - virtualization_type: - description: The type of virtualization of the AMI. - returned: always - type: str - sample: hvm - vpc_id: - description: The ID of the VPC the instance is in. 
- returned: always - type: dict - sample: vpc-0011223344 -''' - -from collections import namedtuple -import re -import string -import textwrap -import time -import uuid - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib import parse as urlparse - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names - -module = None - - -def tower_callback_script(tower_conf, windows=False, passwd=None): - script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1' - if windows and passwd is not None: - script_tpl = """ - $admin = [adsi]("WinNT://./administrator, user") - $admin.PSBase.Invoke("SetPassword", "{PASS}") - Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}')) - - """ - return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url)) - elif windows and passwd is None: - script_tpl = """ - $admin = [adsi]("WinNT://./administrator, user") - Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}')) - - """ - return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url)) - elif not windows: - for p in ['tower_address', 'job_template_id', 'host_config_key']: - if p not in tower_conf: - module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p)) - - if isinstance(tower_conf['job_template_id'], string_types): - tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id']) - tpl = string.Template(textwrap.dedent("""#!/bin/bash - set -x - - retry_attempts=10 - attempt=0 - while [[ $attempt -lt $retry_attempts ]] - do - status_code=`curl --max-time 10 -v -k -s -i \ - --data "host_config_key=${host_config_key}" \ - 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \ - | head -n 1 \ - | awk '{print $2}'` - if [[ $status_code == 404 ]] - then - status_code=`curl --max-time 10 -v -k -s -i \ - --data "host_config_key=${host_config_key}" \ - 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \ - | head -n 1 \ - | awk '{print $2}'` - # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404 - fi - if [[ $status_code == 201 ]] - then - exit 0 - fi - attempt=$(( attempt + 1 )) - echo "$${status_code} received... retrying in 1 minute. 
(Attempt $${attempt})" - sleep 60 - done - exit 1 - """)) - return tpl.safe_substitute(tower_address=tower_conf['tower_address'], - template_id=tower_conf['job_template_id'], - host_config_key=tower_conf['host_config_key']) - raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.") - - -def manage_tags(match, new_tags, purge_tags, ec2): - changed = False - old_tags = boto3_tag_list_to_ansible_dict(match.get('Tags', {})) - tags_to_set, tags_to_delete = compare_aws_tags( - old_tags, new_tags, - purge_tags=purge_tags, - ) - if module.check_mode: - return bool(tags_to_delete or tags_to_set) - try: - if tags_to_set: - ec2.create_tags( - aws_retry=True, - Resources=[match['InstanceId']], - Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) - changed |= True - if tags_to_delete: - delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete) - ec2.delete_tags( - aws_retry=True, - Resources=[match['InstanceId']], - Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values)) - changed |= True - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not update tags for instance {0}".format(match['InstanceId'])) - return changed - - -def build_volume_spec(params): - volumes = params.get('volumes') or [] - for volume in volumes: - if 'ebs' in volume: - for int_value in ['volume_size', 'iops']: - if int_value in volume['ebs']: - volume['ebs'][int_value] = int(volume['ebs'][int_value]) - return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] - - -def add_or_update_instance_profile(instance, desired_profile_name, ec2): - instance_profile_setting = instance.get('IamInstanceProfile') - if instance_profile_setting and desired_profile_name: - if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')): - # great, the profile we asked for is what's there - return False - else: - desired_arn = determine_iam_role(desired_profile_name) - if instance_profile_setting.get('Arn') == desired_arn: - return False - - # update association - try: - association = ec2.describe_iam_instance_profile_associations( - aws_retry=True, - Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - # check for InvalidAssociationID.NotFound - module.fail_json_aws(e, "Could not find instance profile association") - try: - resp = ec2.replace_iam_instance_profile_association( - aws_retry=True, - AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'], - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)} - ) - return True - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, "Could not associate instance profile") - - if not instance_profile_setting and desired_profile_name: - # create association - try: - resp = ec2.associate_iam_instance_profile( - aws_retry=True, - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}, - InstanceId=instance['InstanceId'] - ) - return True - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, "Could not associate new instance profile") - - return False - - -def build_network_spec(params, ec2): - """ - Returns list of interfaces [complex] - Interface type: { - 'AssociatePublicIpAddress': True|False, - 'DeleteOnTermination': True|False, - 'Description': 'string', - 
'DeviceIndex': 123,
-        'Groups': [
-            'string',
-        ],
-        'Ipv6AddressCount': 123,
-        'Ipv6Addresses': [
-            {
-                'Ipv6Address': 'string'
-            },
-        ],
-        'NetworkInterfaceId': 'string',
-        'PrivateIpAddress': 'string',
-        'PrivateIpAddresses': [
-            {
-                'Primary': True|False,
-                'PrivateIpAddress': 'string'
-            },
-        ],
-        'SecondaryPrivateIpAddressCount': 123,
-        'SubnetId': 'string'
-    },
-    """
-
-    interfaces = []
-    network = params.get('network') or {}
-    if not network.get('interfaces'):
-        # they only specified one interface
-        spec = {
-            'DeviceIndex': 0,
-        }
-        if network.get('assign_public_ip') is not None:
-            spec['AssociatePublicIpAddress'] = network['assign_public_ip']
-
-        if params.get('vpc_subnet_id'):
-            spec['SubnetId'] = params['vpc_subnet_id']
-        else:
-            default_vpc = get_default_vpc(ec2)
-            if default_vpc is None:
-                module.fail_json(
-                    msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
-            else:
-                sub = get_default_subnet(ec2, default_vpc)
-                spec['SubnetId'] = sub['SubnetId']
-
-        if network.get('private_ip_address'):
-            spec['PrivateIpAddress'] = network['private_ip_address']
-
-        if params.get('security_group') or params.get('security_groups'):
-            groups = discover_security_groups(
-                group=params.get('security_group'),
-                groups=params.get('security_groups'),
-                subnet_id=spec['SubnetId'],
-                ec2=ec2
-            )
-            spec['Groups'] = groups
-        if network.get('description') is not None:
-            spec['Description'] = network['description']
-        # TODO more special snowflake network things
-
-        return [spec]
-
-    # handle list of `network.interfaces` options
-    for idx, interface_params in enumerate(network.get('interfaces', [])):
-        spec = {
-            'DeviceIndex': idx,
-        }
-
-        if isinstance(interface_params, string_types):
-            # naive case where user gave
-            # network_interfaces: [eni-1234, eni-4567, ....]
-            # put into normal data structure so we don't dupe code
-            interface_params = {'id': interface_params}
-
-        if interface_params.get('id') is not None:
-            # if an ID is provided, we don't want to set any other parameters.
-            spec['NetworkInterfaceId'] = interface_params['id']
-            interfaces.append(spec)
-            continue
-
-        spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
-
-        if interface_params.get('ipv6_addresses'):
-            spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
-
-        if interface_params.get('private_ip_address'):
-            spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
-
-        if interface_params.get('description'):
-            spec['Description'] = interface_params.get('description')
-
-        if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
-            spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
-        elif not spec.get('SubnetId') and not interface_params.get('id'):
-            # TODO grab a subnet from default VPC
-            raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
-
-        interfaces.append(spec)
-    return interfaces
-
-
-def warn_if_public_ip_assignment_changed(instance):
-    # This is a non-modifiable attribute.
-    assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
-    if assign_public_ip is None:
-        return
-
-    # Check that public ip assignment is the same and warn if not
-    public_dns_name = instance.get('PublicDnsName')
-    if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
-        module.warn(
-            "Unable to modify public ip assignment to {0} for instance {1}. "
-            "Whether or not to assign a public IP is determined during instance creation.".format(
-                assign_public_ip, instance['InstanceId']))
-
-
-def warn_if_cpu_options_changed(instance):
-    # This is a non-modifiable attribute.
-    cpu_options = module.params.get('cpu_options')
-    if cpu_options is None:
-        return
-
-    # Check that the CpuOptions set are the same and warn if not
-    core_count_curr = instance['CpuOptions'].get('CoreCount')
-    core_count = cpu_options.get('core_count')
-    threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
-    threads_per_core = cpu_options.get('threads_per_core')
-    if core_count_curr != core_count:
-        module.warn(
-            "Unable to modify core_count from {0} to {1}. "
-            "The number of cores is determined during instance creation.".format(
-                core_count_curr, core_count))
-
-    if threads_per_core_curr != threads_per_core:
-        module.warn(
-            "Unable to modify threads_per_core from {0} to {1}. "
-            "The number of threads per core is determined during instance creation.".format(
-                threads_per_core_curr, threads_per_core))
-
-
-def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
-
-    if subnet_id is not None:
-        try:
-            sub = ec2.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
-        except is_boto3_error_code('InvalidSubnetID.NotFound'):
-            module.fail_json(
-                msg="Could not find subnet {0} to associate security groups. "
-                    "Please check the vpc_subnet_id and security_groups parameters.".format(subnet_id))
-        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
-            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
-        parent_vpc_id = sub['Subnets'][0]['VpcId']
-
-    if group:
-        return get_ec2_security_group_ids_from_names(group, ec2, vpc_id=parent_vpc_id)
-    if groups:
-        return get_ec2_security_group_ids_from_names(groups, ec2, vpc_id=parent_vpc_id)
-    return []
-
-
-def build_top_level_options(params):
-    spec = {}
-    if params.get('image_id'):
-        spec['ImageId'] = params['image_id']
-    elif isinstance(params.get('image'), dict):
-        image = params.get('image', {})
-        spec['ImageId'] = image.get('id')
-        if 'ramdisk' in image:
-            spec['RamdiskId'] = image['ramdisk']
-        if 'kernel' in image:
-            spec['KernelId'] = image['kernel']
-    if not spec.get('ImageId') and not params.get('launch_template'):
-        module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
-
-    if params.get('key_name') is not None:
-        spec['KeyName'] = params.get('key_name')
-    if params.get('user_data') is not None:
-        spec['UserData'] = to_native(params.get('user_data'))
-    elif params.get('tower_callback') is not None:
-        spec['UserData'] = tower_callback_script(
-            tower_conf=params.get('tower_callback'),
-            windows=params.get('tower_callback').get('windows', False),
-            passwd=params.get('tower_callback').get('set_password'),
-        )
-
-    if params.get('launch_template') is not None:
-        spec['LaunchTemplate'] = {}
-        if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
-            module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
-
-        if params.get('launch_template').get('id') is not None:
-            spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
-        if params.get('launch_template').get('name') is not None:
-            spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
-        if params.get('launch_template').get('version') is not None:
-            spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
-
-    if params.get('detailed_monitoring', False):
-        spec['Monitoring'] = {'Enabled': True}
-    if params.get('cpu_credit_specification') is not None:
-        spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
-    if params.get('tenancy') is not None:
-        spec['Placement'] = {'Tenancy': params.get('tenancy')}
-    if params.get('placement_group'):
-        spec.setdefault('Placement', {})
-        spec['Placement']['GroupName'] = str(params.get('placement_group'))
-    if params.get('ebs_optimized') is not None:
-        spec['EbsOptimized'] = params.get('ebs_optimized')
-    if params.get('instance_initiated_shutdown_behavior'):
-        spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
-    if params.get('termination_protection') is not None:
-        spec['DisableApiTermination'] = params.get('termination_protection')
-    if params.get('cpu_options') is not None:
-        spec['CpuOptions'] = {}
-        spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
-        spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
-    return spec
-
-
-def build_instance_tags(params, propagate_tags_to_volumes=True):
-    tags = params.get('tags', {})
-    if params.get('name') is not None:
-        if tags is None:
-            tags = {}
-        tags['Name'] = params.get('name')
-    return [
-        {
-            'ResourceType': 'volume',
-            'Tags': ansible_dict_to_boto3_tag_list(tags),
-        },
-        {
-            'ResourceType': 'instance',
-            'Tags': ansible_dict_to_boto3_tag_list(tags),
-        },
-    ]
-
-
-def build_run_instance_spec(params, ec2):
-
-    spec = dict(
-        ClientToken=uuid.uuid4().hex,
-        MaxCount=1,
-        MinCount=1,
-    )
-    # network parameters
-    spec['NetworkInterfaces'] = build_network_spec(params, ec2)
-    spec['BlockDeviceMappings'] = build_volume_spec(params)
-    spec.update(**build_top_level_options(params))
-    spec['TagSpecifications'] = build_instance_tags(params)
-
-    # IAM profile
-    if params.get('instance_role'):
-        spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
-
-    spec['InstanceType'] = params['instance_type']
-    return spec
-
-
-def await_instances(ids, state='OK'):
-    if not module.params.get('wait', True):
-        # the user asked not to wait for anything
-        return
-
-    if module.check_mode:
-        # In check mode, there is no change even if you wait.
- return - - state_opts = { - 'OK': 'instance_status_ok', - 'STOPPED': 'instance_stopped', - 'TERMINATED': 'instance_terminated', - 'EXISTS': 'instance_exists', - 'RUNNING': 'instance_running', - } - if state not in state_opts: - module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state)) - waiter = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()).get_waiter(state_opts[state]) - try: - waiter.wait( - InstanceIds=ids, - WaiterConfig={ - 'Delay': 15, - 'MaxAttempts': module.params.get('wait_timeout', 600) // 15, - } - ) - except botocore.exceptions.WaiterConfigError as e: - module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format( - to_native(e), ', '.join(ids), state)) - except botocore.exceptions.WaiterError as e: - module.warn("Instances {0} took too long to reach state {1}. {2}".format( - ', '.join(ids), state, to_native(e))) - - -def diff_instance_and_params(instance, params, ec2, skip=None): - """boto3 instance obj, module params""" - - if skip is None: - skip = [] - - changes_to_apply = [] - id_ = instance['InstanceId'] - - ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value']) - - def value_wrapper(v): - return {'Value': v} - - param_mappings = [ - ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper), - ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper), - # user data is an immutable property - # ParamMapper('user_data', 'UserData', 'userData', value_wrapper), - ] - - for mapping in param_mappings: - if params.get(mapping.param_key) is None: - continue - if mapping.instance_key in skip: - continue - - try: - value = ec2.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_)) - if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): - arguments = dict( - InstanceId=instance['InstanceId'], - # Attribute=mapping.attribute_name, - ) - arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) - changes_to_apply.append(arguments) - - if params.get('security_group') or params.get('security_groups'): - try: - value = ec2.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) - # managing security groups - if params.get('vpc_subnet_id'): - subnet_id = params.get('vpc_subnet_id') - else: - default_vpc = get_default_vpc(ec2) - if default_vpc is None: - module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.") - else: - sub = get_default_subnet(ec2, default_vpc) - subnet_id = sub['SubnetId'] - - groups = discover_security_groups( - group=params.get('security_group'), - groups=params.get('security_groups'), - subnet_id=subnet_id, - ec2=ec2 - ) - expected_groups = groups - instance_groups = [g['GroupId'] for g in value['Groups']] - if set(instance_groups) != set(expected_groups): - changes_to_apply.append(dict( - Groups=expected_groups, - InstanceId=instance['InstanceId'] - )) - - if (params.get('network') or {}).get('source_dest_check') is 
not None: - # network.source_dest_check is nested, so needs to be treated separately - check = bool(params.get('network').get('source_dest_check')) - if instance['SourceDestCheck'] != check: - changes_to_apply.append(dict( - InstanceId=instance['InstanceId'], - SourceDestCheck={'Value': check}, - )) - - return changes_to_apply - - -def change_network_attachments(instance, params, ec2): - if (params.get('network') or {}).get('interfaces') is not None: - new_ids = [] - for inty in params.get('network').get('interfaces'): - if isinstance(inty, dict) and 'id' in inty: - new_ids.append(inty['id']) - elif isinstance(inty, string_types): - new_ids.append(inty) - # network.interfaces can create the need to attach new interfaces - old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] - to_attach = set(new_ids) - set(old_ids) - for eni_id in to_attach: - try: - ec2.attach_network_interface( - aws_retry=True, - DeviceIndex=new_ids.index(eni_id), - InstanceId=instance['InstanceId'], - NetworkInterfaceId=eni_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not attach interface {0} to instance {1}".format(eni_id, instance['InstanceId'])) - return bool(len(to_attach)) - return False - - -@AWSRetry.jittered_backoff() -def find_instances(ec2, ids=None, filters=None): - paginator = ec2.get_paginator('describe_instances') - if ids: - params = dict(InstanceIds=ids) - elif filters is None: - module.fail_json(msg="No filters provided when they were required") - else: - for key in list(filters.keys()): - if not key.startswith("tag:"): - filters[key.replace("_", "-")] = filters.pop(key) - params = dict(Filters=ansible_dict_to_boto3_filter_list(filters)) - - try: - results = _describe_instances(ec2, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe instances") - return list(results) - - -@AWSRetry.jittered_backoff() -def _describe_instances(ec2, **params): - paginator = ec2.get_paginator('describe_instances') - return paginator.paginate(**params).search('Reservations[].Instances[]') - - -def get_default_vpc(ec2): - try: - vpcs = ec2.describe_vpcs( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe default VPC") - if len(vpcs.get('Vpcs', [])): - return vpcs.get('Vpcs')[0] - return None - - -def get_default_subnet(ec2, vpc, availability_zone=None): - try: - subnets = ec2.describe_subnets( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({ - 'vpc-id': vpc['VpcId'], - 'state': 'available', - 'default-for-az': 'true', - }) - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId'])) - if len(subnets.get('Subnets', [])): - if availability_zone is not None: - subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) - if availability_zone in subs_by_az: - return subs_by_az[availability_zone] - - # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first - # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list - by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) - return by_az[0] - 
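A brief annotation on the subnet selection just above: sorting by `AvailabilityZone` and taking the first element is what makes the default-subnet choice deterministic across runs. A toy check of that ordering guarantee (subnet IDs and AZ names are invented):

```python
# Sorting by AvailabilityZone means the lexicographically-first AZ (the "a"
# subnet) always wins, so repeated runs pick the same default subnet.
subnets = [
    {"SubnetId": "subnet-bbb", "AvailabilityZone": "us-east-1b"},
    {"SubnetId": "subnet-aaa", "AvailabilityZone": "us-east-1a"},
]
by_az = sorted(subnets, key=lambda s: s["AvailabilityZone"])
assert by_az[0]["SubnetId"] == "subnet-aaa"
```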
return None - - -def ensure_instance_state(state, ec2): - if state in ('running', 'started'): - changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING', ec2=ec2) - - if failed: - module.fail_json( - msg="Unable to start instances: {0}".format(failure_reason), - reboot_success=list(changed), - reboot_failed=failed) - - module.exit_json( - msg='Instances started', - reboot_success=list(changed), - changed=bool(len(changed)), - reboot_failed=[], - instances=[pretty_instance(i) for i in instances], - ) - elif state in ('restarted', 'rebooted'): - changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_state='STOPPED', - ec2=ec2) - changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_state='RUNNING', - ec2=ec2) - - if failed: - module.fail_json( - msg="Unable to restart instances: {0}".format(failure_reason), - reboot_success=list(changed), - reboot_failed=failed) - - module.exit_json( - msg='Instances restarted', - reboot_success=list(changed), - changed=bool(len(changed)), - reboot_failed=[], - instances=[pretty_instance(i) for i in instances], - ) - elif state in ('stopped',): - changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_state='STOPPED', - ec2=ec2) - - if failed: - module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), - stop_success=list(changed), - stop_failed=failed) - - module.exit_json( - msg='Instances stopped', - stop_success=list(changed), - changed=bool(len(changed)), - stop_failed=[], - instances=[pretty_instance(i) for i in instances], - ) - elif state in ('absent', 'terminated'): - terminated, terminate_failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_state='TERMINATED', - ec2=ec2) - - if terminate_failed: - module.fail_json( - msg="Unable to terminate instances: {0}".format(failure_reason), - terminate_success=list(terminated), - terminate_failed=terminate_failed) - module.exit_json( - msg='Instances terminated', - terminate_success=list(terminated), - changed=bool(len(terminated)), - terminate_failed=[], - instances=[pretty_instance(i) for i in instances], - ) - - -def change_instance_state(filters, desired_state, ec2): - """Takes STOPPED/RUNNING/TERMINATED""" - - changed = set() - instances = find_instances(ec2, filters=filters) - to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state) - unchanged = set() - failure_reason = "" - - # TODO: better check_moding in here https://github.com/ansible-collections/community.aws/issues/16 - for inst in instances: - try: - if desired_state == 'TERMINATED': - if module.check_mode: - changed.add(inst['InstanceId']) - continue - - # TODO use a client-token to prevent double-sends of these start/stop/terminate commands - # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html - resp = ec2.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']] - if desired_state == 'STOPPED': - if inst['State']['Name'] in ('stopping', 'stopped'): - unchanged.add(inst['InstanceId']) - continue - - if module.check_mode: - changed.add(inst['InstanceId']) - continue - - resp = ec2.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - 
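A side note on `change_instance_state` above and below: the list comprehensions wrapping `changed.add(...)` are used purely for their side effect. An explicit loop expresses the same thing more conventionally; a standalone sketch, with the response shape assumed from the EC2 terminate/stop/start APIs:

```python
# Equivalent, more idiomatic form of the side-effect comprehensions.
resp = {"TerminatingInstances": [{"InstanceId": "i-12345678"}]}  # assumed API shape
changed = set()
for item in resp["TerminatingInstances"]:
    changed.add(item["InstanceId"])
assert changed == {"i-12345678"}
```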
[changed.add(i['InstanceId']) for i in resp['StoppingInstances']] - if desired_state == 'RUNNING': - if module.check_mode: - changed.add(inst['InstanceId']) - continue - - resp = ec2.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['StartingInstances']] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - try: - failure_reason = to_native(e.message) - except AttributeError: - failure_reason = to_native(e) - - if changed: - await_instances(ids=list(changed) + list(unchanged), state=desired_state) - - change_failed = list(to_change - changed) - - if instances: - instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances)) - return changed, change_failed, instances, failure_reason - - -def pretty_instance(i): - instance = camel_dict_to_snake_dict(i, ignore_list=['Tags']) - instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {})) - return instance - - -def determine_iam_role(name_or_arn): - if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): - return name_or_arn - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - try: - role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return role['InstanceProfile']['Arn'] - except is_boto3_error_code('NoSuchEntity') as e: - module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) - - -def handle_existing(existing_matches, changed, ec2, state): - if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']: - ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING', ec2=ec2) - if failed: - module.fail_json(msg="Couldn't start instances: {0}. 
Failure reason: {1}".format(instances, failure_reason)) - module.exit_json( - changed=bool(len(ins_changed)) or changed, - instances=[pretty_instance(i) for i in instances], - instance_ids=[i['InstanceId'] for i in instances], - ) - changes = diff_instance_and_params(existing_matches[0], module.params, ec2) - for c in changes: - if not module.check_mode: - try: - ec2.modify_instance_attribute(aws_retry=True, **c) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c))) - changed |= bool(changes) - changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'), ec2) - changed |= change_network_attachments(existing_matches[0], module.params, ec2) - altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches]) - module.exit_json( - changed=bool(len(changes)) or changed, - instances=[pretty_instance(i) for i in altered], - instance_ids=[i['InstanceId'] for i in altered], - changes=changes, - ) - - -def ensure_present(existing_matches, changed, ec2, state): - if len(existing_matches): - try: - handle_existing(existing_matches, changed, ec2, state) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])), - # instances=[pretty_instance(i) for i in existing_matches], - # instance_ids=[i['InstanceId'] for i in existing_matches], - ) - try: - instance_spec = build_run_instance_spec(module.params, ec2) - # If check mode is enabled,suspend 'ensure function'. - if module.check_mode: - module.exit_json( - changed=True, - spec=instance_spec, - ) - instance_response = run_instances(ec2, **instance_spec) - instances = instance_response['Instances'] - instance_ids = [i['InstanceId'] for i in instances] - - for ins in instances: - # Wait for instances to exist (don't check state) - try: - AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidInstanceID.NotFound'], - )( - ec2.describe_instance_status - )( - InstanceIds=[ins['InstanceId']], - IncludeAllInstances=True, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance") - changes = diff_instance_and_params(ins, module.params, ec2, skip=['UserData', 'EbsOptimized']) - for c in changes: - try: - ec2.modify_instance_attribute(aws_retry=True, **c) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) - - if not module.params.get('wait'): - module.exit_json( - changed=True, - instance_ids=instance_ids, - spec=instance_spec, - ) - await_instances(instance_ids) - instances = find_instances(ec2, ids=instance_ids) - - module.exit_json( - changed=True, - instances=[pretty_instance(i) for i in instances], - instance_ids=instance_ids, - spec=instance_spec, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to create new EC2 instance") - - -def run_instances(ec2, **instance_spec): - try: - return ec2.run_instances(**instance_spec) - except is_boto3_error_message('Invalid IAM Instance Profile ARN'): - # If the instance profile has just been created, it takes some time to be visible by ec2 - # So we wait 10 second and retry the run_instances - time.sleep(10) - 
return ec2.run_instances(**instance_spec) - - -def main(): - global module - argument_spec = dict( - state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']), - wait=dict(default=True, type='bool'), - wait_timeout=dict(default=600, type='int'), - # count=dict(default=1, type='int'), - image=dict(type='dict'), - image_id=dict(type='str'), - instance_type=dict(default='t2.micro', type='str'), - user_data=dict(type='str'), - tower_callback=dict(type='dict'), - ebs_optimized=dict(type='bool'), - vpc_subnet_id=dict(type='str', aliases=['subnet_id']), - availability_zone=dict(type='str'), - security_groups=dict(default=[], type='list', elements='str'), - security_group=dict(type='str'), - instance_role=dict(type='str'), - name=dict(type='str'), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), - filters=dict(type='dict', default=None), - launch_template=dict(type='dict'), - key_name=dict(type='str'), - cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']), - cpu_options=dict(type='dict', options=dict( - core_count=dict(type='int', required=True), - threads_per_core=dict(type='int', choices=[1, 2], required=True) - )), - tenancy=dict(type='str', choices=['dedicated', 'default']), - placement_group=dict(type='str'), - instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), - termination_protection=dict(type='bool'), - detailed_monitoring=dict(type='bool'), - instance_ids=dict(default=[], type='list', elements='str'), - network=dict(default=None, type='dict'), - volumes=dict(default=None, type='list', elements='dict'), - ) - # running/present are synonyms - # as are terminated/absent - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['security_groups', 'security_group'], - ['availability_zone', 'vpc_subnet_id'], - ['tower_callback', 'user_data'], - ['image_id', 'image'], - ], - supports_check_mode=True - ) - - if module.params.get('network'): - if module.params.get('network').get('interfaces'): - if module.params.get('security_group'): - module.fail_json(msg="Parameter network.interfaces can't be used with security_group") - if module.params.get('security_groups'): - module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") - - state = module.params.get('state') - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - if module.params.get('filters') is None: - filters = { - # all states except shutting-down and terminated - 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'] - } - if state == 'stopped': - # only need to change instances that aren't already stopped - filters['instance-state-name'] = ['stopping', 'pending', 'running'] - - if isinstance(module.params.get('instance_ids'), string_types): - filters['instance-id'] = [module.params.get('instance_ids')] - elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')): - filters['instance-id'] = module.params.get('instance_ids') - else: - if not module.params.get('vpc_subnet_id'): - if module.params.get('network'): - # grab AZ from one of the ENIs - ints = module.params.get('network').get('interfaces') - if ints: - filters['network-interface.network-interface-id'] = [] - for i in ints: - if isinstance(i, dict): - i = i['id'] - filters['network-interface.network-interface-id'].append(i) - else: - sub = get_default_subnet(ec2, get_default_vpc(ec2), 
availability_zone=module.params.get('availability_zone')) - filters['subnet-id'] = sub['SubnetId'] - else: - filters['subnet-id'] = [module.params.get('vpc_subnet_id')] - - if module.params.get('name'): - filters['tag:Name'] = [module.params.get('name')] - - if module.params.get('image_id'): - filters['image-id'] = [module.params.get('image_id')] - elif (module.params.get('image') or {}).get('id'): - filters['image-id'] = [module.params.get('image', {}).get('id')] - - module.params['filters'] = filters - - if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'): - module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16") - - existing_matches = find_instances(ec2, filters=module.params.get('filters')) - changed = False - - if state not in ('terminated', 'absent') and existing_matches: - for match in existing_matches: - warn_if_public_ip_assignment_changed(match) - warn_if_cpu_options_changed(match) - tags = module.params.get('tags') or {} - name = module.params.get('name') - if name: - tags['Name'] = name - changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2) - - if state in ('present', 'running', 'started'): - ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state) - elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'): - if existing_matches: - ensure_instance_state(state, ec2) - else: - module.exit_json( - msg='No matching instances found', - changed=False, - instances=[], - ) - else: - module.fail_json(msg="We don't handle the state {0}".format(state)) - - -if __name__ == '__main__': - main() diff --git a/ec2_instance_facts.py b/ec2_instance_facts.py deleted file mode 120000 index 7010fdcb95f..00000000000 --- a/ec2_instance_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_instance_info.py \ No newline at end of file diff --git a/ec2_instance_info.py b/ec2_instance_info.py deleted file mode 100644 index dafe60ea4dd..00000000000 --- a/ec2_instance_info.py +++ /dev/null @@ -1,591 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_instance_info -version_added: 1.0.0 -short_description: Gather information about ec2 instances in AWS -description: - - Gather information about ec2 instances in AWS - - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change. -author: - - Michael Schuett (@michaeljs1990) - - Rob White (@wimnat) -requirements: [ "boto3", "botocore" ] -options: - instance_ids: - description: - - If you specify one or more instance IDs, only instances that have the specified IDs are returned. - required: false - type: list - elements: str - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See - U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter - names and values are case sensitive. - required: false - default: {} - type: dict - minimum_uptime: - description: - - Minimum running uptime in minutes of instances. For example if I(uptime) is C(60) return all instances that have run more than 60 minutes. 
- required: false - aliases: ['uptime'] - type: int - - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all instances - community.aws.ec2_instance_info: - -- name: Gather information about all instances in AZ ap-southeast-2a - community.aws.ec2_instance_info: - filters: - availability-zone: ap-southeast-2a - -- name: Gather information about a particular instance using ID - community.aws.ec2_instance_info: - instance_ids: - - i-12345678 - -- name: Gather information about any instance with a tag key Name and value Example - community.aws.ec2_instance_info: - filters: - "tag:Name": Example - -- name: Gather information about any instance in states "shutting-down", "stopping", "stopped" - community.aws.ec2_instance_info: - filters: - instance-state-name: [ "shutting-down", "stopping", "stopped" ] - -- name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes - community.aws.ec2_instance_info: - region: "{{ ec2_region }}" - uptime: 60 - filters: - "tag:Name": "RHEL-*" - instance-state-name: [ "running"] - register: ec2_node_info - -''' - -RETURN = r''' -instances: - description: a list of ec2 instances - returned: always - type: complex - contains: - ami_launch_index: - description: The AMI launch index, which can be used to find this instance in the launch group. - returned: always - type: int - sample: 0 - architecture: - description: The architecture of the image - returned: always - type: str - sample: x86_64 - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/sdh or xvdh). - returned: always - type: str - sample: /dev/sdh - ebs: - description: Parameters used to automatically set up EBS volumes when the instance is launched. - returned: always - type: complex - contains: - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: attached - volume_id: - description: The ID of the EBS volume - returned: always - type: str - sample: vol-12345678 - cpu_options: - description: The CPU options set for the instance. - returned: always if botocore version >= 1.10.16 - type: complex - contains: - core_count: - description: The number of CPU cores for the instance. - returned: always - type: int - sample: 1 - threads_per_core: - description: The number of threads per CPU core. On supported instance, a value of 1 means Intel Hyper-Threading Technology is disabled. - returned: always - type: int - sample: 1 - client_token: - description: The idempotency token you provided when you launched the instance, if applicable. - returned: always - type: str - sample: mytoken - ebs_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - hypervisor: - description: The hypervisor type of the instance. 
- returned: always - type: str - sample: xen - iam_instance_profile: - description: The IAM instance profile associated with the instance, if applicable. - returned: always - type: complex - contains: - arn: - description: The Amazon Resource Name (ARN) of the instance profile. - returned: always - type: str - sample: "arn:aws:iam::000012345678:instance-profile/myprofile" - id: - description: The ID of the instance profile - returned: always - type: str - sample: JFJ397FDG400FG9FD1N - image_id: - description: The ID of the AMI used to launch the instance. - returned: always - type: str - sample: ami-0011223344 - instance_id: - description: The ID of the instance. - returned: always - type: str - sample: i-012345678 - instance_type: - description: The instance type size of the running instance. - returned: always - type: str - sample: t2.micro - key_name: - description: The name of the key pair, if this instance was launched with an associated key pair. - returned: always - type: str - sample: my-key - launch_time: - description: The time the instance was launched. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - monitoring: - description: The monitoring for the instance. - returned: always - type: complex - contains: - state: - description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. - returned: always - type: str - sample: disabled - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - association: - description: The association information for an Elastic IPv4 associated with the network interface. - returned: always - type: complex - contains: - ip_owner_id: - description: The ID of the owner of the Elastic IP address. - returned: always - type: str - sample: amazon - public_dns_name: - description: The public DNS name. - returned: always - type: str - sample: "" - public_ip: - description: The public IP address or Elastic IP address bound to the network interface. - returned: always - type: str - sample: 1.2.3.4 - attachment: - description: The network interface attachment. - returned: always - type: complex - contains: - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2017-03-23T22:51:24+00:00" - attachment_id: - description: The ID of the network interface attachment. - returned: always - type: str - sample: eni-attach-3aff3f - delete_on_termination: - description: Indicates whether the network interface is deleted when the instance is terminated. - returned: always - type: bool - sample: true - device_index: - description: The index of the device on the instance for the network interface attachment. - returned: always - type: int - sample: 0 - status: - description: The attachment state. - returned: always - type: str - sample: attached - description: - description: The description. - returned: always - type: str - sample: My interface - groups: - description: One or more security groups. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-abcdef12 - group_name: - description: The name of the security group. - returned: always - type: str - sample: mygroup - ipv6_addresses: - description: One or more IPv6 addresses associated with the network interface. - returned: always - type: list - elements: dict - contains: - ipv6_address: - description: The IPv6 address. 
- returned: always - type: str - sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - owner_id: - description: The AWS account ID of the owner of the network interface. - returned: always - type: str - sample: 01234567890 - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - private_ip_addresses: - description: The private IPv4 addresses associated with the network interface. - returned: always - type: list - elements: dict - contains: - association: - description: The association information for an Elastic IP address (IPv4) associated with the network interface. - returned: always - type: complex - contains: - ip_owner_id: - description: The ID of the owner of the Elastic IP address. - returned: always - type: str - sample: amazon - public_dns_name: - description: The public DNS name. - returned: always - type: str - sample: "" - public_ip: - description: The public IP address or Elastic IP address bound to the network interface. - returned: always - type: str - sample: 1.2.3.4 - primary: - description: Indicates whether this IPv4 address is the primary private IP address of the network interface. - returned: always - type: bool - sample: true - private_ip_address: - description: The private IPv4 address of the network interface. - returned: always - type: str - sample: 10.0.0.1 - source_dest_check: - description: Indicates whether source/destination checking is enabled. - returned: always - type: bool - sample: true - status: - description: The status of the network interface. - returned: always - type: str - sample: in-use - subnet_id: - description: The ID of the subnet for the network interface. - returned: always - type: str - sample: subnet-0123456 - vpc_id: - description: The ID of the VPC for the network interface. - returned: always - type: str - sample: vpc-0123456 - placement: - description: The location where the instance launched, if applicable. - returned: always - type: complex - contains: - availability_zone: - description: The Availability Zone of the instance. - returned: always - type: str - sample: ap-southeast-2a - group_name: - description: The name of the placement group the instance is in (for cluster compute instances). - returned: always - type: str - sample: "" - tenancy: - description: The tenancy of the instance (if the instance is running in a VPC). - returned: always - type: str - sample: default - private_dns_name: - description: The private DNS name. - returned: always - type: str - sample: ip-10-0-0-1.ap-southeast-2.compute.internal - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - product_codes: - description: One or more product codes. - returned: always - type: list - elements: dict - contains: - product_code_id: - description: The product code. - returned: always - type: str - sample: aw0evgkw8ef3n2498gndfgasdfsd5cce - product_code_type: - description: The type of product code. - returned: always - type: str - sample: marketplace - public_dns_name: - description: The public DNS name assigned to the instance. 
- returned: always - type: str - sample: - public_ip_address: - description: The public IPv4 address assigned to the instance - returned: always - type: str - sample: 52.0.0.1 - root_device_name: - description: The device name of the root device - returned: always - type: str - sample: /dev/sda1 - root_device_type: - description: The type of root device used by the AMI. - returned: always - type: str - sample: ebs - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - source_dest_check: - description: Indicates whether source/destination checking is enabled. - returned: always - type: bool - sample: true - state: - description: The current state of the instance. - returned: always - type: complex - contains: - code: - description: The low byte represents the state. - returned: always - type: int - sample: 16 - name: - description: The name of the state. - returned: always - type: str - sample: running - state_transition_reason: - description: The reason for the most recent state transition. - returned: always - type: str - sample: - subnet_id: - description: The ID of the subnet in which the instance is running. - returned: always - type: str - sample: subnet-00abcdef - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - virtualization_type: - description: The type of virtualization of the AMI. - returned: always - type: str - sample: hvm - vpc_id: - description: The ID of the VPC the instance is in. 
-        returned: always
-        type: str
-        sample: vpc-0011223344
-'''
-
-import datetime
-
-try:
-    import botocore
-except ImportError:
-    pass  # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-
-
-@AWSRetry.jittered_backoff()
-def _describe_instances(connection, **params):
-    paginator = connection.get_paginator('describe_instances')
-    return paginator.paginate(**params).build_full_result()
-
-
-def list_ec2_instances(connection, module):
-
-    instance_ids = module.params.get("instance_ids")
-    uptime = module.params.get('minimum_uptime')
-    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-
-    try:
-        reservations = _describe_instances(connection, InstanceIds=instance_ids, Filters=filters)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to list ec2 instances")
-
-    instances = []
-
-    if uptime:
-        timedelta = int(uptime) if uptime else 0
-        oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta)
-        # Get instances from reservations
-        for reservation in reservations['Reservations']:
-            instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time]
-    else:
-        for reservation in reservations['Reservations']:
-            instances = instances + reservation['Instances']
-
-    # Turn the boto3 result into ansible_friendly_snaked_names
-    snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
-
-    # Turn the boto3 result into an ansible friendly tag dictionary
-    for instance in snaked_instances:
-        instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
-
-    module.exit_json(instances=snaked_instances)
-
-
-def main():
-
-    argument_spec = dict(
-        minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']),
-        instance_ids=dict(default=[], type='list', elements='str'),
-        filters=dict(default={}, type='dict')
-    )
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ['instance_ids', 'filters']
-        ],
-        supports_check_mode=True,
-    )
-    if module._name == 'ec2_instance_facts':
-        module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", date='2021-12-01', collection_name='community.aws')
-
-    try:
-        connection = module.client('ec2')
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS')
-
-    list_ec2_instances(connection, module)
-
-
-if __name__ == '__main__':
-    main()
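The minimum_uptime handling in the module above reduces to one timestamp comparison per instance. A minimal sketch of the same logic outside the module, assuming a reservations structure as returned by describe_instances (names and values here are illustrative only):

    import datetime

    def up_for_at_least(instance, minutes):
        # describe_instances returns LaunchTime as a timezone-aware datetime;
        # the module strips tzinfo so it compares cleanly against utcnow()
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes)
        return instance['LaunchTime'].replace(tzinfo=None) < cutoff

    # keep only instances launched at least 60 minutes ago
    old_enough = [instance
                  for reservation in reservations['Reservations']
                  for instance in reservation['Instances']
                  if up_for_at_least(instance, 60)]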
diff --git a/ec2_launch_template.py b/ec2_launch_template.py
index c2189081a68..691dc0306b0 100644
--- a/ec2_launch_template.py
+++ b/ec2_launch_template.py
@@ -13,7 +13,7 @@
 description:
   - Create, modify, and delete EC2 Launch Templates, which can be used to
     create individual instances or with Autoscaling Groups.
-  - The M(community.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all
+  - The M(amazon.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all
     parameters on those tasks, be passed a Launch Template which contains settings like instance size,
     disk type, subnet, and more.
 requirements:

From d233351729d972782616f5a85a44bdd1881d2b6e Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 29 Jun 2021 10:36:30 +0200
Subject: [PATCH 188/683] Move ec2_vpc_peer over to shared ec2 tagging code

Add NotFound retries when tagging a new VPC Peering connection

---
 ec2_vpc_peer.py | 73 +++++++++++++++++--------------------------
 1 file changed, 25 insertions(+), 48 deletions(-)

diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py
index 29011094766..4e1e3a1847e 100644
--- a/ec2_vpc_peer.py
+++ b/ec2_vpc_peer.py
@@ -44,6 +44,12 @@
       - Dictionary of tags to look for and apply when creating a Peering Connection.
     required: false
     type: dict
+  purge_tags:
+    description:
+      - Remove tags not listed in I(tags).
+    type: bool
+    default: true
+    version_added: 2.0.0
   state:
     description:
       - Create, delete, accept, reject a peering connection.
@@ -367,6 +373,8 @@
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
 
 
 def wait_for_state(client, module, state, pcx_id):
@@ -385,26 +393,6 @@ def wait_for_state(client, module, state, pcx_id):
         module.fail_json_aws(e, "Unable to describe Peering Connection while waiting for state to change")
 
 
-def tags_changed(pcx_id, client, module):
-    changed = False
-    tags = dict()
-    if module.params.get('tags'):
-        tags = module.params.get('tags')
-    peering_connection = get_peering_connection_by_id(pcx_id, client, module)
-    if peering_connection['Tags']:
-        pcx_values = [t.values() for t in peering_connection['Tags']]
-        pcx_tags = [item for sublist in pcx_values for item in sublist]
-        tag_values = [[key, str(value)] for key, value in tags.items()]
-        tags = [item for sublist in tag_values for item in sublist]
-        if sorted(pcx_tags) == sorted(tags):
-            changed = False
-        elif tags:
-            delete_tags(pcx_id, client, module)
-            create_tags(pcx_id, client, module)
-            changed = True
-    return changed
-
-
 def describe_peering_connections(params, client):
     peer_filter = {
         'requester-vpc-info.vpc-id': params['VpcId'],
@@ -445,7 +433,10 @@ def create_peer_connection(client, module):
     peering_conns = describe_peering_connections(params, client)
     for peering_conn in peering_conns['VpcPeeringConnections']:
         pcx_id = peering_conn['VpcPeeringConnectionId']
-        if tags_changed(pcx_id, client, module):
+        if ensure_ec2_tags(client, module, pcx_id,
+                           purge_tags=module.params.get('purge_tags'),
+                           tags=module.params.get('tags'),
+                           ):
             changed = True
         if is_active(peering_conn):
             return (changed, peering_conn)
@@ -454,10 +445,14 @@ def create_peer_connection(client, module):
     try:
         peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params)
         pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
+        if module.params.get('tags'):
+            # Once the minimum botocore version is bumped to > 1.17.24
+            # (hopefully community.aws 3.0.0) we can add the tags to the
+            # creation parameters
+            add_ec2_tags(client, module, pcx_id, module.params.get('tags'),
+                         retry_codes=['InvalidVpcPeeringConnectionID.NotFound'])
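# Illustrative aside, not part of this patch: the retry_codes above matter
# because EC2 is eventually consistent, so a peering connection returned by
# create_vpc_peering_connection may briefly be unknown to other API calls.
# A sketch of roughly the same effect using the collection's AWSRetry helper
# directly (the tag values are made up):
tag_with_retry = AWSRetry.jittered_backoff(
    catch_extra_error_codes=['InvalidVpcPeeringConnectionID.NotFound'])
tag_with_retry(client.create_tags)(
    Resources=[pcx_id],
    Tags=[{'Key': 'Name', 'Value': 'example-peering'}])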
the + # creation parameters + add_ec2_tags(client, module, pcx_id, module.params.get('tags'), + retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) if module.params.get('wait'): wait_for_state(client, module, 'pending-acceptance', pcx_id) - if module.params.get('tags'): - create_tags(pcx_id, client, module) changed = True return (changed, peering_conn['VpcPeeringConnection']) except botocore.exceptions.ClientError as e: @@ -531,13 +526,17 @@ def accept_reject(state, client, module): client.reject_vpc_peering_connection(aws_retry=True, **params) target_state = 'rejected' if module.params.get('tags'): - create_tags(peering_id, client, module) + add_ec2_tags(client, module, peering_id, module.params.get('tags'), + retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) changed = True if module.params.get('wait'): wait_for_state(client, module, target_state, peering_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) - if tags_changed(peering_id, client, module): + if ensure_ec2_tags(client, module, peering_id, + purge_tags=module.params.get('purge_tags'), + tags=module.params.get('tags'), + ): changed = True # Relaod peering conection infos to return latest state/params @@ -545,29 +544,6 @@ def accept_reject(state, client, module): return (changed, vpc_peering_connection) -def load_tags(module): - tags = [] - if module.params.get('tags'): - for name, value in module.params.get('tags').items(): - tags.append({'Key': name, 'Value': str(value)}) - return tags - - -def create_tags(pcx_id, client, module): - try: - delete_tags(pcx_id, client, module) - client.create_tags(aws_retry=True, Resources=[pcx_id], Tags=load_tags(module)) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - -def delete_tags(pcx_id, client, module): - try: - client.delete_tags(aws_retry=True, Resources=[pcx_id]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - def main(): argument_spec = dict( vpc_id=dict(), @@ -576,6 +552,7 @@ def main(): peering_id=dict(), peer_owner_id=dict(), tags=dict(required=False, type='dict'), + purge_tags=dict(default=True, type='bool'), state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), wait=dict(default=False, type='bool'), ) From 46915813867345eac74131766ab699a36a9b5a28 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 29 Jun 2021 15:37:50 +0200 Subject: [PATCH 189/683] ec2_vpc_route_table - use ensure_ec2_tags --- ec2_vpc_route_table.py | 64 ++++++------------------------------------ 1 file changed, 8 insertions(+), 56 deletions(-) diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py index 6549f78881b..049e81056f1 100644 --- a/ec2_vpc_route_table.py +++ b/ec2_vpc_route_table.py @@ -238,10 +238,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -257,14 +256,6 @@ def describe_igws_with_backoff(connection, **params): return paginator.paginate(**params).build_full_result()['InternetGateways'] -@AWSRetry.jittered_backoff() -def describe_tags_with_backoff(connection, resource_id): - filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id}) - paginator = connection.get_paginator('describe_tags') - tags = paginator.paginate(Filters=filters).build_full_result()['Tags'] - return boto3_tag_list_to_ansible_dict(tags) - - @AWSRetry.jittered_backoff() def describe_route_tables_with_backoff(connection, **params): try: @@ -349,45 +340,6 @@ def tags_match(match_tags, candidate_tags): for k, v in match_tags.items())) -def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None): - try: - cur_tags = describe_tags_with_backoff(connection, resource_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list tags for VPC') - - to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags) - - if not to_add and not to_delete: - return {'changed': False, 'tags': cur_tags} - if check_mode: - if not purge_tags: - tags = cur_tags.update(tags) - return {'changed': True, 'tags': tags} - - if to_delete: - try: - connection.delete_tags( - aws_retry=True, - Resources=[resource_id], - Tags=[{'Key': k} for k in to_delete]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete tags") - if to_add: - try: - connection.create_tags( - aws_retry=True, - Resources=[resource_id], - Tags=ansible_dict_to_boto3_tag_list(to_add)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create tags") - - try: - latest_tags = describe_tags_with_backoff(connection, resource_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list tags for VPC') - return {'changed': True, 'tags': latest_tags} - - def get_route_table_by_id(connection, module, route_table_id): route_table = None @@ -410,7 +362,7 @@ def get_route_table_by_tags(connection, module, vpc_id, tags): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route table") for table in route_tables: - this_tags = describe_tags_with_backoff(connection, table['RouteTableId']) + this_tags = describe_ec2_tags(connection, module, table['RouteTableId']) if tags_match(tags, this_tags): route_table = table count += 1 @@ -625,7 +577,7 @@ def ensure_route_table_absent(connection, module): def get_route_table_info(connection, module, route_table): result = get_route_table_by_id(connection, module, route_table['RouteTableId']) try: - result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId']) + result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for route table") result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) @@ -711,10 +663,10 @@ def ensure_route_table_present(connection, module): changed = changed or result['changed'] if not tags_valid and tags is not 
None:
-        result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags,
-                             purge_tags=purge_tags, check_mode=module.check_mode)
-        route_table['Tags'] = result['tags']
-        changed = changed or result['changed']
+        changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'],
+                                   tags=tags, purge_tags=purge_tags,
+                                   retry_codes=['InvalidRouteTableID.NotFound'])
+        route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId'])
 
     if subnets is not None:
         associated_subnets = find_subnets(connection, module, vpc_id, subnets)

From d9d0b7ce813863fd58aa70b8e1c9f424f5b48bbc Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 29 Jun 2021 15:48:53 +0200
Subject: [PATCH 190/683] Retry on InvalidRouteTableID.NotFound, to work around
 race conditions

---
 ec2_vpc_route_table.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py
index 049e81056f1..afc3487110a 100644
--- a/ec2_vpc_route_table.py
+++ b/ec2_vpc_route_table.py
@@ -703,7 +703,9 @@ def main():
                                ['state', 'present', ['vpc_id']]],
                            supports_check_mode=True)
 
-    retry_decorator = AWSRetry.jittered_backoff(retries=10)
+    # The tests for RouteTable existence use their own decorator, so we can
+    # safely retry on InvalidRouteTableID.NotFound
+    retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound'])
     connection = module.client('ec2', retry_decorator=retry_decorator)
 
     state = module.params.get('state')
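The race this commit works around is easy to picture: a route table that create_route_table just returned can still raise InvalidRouteTableID.NotFound from an immediate follow-up describe call. A hedged sketch of the interaction, assuming the retry-decorated client configured above (the IDs are made up):

    rt = connection.create_route_table(aws_retry=True, VpcId='vpc-0123456')
    rt_id = rt['RouteTable']['RouteTableId']
    # On an eventually consistent endpoint this call can initially fail with
    # InvalidRouteTableID.NotFound; catch_extra_error_codes above makes the
    # decorated client retry it instead of failing the task.
    tables = connection.describe_route_tables(aws_retry=True, RouteTableIds=[rt_id])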
From d566b8ea30c248e3f0aa49da7fbd2c8169beddbb Mon Sep 17 00:00:00 2001
From: Daniil Kupchenko
Date: Sun, 18 Apr 2021 12:04:07 +0300
Subject: [PATCH 191/683] feat(kafka): aws_msk_config and aws_msk_cluster
 modules have been added

---
 aws_msk_cluster.py | 827 +++++++++++++++++++++++++++++++++++++++++++++
 aws_msk_config.py  | 295 ++++++++++++++++
 2 files changed, 1122 insertions(+)
 create mode 100644 aws_msk_cluster.py
 create mode 100644 aws_msk_config.py

diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py
new file mode 100644
index 00000000000..f4d9a5db84f
--- /dev/null
+++ b/aws_msk_cluster.py
@@ -0,0 +1,827 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: aws_msk_cluster
+short_description: Manage Amazon MSK clusters.
+version_added: "1.5.0"
+requirements:
+    - botocore >= 1.17.42
+    - boto3 >= 1.17.9
+description:
+    - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters.
+author:
+    - Daniil Kupchenko (@oukooveu)
+options:
+    state:
+        description: Create (present) or delete (absent) cluster.
+        choices: ['present', 'absent']
+        type: str
+        default: 'present'
+    name:
+        description: The name of the cluster.
+        required: true
+        type: str
+    version:
+        description:
+            - The version of Apache Kafka.
+            - This version should exist in the given configuration.
+            - This parameter is required when I(state=present).
+        type: str
+    configuration_arn:
+        description:
+            - ARN of the configuration to use.
+            - This parameter is required when I(state=present).
+        type: str
+    configuration_revision:
+        description:
+            - The revision of the configuration to use.
+            - This parameter is required when I(state=present).
+        type: int
+    nodes:
+        description: The number of broker nodes in the cluster. Should be greater than or equal to two.
+        type: int
+        default: 3
+    instance_type:
+        description:
+            - The type of Amazon EC2 instances to use for Kafka brokers.
+            - The update operation requires boto3 version >= 1.16.58.
+        choices:
+            - kafka.t3.small
+            - kafka.m5.large
+            - kafka.m5.xlarge
+            - kafka.m5.2xlarge
+            - kafka.m5.4xlarge
+        default: kafka.t3.small
+        type: str
+    ebs_volume_size:
+        description: The size in GiB of the EBS volume for the data drive on each broker node.
+        type: int
+        default: 100
+    subnets:
+        description:
+            - The list of subnets to connect to in the client virtual private cloud (VPC).
+              AWS creates elastic network interfaces inside these subnets. Client applications use
+              elastic network interfaces to produce and consume data.
+              Client subnets can't be in Availability Zone us-east-1e.
+            - This parameter is required when I(state=present).
+        type: list
+        elements: str
+    security_groups:
+        description:
+            - The AWS security groups to associate with the elastic network interfaces in order to specify
+              who can connect to and communicate with the Amazon MSK cluster.
+              If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC.
+        type: list
+        elements: str
+    encryption:
+        description:
+            - Includes all encryption-related information.
+            - Effective only for a new cluster and cannot be updated.
+        type: dict
+        suboptions:
+            kms_key_id:
+                description:
+                    - The ARN of the AWS KMS key for encrypting data at rest. If you don't specify a KMS key, MSK creates one for you and uses it.
+                default: Null
+                type: str
+            in_transit:
+                description: The details for encryption in transit.
+                type: dict
+                suboptions:
+                    in_cluster:
+                        description:
+                            - When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted.
+                              When set to false, the communication happens in plaintext.
+                        type: bool
+                        default: True
+                    client_broker:
+                        description:
+                            - Indicates the encryption setting for data in transit between clients and brokers. The following are the possible values.
+                              TLS means that client-broker communication is enabled with TLS only.
+                              TLS_PLAINTEXT means that client-broker communication is enabled for both TLS-encrypted and plaintext data.
+                              PLAINTEXT means that client-broker communication is enabled in plaintext only.
+                        choices:
+                            - TLS
+                            - TLS_PLAINTEXT
+                            - PLAINTEXT
+                        type: str
+                        default: TLS
+    authentication:
+        description:
+            - Includes all client authentication related information.
+            - Effective only for a new cluster and cannot be updated.
+        type: dict
+        suboptions:
+            tls_ca_arn:
+                description: List of ACM Certificate Authority ARNs.
+                type: list
+                elements: str
+            sasl_scram:
+                description: Whether SASL/SCRAM authentication is enabled.
+                type: bool
+                default: False
+    enhanced_monitoring:
+        description: Specifies the level of monitoring for the MSK cluster.
+        choices:
+            - DEFAULT
+            - PER_BROKER
+            - PER_TOPIC_PER_BROKER
+            - PER_TOPIC_PER_PARTITION
+        default: DEFAULT
+        type: str
+    open_monitoring:
+        description: The settings for open monitoring.
+        type: dict
+        suboptions:
+            jmx_exporter:
+                description: Indicates whether you want to enable or disable the JMX Exporter.
+                type: bool
+                default: False
+            node_exporter:
+                description: Indicates whether you want to enable or disable the Node Exporter.
+                type: bool
+                default: False
+    logging:
+        description: Logging configuration.
+        type: dict
+        suboptions:
+            cloudwatch:
+                description: Details of the CloudWatch Logs destination for broker logs.
+ type: dict + suboptions: + enabled: + description: Specifies whether broker logs get sent to the specified CloudWatch Logs destination. + type: bool + default: False + log_group: + description: The CloudWatch log group that is the destination for broker logs. + type: str + required: False + firehose: + description: Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs. + type: dict + suboptions: + enabled: + description: Specifies whether broker logs get send to the specified Kinesis Data Firehose delivery stream. + type: bool + default: False + delivery_stream: + description: The Kinesis Data Firehose delivery stream that is the destination for broker logs. + type: str + required: False + s3: + description: Details of the Amazon S3 destination for broker logs. + type: dict + suboptions: + enabled: + description: Specifies whether broker logs get sent to the specified Amazon S3 destination. + type: bool + default: False + bucket: + description: The name of the S3 bucket that is the destination for broker logs. + type: str + required: False + prefix: + description: The S3 prefix that is the destination for broker logs. + type: str + required: False + wait: + description: Whether to wait for the cluster to be available or deleted. + type: bool + default: false + wait_timeout: + description: How long to wait, seconds. Cluster creation can take up to 20-30 minutes. + type: int + default: 3600 + tags: + description: Tag dictionary to apply to the cluster. + type: dict + purge_tags: + description: Remove tags not listed in I(tags) when tags is specified. + default: true + type: bool +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +notes: + - All operations are time consuming, for example create takes 20-30 minutes, + update kafka version -- more than one hour, update configuration -- 10-15 minutes; + - Cluster's brokers get evenly distributed over a number of availability zones + that's equal to the number of subnets. +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- aws_msk_cluster: + name: kafka-cluster + state: present + version: 2.6.1 + nodes: 6 + ebs_volume_size: "{{ aws_msk_options.ebs_volume_size }}" + subnets: + - subnet-e3b48ce7c25861eeb + - subnet-2990c8b25b07ddd43 + - subnet-d9fbeaf46c54bfab6 + wait: true + wait_timeout: 1800 + configuration_arn: arn:aws:kafka:us-east-1:000000000001:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1 + configuration_revision: 1 + +- aws_msk_cluster: + name: kafka-cluster + state: absent +""" + +RETURN = r""" +# These are examples of possible return values, and in general should use other names for return values. + +bootstrap_broker_string: + description: A list of brokers that a client application can use to bootstrap. + type: complex + contains: + plain: + description: A string containing one or more hostname:port pairs. + type: str + tls: + description: A string containing one or more DNS names (or IP) and TLS port pairs. + type: str + returned: I(state=present) and cluster state is I(ACTIVE) +cluster_info: + description: Description of the MSK cluster. + type: dict + returned: I(state=present) +response: + description: The response from actual API call. 
+ type: dict + returned: always + sample: {} +""" + +import time + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( + camel_dict_to_snake_dict, + compare_aws_tags, + AWSRetry, +) + + +@AWSRetry.backoff(tries=5, delay=5) +def list_clusters_with_backoff(client, cluster_name): + paginator = client.get_paginator("list_clusters") + return paginator.paginate(ClusterNameFilter=cluster_name).build_full_result() + + +@AWSRetry.backoff(tries=5, delay=5) +def list_nodes_with_backoff(client, cluster_arn): + paginator = client.get_paginator("list_nodes") + return paginator.paginate(ClusterArn=cluster_arn).build_full_result() + + +def find_cluster_by_name(client, module, cluster_name): + cluster_list = list_clusters_with_backoff(client, cluster_name).get("ClusterInfoList", []) + if cluster_list: + if len(cluster_list) != 1: + module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name)) + return cluster_list[0] + return {} + + +def get_cluster_state(client, module, arn): + try: + response = client.describe_cluster(ClusterArn=arn) + except client.exceptions.NotFoundException: + return "DELETED" + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, "Failed to get kafka cluster state") + return response["ClusterInfo"]["State"] + + +def get_cluster_version(client, module, arn): + try: + response = client.describe_cluster(ClusterArn=arn) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, "Failed to get kafka cluster version") + return response["ClusterInfo"]["CurrentVersion"] + + +def wait_for_cluster_state(client, module, arn, state="ACTIVE"): + start = time.time() + timeout = int(module.params.get("wait_timeout")) + check_interval = 60 + while True: + current_state = get_cluster_state(client, module, arn) + if current_state == state: + return + if time.time() - start > timeout: + module.fail_json( + msg="Timeout waiting for cluster {0} (desired state is '{1}')".format( + current_state, state + ) + ) + time.sleep(check_interval) + + +def prepare_create_options(module): + """ + Return data structure for cluster create operation + """ + + c_params = { + "ClusterName": module.params["name"], + "KafkaVersion": module.params["version"], + "ConfigurationInfo": { + "Arn": module.params["configuration_arn"], + "Revision": module.params["configuration_revision"], + }, + "NumberOfBrokerNodes": module.params["nodes"], + "BrokerNodeGroupInfo": { + "ClientSubnets": module.params["subnets"], + "InstanceType": module.params["instance_type"], + } + } + + if module.params["security_groups"] and len(module.params["security_groups"]) != 0: + c_params["BrokerNodeGroupInfo"]["SecurityGroups"] = module.params.get("security_groups") + + if module.params["ebs_volume_size"]: + c_params["BrokerNodeGroupInfo"]["StorageInfo"] = { + "EbsStorageInfo": { + "VolumeSize": module.params.get("ebs_volume_size") + } + } + + if module.params["encryption"]: + c_params["EncryptionInfo"] = {} + if module.params["encryption"].get("kms_key_id"): + c_params["EncryptionInfo"]["EncryptionAtRest"] = { + "DataVolumeKMSKeyId": module.params["encryption"]["kms_key_id"] + } + c_params["EncryptionInfo"]["EncryptionInTransit"] = { + "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", 
"TLS"), + "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True) + } + + if module.params["authentication"]: + c_params["ClientAuthentication"] = {} + if module.params["authentication"].get("sasl_scram"): + c_params["ClientAuthentication"]["Sasl"] = { + "Scram": module.params["authentication"]["sasl_scram"] + } + if module.params["authentication"].get("tls_ca_arn"): + c_params["ClientAuthentication"]["Tls"] = { + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + } + + c_params.update(prepare_enhanced_monitoring_options(module)) + c_params.update(prepare_open_monitoring_options(module)) + c_params.update(prepare_logging_options(module)) + + return c_params + + +def prepare_enhanced_monitoring_options(module): + m_params = {} + m_params["EnhancedMonitoring"] = module.params["enhanced_monitoring"] or "DEFAULT" + return m_params + + +def prepare_open_monitoring_options(module): + m_params = {} + open_monitoring = module.params["open_monitoring"] or {} + m_params["OpenMonitoring"] = { + "Prometheus": { + "JmxExporter": { + "EnabledInBroker": open_monitoring.get("jmx_exporter", False) + }, + "NodeExporter": { + "EnabledInBroker": open_monitoring.get("node_exporter", False) + } + } + } + return m_params + + +def prepare_logging_options(module): + l_params = {} + logging = module.params["logging"] or {} + if logging.get("cloudwatch"): + l_params["CloudWatchLogs"] = { + "Enabled": module.params["logging"]["cloudwatch"].get("enabled"), + "LogGroup": module.params["logging"]["cloudwatch"].get("log_group") + } + else: + l_params["CloudWatchLogs"] = { + "Enabled": False + } + if logging.get("firehose"): + l_params["Firehose"] = { + "Enabled": module.params["logging"]["firehose"].get("enabled"), + "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream") + } + else: + l_params["Firehose"] = { + "Enabled": False + } + if logging.get("s3"): + l_params["S3"] = { + "Enabled": module.params["logging"]["s3"].get("enabled"), + "Bucket": module.params["logging"]["s3"].get("bucket"), + "Prefix": module.params["logging"]["s3"].get("prefix") + } + else: + l_params["S3"] = { + "Enabled": False + } + return { + "LoggingInfo": { + "BrokerLogs": l_params + } + } + + +def create_or_update_cluster(client, module): + """ + Create new or update existing cluster + """ + + changed = False + response = {} + + cluster = find_cluster_by_name(client, module, module.params["name"]) + + if not cluster: + + changed = True + + if module.check_mode: + return True, {} + + create_params = prepare_create_options(module) + + try: + response = client.create_cluster(**create_params) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, "Failed to create kafka cluster") + + if module.params.get("wait"): + wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE") + + else: + + response["ClusterArn"] = cluster["ClusterArn"] + response["changes"] = {} + + # prepare available update methods definitions with current/target values and options + msk_cluster_changes = { + "broker_count": { + "current_value": cluster["NumberOfBrokerNodes"], + "target_value": module.params.get("nodes"), + "update_params": { + "TargetNumberOfBrokerNodes": module.params.get("nodes") + } + }, + "broker_storage": { + "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"], + "target_value": module.params.get("ebs_volume_size"), + "update_params": { + 
"TargetBrokerEBSVolumeInfo": [ + {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")} + ] + } + }, + "broker_type": { + "boto3_version": "1.16.58", + "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], + "target_value": module.params.get("instance_type"), + "update_params": { + "TargetInstanceType": module.params.get("instance_type") + } + }, + "cluster_configuration": { + "current_value": { + "arn": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationArn"], + "revision": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationRevision"], + }, + "target_value": { + "arn": module.params.get("configuration_arn"), + "revision": module.params.get("configuration_revision"), + }, + "update_params": { + "ConfigurationInfo": { + "Arn": module.params.get("configuration_arn"), + "Revision": module.params.get("configuration_revision") + } + } + }, + "cluster_kafka_version": { + "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], + "target_value": module.params.get("version"), + "update_params": { + "TargetKafkaVersion": module.params.get("version") + } + }, + "enhanced_monitoring": { + "current_value": cluster["EnhancedMonitoring"], + "target_value": module.params.get("enhanced_monitoring"), + "update_method": "update_monitoring", + "update_params": prepare_enhanced_monitoring_options(module) + }, + "open_monitoring": { + "current_value": { + "OpenMonitoring": cluster["OpenMonitoring"] + }, + "target_value": prepare_open_monitoring_options(module), + "update_method": "update_monitoring", + "update_params": prepare_open_monitoring_options(module) + }, + "logging": { + "current_value": { + "LoggingInfo": cluster["LoggingInfo"] + }, + "target_value": prepare_logging_options(module), + "update_method": "update_monitoring", + "update_params": prepare_logging_options(module) + } + } + + for method, options in msk_cluster_changes.items(): + + if 'boto3_version' in options: + if not module.boto3_at_least(options["boto3_version"]): + continue + + try: + update_method = getattr(client, options.get("update_method", "update_" + method)) + except AttributeError as e: + module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method)) + + if options["current_value"] != options["target_value"]: + changed = True + if module.check_mode: + return True, {} + + # need to get cluster version and check for the state because + # there can be several updates requested but only one in time can be performed + version = get_cluster_version(client, module, cluster["ClusterArn"]) + state = get_cluster_state(client, module, cluster["ClusterArn"]) + if state != "ACTIVE": + if module.params["wait"]: + wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") + else: + module.fail_json( + msg="Cluster can be updated only in active state, current state is '{0}'. 
+
+    for method, options in msk_cluster_changes.items():
+
+        if 'boto3_version' in options:
+            if not module.boto3_at_least(options["boto3_version"]):
+                continue
+
+        try:
+            update_method = getattr(client, options.get("update_method", "update_" + method))
+        except AttributeError as e:
+            module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method))
+
+        if options["current_value"] != options["target_value"]:
+            changed = True
+            if module.check_mode:
+                return True, {}
+
+            # need to get the cluster version and check the state because several
+            # updates can be requested but only one at a time can be performed
+            version = get_cluster_version(client, module, cluster["ClusterArn"])
+            state = get_cluster_state(client, module, cluster["ClusterArn"])
+            if state != "ACTIVE":
+                if module.params["wait"]:
+                    wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
+                else:
+                    module.fail_json(
+                        msg="Cluster can only be updated in the ACTIVE state, current state is '{0}'. Check the cluster state or use the wait option".format(
+                            state
+                        )
+                    )
+            try:
+                response["changes"][method] = update_method(
+                    ClusterArn=cluster["ClusterArn"],
+                    CurrentVersion=version,
+                    **options["update_params"]
+                )
+            except (
+                botocore.exceptions.BotoCoreError,
+                botocore.exceptions.ClientError,
+            ) as e:
+                module.fail_json_aws(
+                    e, "Failed to update cluster via 'update_{0}'".format(method)
+                )
+
+            if module.params["wait"]:
+                wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
+
+    changed |= update_cluster_tags(client, module, response["ClusterArn"])
+
+    return changed, response
+
+
+def update_cluster_tags(client, module, arn):
+    new_tags = module.params.get('tags')
+    if new_tags is None:
+        return False
+    purge_tags = module.params.get('purge_tags')
+
+    try:
+        existing_tags = client.list_tags_for_resource(ResourceArn=arn)['Tags']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn))
+
+    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+    if not module.check_mode:
+        try:
+            if tags_to_remove:
+                client.untag_resource(ResourceArn=arn, TagKeys=tags_to_remove)
+            if tags_to_add:
+                client.tag_resource(ResourceArn=arn, Tags=tags_to_add)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn))
+
+    changed = bool(tags_to_add) or bool(tags_to_remove)
+    return changed
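# Illustrative aside, not part of this patch: compare_aws_tags, as used above,
# returns the delta between the current and requested tags (values made up):
existing = {'env': 'dev', 'team': 'data'}
desired = {'env': 'prod'}
to_set, to_unset = compare_aws_tags(existing, desired, purge_tags=True)
assert to_set == {'env': 'prod'} and to_unset == ['team']
to_set, to_unset = compare_aws_tags(existing, desired, purge_tags=False)
assert to_set == {'env': 'prod'} and to_unset == []   # unlisted tags are kept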
default="DEFAULT", + required=False, + ), + open_monitoring=dict( + type="dict", + options=dict( + jmx_exporter=dict(type="bool", default=False), + node_exporter=dict(type="bool", default=False), + ), + ), + logging=dict( + type="dict", + options=dict( + cloudwatch=dict( + type="dict", + options=dict( + enabled=dict(type="bool", default=False), + log_group=dict(type="str", required=False), + ), + ), + firehose=dict( + type="dict", + options=dict( + enabled=dict(type="bool", default=False), + delivery_stream=dict(type="str", required=False), + ), + ), + s3=dict( + type="dict", + options=dict( + enabled=dict(type="bool", default=False), + bucket=dict(type="str", required=False), + prefix=dict(type="str", required=False), + ), + ), + ), + ), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=3600), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), + ) + + module = AnsibleAWSModule( + argument_spec=module_args, + required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]], + supports_check_mode=True + ) + + client = module.client("kafka") + + if module.params["state"] == "present": + if len(module.params["subnets"]) < 2: + module.fail_json( + msg="At least two client subnets should be provided" + ) + if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0: + module.fail_json( + msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter" + ) + changed, response = create_or_update_cluster(client, module) + elif module.params["state"] == "absent": + changed, response = delete_cluster(client, module) + + cluster_info = {} + bootstrap_broker_string = {} + if response.get("ClusterArn") and module.params["state"] == "present": + try: + cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"])[ + "ClusterInfo" + ] + if cluster_info.get("State") == "ACTIVE": + brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"]) + if brokers.get("BootstrapBrokerString"): + bootstrap_broker_string["plain"] = brokers["BootstrapBrokerString"] + if brokers.get("BootstrapBrokerStringTls"): + bootstrap_broker_string["tls"] = brokers["BootstrapBrokerStringTls"] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws( + e, + "Can not obtain information about cluster {0}".format( + response["ClusterArn"] + ), + ) + + module.exit_json( + changed=changed, + bootstrap_broker_string=bootstrap_broker_string, + cluster_info=camel_dict_to_snake_dict(cluster_info), + response=camel_dict_to_snake_dict(response), + ) + + +if __name__ == "__main__": + main() diff --git a/aws_msk_config.py b/aws_msk_config.py new file mode 100644 index 00000000000..5b3cbd9e492 --- /dev/null +++ b/aws_msk_config.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: aws_msk_config +short_description: Manage Amazon MSK cluster configurations. +version_added: "1.5.0" +requirements: + - botocore >= 1.17.42 + - boto3 >= 1.17.9 +description: + - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations. 
diff --git a/aws_msk_config.py b/aws_msk_config.py
new file mode 100644
index 00000000000..5b3cbd9e492
--- /dev/null
+++ b/aws_msk_config.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: aws_msk_config
+short_description: Manage Amazon MSK cluster configurations.
+version_added: "1.5.0"
+requirements:
+    - botocore >= 1.17.42
+    - boto3 >= 1.17.9
+description:
+    - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations.
+author:
+    - Daniil Kupchenko (@oukooveu)
+options:
+    state:
+        description: Create (present) or delete (absent) cluster configuration.
+        choices: ['present', 'absent']
+        default: 'present'
+        type: str
+    name:
+        description: The name of the configuration.
+        required: true
+        type: str
+    description:
+        description: The description of the configuration.
+        type: str
+    config:
+        description: Contents of the server.properties file.
+        type: dict
+        aliases: ['configuration']
+    kafka_versions:
+        description:
+            - The versions of Apache Kafka with which you can use this MSK configuration.
+            - Required when I(state=present).
+        type: list
+        elements: str
+extends_documentation_fragment:
+    - amazon.aws.aws
+    - amazon.aws.ec2
+"""

+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- aws_msk_config:
+    name: kafka-cluster-configuration
+    state: present
+    kafka_versions:
+      - 2.6.0
+      - 2.6.1
+    config:
+      auto.create.topics.enable=false
+      num.partitions=1
+      default.replication.factor=3
+      zookeeper.session.timeout.ms=18000
+
+- aws_msk_config:
+    name: kafka-cluster-configuration
+    state: absent
+"""

+RETURN = r"""
+# These are examples of possible return values, and in general should use other names for return values.
+
+arn:
+    description: The Amazon Resource Name (ARN) of the configuration.
+    type: str
+    returned: I(state=present)
+    sample: "arn:aws:kafka:::configuration//"
+revision:
+    description: The revision number.
+    type: int
+    returned: I(state=present)
+    sample: 1
+server_properties:
+    description: Contents of the server.properties file.
+    type: str
+    returned: I(state=present)
+    sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000"
+response:
+    description: The response from the actual API call.
+    type: dict
+    returned: always
+    sample: {}
+"""
+
+try:
+    import botocore
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+    camel_dict_to_snake_dict,
+    AWSRetry,
+)
+
+
+BOTOCORE_MIN_VERSION = "1.17.42"
+
+
+def dict_to_prop(d):
+    """convert dictionary to multi-line properties"""
+    if len(d) == 0:
+        return ""
+    return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
+
+
+def prop_to_dict(p):
+    """convert properties to dictionary"""
+    if len(p) == 0:
+        return {}
+    return {
+        k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n"))
+    }
+
+
+@AWSRetry.backoff(tries=5, delay=5)
+def get_configurations_with_backoff(client):
+    paginator = client.get_paginator("list_configurations")
+    return paginator.paginate().build_full_result()
+
+
+def find_active_config(client, module):
+    """
+    looking for configuration by name
+    status is not returned for list_configurations in botocore 1.17.42
+    delete_configuration method was added in botocore 1.17.48
+    """
+
+    name = module.params["name"]
+
+    try:
+        all_configs = get_configurations_with_backoff(client)["Configurations"]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="failed to obtain kafka configurations")
+
+    active_configs = list(
+        item
+        for item in all_configs
+        if item["Name"] == name and item["State"] == "ACTIVE"
+    )
+
+    if active_configs:
+        if len(active_configs) == 1:
+            return active_configs[0]
+        else:
+            module.fail_json(
+                msg="found more than one active config with name '{0}'".format(name)
+            )
+
+    return None
+
+
+def create_config(client, module):
+    """create new or update existing configuration"""
+
+    config = find_active_config(client, module)
+
+    # create new configuration
+    if not config:
+
+        if module.check_mode:
+            return True, {}
+
+        try:
+            response = client.create_configuration(
+                Name=module.params.get("name"),
+                Description=module.params.get("description"),
+                KafkaVersions=module.params.get("kafka_versions"),
+                ServerProperties=dict_to_prop(module.params.get("config")).encode(),
+            )
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:
+            module.fail_json_aws(e, "failed to create kafka configuration")
+
+    # update existing configuration (creates new revision)
+    else:
+        # it's required because 'config' doesn't contain 'ServerProperties'
+        response = client.describe_configuration_revision(
+            Arn=config["Arn"], Revision=config["LatestRevision"]["Revision"]
+        )
+
+        # compare configurations (description and properties) and update if required
+        prop_module = {str(k): str(v) for k, v in module.params.get("config").items()}
+        if prop_to_dict(response.get("ServerProperties", "")) == prop_module:
+            if response.get("Description", "") == module.params.get("description"):
+                return False, response
+
+        if module.check_mode:
+            return True, {}
+
+        try:
+            response = client.update_configuration(
+                Arn=config["Arn"],
+                Description=module.params.get("description"),
+                ServerProperties=dict_to_prop(module.params.get("config")).encode(),
+            )
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:
+            module.fail_json_aws(e, "failed to update kafka configuration")
+
+    arn = response["Arn"]
+    revision = response["LatestRevision"]["Revision"]
+
+    result = client.describe_configuration_revision(Arn=arn, Revision=revision)
+
+    return True, result
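# Illustrative aside, not part of this patch: the idempotency check in
# create_config above hinges on the dict_to_prop/prop_to_dict round trip.
# prop_to_dict yields string values, which is why the module stringifies the
# requested config before comparing (the values here are made up):
props = dict_to_prop({'auto.create.topics.enable': 'false', 'num.partitions': 1})
# props == "auto.create.topics.enable=false\nnum.partitions=1"
assert prop_to_dict(props.encode()) == {
    'auto.create.topics.enable': 'false',
    'num.partitions': '1',
}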
+
+
+def delete_config(client, module):
+    """delete configuration"""
+
+    config = find_active_config(client, module)
+
+    if module.check_mode:
+        if config:
+            return True, config
+        else:
+            return False, {}
+
+    if config:
+        try:
+            response = client.delete_configuration(Arn=config["Arn"])
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:
+            module.fail_json_aws(e, "failed to delete the kafka configuration")
+        return True, response
+
+    return False, {}
+
+
+def main():
+
+    module_args = dict(
+        name=dict(type="str", required=True),
+        description=dict(type="str", default=""),
+        state=dict(choices=["present", "absent"], default="present"),
+        config=dict(type="dict", aliases=["configuration"], default={}),
+        kafka_versions=dict(type="list", elements="str"),
+    )
+
+    module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True)
+
+    if not module.botocore_at_least(BOTOCORE_MIN_VERSION):
+        module.fail_json(
+            msg="aws_msk_config module requires botocore >= {0}".format(
+                BOTOCORE_MIN_VERSION
+            )
+        )
+
+    client = module.client("kafka")
+
+    if module.params["state"] == "present":
+        changed, response = create_config(client, module)
+
+    elif module.params["state"] == "absent":
+        changed, response = delete_config(client, module)
+
+    # return some placeholder values in check mode if the configuration doesn't exist;
+    # this can be useful when these options are referenced by other modules during a check mode run
+    if module.check_mode and not response.get("Arn"):
+        arn = "arn:aws:kafka:region:account:configuration/name/id"
+        revision = 1
+        server_properties = ""
+    else:
+        arn = response.get("Arn")
+        revision = response.get("Revision")
+        server_properties = response.get("ServerProperties", "")
+
+    module.exit_json(
+        changed=changed,
+        arn=arn,
+        revision=revision,
+        server_properties=server_properties,
+        response=camel_dict_to_snake_dict(response),
+    )
+
+
+if __name__ == "__main__":
+    main()

From 2056fa9bbeb9e7e194bd134994810cb6c96c5fad Mon Sep 17 00:00:00 2001
From: Daniil Kupchenko
Date: Sun, 18 Apr 2021 14:15:20 +0300
Subject: [PATCH 192/683] fix: fix for python 2.6 related errors

---
 aws_msk_config.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/aws_msk_config.py b/aws_msk_config.py
index 5b3cbd9e492..1e5f253501b 100644
--- a/aws_msk_config.py
+++ b/aws_msk_config.py
@@ -118,9 +118,15 @@ def prop_to_dict(p):
     """convert properties to dictionary"""
     if len(p) == 0:
         return {}
-    return {
-        k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n"))
-    }
+    r_dict = {}
+    for s in p.decode().split("\n"):
+        kv = s.split("=")
+        r_dict[kv[0].strip()] = kv[1].strip()
+    return r_dict
+    # python >= 2.7 is required:
+    # return {
+    #     k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n"))
+    # }
 
 
 @@ -192,7 +198,10 @@ def create_config(client, module):
         )
 
         # compare configurations (description and properties) and update if required
-        prop_module = {str(k): str(v) for k, v in module.params.get("config").items()}
+        prop_module = {}
+        for k, v in module.params.get("config").items():
+            prop_module[str(k)] = str(v)
         if prop_to_dict(response.get("ServerProperties", "")) == prop_module:
             if response.get("Description", "") == module.params.get("description"):
                 return False, response

From e536f529c81b7e1a3a7498614617cef447a58201 Mon Sep 17 00:00:00
2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 21:58:56 +0300 Subject: [PATCH 193/683] chore(doc): formatting Co-authored-by: Mark Chappell --- aws_msk_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index 1e5f253501b..259517395ad 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -21,7 +21,7 @@ - Daniil Kupchenko (@oukooveu) options: state: - description: Create (present) or delete (absent) cluster configuration. + description: Create (C(present)) or delete (C(absent)) cluster configuration. choices: ['present', 'absent'] default: 'present' type: str From 441bef9a51b104f0314d6606216704357d116db3 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 22:02:21 +0300 Subject: [PATCH 194/683] fix: misprint in config sample Co-authored-by: Mark Chappell --- aws_msk_config.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index 259517395ad..2fbb66262d2 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -57,10 +57,10 @@ - 2.6.0 - 2.6.1 config: - auto.create.topics.enable=false - num.partitions=1 - default.replication.factor=3 - zookeeper.session.timeout.ms=18000 + auto.create.topics.enable: false + num.partitions: 1 + default.replication.factor: 3 + zookeeper.session.timeout.ms: 18000 - aws_msk_config: name: kafka-cluster-configuration From e9761e59b2070708d2bc52787a7a1437b8d45940 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 22:08:17 +0300 Subject: [PATCH 195/683] chore: jittered_backoff instead of backoff --- aws_msk_cluster.py | 4 ++-- aws_msk_config.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index f4d9a5db84f..cfecbc9b16a 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -281,13 +281,13 @@ ) -@AWSRetry.backoff(tries=5, delay=5) +@AWSRetry.jittered_backoff(retries=5, delay=5) def list_clusters_with_backoff(client, cluster_name): paginator = client.get_paginator("list_clusters") return paginator.paginate(ClusterNameFilter=cluster_name).build_full_result() -@AWSRetry.backoff(tries=5, delay=5) +@AWSRetry.jittered_backoff(retries=5, delay=5) def list_nodes_with_backoff(client, cluster_arn): paginator = client.get_paginator("list_nodes") return paginator.paginate(ClusterArn=cluster_arn).build_full_result() diff --git a/aws_msk_config.py b/aws_msk_config.py index 2fbb66262d2..e2ce939268b 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -129,7 +129,7 @@ def prop_to_dict(p): # } -@AWSRetry.backoff(tries=5, delay=5) +@AWSRetry.jittered_backoff(retries=5, delay=5) def get_configurations_with_backoff(client): paginator = client.get_paginator("list_configurations") return paginator.paginate().build_full_result() From c129ade169a0375b7ae3016280a79aaca1004d20 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 22:11:44 +0300 Subject: [PATCH 196/683] chore: retry decorator for client invocation --- aws_msk_cluster.py | 2 +- aws_msk_config.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index cfecbc9b16a..a94405fb64d 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -776,7 +776,7 @@ def main(): supports_check_mode=True ) - client = module.client("kafka") + client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) if module.params["state"] == "present": if len(module.params["subnets"]) < 2: diff --git a/aws_msk_config.py 
b/aws_msk_config.py index e2ce939268b..5952a970f8c 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -272,7 +272,7 @@ def main(): ) ) - client = module.client("kafka") + client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) if module.params["state"] == "present": changed, response = create_config(client, module) From ff4b0f883c677af12519dd4b0b73cda6a26cbf6f Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 22:20:03 +0300 Subject: [PATCH 197/683] chore: aws_retry for aws calls --- aws_msk_cluster.py | 16 ++++++++-------- aws_msk_config.py | 8 +++++--- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index a94405fb64d..d47ce2145ae 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -304,7 +304,7 @@ def find_cluster_by_name(client, module, cluster_name): def get_cluster_state(client, module, arn): try: - response = client.describe_cluster(ClusterArn=arn) + response = client.describe_cluster(ClusterArn=arn, aws_retry=True) except client.exceptions.NotFoundException: return "DELETED" except ( @@ -317,7 +317,7 @@ def get_cluster_state(client, module, arn): def get_cluster_version(client, module, arn): try: - response = client.describe_cluster(ClusterArn=arn) + response = client.describe_cluster(ClusterArn=arn, aws_retry=True) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, @@ -481,7 +481,7 @@ def create_or_update_cluster(client, module): create_params = prepare_create_options(module) try: - response = client.create_cluster(**create_params) + response = client.create_cluster(**create_params, aws_retry=True) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, @@ -627,7 +627,7 @@ def update_cluster_tags(client, module, arn): purge_tags = module.params.get('purge_tags') try: - existing_tags = client.list_tags_for_resource(ResourceArn=arn)['Tags'] + existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) @@ -636,9 +636,9 @@ def update_cluster_tags(client, module, arn): if not module.check_mode: try: if tags_to_remove: - client.untag_resource(ResourceArn=arn, TagKeys=tags_to_remove) + client.untag_resource(ResourceArn=arn, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: - client.tag_resource(ResourceArn=arn, Tags=tags_to_add) + client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn)) @@ -795,11 +795,11 @@ def main(): bootstrap_broker_string = {} if response.get("ClusterArn") and module.params["state"] == "present": try: - cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"])[ + cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[ "ClusterInfo" ] if cluster_info.get("State") == "ACTIVE": - brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"]) + brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True) if brokers.get("BootstrapBrokerString"): bootstrap_broker_string["plain"] = brokers["BootstrapBrokerString"] if brokers.get("BootstrapBrokerStringTls"): diff --git a/aws_msk_config.py b/aws_msk_config.py index 5952a970f8c..5cbe5727c41 100644 --- 
a/aws_msk_config.py +++ b/aws_msk_config.py @@ -183,6 +183,7 @@ def create_config(client, module): Description=module.params.get("description"), KafkaVersions=module.params.get("kafka_versions"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), + aws_retry=True ) except ( botocore.exceptions.BotoCoreError, @@ -194,7 +195,7 @@ def create_config(client, module): else: # it's required because 'config' doesn't contain 'ServerProperties' response = client.describe_configuration_revision( - Arn=config["Arn"], Revision=config["LatestRevision"]["Revision"] + Arn=config["Arn"], Revision=config["LatestRevision"]["Revision"], aws_retry=True ) # compare configurations (description and properties) and update if required @@ -214,6 +215,7 @@ def create_config(client, module): Arn=config["Arn"], Description=module.params.get("description"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), + aws_retry=True ) except ( botocore.exceptions.BotoCoreError, @@ -224,7 +226,7 @@ def create_config(client, module): arn = response["Arn"] revision = response["LatestRevision"]["Revision"] - result = client.describe_configuration_revision(Arn=arn, Revision=revision) + result = client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True) return True, result @@ -242,7 +244,7 @@ def delete_config(client, module): if config: try: - response = client.delete_configuration(Arn=config["Arn"]) + response = client.delete_configuration(Arn=config["Arn"], aws_retry=True) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, From 6173956dfd14cc6082464eefd9eb10ea7c432074 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 22:37:13 +0300 Subject: [PATCH 198/683] fix: missing try/except for api call --- aws_msk_config.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index 5cbe5727c41..8c9da121287 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -166,6 +166,16 @@ def find_active_config(client, module): return None +def get_configuration_revision(client, module, arn, revision): + try: + return client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, "failed to describe kafka configuration revision") + + def create_config(client, module): """create new or update existing configuration""" @@ -194,9 +204,7 @@ def create_config(client, module): # update existing configuration (creates new revision) else: # it's required because 'config' doesn't contain 'ServerProperties' - response = client.describe_configuration_revision( - Arn=config["Arn"], Revision=config["LatestRevision"]["Revision"], aws_retry=True - ) + response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]) # compare configurations (description and properties) and update if required # prop_module = {str(k): str(v) for k, v in module.params.get("config").items()} @@ -226,7 +234,7 @@ def create_config(client, module): arn = response["Arn"] revision = response["LatestRevision"]["Revision"] - result = client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True) + result = get_configuration_revision(client, module, arn=arn, revision=revision) return True, result From 6cc00bb5f4e7996b6607df23dba3e2efd5710161 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Mon, 19 Apr 2021 
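The create_config() path above serializes the user's config dict through dict_to_prop(...).encode() because MSK transports ServerProperties as the raw bytes of a Kafka server.properties file. dict_to_prop() and prop_to_dict() are defined earlier in aws_msk_config.py and are not part of these hunks; a sketch of their assumed shape:

def dict_to_prop(d):
    # one key=value line per property, Kafka properties-file style (assumed)
    return "\n".join("{0}={1}".format(k, v) for k, v in d.items())

def prop_to_dict(p):
    # inverse: parse properties text (bytes or str) back into a dict (assumed)
    if isinstance(p, bytes):
        p = p.decode("utf-8")
    return dict(line.split("=", 1) for line in p.splitlines() if "=" in line)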
23:07:39 +0300 Subject: [PATCH 199/683] chore: dedicated function for changes comparison --- aws_msk_config.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index 8c9da121287..aa4d425f73c 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -176,6 +176,21 @@ def get_configuration_revision(client, module, arn, revision): module.fail_json_aws(e, "failed to describe kafka configuration revision") +def is_configuration_changed(module, current): + """ + compare configuration's description and properties + python 2.7+ version: + prop_module = {str(k): str(v) for k, v in module.params.get("config").items()} + """ + prop_module = {} + for k, v in module.params.get("config").items(): + prop_module[str(k)] = str(v) + if prop_to_dict(current.get("ServerProperties", "")) == prop_module: + if current.get("Description", "") == module.params.get("description"): + return False + return True + + def create_config(client, module): """create new or update existing configuration""" @@ -206,14 +221,8 @@ def create_config(client, module): # it's required because 'config' doesn't contain 'ServerProperties' response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]) - # compare configurations (description and properties) and update if required - # prop_module = {str(k): str(v) for k, v in module.params.get("config").items()} - prop_module = {} - for k, v in module.params.get("config").items(): - prop_module[str(k)] = str(v) - if prop_to_dict(response.get("ServerProperties", "")) == prop_module: - if response.get("Description", "") == module.params.get("description"): - return False, response + if not is_configuration_changed(module, response): + return False, response if module.check_mode: return True, {} From f8cc8e16565c45121ba39e0b36f23428324744ae Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 20 Apr 2021 00:56:32 +0300 Subject: [PATCH 200/683] fix: parameters order --- aws_msk_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index d47ce2145ae..6efdde656c4 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -481,7 +481,7 @@ def create_or_update_cluster(client, module): create_params = prepare_create_options(module) try: - response = client.create_cluster(**create_params, aws_retry=True) + response = client.create_cluster(aws_retry=True, **create_params) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, From 9729410ab87940d6ebaab0dd1f478e6e03201235 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Wed, 12 May 2021 10:51:22 +0300 Subject: [PATCH 201/683] chore: bump version_added --- aws_msk_cluster.py | 2 +- aws_msk_config.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 6efdde656c4..0ce40761831 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -11,7 +11,7 @@ --- module: aws_msk_cluster short_description: Manage Amazon MSK clusters. -version_added: "1.5.0" +version_added: "2.0.0" requirements: - botocore >= 1.17.42 - boto3 >= 1.17.9 diff --git a/aws_msk_config.py b/aws_msk_config.py index aa4d425f73c..c02769152a5 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -11,7 +11,7 @@ --- module: aws_msk_config short_description: Manage Amazon MSK cluster configurations. 
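The str() normalization inside is_configuration_changed() is what keeps the module idempotent: playbooks hand over native YAML types, while the configuration comes back from the API as properties-file text. A self-contained illustration:

# Values arrive from the playbook as native types...
desired = {"auto.create.topics.enable": False, "num.partitions": 1}
# ...but round-trip through ServerProperties as strings.
current = {"auto.create.topics.enable": "False", "num.partitions": "1"}
assert {str(k): str(v) for k, v in desired.items()} == current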
-version_added: "1.5.0" +version_added: "2.0.0" requirements: - botocore >= 1.17.42 - boto3 >= 1.17.9 From f6e212e85e45537d8eb7a7a7f1b66a10e618e154 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 22 Jun 2021 14:24:46 +0300 Subject: [PATCH 202/683] chore(doc): highlighting in description Co-authored-by: Mark Chappell --- aws_msk_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 0ce40761831..acc6d0f8c26 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -21,7 +21,7 @@ - Daniil Kupchenko (@oukooveu) options: state: - description: Create (present) or delete (absent) cluster. + description: Create (C(present)) or delete (C(absent)) cluster. choices: ['present', 'absent'] type: str default: 'present' From 2d84824a3bfe7f96458135498c5d3fd515386641 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 22 Jun 2021 14:25:59 +0300 Subject: [PATCH 203/683] chore(doc): dedicated note about us-east-1e zone Co-authored-by: Mark Chappell --- aws_msk_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index acc6d0f8c26..e092db97ac8 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -70,7 +70,7 @@ - The list of subnets to connect to in the client virtual private cloud (VPC). AWS creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data. - Client subnets can't be in Availability Zone us-east-1e. + - Client subnets can't be in Availability Zone us-east-1e. - This parameter is required when I(state=present). type: list elements: str From eff509d7c52858606b178bd55de60ff1171c52e6 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 22 Jun 2021 14:26:32 +0300 Subject: [PATCH 204/683] chore(doc): reformatting Co-authored-by: Mark Chappell --- aws_msk_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index e092db97ac8..e5e62bf4b5a 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -198,7 +198,7 @@ type: bool default: false wait_timeout: - description: How long to wait, seconds. Cluster creation can take up to 20-30 minutes. + description: How many seconds to wait. Cluster creation can take up to 20-30 minutes. 
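wait_timeout feeds the hand-rolled waiter that patch 205 below annotates; boto3 shipped no MSK waiters at the time, so the module polls describe_cluster itself. A hedged sketch of that loop, reusing the get_cluster_state() helper from patch 197 (the three setup lines match the context shown in patch 205; the loop body is assumed):

import time

def wait_for_cluster_state(client, module, arn, state="ACTIVE"):
    start = time.time()
    timeout = int(module.params.get("wait_timeout"))
    check_interval = 60
    # Poll until the cluster reaches the target state or the timeout elapses.
    while get_cluster_state(client, module, arn) != state:
        if time.time() - start > timeout:
            module.fail_json(msg="Timeout waiting for cluster state {0}".format(state))
        time.sleep(check_interval)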
type: int default: 3600 tags: From 77f587105056174a48a68a13150eec5835bb1898 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 22 Jun 2021 14:27:23 +0300 Subject: [PATCH 205/683] chore(doc): wait_for_cluster_state explanation Co-authored-by: Mark Chappell --- aws_msk_cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index e5e62bf4b5a..81aae367b2a 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -327,6 +327,7 @@ def get_cluster_version(client, module, arn): def wait_for_cluster_state(client, module, arn, state="ACTIVE"): + # As of 2021-06 boto3 doesn't offer any built in waiters start = time.time() timeout = int(module.params.get("wait_timeout")) check_interval = 60 From 8822a593266c8486f5322a64e1c49178bc5e94a1 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Tue, 22 Jun 2021 14:32:01 +0300 Subject: [PATCH 206/683] chore: catch exception for find_cluster_by_name --- aws_msk_cluster.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 81aae367b2a..3b6794e689d 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -294,7 +294,13 @@ def list_nodes_with_backoff(client, cluster_arn): def find_cluster_by_name(client, module, cluster_name): - cluster_list = list_clusters_with_backoff(client, cluster_name).get("ClusterInfoList", []) + try: + cluster_list = list_clusters_with_backoff(client, cluster_name).get("ClusterInfoList", []) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws(e, "Failed to find kafka cluster by name") if cluster_list: if len(cluster_list) != 1: module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name)) From 669e36cf8893f85f2ca58574c8f34e535a746235 Mon Sep 17 00:00:00 2001 From: Daniil Kupchenko Date: Sun, 27 Jun 2021 00:51:25 +0300 Subject: [PATCH 207/683] fix: cluster name should not exceed 64 characters limit --- aws_msk_cluster.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 3b6794e689d..7f85c00a59b 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -794,6 +794,10 @@ def main(): module.fail_json( msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter" ) + if len(module.params["name"]) > 64: + module.fail_json( + msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"]) + ) changed, response = create_or_update_cluster(client, module) elif module.params["state"] == "absent": changed, response = delete_cluster(client, module) From 0038453327a145e6819c777d9ce263e980f9a11f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 6 May 2021 21:01:46 +0200 Subject: [PATCH 208/683] Update the default module requirements from python 2.6/boto to python 3.6/boto3 --- aws_acm.py | 3 --- aws_acm_info.py | 3 --- aws_api_gateway.py | 2 -- aws_application_scaling_policy.py | 2 -- aws_batch_compute_environment.py | 3 --- aws_batch_job_definition.py | 2 -- aws_batch_job_queue.py | 2 -- aws_codebuild.py | 1 - aws_codecommit.py | 6 ------ aws_codepipeline.py | 1 - aws_config_aggregation_authorization.py | 1 - aws_config_aggregator.py | 1 - aws_config_delivery_channel.py | 1 - aws_config_recorder.py | 1 - aws_config_rule.py | 1 - aws_direct_connect_confirm_connection.py | 4 ---- aws_direct_connect_connection.py | 4 ---- aws_direct_connect_gateway.py | 2 -- aws_direct_connect_link_aggregation_group.py | 4 ----
aws_direct_connect_virtual_interface.py | 3 --- aws_eks_cluster.py | 2 -- aws_glue_connection.py | 1 - aws_glue_job.py | 1 - aws_inspector_target.py | 4 ---- aws_region_info.py | 2 -- aws_s3_bucket_info.py | 3 --- aws_secret.py | 1 - aws_ses_identity.py | 1 - aws_ses_identity_policy.py | 1 - aws_ses_rule_set.py | 1 - aws_sgw_info.py | 1 - aws_ssm_parameter_store.py | 2 -- aws_waf_info.py | 1 - cloudformation_exports_info.py | 1 - cloudformation_stack_set.py | 2 -- cloudfront_distribution.py | 5 ----- cloudfront_info.py | 3 --- cloudfront_invalidation.py | 5 ----- cloudfront_origin_access_identity.py | 5 ----- cloudtrail.py | 3 --- cloudwatchevent_rule.py | 3 --- cloudwatchlogs_log_group.py | 1 - cloudwatchlogs_log_group_info.py | 1 - cloudwatchlogs_log_group_metric_filter.py | 3 --- data_pipeline.py | 1 - dynamodb_table.py | 6 ++++-- dynamodb_ttl.py | 5 +---- ec2_ami_copy.py | 3 --- ec2_asg.py | 1 - ec2_asg_info.py | 1 - ec2_asg_lifecycle_hook.py | 2 -- ec2_customer_gateway.py | 1 - ec2_customer_gateway_info.py | 1 - ec2_elb.py | 4 +++- ec2_elb_info.py | 4 +++- ec2_launch_template.py | 3 --- ec2_lc.py | 4 ---- ec2_lc_find.py | 3 --- ec2_lc_info.py | 1 - ec2_snapshot_copy.py | 3 --- ec2_transit_gateway.py | 1 - ec2_transit_gateway_info.py | 3 --- ec2_vpc_endpoint.py | 1 - ec2_vpc_endpoint_info.py | 1 - ec2_vpc_endpoint_service_info.py | 1 - ec2_vpc_igw.py | 4 ---- ec2_vpc_igw_info.py | 1 - ec2_vpc_nacl.py | 2 -- ec2_vpc_nacl_info.py | 1 - ec2_vpc_nat_gateway.py | 1 - ec2_vpc_nat_gateway_info.py | 1 - ec2_vpc_peer.py | 2 -- ec2_vpc_peering_info.py | 1 - ec2_vpc_vgw.py | 1 - ec2_vpc_vgw_info.py | 1 - ec2_vpc_vpn.py | 2 -- ec2_vpc_vpn_info.py | 1 - ec2_win_password.py | 6 +++--- ecs_attribute.py | 1 - ecs_cluster.py | 1 - ecs_ecr.py | 1 - ecs_service.py | 2 -- ecs_service_info.py | 1 - ecs_tag.py | 1 - ecs_task.py | 1 - ecs_taskdefinition.py | 1 - ecs_taskdefinition_info.py | 1 - efs.py | 1 - efs_info.py | 1 - elasticache.py | 1 - elasticache_parameter_group.py | 1 - elasticache_snapshot.py | 2 -- elasticache_subnet_group.py | 4 +++- elb_application_lb.py | 1 - elb_application_lb_info.py | 1 - elb_classic_lb.py | 3 +++ elb_classic_lb_info.py | 4 ---- elb_instance.py | 4 +++- elb_network_lb.py | 1 - elb_target_group.py | 1 - elb_target_group_info.py | 1 - elb_target_info.py | 4 ---- execute_lambda.py | 3 --- iam.py | 4 +++- iam_cert.py | 5 +++-- iam_group.py | 1 - iam_managed_policy.py | 4 ---- iam_mfa_device_info.py | 4 ---- iam_password_policy.py | 1 - iam_role.py | 1 - iam_role_info.py | 1 - iam_saml_federation.py | 2 -- iam_server_certificate_info.py | 1 - iam_user.py | 1 - iam_user_info.py | 3 --- kinesis_stream.py | 1 - lambda.py | 1 - lambda_alias.py | 2 -- lambda_event.py | 2 -- lambda_facts.py | 2 -- lambda_info.py | 2 -- lambda_policy.py | 2 -- lightsail.py | 3 --- rds.py | 7 +++---- rds_instance.py | 4 ---- rds_instance_info.py | 3 --- rds_param_group.py | 1 - rds_snapshot.py | 3 --- rds_snapshot_info.py | 3 --- redshift.py | 1 - redshift_cross_region_snapshots.py | 1 - redshift_info.py | 1 - redshift_subnet_group.py | 5 +++-- route53.py | 1 - route53_health_check.py | 4 +++- route53_zone.py | 1 - s3_bucket_notification.py | 2 -- s3_sync.py | 5 ----- s3_website.py | 1 - sns.py | 4 ---- sns_topic.py | 2 -- sqs_queue.py | 2 -- sts_assume_role.py | 5 ----- sts_session_token.py | 5 ----- wafv2_ip_set.py | 3 --- wafv2_ip_set_info.py | 3 --- wafv2_resources.py | 3 --- wafv2_resources_info.py | 3 --- wafv2_rule_group.py | 3 --- wafv2_rule_group_info.py | 3 --- wafv2_web_acl.py | 3 --- 
wafv2_web_acl_info.py | 3 --- 152 files changed, 38 insertions(+), 307 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 4f17e83e0be..65c95212170 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -145,14 +145,11 @@ choices: [present, absent] default: present type: str -requirements: - - boto3 author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = ''' diff --git a/aws_acm_info.py b/aws_acm_info.py index 97d9a879152..f0b77b8958f 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -39,14 +39,11 @@ description: - Filter results to show only certificates with tags that match all the tags specified here. type: dict -requirements: - - boto3 author: - Will Thames (@willthames) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = r''' diff --git a/aws_api_gateway.py b/aws_api_gateway.py index ccf7c097b57..9eadf88d48b 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -26,7 +26,6 @@ - swagger_file and swagger_text are passed directly on to AWS transparently whilst swagger_dict is an ansible dict which is converted to JSON before the API definitions are uploaded. -requirements: [ boto3 ] options: api_id: description: @@ -114,7 +113,6 @@ ID so that an API can be created only once. - As an early work around an intermediate version will probably do the same using a tag embedded in the API name. - ''' EXAMPLES = ''' diff --git a/aws_application_scaling_policy.py b/aws_application_scaling_policy.py index dcc8b8b1691..5c8ac9b24ad 100644 --- a/aws_application_scaling_policy.py +++ b/aws_application_scaling_policy.py @@ -19,7 +19,6 @@ author: - Gustavo Maia (@gurumaia) - Chen Leibovich (@chenl87) -requirements: [ json, botocore, boto3 ] options: state: description: Whether a policy should be C(present) or C(absent). @@ -105,7 +104,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = ''' diff --git a/aws_batch_compute_environment.py b/aws_batch_compute_environment.py index 68044a8d11e..86a971ea0e6 100644 --- a/aws_batch_compute_environment.py +++ b/aws_batch_compute_environment.py @@ -118,9 +118,6 @@ description: - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. type: str - -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 18d0429a831..4beb1ab2c26 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -169,8 +169,6 @@ attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that many times. type: int -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_batch_job_queue.py b/aws_batch_job_queue.py index b472371eb84..7091c0756b3 100644 --- a/aws_batch_job_queue.py +++ b/aws_batch_job_queue.py @@ -59,8 +59,6 @@ compute_environment: type: str description: The name of the compute environment. -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_codebuild.py b/aws_codebuild.py index e56b1a566b0..9462d180e78 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -18,7 +18,6 @@ - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code. 
author: - Stefan Horning (@stefanhorning) -requirements: [ botocore, boto3 ] options: name: description: diff --git a/aws_codecommit.py b/aws_codecommit.py index 18fc10a2d69..090d0fd06f8 100644 --- a/aws_codecommit.py +++ b/aws_codecommit.py @@ -16,12 +16,6 @@ - Supports creation and deletion of CodeCommit repositories. - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. author: Shuang Wang (@ptux) - -requirements: - - botocore - - boto3 - - python >= 2.6 - options: name: description: diff --git a/aws_codepipeline.py b/aws_codepipeline.py index 101ccaee4df..e7f65705269 100644 --- a/aws_codepipeline.py +++ b/aws_codepipeline.py @@ -18,7 +18,6 @@ - Create or delete a CodePipeline on AWS. author: - Stefan Horning (@stefanhorning) -requirements: [ botocore, boto3 ] options: name: description: diff --git a/aws_config_aggregation_authorization.py b/aws_config_aggregation_authorization.py index e0f4af6f5b4..f3a8591c697 100644 --- a/aws_config_aggregation_authorization.py +++ b/aws_config_aggregation_authorization.py @@ -14,7 +14,6 @@ short_description: Manage cross-account AWS Config authorizations description: - Module manages AWS Config resources. -requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 16f6ff5152a..393413c07b9 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -14,7 +14,6 @@ short_description: Manage AWS Config aggregations across multiple accounts description: - Module manages AWS Config resources -requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index 117afe80c0b..e6e9d40e62c 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -14,7 +14,6 @@ short_description: Manage AWS Config delivery channels description: - This module manages AWS Config delivery locations for rule checks and configuration info. -requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/aws_config_recorder.py b/aws_config_recorder.py index e740241c082..e9c2cbc17ad 100644 --- a/aws_config_recorder.py +++ b/aws_config_recorder.py @@ -14,7 +14,6 @@ short_description: Manage AWS Config Recorders description: - Module manages AWS Config configuration recorder settings. 
-requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/aws_config_rule.py b/aws_config_rule.py index 0beae0b63a8..ed4de6ab7e2 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -14,7 +14,6 @@ short_description: Manage AWS Config resources description: - Module manages AWS Config rules -requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/aws_direct_connect_confirm_connection.py b/aws_direct_connect_confirm_connection.py index 642c9c306ca..7ea8527db72 100644 --- a/aws_direct_connect_confirm_connection.py +++ b/aws_direct_connect_confirm_connection.py @@ -21,10 +21,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore options: name: description: diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index e2ea2d5e232..98afd701f3d 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -19,10 +19,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore options: state: description: diff --git a/aws_direct_connect_gateway.py b/aws_direct_connect_gateway.py index e1e6ae093f5..54c336fffbe 100644 --- a/aws_direct_connect_gateway.py +++ b/aws_direct_connect_gateway.py @@ -19,8 +19,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ boto3 ] options: state: description: diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 65294317b01..7b287bd61f3 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -17,10 +17,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore options: state: description: diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index eb4906cc730..d520f0ee84f 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -14,9 +14,6 @@ description: - Create, delete, or modify a Direct Connect public or private virtual interface. author: "Sloane Hertel (@s-hertel)" -requirements: - - boto3 - - botocore options: state: description: diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index d6df16093f3..3d8f2696d5f 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -54,8 +54,6 @@ to 1200 seconds (20 minutes). default: 1200 type: int - -requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_glue_connection.py b/aws_glue_connection.py index b279509be18..07bdddd92ab 100644 --- a/aws_glue_connection.py +++ b/aws_glue_connection.py @@ -13,7 +13,6 @@ short_description: Manage an AWS Glue connection description: - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. -requirements: [ boto3 ] author: "Rob White (@wimnat)" options: availability_zone: diff --git a/aws_glue_job.py b/aws_glue_job.py index d1f249aaefc..dac91ecc794 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -13,7 +13,6 @@ short_description: Manage an AWS Glue job description: - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. 
-requirements: [ boto3 ] author: - "Rob White (@wimnat)" - "Vijayanand Sharma (@vijayanandsharma)" diff --git a/aws_inspector_target.py b/aws_inspector_target.py index b71fbf61c0d..ceb4abd63dd 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -38,10 +38,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore ''' EXAMPLES = ''' diff --git a/aws_region_info.py b/aws_region_info.py index bedb8a5f1fa..67b71d6f868 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -28,8 +28,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [botocore, boto3] ''' EXAMPLES = ''' diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 05d92310013..06885dfcd13 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -14,9 +14,6 @@ version_added: 1.0.0 author: "Gerben Geijteman (@hyperized)" short_description: lists S3 buckets in AWS -requirements: - - boto3 >= 1.4.4 - - python >= 2.6 description: - Lists S3 buckets and details about those buckets. - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts). diff --git a/aws_secret.py b/aws_secret.py index 22141ce24a6..86c6d6e3521 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -15,7 +15,6 @@ description: - Create, update, and delete secrets stored in AWS Secrets Manager. author: "REY Remi (@rrey)" -requirements: [ 'botocore>=1.10.0', 'boto3' ] options: name: description: diff --git a/aws_ses_identity.py b/aws_ses_identity.py index d3c88156114..caa250c220c 100644 --- a/aws_ses_identity.py +++ b/aws_ses_identity.py @@ -85,7 +85,6 @@ - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics. type: 'bool' default: True -requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_ses_identity_policy.py b/aws_ses_identity_policy.py index bb743c6b14e..e7f9e9417e9 100644 --- a/aws_ses_identity_policy.py +++ b/aws_ses_identity_policy.py @@ -36,7 +36,6 @@ default: present choices: [ 'present', 'absent' ] type: str -requirements: [ 'botocore', 'boto3' ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index 0996497c5ca..9b0b66cc30f 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -16,7 +16,6 @@ author: - "Ben Tomasik (@tomislacker)" - "Ed Costello (@orthanc)" -requirements: [ boto3, botocore ] options: name: description: diff --git a/aws_sgw_info.py b/aws_sgw_info.py index adf7dde86aa..fac2e346095 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -16,7 +16,6 @@ description: - Fetch AWS Storage Gateway information - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: Loic Blot (@nerzhul) options: gather_local_disks: diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 3d49c133048..856f7eec8a7 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -83,8 +83,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ botocore, boto3 ] ''' EXAMPLES = ''' diff --git a/aws_waf_info.py b/aws_waf_info.py index 98840668656..9a1015d6858 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -13,7 +13,6 @@ description: - Retrieve information for WAF ACLs, Rule , Conditions and Filters. - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change. 
-requirements: [ boto3 ] options: name: description: diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 8eab5325be3..e9ef34a20e7 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Module retrieves a value from CloudFormation Exports -requirements: ['boto3 >= 1.11.15'] author: - "Michael Moyle (@mmoyle)" extends_documentation_fragment: diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index ac7b57bfe9d..72b6aa05bef 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -172,8 +172,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ boto3>=1.6, botocore>=1.10.26 ] ''' EXAMPLES = r''' diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index e62fc89bfa4..075a106246d 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -17,11 +17,6 @@ description: - Allows for easy creation, updating and deletion of CloudFront distributions. -requirements: - - boto3 >= 1.0.0 - - python >= 2.6 - - author: - Willem van Ketwich (@wilvk) - Will Thames (@willthames) diff --git a/cloudfront_info.py b/cloudfront_info.py index 2b0edcaf841..e5cf39ebb4b 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -15,9 +15,6 @@ - Gets information about an AWS CloudFront distribution. - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts). Note that the M(community.aws.cloudfront_info) module no longer returns C(ansible_facts)! -requirements: - - boto3 >= 1.0.0 - - python >= 2.6 author: Willem van Ketwich (@wilvk) options: distribution_id: diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 974358f3967..4fb602f7a77 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -16,11 +16,6 @@ description: - Allows for invalidation of a batch of paths for a CloudFront distribution. -requirements: - - boto3 >= 1.0.0 - - python >= 2.6 - - author: Willem van Ketwich (@wilvk) extends_documentation_fragment: diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 7370f98625c..9fc83f64820 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -19,11 +19,6 @@ - Allows for easy creation, updating and deletion of origin access identities. -requirements: - - boto3 >= 1.0.0 - - python >= 2.6 - - author: Willem van Ketwich (@wilvk) extends_documentation_fragment: diff --git a/cloudtrail.py b/cloudtrail.py index a2f2076993f..d30466710eb 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -17,9 +17,6 @@ - Ansible Core Team - Ted Timmons (@tedder) - Daniel Shepherd (@shepdelacreme) -requirements: - - boto3 - - botocore options: state: description: diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 8de7dc7d291..e7a200dd960 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -18,9 +18,6 @@ - amazon.aws.ec2 author: "Jim Dalton (@jsdalton) " -requirements: - - python >= 2.6 - - boto3 notes: - A rule must contain at least an I(event_pattern) or I(schedule_expression). A rule can have both an I(event_pattern) and a I(schedule_expression), in which diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index a5e9ab3192d..295ff48e669 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -18,7 +18,6 @@ - Create or delete log_group in CloudWatchLogs. 
author: - Willian Ricardo (@willricardo) -requirements: [ json, botocore, boto3 ] options: state: description: diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index a7f311826e9..c53e501717f 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -17,7 +17,6 @@ - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change. author: - Willian Ricardo (@willricardo) -requirements: [ botocore, boto3 ] options: log_group_name: description: diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py index 04d0219e48b..26cbc65ce34 100644 --- a/cloudwatchlogs_log_group_metric_filter.py +++ b/cloudwatchlogs_log_group_metric_filter.py @@ -15,9 +15,6 @@ description: - Create, modify and delete CloudWatch log group metric filter. - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). -requirements: - - boto3 - - botocore options: state: description: diff --git a/data_pipeline.py b/data_pipeline.py index a3821a068d8..d10e7989f32 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -14,7 +14,6 @@ author: - Raghu Udiyar (@raags) - Sloane Hertel (@s-hertel) -requirements: [ "boto3" ] short_description: Create and manage AWS Datapipelines extends_documentation_fragment: - amazon.aws.aws diff --git a/dynamodb_table.py b/dynamodb_table.py index 35d9cd4d64a..db9710f12d6 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -17,8 +17,10 @@ - Returns the status of the specified table. author: Alan Loi (@loia) requirements: - - "boto >= 2.37.0" - - "boto3 >= 1.4.4 (for tagging)" +- python >= 3.6 +- boto >= 2.49.0 +- boto3 >= 1.13.0 +- botocore >= 1.16.0 options: state: description: diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 95bf95ffe78..490a948f9a9 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -12,8 +12,7 @@ version_added: 1.0.0 short_description: Set TTL for a given DynamoDB table description: -- Uses boto3 to set TTL. -- Requires botocore version 1.5.24 or higher. +- Sets the TTL for a given DynamoDB table. options: state: description: @@ -37,8 +36,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ botocore>=1.5.24, boto3 ] ''' EXAMPLES = ''' diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index a3a23454b28..15acfe4e4a9 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -72,9 +72,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 ''' EXAMPLES = ''' diff --git a/ec2_asg.py b/ec2_asg.py index 68a0f9ec77c..59e74040d64 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -15,7 +15,6 @@ - Can create or delete AWS AutoScaling Groups. - Can be used with the M(community.aws.ec2_lc) module to manage Launch Configurations. author: "Gareth Rushgrove (@garethr)" -requirements: [ "boto3", "botocore" ] options: state: description: diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 3c809e069be..0a6cb27d9b0 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -14,7 +14,6 @@ description: - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change. 
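The rule of thumb patch 208 applies, visible in the dynamodb_table hunk above and the ec2_elb hunk below: boto3-based modules drop their per-module requirements lists now that python >= 3.6 and boto3 are collection-wide defaults, while modules still tied to the legacy boto library get an explicit block so the exception stays documented. A hypothetical docstring for each convention (module names are placeholders):

# boto3-based module: no requirements block; the shared doc fragments
# already describe the collection-wide floor.
DOCUMENTATION = '''
module: some_boto3_module
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''

# Legacy boto module: the exception is spelled out per module.
LEGACY_DOCUMENTATION = '''
module: some_boto_module
requirements:
- python >= 2.6
- boto >= 2.49.0
'''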
-requirements: [ boto3 ] author: "Rob White (@wimnat)" options: name: diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index bab1ef37f32..fbdc4a3150d 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -74,8 +74,6 @@ - amazon.aws.aws - amazon.aws.ec2 -requirements: [ boto3>=1.4.4 ] - ''' EXAMPLES = ''' diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 5343b316a4f..9c00783a58a 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -14,7 +14,6 @@ description: - Manage an AWS customer gateway. author: Michael Baydoun (@MichaelBaydoun) -requirements: [ botocore, boto3 ] notes: - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 12c6320e6a8..ab8ac41e505 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -14,7 +14,6 @@ description: - Gather information about customer gateways in AWS. - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: Madhura Naniwadekar (@Madhura-CSI) options: filters: diff --git a/ec2_elb.py b/ec2_elb.py index f2c124e6e00..1ac6e314b92 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -53,10 +53,12 @@ If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. default: 0 type: int +requirements: +- python >= 2.6 +- boto >= 2.49.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - ''' EXAMPLES = r""" diff --git a/ec2_elb_info.py b/ec2_elb_info.py index b18e502de34..5eb46ee3fa6 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -37,7 +37,9 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r''' diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 691dc0306b0..cebae8b2fec 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -16,9 +16,6 @@ - The M(amazon.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all parameters on those tasks, be passed a Launch Template which contains settings like instance size, disk type, subnet, and more. -requirements: - - botocore - - boto3 >= 1.6.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/ec2_lc.py b/ec2_lc.py index 1ba881dc245..9aaa96538db 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -184,10 +184,6 @@ - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 >= 1.4.4 - ''' EXAMPLES = r''' diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 1db33a20036..6657de27349 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -38,9 +38,6 @@ - How many results to show. - Corresponds to Python slice notation like list[:limit]. type: int -requirements: - - "python >= 2.6" - - boto3 extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws diff --git a/ec2_lc_info.py b/ec2_lc_info.py index 1a51eb580b3..d3b81deaa75 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -16,7 +16,6 @@ - Gather information about AWS Autoscaling Launch Configurations. - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change. 
author: "Loïc Latreille (@psykotox)" -requirements: [ boto3 ] options: name: description: diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 2bf1d723b7e..5ad307dd693 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -56,9 +56,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 ''' EXAMPLES = ''' diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index a0595b4b7e1..8435491388a 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -14,7 +14,6 @@ - Creates AWS Transit Gateways. - Deletes AWS Transit Gateways. - Updates tags on existing transit gateways. -requirements: [ 'botocore', 'boto3' ] options: asn: description: diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 707e375a7ee..c23289eaa1c 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -14,9 +14,6 @@ description: - Gather information about ec2 transit gateways in AWS author: "Bob Boldin (@BobBoldin)" -requirements: - - botocore - - boto3 options: transit_gateway_ids: description: diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 2aa4441fac7..62424f93a11 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -14,7 +14,6 @@ - Creates AWS VPC endpoints. - Deletes AWS VPC endpoints. - This module supports check mode. -requirements: [ boto3 ] options: vpc_id: description: diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index d990a908943..fabeb46afe4 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -12,7 +12,6 @@ description: - Gets various details related to AWS VPC endpoints. - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] options: query: description: diff --git a/ec2_vpc_endpoint_service_info.py b/ec2_vpc_endpoint_service_info.py index 2afd0e5e906..8dee0652b84 100644 --- a/ec2_vpc_endpoint_service_info.py +++ b/ec2_vpc_endpoint_service_info.py @@ -11,7 +11,6 @@ version_added: 1.5.0 description: - Gets details related to AWS VPC Endpoint Services. -requirements: [ boto3 ] options: filters: description: diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py index 0d649d2131c..c578df12939 100644 --- a/ec2_vpc_igw.py +++ b/ec2_vpc_igw.py @@ -41,10 +41,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - botocore - - boto3 ''' EXAMPLES = ''' diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py index 17e5cc805ae..00ecac957ab 100644 --- a/ec2_vpc_igw_info.py +++ b/ec2_vpc_igw_info.py @@ -14,7 +14,6 @@ description: - Gather information about internet gateways in AWS. - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: "Nick Aslanidis (@naslanidis)" options: filters: diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index da053f55a46..04da531a2f8 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -81,8 +81,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ botocore, boto3, json ] ''' EXAMPLES = r''' diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 8c905f67e58..88786bf76ca 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -14,7 +14,6 @@ - Gather information about Network ACLs in an AWS VPC - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change. 
author: "Brad Davidson (@brandond)" -requirements: [ boto3 ] options: nacl_ids: description: diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py index 87511fa2582..30a28ca1391 100644 --- a/ec2_vpc_nat_gateway.py +++ b/ec2_vpc_nat_gateway.py @@ -13,7 +13,6 @@ short_description: Manage AWS VPC NAT Gateways. description: - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids. -requirements: [boto3, botocore] options: state: description: diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py index a9337ecd9f8..5acd59a819a 100644 --- a/ec2_vpc_nat_gateway_info.py +++ b/ec2_vpc_nat_gateway_info.py @@ -13,7 +13,6 @@ description: - Gets various details related to AWS VPC Managed Nat Gateways - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] options: nat_gateway_ids: description: diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 4e1e3a1847e..c45a003903c 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -67,8 +67,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ botocore, boto3, json ] ''' EXAMPLES = ''' diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 5f3cb435de3..6b810a25099 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -13,7 +13,6 @@ description: - Gets various details related to AWS VPC Peers - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] options: peer_connection_ids: description: diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 4dd9a2cb456..77ed96696bc 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -15,7 +15,6 @@ - Deletes AWS VPN Virtual Gateways - Attaches Virtual Gateways to VPCs - Detaches Virtual Gateways from VPCs -requirements: [ boto3 ] options: state: description: diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 5a27f9d672f..7cb7b15798e 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -14,7 +14,6 @@ description: - Gather information about virtual gateways in AWS. - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] options: filters: description: diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 56bb4e9b8fd..e69d3f55e82 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -17,8 +17,6 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws - -requirements: ['boto3', 'botocore'] author: "Sloane Hertel (@s-hertel)" options: state: diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index e96583f669e..31fe02621b4 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -14,7 +14,6 @@ description: - Gather information about VPN Connections in AWS. - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: Madhura Naniwadekar (@Madhura-CSI) options: filters: diff --git a/ec2_win_password.py b/ec2_win_password.py index ed06f705485..32b45edd953 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -53,10 +53,10 @@ - amazon.aws.aws - amazon.aws.ec2 - requirements: - - cryptography - +- cryptography +- python >= 2.6 +- boto >= 2.49.0 notes: - As of Ansible 2.4, this module requires the python cryptography module rather than the older pycrypto module. 
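ec2_win_password is one of the few modules that keeps a module-specific requirement through this cleanup: GetPasswordData returns the Windows Administrator password encrypted with the instance key pair's public key, and the module decrypts it client-side, which is what cryptography is for. A hedged sketch of that step (helper name and arguments are illustrative, not the module's actual code):

import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15

def decrypt_password_data(private_key_pem_bytes, password_data_b64):
    # PasswordData is base64 ciphertext sealed with the key pair's public
    # key; only the matching private key recovers the plaintext.
    key = serialization.load_pem_private_key(private_key_pem_bytes, password=None)
    return key.decrypt(base64.b64decode(password_data_b64), PKCS1v15()).decode("utf-8")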
diff --git a/ecs_attribute.py b/ecs_attribute.py index be9210f3272..7384dfb4692 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -14,7 +14,6 @@ description: - Create, update or delete ECS container instance attributes. author: Andrej Svenke (@anryko) -requirements: [ botocore, boto3 ] options: cluster: description: diff --git a/ecs_cluster.py b/ecs_cluster.py index 87e0476be9b..3074e8914f2 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -17,7 +17,6 @@ description: - Creates or terminates ecs clusters. author: Mark Chance (@Java1Guy) -requirements: [ boto3 ] options: state: description: diff --git a/ecs_ecr.py b/ecs_ecr.py index 768589dbdff..a20262956da 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -16,7 +16,6 @@ short_description: Manage Elastic Container Registry repositories description: - Manage Elastic Container Registry repositories. -requirements: [ boto3 ] options: name: description: diff --git a/ecs_service.py b/ecs_service.py index e498bb6d827..89cfaf486aa 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -22,8 +22,6 @@ - "Darek Kaczynski (@kaczynskid)" - "Stephane Maarek (@simplesteph)" - "Zac Blazic (@zacblazic)" - -requirements: [ json, botocore, boto3 ] options: state: description: diff --git a/ecs_service_info.py b/ecs_service_info.py index 2d64a89e6dd..9b47b02a714 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -18,7 +18,6 @@ author: - "Mark Chance (@Java1Guy)" - "Darek Kaczynski (@kaczynskid)" -requirements: [ json, botocore, boto3 ] options: details: description: diff --git a/ecs_tag.py b/ecs_tag.py index 9e4f97989f8..32915d6e0c5 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -17,7 +17,6 @@ - Resources are referenced by their cluster name. author: - Michael Pechner (@mpechner) -requirements: [ boto3, botocore ] options: cluster_name: description: diff --git a/ecs_task.py b/ecs_task.py index 03295c16eac..411121372cf 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -14,7 +14,6 @@ description: - Creates or deletes instances of task definitions. author: Mark Chance (@Java1Guy) -requirements: [ json, botocore, boto3 ] options: operation: description: diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 7803b117891..beb8a0c76fc 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -14,7 +14,6 @@ description: - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS). author: Mark Chance (@Java1Guy) -requirements: [ json, botocore, boto3 ] options: state: description: diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index ef5b20c4602..ad351576dca 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -21,7 +21,6 @@ - Gustavo Maia (@gurumaia) - Mark Chance (@Java1Guy) - Darek Kaczynski (@kaczynskid) -requirements: [ json, botocore, boto3 ] options: task_definition: description: diff --git a/efs.py b/efs.py index f46c3f588c4..49fbd73c9a3 100644 --- a/efs.py +++ b/efs.py @@ -13,7 +13,6 @@ short_description: create and maintain EFS file systems description: - Module allows create, search and destroy Amazon EFS file systems. -requirements: [ boto3 ] author: - "Ryan Sydnor (@ryansydnor)" - "Artem Kazakov (@akazakov)" diff --git a/efs_info.py b/efs_info.py index 62fd583785d..2384af97ee1 100644 --- a/efs_info.py +++ b/efs_info.py @@ -15,7 +15,6 @@ - This module can be used to search Amazon EFS file systems. - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts). 
Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! -requirements: [ boto3 ] author: - "Ryan Sydnor (@ryansydnor)" options: diff --git a/elasticache.py b/elasticache.py index 53e72f664b4..3b3196dd133 100644 --- a/elasticache.py +++ b/elasticache.py @@ -15,7 +15,6 @@ description: - Manage cache clusters in Amazon ElastiCache. - Returns information about the specified cache cluster. -requirements: [ boto3 ] author: "Jim Dalton (@jsdalton)" options: state: diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index dd5dffbc4e9..4cb553931f0 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -19,7 +19,6 @@ - amazon.aws.aws - amazon.aws.ec2 -requirements: [ boto3, botocore ] options: group_family: description: diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 42920a3c2c4..d9e11345a69 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -18,8 +18,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ boto3, botocore ] options: name: description: diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index ab25e294eeb..2e4b901a2b7 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -38,7 +38,9 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r''' diff --git a/elb_application_lb.py b/elb_application_lb.py index 284d392891f..4b547ace1c2 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -25,7 +25,6 @@ short_description: Manage an Application Load Balancer description: - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. -requirements: [ boto3 ] author: "Rob White (@wimnat)" options: access_logs_enabled: diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index 3848bc4766b..d04bd0d8261 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -14,7 +14,6 @@ description: - Gather information about application ELBs in AWS - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: Rob White (@wimnat) options: load_balancer_arns: diff --git a/elb_classic_lb.py b/elb_classic_lb.py index cd7d45875d1..63cb53e6657 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -138,6 +138,9 @@ - amazon.aws.aws - amazon.aws.ec2 +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r""" diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index a1a0c39e042..f57f4dd391c 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -37,10 +37,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - botocore - - boto3 ''' EXAMPLES = r''' diff --git a/elb_instance.py b/elb_instance.py index 97682acb659..801033fc40d 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -60,7 +60,9 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r""" diff --git a/elb_network_lb.py b/elb_network_lb.py index 5eeb2ec6220..47ac7b1d0d7 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -15,7 +15,6 @@ description: - Manage an AWS Network Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details. 
-requirements: [ boto3 ] author: "Rob White (@wimnat)" options: cross_zone_load_balancing: diff --git a/elb_target_group.py b/elb_target_group.py index 53a25fa4419..7bb105b6d55 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -15,7 +15,6 @@ - Manage an AWS Elastic Load Balancer target group. See U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details. -requirements: [ boto3 ] author: "Rob White (@wimnat)" options: deregistration_delay_timeout: diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 973743766b1..59bfbbc66ff 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -14,7 +14,6 @@ description: - Gather information about ELB target groups in AWS - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: Rob White (@wimnat) options: load_balancer_arn: diff --git a/elb_target_info.py b/elb_target_info.py index 924632339de..507380c9717 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -26,10 +26,6 @@ - Whether or not to get target groups not used by any load balancers. type: bool default: true - -requirements: - - boto3 - - botocore extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/execute_lambda.py b/execute_lambda.py index e5e21eacb61..7af644810a8 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -19,9 +19,6 @@ - amazon.aws.ec2 author: "Ryan Scott Brown (@ryansb) " -requirements: - - python >= 2.6 - - boto3 notes: - Async invocation will always return an empty C(output) key. - Synchronous invocation may result in a function timeout, resulting in an diff --git a/iam.py b/iam.py index e65824a4862..fc5d3b67248 100644 --- a/iam.py +++ b/iam.py @@ -97,7 +97,9 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r''' diff --git a/iam_cert.py b/iam_cert.py index 96c9bccae7c..507b7e6f79f 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -75,12 +75,13 @@ - Defaults to C(false). type: bool -requirements: [ "boto" ] author: Jonathan I. 
Davila (@defionscode) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = ''' diff --git a/iam_group.py b/iam_group.py index af9d781a92f..7b534aa0504 100644 --- a/iam_group.py +++ b/iam_group.py @@ -67,7 +67,6 @@ required: false default: false type: bool -requirements: [ botocore, boto3 ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/iam_managed_policy.py b/iam_managed_policy.py index f0fa588c44e..a56e76d037f 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -53,10 +53,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore ''' EXAMPLES = ''' diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index e86687134a9..b04b912549c 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -23,10 +23,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore ''' RETURN = """ diff --git a/iam_password_policy.py b/iam_password_policy.py index 852deb0d10b..51291092b0b 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -14,7 +14,6 @@ short_description: Update an IAM Password Policy description: - Module updates an IAM Password Policy on a given AWS account -requirements: [ 'botocore', 'boto3' ] author: - "Aaron Smith (@slapula)" options: diff --git a/iam_role.py b/iam_role.py index ddc8ad23041..45551cdf188 100644 --- a/iam_role.py +++ b/iam_role.py @@ -89,7 +89,6 @@ - Remove tags not listed in I(tags) when tags is specified. default: true type: bool -requirements: [ botocore, boto3 ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/iam_role_info.py b/iam_role_info.py index 132bdeedcc9..0a627d10cc7 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -14,7 +14,6 @@ description: - Gathers information about IAM roles. - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] author: - "Will Thames (@willthames)" options: diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 895631b7e05..a78decfe625 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -24,8 +24,6 @@ module: iam_saml_federation version_added: 1.0.0 short_description: Maintain IAM SAML federation configuration. -requirements: - - boto3 description: - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata). options: diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 994344147e4..622e2ee8e86 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -15,7 +15,6 @@ - Retrieve the attributes of a server certificate. - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change. 
author: "Allen Sanabria (@linuxdynasty)" -requirements: [boto3, botocore] options: name: description: diff --git a/iam_user.py b/iam_user.py index 531ae6ba9ae..b88953a6868 100644 --- a/iam_user.py +++ b/iam_user.py @@ -41,7 +41,6 @@ default: false type: bool aliases: ['purge_policy', 'purge_managed_policies'] -requirements: [ botocore, boto3 ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/iam_user_info.py b/iam_user_info.py index f6aaa842eef..5ada74c612b 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -36,9 +36,6 @@ required: false default: '/' type: str -requirements: - - botocore - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/kinesis_stream.py b/kinesis_stream.py index 755cfa096d4..0f28856bfa9 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -16,7 +16,6 @@ - Update the retention period of a Kinesis Stream. - Update Tags on a Kinesis Stream. - Enable/disable server side encryption on a Kinesis Stream. -requirements: [ boto3 ] author: Allen Sanabria (@linuxdynasty) options: name: diff --git a/lambda.py b/lambda.py index c6960d99c98..0a25214ca37 100644 --- a/lambda.py +++ b/lambda.py @@ -13,7 +13,6 @@ short_description: Manage AWS Lambda functions description: - Allows for the management of Lambda functions. -requirements: [ boto3 ] options: name: description: diff --git a/lambda_alias.py b/lambda_alias.py index 9ccfbef7ea6..adc89f4cb99 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -46,8 +46,6 @@ A value of 0 (or omitted parameter) sets the alias to the $LATEST version. aliases: ['version'] type: int -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/lambda_event.py b/lambda_event.py index 3906771255f..28d1d7bdd67 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -78,8 +78,6 @@ type: str required: true type: dict -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/lambda_facts.py b/lambda_facts.py index b1a223b61db..010add7985d 100644 --- a/lambda_facts.py +++ b/lambda_facts.py @@ -39,8 +39,6 @@ - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. type: str author: Pierre Jodouin (@pjodouin) -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/lambda_info.py b/lambda_info.py index 725149d9c3b..c95c0218132 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -34,8 +34,6 @@ - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. type: str author: Pierre Jodouin (@pjodouin) -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/lambda_policy.py b/lambda_policy.py index 5c65b7969da..4fc5b084ed9 100644 --- a/lambda_policy.py +++ b/lambda_policy.py @@ -97,8 +97,6 @@ - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account). 
type: str -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/lightsail.py b/lightsail.py index a996edc5e85..75796774580 100644 --- a/lightsail.py +++ b/lightsail.py @@ -67,9 +67,6 @@ default: 300 type: int -requirements: - - boto3 - extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/rds.py b/rds.py index 1321186497a..b979a3bfaee 100644 --- a/rds.py +++ b/rds.py @@ -221,16 +221,15 @@ - Used with I(command=create), I(command=replicate), I(command=restore). - Requires boto >= 2.26.0 type: dict -requirements: - - "python >= 2.6" - - "boto" author: - "Bruce Pennypacker (@bpennypacker)" - "Will Thames (@willthames)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD diff --git a/rds_instance.py b/rds_instance.py index 69d5836004f..ea6da26f0b6 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -13,10 +13,6 @@ short_description: Manage RDS instances description: - Create, modify, and delete RDS instances. - -requirements: - - botocore - - boto3 >= 1.5.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/rds_instance_info.py b/rds_instance_info.py index cccd2b3f271..fba7804012a 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -29,9 +29,6 @@ - A filter that specifies one or more DB instances to describe. See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) type: dict -requirements: - - "python >= 2.7" - - "boto3" author: - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" diff --git a/rds_param_group.py b/rds_param_group.py index ab0718e4b04..76e6138b466 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -13,7 +13,6 @@ short_description: manage RDS parameter groups description: - Creates, modifies, and deletes RDS parameter groups. -requirements: [ boto3 ] options: state: description: diff --git a/rds_snapshot.py b/rds_snapshot.py index dd9f502886a..0ca957c1b68 100644 --- a/rds_snapshot.py +++ b/rds_snapshot.py @@ -55,9 +55,6 @@ - whether to remove tags not present in the C(tags) parameter. 
default: True type: bool -requirements: - - "python >= 2.6" - - "boto3" author: - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index 63a5e47b09b..d374520ab89 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -51,9 +51,6 @@ required: false choices: ['automated', 'manual', 'shared', 'public'] type: str -requirements: - - "python >= 2.6" - - "boto3" author: - "Will Thames (@willthames)" extends_documentation_fragment: diff --git a/redshift.py b/redshift.py index c409545e62b..41482c682b7 100644 --- a/redshift.py +++ b/redshift.py @@ -179,7 +179,6 @@ type: bool default: 'yes' version_added: "1.3.0" -requirements: [ 'boto3' ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index fbcf5543aee..3e3653473f2 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -53,7 +53,6 @@ required: true aliases: [ "retention_period" ] type: int -requirements: [ "botocore", "boto3" ] extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws diff --git a/redshift_info.py b/redshift_info.py index 679f53c58d2..bc4cb021840 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -16,7 +16,6 @@ description: - Gather information about Redshift cluster(s). - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change. -requirements: [ boto3 ] options: cluster_identifier: description: diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index be42fa2d720..42681eaf5e0 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -40,11 +40,12 @@ aliases: ['subnets'] type: list elements: str -requirements: [ 'boto' ] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = r''' diff --git a/route53.py b/route53.py index ffbfdf4fc91..d1391cfac58 100644 --- a/route53.py +++ b/route53.py @@ -13,7 +13,6 @@ --- module: route53 version_added: 1.0.0 -requirements: [ "boto3", "botocore" ] short_description: add or delete entries in Amazon's Route 53 DNS service description: - Creates and deletes DNS records in Amazon's Route 53 service. diff --git a/route53_health_check.py b/route53_health_check.py index 03ac8b09af0..8f89aec5936 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -81,7 +81,9 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +requirements: +- python >= 2.6 +- boto >= 2.49.0 ''' EXAMPLES = ''' diff --git a/route53_zone.py b/route53_zone.py index 6467dd04527..cdc5538c027 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Creates and deletes Route53 private and public zones. -requirements: [ boto3 ] options: zone: description: diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index f42c64a0028..cb398f5ac11 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -81,8 +81,6 @@ - Optional suffix to limit the notifications to objects with keys that end with matching characters.
type: str -requirements: - - boto3 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/s3_sync.py b/s3_sync.py index 589dcd5ba3b..1e7d01680f1 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -129,11 +129,6 @@ - The I(retries) option does nothing and will be removed after 2022-06-01 type: str -requirements: - - boto3 >= 1.4.4 - - botocore - - python-dateutil - author: Ted Timmons (@tedder) extends_documentation_fragment: - amazon.aws.aws diff --git a/s3_website.py b/s3_website.py index 57251826a04..4b9e911662f 100644 --- a/s3_website.py +++ b/s3_website.py @@ -13,7 +13,6 @@ short_description: Configure an s3 bucket as a website description: - Configure an s3 bucket as a website -requirements: [ boto3 ] author: Rob White (@wimnat) options: name: diff --git a/sns.py b/sns.py index 2840ddd2ac9..a18c3279173 100644 --- a/sns.py +++ b/sns.py @@ -81,10 +81,6 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws - -requirements: - - boto3 - - botocore ''' EXAMPLES = """ diff --git a/sns_topic.py b/sns_topic.py index 1be60a38ec9..10d98e3034d 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -69,8 +69,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: [ "boto" ] ''' EXAMPLES = r""" diff --git a/sqs_queue.py b/sqs_queue.py index b76cdb31410..d32732c8fc8 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -19,8 +19,6 @@ - Fernando Jose Pando (@nand0p) - Nadir Lloret (@nadirollo) - Dennis Podkovyrin (@sbj-ss) -requirements: - - boto3 options: state: description: diff --git a/sts_assume_role.py b/sts_assume_role.py index d1203a3c5a5..c7435ad6fdc 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -55,11 +55,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore - - python >= 2.6 ''' RETURN = ''' diff --git a/sts_session_token.py b/sts_session_token.py index 7e51fb08ac3..137d03c8e0d 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -34,11 +34,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - -requirements: - - boto3 - - botocore - - python >= 2.6 ''' RETURN = """ diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index f183211bcf8..179b065cf4d 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -14,9 +14,6 @@ short_description: wafv2_ip_set description: - Create, modify and delete IP sets for WAFv2. -requirements: - - boto3 - - botocore options: state: description: diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index 23b3abed4ec..251fc0b5483 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -14,9 +14,6 @@ short_description: Get information about wafv2 ip sets description: - Get information about existing wafv2 ip sets. -requirements: - - boto3 - - botocore options: name: description: diff --git a/wafv2_resources.py b/wafv2_resources.py index 4bf5f1dcca3..70dffaa5f0d 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -14,9 +14,6 @@ short_description: wafv2_web_acl description: - Apply or remove a WAFv2 web ACL to/from other AWS resources. -requirements: - - boto3 - - botocore options: state: description: diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 469fc3b7184..5add2169cf9 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -14,9 +14,6 @@ short_description: wafv2_resources_info description: - List web acl resources.
-requirements: - - boto3 - - botocore options: name: description: diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index d66d4864bb5..474897f8ca9 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -14,9 +14,6 @@ short_description: wafv2_web_acl description: - Create, modify and delete wafv2 rule groups. -requirements: - - boto3 - - botocore options: state: description: diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index afd399574ff..ce50b2983f4 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -14,9 +14,6 @@ short_description: wafv2_web_acl_info description: - Get information about existing wafv2 rule groups. -requirements: - - boto3 - - botocore options: state: description: diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index ee70c65e54c..63b7ca38fcb 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -14,9 +14,6 @@ short_description: wafv2_web_acl description: - Create, modify or delete a wafv2 web acl. -requirements: - - boto3 - - botocore options: state: description: diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 8b8c79ef75a..0e6a5d4f4ba 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -14,9 +14,6 @@ short_description: wafv2_web_acl description: - Info about web acl -requirements: - - boto3 - - botocore options: name: description: From 1d5c68bab703660e966eb69849fef0081fb67396 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 25 Jun 2021 10:08:27 +0200 Subject: [PATCH 209/683] Also remove python >= 2.6 from boto based modules --- ec2_elb.py | 1 - ec2_elb_info.py | 1 - ec2_win_password.py | 1 - elasticache_subnet_group.py | 1 - elb_classic_lb.py | 1 - elb_instance.py | 1 - iam.py | 1 - iam_cert.py | 1 - rds.py | 1 - redshift_subnet_group.py | 1 - route53_health_check.py | 1 - 11 files changed, 11 deletions(-) diff --git a/ec2_elb.py b/ec2_elb.py index 1ac6e314b92..189676ea970 100644 --- a/ec2_elb.py +++ b/ec2_elb.py @@ -54,7 +54,6 @@ default: 0 type: int requirements: -- python >= 2.6 - boto >= 2.49.0 extends_documentation_fragment: - amazon.aws.aws diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 5eb46ee3fa6..2b58b4aa5a3 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -38,7 +38,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/ec2_win_password.py b/ec2_win_password.py index 32b45edd953..3ed0afb79d4 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -55,7 +55,6 @@ requirements: - cryptography -- python >= 2.6 - boto >= 2.49.0 notes: - As of Ansible 2.4, this module requires the python cryptography module rather than the diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 2e4b901a2b7..44a3e39ae6f 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -39,7 +39,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/elb_classic_lb.py b/elb_classic_lb.py index 63cb53e6657..db4afaaf457 100644 --- a/elb_classic_lb.py +++ b/elb_classic_lb.py @@ -139,7 +139,6 @@ - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/elb_instance.py b/elb_instance.py index 801033fc40d..d522d3e72ad 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -61,7 +61,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/iam.py b/iam.py index fc5d3b67248..67c55a277c5 100644 --- a/iam.py +++ b/iam.py @@ -98,7 +98,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -
python >= 2.6 - boto >= 2.49.0 ''' diff --git a/iam_cert.py b/iam_cert.py index 507b7e6f79f..fbe984670aa 100644 --- a/iam_cert.py +++ b/iam_cert.py @@ -80,7 +80,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/rds.py b/rds.py index b979a3bfaee..817f53ff779 100644 --- a/rds.py +++ b/rds.py @@ -228,7 +228,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 42681eaf5e0..fa210a5bee4 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -44,7 +44,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' diff --git a/route53_health_check.py b/route53_health_check.py index 8f89aec5936..7db80d875a5 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -82,7 +82,6 @@ - amazon.aws.aws - amazon.aws.ec2 requirements: -- python >= 2.6 - boto >= 2.49.0 ''' From e1b8caca5f6007ca11f44ce6a9a5e6bbab17f5fd Mon Sep 17 00:00:00 2001 From: Vikalp Jain Date: Tue, 15 Jun 2021 04:15:36 +0530 Subject: [PATCH 210/683] fix: typo for verifying environmentFiles --- ecs_taskdefinition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index beb8a0c76fc..6696e92acb3 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -798,7 +798,7 @@ def main(): environment['value'] = to_text(environment['value']) for environment_file in container.get('environmentFiles', []): - if environment_file['value'] != 's3': + if environment_file['type'] != 's3': module.fail_json(msg='The only supported value for environmentFiles is s3.') for linux_param in container.get('linuxParameters', {}): From 17ce59231907de75adf2c6d833e1d816e0e7e9e4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 28 May 2021 12:28:17 +0200 Subject: [PATCH 211/683] Add a redirect from ec2_elb to elb_instance - the code is a simple copy --- ec2_elb.py | 367 ----------------------------------------------------- 1 file changed, 367 deletions(-) delete mode 100644 ec2_elb.py diff --git a/ec2_elb.py b/ec2_elb.py deleted file mode 100644 index 189676ea970..00000000000 --- a/ec2_elb.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_elb -version_added: 1.0.0 -short_description: De-registers or registers instances from EC2 ELBs -description: - - This module de-registers or registers an AWS EC2 instance from the ELBs - that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - - Will be marked changed when called only if there are ELBs found to operate on. -author: "John Jarvis (@jarv)" -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - type: str - instance_id: - description: - - EC2 Instance ID - required: true - type: str - ec2_elbs: - description: - - List of ELB names, required for registration. - - The ec2_elbs fact should be used if there was a previous de-register. 
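For the environmentFiles validation fixed in the ecs_taskdefinition hunk above: each entry in a container's environmentFiles list pairs a type key (ECS accepts only s3) with a value key holding the S3 ARN of the env file, which is why the check has to read type rather than value. A minimal standalone sketch of the corrected check, using hypothetical container data:

container = {
    'name': 'web',
    'environmentFiles': [
        # Hypothetical entry: 'type' must be 's3', 'value' is an S3 object ARN.
        {'type': 's3', 'value': 'arn:aws:s3:::example-bucket/prod.env'},
    ],
}

for environment_file in container.get('environmentFiles', []):
    # Checking 'type' (not 'value') mirrors the corrected condition above.
    if environment_file['type'] != 's3':
        raise ValueError('The only supported value for environmentFiles is s3.')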
- type: list - elements: str - enable_availability_zone: - description: - - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. - type: bool - default: 'yes' - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. - type: bool - default: 'yes' - wait_timeout: - description: - - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. - If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. - default: 0 - type: int -requirements: -- boto >= 2.49.0 -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -''' - -EXAMPLES = r""" -# basic pre_task and post_task example -pre_tasks: - - name: Instance De-register - community.aws.ec2_elb: - instance_id: "{{ ansible_ec2_instance_id }}" - state: absent -roles: - - myrole -post_tasks: - - name: Instance Register - community.aws.ec2_elb: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" - state: present - loop: "{{ ec2_elbs }}" -""" - -import time - -try: - import boto - import boto.ec2 - import boto.ec2.autoscale - import boto.ec2.elb -except ImportError: - pass # Handled by HAS_BOTO - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None, - region=None, **aws_connect_params): - self.module = module - self.instance_id = instance_id - self.region = region - self.aws_connect_params = aws_connect_params - self.lbs = self._get_instance_lbs(ec2_elbs) - self.changed = False - - def deregister(self, wait, timeout): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - if initial_state is None: - # Instance isn't registered with this load - # balancer. Ignore it and try the next one. - continue - - # The instance is not associated with any load balancer so nothing to do - if not self._get_instance_lbs(): - return - - lb.deregister_instances([self.instance_id]) - - # The ELB is changing state in some way. Either an instance that's - # InService is moving to OutOfService, or an instance that's - # already OutOfService is being deregistered. 
- self.changed = True - - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) - - def register(self, wait, enable_availability_zone, timeout): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - - if enable_availability_zone: - self._enable_availability_zone(lb) - - lb.register_instances([self.instance_id]) - - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state, timeout) - else: - # We cannot assume no change was made if we don't wait - # to find out - self.changed = True - - def exists(self, lbtest): - """ Verify that the named ELB actually exists """ - - found = False - for lb in self.lbs: - if lb.name == lbtest: - found = True - break - return found - - def _enable_availability_zone(self, lb): - """Enable the current instance's availability zone in the provided lb. - Returns True if the zone was enabled or False if no change was made. - lb: load balancer""" - instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False - - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" - - wait_timeout = time.time() + timeout - while True: - instance_state = self._get_instance_health(lb) - - if not instance_state: - msg = ("The instance %s could not be put in service on %s." - " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) - - if instance_state.state == awaited_state: - # Check the current state against the initial state, and only set - # changed if they are different. - if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks and continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance" - and time.time() >= wait_timeout): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availability zone enabled. The exact reason why is - # described in InstanceState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. - return (instance_state and 'pending' in instance_state.description) - - def _get_instance_health(self, lb): - """ - Check instance health, should return status object or None under - certain error conditions.
- """ - try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError as e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - if not ec2_elbs: - ec2_elbs = self._get_auto_scaling_group_lbs() - - try: - elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - - elbs = [] - marker = None - while True: - try: - newelbs = elb.get_all_load_balancers(marker=marker) - marker = newelbs.next_marker - elbs.extend(newelbs) - if not marker: - break - except TypeError: - # Older versions of boto do not allow for params - elbs = elb.get_all_load_balancers() - break - - if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) - return lbs - - def _get_auto_scaling_group_lbs(self): - """Returns a list of ELBs associated with self.instance_id - indirectly through its auto scaling group membership""" - - try: - asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - - asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) - if len(asg_instances) > 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") - - if not asg_instances: - asg_elbs = [] - else: - asg_name = asg_instances[0].group_name - - asgs = asg.get_all_groups([asg_name]) - if len(asgs) != 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - - asg_elbs = asgs[0].load_balancers - - return asg_elbs - - def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" - try: - ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - return ec2.get_only_instances(instance_ids=[self.instance_id])[0] - - -def main(): - argument_spec = dict( - state={'required': True, 'choices': ['present', 'absent']}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, - wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'required': False, 'default': 0, 'type': 'int'}, - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - check_boto3=False, - ) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: -
module.fail_json(msg="ELBs are required for registration") - - instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) - - if ec2_elbs is not None: - for elb in ec2_elbs: - if not elb_man.exists(elb): - msg = "ELB %s does not exist" % elb - module.fail_json(msg=msg) - - if not module.check_mode: - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': - elb_man.deregister(wait, timeout) - - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - - -if __name__ == '__main__': - main() From cf09b3f57cf47bd1d20bbb7a2f00524984f53087 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 28 May 2021 12:30:22 +0200 Subject: [PATCH 212/683] Deprecate ec2_elb_info - a boto3 version (elb_classic_lb_info) exists --- ec2_elb_info.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 2b58b4aa5a3..add102ab87a 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -21,6 +21,10 @@ --- module: ec2_elb_info version_added: 1.0.0 +deprecated: + removed_in: 3.0.0 + why: The ec2_elb_info module is based upon a deprecated version of the AWS SDK. + alternative: Use M(elb_classic_lb_info). short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - Gather information about EC2 Elastic Load Balancers in AWS @@ -225,8 +229,14 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._name == 'ec2_elb_facts': - module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", date='2021-12-01', collection_name='community.aws') + # The ec2_elb_facts alias was already deprecated + module.deprecate("The 'ec2_elb_facts' module has been deprecated and replaced by the 'elb_classic_lb_info' module", + version='3.0.0', collection_name='community.aws') + if module._name == 'ec2_elb_info': + module.deprecate("The 'ec2_elb_info' module has been deprecated and replaced by the 'elb_classic_lb_info' module", + version='3.0.0', collection_name='community.aws') if not HAS_BOTO: module.fail_json(msg='boto required for this module') From 1386df31f0c60562a9bb0f8c42f08d0f4e118911 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 28 May 2021 12:50:07 +0200 Subject: [PATCH 213/683] Redirect elb_classic_lb to amazon.aws.ec2_elb_lb - it's the same code --- elb_classic_lb.py | 1356 --------------------------------------------- 1 file changed, 1356 deletions(-) delete mode 100644 elb_classic_lb.py diff --git a/elb_classic_lb.py b/elb_classic_lb.py deleted file mode 100644 index db4afaaf457..00000000000 --- a/elb_classic_lb.py +++ /dev/null @@ -1,1356 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: elb_classic_lb -version_added: 1.0.0 -description: - - Returns information about the load balancer. - - Will be marked changed when called only if state is changed. -short_description: Creates or destroys an Amazon ELB. -author: - - "Jim Dalton (@jsdalton)" -options: - state: - description: - - Create or destroy the ELB.
- choices: ["present", "absent"] - required: true - type: str - name: - description: - - The name of the ELB. - required: true - type: str - listeners: - description: - - List of ports/protocols for this ELB to listen on (see example). - type: list - elements: dict - purge_listeners: - description: - - Purge existing listeners on ELB that are not found in listeners. - type: bool - default: true - instance_ids: - description: - - List of instance ids to attach to this ELB. - type: list - elements: str - purge_instance_ids: - description: - - Purge existing instance ids on ELB that are not found in I(instance_ids). - type: bool - default: false - zones: - description: - - List of availability zones to enable on this ELB. - type: list - elements: str - purge_zones: - description: - - Purge existing availability zones on ELB that are not found in zones. - type: bool - default: false - security_group_ids: - description: - - A list of security groups to apply to the ELB. - type: list - elements: str - security_group_names: - description: - - A list of security group names to apply to the ELB. - type: list - elements: str - health_check: - description: - - An associative array of health check configuration settings (see example). - type: dict - access_logs: - description: - - An associative array of access logs configuration settings (see example). - type: dict - subnets: - description: - - A list of VPC subnets to use when creating the ELB. Zones should be empty if using this. - type: list - elements: str - purge_subnets: - description: - - Purge existing subnets on ELB that are not found in subnets. - type: bool - default: false - scheme: - description: - - The scheme to use when creating the ELB. - - For a private VPC-visible ELB use C(internal). - - If you choose to update your scheme with a different value the ELB will be destroyed and - recreated. To update scheme you must set I(wait=true). - choices: ["internal", "internet-facing"] - default: 'internet-facing' - type: str - validate_certs: - description: - - When set to C(false), SSL certificates will not be validated for boto versions >= 2.6.0. - type: bool - default: true - connection_draining_timeout: - description: - - Wait a specified timeout allowing connections to drain before terminating an instance. - type: int - idle_timeout: - description: - - ELB connections from clients and to servers are timed out after this amount of time. - type: int - cross_az_load_balancing: - description: - - Distribute load across all configured Availability Zones. - - Defaults to C(false). - type: bool - stickiness: - description: - - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example). - type: dict - wait: - description: - - When specified, Ansible will check the status of the load balancer to ensure it has been successfully - removed from AWS. - type: bool - default: false - wait_timeout: - description: - - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. - A maximum of C(600) seconds (10 minutes) is allowed. - default: 60 - type: int - tags: - description: - - An associative array of tags. To delete all tags, supply an empty dict. - type: dict - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -requirements: -- boto >= 2.49.0 -''' - -EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set.
- -# Basic provisioning example (non-VPC) - -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - proxy_protocol: True - - protocol: https - load_balancer_port: 443 - instance_protocol: http # optional, defaults to value of protocol setting - instance_port: 80 - # ssl certificate required for https or ssl - ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" - delegate_to: localhost - -# Internal ELB example - -- community.aws.elb_classic_lb: - name: "test-vpc" - scheme: internal - state: present - instance_ids: - - i-abcd1234 - purge_instance_ids: true - subnets: - - subnet-abcd1234 - - subnet-1a2b3c4d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - delegate_to: localhost - -# Configure a health check and the access logs -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - healthy_threshold: 10 - access_logs: - interval: 5 # minutes (defaults to 60) - s3_location: "my-bucket" # This value is required if access_logs is set - s3_prefix: "logs" - delegate_to: localhost - -# Ensure ELB is gone -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: absent - delegate_to: localhost - -# Ensure ELB is gone and wait for check (for default timeout) -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: absent - wait: yes - delegate_to: localhost - -# Ensure ELB is gone and wait for check with timeout value -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: absent - wait: yes - wait_timeout: 600 - delegate_to: localhost - -# Normally, this module will purge any listeners that exist on the ELB -# but aren't specified in the listeners parameter. If purge_listeners is -# false it leaves them alone -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_listeners: no - delegate_to: localhost - -# Normally, this module will leave availability zones that are enabled -# on the ELB alone. If purge_zones is true, then any extraneous zones -# will be removed -- community.aws.elb_classic_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - delegate_to: localhost - -# Creates an ELB and assigns a list of subnets to it.
-- community.aws.elb_classic_lb: - state: present - name: 'New ELB' - security_group_ids: 'sg-123456, sg-67890' - region: us-west-2 - subnets: 'subnet-123456,subnet-67890' - purge_subnets: yes - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - delegate_to: localhost - -# Create an ELB with connection draining, increased idle timeout and cross availability -# zone load balancing -- community.aws.elb_classic_lb: - name: "New ELB" - state: present - connection_draining_timeout: 60 - idle_timeout: 300 - cross_az_load_balancing: "yes" - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - delegate_to: localhost - -# Create an ELB with load balancer stickiness enabled -- community.aws.elb_classic_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - stickiness: - type: loadbalancer - enabled: yes - expiration: 300 - delegate_to: localhost - -# Create an ELB with application stickiness enabled -- community.aws.elb_classic_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - stickiness: - type: application - enabled: yes - cookie: SESSIONID - delegate_to: localhost - -# Create an ELB and add tags -- community.aws.elb_classic_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - tags: - Name: "New ELB" - stack: "production" - client: "Bob" - delegate_to: localhost - -# Delete all tags from an ELB -- community.aws.elb_classic_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - tags: {} - delegate_to: localhost -""" - -import random -import time -import traceback - -try: - import boto - import boto.ec2.elb - import boto.ec2.elb.attributes - import boto.vpc - from boto.ec2.elb.healthcheck import HealthCheck - from boto.ec2.tag import Tag -except ImportError: - pass # Handled by HAS_BOTO - -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO - - -def _throttleable_operation(max_retries): - def _operation_wrapper(op): - def _do_op(*args, **kwargs): - retry = 0 - while True: - try: - return op(*args, **kwargs) - except boto.exception.BotoServerError as e: - if retry < max_retries and e.code in \ - ("Throttling", "RequestLimitExceeded"): - retry = retry + 1 - time.sleep(min(random.random() * (2 ** retry), 300)) - continue - else: - raise - return _do_op - return _operation_wrapper - - -def _get_vpc_connection(module, region, aws_connect_params): - try: - return connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json(msg=str(e)) - - -_THROTTLING_RETRIES = 5 - 
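A usage sketch of the _throttleable_operation decorator defined above: a wrapped boto call is retried on Throttling/RequestLimitExceeded with exponential backoff plus jitter, while any other error propagates immediately (the _describe_lbs helper below is hypothetical, not part of the module):

@_throttleable_operation(_THROTTLING_RETRIES)
def _describe_lbs(elb_conn):
    # Re-issued up to _THROTTLING_RETRIES times on throttling errors,
    # sleeping min(random() * 2**retry, 300) seconds between attempts.
    return elb_conn.get_all_load_balancers()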
- -class ElbManager(object): - """Handles ELB creation and destruction""" - - def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, - health_check=None, subnets=None, purge_subnets=None, - scheme="internet-facing", connection_draining_timeout=None, - idle_timeout=None, - cross_az_load_balancing=None, access_logs=None, - stickiness=None, wait=None, wait_timeout=None, tags=None, - region=None, - instance_ids=None, purge_instance_ids=None, **aws_connect_params): - - self.module = module - self.name = name - self.listeners = listeners - self.purge_listeners = purge_listeners - self.instance_ids = instance_ids - self.purge_instance_ids = purge_instance_ids - self.zones = zones - self.purge_zones = purge_zones - self.security_group_ids = security_group_ids - self.health_check = health_check - self.subnets = subnets - self.purge_subnets = purge_subnets - self.scheme = scheme - self.connection_draining_timeout = connection_draining_timeout - self.idle_timeout = idle_timeout - self.cross_az_load_balancing = cross_az_load_balancing - self.access_logs = access_logs - self.stickiness = stickiness - self.wait = wait - self.wait_timeout = wait_timeout - self.tags = tags - - self.aws_connect_params = aws_connect_params - self.region = region - - self.changed = False - self.status = 'gone' - self.elb_conn = self._get_elb_connection() - - try: - self.elb = self._get_elb() - except boto.exception.BotoServerError as e: - module.fail_json(msg='unable to get all load balancers: %s' % to_native(e), exception=traceback.format_exc()) - - self.ec2_conn = self._get_ec2_connection() - - @_throttleable_operation(_THROTTLING_RETRIES) - def ensure_ok(self): - """Create the ELB""" - if not self.elb: - # Zones and listeners will be added at creation - self._create_elb() - else: - if self._get_scheme(): - # the only way to change the scheme is by recreating the resource - self.ensure_gone() - self._create_elb() - else: - self._set_zones() - self._set_security_groups() - self._set_elb_listeners() - self._set_subnets() - self._set_health_check() - # boto has introduced support for some ELB attributes in - # different versions, so we check first before trying to - # set them to avoid errors - if self._check_attribute_support('connection_draining'): - self._set_connection_draining_timeout() - if self._check_attribute_support('connecting_settings'): - self._set_idle_timeout() - if self._check_attribute_support('cross_zone_load_balancing'): - self._set_cross_az_load_balancing() - if self._check_attribute_support('access_log'): - self._set_access_log() - # add sticky options - self.select_stickiness_policy() - - # ensure backend server policies are correct - self._set_backend_policies() - # set/remove instance ids - self._set_instance_ids() - - self._set_tags() - - def ensure_gone(self): - """Destroy the ELB""" - if self.elb: - self._delete_elb() - if self.wait: - elb_removed = self._wait_for_elb_removed() - # Unfortunately even though the ELB itself is removed quickly - # the interfaces take longer so reliant security groups cannot - # be deleted until the interface has registered as removed. 
- elb_interface_removed = self._wait_for_elb_interface_removed() - if not (elb_removed and elb_interface_removed): - self.module.fail_json(msg='Timed out waiting for removal of load balancer.') - - def get_info(self): - try: - check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] - except Exception: - check_elb = None - - if not check_elb: - info = { - 'name': self.name, - 'status': self.status, - 'region': self.region - } - else: - try: - lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name'] - except Exception: - lb_cookie_policy = None - try: - app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name'] - except Exception: - app_cookie_policy = None - - info = { - 'name': check_elb.name, - 'dns_name': check_elb.dns_name, - 'zones': check_elb.availability_zones, - 'security_group_ids': check_elb.security_groups, - 'status': self.status, - 'subnets': self.subnets, - 'scheme': check_elb.scheme, - 'hosted_zone_name': check_elb.canonical_hosted_zone_name, - 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, - 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy, - 'proxy_policy': self._get_proxy_protocol_policy(), - 'backends': self._get_backend_policies(), - 'instances': [instance.id for instance in check_elb.instances], - 'out_of_service_count': 0, - 'in_service_count': 0, - 'unknown_instance_state_count': 0, - 'region': self.region - } - - # status of instances behind the ELB - if info['instances']: - info['instance_health'] = [dict( - instance_id=instance_state.instance_id, - reason_code=instance_state.reason_code, - state=instance_state.state - ) for instance_state in self.elb_conn.describe_instance_health(self.name)] - else: - info['instance_health'] = [] - - # instance state counts: InService or OutOfService - if info['instance_health']: - for instance_state in info['instance_health']: - if instance_state['state'] == "InService": - info['in_service_count'] += 1 - elif instance_state['state'] == "OutOfService": - info['out_of_service_count'] += 1 - else: - info['unknown_instance_state_count'] += 1 - - if check_elb.health_check: - info['health_check'] = { - 'target': check_elb.health_check.target, - 'interval': check_elb.health_check.interval, - 'timeout': check_elb.health_check.timeout, - 'healthy_threshold': check_elb.health_check.healthy_threshold, - 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, - } - - if check_elb.listeners: - info['listeners'] = [self._api_listener_as_tuple(l) - for l in check_elb.listeners] - elif self.status == 'created': - # When creating a new ELB, listeners don't show in the - # immediately returned result, so just include the - # ones that were added - info['listeners'] = [self._listener_as_tuple(l) - for l in self.listeners] - else: - info['listeners'] = [] - - if self._check_attribute_support('connection_draining'): - info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout) - - if self._check_attribute_support('connecting_settings'): - info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout - - if self._check_attribute_support('cross_zone_load_balancing'): - is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') - if is_cross_az_lb_enabled: - info['cross_az_load_balancing'] = 'yes' - else: - info['cross_az_load_balancing'] = 'no' - - # return stickiness info? 
- - info['tags'] = self.tags - - return info - - @_throttleable_operation(_THROTTLING_RETRIES) - def _wait_for_elb_removed(self): - polling_increment_secs = 15 - max_retries = (self.wait_timeout // polling_increment_secs) - status_achieved = False - - for x in range(0, max_retries): - try: - self.elb_conn.get_all_lb_attributes(self.name) - except (boto.exception.BotoServerError, Exception) as e: - if "LoadBalancerNotFound" in e.code: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - - return status_achieved - - @_throttleable_operation(_THROTTLING_RETRIES) - def _wait_for_elb_interface_removed(self): - polling_increment_secs = 15 - max_retries = (self.wait_timeout // polling_increment_secs) - status_achieved = False - - elb_interfaces = self.ec2_conn.get_all_network_interfaces( - filters={'attachment.instance-owner-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)}) - - for x in range(0, max_retries): - for interface in elb_interfaces: - try: - result = self.ec2_conn.get_all_network_interfaces(interface.id) - if result == []: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - except (boto.exception.BotoServerError, Exception) as e: - if 'InvalidNetworkInterfaceID' in e.code: - status_achieved = True - break - else: - self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - return status_achieved - - @_throttleable_operation(_THROTTLING_RETRIES) - def _get_elb(self): - elbs = self.elb_conn.get_all_load_balancers() - for elb in elbs: - if self.name == elb.name: - self.status = 'ok' - return elb - - def _get_elb_connection(self): - try: - return connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - - def _get_ec2_connection(self): - try: - return connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, Exception) as e: - self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - @_throttleable_operation(_THROTTLING_RETRIES) - def _delete_elb(self): - # True if succeeds, exception raised if not - result = self.elb_conn.delete_load_balancer(name=self.name) - if result: - self.changed = True - self.status = 'deleted' - - def _create_elb(self): - listeners = [self._listener_as_tuple(l) for l in self.listeners] - self.elb = self.elb_conn.create_load_balancer(name=self.name, - zones=self.zones, - security_groups=self.security_group_ids, - complex_listeners=listeners, - subnets=self.subnets, - scheme=self.scheme) - if self.elb: - # HACK: Work around a boto bug in which the listeners attribute is - # always set to the listeners argument to create_load_balancer, and - # not the complex_listeners - # We're not doing a self.elb = self._get_elb here because there - # might be eventual consistency issues and it doesn't necessarily - # make sense to wait until the ELB gets returned from the EC2 API. 
- # This is necessary in the event we hit the throttling errors and - # need to retry ensure_ok - # See https://github.com/boto/boto/issues/3526 - self.elb.listeners = self.listeners - self.changed = True - self.status = 'created' - - def _create_elb_listeners(self, listeners): - """Takes a list of listener tuples and creates them""" - # True if succeeds, exception raised if not - self.changed = self.elb_conn.create_load_balancer_listeners(self.name, - complex_listeners=listeners) - - def _delete_elb_listeners(self, listeners): - """Takes a list of listener tuples and deletes them from the elb""" - ports = [l[0] for l in listeners] - - # True if succeeds, exception raised if not - self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, - ports) - - def _set_elb_listeners(self): - """ - Creates listeners specified by self.listeners; overwrites existing - listeners on these ports; removes extraneous listeners - """ - listeners_to_add = [] - listeners_to_remove = [] - listeners_to_keep = [] - - # Check for any listeners we need to create or overwrite - for listener in self.listeners: - listener_as_tuple = self._listener_as_tuple(listener) - - # First we loop through existing listeners to see if one is - # already specified for this port - existing_listener_found = None - for existing_listener in self.elb.listeners: - # Since ELB allows only one listener on each incoming port, a - # single match on the incoming port is all we're looking for - if existing_listener[0] == int(listener['load_balancer_port']): - existing_listener_found = self._api_listener_as_tuple(existing_listener) - break - - if existing_listener_found: - # Does it match exactly? - if listener_as_tuple != existing_listener_found: - # The ports are the same but something else is different, - # so we'll remove the existing one and add the new one - listeners_to_remove.append(existing_listener_found) - listeners_to_add.append(listener_as_tuple) - else: - # We already have this listener, so we're going to keep it - listeners_to_keep.append(existing_listener_found) - else: - # We didn't find an existing listener, so just add the new one - listeners_to_add.append(listener_as_tuple) - - # Check for any extraneous listeners we need to remove, if desired - if self.purge_listeners: - for existing_listener in self.elb.listeners: - existing_listener_tuple = self._api_listener_as_tuple(existing_listener) - if existing_listener_tuple in listeners_to_remove: - # Already queued for removal - continue - if existing_listener_tuple in listeners_to_keep: - # Keep this one around - continue - # Since we're not already removing it and we don't need to keep - # it, let's get rid of it - listeners_to_remove.append(existing_listener_tuple) - - if listeners_to_remove: - self._delete_elb_listeners(listeners_to_remove) - - if listeners_to_add: - self._create_elb_listeners(listeners_to_add) - - def _api_listener_as_tuple(self, listener): - """Adds ssl_certificate_id to ELB API tuple if present""" - base_tuple = listener.get_complex_tuple() - if listener.ssl_certificate_id and len(base_tuple) < 5: - return base_tuple + (listener.ssl_certificate_id,) - return base_tuple - - def _listener_as_tuple(self, listener): - """Formats listener as a 4- or 5-tuple, in the order specified by the - ELB API""" - # N.B.
string manipulations on protocols below (str(), upper()) are to - # ensure format matches output from ELB API - listener_list = [ - int(listener['load_balancer_port']), - int(listener['instance_port']), - str(listener['protocol'].upper()), - ] - - # Instance protocol is not required by ELB API; it defaults to match - # load balancer protocol. We'll mimic that behavior here - if 'instance_protocol' in listener: - listener_list.append(str(listener['instance_protocol'].upper())) - else: - listener_list.append(str(listener['protocol'].upper())) - - if 'ssl_certificate_id' in listener: - listener_list.append(str(listener['ssl_certificate_id'])) - - return tuple(listener_list) - - def _enable_zones(self, zones): - try: - self.elb.enable_zones(zones) - except boto.exception.BotoServerError as e: - self.module.fail_json(msg='unable to enable zones: %s' % to_native(e), exception=traceback.format_exc()) - - self.changed = True - - def _disable_zones(self, zones): - try: - self.elb.disable_zones(zones) - except boto.exception.BotoServerError as e: - self.module.fail_json(msg='unable to disable zones: %s' % to_native(e), exception=traceback.format_exc()) - self.changed = True - - def _attach_subnets(self, subnets): - self.elb_conn.attach_lb_to_subnets(self.name, subnets) - self.changed = True - - def _detach_subnets(self, subnets): - self.elb_conn.detach_lb_from_subnets(self.name, subnets) - self.changed = True - - def _set_subnets(self): - """Determine which subnets need to be attached or detached on the ELB""" - if self.subnets: - if self.purge_subnets: - subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - else: - subnets_to_detach = None - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - - if subnets_to_attach: - self._attach_subnets(subnets_to_attach) - if subnets_to_detach: - self._detach_subnets(subnets_to_detach) - - def _get_scheme(self): - """Determine if the current scheme is different than the scheme of the ELB""" - if self.scheme: - if self.elb.scheme != self.scheme: - if not self.wait: - self.module.fail_json(msg="Unable to modify scheme without using the wait option") - return True - return False - - def _set_zones(self): - """Determine which zones need to be enabled or disabled on the ELB""" - if self.zones: - if self.purge_zones: - zones_to_disable = list(set(self.elb.availability_zones) - - set(self.zones)) - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - else: - zones_to_disable = None - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - if zones_to_enable: - self._enable_zones(zones_to_enable) - # N.B. This must come second, in case it would have removed all zones - if zones_to_disable: - self._disable_zones(zones_to_disable) - - def _set_security_groups(self): - if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids): - self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) - self.changed = True - - def _set_health_check(self): - """Set health check values on ELB as needed""" - if self.health_check: - # This just makes it easier to compare each of the attributes - # and look for changes.
Keys are attributes of the current - # health_check; values are desired values of new health_check - health_check_config = { - "target": self._get_health_check_target(), - "timeout": self.health_check['response_timeout'], - "interval": self.health_check['interval'], - "unhealthy_threshold": self.health_check['unhealthy_threshold'], - "healthy_threshold": self.health_check['healthy_threshold'], - } - - update_health_check = False - - # The health_check attribute is *not* set on newly created - # ELBs! So we have to create our own. - if not self.elb.health_check: - self.elb.health_check = HealthCheck() - - for attr, desired_value in health_check_config.items(): - if getattr(self.elb.health_check, attr) != desired_value: - setattr(self.elb.health_check, attr, desired_value) - update_health_check = True - - if update_health_check: - self.elb.configure_health_check(self.elb.health_check) - self.changed = True - - def _check_attribute_support(self, attr): - return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) - - def _set_cross_az_load_balancing(self): - attributes = self.elb.get_attributes() - if self.cross_az_load_balancing: - if not attributes.cross_zone_load_balancing.enabled: - self.changed = True - attributes.cross_zone_load_balancing.enabled = True - else: - if attributes.cross_zone_load_balancing.enabled: - self.changed = True - attributes.cross_zone_load_balancing.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', - attributes.cross_zone_load_balancing.enabled) - - def _set_access_log(self): - attributes = self.elb.get_attributes() - if self.access_logs: - if 's3_location' not in self.access_logs: - self.module.fail_json(msg='s3_location information required') - - access_logs_config = { - "enabled": True, - "s3_bucket_name": self.access_logs['s3_location'], - "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''), - "emit_interval": self.access_logs.get('interval', 60), - } - - update_access_logs_config = False - for attr, desired_value in access_logs_config.items(): - if getattr(attributes.access_log, attr) != desired_value: - setattr(attributes.access_log, attr, desired_value) - update_access_logs_config = True - if update_access_logs_config: - self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) - self.changed = True - elif attributes.access_log.enabled: - attributes.access_log.enabled = False - self.changed = True - self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) - - def _set_connection_draining_timeout(self): - attributes = self.elb.get_attributes() - if self.connection_draining_timeout is not None: - if not attributes.connection_draining.enabled or \ - attributes.connection_draining.timeout != self.connection_draining_timeout: - self.changed = True - attributes.connection_draining.enabled = True - attributes.connection_draining.timeout = self.connection_draining_timeout - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - else: - if attributes.connection_draining.enabled: - self.changed = True - attributes.connection_draining.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - - def _set_idle_timeout(self): - attributes = self.elb.get_attributes() - if self.idle_timeout is not None: - if attributes.connecting_settings.idle_timeout != self.idle_timeout: - self.changed = True - attributes.connecting_settings.idle_timeout = self.idle_timeout - 
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) - - def _policy_name(self, policy_type): - return 'elb-classic-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict')) - - def _create_policy(self, policy_param, policy_meth, policy): - getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy) - - def _delete_policy(self, elb_name, policy): - self.elb_conn.delete_lb_policy(elb_name, policy) - - def _update_policy(self, policy_param, policy_meth, policy_attr, policy): - self._delete_policy(self.elb.name, policy) - self._create_policy(policy_param, policy_meth, policy) - - def _set_listener_policy(self, listeners_dict, policy=None): - policy = [] if policy is None else policy - - for listener_port in listeners_dict: - if listeners_dict[listener_port].startswith('HTTP'): - self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy) - - def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs): - for p in getattr(elb_info.policies, policy_attrs['attr']): - if str(p.__dict__['policy_name']) == str(policy[0]): - if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0): - self._set_listener_policy(listeners_dict) - self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0]) - self.changed = True - break - else: - self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) - self.changed = True - - self._set_listener_policy(listeners_dict, policy) - - def select_stickiness_policy(self): - if self.stickiness: - - if 'cookie' in self.stickiness and 'expiration' in self.stickiness: - self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time') - - elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0] - d = {} - for listener in elb_info.listeners: - d[listener[0]] = listener[2] - listeners_dict = d - - if self.stickiness['type'] == 'loadbalancer': - policy = [] - policy_type = 'LBCookieStickinessPolicyType' - - if self.module.boolean(self.stickiness['enabled']): - - if 'expiration' not in self.stickiness: - self.module.fail_json(msg='expiration must be set when type is loadbalancer') - - try: - expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None - except ValueError: - self.module.fail_json(msg='expiration must be set to an integer') - - policy_attrs = { - 'type': policy_type, - 'attr': 'lb_cookie_stickiness_policies', - 'method': 'create_lb_cookie_stickiness_policy', - 'dict_key': 'cookie_expiration_period', - 'param_value': expiration - } - policy.append(self._policy_name(policy_attrs['type'])) - - self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif not self.module.boolean(self.stickiness['enabled']): - if len(elb_info.policies.lb_cookie_stickiness_policies): - if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): - self.changed = True - else: - self.changed = False - self._set_listener_policy(listeners_dict) - self._delete_policy(self.elb.name, self._policy_name(policy_type)) - - elif self.stickiness['type'] == 'application': - policy = [] - policy_type = 'AppCookieStickinessPolicyType' - if self.module.boolean(self.stickiness['enabled']): - - if 'cookie' not in self.stickiness: - self.module.fail_json(msg='cookie must be set when type is application') - - policy_attrs = { - 'type': policy_type, - 'attr': 
'app_cookie_stickiness_policies', - 'method': 'create_app_cookie_stickiness_policy', - 'dict_key': 'cookie_name', - 'param_value': self.stickiness['cookie'] - } - policy.append(self._policy_name(policy_attrs['type'])) - self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif not self.module.boolean(self.stickiness['enabled']): - if len(elb_info.policies.app_cookie_stickiness_policies): - if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): - self.changed = True - self._set_listener_policy(listeners_dict) - self._delete_policy(self.elb.name, self._policy_name(policy_type)) - - else: - self._set_listener_policy(listeners_dict) - - def _get_backend_policies(self): - """Get a list of backend policies""" - policies = [] - if self.elb.backends is not None: - for backend in self.elb.backends: - if backend.policies is not None: - for policy in backend.policies: - policies.append(str(backend.instance_port) + ':' + policy.policy_name) - - return policies - - def _set_backend_policies(self): - """Sets policies for all backends""" - ensure_proxy_protocol = False - replace = [] - backend_policies = self._get_backend_policies() - - # Find out what needs to be changed - for listener in self.listeners: - want = False - - if 'proxy_protocol' in listener and listener['proxy_protocol']: - ensure_proxy_protocol = True - want = True - - if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies: - if not want: - replace.append({'port': listener['instance_port'], 'policies': []}) - elif want: - replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']}) - - # enable or disable proxy protocol - if ensure_proxy_protocol: - self._set_proxy_protocol_policy() - - # Make the backend policies so - for item in replace: - self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies']) - self.changed = True - - def _get_proxy_protocol_policy(self): - """Find out if the elb has a proxy protocol enabled""" - if self.elb.policies is not None and self.elb.policies.other_policies is not None: - for policy in self.elb.policies.other_policies: - if policy.policy_name == 'ProxyProtocol-policy': - return policy.policy_name - - return None - - def _set_proxy_protocol_policy(self): - """Install a proxy protocol policy if needed""" - proxy_policy = self._get_proxy_protocol_policy() - - if proxy_policy is None: - self.elb_conn.create_lb_policy( - self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True} - ) - self.changed = True - - # TODO: remove proxy protocol policy if not needed anymore? 
There is no side effect to leaving it there - - def _diff_list(self, a, b): - """Find the entries in list a that are not in list b""" - b = set(b) - return [aa for aa in a if aa not in b] - - def _get_instance_ids(self): - """Get the current list of instance ids installed in the elb""" - instances = [] - if self.elb.instances is not None: - for instance in self.elb.instances: - instances.append(instance.id) - - return instances - - def _set_instance_ids(self): - """Register or deregister instances from an lb instance""" - assert_instances = self.instance_ids or [] - - has_instances = self._get_instance_ids() - - add_instances = self._diff_list(assert_instances, has_instances) - if add_instances: - self.elb_conn.register_instances(self.elb.name, add_instances) - self.changed = True - - if self.purge_instance_ids: - remove_instances = self._diff_list(has_instances, assert_instances) - if remove_instances: - self.elb_conn.deregister_instances(self.elb.name, remove_instances) - self.changed = True - - def _set_tags(self): - """Add/Delete tags""" - if self.tags is None: - return - - params = {'LoadBalancerNames.member.1': self.name} - - tagdict = dict() - - # get the current list of tags from the ELB, if ELB exists - if self.elb: - current_tags = self.elb_conn.get_list('DescribeTags', params, - [('member', Tag)]) - tagdict = dict((tag.Key, tag.Value) for tag in current_tags - if hasattr(tag, 'Key')) - - # Add missing tags - dictact = dict(set(self.tags.items()) - set(tagdict.items())) - if dictact: - for i, key in enumerate(dictact): - params['Tags.member.%d.Key' % (i + 1)] = key - params['Tags.member.%d.Value' % (i + 1)] = dictact[key] - - self.elb_conn.make_request('AddTags', params) - self.changed = True - - # Remove extra tags - dictact = dict(set(tagdict.items()) - set(self.tags.items())) - if dictact: - for i, key in enumerate(dictact): - params['Tags.member.%d.Key' % (i + 1)] = key - - self.elb_conn.make_request('RemoveTags', params) - self.changed = True - - def _get_health_check_target(self): - """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() - path = "" - - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] - - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) - - -def main(): - argument_spec = dict( - state={'required': True, 'choices': ['present', 'absent']}, - name={'required': True}, - listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}, - purge_listeners={'default': True, 'required': False, 'type': 'bool'}, - instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_instance_ids={'default': False, 'required': False, 'type': 'bool'}, - zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_zones={'default': False, 'required': False, 'type': 'bool'}, - security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - health_check={'default': None, 'required': False, 'type': 'dict'}, - subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_subnets={'default': False, 'required': False, 'type': 'bool'}, - scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']}, - connection_draining_timeout={'default': None, 'required': False, 'type': 'int'}, - 
idle_timeout={'default': None, 'type': 'int', 'required': False}, - cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False}, - stickiness={'default': None, 'required': False, 'type': 'dict'}, - access_logs={'default': None, 'required': False, 'type': 'dict'}, - wait={'default': False, 'type': 'bool', 'required': False}, - wait_timeout={'default': 60, 'type': 'int', 'required': False}, - tags={'default': None, 'required': False, 'type': 'dict'}, - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[['security_group_ids', 'security_group_names']], - check_boto3=False, - ) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - name = module.params['name'] - state = module.params['state'] - listeners = module.params['listeners'] - purge_listeners = module.params['purge_listeners'] - instance_ids = module.params['instance_ids'] - purge_instance_ids = module.params['purge_instance_ids'] - zones = module.params['zones'] - purge_zones = module.params['purge_zones'] - security_group_ids = module.params['security_group_ids'] - security_group_names = module.params['security_group_names'] - health_check = module.params['health_check'] - access_logs = module.params['access_logs'] - subnets = module.params['subnets'] - purge_subnets = module.params['purge_subnets'] - scheme = module.params['scheme'] - connection_draining_timeout = module.params['connection_draining_timeout'] - idle_timeout = module.params['idle_timeout'] - cross_az_load_balancing = module.params['cross_az_load_balancing'] - stickiness = module.params['stickiness'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] - tags = module.params['tags'] - - if state == 'present' and not listeners: - module.fail_json(msg="At least one listener is required for ELB creation") - - if state == 'present' and not (zones or subnets): - module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - - if wait_timeout > 600: - module.fail_json(msg='wait_timeout maximum is 600 seconds') - - if security_group_names: - security_group_ids = [] - try: - ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) - if subnets: # We have at least one subnet, ergo this is a VPC - vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params) - vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id - filters = {'vpc_id': vpc_id} - else: - filters = None - grp_details = ec2.get_all_security_groups(filters=filters) - - for group_name in security_group_names: - if isinstance(group_name, string_types): - group_name = [group_name] - - group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name] - security_group_ids.extend(group_id) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) - - elb_man = ElbManager(module, name, listeners, purge_listeners, zones, - purge_zones, security_group_ids, health_check, - subnets, purge_subnets, scheme, - connection_draining_timeout, idle_timeout, - cross_az_load_balancing, - access_logs, stickiness, wait, wait_timeout, tags, - region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids, - **aws_connect_params) - - # check for unsupported attributes for this 
version of boto - if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'): - module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") - - if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): - module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") - - if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): - module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") - - if state == 'present': - elb_man.ensure_ok() - elif state == 'absent': - elb_man.ensure_gone() - - ansible_facts = {'ec2_elb': 'info'} - ec2_facts_result = dict(changed=elb_man.changed, - elb=elb_man.get_info(), - ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - - -if __name__ == '__main__': - main() From a8dd56c4b98fd12d75d3667db0dc50113f463ab5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 8 Jun 2021 12:48:00 +0200 Subject: [PATCH 214/683] elb_instance docs cleanup --- elb_instance.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/elb_instance.py b/elb_instance.py index d522d3e72ad..7e15354fd0e 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -21,7 +21,7 @@ options: state: description: - - register or deregister the instance + - Register or deregister the instance. required: true choices: ['present', 'absent'] type: str @@ -32,13 +32,15 @@ type: str ec2_elbs: description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. + - List of ELB names, required for registration. + - The ec2_elbs fact should be used if there was a previous de-register. type: list elements: str enable_availability_zone: description: - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. + been enabled. + - If I(enable_availability_zone=no), the task will fail if the availability zone is not enabled on the ELB. type: bool default: 'yes' wait: @@ -46,15 +48,12 @@ - Wait for instance registration or deregistration to complete successfully before returning. type: bool default: 'yes' - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - type: bool - default: 'yes' wait_timeout: description: - - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. - If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. + - Number of seconds to wait for an instance to change state. + - If I(wait_timeout=0) then this module may return an error if a transient error occurs. + - If non-zero then any transient errors are ignored until the timeout is reached. + - Ignored when I(wait=no). 
default: 0 type: int extends_documentation_fragment: From 2fc82442eeb36d22085d9c451d9449c6021477b6 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 8 Jun 2021 12:49:56 +0200 Subject: [PATCH 215/683] Ensure https://github.com/ansible/ansible/pull/31660 is applied --- elb_instance.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/elb_instance.py b/elb_instance.py index 7e15354fd0e..5759b0b2ccc 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -123,6 +123,10 @@ def deregister(self, wait, timeout): # balancer. Ignore it and try the next one. continue + # The instance is not associated with any load balancer so nothing to do + if not self._get_instance_lbs(): + return + lb.deregister_instances([self.instance_id]) # The ELB is changing state in some way. Either an instance that's From c63fbf24b3a27431c6ae37550e8f3ee999eff843 Mon Sep 17 00:00:00 2001 From: Mike Svendsen Date: Wed, 2 Jun 2021 21:16:28 -0500 Subject: [PATCH 216/683] Fix queue attribute comparison --- sqs_queue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqs_queue.py b/sqs_queue.py index d32732c8fc8..0de9d205b35 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -375,7 +375,7 @@ def update_sqs_queue(module, client, queue_url): new_value = str(new_value).lower() value = str(value).lower() - if new_value == value: + if str(new_value) == str(value): continue # Boto3 expects strings From 03e264ca4f0bdc898111dbbe9c9144835fbd3ca3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 9 Jul 2021 09:59:33 +0200 Subject: [PATCH 217/683] Remove unused imports --- ec2_vpc_endpoint.py | 1 - ec2_vpc_endpoint_info.py | 2 -- ec2_vpc_peering_info.py | 2 -- kinesis_stream.py | 5 ----- wafv2_ip_set.py | 3 ++- wafv2_ip_set_info.py | 3 ++- wafv2_resources.py | 5 +++-- wafv2_resources_info.py | 5 +++-- wafv2_rule_group.py | 3 ++- wafv2_rule_group_info.py | 5 +++-- wafv2_web_acl.py | 3 ++- wafv2_web_acl_info.py | 5 +++-- 12 files changed, 20 insertions(+), 22 deletions(-) diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py index 62424f93a11..75ba2479afe 100644 --- a/ec2_vpc_endpoint.py +++ b/ec2_vpc_endpoint.py @@ -200,7 +200,6 @@ import datetime import json -import time import traceback try: diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py index fabeb46afe4..f84434cb9af 100644 --- a/ec2_vpc_endpoint_info.py +++ b/ec2_vpc_endpoint_info.py @@ -109,8 +109,6 @@ vpc_id: "vpc-1111ffff" ''' -import json - try: import botocore except ImportError: diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 6b810a25099..048747abcd8 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -200,8 +200,6 @@ type: list ''' -import json - try: import botocore except ImportError: diff --git a/kinesis_stream.py b/kinesis_stream.py index 0f28856bfa9..f085740e318 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -180,10 +180,7 @@ } ''' -import re -import datetime import time -from functools import reduce try: import botocore.exceptions @@ -192,10 +189,8 @@ from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index 179b065cf4d..c70a0409ed1 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -116,7 +116,8 @@ returned: Always, as long as the ip set exists type: str """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list try: diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index 251fc0b5483..3a9d1f331a8 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -69,7 +69,8 @@ returned: Always, as long as the ip set exists type: str """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/wafv2_resources.py b/wafv2_resources.py index 70dffaa5f0d..11349bbd2f3 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -59,8 +59,9 @@ returned: Always, as long as the wafv2 exists type: list """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 5add2169cf9..913474fbeec 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -48,8 +48,9 @@ returned: Always, as long as the wafv2 exists type: list """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 474897f8ca9..4587a0e0959 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -200,7 +200,8 @@ metric_name: blub sampled_requests_enabled: False """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups, compare_priority_rules, wafv2_snake_dict_to_camel_dict diff --git 
a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index ce50b2983f4..e43957018a1 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -93,8 +93,9 @@ metric_name: blub sampled_requests_enabled: False """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups try: diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 63b7ca38fcb..4b65e329a94 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -190,7 +190,8 @@ metric_name: blub sampled_requests_enabled: false """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls, compare_priority_rules, wafv2_snake_dict_to_camel_dict diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 0e6a5d4f4ba..15674cd0aa6 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -88,8 +88,9 @@ metric_name: blub sampled_requests_enabled: false """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls try: From f48699211d3f00ff76e64ce19164940e0b7e1623 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 9 Jul 2021 10:19:16 +0200 Subject: [PATCH 218/683] Split imports to make merge conflicts easier to resolve --- kinesis_stream.py | 2 +- wafv2_ip_set.py | 7 ++++--- wafv2_rule_group.py | 12 ++++++++---- wafv2_web_acl.py | 12 ++++++++---- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/kinesis_stream.py b/kinesis_stream.py index f085740e318..f3ff171b421 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -183,7 +183,7 @@ import time try: - import botocore.exceptions + import botocore except ImportError: pass # Handled by AnsibleAWSModule diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index c70a0409ed1..1efaf31f77a 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -117,14 +117,15 @@ type: str """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + class IpSet: def __init__(self, wafv2, name, scope, fail_json_aws): diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 4587a0e0959..179ac2e85f2 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -201,15 +201,19 @@ sampled_requests_enabled: False """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups, compare_priority_rules, wafv2_snake_dict_to_camel_dict - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict + class RuleGroup: def __init__(self, wafv2, name, scope, fail_json_aws): diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 4b65e329a94..1476b1d48d0 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -191,15 +191,19 @@ sampled_requests_enabled: false """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls, compare_priority_rules, wafv2_snake_dict_to_camel_dict - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict + class WebACL: def __init__(self, wafv2, name, scope, fail_json_aws): From 1af39c457e4f5b59365165768bb7624d395fdf28 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 9 Jul 2021 10:22:26 +0200 Subject: [PATCH 219/683] Reorder imports based on PEP8 import recommendations --- ec2_vpc_peering_info.py | 2 +- wafv2_ip_set_info.py | 6 +++--- wafv2_resources.py | 8 ++++---- wafv2_resources_info.py | 8 ++++---- wafv2_rule_group_info.py | 8 ++++---- 
wafv2_web_acl_info.py | 8 ++++---- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 048747abcd8..f43d1378aa8 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -208,8 +208,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index 3a9d1f331a8..0c2bf3f0e4e 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -70,14 +70,14 @@ type: str """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): # there is currently no paginator for wafv2 diff --git a/wafv2_resources.py b/wafv2_resources.py index 11349bbd2f3..bbed06a0499 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -60,15 +60,15 @@ type: list """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls + def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 913474fbeec..6ab7aa04ca1 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -49,15 +49,15 @@ type: list """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls + def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: diff --git 
a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index e43957018a1..47d1e68cc55 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -94,15 +94,15 @@ sampled_requests_enabled: False """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups + def get_rule_group(wafv2, name, scope, id, fail_json_aws): try: diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 15674cd0aa6..54545c10acc 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -89,15 +89,15 @@ sampled_requests_enabled: false """ -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls + def get_web_acl(wafv2, name, scope, id, fail_json_aws): try: From 597b4ae2f6ab52cb121420b93646e053ae1ed57d Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Fri, 11 Jun 2021 17:18:06 +0200 Subject: [PATCH 220/683] Allow specifying topic type for SNS module. Add changelog fragment and extended integration test for sns_topic module --- sns_topic.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/sns_topic.py b/sns_topic.py index 10d98e3034d..dd5af417bab 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -24,6 +24,13 @@ - The name or ARN of the SNS topic to manage. required: true type: str + topic_type: + description: + - The type of topic that should be created. Either C(standard) or C(fifo) (first-in, first-out). + choices: ['standard', 'fifo'] + default: 'standard' + type: str + version_added: 2.0.0 state: description: - Whether to create or destroy an SNS topic.
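# --- Illustrative aside (not part of the patch series) ---
# A minimal sketch of what the FIFO handling added by this patch boils down
# to when calling boto3 directly. The create_topic(Name=..., Attributes=...)
# call and the 'FifoTopic' attribute are real SNS API; the topic name and the
# assumed topic_type value are hypothetical examples.
import boto3

sns = boto3.client('sns')

topic_name = 'example-topic'           # hypothetical name supplied by the user
attributes = {'FifoTopic': 'false'}

topic_type = 'fifo'                    # assume topic_type was set to 'fifo'
if topic_type == 'fifo':
    attributes['FifoTopic'] = 'true'
    if not topic_name.endswith('.fifo'):
        topic_name += '.fifo'          # SNS requires FIFO topic names to end in .fifo

response = sns.create_topic(Name=topic_name, Attributes=attributes)
print(response['TopicArn'])
# --- End aside ---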
@@ -228,6 +235,7 @@ class SnsTopicManager(object): def __init__(self, module, name, + topic_type, state, display_name, policy, @@ -239,6 +247,7 @@ def __init__(self, self.connection = module.client('sns') self.module = module self.name = name + self.topic_type = topic_type self.state = state self.display_name = display_name self.policy = policy @@ -285,9 +294,17 @@ def _topic_arn_lookup(self): return topic def _create_topic(self): + attributes = {'FifoTopic': 'false'} + tags = [] + + if self.topic_type == 'fifo': + attributes['FifoTopic'] = 'true' + if not self.name.endswith('.fifo'): + self.name = self.name + '.fifo' + if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name) + response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) self.topic_arn = response['TopicArn'] @@ -456,6 +473,7 @@ def ensure_gone(self): def get_info(self): info = { 'name': self.name, + 'topic_type': self.topic_type, 'state': self.state, 'subscriptions_new': self.subscriptions, 'subscriptions_existing': self.subscriptions_existing, @@ -479,6 +497,7 @@ def get_info(self): def main(): argument_spec = dict( name=dict(required=True), + topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']), state=dict(default='present', choices=['present', 'absent']), display_name=dict(), policy=dict(type='dict'), @@ -491,6 +510,7 @@ def main(): supports_check_mode=True) name = module.params.get('name') + topic_type = module.params.get('topic_type') state = module.params.get('state') display_name = module.params.get('display_name') policy = module.params.get('policy') @@ -501,6 +521,7 @@ def main(): sns_topic = SnsTopicManager(module, name, + topic_type, state, display_name, policy, From be0c05cf9aca25f426b99037ed1e579e3cb07b19 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 30 Jul 2021 10:34:01 +0200 Subject: [PATCH 221/683] Formally start the rds deprecation process --- rds.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/rds.py b/rds.py index 817f53ff779..a59b183925b 100644 --- a/rds.py +++ b/rds.py @@ -10,13 +10,16 @@ --- module: rds version_added: 1.0.0 +deprecated: + removed_in: 3.0.0 + why: The rds module is based upon a deprecated version of the AWS SDK. + alternative: Use M(rds_instance), M(rds_instance_info), and M(rds_snapshot). short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts description: - Creates, deletes, or modifies rds resources. - When creating an instance it can be either a new instance or a read-only replica of an existing instance. - - This module has a dependency on python-boto >= 2.5 and will soon be deprecated. - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0). - - Please use boto3 based M(community.aws.rds_instance) instead. + - Please use the boto3 based M(community.aws.rds_instance) instead. 
options: command: description: @@ -1354,6 +1357,9 @@ def main(): check_boto3=False, ) + module.deprecate("The 'rds' module has been deprecated and replaced by the 'rds_instance' module", + version='3.0.0', collection_name='community.aws') + if not HAS_BOTO: module.fail_json(msg='boto required for this module') From 142c9135593d5e1597e43008219a01c6b0bb1ff0 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 30 Jul 2021 10:51:06 +0200 Subject: [PATCH 222/683] Formally start the iam deprecation process --- iam.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/iam.py b/iam.py index 67c55a277c5..2aacd151cd1 100644 --- a/iam.py +++ b/iam.py @@ -10,6 +10,11 @@ --- module: iam version_added: 1.0.0 +deprecated: + removed_in: 3.0.0 + why: The iam module is based upon a deprecated version of the AWS SDK. + alternative: Use M(iam_user), M(iam_group), M(iam_role), M(iam_policy) and M(iam_managed_policy) modules. + short_description: Manage IAM users, groups, roles and keys description: - Allows for the management of IAM users, user API keys, groups, roles. @@ -644,6 +649,9 @@ def main(): check_boto3=False, ) + module.deprecate("The 'iam' module has been deprecated and replaced by the 'iam_user', 'iam_group'" + " and 'iam_role' modules", version='3.0.0', collection_name='community.aws') + if not HAS_BOTO: module.fail_json(msg='This module requires boto, please install it') From d2089adc82c28cc2118e23642bf7cd7466537886 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 28 Jul 2021 08:47:51 +0200 Subject: [PATCH 223/683] Update remaining _info modules so that they run in check_mode https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst#development-conventions https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html#following-ansible-conventions https://github.com/ansible/ansible/pull/75324 fixes: https://github.com/ansible-collections/community.aws/issues/659 --- aws_sgw_info.py | 6 +++++- ec2_asg_info.py | 7 ++++++- ec2_lc_info.py | 6 +++++- iam_mfa_device_info.py | 5 ++++- iam_server_certificate_info.py | 6 +++++- wafv2_resources_info.py | 3 ++- wafv2_web_acl_info.py | 3 ++- 7 files changed, 29 insertions(+), 7 deletions(-) diff --git a/aws_sgw_info.py b/aws_sgw_info.py index fac2e346095..37caabf3fd9 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -343,7 +343,11 @@ def main(): gather_volumes=dict(type='bool', default=True) ) - module = AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'aws_sgw_facts': module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", date='2021-12-01', collection_name='community.aws') client = module.client('storagegateway') diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 0a6cb27d9b0..2b8cf4bc90c 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -438,7 +438,12 @@ def main(): name=dict(type='str'), tags=dict(type='dict'), ) - module = AnsibleAWSModule(argument_spec=argument_spec) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'ec2_asg_facts': module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", date='2021-12-01', collection_name='community.aws') diff --git a/ec2_lc_info.py b/ec2_lc_info.py index d3b81deaa75..ea3832e1234 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -207,7 +207,11 @@ def main(): sort_end=dict(required=False, type='int'), ) - module =
AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'ec2_lc_facts': module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws') diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index b04b912549c..78cfe8249d0 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -87,7 +87,10 @@ def main(): user_name=dict(required=False, default=None), ) - module = AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) if module._name == 'iam_mfa_device_facts': module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws') diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 622e2ee8e86..a37c9e88c83 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -142,7 +142,11 @@ def main(): name=dict(type='str'), ) - module = AnsibleAWSModule(argument_spec=argument_spec,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if module._name == 'iam_server_certificate_facts': module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", date='2021-12-01', collection_name='community.aws') diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 6ab7aa04ca1..d45c274d481 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -93,7 +93,8 @@ def main(): ) module = AnsibleAWSModule( - argument_spec=arg_spec + argument_spec=arg_spec, + supports_check_mode=True, ) name = module.params.get("name") diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 54545c10acc..a0de1131cf6 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -119,7 +119,8 @@ def main(): ) module = AnsibleAWSModule( - argument_spec=arg_spec + argument_spec=arg_spec, + supports_check_mode=True, ) state = module.params.get("state") From ee1777723dcfeabd1c6f07c06c90c2a8574549b9 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 6 Aug 2021 18:09:30 +0200 Subject: [PATCH 224/683] Add supports_check_mode to cloudformation_exports_info cloudfront_info --- cloudformation_exports_info.py | 2 +- cloudfront_info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index e9ef34a20e7..dc8caae55a4 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -67,7 +67,7 @@ def main(): original_message='' ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) cloudformation_client = module.client('cloudformation') try: diff --git a/cloudfront_info.py b/cloudfront_info.py index e5cf39ebb4b..767557bf6e9 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -553,7 +553,7 @@ def main(): summary=dict(required=False, default=False, type='bool'), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) is_old_facts = module._name == 'cloudfront_facts' if is_old_facts: module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', " From 
068c36559e44d47cfcdb4f51a3c76327c5c5dde6 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 19 Jul 2021 10:08:23 +0200 Subject: [PATCH 225/683] Add keys_attr parameter to aws_kms_info --- aws_kms_info.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index 879cf317497..d517ac4abd5 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -46,6 +46,17 @@ description: Whether to get full details (tags, grants etc.) of keys pending deletion default: False type: bool + keys_attr: + description: + - Whether to return the results in the C(keys) attribute as well as the + C(kms_keys) attribute. + - Returning the C(keys) attribute conflicts with the builtin keys() + method on dictionaries and as such has been deprecated. + - After version C(3.0.0) this parameter will do nothing, and after + version C(4.0.0) this parameter will be removed. + type: bool + default: True + version_added: 2.0.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -70,7 +81,7 @@ ''' RETURN = ''' -keys: +kms_keys: description: list of keys type: complex returned: always @@ -441,6 +452,7 @@ def main(): key_id=dict(aliases=['key_arn']), filters=dict(type='dict'), pending_deletion=dict(type='bool', default=False), + keys_attr=dict(type='bool', default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -455,7 +467,17 @@ def main(): module.fail_json_aws(e, msg='Failed to connect to AWS') all_keys = get_kms_info(connection, module) - module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])]) + filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] + ret_params = dict(kms_keys=filtered_keys) + + # We originally returned "keys" + if module.params['keys_attr']: + module.deprecate("Returning results in the 'keys' attribute conflicts with the builtin keys() method on " + "dicts and as such is deprecated. Please use the kms_keys attribute. 
This warning can be " + "silenced by setting keys_attr to False.", + version='3.0.0', collection_name='community.aws') + ret_params.update(dict(keys=filtered_keys)) + module.exit_json(**ret_params) if __name__ == '__main__': From 284d71754f5600fee2d945035c3c7303fa963e48 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 6 Aug 2021 10:43:00 +0200 Subject: [PATCH 226/683] Remove code testing for unsupported versions of boto3/botocore --- aws_eks_cluster.py | 8 -------- cloudformation_stack_set.py | 2 -- dynamodb_table.py | 2 -- dynamodb_ttl.py | 4 ---- ec2_ami_copy.py | 1 - ec2_asg.py | 16 --------------- ec2_launch_template.py | 3 --- ec2_transit_gateway.py | 3 --- ec2_transit_gateway_info.py | 3 --- ec2_vpc_peer.py | 2 -- ecs_ecr.py | 1 - ecs_service.py | 41 ++++++------------------------------- ecs_task.py | 36 -------------------------------- ecs_taskdefinition.py | 8 -------- efs.py | 21 +++---------------- efs_info.py | 4 ++-- elb_target_group.py | 10 --------- iam_role.py | 10 --------- lambda.py | 6 +----- rds_instance.py | 3 --- sqs_queue.py | 2 +- 21 files changed, 13 insertions(+), 173 deletions(-) diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 3d8f2696d5f..64627377c41 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -281,14 +281,6 @@ def main(): supports_check_mode=True, ) - if not module.botocore_at_least("1.10.32"): - module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32') - - if (not module.botocore_at_least("1.12.38") and - module.params.get('state') == 'absent' and - module.params.get('wait')): - module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38') - client = module.client('eks') if module.params.get('state') == 'present': diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 72b6aa05bef..b10addf7485 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -529,8 +529,6 @@ def main(): mutually_exclusive=[['template_url', 'template', 'template_body']], supports_check_mode=True ) - if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')): - module.fail_json(msg="Boto3 or botocore version is too low. 
This module requires at least boto3 1.6 and botocore 1.10.26") # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes diff --git a/dynamodb_table.py b/dynamodb_table.py index db9710f12d6..b23c443cac9 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -494,8 +494,6 @@ def main(): if module.params.get('tags'): try: boto3_dynamodb = module.client('dynamodb') - if not hasattr(boto3_dynamodb, 'tag_resource'): - module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version') boto3_sts = module.client('sts') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 490a948f9a9..2bdd9a21d45 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -120,10 +120,6 @@ def main(): argument_spec=argument_spec, ) - if not module.botocore_at_least('1.5.24'): - # TTL was added in 1.5.24 - module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24')) - try: dbclient = module.client('dynamodb') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 15acfe4e4a9..e5628b00034 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -212,7 +212,6 @@ def main(): tag_equality=dict(type='bool', default=False)) module = AnsibleAWSModule(argument_spec=argument_spec) - # TODO: Check botocore version ec2 = module.client('ec2') copy_image(module, ec2) diff --git a/ec2_asg.py b/ec2_asg.py index 59e74040d64..662c23873b1 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -1825,22 +1825,6 @@ def main(): ] ) - if ( - module.params.get('max_instance_lifetime') is not None - and not module.botocore_at_least('1.13.21') - ): - module.fail_json( - msg='Botocore needs to be version 1.13.21 or higher to use max_instance_lifetime.' - ) - - if ( - module.params.get('mixed_instances_policy') is not None - and not module.botocore_at_least('1.12.45') - ): - module.fail_json( - msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.' - ) - state = module.params.get('state') replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') diff --git a/ec2_launch_template.py b/ec2_launch_template.py index cebae8b2fec..e96049fa347 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -719,9 +719,6 @@ def main(): supports_check_mode=True ) - if not module.boto3_at_least('1.6.0'): - module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0") - for interface in (module.params.get('network_interfaces') or []): if interface.get('ipv6_addresses'): interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']] diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 8435491388a..c013ea67379 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -245,9 +245,6 @@ def __init__(self, module, results): self._connection = self._module.client('ec2') self._check_mode = self._module.check_mode - if not hasattr(self._connection, 'describe_transit_gateways'): - self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52') - def process(self): """ Process the request based on state parameter . state = present will search for an existing tgw based and return the object data. 
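# --- Illustrative aside (not part of the patch series) ---
# The SDK guards removed throughout this patch came in two flavours, sketched
# here under the assumption that the collection-wide minimum botocore version
# now makes both redundant. The helper names are hypothetical; LooseVersion is
# what module_utils of this era used (deprecated in newer Pythons).
import botocore
from distutils.version import LooseVersion

def botocore_at_least(minimum):
    # Version-comparison guard, e.g. botocore_at_least('1.13.21') before
    # allowing max_instance_lifetime in ec2_asg.
    return LooseVersion(botocore.__version__) >= LooseVersion(minimum)

def client_supports(client, operation):
    # Attribute-probe guard, e.g. client_supports(ec2, 'describe_transit_gateways')
    # as the transit gateway modules used to do.
    return hasattr(client, operation)
# --- End aside ---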
diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index c23289eaa1c..024aa5dcec9 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -183,9 +183,6 @@ def __init__(self, module, results): self._connection = self._module.client('ec2') self._check_mode = self._module.check_mode - if not hasattr(self._connection, 'describe_transit_gateways'): - self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52') - @AWSRetry.exponential_backoff() def describe_transit_gateways(self): """ diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index c45a003903c..b651b173ce4 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -423,8 +423,6 @@ def create_peer_connection(client, module): params['VpcId'] = module.params.get('vpc_id') params['PeerVpcId'] = module.params.get('peer_vpc_id') if module.params.get('peer_region'): - if not module.botocore_at_least('1.8.6'): - module.fail_json(msg="specifying peer_region parameter requires botocore >= 1.8.6") params['PeerRegion'] = module.params.get('peer_region') if module.params.get('peer_owner_id'): params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) diff --git a/ecs_ecr.py b/ecs_ecr.py index a20262956da..2b22147212b 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -76,7 +76,6 @@ scan_on_push: description: - if C(true), images are scanned for known vulnerabilities after being pushed to the repository. - - I(scan_on_push) requires botocore >= 1.13.3 required: false default: false type: bool diff --git a/ecs_service.py b/ecs_service.py index 89cfaf486aa..590276e0ab1 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -130,7 +130,6 @@ network_configuration: description: - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). - - I(assign_public_ip) requires botocore >= 1.8.4 type: dict suboptions: subnets: @@ -146,7 +145,6 @@ assign_public_ip: description: - Whether the task's elastic network interface receives a public IP address. - - This option requires botocore >= 1.8.4. type: bool launch_type: description: @@ -164,7 +162,6 @@ health_check_grace_period_seconds: description: - Seconds to wait before health checking the freshly added/updated services. - - This option requires botocore >= 1.8.20. required: false type: int service_registries: @@ -516,13 +513,10 @@ def format_network_configuration(self, network_config): self.module.fail_json_aws(e, msg="Couldn't look up security groups") result['securityGroups'] = groups if network_config['assign_public_ip'] is not None: - if self.module.botocore_at_least('1.8.4'): - if network_config['assign_public_ip'] is True: - result['assignPublicIp'] = "ENABLED" - else: - result['assignPublicIp'] = "DISABLED" + if network_config['assign_public_ip'] is True: + result['assignPublicIp'] = "ENABLED" else: - self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration') + result['assignPublicIp'] = "DISABLED" return dict(awsvpcConfiguration=result) def find_in_array(self, array_of_services, service_name, field_name='serviceArn'): @@ -640,16 +634,9 @@ def jsonize(self, service): def delete_service(self, service, cluster=None): return self.ecs.delete_service(cluster=cluster, service=service) - def ecs_api_handles_network_configuration(self): - # There doesn't seem to be a nice way to inspect botocore to look - # for attributes (and networkConfiguration is not an explicit argument - # to e.g. 
ecs.run_task, it's just passed as a keyword argument) - return self.module.botocore_at_least('1.7.44') - def health_check_setable(self, params): load_balancers = params.get('loadBalancers', []) - # check if botocore (and thus boto3) is new enough for using the healthCheckGracePeriodSeconds parameter - return len(load_balancers) > 0 and self.module.botocore_at_least('1.8.20') + return len(load_balancers) > 0 def main(): @@ -710,8 +697,6 @@ def main(): service_mgr = EcsServiceManager(module) if module.params['network_configuration']: - if not service_mgr.ecs_api_handles_network_configuration(): - module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) else: network_configuration = None @@ -729,16 +714,6 @@ def main(): results = dict(changed=False) - if module.params['launch_type']: - if not module.botocore_at_least('1.8.4'): - module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type') - if module.params['force_new_deployment']: - if not module.botocore_at_least('1.8.4'): - module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment') - if module.params['health_check_grace_period_seconds']: - if not module.botocore_at_least('1.8.20'): - module.fail_json(msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds') - if module.params['state'] == 'present': matching = False @@ -773,15 +748,11 @@ def main(): # check various parameters and boto versions and give a helpful error in boto is not new enough for feature if module.params['scheduling_strategy']: - if not module.botocore_at_least('1.10.37'): - module.fail_json(msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy') - elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']: + if (existing['schedulingStrategy']) != module.params['scheduling_strategy']: module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service") if module.params['service_registries']: - if not module.botocore_at_least('1.9.15'): - module.fail_json(msg='botocore needs to be version 1.9.15 or higher to use service_registries') - elif (existing['serviceRegistries'] or []) != serviceRegistries: + if (existing['serviceRegistries'] or []) != serviceRegistries: module.fail_json(msg="It is not possible to update the service registries of an existing service") if (existing['loadBalancers'] or []) != loadBalancers: diff --git a/ecs_task.py b/ecs_task.py index 411121372cf..6bcc3c5d850 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -63,7 +63,6 @@ network_configuration: description: - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). - - I(assign_public_ip) requires botocore >= 1.8.4 type: dict suboptions: assign_public_ip: @@ -334,34 +333,10 @@ def stop_task(self, cluster, task): response = self.ecs.stop_task(cluster=cluster, task=task) return response['task'] - def ecs_api_handles_launch_type(self): - # There doesn't seem to be a nice way to inspect botocore to look - # for attributes (and networkConfiguration is not an explicit argument - # to e.g. 
ecs.run_task, it's just passed as a keyword argument) - return self.module.botocore_at_least('1.8.4') - def ecs_task_long_format_enabled(self): account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True) return account_support['settings'][0]['value'] == 'enabled' - def ecs_api_handles_tags(self): - # There doesn't seem to be a nice way to inspect botocore to look - # for attributes (and networkConfiguration is not an explicit argument - # to e.g. ecs.run_task, it's just passed as a keyword argument) - return self.module.botocore_at_least('1.12.46') - - def ecs_api_handles_network_configuration(self): - # There doesn't seem to be a nice way to inspect botocore to look - # for attributes (and networkConfiguration is not an explicit argument - # to e.g. ecs.run_task, it's just passed as a keyword argument) - return self.module.botocore_at_least('1.7.44') - - def ecs_api_handles_network_configuration_assignIp(self): - # There doesn't seem to be a nice way to inspect botocore to look - # for attributes (and networkConfiguration is not an explicit argument - # to e.g. ecs.run_task, it's just passed as a keyword argument) - return self.module.botocore_at_least('1.8.4') - def main(): argument_spec = dict( @@ -404,18 +379,7 @@ def main(): service_mgr = EcsExecManager(module) - if module.params['network_configuration']: - if 'assignPublicIp' in module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration_assignIp(): - module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration') - elif not service_mgr.ecs_api_handles_network_configuration(): - module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration') - - if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type(): - module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type') - if module.params['tags']: - if not service_mgr.ecs_api_handles_tags(): - module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags")) if not service_mgr.ecs_task_long_format_enabled(): module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags") diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 6696e92acb3..8c8a2960ac2 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -770,14 +770,6 @@ def main(): task_mgr = EcsTaskManager(module) results = dict(changed=False) - if module.params['launch_type']: - if not module.botocore_at_least('1.8.4'): - module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type') - - if module.params['execution_role_arn']: - if not module.botocore_at_least('1.10.44'): - module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn') - if module.params['state'] == 'present': if 'containers' not in module.params or not module.params['containers']: module.fail_json(msg="To use task definitions, a list of containers must be specified") diff --git a/efs.py b/efs.py index 49fbd73c9a3..0cf4f88c1d7 100644 --- a/efs.py +++ b/efs.py @@ -79,13 +79,11 @@ throughput_mode: description: - The throughput_mode for the file system to be created. - - Requires botocore >= 1.10.57 choices: ['bursting', 'provisioned'] type: str provisioned_throughput_in_mibps: description: - If the throughput_mode is provisioned, select the amount of throughput to provisioned in Mibps. 
- - Requires botocore >= 1.10.57 type: float wait: description: @@ -370,12 +368,6 @@ def get_mount_targets_in_state(self, file_system_id, states=None): return list(targets) - def supports_provisioned_mode(self): - """ - Ensure boto3 includes provisioned throughput mode feature - """ - return hasattr(self.connection, 'update_file_system') - def get_throughput_mode(self, **kwargs): """ Returns throughput mode for selected EFS instance @@ -413,15 +405,9 @@ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throug if kms_key_id is not None: params['KmsKeyId'] = kms_key_id if throughput_mode: - if self.supports_provisioned_mode(): - params['ThroughputMode'] = throughput_mode - else: - self.module.fail_json(msg="throughput_mode parameter requires botocore >= 1.10.57") + params['ThroughputMode'] = throughput_mode if provisioned_throughput_in_mibps: - if self.supports_provisioned_mode(): - params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps - else: - self.module.fail_json(msg="provisioned_throughput_in_mibps parameter requires botocore >= 1.10.57") + params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps if state in [self.STATE_DELETING, self.STATE_DELETED]: wait_for( @@ -731,8 +717,7 @@ def main(): module.fail_json(msg='Name parameter is required for create') changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps) - if connection.supports_provisioned_mode(): - changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed + changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets, throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed result = first_or_default(connection.get_file_systems(CreationToken=name)) diff --git a/efs_info.py b/efs_info.py index 2384af97ee1..9a6ce1786fc 100644 --- a/efs_info.py +++ b/efs_info.py @@ -148,12 +148,12 @@ sample: "generalPurpose" throughput_mode: description: mode of throughput for the file system - returned: when botocore >= 1.10.57 + returned: always type: str sample: "bursting" provisioned_throughput_in_mibps: description: throughput provisioned in Mibps - returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned" + returned: when throughput_mode is set to "provisioned" type: float sample: 15.0 tags: diff --git a/elb_target_group.py b/elb_target_group.py index 7bb105b6d55..722d7afa013 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -454,12 +454,6 @@ def wait_for_status(connection, module, target_group_arn, targets, status): return status_achieved, result -def fail_if_ip_target_type_not_supported(module): - if not module.botocore_at_least('1.7.2'): - module.fail_json(msg="target_type ip requires botocore version 1.7.2 or later. 
Version %s is installed" % - botocore.__version__) - - def create_or_update_target_group(connection, module): changed = False @@ -519,10 +513,6 @@ def create_or_update_target_group(connection, module): params['Matcher'] = {} params['Matcher']['HttpCode'] = module.params.get("successful_response_codes") - # Get target type - if target_type == 'ip': - fail_if_ip_target_type_not_supported(module) - # Get target group tg = get_target_group(connection, module) diff --git a/iam_role.py b/iam_role.py index 45551cdf188..e696d9d7417 100644 --- a/iam_role.py +++ b/iam_role.py @@ -34,7 +34,6 @@ - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false). - This is intended for roles/users that have permissions to create new IAM objects. - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html). - - Requires botocore 1.10.57 or above. aliases: [boundary_policy_arn] type: str assume_role_policy_document: @@ -82,7 +81,6 @@ tags: description: - Tag dict to apply to the queue. - - Requires botocore 1.12.46 or above. type: dict purge_tags: description: @@ -566,8 +564,6 @@ def get_attached_policy_list(connection, module, name): def get_role_tags(connection, module): role_name = module.params.get('name') - if not hasattr(connection, 'list_role_tags'): - return {} try: return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -633,12 +629,6 @@ def main(): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") if not module.params.get('boundary').startswith('arn:aws:iam'): module.fail_json(msg="Boundary policy must be an ARN") - if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'): - module.fail_json(msg="When managing tags botocore must be at least v1.12.46. " - "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions())) - if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'): - module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. " - "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions())) if module.params.get('max_session_duration'): max_session_duration = module.params.get('max_session_duration') if max_session_duration < 3600 or max_session_duration > 43200: diff --git a/lambda.py b/lambda.py index 0a25214ca37..1605d6497db 100644 --- a/lambda.py +++ b/lambda.py @@ -105,7 +105,7 @@ type: str tags: description: - - tag dict to apply to the function (requires botocore 1.5.40 or above). + - Tag dict to apply to the function. 
type: dict author: - 'Steyn Huizinga (@steynovich)' @@ -384,10 +384,6 @@ def main(): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Trying to connect to AWS") - if tags is not None: - if not hasattr(client, "list_tags"): - module.fail_json(msg="Using tags requires botocore 1.5.40 or above") - if state == 'present': if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): role_arn = role diff --git a/rds_instance.py b/rds_instance.py index ea6da26f0b6..6e894005ab9 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -1198,9 +1198,6 @@ def main(): supports_check_mode=True ) - if not module.boto3_at_least('1.5.0'): - module.fail_json(msg="rds_instance requires boto3 > 1.5.0") - # Sanitize instance identifiers module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower() if module.params['new_db_instance_identifier']: diff --git a/sqs_queue.py b/sqs_queue.py index 0de9d205b35..45a8ccfc079 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -86,7 +86,7 @@ - Defaults to C(false). tags: description: - - Tag dict to apply to the queue (requires botocore 1.5.40 or above). + - Tag dict to apply to the queue. - To remove all tags set I(tags={}) and I(purge_tags=true). type: dict purge_tags: From 38c27d13d2ea81114cf5dcde03848aa4034130ec Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 6 Aug 2021 10:45:49 +0200 Subject: [PATCH 227/683] Move MSK dependencies to botocore version rather than boto3 version The relevant method definitions live (as data files) in botocore. boto3 -> botocore version mapping is unreliable. --- aws_msk_cluster.py | 13 ++++++------- aws_msk_config.py | 18 +++++------------- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 7f85c00a59b..41f2dd62e44 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -12,9 +12,6 @@ module: aws_msk_cluster short_description: Manage Amazon MSK clusters. version_added: "2.0.0" -requirements: - - botocore >= 1.17.42 - - boto3 >= 1.17.9 description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters. author: @@ -34,6 +31,7 @@ - The version of Apache Kafka. - This version should exist in given configuration. - This parameter is required when I(state=present). + - Update operation requires botocore version >= 1.16.19. type: str configuration_arn: description: @@ -52,7 +50,7 @@ instance_type: description: - The type of Amazon EC2 instances to use for Kafka brokers. - - Update operation requires boto3 version >= 1.16.58 + - Update operation requires botocore version >= 1.19.58. 
choices: - kafka.t3.small - kafka.m5.large @@ -522,7 +520,7 @@ def create_or_update_cluster(client, module): } }, "broker_type": { - "boto3_version": "1.16.58", + "botocore_version": "1.19.58", "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], "target_value": module.params.get("instance_type"), "update_params": { @@ -546,6 +544,7 @@ def create_or_update_cluster(client, module): } }, "cluster_kafka_version": { + "botocore_version": "1.16.19", "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], "target_value": module.params.get("version"), "update_params": { @@ -578,8 +577,8 @@ def create_or_update_cluster(client, module): for method, options in msk_cluster_changes.items(): - if 'boto3_version' in options: - if not module.boto3_at_least(options["boto3_version"]): + if 'botocore_version' in options: + if not module.botocore_at_least(options["botocore_version"]): continue try: diff --git a/aws_msk_config.py b/aws_msk_config.py index c02769152a5..2d0d6738b3f 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -13,8 +13,8 @@ short_description: Manage Amazon MSK cluster configurations. version_added: "2.0.0" requirements: - - botocore >= 1.17.42 - - boto3 >= 1.17.9 + - botocore >= 1.17.48 + - boto3 description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations. author: @@ -104,9 +104,6 @@ ) -BOTOCORE_MIN_VERSION = "1.17.42" - - def dict_to_prop(d): """convert dictionary to multi-line properties""" if len(d) == 0: @@ -138,8 +135,6 @@ def get_configurations_with_backoff(client): def find_active_config(client, module): """ looking for configuration by name - status is not returned for list_configurations in botocore 1.17.42 - delete_configuration method was added in botocore 1.17.48 """ name = module.params["name"] @@ -284,12 +279,9 @@ def main(): module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True) - if not module.botocore_at_least(BOTOCORE_MIN_VERSION): - module.fail_json( - msg="aws_msk_config module requires botocore >= {0}".format( - BOTOCORE_MIN_VERSION - ) - ) + # Support for update_configuration and delete_configuration added in 1.17.48 + if not module.botocore_at_least("1.17.48"): + module.fail_json(msg="aws_msk_config module requires botocore >= 1.17.48") client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) From f33dbc485029afc23ea39f4d3b92aa8f0fac8977 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 6 Aug 2021 16:23:11 +0200 Subject: [PATCH 228/683] use require_botocore_at_least for aws_msk_config --- aws_msk_config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index 2d0d6738b3f..6258ae916f6 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -280,8 +280,7 @@ def main(): module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True) # Support for update_configuration and delete_configuration added in 1.17.48 - if not module.botocore_at_least("1.17.48"): - module.fail_json(msg="aws_msk_config module requires botocore >= 1.17.48") + module.require_botocore_at_least('1.17.48') client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) From 1f455a935f0c64db385c4c81964b281b52c587cf Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Sun, 9 May 2021 21:11:47 +0200 Subject: [PATCH 229/683] ecs_taskdefinition: Fix some parameters cast to int Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 38 ++++++++++++++++++++++++++++++-------- 1 file 
changed, 30 insertions(+), 8 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 8c8a2960ac2..ca65cbe70d8 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -189,7 +189,7 @@ linuxParameters: description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. required: False - type: list + type: dict suboptions: capabilities: description: @@ -410,6 +410,8 @@ description: The type of the ulimit. type: str required: False + choices: ['core', 'cpu', 'data', 'fsize', 'locks', 'memlock', 'msgqueue', 'nice', 'nofile', 'nproc', 'rss', + 'rtprio', 'rttime', 'sigpending', 'stack'] softLimit: description: The soft limit for the ulimit type. type: int @@ -667,7 +669,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, # Ensures the number parameters are int as required by boto for container in container_definitions: - for param in ('memory', 'cpu', 'memoryReservation'): + for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'): if param in container: container[param] = int(container[param]) @@ -681,6 +683,23 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as " "container port or not be set") + if 'linuxParameters' in container: + for linux_param in container.get('linuxParameters'): + if linux_param == 'tmpfs': + for tmpfs_param in container['linuxParameters']['tmpfs']: + if 'size' in tmpfs_param: + tmpfs_param['size'] = int(tmpfs_param['size']) + + for param in ('maxSwap', 'swappiness', 'sharedMemorySize'): + if param in linux_param: + container['linuxParameters'][param] = int(container['linuxParameters'][param]) + + if 'ulimits' in container: + for limits_mapping in container['ulimits']: + for limit in ('softLimit', 'hardLimit'): + if limit in limits_mapping: + limits_mapping[limit] = int(limits_mapping[limit]) + validated_containers.append(container) params = dict( @@ -794,21 +813,24 @@ def main(): module.fail_json(msg='The only supported value for environmentFiles is s3.') for linux_param in container.get('linuxParameters', {}): - if linux_param.get('devices') and launch_type == 'FARGATE': + if linux_param == 'devices' and launch_type == 'FARGATE': module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.') - if linux_param.get('maxSwap') and launch_type == 'FARGATE': + if linux_param == 'maxSwap' and launch_type == 'FARGATE': module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.') - elif linux_param.get('maxSwap') and linux_param['maxSwap'] < 0: + elif linux_param == 'maxSwap' and container['linuxParameters']['maxSwap'] < 0: module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.') - if linux_param.get('swappiness') and (linux_param['swappiness'] < 0 or linux_param['swappiness'] > 100): + if ( + linux_param == 'swappiness' and + (container['linuxParameters']['swappiness'] < 0 or container['linuxParameters']['swappiness'] > 100) + ): module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.') - if linux_param.get('sharedMemorySize') and launch_type == 'FARGATE': + if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE': module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.') - if linux_param.get('tmpfs') and launch_type == 'FARGATE': + if linux_param == 'tmpfs' and
launch_type == 'FARGATE': module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.') if container.get('hostname') and network_mode == 'awsvpc': From eef0a27e581e0379312bcd0385acce1a3cef18c9 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 10 May 2021 12:49:47 +0200 Subject: [PATCH 230/683] Fix idempotence Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index ca65cbe70d8..c9db3c77660 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -13,7 +13,10 @@ short_description: register a task definition in ecs description: - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS). -author: Mark Chance (@Java1Guy) +author: + - Mark Chance (@Java1Guy) + - Alina Buzachis (@alinabuzachis) +requirements: [ json, botocore, boto3 ] options: state: description: @@ -644,9 +647,9 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils._text import to_text from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class EcsTaskManager: @@ -655,13 +658,13 @@ class EcsTaskManager: def __init__(self, module): self.module = module - self.ecs = module.client('ecs') + self.ecs = module.client('ecs', AWSRetry.jittered_backoff()) def describe_task(self, task_name): try: - response = self.ecs.describe_task_definition(taskDefinition=task_name) + response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name) return response['taskDefinition'] - except botocore.exceptions.ClientError: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: return None def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory): @@ -720,8 +723,8 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, params['executionRoleArn'] = execution_role_arn try: - response = self.ecs.register_task_definition(**params) - except botocore.exceptions.ClientError as e: + response = self.ecs.register_task_definition(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to register task") return response['taskDefinition'] @@ -806,7 +809,7 @@ def main(): module.fail_json(msg='links parameter is not supported if network mode is awsvpc.') for environment in container.get('environment', []): - environment['value'] = to_text(environment['value']) + environment['value'] = environment['value'] for environment_file in container.get('environmentFiles', []): if environment_file['type'] != 's3': @@ -876,14 +879,24 @@ def _right_has_values_of_left(left, right): for list_val in left_list: if list_val not in right_list: - return False + # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp') + # fill in that default if absent and see if it is in right_list then + if isinstance(list_val, dict) and not list_val.get('protocol'): + modified_list_val = dict(list_val) + modified_list_val.update(protocol='tcp') + if modified_list_val in right_list: + continue else: return False # Make sure right doesn't have anything that left doesn't for k, v in right.items(): if v and k not in left: - return False + # 
'essential' defaults to True when not specified + if k == 'essential' and v is True: + pass + else: + return False return True From 6526e146629a02b08cf54a5d65ffa3354c7ada76 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 11 May 2021 23:34:50 +0200 Subject: [PATCH 231/683] Add more casts to int Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index c9db3c77660..86319b8bb3c 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -821,12 +821,12 @@ def main(): if linux_param == 'maxSwap' and launch_type == 'FARGATE': module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.') - elif linux_param == 'maxSwap' and container['linuxParameters']['maxSwap'] < 0: + elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0: module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.') if ( linux_param == 'swappiness' and - (container['linuxParameters']['swappiness'] < 0 or container['linuxParameters']['swappiness'] > 100) + (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100) ): module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.') From 897290c0b66fdde860105c475c059f5562ffbc7a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 12 Aug 2021 09:45:50 +0200 Subject: [PATCH 232/683] ecs_task remove unused import --- ecs_task.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ecs_task.py b/ecs_task.py index 6bcc3c5d850..b2ca36e21de 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -234,7 +234,6 @@ ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible.module_utils.basic import missing_required_lib from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list try: From 132f535299a88b5aab273c60d6b92048efaff8fc Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 12 Aug 2021 09:55:55 +0200 Subject: [PATCH 233/683] Fix pylint test errors "arguments-renamed" --- ec2_vpc_vgw.py | 10 +++++----- ec2_vpc_vpn.py | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 77ed96696bc..b46d0f9ac47 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -131,16 +131,16 @@ def status_code_from_exception(error): return (error.response['Error']['Code'], error.response['Error']['Message'],) @staticmethod - def found(response_codes, catch_extra_error_codes=None): + def found(response_code, catch_extra_error_codes=None): retry_on = ['The maximum number of mutating objects has been reached.'] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) - if not isinstance(response_codes, tuple): - response_codes = (response_codes,) + if not isinstance(response_code, tuple): + response_code = (response_code,) - for code in response_codes: - if super().found(response_codes, catch_extra_error_codes): + for code in response_code: + if super().found(response_code, catch_extra_error_codes): return True return False diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index e69d3f55e82..df060eaa4c8 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -324,16 +324,16 @@ def status_code_from_exception(error): return (error.response['Error']['Code'], error.response['Error']['Message'],) @staticmethod - def found(response_codes, 
catch_extra_error_codes=None): retry_on = ['The maximum number of mutating objects has been reached.'] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) - if not isinstance(response_codes, tuple): - response_codes = (response_codes,) + if not isinstance(response_code, tuple): + response_code = (response_code,) - for code in response_codes: - if super().found(response_codes, catch_extra_error_codes): + for code in response_code: + if super().found(response_code, catch_extra_error_codes): return True return False From a8ad36b7bb6ad3b65cf7955b92d740f952a7bdaf Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 11 Aug 2021 14:16:25 +0200 Subject: [PATCH 234/683] aws_s3_bucket_info - Add a check for botocore>='1.18.11' when pulling bucket_ownership_controls --- aws_s3_bucket_info.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 06885dfcd13..f5b9c44f04c 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -78,7 +78,9 @@ type: bool default: False bucket_ownership_controls: - description: Retrive S3 ownership controls. + description: + - Retrieve S3 ownership controls. + - Access to bucket ownership controls requires botocore>=1.18.11. type: bool default: False bucket_website: @@ -593,6 +595,9 @@ def main(): module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') + if module.params.get("bucket_ownership_controls"): + module.require_botocore_at_least('1.18.11', reason='to retrieve bucket ownership controls') + # Get parameters name = module.params.get("name") name_filter = module.params.get("name_filter") From 0fef91f797d60b74f56dc1669cef4dad28136974 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 12 Aug 2021 21:01:04 +0200 Subject: [PATCH 235/683] use a generator rather than list comprehension when using any()/all() See also https://www.python.org/dev/peps/pep-0289/#rationale --- aws_kms_info.py | 2 +- aws_ses_rule_set.py | 2 +- cloudwatchevent_rule.py | 4 ++-- elb_target_group.py | 2 +- iam.py | 4 ++-- iam_group.py | 2 +- iam_role.py | 2 +- iam_user.py | 2 +- rds_instance.py | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index d517ac4abd5..3e606481e15 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -386,7 +386,7 @@ def key_matches_filters(key, filters): if not filters: return True else: - return all([key_matches_filter(key, filtr) for filtr in filters.items()]) + return all(key_matches_filter(key, filtr) for filtr in filters.items()) def get_key_details(connection, module, key_id, tokens=None): diff --git a/aws_ses_rule_set.py b/aws_ses_rule_set.py index 9b0b66cc30f..c87145eab5e 100644 --- a/aws_ses_rule_set.py +++ b/aws_ses_rule_set.py @@ -116,7 +116,7 @@ def list_rule_sets(client, module): def rule_set_in(name, rule_sets): - return any([s for s in rule_sets if s['Name'] == name]) + return any(s for s in rule_sets if s['Name'] == name) def ruleset_active(client, module, name): diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index e7a200dd960..d38db416864 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -388,10 +388,10 @@ def _rule_matches_aws(self): # The rule matches AWS only if all rule data fields are equal # to their corresponding local value defined in the task - return
all([ + return all( getattr(self.rule, field) == aws_rule_data.get(field, None) for field in self.RULE_FIELDS - ]) + ) def _targets_to_put(self): """Returns a list of targets that need to be updated or added remotely""" diff --git a/elb_target_group.py b/elb_target_group.py index 722d7afa013..45649e7e651 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -479,7 +479,7 @@ def create_or_update_target_group(connection, module): "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes" ] - health_options = any([module.params[health_option_key] is not None for health_option_key in health_option_keys]) + health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys) # Set health check if anything set if health_options: diff --git a/iam.py b/iam.py index 2aacd151cd1..4dd11aa0672 100644 --- a/iam.py +++ b/iam.py @@ -672,7 +672,7 @@ def main(): if key_state: key_state = key_state.lower() - if any([n in key_state for n in ['active', 'inactive']]) and not key_ids: + if any(n in key_state for n in ['active', 'inactive']) and not key_ids: module.fail_json(changed=False, msg="At least one access key has to be defined in order" " to use 'active' or 'inactive'") @@ -735,7 +735,7 @@ def main(): if iam_type == 'user': been_updated = False user_groups = None - user_exists = any([n in [name, new_name] for n in orig_user_list]) + user_exists = any(n in [name, new_name] for n in orig_user_list) if user_exists: current_path = iam.get_user(name).get_user_result.user['path'] if not new_path and current_path != path: diff --git a/iam_group.py b/iam_group.py index 7b534aa0504..5f85c4bfc8c 100644 --- a/iam_group.py +++ b/iam_group.py @@ -217,7 +217,7 @@ def compare_group_members(current_group_members, new_group_members): def convert_friendly_names_to_arns(connection, module, policy_names): - if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]): + if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): return policy_names allpolicies = {} paginator = connection.get_paginator('list_policies') diff --git a/iam_role.py b/iam_role.py index e696d9d7417..e95ed0afddf 100644 --- a/iam_role.py +++ b/iam_role.py @@ -221,7 +221,7 @@ def _list_policies(connection): def convert_friendly_names_to_arns(connection, module, policy_names): - if not any([not policy.startswith('arn:') for policy in policy_names]): + if not any(not policy.startswith('arn:') for policy in policy_names): return policy_names allpolicies = {} policies = _list_policies(connection) diff --git a/iam_user.py b/iam_user.py index b88953a6868..659eec56354 100644 --- a/iam_user.py +++ b/iam_user.py @@ -137,7 +137,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): # List comprehension that looks for any policy in the 'policy_names' list # that does not begin with 'arn'. If there aren't any, short circuit. 
# If there are, translate friendly name to the full arn - if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]): + if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): return policy_names allpolicies = {} paginator = connection.get_paginator('list_policies') diff --git a/rds_instance.py b/rds_instance.py index 6e894005ab9..c1f118db514 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -844,7 +844,7 @@ def get_parameters(client, module, parameters, method_name): parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] required_options = get_boto3_client_method_parameters(client, method_name, required=True) - if any([parameters.get(k) is None for k in required_options]): + if any(parameters.get(k) is None for k in required_options): module.fail_json(msg='To {0} requires the parameters: {1}'.format( get_rds_method_attribute(method_name, module).operation_description, required_options)) options = get_boto3_client_method_parameters(client, method_name) From 63f5100327f147b307285a322bf223ab776bf34c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 11 Aug 2021 12:25:22 +0200 Subject: [PATCH 236/683] aws_secret - fix deletion idempotency when not using instant deletion --- aws_secret.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws_secret.py b/aws_secret.py index 86c6d6e3521..dfe1013194d 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -367,6 +367,8 @@ def main(): elif current_secret.get("DeletedDate") and recovery_window == 0: result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) changed = True + else: + result = "secret already scheduled for deletion" else: result = "secret does not exist" if state == 'present': @@ -393,6 +395,7 @@ def main(): changed = True result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) result.pop("response_metadata") + module.exit_json(changed=changed, secret=result) From 3749915be9a4d7ed3f634d19e908de09f1bc2c90 Mon Sep 17 00:00:00 2001 From: Phil Sharfstein Date: Thu, 5 Aug 2021 07:36:07 -0700 Subject: [PATCH 237/683] Updating documentation for https://github.com/ansible-collections/community.aws/pull/39 Adding `s3_origin_config` to docs in parameters and return values --- cloudfront_distribution.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 075a106246d..1d63619b04e 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -149,9 +149,15 @@ s3_origin_access_identity_enabled: description: - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. - - Will automatically create an Identity for you. + - Will automatically create an Identity for you if no I(s3_origin_config) is specified. - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html). type: bool + s3_origin_config: + type: dict + suboptions: + origin_access_identity: + description: Existing origin access identity in the format C(origin-access-identity/cloudfront/OID_ID) + type: str custom_origin_config: description: Connection information about the origin. type: dict @@ -1275,6 +1281,15 @@ returned: always type: str sample: '' + s3_origin_config: + descrption: Origin access identity configuration for S3 Origin. 
+ returned: when s3_origin_access_identity_enabled is true + type: dict + contains: + origin_access_identity: + type: str + description: The origin access id as a path + sample: origin-access-identity/cloudfront/EXAMPLEID quantity: description: Count of origins. returned: always type: str From ead5a83b09572851d70fa5e5752928d6d9fab014 Mon Sep 17 00:00:00 2001 From: Phil Sharfstein Date: Thu, 5 Aug 2021 09:41:35 -0700 Subject: [PATCH 238/683] Fix typo and missing description field Fix typo and missing description field - fix automated test failures --- cloudfront_distribution.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 1d63619b04e..e01bcec2017 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -153,6 +153,7 @@ - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html). type: bool s3_origin_config: + description: Specify origin access identity for S3 origins type: dict suboptions: origin_access_identity: @@ -1282,7 +1283,7 @@ type: str sample: '' s3_origin_config: - descrption: Origin access identity configuration for S3 Origin. + description: Origin access identity configuration for S3 Origin. returned: when s3_origin_access_identity_enabled is true From b2a6d61d7b829f5f68a8bc42b4cf570dcca90af7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 10 Aug 2021 15:41:52 +0200 Subject: [PATCH 239/683] Minor formatting tweaks for origin_access_identity docs --- cloudfront_distribution.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index e01bcec2017..9887a8d373a 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -153,11 +153,11 @@ - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html). type: bool s3_origin_config: - description: Specify origin access identity for S3 origins + description: Specify origin access identity for S3 origins. type: dict suboptions: origin_access_identity: - description: Existing origin access identity in the format C(origin-access-identity/cloudfront/OID_ID) + description: Existing origin access identity in the format C(origin-access-identity/cloudfront/OID_ID). type: str custom_origin_config: description: Connection information about the origin. type: dict @@ -1289,7 +1289,7 @@ contains: origin_access_identity: type: str - description: The origin access id as a path + description: The origin access id as a path. sample: origin-access-identity/cloudfront/EXAMPLEID quantity: description: Count of origins.
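The option documented in the two patches above maps onto the CloudFront API's origin structure. A minimal sketch of that shape (origin id, bucket name, and OAI id are placeholder values, not taken from the patches):

    # Illustrative only: this mirrors the Origins entry that
    # cloudfront_distribution ultimately builds when s3_origin_config is
    # supplied with an existing origin access identity.
    origin = {
        'Id': 'my-s3-origin',                             # placeholder origin id
        'DomainName': 'example-bucket.s3.amazonaws.com',  # placeholder bucket
        'S3OriginConfig': {
            # the same origin-access-identity/cloudfront/OID_ID path format
            # that the origin_access_identity option documents above
            'OriginAccessIdentity': 'origin-access-identity/cloudfront/E2EXAMPLE',
        },
    }

Passing an existing identity this way bypasses the automatic identity creation that s3_origin_access_identity_enabled otherwise performs.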
From 6dc18e7f591f1941e9ae1f4b08210dcefedb4e62 Mon Sep 17 00:00:00 2001 From: Milan Zink Date: Thu, 24 Jun 2021 14:37:24 +0200 Subject: [PATCH 240/683] new module 'efs_tag' --- efs_tag.py | 181 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 efs_tag.py diff --git a/efs_tag.py b/efs_tag.py new file mode 100644 index 00000000000..f44b28833dd --- /dev/null +++ b/efs_tag.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +""" +Copyright: (c) 2021, Milan Zink +GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: efs_tag +version_added: 2.0.0 +short_description: create and remove tags on Amazon EFS resources +description: + - Creates and removes tags for Amazon EFS resources. + - Resources are referenced by their ID (filesystem or filesystem access point). +author: + - Milan Zink (@zeten30) +options: + resource: + description: + - EFS Filesystem ID or EFS Filesystem Access Point ID. + type: str + required: True + state: + description: + - Whether the tags should be present or absent on the resource. + default: present + choices: ['present', 'absent'] + type: str + tags: + description: + - A dictionary of tags to add or remove from the resource. + - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. + type: dict + required: True + purge_tags: + description: + - Whether unspecified tags should be removed from the resource. + - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. + type: bool + default: false +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = r''' +- name: Ensure tags are present on a resource + community.aws.efs_tag: + resource: fs-123456ab + state: present + tags: + Name: MyEFS + Env: Production + +- name: Remove the Env tag if it's currently 'development' + community.aws.efs_tag: + resource: fsap-78945ff + state: absent + tags: + Env: development + +- name: Remove all tags except for Name + community.aws.efs_tag: + resource: fsap-78945ff + state: absent + tags: + Name: foo + purge_tags: true + +- name: Remove all tags + community.aws.efs_tag: + resource: fsap-78945ff + state: absent + tags: {} + purge_tags: true +''' + +RETURN = r''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: If tags were added + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: If tags were removed + type: dict +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + # Handled by AnsibleAWSModule + pass + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + +MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing +WAIT_RETRY = 5 # how many seconds to wait between propagation status polls + + +def get_tags(efs, module, resource): + ''' + Get resource tags + ''' + try: + return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags']) + except 
(BotoCoreError, ClientError) as get_tags_error: + module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource)) + + +def main(): + ''' + MAIN + ''' + argument_spec = dict( + resource=dict(required=True), + tags=dict(type='dict', required=True), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + resource = module.params['resource'] + tags = module.params['tags'] + state = module.params['state'] + purge_tags = module.params['purge_tags'] + + result = {'changed': False} + + efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff()) + + current_tags = get_tags(efs, module, resource) + + add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + remove_tags = {} + + if state == 'absent': + for key in tags: + if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + remove_tags[key] = current_tags[key] + + for key in remove: + remove_tags[key] = current_tags[key] + + if remove_tags: + result['changed'] = True + result['removed_tags'] = remove_tags + if not module.check_mode: + try: + efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) + except (BotoCoreError, ClientError) as remove_tag_error: + module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + + if state == 'present' and add_tags: + result['changed'] = True + result['added_tags'] = add_tags + current_tags.update(add_tags) + if not module.check_mode: + try: + tags = ansible_dict_to_boto3_tag_list(add_tags) + efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) + except (BotoCoreError, ClientError) as set_tag_error: + module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource)) + + result['tags'] = get_tags(efs, module, resource) + module.exit_json(**result) + + +if __name__ == '__main__': + main() From 0e416e813a8978c4d22a3981dba11f9fb8d8f18a Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Wed, 18 Aug 2021 18:03:36 -0700 Subject: [PATCH 241/683] Feat : Handle path to sync single file --- s3_sync.py | 86 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 35 deletions(-) diff --git a/s3_sync.py b/s3_sync.py index 1e7d01680f1..b5b9687f19b 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -322,41 +322,57 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): def gather_files(fileroot, include=None, exclude=None): ret = [] - for (dirpath, dirnames, filenames) in os.walk(fileroot): - for fn in filenames: - fullpath = os.path.join(dirpath, fn) - # include/exclude - if include: - found = False - for x in include.split(','): - if fnmatch.fnmatch(fn, x): - found = True - if not found: - # not on the include list, so we don't want it. - continue - - if exclude: - found = False - for x in exclude.split(','): - if fnmatch.fnmatch(fn, x): - found = True - if found: - # skip it, even if previously included. 
- continue - - chopped_path = os.path.relpath(fullpath, start=fileroot) - fstat = os.stat(fullpath) - f_size = fstat[osstat.ST_SIZE] - f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) - # dirpath = path *to* the directory - # dirnames = subdirs *in* our directory - # filenames + + if os.path.isfile(fileroot): + fullpath = fileroot + fstat = os.stat(fullpath) + path_array = fileroot.split('/') + chopped_path = path_array[-1] + f_size = fstat[osstat.ST_SIZE] + f_modified_epoch = fstat[osstat.ST_MTIME] + ret.append({ + 'fullpath': fullpath, + 'chopped_path': chopped_path, + 'modified_epoch': f_modified_epoch, + 'bytes': f_size, + }) + + else: + for (dirpath, dirnames, filenames) in os.walk(fileroot): + for fn in filenames: + fullpath = os.path.join(dirpath, fn) + # include/exclude + if include: + found = False + for x in include.split(','): + if fnmatch.fnmatch(fn, x): + found = True + if not found: + # not on the include list, so we don't want it. + continue + + if exclude: + found = False + for x in exclude.split(','): + if fnmatch.fnmatch(fn, x): + found = True + if found: + # skip it, even if previously included. + continue + + chopped_path = os.path.relpath(fullpath, start=fileroot) + fstat = os.stat(fullpath) + f_size = fstat[osstat.ST_SIZE] + f_modified_epoch = fstat[osstat.ST_MTIME] + ret.append({ + 'fullpath': fullpath, + 'chopped_path': chopped_path, + 'modified_epoch': f_modified_epoch, + 'bytes': f_size, + }) + # dirpath = path *to* the directory + # dirnames = subdirs *in* our directory + # filenames return ret From 4a65a7d54cf0d5a0dfbe5941f784c19338e689ae Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Wed, 18 Aug 2021 22:43:30 -0700 Subject: [PATCH 242/683] Added example --- s3_sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/s3_sync.py b/s3_sync.py index b5b9687f19b..c9021c3dbf9 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -148,6 +148,11 @@ file_root: roles/s3/files/ storage_class: GLACIER +- name: basic individual file upload + community.aws.s3_sync: + bucket: tedder + file_root: roles/s3/files/file_name + - name: all the options community.aws.s3_sync: bucket: tedder From adeeba7ce295c85e4e11bb15425004a06577ecf9 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Mon, 23 Aug 2021 17:31:43 -0700 Subject: [PATCH 243/683] iam_role.py: update update_role_description to update_role --- iam_role.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iam_role.py b/iam_role.py index e95ed0afddf..f5699edf8b5 100644 --- a/iam_role.py +++ b/iam_role.py @@ -327,7 +327,7 @@ def update_role_description(connection, module, params, role): return True try: - connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) + connection.update_role(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName'])) return True From d51f2077b9bf477a59b4bef92523e5052b90ab9f Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 26 Aug 2021 15:26:28 +0200 Subject: [PATCH 244/683] Remove modules --- ec2_vpc_nat_gateway.py | 1000 ---------------------------------- ec2_vpc_nat_gateway_facts.py | 1 - ec2_vpc_nat_gateway_info.py | 218 -------- 3 files changed, 1219 deletions(-) delete mode 100644 
ec2_vpc_nat_gateway.py delete mode 120000 ec2_vpc_nat_gateway_facts.py delete mode 100644 ec2_vpc_nat_gateway_info.py diff --git a/ec2_vpc_nat_gateway.py b/ec2_vpc_nat_gateway.py deleted file mode 100644 index 30a28ca1391..00000000000 --- a/ec2_vpc_nat_gateway.py +++ /dev/null @@ -1,1000 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_vpc_nat_gateway -version_added: 1.0.0 -short_description: Manage AWS VPC NAT Gateways. -description: - - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids. -options: - state: - description: - - Ensure NAT Gateway is present or absent. - default: "present" - choices: ["present", "absent"] - type: str - nat_gateway_id: - description: - - The id AWS dynamically allocates to the NAT Gateway on creation. - This is required when the absent option is present. - type: str - subnet_id: - description: - - The id of the subnet to create the NAT Gateway in. This is required - with the present option. - type: str - allocation_id: - description: - - The id of the elastic IP allocation. If this is not passed and the - eip_address is not passed. An EIP is generated for this NAT Gateway. - type: str - eip_address: - description: - - The elastic IP address of the EIP you want attached to this NAT Gateway. - If this is not passed and the allocation_id is not passed, - an EIP is generated for this NAT Gateway. - type: str - if_exist_do_not_create: - description: - - if a NAT Gateway exists already in the subnet_id, then do not create a new one. - required: false - default: false - type: bool - tags: - description: - - A dict of tags to apply to the NAT gateway. - - To remove all tags set I(tags={}) and I(purge_tags=true). - aliases: [ 'resource_tags' ] - type: dict - version_added: 1.4.0 - purge_tags: - description: - - Remove tags not listed in I(tags). - type: bool - default: true - version_added: 1.4.0 - release_eip: - description: - - Deallocate the EIP from the VPC. - - Option is only valid with the absent state. - - You should use this with the wait option. Since you can not release an address while a delete operation is happening. - default: false - type: bool - wait: - description: - - Wait for operation to complete before returning. - default: false - type: bool - wait_timeout: - description: - - How many seconds to wait for an operation to complete before timing out. - default: 320 - type: int - client_token: - description: - - Optional unique token to be used during create to ensure idempotency. - When specifying this option, ensure you specify the eip_address parameter - as well otherwise any subsequent runs will fail. - type: str -author: - - Allen Sanabria (@linuxdynasty) - - Jon Hadfield (@jonhadfield) - - Karen Cheng (@Etherdaemon) - - Alina Buzachis (@alinabuzachis) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create new nat gateway with client token. - community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - eip_address: 52.1.1.1 - region: ap-southeast-2 - client_token: abcd-12345678 - register: new_nat_gateway - -- name: Create new nat gateway using an allocation-id. 
- community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - allocation_id: eipalloc-12345678 - region: ap-southeast-2 - register: new_nat_gateway - -- name: Create new nat gateway, using an EIP address and wait for available status. - community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - eip_address: 52.1.1.1 - wait: true - region: ap-southeast-2 - register: new_nat_gateway - -- name: Create new nat gateway and allocate new EIP. - community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - wait: true - region: ap-southeast-2 - register: new_nat_gateway - -- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet. - community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - wait: true - region: ap-southeast-2 - if_exist_do_not_create: true - register: new_nat_gateway - -- name: Delete nat gateway using discovered nat gateways from facts module. - community.aws.ec2_vpc_nat_gateway: - state: absent - region: ap-southeast-2 - wait: true - nat_gateway_id: "{{ item.NatGatewayId }}" - release_eip: true - register: delete_nat_gateway_result - loop: "{{ gateways_to_remove.result }}" - -- name: Delete nat gateway and wait for deleted status. - community.aws.ec2_vpc_nat_gateway: - state: absent - nat_gateway_id: nat-12345678 - wait: true - wait_timeout: 500 - region: ap-southeast-2 - -- name: Delete nat gateway and release EIP. - community.aws.ec2_vpc_nat_gateway: - state: absent - nat_gateway_id: nat-12345678 - release_eip: true - wait: yes - wait_timeout: 300 - region: ap-southeast-2 - -- name: Create new nat gateway using allocation-id and tags. - community.aws.ec2_vpc_nat_gateway: - state: present - subnet_id: subnet-12345678 - allocation_id: eipalloc-12345678 - region: ap-southeast-2 - tags: - Tag1: tag1 - Tag2: tag2 - register: new_nat_gateway - -- name: Update tags without purge - community.aws.ec2_vpc_nat_gateway: - subnet_id: subnet-12345678 - allocation_id: eipalloc-12345678 - region: ap-southeast-2 - purge_tags: no - tags: - Tag3: tag3 - wait: yes - register: update_tags_nat_gateway -''' - -RETURN = r''' -create_time: - description: The ISO 8601 date time format in UTC. - returned: In all cases. - type: str - sample: "2016-03-05T05:19:20.282000+00:00'" -nat_gateway_id: - description: id of the VPC NAT Gateway - returned: In all cases. - type: str - sample: "nat-0d1e3a878585988f8" -subnet_id: - description: id of the Subnet - returned: In all cases. - type: str - sample: "subnet-12345" -state: - description: The current state of the NAT Gateway. - returned: In all cases. - type: str - sample: "available" -tags: - description: The tags associated the VPC NAT Gateway. - type: dict - returned: When tags are present. - sample: - tags: - "Ansible": "Test" -vpc_id: - description: id of the VPC. - returned: In all cases. - type: str - sample: "vpc-12345" -nat_gateway_addresses: - description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id. - returned: In all cases. 
- type: str - sample: [ - { - 'public_ip': '52.52.52.52', - 'network_interface_id': 'eni-12345', - 'private_ip': '10.0.0.100', - 'allocation_id': 'eipalloc-12345' - } - ] -''' - -import datetime - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_nat_gateways(client, **params): - try: - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] - except is_boto3_error_code('InvalidNatGatewayID.NotFound'): - return None - - -def wait_for_status(client, module, waiter_name, nat_gateway_id): - wait_timeout = module.params.get('wait_timeout') - try: - waiter = get_waiter(client, waiter_name) - attempts = 1 + int(wait_timeout / waiter.config.delay) - waiter.wait( - NatGatewayIds=[nat_gateway_id], - WaiterConfig={'MaxAttempts': attempts} - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to wait for NAT gateway state to update.") - - -def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states=None): - """Retrieve a list of NAT Gateways - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - - Kwargs: - subnet_id (str): The subnet_id the nat resides in. - nat_gateway_id (str): The Amazon NAT id. - states (list): States available (pending, failed, available, deleting, and deleted) - default=None - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) 
- >>> subnet_id = 'subnet-12345678' - >>> get_nat_gateways(client, module, subnet_id) - [ - true, - "", - { - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "nat_gateway_addresses": [ - { - "public_ip": "55.55.55.55", - "network_interface_id": "eni-1234567", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-1234567" - } - ], - "nat_gateway_id": "nat-123456789", - "state": "deleted", - "subnet_id": "subnet-123456789", - "tags": {}, - "vpc_id": "vpc-12345678" - } - - Returns: - Tuple (bool, str, list) - """ - - params = dict() - existing_gateways = list() - - if not states: - states = ['available', 'pending'] - if nat_gateway_id: - params['NatGatewayIds'] = [nat_gateway_id] - else: - params['Filter'] = [ - { - 'Name': 'subnet-id', - 'Values': [subnet_id] - }, - { - 'Name': 'state', - 'Values': states - } - ] - - try: - gateways = _describe_nat_gateways(client, **params) - if gateways: - for gw in gateways: - existing_gateways.append(camel_dict_to_snake_dict(gw)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - return existing_gateways - - -def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None): - """Retrieve all NAT Gateways for a subnet. - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - subnet_id (str): The subnet_id the nat resides in. - - Kwargs: - allocation_id (str): The EIP Amazon identifier. - default = None - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) - >>> subnet_id = 'subnet-1234567' - >>> allocation_id = 'eipalloc-1234567' - >>> gateway_in_subnet_exists(client, module, subnet_id, allocation_id) - ( - [ - { - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "nat_gateway_addresses": [ - { - "public_ip": "55.55.55.55", - "network_interface_id": "eni-1234567", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-1234567" - } - ], - "nat_gateway_id": "nat-123456789", - "state": "deleted", - "subnet_id": "subnet-123456789", - "tags": {}, - "vpc_id": "vpc-1234567" - } - ], - False - ) - - Returns: - Tuple (list, bool) - """ - - allocation_id_exists = False - gateways = [] - states = ['available', 'pending'] - - gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states)) - - if gws_retrieved: - for gw in gws_retrieved: - for address in gw['nat_gateway_addresses']: - if allocation_id: - if address.get('allocation_id') == allocation_id: - allocation_id_exists = True - gateways.append(gw) - else: - gateways.append(gw) - - return gateways, allocation_id_exists - - -def get_eip_allocation_id_by_address(client, module, eip_address): - """Release an EIP from your EIP Pool - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - eip_address (str): The Elastic IP Address of the EIP. - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) 
- >>> eip_address = '52.87.29.36' - >>> get_eip_allocation_id_by_address(client, module, eip_address) - 'eipalloc-36014da3' - - Returns: - Tuple (str, str) - """ - - params = { - 'PublicIps': [eip_address], - } - allocation_id = None - msg = '' - - try: - allocations = client.describe_addresses(aws_retry=True, **params)['Addresses'] - - if len(allocations) == 1: - allocation = allocations[0] - else: - allocation = None - - if allocation: - if allocation.get('Domain') != 'vpc': - msg = ( - "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP" - .format(eip_address) - ) - else: - allocation_id = allocation.get('AllocationId') - - except is_boto3_error_code('InvalidAddress.Malformed') as e: - module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address)) - except is_boto3_error_code('InvalidAddress.NotFound') as e: # pylint: disable=duplicate-except - msg = ( - "EIP {0} does not exist".format(eip_address) - ) - allocation_id = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - return allocation_id, msg - - -def allocate_eip_address(client, module): - """Release an EIP from your EIP Pool - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) - >>> allocate_eip_address(client, module) - True - - Returns: - Tuple (bool, str) - """ - - new_eip = None - msg = '' - params = { - 'Domain': 'vpc', - } - - if module.check_mode: - ip_allocated = True - new_eip = None - return ip_allocated, msg, new_eip - - try: - new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] - ip_allocated = True - msg = 'eipalloc id {0} created'.format(new_eip) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - return ip_allocated, msg, new_eip - - -def release_address(client, module, allocation_id): - """Release an EIP from your EIP Pool - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - allocation_id (str): The eip Amazon identifier. - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) - >>> allocation_id = "eipalloc-123456" - >>> release_address(client, module, allocation_id) - True - - Returns: - Boolean, string - """ - - msg = '' - - if module.check_mode: - return True, '' - - ip_released = False - - try: - client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id]) - except is_boto3_error_code('InvalidAllocationID.NotFound') as e: - # IP address likely already released - # Happens with gateway in 'deleted' state that - # still lists associations - return True, e - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - try: - client.release_address(aws_retry=True, AllocationId=allocation_id) - ip_released = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - return ip_released, msg - - -def create(client, module, subnet_id, allocation_id, tags, purge_tags, client_token=None, - wait=False): - """Create an Amazon NAT Gateway. 
- Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - subnet_id (str): The subnet_id the nat resides in - allocation_id (str): The eip Amazon identifier - tags (dict): Tags to associate to the NAT gateway - purge_tags (bool): If true, remove tags not listed in I(tags) - type: bool - - Kwargs: - wait (bool): Wait for the nat to be in the deleted state before returning. - default = False - client_token (str): - default = None - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) - >>> subnet_id = 'subnet-1234567' - >>> allocation_id = 'eipalloc-1234567' - >>> create(client, module, subnet_id, allocation_id, wait=True) - [ - true, - "", - { - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "nat_gateway_addresses": [ - { - "public_ip": "55.55.55.55", - "network_interface_id": "eni-1234567", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-1234567" - } - ], - "nat_gateway_id": "nat-123456789", - "state": "deleted", - "subnet_id": "subnet-1234567", - "tags": {}, - "vpc_id": "vpc-1234567" - } - ] - - Returns: - Tuple (bool, str, list) - """ - - params = { - 'SubnetId': subnet_id, - 'AllocationId': allocation_id - } - request_time = datetime.datetime.utcnow() - changed = False - token_provided = False - result = {} - msg = '' - - if client_token: - token_provided = True - params['ClientToken'] = client_token - - if module.check_mode: - changed = True - return changed, result, msg - - try: - result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]) - changed = True - - create_time = result['create_time'].replace(tzinfo=None) - - if token_provided and (request_time > create_time): - changed = False - - elif wait and result.get('state') != 'available': - wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id']) - - # Get new result - result = camel_dict_to_snake_dict( - _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0] - ) - - result['tags'], _tags_update_exists = ensure_tags( - client, module, nat_gw_id=result['nat_gateway_id'], tags=tags, - purge_tags=purge_tags - ) - except is_boto3_error_code('IdempotentParameterMismatch') as e: - msg = ( - 'NAT Gateway does not support update and token has already been provided:' + e - ) - changed = False - result = None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - return changed, result, msg - - -def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None, - if_exist_do_not_create=False, wait=False, client_token=None): - """Create an Amazon NAT Gateway. - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - subnet_id (str): The subnet_id the nat resides in - tags (dict): Tags to associate to the NAT gateway - purge_tags (bool): If true, remove tags not listed in I(tags) - - Kwargs: - allocation_id (str): The EIP Amazon identifier. - default = None - eip_address (str): The Elastic IP Address of the EIP. - default = None - if_exist_do_not_create (bool): if a nat gateway already exists in this - subnet, than do not create another one. - default = False - wait (bool): Wait for the nat to be in the deleted state before returning. 
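create() above blocks on a nat_gateway_available waiter when wait=true, sizing MaxAttempts from wait_timeout and the waiter's built-in delay. The same wait against a plain boto3 EC2 client, as a minimal sketch (the gateway ID is a placeholder):

    import boto3

    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('nat_gateway_available')
    wait_timeout = 320                                # seconds, the module default
    attempts = 1 + wait_timeout // waiter.config.delay
    waiter.wait(NatGatewayIds=['nat-0123456789abcdef0'],
                WaiterConfig={'MaxAttempts': attempts})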
- default = False - client_token (str): - default = None - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) - >>> subnet_id = 'subnet-w4t12897' - >>> allocation_id = 'eipalloc-36014da3' - >>> pre_create(client, module, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True) - [ - true, - "", - { - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "nat_gateway_addresses": [ - { - "public_ip": "52.87.29.36", - "network_interface_id": "eni-5579742d", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-36014da3" - } - ], - "nat_gateway_id": "nat-03835afb6e31df79b", - "state": "deleted", - "subnet_id": "subnet-w4t12897", - "tags": {}, - "vpc_id": "vpc-w68571b5" - } - ] - - Returns: - Tuple (bool, bool, str, list) - """ - - changed = False - msg = '' - results = {} - - if not allocation_id and not eip_address: - existing_gateways, allocation_id_exists = (gateway_in_subnet_exists(client, module, subnet_id)) - - if len(existing_gateways) > 0 and if_exist_do_not_create: - results = existing_gateways[0] - results['tags'], tags_update_exists = ensure_tags( - client, module, results['nat_gateway_id'], tags, purge_tags - ) - - if tags_update_exists: - changed = True - return changed, msg, results - - changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) - return changed, msg, results - else: - changed, msg, allocation_id = ( - allocate_eip_address(client, module) - ) - if not changed: - return changed, msg, dict() - - elif eip_address or allocation_id: - if eip_address and not allocation_id: - allocation_id, msg = ( - get_eip_allocation_id_by_address( - client, module, eip_address - ) - ) - if not allocation_id: - changed = False - return changed, msg, dict() - - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists( - client, module, subnet_id, allocation_id - ) - ) - - if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): - results = existing_gateways[0] - results['tags'], tags_update_exists = ensure_tags( - client, module, results['nat_gateway_id'], tags, purge_tags - ) - - if tags_update_exists: - changed = True - return changed, msg, results - - changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) - return changed, msg, results - - changed, results, msg = create( - client, module, subnet_id, allocation_id, tags, purge_tags, client_token, wait - ) - - return changed, msg, results - - -def remove(client, module, nat_gateway_id, wait=False, release_eip=False): - """Delete an Amazon NAT Gateway. - Args: - client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance - nat_gateway_id (str): The Amazon nat id - - Kwargs: - wait (bool): Wait for the nat to be in the deleted state before returning. - release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc. - - Basic Usage: - >>> client = boto3.client('ec2') - >>> module = AnsibleAWSModule(...) 
- >>> nat_gw_id = 'nat-03835afb6e31df79b' - >>> remove(client, module, nat_gw_id, wait=True, release_eip=True) - [ - true, - "", - { - "create_time": "2016-03-05T00:33:21.209000+00:00", - "delete_time": "2016-03-05T00:36:37.329000+00:00", - "nat_gateway_addresses": [ - { - "public_ip": "52.87.29.36", - "network_interface_id": "eni-5579742d", - "private_ip": "10.0.0.102", - "allocation_id": "eipalloc-36014da3" - } - ], - "nat_gateway_id": "nat-03835afb6e31df79b", - "state": "deleted", - "subnet_id": "subnet-w4t12897", - "tags": {}, - "vpc_id": "vpc-w68571b5" - } - ] - - Returns: - Tuple (bool, str, list) - """ - - params = { - 'NatGatewayId': nat_gateway_id - } - changed = False - results = {} - states = ['pending', 'available'] - msg = '' - - if module.check_mode: - changed = True - return changed, msg, results - - try: - gw_list = ( - get_nat_gateways( - client, module, nat_gateway_id=nat_gateway_id, - states=states - ) - ) - - if len(gw_list) == 1: - results = gw_list[0] - client.delete_nat_gateway(aws_retry=True, **params) - allocation_id = ( - results['nat_gateway_addresses'][0]['allocation_id'] - ) - changed = True - msg = ( - 'NAT gateway {0} is in a deleting state. Delete was successful' - .format(nat_gateway_id) - ) - - if wait and results.get('state') != 'deleted': - wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id) - - # Get new results - results = camel_dict_to_snake_dict( - _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - if release_eip: - eip_released, msg = ( - release_address(client, module, allocation_id)) - if not eip_released: - module.fail_json( - msg="Failed to release EIP {0}: {1}".format(allocation_id, msg) - ) - - return changed, msg, results - - -def ensure_tags(client, module, nat_gw_id, tags, purge_tags): - final_tags = [] - changed = False - - if module.check_mode and nat_gw_id is None: - # We can't describe tags without an EIP id, we might get here when creating a new EIP in check_mode - return final_tags, changed - - filters = ansible_dict_to_boto3_filter_list({'resource-id': nat_gw_id, 'resource-type': 'natgateway'}) - cur_tags = None - try: - cur_tags = client.describe_tags(aws_retry=True, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Couldnt describe tags') - if tags is None: - return boto3_tag_list_to_ansible_dict(cur_tags['Tags']), changed - - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) - final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) - - if to_update: - try: - if module.check_mode: - final_tags.update(to_update) - else: - client.create_tags( - aws_retry=True, - Resources=[nat_gw_id], - Tags=ansible_dict_to_boto3_tag_list(to_update) - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't create tags") - - if to_delete: - try: - if module.check_mode: - for key in to_delete: - del final_tags[key] - else: - tags_list = [] - for key in to_delete: - tags_list.append({'Key': key}) - - client.delete_tags(aws_retry=True, Resources=[nat_gw_id], Tags=tags_list) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete tags") - - if not module.check_mode and (to_update or 
to_delete): - try: - response = client.describe_tags(aws_retry=True, Filters=filters) - final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't describe tags") - - return final_tags, changed - - -def main(): - argument_spec = dict( - subnet_id=dict(type='str'), - eip_address=dict(type='str'), - allocation_id=dict(type='str'), - if_exist_do_not_create=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - release_eip=dict(type='bool', default=False), - nat_gateway_id=dict(type='str'), - client_token=dict(type='str', no_log=False), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['allocation_id', 'eip_address'] - ], - required_if=[['state', 'absent', ['nat_gateway_id']], - ['state', 'present', ['subnet_id']]], - ) - - state = module.params.get('state').lower() - subnet_id = module.params.get('subnet_id') - allocation_id = module.params.get('allocation_id') - eip_address = module.params.get('eip_address') - nat_gateway_id = module.params.get('nat_gateway_id') - wait = module.params.get('wait') - release_eip = module.params.get('release_eip') - client_token = module.params.get('client_token') - if_exist_do_not_create = module.params.get('if_exist_do_not_create') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - try: - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') - - changed = False - msg = '' - - if state == 'present': - changed, msg, results = ( - pre_create( - client, module, subnet_id, tags, purge_tags, allocation_id, eip_address, - if_exist_do_not_create, wait, client_token - ) - ) - else: - changed, msg, results = ( - remove( - client, module, nat_gateway_id, wait, release_eip - ) - ) - - module.exit_json(msg=msg, changed=changed, **results) - - -if __name__ == '__main__': - main() diff --git a/ec2_vpc_nat_gateway_facts.py b/ec2_vpc_nat_gateway_facts.py deleted file mode 120000 index fd969989977..00000000000 --- a/ec2_vpc_nat_gateway_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_nat_gateway_info.py \ No newline at end of file diff --git a/ec2_vpc_nat_gateway_info.py b/ec2_vpc_nat_gateway_info.py deleted file mode 100644 index 5acd59a819a..00000000000 --- a/ec2_vpc_nat_gateway_info.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: ec2_vpc_nat_gateway_info -short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods. -version_added: 1.0.0 -description: - - Gets various details related to AWS VPC Managed Nat Gateways - - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change. -options: - nat_gateway_ids: - description: - - List of specific nat gateway IDs to fetch details for. 
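ensure_tags() above leans on amazon.aws's compare_aws_tags() to split the desired tags into a dict of updates and a list of deletions. The equivalent decision logic in plain Python, as an illustrative sketch (diff_tags is a hypothetical stand-in for the real helper):

    def diff_tags(current, desired, purge=True):
        # tags to create/overwrite: desired value differs from (or is absent in) current
        to_update = {k: v for k, v in desired.items() if current.get(k) != v}
        # tags to delete: present now but not desired, only when purging
        to_delete = [k for k in current if purge and k not in desired]
        return to_update, to_delete

    current = {'Name': 'nat-a', 'Env': 'dev'}
    desired = {'Name': 'nat-a', 'Owner': 'ops'}
    print(diff_tags(current, desired))   # ({'Owner': 'ops'}, ['Env'])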
- type: list - elements: str - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html) - for possible filters. - type: dict -author: Karen Cheng (@Etherdaemon) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -''' - -EXAMPLES = r''' -# Simple example of listing all nat gateways -- name: List all managed nat gateways in ap-southeast-2 - community.aws.ec2_vpc_nat_gateway_info: - region: ap-southeast-2 - register: all_ngws - -- name: Debugging the result - ansible.builtin.debug: - msg: "{{ all_ngws.result }}" - -- name: Get details on specific nat gateways - community.aws.ec2_vpc_nat_gateway_info: - nat_gateway_ids: - - nat-1234567891234567 - - nat-7654321987654321 - region: ap-southeast-2 - register: specific_ngws - -- name: Get all nat gateways with specific filters - community.aws.ec2_vpc_nat_gateway_info: - region: ap-southeast-2 - filters: - state: ['pending'] - register: pending_ngws - -- name: Get nat gateways with specific filter - community.aws.ec2_vpc_nat_gateway_info: - region: ap-southeast-2 - filters: - subnet-id: subnet-12345678 - state: ['available'] - register: existing_nat_gateways -''' - -RETURN = r''' -changed: - description: True if listing the internet gateways succeeds - type: bool - returned: always - sample: false -result: - description: - - The result of the describe, converted to ansible snake case style. - - See also U(http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways) - returned: suceess - type: list - contains: - create_time: - description: The date and time the NAT gateway was created - returned: always - type: str - sample: "2021-03-11T22:43:25+00:00" - delete_time: - description: The date and time the NAT gateway was deleted - returned: when the NAT gateway has been deleted - type: str - sample: "2021-03-11T22:43:25+00:00" - nat_gateway_addresses: - description: List containing a dictionary with the IP addresses and network interface associated with the NAT gateway - returned: always - type: dict - contains: - allocation_id: - description: The allocation ID of the Elastic IP address that's associated with the NAT gateway - returned: always - type: str - sample: eipalloc-0853e66a40803da76 - network_interface_id: - description: The ID of the network interface associated with the NAT gateway - returned: always - type: str - sample: eni-0a37acdbe306c661c - private_ip: - description: The private IP address associated with the Elastic IP address - returned: always - type: str - sample: 10.0.238.227 - public_ip: - description: The Elastic IP address associated with the NAT gateway - returned: always - type: str - sample: 34.204.123.52 - nat_gateway_id: - description: The ID of the NAT gateway - returned: always - type: str - sample: nat-0c242a2397acf6173 - state: - description: state of the NAT gateway - returned: always - type: str - sample: available - subnet_id: - description: The ID of the subnet in which the NAT gateway is located - returned: always - type: str - sample: subnet-098c447465d4344f9 - vpc_id: - description: The ID of the VPC in which the NAT gateway is located - returned: always - type: str - sample: vpc-02f37f48438ab7d4c - tags: - description: Tags applied to the NAT gateway - returned: always - type: dict - sample: - Tag1: tag1 - Tag_2: tag_2 -''' - - -try: - import botocore -except ImportError: - pass # Handled by 
AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_nat_gateways(client, module, **params): - try: - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] - except is_boto3_error_code('InvalidNatGatewayID.NotFound'): - module.exit_json(msg="NAT gateway not found.") - except is_boto3_error_code('NatGatewayMalformed'): # pylint: disable=duplicate-except - module.fail_json_aws(msg="NAT gateway id is malformed.") - - -def get_nat_gateways(client, module): - params = dict() - nat_gateways = list() - - params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['NatGatewayIds'] = module.params.get('nat_gateway_ids') - - try: - result = normalize_boto3_result(_describe_nat_gateways(client, module, **params)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Unable to describe NAT gateways.') - - for gateway in result: - # Turn the boto3 result into ansible_friendly_snaked_names - converted_gateway = camel_dict_to_snake_dict(gateway) - if 'tags' in converted_gateway: - # Turn the boto3 result into ansible friendly tag dictionary - converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags']) - nat_gateways.append(converted_gateway) - - return nat_gateways - - -def main(): - argument_spec = dict( - filters=dict(default={}, type='dict'), - nat_gateway_ids=dict(default=[], type='list', elements='str'), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) - if module._name == 'ec2_vpc_nat_gateway_facts': - module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", - date='2021-12-01', collection_name='community.aws') - - try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - results = get_nat_gateways(connection, module) - - module.exit_json(result=results) - - -if __name__ == '__main__': - main() From 7dfb6ba3aab3da8721cbe4d9367f062c786799c1 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 26 Aug 2021 15:23:19 +0200 Subject: [PATCH 245/683] Remove modules --- ec2_vpc_endpoint.py | 480 ------------------------------- ec2_vpc_endpoint_facts.py | 1 - ec2_vpc_endpoint_info.py | 210 -------------- ec2_vpc_endpoint_service_info.py | 179 ------------ 4 files changed, 870 deletions(-) delete mode 100644 ec2_vpc_endpoint.py delete mode 120000 ec2_vpc_endpoint_facts.py delete mode 100644 ec2_vpc_endpoint_info.py delete mode 100644 ec2_vpc_endpoint_service_info.py diff --git a/ec2_vpc_endpoint.py b/ec2_vpc_endpoint.py deleted file mode 100644 index 75ba2479afe..00000000000 --- 
a/ec2_vpc_endpoint.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: ec2_vpc_endpoint -short_description: Create and delete AWS VPC Endpoints. -version_added: 1.0.0 -description: - - Creates AWS VPC endpoints. - - Deletes AWS VPC endpoints. - - This module supports check mode. -options: - vpc_id: - description: - - Required when creating a VPC endpoint. - required: false - type: str - vpc_endpoint_type: - description: - - The type of endpoint. - required: false - default: Gateway - choices: [ "Interface", "Gateway", "GatewayLoadBalancer" ] - type: str - version_added: 1.5.0 - service: - description: - - An AWS supported vpc endpoint service. Use the M(community.aws.ec2_vpc_endpoint_info) - module to describe the supported endpoint services. - - Required when creating an endpoint. - required: false - type: str - policy: - description: - - A properly formatted json policy as string, see - U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813). - Cannot be used with I(policy_file). - - Option when creating an endpoint. If not provided AWS will - utilise a default policy which provides full access to the service. - required: false - type: json - policy_file: - description: - - The path to the properly json formatted policy file, see - U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) - on how to use it properly. Cannot be used with I(policy). - - Option when creating an endpoint. If not provided AWS will - utilise a default policy which provides full access to the service. - - This option has been deprecated and will be removed after 2022-12-01 - to maintain the existing functionality please use the I(policy) option - and a file lookup. - required: false - aliases: [ "policy_path" ] - type: path - state: - description: - - present to ensure resource is created. - - absent to remove resource - required: false - default: present - choices: [ "present", "absent" ] - type: str - tags: - description: - - A dict of tags to apply to the internet gateway. - - To remove all tags set I(tags={}) and I(purge_tags=true). - type: dict - version_added: 1.5.0 - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - This means you have to specify all the desired tags on each task affecting an instance. - default: false - type: bool - version_added: 1.5.0 - wait: - description: - - When specified, will wait for either available status for state present. - Unfortunately this is ignored for delete actions due to a difference in - behaviour from AWS. - required: false - default: no - type: bool - wait_timeout: - description: - - Used in conjunction with wait. Number of seconds to wait for status. - Unfortunately this is ignored for delete actions due to a difference in - behaviour from AWS. - required: false - default: 320 - type: int - route_table_ids: - description: - - List of one or more route table ids to attach to the endpoint. A route - is added to the route table with the destination of the endpoint if - provided. 
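The policy and policy_file options above both end up as a JSON string handed to EC2 as PolicyDocument; when neither is given, AWS attaches a default full-access policy. A sketch of serialising the kind of S3 gateway-endpoint policy shown in this module's return sample (statement contents are illustrative):

    import json

    # permissive S3 policy of the shape shown in the RETURN docs below
    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "AllowAll",
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:*",
            "Resource": ["arn:aws:s3:::*", "arn:aws:s3:::*/*"],
        }],
    }
    policy_document = json.dumps(policy)   # what create_vpc_endpoint() receives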
- required: false - type: list - elements: str - vpc_endpoint_id: - description: - - One or more vpc endpoint ids to remove from the AWS account - required: false - type: str - client_token: - description: - - Optional client token to ensure idempotency - required: false - type: str -author: Karen Cheng (@Etherdaemon) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create new vpc endpoint with a json template for policy - community.aws.ec2_vpc_endpoint: - state: present - region: ap-southeast-2 - vpc_id: vpc-12345678 - service: com.amazonaws.ap-southeast-2.s3 - policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} " - route_table_ids: - - rtb-12345678 - - rtb-87654321 - register: new_vpc_endpoint - -- name: Create new vpc endpoint with the default policy - community.aws.ec2_vpc_endpoint: - state: present - region: ap-southeast-2 - vpc_id: vpc-12345678 - service: com.amazonaws.ap-southeast-2.s3 - route_table_ids: - - rtb-12345678 - - rtb-87654321 - register: new_vpc_endpoint - -- name: Create new vpc endpoint with json file - community.aws.ec2_vpc_endpoint: - state: present - region: ap-southeast-2 - vpc_id: vpc-12345678 - service: com.amazonaws.ap-southeast-2.s3 - policy_file: "{{ role_path }}/files/endpoint_policy.json" - route_table_ids: - - rtb-12345678 - - rtb-87654321 - register: new_vpc_endpoint - -- name: Delete newly created vpc endpoint - community.aws.ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" - region: ap-southeast-2 -''' - -RETURN = r''' -endpoints: - description: The resulting endpoints from the module call - returned: success - type: list - sample: [ - { - "creation_timestamp": "2017-02-20T05:04:15+00:00", - "policy_document": { - "Id": "Policy1450910922815", - "Statement": [ - { - "Action": "s3:*", - "Effect": "Allow", - "Principal": "*", - "Resource": [ - "arn:aws:s3:::*/*", - "arn:aws:s3:::*" - ], - "Sid": "Stmt1450910920641" - } - ], - "Version": "2012-10-17" - }, - "route_table_ids": [ - "rtb-abcd1234" - ], - "service_name": "com.amazonaws.ap-southeast-2.s3", - "vpc_endpoint_id": "vpce-a1b2c3d4", - "vpc_id": "vpc-abbad0d0" - } - ] -''' - -import datetime -import json -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.six import string_types -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - - -def get_endpoints(client, module, endpoint_id=None): - params = dict() - if endpoint_id: - params['VpcEndpointIds'] = [endpoint_id] - else: - 
filters = list() - if module.params.get('service'): - filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]}) - if module.params.get('vpc_id'): - filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]}) - params['Filters'] = filters - try: - result = client.describe_vpc_endpoints(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get endpoints") - - # normalize iso datetime fields in result - normalized_result = normalize_boto3_result(result) - return normalized_result - - -def match_endpoints(route_table_ids, service_name, vpc_id, endpoint): - found = False - sorted_route_table_ids = [] - - if route_table_ids: - sorted_route_table_ids = sorted(route_table_ids) - - if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: - sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds']) - if sorted_endpoint_rt_ids == sorted_route_table_ids: - - found = True - return found - - -def ensure_tags(client, module, vpc_endpoint_id): - changed = False - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - - filters = ansible_dict_to_boto3_filter_list({'resource-id': vpc_endpoint_id}) - try: - current_tags = client.describe_tags(aws_retry=True, Filters=filters) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe tags for VPC Endpoint: {0}".format(vpc_endpoint_id)) - - tags_to_set, tags_to_unset = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags.get('Tags')), tags, purge_tags=purge_tags) - if purge_tags and not tags: - tags_to_unset = current_tags - - if tags_to_unset: - changed = True - if not module.check_mode: - try: - client.delete_tags(aws_retry=True, Resources=[vpc_endpoint_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_unset]) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_unset)) - - if tags_to_set: - changed = True - if not module.check_mode: - try: - client.create_tags(aws_retry=True, Resources=[vpc_endpoint_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_set)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_to_set)) - return changed - - -def setup_creation(client, module): - endpoint_id = module.params.get('vpc_endpoint_id') - route_table_ids = module.params.get('route_table_ids') - service_name = module.params.get('service') - vpc_id = module.params.get('vpc_id') - changed = False - - if not endpoint_id: - # Try to use the module parameters to match any existing endpoints - all_endpoints = get_endpoints(client, module, endpoint_id) - if len(all_endpoints['VpcEndpoints']) > 0: - for endpoint in all_endpoints['VpcEndpoints']: - if match_endpoints(route_table_ids, service_name, vpc_id, endpoint): - endpoint_id = endpoint['VpcEndpointId'] - break - - if endpoint_id: - # If we have an endpoint now, just ensure tags and exit - if module.params.get('tags'): - changed = ensure_tags(client, module, endpoint_id) - normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0] - return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags']) - - changed, result = create_vpc_endpoint(client, module) - - return changed, camel_dict_to_snake_dict(result, 
ignore_list=['Tags']) - - -def create_vpc_endpoint(client, module): - params = dict() - changed = False - token_provided = False - params['VpcId'] = module.params.get('vpc_id') - params['VpcEndpointType'] = module.params.get('vpc_endpoint_type') - params['ServiceName'] = module.params.get('service') - - if module.check_mode: - changed = True - result = 'Would have created VPC Endpoint if not in check mode' - module.exit_json(changed=changed, result=result) - - if module.params.get('route_table_ids'): - params['RouteTableIds'] = module.params.get('route_table_ids') - - if module.params.get('client_token'): - token_provided = True - request_time = datetime.datetime.utcnow() - params['ClientToken'] = module.params.get('client_token') - - policy = None - if module.params.get('policy'): - try: - policy = json.loads(module.params.get('policy')) - except ValueError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - - elif module.params.get('policy_file'): - try: - with open(module.params.get('policy_file'), 'r') as json_data: - policy = json.load(json_data) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - - if policy: - params['PolicyDocument'] = json.dumps(policy) - - try: - changed = True - result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint'] - if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)): - changed = False - elif module.params.get('wait') and not module.check_mode: - try: - waiter = get_waiter(client, 'vpc_endpoint_exists') - waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15)) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(msg='Error waiting for vpc endpoint to become available - please check the AWS console') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failure while waiting for status') - - except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except - module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") - except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except - module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to create VPC.") - - if module.params.get('tags'): - ensure_tags(client, module, result['VpcEndpointId']) - - # describe and normalize iso datetime fields in result after adding tags - normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0] - return changed, normalized_result - - -def setup_removal(client, module): - params = dict() - changed = False - - if module.check_mode: - try: - exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')]) - if exists: - result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'} - changed = True - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - result = {'msg': 'Endpoint does not exist, nothing to delete.'} - changed = False - except (botocore.exceptions.BotoCoreError, 
botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get endpoints") - - return changed, result - - if isinstance(module.params.get('vpc_endpoint_id'), string_types): - params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')] - else: - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id') - try: - result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful'] - if len(result) < len(params['VpcEndpointIds']): - changed = True - # For some reason delete_vpc_endpoints doesn't throw exceptions it - # returns a list of failed 'results' instead. Throw these so we can - # catch them the way we expect - for r in result: - try: - raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') - except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): - continue - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "Failed to delete VPC endpoint") - return changed, result - - -def main(): - argument_spec = dict( - vpc_id=dict(), - vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']), - service=dict(), - policy=dict(type='json'), - policy_file=dict(type='path', aliases=['policy_path']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - route_table_ids=dict(type='list', elements='str'), - vpc_endpoint_id=dict(), - client_token=dict(no_log=False), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['policy', 'policy_file']], - required_if=[ - ['state', 'present', ['vpc_id', 'service']], - ['state', 'absent', ['vpc_endpoint_id']], - ], - ) - - # Validate Requirements - state = module.params.get('state') - - if module.params.get('policy_file'): - module.deprecate('The policy_file option has been deprecated and' - ' will be removed after 2022-12-01', - date='2022-12-01', collection_name='community.aws') - - try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - # Ensure resource is present - if state == 'present': - (changed, results) = setup_creation(ec2, module) - else: - (changed, results) = setup_removal(ec2, module) - - module.exit_json(changed=changed, result=results) - - -if __name__ == '__main__': - main() diff --git a/ec2_vpc_endpoint_facts.py b/ec2_vpc_endpoint_facts.py deleted file mode 120000 index d2a144a7b86..00000000000 --- a/ec2_vpc_endpoint_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_endpoint_info.py \ No newline at end of file diff --git a/ec2_vpc_endpoint_info.py b/ec2_vpc_endpoint_info.py deleted file mode 100644 index f84434cb9af..00000000000 --- a/ec2_vpc_endpoint_info.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: ec2_vpc_endpoint_info -short_description: Retrieves AWS VPC endpoints details using AWS methods. -version_added: 1.0.0 -description: - - Gets various details related to AWS VPC endpoints. 
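create_vpc_endpoint() above forwards VpcId, VpcEndpointType, ServiceName and, optionally, RouteTableIds, PolicyDocument and ClientToken straight to EC2. Stripped of the Ansible plumbing, the underlying boto3 call looks roughly like this (IDs reused from the examples above):

    import boto3

    ec2 = boto3.client('ec2', region_name='ap-southeast-2')
    resp = ec2.create_vpc_endpoint(
        VpcId='vpc-12345678',
        VpcEndpointType='Gateway',
        ServiceName='com.amazonaws.ap-southeast-2.s3',
        RouteTableIds=['rtb-12345678', 'rtb-87654321'],
    )
    print(resp['VpcEndpoint']['VpcEndpointId'])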
- - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change. -options: - query: - description: - - Defaults to C(endpoints). - - Specifies the query action to take. - - I(query=endpoints) returns information about AWS VPC endpoints. - - Retrieving information about services using I(query=services) has been - deprecated in favour of the M(ec2_vpc_endpoint_service_info) module. - - The I(query) option has been deprecated and will be removed after 2022-12-01. - required: False - choices: - - services - - endpoints - type: str - vpc_endpoint_ids: - description: - - The IDs of specific endpoints to retrieve the details of. - type: list - elements: str - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html) - for possible filters. - type: dict -author: Karen Cheng (@Etherdaemon) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Simple example of listing all support AWS services for VPC endpoints -- name: List supported AWS endpoint services - community.aws.ec2_vpc_endpoint_info: - query: services - region: ap-southeast-2 - register: supported_endpoint_services - -- name: Get all endpoints in ap-southeast-2 region - community.aws.ec2_vpc_endpoint_info: - query: endpoints - region: ap-southeast-2 - register: existing_endpoints - -- name: Get all endpoints with specific filters - community.aws.ec2_vpc_endpoint_info: - query: endpoints - region: ap-southeast-2 - filters: - vpc-id: - - vpc-12345678 - - vpc-87654321 - vpc-endpoint-state: - - available - - pending - register: existing_endpoints - -- name: Get details on specific endpoint - community.aws.ec2_vpc_endpoint_info: - query: endpoints - region: ap-southeast-2 - vpc_endpoint_ids: - - vpce-12345678 - register: endpoint_details -''' - -RETURN = r''' -service_names: - description: AWS VPC endpoint service names - returned: I(query) is C(services) - type: list - sample: - service_names: - - com.amazonaws.ap-southeast-2.s3 -vpc_endpoints: - description: - - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp, - policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id. 
- returned: I(query) is C(endpoints) - type: list - sample: - vpc_endpoints: - - creation_timestamp: "2017-02-16T11:06:48+00:00" - policy_document: > - "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\", - \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\", - \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}" - route_table_ids: - - rtb-abcd1234 - service_name: "com.amazonaws.ap-southeast-2.s3" - state: "available" - vpc_endpoint_id: "vpce-abbad0d0" - vpc_id: "vpc-1111ffff" -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list - - -@AWSRetry.jittered_backoff() -def _describe_endpoints(client, **params): - paginator = client.get_paginator('describe_vpc_endpoints') - return paginator.paginate(**params).build_full_result() - - -@AWSRetry.jittered_backoff() -def _describe_endpoint_services(client, **params): - paginator = client.get_paginator('describe_vpc_endpoint_services') - return paginator.paginate(**params).build_full_result() - - -def get_supported_services(client, module): - try: - services = _describe_endpoint_services(client) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get endpoint servicess") - - results = list(services['ServiceNames']) - return dict(service_names=results) - - -def get_endpoints(client, module): - results = list() - params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('vpc_endpoint_ids'): - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') - try: - results = _describe_endpoints(client, **params)['VpcEndpoints'] - results = normalize_boto3_result(results) - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get endpoints") - - return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) - - -def main(): - argument_spec = dict( - query=dict(choices=['services', 'endpoints'], required=False), - filters=dict(default={}, type='dict'), - vpc_endpoint_ids=dict(type='list', elements='str'), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ec2_vpc_endpoint_facts': - module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", date='2021-12-01', collection_name='community.aws') - - # Validate Requirements - try: - connection = module.client('ec2') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - query = module.params.get('query') - if query == 'endpoints': - 
module.deprecate('The query option has been deprecated and' - ' will be removed after 2022-12-01. Searching for' - ' `endpoints` is now the default and after' - ' 2022-12-01 this module will only support fetching' - ' endpoints.', - date='2022-12-01', collection_name='community.aws') - elif query == 'services': - module.deprecate('Support for fetching service information with this ' - 'module has been deprecated and will be removed after' - ' 2022-12-01. ' - 'Please use the ec2_vpc_endpoint_service_info module ' - 'instead.', date='2022-12-01', - collection_name='community.aws') - else: - query = 'endpoints' - - invocations = { - 'services': get_supported_services, - 'endpoints': get_endpoints, - } - results = invocations[query](connection, module) - - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/ec2_vpc_endpoint_service_info.py b/ec2_vpc_endpoint_service_info.py deleted file mode 100644 index 8dee0652b84..00000000000 --- a/ec2_vpc_endpoint_service_info.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: ec2_vpc_endpoint_service_info -short_description: retrieves AWS VPC endpoint service details -version_added: 1.5.0 -description: - - Gets details related to AWS VPC Endpoint Services. -options: - filters: - description: - - A dict of filters to apply. - - Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpointServices.html) - for possible filters. - type: dict - service_names: - description: - - A list of service names which can be used to narrow the search results. - type: list - elements: str -author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Simple example of listing all supported AWS services for VPC endpoints -- name: List supported AWS endpoint services - community.aws.ec2_vpc_endpoint_service_info: - region: ap-southeast-2 - register: supported_endpoint_services -''' - -RETURN = r''' -service_names: - description: List of supported AWS VPC endpoint service names. - returned: success - type: list - sample: - service_names: - - com.amazonaws.ap-southeast-2.s3 -service_details: - description: Detailed information about the AWS VPC endpoint services. - returned: success - type: complex - contains: - service_name: - returned: success - description: The ARN of the endpoint service. - type: str - service_id: - returned: success - description: The ID of the endpoint service. - type: str - service_type: - returned: success - description: The type of the service - type: list - availability_zones: - returned: success - description: The Availability Zones in which the service is available. - type: list - owner: - returned: success - description: The AWS account ID of the service owner. - type: str - base_endpoint_dns_names: - returned: success - description: The DNS names for the service. - type: list - private_dns_name: - returned: success - description: The private DNS name for the service. - type: str - private_dns_names: - returned: success - description: The private DNS names assigned to the VPC endpoint service. - type: list - vpc_endpoint_policy_supported: - returned: success - description: Whether the service supports endpoint policies. 
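get_services() below builds its result from a describe_vpc_endpoint_services paginator; a minimal boto3 equivalent that just lists the available service names:

    import boto3

    ec2 = boto3.client('ec2', region_name='ap-southeast-2')
    paginator = ec2.get_paginator('describe_vpc_endpoint_services')
    result = paginator.paginate().build_full_result()
    for name in result['ServiceNames']:
        print(name)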
- type: bool - acceptance_required: - returned: success - description: - Whether VPC endpoint connection requests to the service must be - accepted by the service owner. - type: bool - manages_vpc_endpoints: - returned: success - description: Whether the service manages its VPC endpoints. - type: bool - tags: - returned: success - description: A dict of tags associated with the service - type: dict - private_dns_name_verification_state: - returned: success - description: - - The verification state of the VPC endpoint service. - - Consumers of an endpoint service cannot use the private name when the state is not C(verified). - type: str -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -# We're using a paginator so we can't use the client decorators -@AWSRetry.jittered_backoff() -def get_services(client, module): - paginator = client.get_paginator('describe_vpc_endpoint_services') - params = {} - if module.params.get("filters"): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - - if module.params.get("service_names"): - params['ServiceNames'] = module.params.get("service_names") - - results = paginator.paginate(**params).build_full_result() - return results - - -def normalize_service(service): - normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags']) - normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags')) - return normalized - - -def normalize_result(result): - normalized = {} - normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')] - normalized['service_names'] = result.get('ServiceNames', []) - return normalized - - -def main(): - argument_spec = dict( - filters=dict(default={}, type='dict'), - service_names=dict(type='list', elements='str'), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - # Validate Requirements - try: - client = module.client('ec2') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - try: - results = get_services(client, module) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to retrieve service details') - normalized_result = normalize_result(results) - - module.exit_json(changed=False, **normalized_result) - - -if __name__ == '__main__': - main() From d6476608f017afbd7fcee0db78451653aeb3317a Mon Sep 17 00:00:00 2001 From: Jill Rouleau Date: Tue, 27 Jul 2021 14:00:13 -0700 Subject: [PATCH 246/683] Remove modules --- ec2_vpc_igw.py | 291 ------------------------------------------- ec2_vpc_igw_facts.py | 1 - ec2_vpc_igw_info.py | 184 --------------------------- 3 files changed, 476 deletions(-) delete mode 100644 ec2_vpc_igw.py delete mode 120000 ec2_vpc_igw_facts.py delete mode 100644 ec2_vpc_igw_info.py diff --git a/ec2_vpc_igw.py b/ec2_vpc_igw.py deleted file mode 100644 index c578df12939..00000000000 --- a/ec2_vpc_igw.py +++ /dev/null @@ 
-1,291 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ec2_vpc_igw -version_added: 1.0.0 -short_description: Manage an AWS VPC Internet gateway -description: - - Manage an AWS VPC Internet gateway -author: Robert Estelle (@erydo) -options: - vpc_id: - description: - - The VPC ID for the VPC in which to manage the Internet Gateway. - required: true - type: str - tags: - description: - - A dict of tags to apply to the internet gateway. - - To remove all tags set I(tags={}) and I(purge_tags=true). - aliases: [ 'resource_tags' ] - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags). - type: bool - default: true - version_added: 1.3.0 - state: - description: - - Create or terminate the IGW - default: present - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Ensure that the VPC has an Internet Gateway. -# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use in setting up NATs etc. -- name: Create Internet gateway - community.aws.ec2_vpc_igw: - vpc_id: vpc-abcdefgh - state: present - register: igw - -- name: Create Internet gateway with tags - community.aws.ec2_vpc_igw: - vpc_id: vpc-abcdefgh - state: present - tags: - Tag1: tag1 - Tag2: tag2 - register: igw - -- name: Delete Internet gateway - community.aws.ec2_vpc_igw: - state: absent - vpc_id: vpc-abcdefgh - register: vpc_igw_delete -''' - -RETURN = ''' -changed: - description: If any changes have been made to the Internet Gateway. - type: bool - returned: always - sample: - changed: false -gateway_id: - description: The unique identifier for the Internet Gateway. - type: str - returned: I(state=present) - sample: - gateway_id: "igw-XXXXXXXX" -tags: - description: The tags associated the Internet Gateway. - type: dict - returned: I(state=present) - sample: - tags: - "Ansible": "Test" -vpc_id: - description: The VPC ID associated with the Internet Gateway. 
- type: str - returned: I(state=present) - sample: - vpc_id: "vpc-XXXXXXXX" -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -class AnsibleEc2Igw(object): - - def __init__(self, module, results): - self._module = module - self._results = results - self._connection = self._module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - self._check_mode = self._module.check_mode - - def process(self): - vpc_id = self._module.params.get('vpc_id') - state = self._module.params.get('state', 'present') - tags = self._module.params.get('tags') - purge_tags = self._module.params.get('purge_tags') - - if state == 'present': - self.ensure_igw_present(vpc_id, tags, purge_tags) - elif state == 'absent': - self.ensure_igw_absent(vpc_id) - - def get_matching_igw(self, vpc_id): - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) - igws = [] - try: - response = self._connection.describe_internet_gateways(aws_retry=True, Filters=filters) - igws = response.get('InternetGateways', []) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e) - - igw = None - if len(igws) > 1: - self._module.fail_json( - msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id)) - elif igws: - igw = camel_dict_to_snake_dict(igws[0]) - - return igw - - def ensure_tags(self, igw_id, tags, purge_tags): - final_tags = [] - - filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'}) - cur_tags = None - try: - cur_tags = self._connection.describe_tags(aws_retry=True, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't describe tags") - - if tags is None: - return boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) - - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) - final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')) - - if to_update: - try: - if self._check_mode: - final_tags.update(to_update) - else: - self._connection.create_tags( - aws_retry=True, - Resources=[igw_id], - Tags=ansible_dict_to_boto3_tag_list(to_update) - ) - - self._results['changed'] = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't create tags") - - if to_delete: - try: - if self._check_mode: - for key in to_delete: - del final_tags[key] - else: - tags_list = [] - for key in to_delete: - tags_list.append({'Key': key}) - - self._connection.delete_tags(aws_retry=True, Resources=[igw_id], Tags=tags_list) - - self._results['changed'] = True - except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't delete tags") - - if not self._check_mode and (to_update or to_delete): - try: - response = self._connection.describe_tags(aws_retry=True, Filters=filters) - final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags')) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't describe tags") - - return final_tags - - @staticmethod - def get_igw_info(igw): - return { - 'gateway_id': igw['internet_gateway_id'], - 'tags': igw['tags'], - 'vpc_id': igw['vpc_id'] - } - - def ensure_igw_absent(self, vpc_id): - igw = self.get_matching_igw(vpc_id) - if igw is None: - return self._results - - if self._check_mode: - self._results['changed'] = True - return self._results - - try: - self._results['changed'] = True - self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) - self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") - - return self._results - - def ensure_igw_present(self, vpc_id, tags, purge_tags): - igw = self.get_matching_igw(vpc_id) - - if igw is None: - if self._check_mode: - self._results['changed'] = True - self._results['gateway_id'] = None - return self._results - - try: - response = self._connection.create_internet_gateway(aws_retry=True) - - # Ensure the gateway exists before trying to attach it or add tags - waiter = get_waiter(self._connection, 'internet_gateway_exists') - waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) - - igw = camel_dict_to_snake_dict(response['InternetGateway']) - self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) - self._results['changed'] = True - except botocore.exceptions.WaiterError as e: - self._module.fail_json_aws(e, msg="No Internet Gateway exists.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') - - igw['vpc_id'] = vpc_id - - igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, purge_tags=purge_tags) - - igw_info = self.get_igw_info(igw) - self._results.update(igw_info) - - return self._results - - -def main(): - argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - results = dict( - changed=False - ) - igw_manager = AnsibleEc2Igw(module=module, results=results) - igw_manager.process() - - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/ec2_vpc_igw_facts.py b/ec2_vpc_igw_facts.py deleted file mode 120000 index b3eeb3fee6e..00000000000 --- a/ec2_vpc_igw_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_igw_info.py \ No newline at end of file diff --git a/ec2_vpc_igw_info.py b/ec2_vpc_igw_info.py deleted file mode 100644 index 00ecac957ab..00000000000 --- a/ec2_vpc_igw_info.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_vpc_igw_info -version_added: 1.0.0 -short_description: Gather information about internet gateways in AWS -description: - - Gather information about internet gateways in AWS. - - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change. -author: "Nick Aslanidis (@naslanidis)" -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters. - type: dict - internet_gateway_ids: - description: - - Get details of specific Internet Gateway ID. Provide this value as a list. - type: list - elements: str - convert_tags: - description: - - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format. - - This currently defaults to C(False). The default will be changed to C(True) after 2022-06-22. - type: bool - version_added: 1.3.0 -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# # Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all Internet Gateways for an account or profile - community.aws.ec2_vpc_igw_info: - region: ap-southeast-2 - profile: production - register: igw_info - -- name: Gather information about a filtered list of Internet Gateways - community.aws.ec2_vpc_igw_info: - region: ap-southeast-2 - profile: production - filters: - "tag:Name": "igw-123" - register: igw_info - -- name: Gather information about a specific internet gateway by InternetGatewayId - community.aws.ec2_vpc_igw_info: - region: ap-southeast-2 - profile: production - internet_gateway_ids: igw-c1231234 - register: igw_info -''' - -RETURN = r''' -changed: - description: True if listing the internet gateways succeeds. - type: bool - returned: always - sample: "false" -internet_gateways: - description: The internet gateways for the account. - returned: always - type: complex - contains: - attachments: - description: Any VPCs attached to the internet gateway - returned: I(state=present) - type: complex - contains: - state: - description: The current state of the attachment - returned: I(state=present) - type: str - sample: available - vpc_id: - description: The ID of the VPC. 
- returned: I(state=present) - type: str - sample: vpc-02123b67 - internet_gateway_id: - description: The ID of the internet gateway - returned: I(state=present) - type: str - sample: igw-2123634d - tags: - description: Any tags assigned to the internet gateway - returned: I(state=present) - type: dict - sample: - tags: - "Ansible": "Test" -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - - -def get_internet_gateway_info(internet_gateway, convert_tags): - if convert_tags: - tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags']) - ignore_list = ["Tags"] - else: - tags = internet_gateway['Tags'] - ignore_list = [] - internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'], - 'Attachments': internet_gateway['Attachments'], - 'Tags': tags} - - internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list) - return internet_gateway_info - - -def list_internet_gateways(connection, module): - params = dict() - - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - convert_tags = module.params.get('convert_tags') - - if module.params.get("internet_gateway_ids"): - params['InternetGatewayIds'] = module.params.get("internet_gateway_ids") - - try: - all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params) - except is_boto3_error_code('InvalidInternetGatewayID.NotFound'): - module.fail_json('InternetGateway not found') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 'Unable to describe internet gateways') - - return [get_internet_gateway_info(igw, convert_tags) - for igw in all_internet_gateways['InternetGateways']] - - -def main(): - argument_spec = dict( - filters=dict(type='dict', default=dict()), - internet_gateway_ids=dict(type='list', default=None, elements='str'), - convert_tags=dict(type='bool'), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ec2_vpc_igw_facts': - module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", date='2021-12-01', collection_name='community.aws') - - if module.params.get('convert_tags') is None: - module.deprecate('This module currently returns boto3 style tags by default. ' - 'This default has been deprecated and the module will return a simple dictionary in future. 
' - 'This behaviour can be controlled through the convert_tags parameter.', - date='2021-12-01', collection_name='community.aws') - - # Validate Requirements - try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - # call your function here - results = list_internet_gateways(connection, module) - - module.exit_json(internet_gateways=results) - - -if __name__ == '__main__': - main() From 0327ce7efd2989e6791aca11e60e747e2167c039 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 15 Sep 2021 06:44:54 +0200 Subject: [PATCH 247/683] Bump botocore/boto3 requirements prior to release of 2.0.0 --- aws_msk_cluster.py | 1 - aws_msk_config.py | 3 --- dynamodb_table.py | 4 ++-- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 41f2dd62e44..d6cf35d3ba3 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -544,7 +544,6 @@ def create_or_update_cluster(client, module): } }, "cluster_kafka_version": { - "botocore_version": "1.16.19", "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], "target_value": module.params.get("version"), "update_params": { diff --git a/aws_msk_config.py b/aws_msk_config.py index 6258ae916f6..f1966847422 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -279,9 +279,6 @@ def main(): module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True) - # Support for update_configuration and delete_configuration added in 1.17.48 - module.require_botocore_at_least('1.17.48') - client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) if module.params["state"] == "present": diff --git a/dynamodb_table.py b/dynamodb_table.py index b23c443cac9..7a3add3727a 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -19,8 +19,8 @@ requirements: - python >= 3.6 - boto >= 2.49.0 -- boto3 >= 1.13.0 -- botocore >= 1.16.0 +- boto3 >= 1.15.0 +- botocore >= 1.18.0 options: state: description: From f29b6ebd4bafaa7e814847e7dd7cec77ce4e616c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 17 Sep 2021 20:38:34 +0200 Subject: [PATCH 248/683] Migrate elasticache_subnet_group to boto3 --- elasticache_subnet_group.py | 196 +++++++++++++++++++++++------------- 1 file changed, 127 insertions(+), 69 deletions(-) diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 44a3e39ae6f..f982a7a253c 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -12,34 +12,35 @@ version_added: 1.0.0 short_description: manage ElastiCache subnet groups description: - - Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5. + - Creates, modifies, and deletes ElastiCache subnet groups. options: state: description: - Specifies whether the subnet should be present or absent. - required: true choices: [ 'present' , 'absent' ] + default: 'present' type: str name: description: - Database subnet group identifier. + - This value is automatically converted to lowercase. required: true type: str description: description: - - ElastiCache subnet group description. Only set when a new group is added. + - ElastiCache subnet group description. + - When not provided defaults to I(name) on subnet group creation. type: str subnets: description: - List of subnet IDs that make up the ElastiCache subnet group. 
type: list elements: str -author: "Tim Mahoney (@timmahoney)" +author: + - "Tim Mahoney (@timmahoney)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -requirements: -- boto >= 2.49.0 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -59,86 +60,143 @@ ''' try: - import boto - from boto.elasticache import connect_to_region - from boto.exception import BotoServerError + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def get_subnet_group(name): + try: + groups = client.describe_cache_subnet_groups( + aws_retry=True, + CacheSubnetGroupName=name, + )['CacheSubnetGroups'] + except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe subnet group") + + if not groups: + return None + + if len(groups) > 1: + module.fail_aws( + msg="Found multiple matches for subnet group", + cache_subnet_groups=camel_dict_to_snake_dict(groups), + ) + + subnet_group = camel_dict_to_snake_dict(groups[0]) + + subnet_group['name'] = subnet_group['cache_subnet_group_name'] + subnet_group['description'] = subnet_group['cache_subnet_group_description'] + + subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) + subnet_group['subnet_ids'] = subnet_ids + + return subnet_group + + +def create_subnet_group(name, description, subnets): + try: + if not description: + description = name + if not subnets: + subnets = [] + client.create_cache_subnet_group( + aws_retry=True, + CacheSubnetGroupName=name, + CacheSubnetGroupDescription=description, + SubnetIds=subnets, + ) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create subnet group") + + +def update_subnet_group(subnet_group, name, description, subnets): + update_params = dict() + if description and subnet_group['description'] != description: + update_params['CacheSubnetGroupDescription'] = description + if subnets: + old_subnets = set(subnet_group['subnet_ids']) + new_subnets = set(subnets) + if old_subnets != new_subnets: + update_params['SubnetIds'] = list(subnets) + + if not update_params: + return False + + try: + client.modify_cache_subnet_group( + aws_retry=True, + CacheSubnetGroupName=name, + **update_params, + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update subnet group") + + return True + + +def delete_subnet_group(name): + try: + client.delete_cache_subnet_group( + aws_retry=True, + CacheSubnetGroupName=name, + ) + return True + except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + # AWS is "eventually consistent", cope with the race conditions where + # deletion hadn't completed when we ran describe + return False + except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(default='present', choices=['present', 'absent']), name=dict(required=True), description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), ) - module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') + global module + global client - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or {} + module = AnsibleAWSModule(argument_spec=argument_spec) - if state == 'present': - for required in ['name', 'description', 'subnets']: - if not module.params.get(required): - module.fail_json(msg=str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'subnets']: - if module.params.get(not_allowed): - module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + state = module.params.get('state') + name = module.params.get('name').lower() + description = module.params.get('description') + subnets = module.params.get('subnets') - if not region: - module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) + client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff()) - """Get an elasticache connection""" - try: - conn = connect_to_region(region_name=region, **aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=to_native(e)) + subnet_group = get_subnet_group(name) + changed = False - try: - changed = False - exists = False - - try: - matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) - exists = len(matching_groups) > 0 - except BotoServerError as e: - if e.error_code != 'CacheSubnetGroupNotFoundFault': - module.fail_json(msg=e.error_message) - - if state == 'absent': - if exists: - conn.delete_cache_subnet_group(group_name) - changed = True - else: - if not exists: - new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) - changed = True - else: - changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) - changed = True - - except BotoServerError as e: - if e.error_message != 'No modifications were requested.': - module.fail_json(msg=e.error_message) + if state == 'present': + if not subnet_group: + result = create_subnet_group(name, description, subnets) + changed |= result else: - changed = False + result = update_subnet_group(subnet_group, name, description, subnets) + changed |= result + subnet_group = get_subnet_group(name) + else: + if subnet_group: + result = delete_subnet_group(name) + changed |= result + subnet_group = None - module.exit_json(changed=changed) + module.exit_json(changed=changed, cache_subnet_group=subnet_group) if __name__ == '__main__': From f403b73bd82734a7f6550c8d2e3e3def32cd223a Mon Sep 17 00:00:00 2001 From: Mark Chappell 
Date: Fri, 17 Sep 2021 22:06:13 +0200 Subject: [PATCH 249/683] Add documentation and tests for return values --- elasticache_subnet_group.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index f982a7a253c..c5ef1d0f606 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -59,6 +59,42 @@ name: norwegian-blue ''' +RETURN = r''' +cache_subnet_group: + description: Description of the Elasticache Subnet Group. + returned: always + type: dict + contains: + arn: + description: The Amazon Resource Name (ARN) of the cache subnet group. + returned: when the subnet group exists + type: str + sample: arn:aws:elasticache:us-east-1:012345678901:subnetgroup:norwegian-blue + description: + description: The description of the cache subnet group. + returned: when the cache subnet group exists + type: str + sample: My Fancy Ex Parrot Subnet Group + name: + description: The name of the cache subnet group. + returned: when the cache subnet group exists + type: str + sample: norwegian-blue + vpc_id: + description: The VPC ID of the cache subnet group. + returned: when the cache subnet group exists + type: str + sample: norwegian-blue + subnet_ids: + description: The IDs of the subnets belonging to the cache subnet group. + returned: when the cache subnet group exists + type: list + elements: str + sample: + - subnet-aaaaaaaa + - subnet-bbbbbbbb +''' + try: import botocore except ImportError: From 79af0aa1930dc94fe386d7564e541d934b6dcfce Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 17 Sep 2021 20:48:03 +0200 Subject: [PATCH 250/683] Add support for check_mode --- elasticache_subnet_group.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index c5ef1d0f606..5e813b64ad1 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -139,6 +139,10 @@ def get_subnet_group(name): def create_subnet_group(name, description, subnets): + + if module.check_mode: + return True + try: if not description: description = name @@ -168,6 +172,9 @@ def update_subnet_group(subnet_group, name, description, subnets): if not update_params: return False + if module.check_mode: + return True + try: client.modify_cache_subnet_group( aws_retry=True, @@ -181,6 +188,10 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): + + if module.check_mode: + return True + try: client.delete_cache_subnet_group( aws_retry=True, @@ -206,7 +217,10 @@ def main(): global module global client - module = AnsibleAWSModule(argument_spec=argument_spec) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) state = module.params.get('state') name = module.params.get('name').lower() From 5129667c3f38e6418c5a78ef149f61151ecb7ce5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 18 Sep 2021 08:35:38 +0200 Subject: [PATCH 251/683] Fail cleanly if no subnets are provided on creation. --- elasticache_subnet_group.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 5e813b64ad1..eda678205d0 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -34,6 +34,7 @@ subnets: description: - List of subnet IDs that make up the ElastiCache subnet group. + - At least one subnet must be provided when creating an ElastiCache subnet group.
type: list elements: str author: @@ -140,14 +141,15 @@ def get_subnet_group(name): def create_subnet_group(name, description, subnets): + if not subnets: + module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + if module.check_mode: return True try: if not description: description = name - if not subnets: - subnets = [] client.create_cache_subnet_group( aws_retry=True, CacheSubnetGroupName=name, From 84a9991921d632d63bc5f03de91cec8a01426822 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 16 Sep 2021 18:03:30 +0200 Subject: [PATCH 252/683] Add integration tests for redshift_subnet_group --- redshift_subnet_group.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index fa210a5bee4..05397286041 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -32,11 +32,13 @@ group_description: description: - Database subnet group description. + - Required when I(state=present). aliases: ['description'] type: str group_subnets: description: - List of subnet IDs that make up the cluster subnet group. + - Required when I(state=present). aliases: ['subnets'] type: list elements: str From 89c6a29e31222b5dc36d4688bd597bc7b1fe1cad Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 17 Sep 2021 23:56:02 +0200 Subject: [PATCH 253/683] Migrate redshift_subnet_group to boto3 --- redshift_subnet_group.py | 242 ++++++++++++++++++++++++--------------- 1 file changed, 151 insertions(+), 91 deletions(-) diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 05397286041..dae91d86750 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -9,8 +9,6 @@ DOCUMENTATION = r''' --- -author: - - "Jens Carl (@j-carl), Hothead Games Inc." module: redshift_subnet_group version_added: 1.0.0 short_description: manage Redshift cluster subnet groups @@ -20,33 +18,31 @@ state: description: - Specifies whether the subnet should be present or absent. - required: true + default: 'present' choices: ['present', 'absent' ] type: str - group_name: + name: description: - Cluster subnet group name. required: true - aliases: ['name'] + aliases: ['group_name'] type: str - group_description: + description: description: - - Database subnet group description. - - Required when I(state=present). - aliases: ['description'] + - Cluster subnet group description. + aliases: ['group_description'] type: str - group_subnets: + subnets: description: - List of subnet IDs that make up the cluster subnet group. - - Required when I(state=present). - aliases: ['subnets'] + aliases: ['group_subnets'] type: list elements: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto >= 2.49.0 +author: + - "Jens Carl (@j-carl), Hothead Games Inc." 
''' EXAMPLES = r''' @@ -66,10 +62,10 @@ ''' RETURN = r''' -group: - description: dictionary containing all Redshift subnet group information +cluster_subnet_group: + description: dictionary containing Redshift subnet group information returned: success - type: complex + type: dict contains: name: description: name of the Redshift subnet group @@ -84,95 +80,159 @@ ''' try: - import boto - import boto.redshift + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - group_name=dict(required=True, aliases=['name']), - group_description=dict(required=False, aliases=['description']), - group_subnets=dict(required=False, aliases=['subnets'], type='list', elements='str'), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) +def get_subnet_group(name): + try: + groups = client.describe_cluster_subnet_groups( + aws_retry=True, + ClusterSubnetGroupName=name, + )['ClusterSubnetGroups'] + except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe subnet group") - if not HAS_BOTO: - module.fail_json(msg='boto v2.9.0+ required for this module') + if not groups: + return None - state = module.params.get('state') - group_name = module.params.get('group_name') - group_description = module.params.get('group_description') - group_subnets = module.params.get('group_subnets') + if len(groups) > 1: + module.fail_aws( + msg="Found multiple matches for subnet group", + cluster_subnet_groups=camel_dict_to_snake_dict(groups), + ) - if state == 'present': - for required in ('group_name', 'group_description', 'group_subnets'): - if not module.params.get(required): - module.fail_json(msg=str("parameter %s required for state='present'" % required)) - else: - for not_allowed in ('group_description', 'group_subnets'): - if module.params.get(not_allowed): - module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed)) + subnet_group = camel_dict_to_snake_dict(groups[0]) + + subnet_group['name'] = subnet_group['cluster_subnet_group_name'] - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg=str("Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")) + subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) + subnet_group['subnet_ids'] = subnet_ids - # Connect to the Redshift endpoint. 
+ return subnet_group + + +def create_subnet_group(name, description, subnets): try: - conn = connect_to_aws(boto.redshift, region, **aws_connect_params) - except boto.exception.JSONResponseError as e: - module.fail_json(msg=str(e)) + if not description: + description = name + if not subnets: + subnets = [] + client.create_cluster_subnet_group( + aws_retry=True, + ClusterSubnetGroupName=name, + Description=description, + SubnetIds=subnets, + ) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create subnet group") + + +def update_subnet_group(subnet_group, name, description, subnets): + update_params = dict() + if description and subnet_group['description'] != description: + update_params['Description'] = description + if subnets: + old_subnets = set(subnet_group['subnet_ids']) + new_subnets = set(subnets) + if old_subnets != new_subnets: + update_params['SubnetIds'] = list(subnets) + + if not update_params: + return False + + # Description is optional, SubnetIds is not + if 'SubnetIds' not in update_params: + update_params['SubnetIds'] = subnet_group['subnet_ids'] try: - changed = False - exists = False - group = None - - try: - matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100) - exists = len(matching_groups) > 0 - except boto.exception.JSONResponseError as e: - if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault': - # if e.code != 'ClusterSubnetGroupNotFoundFault': - module.fail_json(msg=str(e)) - - if state == 'absent': - if exists: - conn.delete_cluster_subnet_group(group_name) - changed = True + client.modify_cluster_subnet_group( + aws_retry=True, + ClusterSubnetGroupName=name, + **update_params, + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update subnet group") + + return True + +def delete_subnet_group(name): + try: + client.delete_cluster_subnet_group( + aws_retry=True, + ClusterSubnetGroupName=name, + ) + return True + except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + # AWS is "eventually consistent", cope with the race conditions where + # deletion hadn't completed when we ran describe + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete subnet group") + + +def main(): + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, aliases=['group_name']), + description=dict(required=False, aliases=['group_description']), + subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'), + ) + + global module + global client + + module = AnsibleAWSModule( + argument_spec=argument_spec, + ) + + state = module.params.get('state') + name = module.params.get('group_name') + description = module.params.get('group_description') + subnets = module.params.get('group_subnets') + + client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff()) + + subnet_group = get_subnet_group(name) + changed = False + + if state == 'present': + if not subnet_group: + result = create_subnet_group(name, description, subnets) + changed |= result else: - if not exists: - new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets) - group = { - 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult'] - 
['ClusterSubnetGroup']['ClusterSubnetGroupName'], - 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult'] - ['ClusterSubnetGroup']['VpcId'], - } - else: - changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description) - group = { - 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult'] - ['ClusterSubnetGroup']['ClusterSubnetGroupName'], - 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult'] - ['ClusterSubnetGroup']['VpcId'], - } - - changed = True - - except boto.exception.JSONResponseError as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, group=group) + result = update_subnet_group(subnet_group, name, description, subnets) + changed |= result + subnet_group = get_subnet_group(name) + else: + if subnet_group: + result = delete_subnet_group(name) + changed |= result + subnet_group = None + + compat_results = dict() + if subnet_group: + compat_results['group'] = dict( + name=subnet_group['name'], + vpc_id=subnet_group['vpc_id'], + ) + + module.exit_json( + changed=changed, + cluster_subnet_group=subnet_group, + **compat_results, + ) if __name__ == '__main__': From f34004d55d1ad29d6348d18ca1783c6ea2274333 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 18 Sep 2021 09:12:11 +0200 Subject: [PATCH 254/683] Add additional tests for minimal/partial parameters. --- redshift_subnet_group.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index dae91d86750..c67a46e4c00 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -17,7 +17,7 @@ options: state: description: - - Specifies whether the subnet should be present or absent. + - Specifies whether the subnet group should be present or absent. default: 'present' choices: ['present', 'absent' ] type: str @@ -35,6 +35,7 @@ subnets: description: - List of subnet IDs that make up the cluster subnet group. + - At least one subnet must be provided when creating a cluster subnet group. 
aliases: ['group_subnets'] type: list elements: str @@ -122,11 +123,13 @@ def get_subnet_group(name): def create_subnet_group(name, description, subnets): + + if not subnets: + module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + try: if not description: description = name @@ -198,9 +201,9 @@ def main(): ) state = module.params.get('state') - name = module.params.get('group_name') - description = module.params.get('group_description') - subnets = module.params.get('group_subnets') + name = module.params.get('name') + description = module.params.get('description') + subnets = module.params.get('subnets') client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff()) From 3792cb6854bc04fe337173927ec3dbcc0046a1e2 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 18 Sep 2021 09:59:28 +0200 Subject: [PATCH 255/683] Add additional tests and documentation around new return values --- redshift_subnet_group.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index c67a46e4c00..a609dd47a2c 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -64,20 +64,33 @@ RETURN = r''' cluster_subnet_group: - description: dictionary containing Redshift subnet group information + description: A dictionary containing information about the Redshift subnet group. returned: success type: dict contains: name: - description: name of the Redshift subnet group - returned: success + description: Name of the Redshift subnet group. + returned: when the cache subnet group exists type: str sample: "redshift_subnet_group_name" vpc_id: - description: Id of the VPC where the subnet is located - returned: success + description: Id of the VPC where the subnet is located. + returned: when the cache subnet group exists type: str sample: "vpc-aabb1122" + description: + description: The description of the cache subnet group. + returned: when the cache subnet group exists + type: str + sample: Redshift subnet + subnet_ids: + description: The IDs of the subnets belonging to the Redshift subnet group.
+ returned: when the cache subnet group exists + type: list + elements: str + sample: + - subnet-aaaaaaaa + - subnet-bbbbbbbb ''' try: @@ -90,6 +103,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def get_subnet_group(name): @@ -112,8 +126,10 @@ def get_subnet_group(name): cluster_subnet_groups=camel_dict_to_snake_dict(groups), ) + tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags']) subnet_group = camel_dict_to_snake_dict(groups[0]) + subnet_group['tags'] = tags subnet_group['name'] = subnet_group['cluster_subnet_group_name'] subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) From 31c3eb9ff151d286afd4cab620570f2e0eab4f48 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 18 Sep 2021 10:13:55 +0200 Subject: [PATCH 256/683] Add documentation and support for check_mode --- redshift_subnet_group.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index a609dd47a2c..89e8bfa8042 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -126,7 +126,10 @@ def get_subnet_group(name): cluster_subnet_groups=camel_dict_to_snake_dict(groups), ) + # No support for managing tags yet, but make sure that we don't need to + # change the return value structure after it's been available in a release. tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags']) + subnet_group = camel_dict_to_snake_dict(groups[0]) subnet_group['tags'] = tags @@ -143,6 +146,9 @@ def create_subnet_group(name, description, subnets): if not subnets: module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + if module.check_mode: + return True + try: if not description: description = name @@ -170,6 +176,9 @@ def update_subnet_group(subnet_group, name, description, subnets): if not update_params: return False + if module.check_mode: + return True + # Description is optional, SubnetIds is not if 'SubnetIds' not in update_params: update_params['SubnetIds'] = subnet_group['subnet_ids'] @@ -187,6 +196,10 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): + + if module.check_mode: + return True + try: client.delete_cluster_subnet_group( aws_retry=True, @@ -214,6 +227,7 @@ def main(): module = AnsibleAWSModule( argument_spec=argument_spec, + supports_check_mode=True, ) state = module.params.get('state') From f5cbb393df5c7b76b31888c6370786ea7b0d0682 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 23 Sep 2021 10:46:27 +0200 Subject: [PATCH 257/683] Rename iam_cert to iam_server_certificate for consistency --- iam_cert.py => iam_server_certificate.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename iam_cert.py => iam_server_certificate.py (98%) diff --git a/iam_cert.py b/iam_server_certificate.py similarity index 98% rename from iam_cert.py rename to iam_server_certificate.py index fbe984670aa..79ef9c53ef2 100644 --- a/iam_cert.py +++ b/iam_server_certificate.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: iam_cert +module: iam_server_certificate version_added: 1.0.0 short_description: Manage server certificates for use on ELBs and CloudFront description: @@ -85,7 +85,7 @@ EXAMPLES = ''' - name: Basic server 
certificate upload from local file - community.aws.iam_cert: + community.aws.iam_server_certificate: name: very_ssl state: present cert: "{{ lookup('file', 'path/to/cert') }}" @@ -93,7 +93,7 @@ cert_chain: "{{ lookup('file', 'path/to/certchain') }}" - name: Basic server certificate upload - community.aws.iam_cert: + community.aws.iam_server_certificate: name: very_ssl state: present cert: path/to/cert @@ -101,7 +101,7 @@ cert_chain: path/to/certchain - name: Server certificate upload using key string - community.aws.iam_cert: + community.aws.iam_server_certificate: name: very_ssl state: present path: "/a/cert/path/" @@ -110,7 +110,7 @@ cert_chain: body_of_myverytrustedchain - name: Basic rename of existing certificate - community.aws.iam_cert: + community.aws.iam_server_certificate: name: very_ssl new_name: new_very_ssl state: present From 23557033344a93e10cc0965c6297c19963a685a3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 24 Sep 2021 12:10:08 +0200 Subject: [PATCH 258/683] Fix bug when allocating an EIP with in_vpc not set --- ec2_eip.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ec2_eip.py b/ec2_eip.py index adf6f0bda41..927d31551b7 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -344,10 +344,11 @@ def address_is_associated_with_device(ec2, module, address, device_id, is_instan def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None): """ Allocate a new elastic IP address (when needed) and return it """ + if not domain: + domain = 'standard' + if reuse_existing_ip_allowed: filters = [] - if not domain: - domain = 'standard' filters.append({'Name': 'domain', "Values": [domain]}) if tag_dict is not None: From 0f80054bf486e0ff592332a3e73c28e9bc2903e2 Mon Sep 17 00:00:00 2001 From: Nicolas Boutet Date: Thu, 9 Sep 2021 10:23:56 +0200 Subject: [PATCH 259/683] cloudfront_distribution: add new cipher version TLSv1.2_2021 --- cloudfront_distribution.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 9887a8d373a..80ac6dcec4b 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1591,7 +1591,8 @@ def __init__(self, module): 'TLSv1_2016', 'TLSv1.1_2016', 'TLSv1.2_2018', - 'TLSv1.2_2019' + 'TLSv1.2_2019', + 'TLSv1.2_2021' ]) self.__valid_viewer_certificate_certificate_sources = set([ 'cloudfront', From bf5af0d8a2310422c3493f9e569521d32539eda9 Mon Sep 17 00:00:00 2001 From: John Mahoney Date: Thu, 2 Sep 2021 16:11:50 -0400 Subject: [PATCH 260/683] Undocumented IOPS return IOPS is being returned by the module, but simply was not documented. --- rds_instance_info.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rds_instance_info.py b/rds_instance_info.py index fba7804012a..13609972c17 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -234,6 +234,11 @@ returned: always type: str sample: '2017-10-10T04:00:07.434000+00:00' + iops: + description: The Provisioned IOPS value for the DB instance. 
+ returned: always + type: int + sample: 1000 kms_key_id: description: KMS Key ID returned: always From c3eb273d752ab4253584dc783902935667899bc5 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 20 Jul 2021 16:03:48 +0200 Subject: [PATCH 261/683] elb_target - add preserve_client_ip_enabled and proxy_protocol_v2_enabled options Signed-off-by: Alina Buzachis --- elb_target_group.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/elb_target_group.py b/elb_target_group.py index 45649e7e651..0f9e3ff5f23 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -161,6 +161,23 @@ - The identifier of the virtual private cloud (VPC). Required when I(state) is C(present). required: false type: str + preserve_client_ip_enabled: + description: + - Indicates whether client IP preservation is enabled. + - The default is disabled if the target group type is C(ip) address and the target group protocol is C(tcp) or C(tls). Otherwise, the default is enabled. Client IP preservation cannot be disabled for C(udp) and C(tcp_udp) target groups. + - I(preserve_client_ip_enabled) is supported only by Network Load Balancers. + type: bool + required: false + version_added: 2.0.0 + proxy_protocol_v2_enabled: + description: + - Indicates whether Proxy Protocol version 2 is enabled. + - The value is C(true) or C(false). + - I(proxy_protocol_v2_enabled) is supported only by Network Load Balancers. + type: bool + required: false + version_added: 2.0.0 wait: description: - Whether or not to wait for the target group. @@ -474,6 +491,8 @@ def create_or_update_target_group(connection, module): stickiness_type = module.params.get("stickiness_type") stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration") stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name") + preserve_client_ip_enabled = module.params.get("preserve_client_ip_enabled") + proxy_protocol_v2_enabled = module.params.get("proxy_protocol_v2_enabled") health_option_keys = [ "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", @@ -763,6 +782,13 @@ def create_or_update_target_group(connection, module): if stickiness_app_cookie_duration is not None: if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) + if preserve_client_ip_enabled is not None: + if target_type not in ('udp', 'tcp_udp'): + if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): + update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) + if proxy_protocol_v2_enabled is not None: + if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): + update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) if update_attributes: try: @@ -852,6 +878,8 @@ def main(): targets=dict(type='list', elements='dict'), unhealthy_threshold_count=dict(type='int'), vpc_id=dict(), + preserve_client_ip_enabled=dict(type='bool'), + proxy_protocol_v2_enabled=dict(type='bool'), wait_timeout=dict(type='int', default=200), wait=dict(type='bool', default=False) ) From d4d9c06559a6b53600c24a894ffb06be38f709fe Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 24 Sep 2021 14:49:59 +0200 Subject: [PATCH 262/683] bump version
added - missed the 2.0.0 release --- elb_target_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 0f9e3ff5f23..9a740422293 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -169,7 +169,7 @@ - I(preserve_client_ip_enabled) is supported only by Network Load Balancers. type: bool required: false - version_added: 2.0.0 + version_added: 2.1.0 proxy_protocol_v2_enabled: description: - Indicates whether Proxy Protocol version 2 is enabled. @@ -177,7 +177,7 @@ - I(proxy_protocol_v2_enabled) is supported only by Network Load Balancers. type: bool required: false - version_added: 2.0.0 + version_added: 2.1.0 wait: description: - Whether or not to wait for the target group. From 6ff26b59cb3fde7b2c7e2f6374b30afab67cd61e Mon Sep 17 00:00:00 2001 From: Kevin Brebanov Date: Mon, 7 Jun 2021 10:41:05 -0400 Subject: [PATCH 263/683] route53: Prevent identifier from always being scrubbed. --- route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/route53.py b/route53.py index d1391cfac58..d4fe99531c0 100644 --- a/route53.py +++ b/route53.py @@ -606,6 +606,7 @@ def main(): 'TTL': ttl_in, 'ResourceRecords': [dict(Value=value) for value in value_in], 'HealthCheckId': health_check_in, + 'SetIdentifier': identifier_in, }) if alias_in: From cc0d358947ba444f5165877649e502767d5ab034 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 27 Sep 2021 09:26:23 +0200 Subject: [PATCH 264/683] Deprecate passing file names to the iam_server_certificate module --- iam_server_certificate.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 79ef9c53ef2..5402b22d126 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -56,17 +56,23 @@ cert_chain: description: - The path to, or content of, the CA certificate chain in PEM encoded format. - As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + - If the parameter is not a file, it is assumed to be content. + - Passing a file name is deprecated, and support will be dropped in + version 4.0.0 of this collection. type: str cert: description: - The path to, or content of the certificate body in PEM encoded format. - As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + - If the parameter is not a file, it is assumed to be content. + - Passing a file name is deprecated, and support will be dropped in + version 4.0.0 of this collection. type: str key: description: - The path to, or content of the private key in PEM encoded format. - As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + If the parameter is not a file, it is assumed to be content. + - Passing a file name is deprecated, and support will be dropped in + version 4.0.0 of this collection. type: str dup_ok: description: @@ -231,16 +237,31 @@ def load_data(cert, key, cert_chain): if cert and os.path.isfile(cert): with open(cert, 'r') as cert_fh: cert = cert_fh.read().rstrip() + module.deprecate( + 'Passing a file name as the cert argument has been deprecated. ' + 'Please use a lookup instead, see the documentation for examples.', + version='4.0.0', collection_name='community.aws') if key and os.path.isfile(key): with open(key, 'r') as key_fh: key = key_fh.read().rstrip() + module.deprecate( + 'Passing a file name as the key argument has been deprecated. 
' + 'Please use a lookup instead, see the documentation for examples.', + version='4.0.0', collection_name='community.aws') if cert_chain and os.path.isfile(cert_chain): with open(cert_chain, 'r') as cert_chain_fh: cert_chain = cert_chain_fh.read() + module.deprecate( + 'Passing a file name as the cert_chain argument has been deprecated. ' + 'Please use a lookup instead, see the documentation for examples.', + version='4.0.0', collection_name='community.aws') return cert, key, cert_chain def main(): + + global module + argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True), From f45f6f8cedacf27d6d24e27aec0bec992ccea5d7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 30 Sep 2021 12:01:21 +0200 Subject: [PATCH 265/683] sns_topic - Define shape for delivery_policy. --- sns_topic.py | 105 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 4 deletions(-) diff --git a/sns_topic.py index dd5af417bab..37cf573ce58 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -49,6 +49,73 @@ description: - Delivery policy to apply to the SNS topic. type: dict + suboptions: + http: + description: + - Delivery policy for HTTP(S) messages. + - See U(https://docs.aws.amazon.com/sns/latest/dg/sns-message-delivery-retries.html) + for more information. + type: dict + required: false + suboptions: + disableSubscriptionOverrides: + description: + - Applies this policy to all subscriptions, even if they have their own policies. + type: bool + required: false + defaultThrottlePolicy: + description: + - Throttle the rate of messages sent to subscriptions. + type: dict + suboptions: + maxReceivesPerSecond: + description: + - The maximum number of deliveries per second per subscription. + type: int + required: true + required: false + defaultHealthyRetryPolicy: + description: + - Retry policy for HTTP(S) messages. + type: dict + required: true + suboptions: + minDelayTarget: + description: + - The minimum delay for a retry. + type: int + required: true + maxDelayTarget: + description: + - The maximum delay for a retry. + type: int + required: true + numRetries: + description: + - The total number of retries. + type: int + required: true + numMaxDelayRetries: + description: + - The number of retries with the maximum delay between them. + type: int + required: true + numMinDelayRetries: + description: + - The number of retries with just the minimum delay between them. + type: int + required: true + numNoDelayRetries: + description: + - The number of retries to be performed immediately. + type: int + required: true + backoffFunction: + description: + - The function for backoff between retries. + type: str + required: true + choices: ['arithmetic', 'exponential', 'geometric', 'linear'] subscriptions: description: - List of subscriptions to apply to the topic.
Note that AWS requires @@ -225,8 +292,12 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class SnsTopicManager(object): @@ -251,7 +322,7 @@ def __init__(self, self.state = state self.display_name = display_name self.policy = policy - self.delivery_policy = delivery_policy + self.delivery_policy = scrub_none_parameters(delivery_policy) if delivery_policy else None self.subscriptions = subscriptions self.subscriptions_existing = [] self.subscriptions_deleted = [] @@ -495,13 +566,39 @@ def get_info(self): def main(): + + # We're kinda stuck with CamelCase here, it would be nice to switch to + # snake_case, but we'd need to purge out the alias entries + http_retry_args = dict( + minDelayTarget=dict(type='int', required=True), + maxDelayTarget=dict(type='int', required=True), + numRetries=dict(type='int', required=True), + numMaxDelayRetries=dict(type='int', required=True), + numMinDelayRetries=dict(type='int', required=True), + numNoDelayRetries=dict(type='int', required=True), + backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']), + ) + http_delivery_args = dict( + defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args), + disableSubscriptionOverrides=dict(type='bool', required=False), + defaultThrottlePolicy=dict( + type='dict', required=False, + options=dict( + maxReceivesPerSecond=dict(type='int', required=True), + ), + ), + ) + delivery_args = dict( + http=dict(type='dict', required=False, options=http_delivery_args), + ) + argument_spec = dict( name=dict(required=True), topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']), state=dict(default='present', choices=['present', 'absent']), display_name=dict(), policy=dict(type='dict'), - delivery_policy=dict(type='dict'), + delivery_policy=dict(type='dict', options=delivery_args), subscriptions=dict(default=[], type='list', elements='dict'), purge_subscriptions=dict(type='bool', default=True), ) From a2fc3e5aea7c6d8d1f1f3cf866ac8e8425799c98 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 8 Oct 2021 10:27:13 +0200 Subject: [PATCH 266/683] touch route53_health_check plugin to try and trigger tests --- route53_health_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route53_health_check.py index 7db80d875a5..0fc8ea4d698 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -10,7 +10,7 @@ --- module: route53_health_check version_added: 1.0.0 -short_description: Add or delete health-checks in Amazons Route53 DNS service +short_description: Manage health-checks in Amazon's Route53 DNS service description: - Creates and deletes DNS Health checks in Amazon's Route53 service.
- Only the port, resource_path, string_match and request_interval are From 90a733a6b468083060b3e4dde3c772987bbfba68 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Fri, 8 Oct 2021 17:55:30 +0100 Subject: [PATCH 267/683] iam_role_info jittered backoff --- iam_role.py | 3 +-- iam_role_info.py | 16 ++++++---------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/iam_role.py b/iam_role.py index f5699edf8b5..62d76f46c9a 100644 --- a/iam_role.py +++ b/iam_role.py @@ -214,9 +214,8 @@ def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): return False -@AWSRetry.jittered_backoff() def _list_policies(connection): - paginator = connection.get_paginator('list_policies') + paginator = connection.get_paginator('list_policies', aws_retry=True) return paginator.paginate().build_full_result()['Policies'] diff --git a/iam_role_info.py b/iam_role_info.py index 0a627d10cc7..9c089cc35d6 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -159,27 +159,23 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -@AWSRetry.exponential_backoff() def list_iam_roles_with_backoff(client, **kwargs): - paginator = client.get_paginator('list_roles') + paginator = client.get_paginator('list_roles', aws_retry=True) return paginator.paginate(**kwargs).build_full_result() -@AWSRetry.exponential_backoff() def list_iam_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_role_policies') + paginator = client.get_paginator('list_role_policies', aws_retry=True) return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] -@AWSRetry.exponential_backoff() def list_iam_attached_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_attached_role_policies') + paginator = client.get_paginator('list_attached_role_policies', aws_retry=True) return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] -@AWSRetry.exponential_backoff() def list_iam_instance_profiles_for_role_with_backoff(client, role_name): - paginator = client.get_paginator('list_instance_profiles_for_role') + paginator = client.get_paginator('list_instance_profiles_for_role', aws_retry=True) return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] @@ -210,7 +206,7 @@ def describe_iam_roles(module, client): path_prefix = module.params['path_prefix'] if name: try: - roles = [client.get_role(RoleName=name)['Role']] + roles = [client.get_role(aws_retry=True, RoleName=name)['Role']] except is_boto3_error_code('NoSuchEntity'): return [] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -245,7 +241,7 @@ def main(): if module._name == 'iam_role_facts': module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", date='2021-12-01', collection_name='community.aws') - client = module.client('iam') + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) From 5fcc031a8fd581954b255822a8d0f429c6faa0e5 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Sat, 9 Oct 2021 16:16:27 +0100 Subject: [PATCH 268/683] PR feedback --- iam_role.py | 3 ++- iam_role_info.py | 16 +++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/iam_role.py b/iam_role.py index 62d76f46c9a..f5699edf8b5 100644 --- a/iam_role.py +++ b/iam_role.py @@ -214,8 +214,9 @@ def 
compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): return False +@AWSRetry.jittered_backoff() def _list_policies(connection): - paginator = connection.get_paginator('list_policies', aws_retry=True) + paginator = connection.get_paginator('list_policies') return paginator.paginate().build_full_result()['Policies'] diff --git a/iam_role_info.py b/iam_role_info.py index 9c089cc35d6..acc0094cf96 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -159,23 +159,25 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +@AWSRetry.jittered_backoff() def list_iam_roles_with_backoff(client, **kwargs): - paginator = client.get_paginator('list_roles', aws_retry=True) + paginator = client.get_paginator('list_roles') return paginator.paginate(**kwargs).build_full_result() +@AWSRetry.jittered_backoff() def list_iam_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_role_policies', aws_retry=True) + paginator = client.get_paginator('list_role_policies') return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] - +@AWSRetry.jittered_backoff() def list_iam_attached_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_attached_role_policies', aws_retry=True) + paginator = client.get_paginator('list_attached_role_policies') return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] - +@AWSRetry.jittered_backoff() def list_iam_instance_profiles_for_role_with_backoff(client, role_name): - paginator = client.get_paginator('list_instance_profiles_for_role', aws_retry=True) + paginator = client.get_paginator('list_instance_profiles_for_role') return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] @@ -206,7 +208,7 @@ def describe_iam_roles(module, client): path_prefix = module.params['path_prefix'] if name: try: - roles = [client.get_role(aws_retry=True, RoleName=name)['Role']] + roles = [client.get_role(RoleName=name, aws_retry=True)['Role']] except is_boto3_error_code('NoSuchEntity'): return [] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except From e4521be272ba838dbcb02ea019d8121a47fe814e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Oct 2021 22:01:18 +0200 Subject: [PATCH 269/683] Whitespace linting --- iam_role_info.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/iam_role_info.py b/iam_role_info.py index acc0094cf96..a08df455fad 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -170,11 +170,13 @@ def list_iam_role_policies_with_backoff(client, role_name): paginator = client.get_paginator('list_role_policies') return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] + @AWSRetry.jittered_backoff() def list_iam_attached_role_policies_with_backoff(client, role_name): paginator = client.get_paginator('list_attached_role_policies') return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] + @AWSRetry.jittered_backoff() def list_iam_instance_profiles_for_role_with_backoff(client, role_name): paginator = client.get_paginator('list_instance_profiles_for_role') From be8876008baca9ccff346febeb5513072d359163 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 26 Sep 2021 11:55:36 +0200 Subject: [PATCH 270/683] Initial boto3 migration --- route53_health_check.py | 393 +++++++++++++++++++++------------------- 1 file changed, 210 insertions(+), 183 deletions(-) 
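The migration below, like the retry changes above, relies on one recurring client pattern from the amazon.aws module_utils: build the client once with a retry decorator, then opt individual API calls into it with aws_retry=True. A minimal sketch of that pattern, assuming the route53 service used below; the argument_spec and the exit_json payload are placeholders, not part of any patch here:

# Sketch only - illustrates the AnsibleAWSModule/AWSRetry wiring the migrated
# modules use; the empty argument_spec and the result payload are stubs.
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry


def main():
    module = AnsibleAWSModule(argument_spec=dict(), supports_check_mode=True)
    # The decorator retries throttling failures with randomised, growing delays.
    client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff())
    # aws_retry=True opts this specific call into the decorator above.
    checks = client.list_health_checks(aws_retry=True)
    module.exit_json(changed=False, count=len(checks['HealthChecks']))


if __name__ == '__main__':
    main()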
diff --git a/route53_health_check.py index 0fc8ea4d698..980cef5fba5 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -44,7 +44,7 @@ health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html. - - Required for all checks except TCP. + - Mutually exclusive with I(type='TCP'). - The path must begin with a / - Maximum 255 characters. type: str @@ -74,15 +74,13 @@ - The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. - default: 3 + - Will default to C(3) if not specified on creation. choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] type: int author: "zimbatm (@zimbatm)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto >= 2.49.0 ''' EXAMPLES = ''' @@ -119,162 +117,195 @@ import uuid try: - import boto.ec2 - from boto.route53 import Route53Connection, exception - from boto.route53.healthcheck import HealthCheck + import botocore except ImportError: pass # Handled by HAS_BOTO +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def _list_health_checks(**params): + try: + results = client.list_health_checks(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to list health checks') + return results -# Things that can't get changed: -# protocol -# ip_address or domain -# request_interval -# string_match if not previously enabled -def find_health_check(conn, wanted): +def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): """Searches for health checks that have the exact same set of immutable values""" - results = conn.get_list_health_checks() + # In lieu of an Id we perform matches against the following values: + # - ip_addr + # - fqdn + # - type (immutable) + # - request_interval + # - port + + # Because the Route 53 list API provides no 'filter' mechanism, + # using a paginator would result in (on average) double the + # number of API calls and can get really slow.
+ # Additionally, we can't properly wrap the paginator, so retrying means + # starting from scratch with a paginator + results = _list_health_checks() while True: - for check in results.HealthChecks: - config = check.HealthCheckConfig + for check in results.get('HealthChecks'): + config = check.get('HealthCheckConfig') if ( - config.get('IPAddress') == wanted.ip_addr and - config.get('FullyQualifiedDomainName') == wanted.fqdn and - config.get('Type') == wanted.hc_type and - config.get('RequestInterval') == str(wanted.request_interval) and - config.get('Port') == str(wanted.port) + config.get('IPAddress', None) == ip_addr and + config.get('FullyQualifiedDomainName', None) == fqdn and + config.get('Type') == hc_type and + config.get('RequestInterval') == request_interval and + config.get('Port', None) == port and + True ): return check - if (results.IsTruncated == 'true'): - results = conn.get_list_health_checks(marker=results.NextMarker) + if results.get('IsTruncated', False): + results = _list_health_checks(Marker=results.get('NextMarker')) else: return None -def to_health_check(config): - return HealthCheck( - config.get('IPAddress'), - int(config.get('Port')), - config.get('Type'), - config.get('ResourcePath'), - fqdn=config.get('FullyQualifiedDomainName'), - string_match=config.get('SearchString'), - request_interval=int(config.get('RequestInterval')), - failure_threshold=int(config.get('FailureThreshold')), - ) +def delete_health_check(check_id): + if not check_id: + return False, None + + if module.check_mode: + return True, 'delete' -def health_check_diff(a, b): - a = a.__dict__ - b = b.__dict__ - if a == b: - return {} - diff = {} - for key in set(a.keys()) | set(b.keys()): - if a.get(key) != b.get(key): - diff[key] = b.get(key) - return diff - - -def to_template_params(health_check): - params = { - 'ip_addr_part': '', - 'port': health_check.port, - 'type': health_check.hc_type, - 'resource_path_part': '', - 'fqdn_part': '', - 'string_match_part': '', - 'request_interval': health_check.request_interval, - 'failure_threshold': health_check.failure_threshold, - } - if health_check.ip_addr: - params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr} - if health_check.resource_path: - params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path} - if health_check.fqdn: - params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn} - if health_check.string_match: - params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match} - return params - - -XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>""" - -POSTXMLBody = """ - <CreateHealthCheckRequest xmlns="%(xmlns)s"> - <CallerReference>%(caller_ref)s</CallerReference> - <HealthCheckConfig> - %(ip_addr_part)s - <Port>%(port)s</Port> - <Type>%(type)s</Type> - %(resource_path_part)s - %(fqdn_part)s - %(string_match_part)s - <RequestInterval>%(request_interval)s</RequestInterval> - <FailureThreshold>%(failure_threshold)s</FailureThreshold> - </HealthCheckConfig> - </CreateHealthCheckRequest> - """ - -UPDATEHCXMLBody = """ - <UpdateHealthCheckRequest xmlns="%(xmlns)s"> - <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion> - %(ip_addr_part)s - <Port>%(port)s</Port> - %(resource_path_part)s - %(fqdn_part)s - %(string_match_part)s - <FailureThreshold>%(failure_threshold)i</FailureThreshold> - </UpdateHealthCheckRequest> - """ - - -def create_health_check(conn, health_check, caller_ref=None): - if caller_ref is None: - caller_ref = str(uuid.uuid4()) - uri = '/%s/healthcheck' % conn.Version - params = to_template_params(health_check) - params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref) - - xml_body = POSTXMLBody % params - response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) - body = response.read() - boto.log.debug(body) - if response.status == 201: - e = boto.jsonresponse.Element()
- h = boto.jsonresponse.XmlHandler(e, None) - h.parse(body) - return e - else: - raise exception.DNSServerError(response.status, response.reason, body) - - -def update_health_check(conn, health_check_id, health_check_version, health_check): - uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id) - params = to_template_params(health_check) - params.update( - xmlns=conn.XMLNameSpace, - health_check_version=health_check_version, + try: + client.delete_health_check( + aws_retry=True, + HealthCheckId=check_id, + ) + except is_boto3_error_code('NoSuchHealthCheck'): + # Handle the deletion race condition as cleanly as possible + return False, None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete health check') + + return True, 'delete' + + +def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in): + + # In general, if a request is repeated with the same CallerRef it won't + # result in a duplicate check appearing. This means we can safely use our + # retry decorators + caller_ref = str(uuid.uuid4()) + missing_args = [] + + health_check = dict( + Type=type_in, + RequestInterval=request_interval_in, + Port=port_in, ) - xml_body = UPDATEHCXMLBody % params - response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) - body = response.read() - boto.log.debug(body) - if response.status not in (200, 204): - raise exception.DNSServerError(response.status, - response.reason, - body) - e = boto.jsonresponse.Element() - h = boto.jsonresponse.XmlHandler(e, None) - h.parse(body) - return e + if ip_addr_in: + health_check['IPAddress'] = ip_addr_in + if fqdn_in: + health_check['FullyQualifiedDomainName'] = fqdn_in + + if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + resource_path = module.params.get('resource_path') + # if not resource_path: + # missing_args.append('resource_path') + if resource_path: + health_check['ResourcePath'] = resource_path + if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + string_match = module.params.get('string_match') + if not string_match: + missing_args.append('string_match') + health_check['SearchString'] = module.params.get('string_match') + + failure_threshold = module.params.get('failure_threshold') + if not failure_threshold: + failure_threshold = 3 + health_check['FailureThreshold'] = failure_threshold + + if missing_args: + module.fail_json(msg='missing required arguments for creation: {0}'.format( + ', '.join(missing_args)), + ) + + if module.check_mode: + return True, 'create', None + + try: + result = client.create_health_check( + aws_retry=True, + CallerReference=caller_ref, + HealthCheckConfig=health_check, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check) + + return True, 'create', result.get('HealthCheck').get('Id') + + +def update_health_check(existing_check): + # In theory it's also possible to update the IPAddress, Port and + # FullyQualifiedDomainName, however, because we use these in lieu of a + # 'Name' to uniquely identify the health check this isn't currently + # supported. If we accepted an ID it would be possible to modify them.
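+ # For illustration (values invented): a run that changes only the path and + # threshold builds kwargs along the lines of + # {'ResourcePath': '/health', 'FailureThreshold': 2} + # before they are splatted into client.update_health_check() below.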
+ + changes = dict() + existing_config = existing_check.get('HealthCheckConfig') + + resource_path = module.params.get('resource_path', None) + if resource_path and resource_path != existing_config.get('ResourcePath'): + changes['ResourcePath'] = resource_path + + search_string = module.params.get('string_match', None) + if search_string and search_string != existing_config.get('SearchString'): + changes['SearchString'] = search_string + + failure_threshold = module.params.get('failure_threshold', None) + if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'): + changes['FailureThreshold'] = failure_threshold + + # No changes... + if not changes: + return False, None + + if module.check_mode: + return True, 'update' + + check_id = existing_check.get('Id') + version_id = existing_check.get('HealthCheckVersion', 1) + version_id += 1 + try: + client.update_health_check( + HealthCheckId=check_id, + HealthCheckVersion=version_id, + **changes, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg='Failed to update health check.', id=check_id) + + return True, 'update' + + +def describe_health_check(id): + if not id: + return dict() + + try: + result = client.get_health_check( + aws_retry=True, + HealthCheckId=id, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg='Failed to get health check.', id=id) + + health_check = result.get('HealthCheck', {}) + health_check = camel_dict_to_snake_dict(health_check) + return health_check def main(): @@ -287,12 +318,26 @@ def main(): fqdn=dict(), string_match=dict(), request_interval=dict(type='int', choices=[10, 30], default=30), - failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3), + failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False) - if not HAS_BOTO: - module.fail_json(msg='boto 2.27.0+ required for this module') + args_one_of = [ + ['ip_address', 'fqdn'], + ] + + args_if = [ + ['type', 'TCP', ('port',)], + ] + + global module + global client + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=args_one_of, + required_if=args_if, + supports_check_mode=True, + ) state_in = module.params.get('state') ip_addr_in = module.params.get('ip_address') @@ -304,63 +349,45 @@ def main(): request_interval_in = module.params.get('request_interval') failure_threshold_in = module.params.get('failure_threshold') - if ip_addr_in is None and fqdn_in is None: - module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required") - # Default port if port_in is None: if type_in in ['HTTP', 'HTTP_STR_MATCH']: port_in = 80 elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']: port_in = 443 - else: - module.fail_json(msg="parameter 'port' is required for 'type' TCP") - # string_match in relation with type - if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - if string_match_in is None: - module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types") - elif len(string_match_in) > 255: + if string_match_in: + if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types") + if len(string_match_in) > 255: module.fail_json(msg="parameter 'string_match' is limited to 255 characters max") - elif string_match_in: - module.fail_json(msg="parameter 
'string_match' argument is only for the HTTP(S)_STR_MATCH types") - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - # connect to the route53 endpoint - try: - conn = Route53Connection(**aws_connect_kwargs) - except boto.exception.BotoServerError as e: - module.fail_json(msg=e.error_message) + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) changed = False action = None check_id = None - wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in) - existing_check = find_health_check(conn, wanted_config) + + existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + if existing_check: - check_id = existing_check.Id - existing_config = to_health_check(existing_check.HealthCheckConfig) + check_id = existing_check.get('Id') - if state_in == 'present': + if state_in == 'absent': + changed, action = delete_health_check(check_id) + check_id = None + elif state_in == 'present': if existing_check is None: - action = "create" - check_id = create_health_check(conn, wanted_config).HealthCheck.Id - changed = True + changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) else: - diff = health_check_diff(existing_config, wanted_config) - if diff: - action = "update" - update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config) - changed = True - elif state_in == 'absent': - if check_id: - action = "delete" - conn.delete_health_check(check_id) - changed = True - else: - module.fail_json(msg="Logic Error: Unknown state") - - module.exit_json(changed=changed, health_check=dict(id=check_id), action=action) + changed, action = update_health_check(existing_check) + + health_check = describe_health_check(id=check_id) + health_check['action'] = action + module.exit_json( + changed=changed, + health_check=health_check, + ) if __name__ == '__main__': From f4e8e6269d8d439b35d31956bd297ea9d3b8efba Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 26 Sep 2021 13:42:28 +0200 Subject: [PATCH 271/683] version --- route53_health_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route53_health_check.py b/route53_health_check.py index 980cef5fba5..cd74e61f660 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -277,8 +277,8 @@ def update_health_check(existing_check): return True, 'update' check_id = existing_check.get('Id') + # This makes sure we're starting from the version we think we are... 
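+ # (Route 53 increments HealthCheckVersion itself on every successful update + # and rejects an UpdateHealthCheck call whose version doesn't match the + # current one, so manually bumping the version here raced with the service.)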
version_id = existing_check.get('HealthCheckVersion', 1) - version_id += 1 try: client.update_health_check( HealthCheckId=check_id, From 590cb77e921e87c276e02136b575b965d8d14d15 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 26 Sep 2021 17:19:53 +0200 Subject: [PATCH 272/683] better --- route53_health_check.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/route53_health_check.py index cd74e61f660..b54b7699d83 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -161,8 +161,7 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): config.get('FullyQualifiedDomainName', None) == fqdn and config.get('Type') == hc_type and config.get('RequestInterval') == request_interval and - config.get('Port', None) == port and - True + config.get('Port', None) == port ): return check From 630d6b042c30a164c77def09c8d307f2b50e9ea8 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 26 Sep 2021 20:20:35 +0200 Subject: [PATCH 273/683] Add docs and tests for return codes --- route53_health_check.py | 73 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/route53_health_check.py index b54b7699d83..bcf7357c0ef 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -25,6 +25,7 @@ ip_address: description: - IP address of the end-point to check. Either this or I(fqdn) has to be provided. + - IP addresses must be publicly routable. type: str port: description: @@ -111,7 +112,79 @@ community.aws.route53_health_check: state: absent fqdn: host1.example.com +''' +RETURN = r''' +health_check: + description: Information about the health check. + returned: success + type: dict + contains: + action: + description: The action performed by the module. + type: str + returned: When a change is or would be made. + sample: 'update' + id: + description: The Unique ID assigned by AWS to the health check. + type: str + returned: When the health check exists. + sample: 50ec8a13-9623-4c66-9834-dd8c5aedc9ba + health_check_version: + description: The version number of the health check. + type: int + returned: When the health check exists. + sample: 14 + health_check_config: + description: + - Detailed information about the health check. + - May contain additional values from Route 53 health check + features not yet supported by this module. + type: dict + returned: When the health check exists. + contains: + type: + description: The type of the health check. + type: str + returned: When the health check exists. + sample: 'HTTPS_STR_MATCH' + failure_threshold: + description: + - The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to + change the current status of the endpoint from unhealthy to healthy or vice versa. + type: int + returned: When the health check exists. + sample: 3 + fully_qualified_domain_name: + description: The FQDN configured for the health check to test. + type: str + returned: When the health check exists and an FQDN is configured. + sample: 'host1.example.com' + ip_address: + description: The IPv4 or IPv6 IP address of the endpoint to be queried. + type: str + returned: When the health check exists and a specific IP address is configured. + sample: '192.0.2.16' + port: + description: The port on the endpoint that the health check will query. + type: int + returned: When the health check exists. + sample: 80 + request_interval: + description: The number of seconds between health check queries.
+ type: int + returned: When the health check exists. + sample: 30 + resource_path: + description: The URI path to query when performing an HTTP/HTTPS based health check. + type: str + returned: When the health check exists and a resource path has been configured. + sample: '/healthz' + search_string: + description: A string that must be present in the response for a health check to be considered successful. + type: str + returned: When the health check exists and a search string has been configured. + sample: 'ALIVE' ''' import uuid From 5760375b5eb6737fffd9b5566c1eff304b2a03ea Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 20 Sep 2021 14:49:21 +0200 Subject: [PATCH 274/683] dynamodb - Initial migration to boto3 --- dynamodb_table.py | 974 +++++++++++++++++++++++++++++++++------------- 1 file changed, 714 insertions(+), 260 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 7a3add3727a..5d445e5f657 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -16,11 +16,6 @@ - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. author: Alan Loi (@loia) -requirements: -- python >= 3.6 -- boto >= 2.49.0 -- boto3 >= 1.15.0 -- botocore >= 1.18.0 options: state: description: @@ -36,13 +31,13 @@ hash_key_name: description: - Name of the hash key. - - Required when C(state=present). + - Required when I(state=present) and table doesn't exist. type: str hash_key_type: description: - Type of the hash key. + - Defaults to C('STRING') when creating a new table. choices: ['STRING', 'NUMBER', 'BINARY'] - default: 'STRING' type: str range_key_name: description: @@ -51,18 +46,18 @@ range_key_type: description: - Type of the range key. + - Defaults to C('STRING') when creating a new range key. choices: ['STRING', 'NUMBER', 'BINARY'] - default: 'STRING' type: str read_capacity: description: - Read throughput capacity (units) to provision. - default: 1 + - Defaults to C(1) when creating a new table. type: int write_capacity: description: - Write throughput capacity (units) to provision. - default: 1 + - Defaults to C(1) when creating a new table. type: int indexes: description: @@ -77,25 +72,39 @@ type: description: - The type of index. - - "Valid types: C(all), C(global_all), C(global_include), C(global_keys_only), C(include), C(keys_only)" type: str required: true + choices: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] hash_key_name: - description: The name of the hash-based key. - required: true + description: + - The name of the hash-based key. + - Required if index doesn't already exist. + - Can not be modified once the index has been created. + required: false type: str hash_key_type: - description: The type of the hash-based key. + description: + - The type of the hash-based key. + - Defaults to C('STRING') when creating a new index. + - Can not be modified once the index has been created. type: str + choices: ['STRING', 'NUMBER', 'BINARY'] range_key_name: - description: The name of the range-based key. + description: + - The name of the range-based key. + - Can not be modified once the index has been created. type: str range_key_type: type: str - description: The type of the range-based key. + description: + - The type of the range-based key. + - Defaults to C('STRING') when creating a new index. + - Can not be modified once the index has been created. 
+ choices: ['STRING', 'NUMBER', 'BINARY'] includes: type: list description: A list of fields to include when using C(global_include) or C(include) indexes. + elements: str read_capacity: description: - Read throughput capacity (units) to provision for the index. @@ -110,13 +119,25 @@ tags: description: - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag. - - 'For example: C({"key":"value"}) and C({"key":"value","key2":"value2"})' + - 'For example: C({"key":"value"}) or C({"key":"value","key2":"value2"})' type: dict - wait_for_active_timeout: + purge_tags: + description: + - Remove tags not listed in I(tags). + default: True + type: bool + wait_timeout: description: - - how long before wait gives up, in seconds. only used when tags is set - default: 60 + - How long (in seconds) to wait for creation / update / deletion to complete. + aliases: ['wait_for_active_timeout'] + default: 120 type: int + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for table creation or deletion to complete before returning. + default: True + type: bool extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -174,300 +195,734 @@ sample: ACTIVE ''' -import time -import traceback - try: - import boto - import boto.dynamodb2 - from boto.dynamodb2.table import Table - from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex - from boto.dynamodb2.types import STRING, NUMBER, BINARY - from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError - from boto.dynamodb2.exceptions import ValidationException - DYNAMO_TYPE_MAP = { - 'STRING': STRING, - 'NUMBER': NUMBER, - 'BINARY': BINARY - } - # Boto 2 is mandatory, Boto3 is only needed for tagging import botocore except ImportError: - pass # Handled by ec2.HAS_BOTO and ec2.HAS_BOTO3 + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags DYNAMO_TYPE_DEFAULT = 'STRING' INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] +# Map in both directions +DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'} +DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items()) 
+KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) + + +# If you try to update an index while another index is updating, it throws +# LimitExceededException/ResourceInUseException exceptions at you. This can be +# pretty slow, so add plenty of retries... +@AWSRetry.jittered_backoff( + retries=45, delay=5, max_delay=30, + catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], +) +def _update_table_with_long_retry(**changes): + return client.update_table( + TableName=module.params.get('name'), + **changes + ) + + +# ResourceNotFoundException is expected here if the table doesn't exist +@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException']) +def _describe_table(**params): + return client.describe_table(**params) -def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None): +def wait_exists(): table_name = module.params.get('name') - hash_key_name = module.params.get('hash_key_name') - hash_key_type = module.params.get('hash_key_type') - range_key_name = module.params.get('range_key_name') - range_key_type = module.params.get('range_key_type') - read_capacity = module.params.get('read_capacity') - write_capacity = module.params.get('write_capacity') - all_indexes = module.params.get('indexes') - tags = module.params.get('tags') - wait_for_active_timeout = module.params.get('wait_for_active_timeout') - - for index in all_indexes: - validate_index(index, module) - - schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type) - - throughput = { - 'read': read_capacity, - 'write': write_capacity - } - - indexes, global_indexes = get_indexes(all_indexes) - - result = dict( - region=region, - table_name=table_name, + wait_timeout = module.params.get('wait_timeout') + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = client.get_waiter('table_exists') + waiter.wait( + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + TableName=table_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on table creation') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed while waiting on table creation') + + +def wait_not_exists(): + table_name = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = client.get_waiter('table_not_exists') + waiter.wait( + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + TableName=table_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on table deletion') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed while waiting on table deletion') + + +def _short_type_to_long(short_key): + if not short_key: + return None + return DYNAMO_TYPE_MAP_SHORT.get(short_key, None) + + +def _long_type_to_short(long_key): + if not long_key: + return None + return DYNAMO_TYPE_MAP_LONG.get(long_key, None) + + +def _schema_dict(key_name, key_type): + return dict( + AttributeName=key_name, + KeyType=key_type, + ) + + +def _merge_index_params(index, current_index): + idx = dict(current_index) + idx.update(index) + return idx + + +def 
_decode_primary_index(current_table): + """ + Decodes the primary index info from the current table definition + splitting it up into the keys we use as parameters + """ + # The schema/attribute definitions are a list of dicts which need the same + # treatment as boto3's tag lists + schema = boto3_tag_list_to_ansible_dict( + current_table.get('key_schema', []), + # Map from 'HASH'/'RANGE' to attribute name + tag_name_key_name='key_type', + tag_value_key_name='attribute_name', + ) + attributes = boto3_tag_list_to_ansible_dict( + current_table.get('attribute_definitions', []), + # Map from attribute name to 'S'/'N'/'B'. + tag_name_key_name='attribute_name', + tag_value_key_name='attribute_type', + ) + + hash_key_name = schema.get('HASH') + hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None)) + range_key_name = schema.get('RANGE', None) + range_key_type = _short_type_to_long(attributes.get(range_key_name, None)) + + return dict( hash_key_name=hash_key_name, hash_key_type=hash_key_type, range_key_name=range_key_name, range_key_type=range_key_type, - read_capacity=read_capacity, - write_capacity=write_capacity, - indexes=all_indexes, ) + +def _decode_index(index_data, attributes, type_prefix=''): try: - table = Table(table_name, connection=connection) + index_map = dict( + name=index_data['index_name'], + ) - if dynamo_table_exists(table): - result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes) - else: - if not module.check_mode: - Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes) - result['changed'] = True - - if not module.check_mode: - result['table_status'] = table.describe()['Table']['TableStatus'] - - if tags: - # only tables which are active can be tagged - wait_until_table_active(module, table, wait_for_active_timeout) - account_id = get_account_id(boto3_sts) - boto3_dynamodb.tag_resource( - ResourceArn='arn:aws:dynamodb:' + - region + - ':' + - account_id + - ':table/' + - table_name, - Tags=ansible_dict_to_boto3_tag_list(tags)) - result['tags'] = tags - - except BotoServerError: - result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc() - module.fail_json(**result) - else: - module.exit_json(**result) + index_data = dict(index_data) + index_data['attribute_definitions'] = attributes + index_map.update(_decode_primary_index(index_data)) -def get_account_id(boto3_sts): - return boto3_sts.get_caller_identity()["Account"] + throughput = index_data.get('provisioned_throughput', {}) + if throughput: + index_map['read_capacity'] = throughput.get('read_capacity_units') + index_map['write_capacity'] = throughput.get('write_capacity_units') + projection = index_data.get('projection', {}) + if projection: + index_map['type'] = type_prefix + projection.get('projection_type') + index_map['includes'] = projection.get('non_key_attributes', []) -def wait_until_table_active(module, table, wait_timeout): - max_wait_time = time.time() + wait_timeout - while (max_wait_time > time.time()) and (table.describe()['Table']['TableStatus'] != 'ACTIVE'): - time.sleep(5) - if max_wait_time <= time.time(): - # waiting took too long - module.fail_json(msg="timed out waiting for table to exist") + return index_map + except Exception as e: + module.fail_json_aws(e, msg='Decode failure', index_data=index_data) -def delete_dynamo_table(connection, module): - table_name = module.params.get('name') +def 
compatability_results(current_table): + if not current_table: + return dict() + + throughput = current_table.get('provisioned_throughput', {}) - result = dict( - region=module.params.get('region'), - table_name=table_name, + primary_indexes = _decode_primary_index(current_table) + + hash_key_name = primary_indexes.get('hash_key_name') + hash_key_type = primary_indexes.get('hash_key_type') + range_key_name = primary_indexes.get('range_key_name') + range_key_type = primary_indexes.get('range_key_type') + + indexes = list() + global_indexes = current_table.get('_global_index_map', {}) + local_indexes = current_table.get('_local_index_map', {}) + for index in global_indexes: + indexes.append(global_indexes[index]) + for index in local_indexes: + indexes.append(local_indexes[index]) + + compat_results = dict( + hash_key_name=hash_key_name, + hash_key_type=hash_key_type, + range_key_name=range_key_name, + range_key_type=range_key_type, + indexes=indexes, + read_capacity=throughput.get('read_capacity_units', None), + region=module.region, + table_name=current_table.get('table_name', None), + table_status=current_table.get('table_status', None), + tags=current_table.get('tags', {}), + write_capacity=throughput.get('write_capacity_units', None), ) + return compat_results + + +def get_dynamodb_table(): + table_name = module.params.get('name') + try: + table = _describe_table(TableName=table_name) + except is_boto3_error_code('ResourceNotFoundException'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe table') + + table = table['Table'] try: - table = Table(table_name, connection=connection) + tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags'] + except is_boto3_error_code('AccessDeniedException'): + module.warn('Permission denied when listing tags') + tags = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to list table tags') + + tags = boto3_tag_list_to_ansible_dict(tags) + + table = camel_dict_to_snake_dict(table) + + # Put some of the values into places people will expect them + table['arn'] = table['table_arn'] + table['name'] = table['table_name'] + table['status'] = table['table_status'] + table['id'] = table['table_id'] + table['size'] = table['table_size_bytes'] + table['tags'] = tags + + # convert indexes into something we can easily search against + attributes = table['attribute_definitions'] + global_index_map = dict() + local_index_map = dict() + for index in table.get('global_secondary_indexes', []): + idx = _decode_index(index, attributes, type_prefix='global_') + global_index_map[idx['name']] = idx + for index in table.get('local_secondary_indexes', []): + idx = _decode_index(index, attributes) + local_index_map[idx['name']] = idx + table['_global_index_map'] = global_index_map + table['_local_index_map'] = local_index_map + + return table + + +def _generate_attribute_map(): + """ + Builds a map of Key Names to Type + """ + attributes = dict() + + for index in (module.params, *module.params.get('indexes')): + # run through hash_key_name and range_key_name + for t in ['hash', 'range']: + key_name = index.get(t + '_key_name') + if not key_name: + continue + key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT + _type = _long_type_to_short(key_type) + if key_name in attributes: + if _type != 
attributes[key_name]: + module.fail_json(msg='Conflicting attribute type', + type_1=_type, type_2=attributes[key_name], + key_name=key_name) + else: + attributes[key_name] = _type + + return attributes + + +def _generate_attributes(): + attributes = _generate_attribute_map() + + # Use ansible_dict_to_boto3_tag_list to generate the list of dicts + # format we need + attrs = ansible_dict_to_boto3_tag_list( + attributes, + tag_name_key_name='AttributeName', + tag_value_key_name='AttributeType' + ) + return list(attrs) - if dynamo_table_exists(table): - if not module.check_mode: - table.delete() - result['changed'] = True - else: - result['changed'] = False +def _generate_throughput(params=None): + if not params: + params = module.params + + read_capacity = params.get('read_capacity') or 1 + write_capacity = params.get('write_capacity') or 1 + throughput = dict( + ReadCapacityUnits=read_capacity, + WriteCapacityUnits=write_capacity, + ) + + return throughput + + +def _generate_schema(params=None): + if not params: + params = module.params + + schema = list() + hash_key_name = params.get('hash_key_name') + range_key_name = params.get('range_key_name') + + if hash_key_name: + entry = _schema_dict(hash_key_name, 'HASH') + schema.append(entry) + if range_key_name: + entry = _schema_dict(range_key_name, 'RANGE') + schema.append(entry) - except BotoServerError: - result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc() - module.fail_json(**result) + return schema + + +def _primary_index_changes(current_table): + + primary_index = _decode_primary_index(current_table) + + hash_key_name = primary_index.get('hash_key_name') + _hash_key_name = module.params.get('hash_key_name') + hash_key_type = primary_index.get('hash_key_type') + _hash_key_type = module.params.get('hash_key_type') + range_key_name = primary_index.get('range_key_name') + _range_key_name = module.params.get('range_key_name') + range_key_type = primary_index.get('range_key_type') + _range_key_type = module.params.get('range_key_type') + + changed = list() + + if _hash_key_name and (_hash_key_name != hash_key_name): + changed.append('hash_key_name') + if _hash_key_type and (_hash_key_type != hash_key_type): + changed.append('hash_key_type') + if _range_key_name and (_range_key_name != range_key_name): + changed.append('range_key_name') + if _range_key_type and (_range_key_type != range_key_type): + changed.append('range_key_type') + + return changed + + +def _throughput_changes(current_table): + + throughput = current_table.get('provisioned_throughput', {}) + read_capacity = throughput.get('read_capacity_units', None) + _read_capacity = module.params.get('read_capacity') or read_capacity + write_capacity = throughput.get('write_capacity_units', None) + _write_capacity = module.params.get('write_capacity') or write_capacity + + if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): + return dict( + ReadCapacityUnits=_read_capacity, + WriteCapacityUnits=_write_capacity, + ) + + return dict() + + +def _generate_global_indexes(): + index_exists = dict() + indexes = list() + for index in module.params.get('indexes'): + if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + continue + name = index.get('name') + if name in index_exists: + module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + # Convert the type name to upper case and remove the global_ + index['type'] = index['type'].upper()[7:] + index = _generate_index(index) + 
index_exists[name] = True + indexes.append(index) + + return indexes + + +def _generate_local_indexes(): + index_exists = dict() + indexes = list() + for index in module.params.get('indexes'): + index = dict(index) + if index.get('type') not in ['all', 'include', 'keys_only']: + continue + name = index.get('name') + if name in index_exists: + module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) + index['type'] = index['type'].upper() + index = _generate_index(index, False) + index_exists[name] = True + indexes.append(index) + + return indexes + + +def _generate_global_index_map(current_table): + global_index_map = dict() + existing_indexes = current_table['_global_index_map'] + for index in module.params.get('indexes'): + if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + continue + name = index.get('name') + if name in global_index_map: + module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + idx = _merge_index_params(index, existing_indexes.get(name, {})) + # Convert the type name to upper case and remove the global_ + idx['type'] = idx['type'].upper()[7:] + global_index_map[name] = idx + return global_index_map + + +def _generate_local_index_map(current_table): + local_index_map = dict() + existing_indexes = current_table['_local_index_map'] + for index in module.params.get('indexes'): + if index.get('type') not in ['all', 'include', 'keys_only']: + continue + name = index.get('name') + if name in local_index_map: + module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) + idx = _merge_index_params(index, existing_indexes.get(name, {})) + # Convert the type name to upper case + idx['type'] = idx['type'].upper() + local_index_map[name] = idx + return local_index_map + + +def _generate_index(index, include_throughput=True): + key_schema = _generate_schema(index) + throughput = _generate_throughput(index) + non_key_attributes = index['includes'] or [] + projection = dict( + ProjectionType=index['type'], + ) + if index['type'] != 'ALL': + projection['NonKeyAttributes'] = non_key_attributes else: - module.exit_json(**result) + if non_key_attributes: + module.deprecate( + "DynamoDB does not support specifying non-key-attributes ('includes') for " + "indexes of type 'all'. Attempts to set these attributes are currently " + "ignored, but in future will result in a failure. " + "Index name: {0}".format(index['name']), + version='3.0.0', collection_name='community.aws') + + idx = dict( + IndexName=index['name'], + KeySchema=key_schema, + Projection=projection, + ) + if include_throughput: + idx['ProvisionedThroughput'] = throughput + return idx -def dynamo_table_exists(table): - try: - table.describe() - return True - except JSONResponseError as e: - if e.message and e.message.startswith('Requested resource not found'): - return False - else: - raise e +def _attribute_changes(current_table): + # TODO (future) It would be nice to catch attempts to change types here.
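+ # One possible shape for such a check (an illustrative sketch only, not
+ # part of the original change): compare the requested key types against
+ # the table's current attribute definitions and fail on any mismatch.
+ #   current = dict((a['attribute_name'], a['attribute_type'])
+ #                  for a in current_table.get('attribute_definitions', []))
+ #   for attr in _generate_attributes():
+ #       if attr['AttributeName'] in current and current[attr['AttributeName']] != attr['AttributeType']:
+ #           module.fail_json(msg='Attribute type changes are not supported',
+ #                            key_name=attr['AttributeName'])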
+ return _generate_attributes() -def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None): - table.describe() # populate table details - throughput_changed = False - global_indexes_changed = False - if has_throughput_changed(table, throughput): - if not check_mode: - throughput_changed = table.update(throughput=throughput) - else: - throughput_changed = True +def _global_index_changes(current_table): + current_global_index_map = current_table['_global_index_map'] + global_index_map = _generate_global_index_map(current_table) - removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes) - if removed_indexes: - if not check_mode: - for name, index in removed_indexes.items(): - global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed - else: - global_indexes_changed = True + index_changes = list() - if added_indexes: - if not check_mode: - for name, index in added_indexes.items(): - global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed + # TODO (future) it would be nice to add support for deleting an index + for name in global_index_map: + idx = dict(_generate_index(global_index_map[name])) + if name not in current_global_index_map: + index_changes.append(dict(Create=idx)) else: - global_indexes_changed = True + # The only thing we can change is the provisioned throughput. + # TODO (future) it would be nice to throw a deprecation here + # rather than dropping other changes on the floor + _current = current_global_index_map[name] + _new = global_index_map[name] + change = dict() + if _new['read_capacity'] != _current['read_capacity']: + change['ReadCapacityUnits'] = _new['read_capacity'] + if _new['write_capacity'] != _current['write_capacity']: + change['WriteCapacityUnits'] = _new['write_capacity'] + if change: + update = dict( + IndexName=name, + ProvisionedThroughput=change, + ) + index_changes.append(dict(Update=update)) + + return index_changes + + +def _local_index_changes(current_table): + # TODO (future) Changes to Local Indexes aren't possible after creation, + # we should probably throw a deprecation warning here (original module + # also just dropped these changes on the floor) + return [] + + +def _update_table(current_table): + changes = dict() + additional_global_index_changes = list() + + throughput_changes = _throughput_changes(current_table) + if throughput_changes: + changes['ProvisionedThroughput'] = throughput_changes + + global_index_changes = _global_index_changes(current_table) + if global_index_changes: + changes['GlobalSecondaryIndexUpdates'] = global_index_changes + # Only one index can be changed at a time, pass the first during the + # main update and deal with the others on a slow retry to wait for + # completion + if len(global_index_changes) > 1: + changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] + additional_global_index_changes = global_index_changes[1:] + + local_index_changes = _local_index_changes(current_table) + if local_index_changes: + changes['LocalSecondaryIndexUpdates'] = local_index_changes + + if not changes: + return False - if index_throughput_changes: - if not check_mode: - # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed + if module.check_mode: + return True + + if global_index_changes or local_index_changes: + changes['AttributeDefinitions'] = _generate_attributes() + + try: + client.update_table( + aws_retry=True, + 
TableName=module.params.get('name'), + **changes + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update table") + + if additional_global_index_changes: + for index in additional_global_index_changes: try: - global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed - except ValidationException: - pass - else: - global_indexes_changed = True + _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update table", + changes=changes, + additional_global_index_changes=additional_global_index_changes) + + if module.params.get('wait'): + wait_exists() - return throughput_changed or global_indexes_changed + return True -def has_throughput_changed(table, new_throughput): - if not new_throughput: +def _update_tags(current_table): + _tags = module.params.get('tags') + if _tags is None: return False - return new_throughput['read'] != table.throughput['read'] or \ - new_throughput['write'] != table.throughput['write'] + tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'), + purge_tags=module.params.get('purge_tags')) + # If neither needs updating we can return early + if not (tags_to_add or tags_to_remove): + return False -def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type): - if range_key_name: - schema = [ - HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])), - RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) - ] - else: - schema = [ - HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) - ] - return schema + if module.check_mode: + return True + + if tags_to_add: + try: + client.tag_resource( + aws_retry=True, + ResourceArn=current_table['arn'], + Tags=ansible_dict_to_boto3_tag_list(tags_to_add), + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to tag table") + if tags_to_remove: + try: + client.untag_resource( + aws_retry=True, + ResourceArn=current_table['arn'], + TagKeys=tags_to_remove, + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to untag table") + + return True + + +def update_table(current_table): + primary_index_changes = _primary_index_changes(current_table) + if primary_index_changes: + module.deprecate("DynamoDB does not support updating the Primary keys on a table. " + "Attempts to change the keys are currently ignored, but in future will " + "result in a failure. 
" + "Changed parameters are {0}".format(primary_index_changes), + version='3.0.0', collection_name='community.aws') + changed = False + changed |= _update_table(current_table) + changed |= _update_tags(current_table) -def get_changed_global_indexes(table, global_indexes): - table.describe() + if module.params.get('wait'): + wait_exists() - table_index_info = dict((index.name, index.schema()) for index in table.global_indexes) - table_index_objects = dict((index.name, index) for index in table.global_indexes) - set_index_info = dict((index.name, index.schema()) for index in global_indexes) - set_index_objects = dict((index.name, index) for index in global_indexes) + return changed - removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info) - added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info) - # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed - # for name, index in set_index_objects.items(): - # if (name not in added_indexes and - # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or - # index.throughput['write'] != str(table_index_objects[name].throughput['write']))): - # index_throughput_changes[name] = index.throughput - # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed - index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes) - return removed_indexes, added_indexes, index_throughput_changes +def create_table(): + table_name = module.params.get('name') + hash_key_name = module.params.get('hash_key_name') + tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) -def validate_index(index, module): - for key, val in index.items(): - if key not in INDEX_OPTIONS: - module.fail_json(msg='%s is not a valid option for an index' % key) - for required_option in INDEX_REQUIRED_OPTIONS: - if required_option not in index: - module.fail_json(msg='%s is a required option for an index' % required_option) - if index['type'] not in INDEX_TYPE_OPTIONS: - module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS)) + if not hash_key_name: + module.fail_json('"hash_key_name" must be provided when creating a new table.') + + if module.check_mode: + return True + throughput = _generate_throughput() + attributes = _generate_attributes() + key_schema = _generate_schema() + local_indexes = _generate_local_indexes() + global_indexes = _generate_global_indexes() + + params = dict( + TableName=table_name, + AttributeDefinitions=attributes, + KeySchema=key_schema, + ProvisionedThroughput=throughput, + Tags=tags, + # TODO (future) + # BillingMode, + # StreamSpecification, + # SSESpecification, + ) + if local_indexes: + params['LocalSecondaryIndexes'] = local_indexes + if global_indexes: + params['GlobalSecondaryIndexes'] = global_indexes -def get_indexes(all_indexes): - indexes = [] - global_indexes = [] - for index in all_indexes: - name = index['name'] - schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type')) - throughput = { - 'read': index.get('read_capacity', 1), - 'write': index.get('write_capacity', 1) - } + try: + client.create_table(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to
create table') - if index['type'] == 'all': - indexes.append(AllIndex(name, parts=schema)) + if module.params.get('wait'): + wait_exists() - elif index['type'] == 'global_all': - global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput)) + return True - elif index['type'] == 'global_include': - global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes'])) - elif index['type'] == 'global_keys_only': - global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput)) +def delete_table(current_table): + if not current_table: + return False - elif index['type'] == 'include': - indexes.append(IncludeIndex(name, parts=schema, includes=index['includes'])) + if module.check_mode: + return True + + table_name = module.params.get('name') + + # If an index is mid-update then we have to wait for the update to complete + # before deletion will succeed + long_retry = AWSRetry.jittered_backoff( + retries=45, delay=5, max_delay=30, + catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'], + ) + + try: + long_retry(client.delete_table)(TableName=table_name) + except is_boto3_error_code('ResourceNotFoundException'): + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete table') - elif index['type'] == 'keys_only': - indexes.append(KeysOnlyIndex(name, parts=schema)) + if module.params.get('wait'): + wait_not_exists() - return indexes, global_indexes + return True def main(): + + global module + global client + + # TODO (future) It would be good to split global and local indexes. They have + # different parameters, use a separate namespace for names, + # and local indexes can't be updated. + index_options = dict( + name=dict(type='str', required=True), + # It would be nice to make this optional, but because Local and Global + # indexes are mixed in here we need this to be able to tell to which + # group of indexes the index belongs. 
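+ # As an illustration (hypothetical values only), a single entry in the
+ # 'indexes' parameter might look like:
+ #   {'name': 'NamedIndex', 'type': 'global_include',
+ #    'hash_key_name': 'id', 'range_key_name': 'create_time',
+ #    'includes': ['other_field'], 'read_capacity': 2, 'write_capacity': 2}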
+ type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS), + hash_key_name=dict(type='str', required=False), + hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), + range_key_name=dict(type='str', required=False), + range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), + includes=dict(type='list', required=False, elements='str'), + read_capacity=dict(type='int', required=False), + write_capacity=dict(type='int', required=False), + ) + argument_spec = dict( state=dict(default='present', choices=['present', 'absent']), name=dict(required=True, type='str'), hash_key_name=dict(type='str'), - hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), + hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), range_key_name=dict(type='str'), - range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), - read_capacity=dict(default=1, type='int'), - write_capacity=dict(default=1, type='int'), - indexes=dict(default=[], type='list', elements='dict'), + range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), + read_capacity=dict(type='int'), + write_capacity=dict(type='int'), + indexes=dict(default=[], type='list', elements='dict', options=index_options), tags=dict(type='dict'), - wait_for_active_timeout=dict(default=60, type='int'), + purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=120, type='int', aliases=['wait_for_active_timeout']), ) module = AnsibleAWSModule( @@ -476,36 +931,35 @@ def main(): check_boto3=False, ) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - if not HAS_BOTO3 and module.params.get('tags'): - module.fail_json(msg='boto3 required when using tags for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg='region must be specified') + retry_decorator = AWSRetry.jittered_backoff( + catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + ) + client = module.client('dynamodb', retry_decorator=retry_decorator) - try: - connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) - except (NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json(msg=str(e)) - - if module.params.get('tags'): - try: - boto3_dynamodb = module.client('dynamodb') - boto3_sts = module.client('sts') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - else: - boto3_dynamodb = None - boto3_sts = None + current_table = get_dynamodb_table() + changed = False + table = None + results = dict() state = module.params.get('state') if state == 'present': - create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region) + if current_table: + changed |= update_table(current_table) + else: + changed |= create_table() + table = get_dynamodb_table() elif state == 'absent': - delete_dynamo_table(connection, module) + changed |= delete_table(current_table) + + compat_results = compatability_results(table) + if compat_results: + results.update(compat_results) + + results['changed'] = changed + if table: + results['table'] = table + + module.exit_json(**results) if __name__ == '__main__': From be633e1653aa62db0642a8508fc804c7410e6c0f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 22 Sep 2021 15:04:04 +0200 Subject: [PATCH 275/683] Ensure we can update 
indexes when only one of read/write capacity is updated. --- dynamodb_table.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 5d445e5f657..86c08f90934 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -348,6 +348,7 @@ def _decode_index(index_data, attributes, type_prefix=''): index_map.update(_decode_primary_index(index_data)) throughput = index_data.get('provisioned_throughput', {}) + index_map['provisioned_throughput'] = throughput if throughput: index_map['read_capacity'] = throughput.get('read_capacity_units') index_map['write_capacity'] = throughput.get('write_capacity_units') @@ -379,9 +380,13 @@ def compatability_results(current_table): global_indexes = current_table.get('_global_index_map', {}) local_indexes = current_table.get('_local_index_map', {}) for index in global_indexes: - indexes.append(global_indexes[index]) + idx = dict(global_indexes[index]) + idx.pop('provisioned_throughput', None) + indexes.append(idx) for index in local_indexes: - indexes.append(local_indexes[index]) + idx = dict(local_indexes[index]) + idx.pop('provisioned_throughput', None) + indexes.append(idx) compat_results = dict( hash_key_name=hash_key_name, @@ -543,13 +548,16 @@ def _primary_index_changes(current_table): return changed -def _throughput_changes(current_table): +def _throughput_changes(current_table, params=None): + + if not params: + params = module.params throughput = current_table.get('provisioned_throughput', {}) read_capacity = throughput.get('read_capacity_units', None) - _read_capacity = module.params.get('read_capacity') or read_capacity + _read_capacity = params.get('read_capacity') or read_capacity write_capacity = throughput.get('write_capacity_units', None) - _write_capacity = module.params.get('write_capacity') or write_capacity + _write_capacity = params.get('write_capacity') or write_capacity if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): return dict( @@ -679,11 +687,7 @@ def _global_index_changes(current_table): # rather than dropping other changes on the floor _current = current_global_index_map[name] _new = global_index_map[name] - change = dict() - if _new['read_capacity'] != _current['read_capacity']: - change['ReadCapacityUnits'] = _new['read_capacity'] - if _new['write_capacity'] != _current['write_capacity']: - change['WriteCapacityUnits'] = _new['write_capacity'] + change = dict(_throughput_changes(_current, _new)) if change: update = dict( IndexName=name, ProvisionedThroughput=change, ) index_changes.append(dict(Update=update)) @@ -746,9 +750,8 @@ def _update_table(current_table): try: _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update table", - changes=changes, - additional_global_index_changes=additional_global_index_changes) + module.fail_json_aws(e, msg="Failed to update table", changes=changes, + additional_global_index_changes=additional_global_index_changes) if module.params.get('wait'): wait_exists() @@ -957,6 +960,9 @@ def main(): results['changed'] = changed if table: + # These are used to pass computed data about, not needed for users + table.pop('_global_index_map', None) + table.pop('_local_index_map', None) results['table'] = table module.exit_json(**results) From f6fe72a4bf2e0e7a8f25dd25179f549b19727ae7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 12 Oct 2021 09:20:51 +0200 Subject:
[PATCH 276/683] Remove inaccurate references to boto --- aws_api_gateway.py | 4 ---- aws_sgw_info.py | 2 +- ec2_lc_find.py | 1 - rds_subnet_group.py | 4 ++-- 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/aws_api_gateway.py b/aws_api_gateway.py index 9eadf88d48b..5ce411195e9 100644 --- a/aws_api_gateway.py +++ b/aws_api_gateway.py @@ -19,10 +19,6 @@ stable guaranteed unique identifier for the API. If you do not give api_id then a new API will be created each time this is run. - - Beware that there are very hard limits on the rate that - you can call API Gateway's REST API. You may need to patch - your boto. See U(https://github.com/boto/boto3/issues/876) - and discuss it with your AWS rep. - swagger_file and swagger_text are passed directly on to AWS transparently whilst swagger_dict is an ansible dict which is converted to JSON before the API definitions are uploaded. diff --git a/aws_sgw_info.py b/aws_sgw_info.py index 37caabf3fd9..e59f8ecf9f1 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -353,7 +353,7 @@ def main(): client = module.client('storagegateway') if client is None: # this should never happen - module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.') + module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.') SGWInformationManager(client, module).fetch() diff --git a/ec2_lc_find.py b/ec2_lc_find.py index 6657de27349..3e525adc6cf 100644 --- a/ec2_lc_find.py +++ b/ec2_lc_find.py @@ -16,7 +16,6 @@ description: - Returns list of matching Launch Configurations for a given name, along with other useful information. - Results can be sorted and sliced. - - It depends on boto. - Based on the work by Tom Bamford U(https://github.com/tombamford) author: "Jose Armesto (@fiunchinho)" diff --git a/rds_subnet_group.py b/rds_subnet_group.py index bb0cc685a8a..7d789481c43 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -15,7 +15,7 @@ version_added: 1.0.0 short_description: manage RDS database subnet groups description: - - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. + - Creates, modifies, and deletes RDS database subnet groups. options: state: description: @@ -142,7 +142,7 @@ def create_result(changed, subnet_group=None): def create_subnet_list(subnets): ''' - Construct a list of subnet ids from a list of subnets dicts returned by boto. + Construct a list of subnet ids from a list of subnets dicts returned by boto3. Parameters: subnets (list): A list of subnets definitions. 
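+ Example (illustrative; assumes the boto3 'SubnetIdentifier' key used by
+ describe_db_subnet_groups):
+ create_subnet_list([{'SubnetIdentifier': 'subnet-0123456789abcdef0'}])
+ would return ['subnet-0123456789abcdef0']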
@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups From b9b0dc953f49aafa9a16543f75f19c172996cf17 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 11 Oct 2021 14:34:52 +0200 Subject: [PATCH 277/683] Add iam_access_key(_info) new modules --- iam_access_key.py | 316 +++++++++++++++++++++++++++++++++++++++++ iam_access_key_info.py | 127 +++++++++++++++++ 2 files changed, 443 insertions(+) create mode 100644 iam_access_key.py create mode 100644 iam_access_key_info.py diff --git a/iam_access_key.py b/iam_access_key.py new file mode 100644 index 00000000000..1d5701e9d74 --- /dev/null +++ b/iam_access_key.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: iam_access_key +version_added: 2.1.0 +short_description: Manage AWS IAM User access keys +description: + - Manage AWS IAM user access keys. +author: Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the key belongs. + required: true + type: str + aliases: ['username'] + id: + description: + - The ID of the access key. + - Required when I(state=absent). + - Mutually exclusive with I(rotate_keys). + required: false + type: str + state: + description: + - Create or remove the access key. + - When I(state=present) and I(id) is not defined a new key will be created. + required: false + type: str + default: 'present' + choices: [ 'present', 'absent' ] + active: + description: + - Whether the key should be enabled or disabled. + - Defaults to C(true) when creating a new key. + required: false + type: bool + aliases: ['enabled'] + rotate_keys: + description: + - When there are already 2 access keys attached to the IAM user the oldest + key will be removed and a new key created. + - Ignored if I(state=absent). + - Mutually exclusive with I(id). + required: false + type: bool + default: false + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a new access key + community.aws.iam_access_key: + user_name: example_user + state: present + +- name: Delete the access_key + community.aws.iam_access_key: + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE + state: absent +''' + +RETURN = r''' +access_key: + description: A dictionary containing all the access key information. + returned: When the key exists. + type: complex + contains: + access_key_id: + description: The ID for the access key. + returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +secret_access_key: + description: + - The secret access key.
+ - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data. + - Secret access keys can only be accessed at creation time. + returned: When a new key is created. + type: str + sample: example/Example+EXAMPLE+example/Example +deleted_access_key_id: + description: + - The access key deleted during rotation. + returned: When a key was deleted during the rotation of access keys + type: str + sample: AKIA1EXAMPLE1EXAMPLE +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def delete_access_key(access_keys, user, access_key_id): + if not access_key_id: + return False + + if access_key_id not in access_keys: + return False + + if module.check_mode: + return True + + try: + client.delete_access_key( + aws_retry=True, + UserName=user, + AccessKeyId=access_key_id, + ) + except is_boto3_error_code('NoSuchEntityException'): + # Generally occurs when race conditions have happened and someone + # deleted the key while we were checking to see if it existed. + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user) + ) + + return True + + +def update_access_key(access_keys, user, access_key_id, enabled): + if access_key_id not in access_keys: + module.fail_json( + msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user), + ) + + changes = dict() + access_key = access_keys.get(access_key_id) + + if enabled is not None: + desired_status = 'Active' if enabled else 'Inactive' + if access_key.get('status') != desired_status: + changes['Status'] = desired_status + + if not changes: + return False + + if module.check_mode: + return True + + try: + client.update_access_key( + aws_retry=True, + UserName=user, + AccessKeyId=access_key_id, + **changes + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, changes=changes, + msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), + ) + return True + + +def create_access_key(access_keys, user, rotate_keys, enabled): + changed = False + oldest_key = False + + if len(access_keys) > 1 and rotate_keys: + sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None)) + oldest_key = sorted_keys[0] + changed |= delete_access_key(access_keys, user, oldest_key) + + if module.check_mode: + if changed: + return dict(deleted_access_key=oldest_key) + return True + + try: + results = client.create_access_key(aws_retry=True, UserName=user) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) + results = camel_dict_to_snake_dict(results) + access_key = results.get('access_key') + access_key = 
normalize_boto3_result(access_key) + + # Update settings which can't be managed on creation + if enabled is False: + access_key_id = access_key['access_key_id'] + access_keys = {access_key_id: access_key} + update_access_key(access_keys, user, access_key_id, enabled) + access_key['status'] = 'Inactive' + + if oldest_key: + access_key['deleted_access_key'] = oldest_key + + return access_key + + +def get_access_keys(user): + try: + results = client.list_access_keys(aws_retry=True, UserName=user) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg='Failed to get access keys for user "{0}"'.format(user) + ) + if not results: + return None + + results = camel_dict_to_snake_dict(results) + access_keys = results.get('access_key_metadata', []) + if not access_keys: + return [] + + access_keys = normalize_boto3_result(access_keys) + access_keys = {k['access_key_id']: k for k in access_keys} + return access_keys + + +def main(): + + global module + global client + + argument_spec = dict( + user_name=dict(required=True, type='str', aliases=['username']), + id=dict(required=False, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + active=dict(required=False, type='bool', aliases=['enabled']), + rotate_keys=dict(required=False, type='bool', default=False), + ) + + required_if = [ + ['state', 'absent', ('id',)], + ] + mutually_exclusive = [ + ['rotate_keys', 'id'], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True + ) + + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + changed = False + state = module.params.get('state') + user = module.params.get('user_name') + access_key_id = module.params.get('id') + rotate_keys = module.params.get('rotate_keys') + enabled = module.params.get('active') + + access_keys = get_access_keys(user) + results = dict() + + if state == 'absent': + changed |= delete_access_key(access_keys, user, access_key_id) + else: + # If we have an ID then we should try to update it + if access_key_id: + changed |= update_access_key(access_keys, user, access_key_id, enabled) + access_keys = get_access_keys(user) + results['access_key'] = access_keys.get(access_key_id, None) + # Otherwise we try to create a new one + else: + secret_key = create_access_key(access_keys, user, rotate_keys, enabled) + if isinstance(secret_key, bool): + changed |= secret_key + else: + changed = True + results['access_key_id'] = secret_key.get('access_key_id', None) + results['secret_access_key'] = secret_key.pop('secret_access_key', None) + results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None) + if secret_key: + results['access_key'] = secret_key + results = scrub_none_parameters(results) + + module.exit_json(changed=changed, **results) + + +if __name__ == '__main__': + main() diff --git a/iam_access_key_info.py b/iam_access_key_info.py new file mode 100644 index 00000000000..9251cb846f6 --- /dev/null +++ b/iam_access_key_info.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: iam_access_key_info +version_added: 2.1.0 +short_description: fetch information about AWS IAM User access keys +description: + - 'Fetches information about AWS IAM user access keys.'
+ - 'Note: It is not possible to fetch the secret access key.' +author: Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the keys belong. + required: true + type: str + aliases: ['username'] + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Fetch Access keys for a user + community.aws.iam_access_key_info: + user_name: example_user +''' + +RETURN = r''' +access_keys: + description: A list of dictionaries containing all the access key information. + returned: When the keys exist. + type: list + elements: dict + contains: + access_key_id: + description: The ID for the access key. + returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def get_access_keys(user): + try: + results = client.list_access_keys(aws_retry=True, UserName=user) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg='Failed to get access keys for user "{0}"'.format(user) + ) + if not results: + return None + + results = camel_dict_to_snake_dict(results) + access_keys = results.get('access_key_metadata', []) + if not access_keys: + return [] + + access_keys = normalize_boto3_result(access_keys) + access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None)) + return access_keys + + +def main(): + + global module + global client + + argument_spec = dict( + user_name=dict(required=True, type='str', aliases=['username']), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + changed = False + user = module.params.get('user_name') + access_keys = get_access_keys(user) + + module.exit_json(changed=changed, access_keys=access_keys) + + +if __name__ == '__main__': + main() From d7b2b12d3dd4ae998151e8391a43a056a9b4ac85 Mon Sep 17 00:00:00 2001 From: szb640 Date: Wed, 25 Aug 2021 17:12:50 +0200 Subject: [PATCH 278/683] Added tags to iam_user --- iam_user.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++-- iam_user_info.py | 16 +++++++++-- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/iam_user.py b/iam_user.py index 659eec56354..fd96d7b5ed3 100644 --- a/iam_user.py +++ b/iam_user.py @@ -41,6 +41,16 @@ default: false type: bool aliases: ['purge_policy', 'purge_managed_policies'] + tags: + description: + - Tag dict to apply to the user.
+ required: false + type: dict + purge_tags: + description: + - Remove tags not listed in I(tags) when tags is specified. + default: true + type: bool extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -71,6 +81,13 @@ state: present purge_policies: true +- name: Create user with tags + community.aws.iam_user: + name: testuser1 + state: present + tags: + Env: Prod + - name: Delete the user community.aws.iam_user: name: testuser1 @@ -103,6 +120,11 @@ description: the path to the user type: str sample: / + tags: + description: user tags + type: dict + returned: always + sample: '{"Env": "Prod"}' ''' try: @@ -114,6 +136,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags def compare_attached_policies(current_attached_policies, new_attached_policies): @@ -156,6 +181,7 @@ def create_or_update_user(connection, module): params = dict() params['UserName'] = module.params.get('name') + params["Tags"] = module.params.get('tags') managed_policies = module.params.get('managed_policies') purge_policies = module.params.get('purge_policies') changed = False @@ -176,6 +202,8 @@ def create_or_update_user(connection, module): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create user") + else: + changed = update_user_tags(connection, module, params['UserName'], user) # Manage managed policies current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) @@ -208,13 +236,15 @@ def create_or_update_user(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( policy_arn, params['UserName'])) + if module.check_mode: module.exit_json(changed=changed) # Get the user again user = get_user(connection, module, params['UserName']) + user['tags'] = get_user_tags(connection, module, params['UserName']) - module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user)) + module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user, ignore_list=["tags"])) def destroy_user(connection, module): @@ -302,6 +332,13 @@ def get_user(connection, module, name): module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) +def get_user_tags(connection, module, user_name): + try: + return boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name, aws_retry=True)['Tags']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to list tags for user {0}".format(user_name)) + + def get_attached_policy_list(connection, module, name): try: @@ -321,6 +358,32 @@ def delete_user_login_profile(connection, module, user_name): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) +def update_user_tags(connection, module, user_name, user): + new_tags = module.params.get('Tags') + if new_tags is None: + return False + 
new_tags = boto3_tag_list_to_ansible_dict(new_tags) + + purge_tags = module.params.get('purge_tags') + + try: + existing_tags = boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name, aws_retry=True)['Tags']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): + existing_tags = {} + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) + + if not module.check_mode: + try: + if tags_to_remove: + connection.untag_user(UserName=user_name, TagKeys=tags_to_remove, aws_retry=True) + if tags_to_add: + connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for user %s' % user_name) + + changed = bool(tags_to_add) or bool(tags_to_remove) + return changed def main(): @@ -328,7 +391,9 @@ def main(): name=dict(required=True, type='str'), managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), state=dict(choices=['present', 'absent'], required=True), - purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) + purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), ) module = AnsibleAWSModule( diff --git a/iam_user_info.py b/iam_user_info.py index 5ada74c612b..1662a66fc30 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -96,6 +96,11 @@ returned: if user exists type: str sample: "test_user" + tags: + description: User tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' ''' try: @@ -107,6 +112,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @AWSRetry.exponential_backoff() @@ -128,7 +134,13 @@ def list_iam_users(connection, module): if name: params['UserName'] = name try: - iam_users.append(connection.get_user(**params)['User']) + user = connection.get_user(**params)['User'] + try: + user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) + del user['Tags'] + except KeyError: + user['tags'] = {} + iam_users.append(user) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name) @@ -150,7 +162,7 @@ def list_iam_users(connection, module): if name: iam_users = [user for user in iam_users if user['UserName'] == name] - module.exit_json(iam_users=[camel_dict_to_snake_dict(user) for user in iam_users]) + module.exit_json(iam_users=[camel_dict_to_snake_dict(user, ignore_list=['tags']) for user in iam_users]) def main(): From 926ee3c1a0628414d9a16229341dbc04052c5372 Mon Sep 17 00:00:00 2001 From: szb640 Date: Thu, 26 Aug 2021 14:48:53 +0200 Subject: [PATCH 279/683] Fix spacing. 
--- iam_user.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/iam_user.py b/iam_user.py index fd96d7b5ed3..efbf0d120ad 100644 --- a/iam_user.py +++ b/iam_user.py @@ -358,6 +358,7 @@ def delete_user_login_profile(connection, module, user_name): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) + def update_user_tags(connection, module, user_name, user): new_tags = module.params.get('Tags') if new_tags is None: @@ -385,6 +386,7 @@ def update_user_tags(connection, module, user_name, user): changed = bool(tags_to_add) or bool(tags_to_remove) return changed + def main(): argument_spec = dict( From 3051627d2bbd30983791cd5746b1dde941235ec8 Mon Sep 17 00:00:00 2001 From: bsziksza Date: Thu, 26 Aug 2021 16:16:40 +0200 Subject: [PATCH 280/683] Fix module parameter name. Remove unused method parameter. --- iam_user.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/iam_user.py b/iam_user.py index efbf0d120ad..ed3ccbf5c4e 100644 --- a/iam_user.py +++ b/iam_user.py @@ -203,7 +203,7 @@ def create_or_update_user(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create user") else: - changed = update_user_tags(connection, module, params['UserName'], user) + changed = update_user_tags(connection, module, params['UserName']) # Manage managed policies current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) @@ -359,11 +359,10 @@ def delete_user_login_profile(connection, module, user_name): module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) -def update_user_tags(connection, module, user_name, user): - new_tags = module.params.get('Tags') +def update_user_tags(connection, module, user_name): + new_tags = module.params.get('tags') if new_tags is None: return False - new_tags = boto3_tag_list_to_ansible_dict(new_tags) purge_tags = module.params.get('purge_tags') @@ -414,4 +413,4 @@ def main(): if __name__ == '__main__': - main() + main() \ No newline at end of file From f5b27aaac01c0d072c2403b71a6b6b59d19cb247 Mon Sep 17 00:00:00 2001 From: szb640 Date: Thu, 26 Aug 2021 16:37:40 +0200 Subject: [PATCH 281/683] Restored EOL. --- iam_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iam_user.py b/iam_user.py index ed3ccbf5c4e..3861537e683 100644 --- a/iam_user.py +++ b/iam_user.py @@ -413,4 +413,4 @@ def main(): if __name__ == '__main__': - main() \ No newline at end of file + main() From 37e35e8a509afc53643257d93bc6eb7b43bfeece Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 12:56:36 +0200 Subject: [PATCH 282/683] Fix user creation with no tags. 
--- iam_user.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/iam_user.py b/iam_user.py index 3861537e683..17e83041ecf 100644 --- a/iam_user.py +++ b/iam_user.py @@ -181,9 +181,10 @@ def create_or_update_user(connection, module): params = dict() params['UserName'] = module.params.get('name') - params["Tags"] = module.params.get('tags') managed_policies = module.params.get('managed_policies') purge_policies = module.params.get('purge_policies') + if module.params.get('tags') is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) changed = False if managed_policies: managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) From dc937807663b798a3a0232153894686ffdb178f9 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 13:05:44 +0200 Subject: [PATCH 283/683] Removed retries as iam_user doesn't support those yet. --- iam_user.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/iam_user.py b/iam_user.py index 17e83041ecf..ee1f9e95671 100644 --- a/iam_user.py +++ b/iam_user.py @@ -335,7 +335,7 @@ def get_user(connection, module, name): def get_user_tags(connection, module, user_name): try: - return boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name, aws_retry=True)['Tags']) + return boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list tags for user {0}".format(user_name)) @@ -368,7 +368,7 @@ def update_user_tags(connection, module, user_name): purge_tags = module.params.get('purge_tags') try: - existing_tags = boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name, aws_retry=True)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} @@ -377,9 +377,9 @@ def update_user_tags(connection, module, user_name): if not module.check_mode: try: if tags_to_remove: - connection.untag_user(UserName=user_name, TagKeys=tags_to_remove, aws_retry=True) + connection.untag_user(UserName=user_name, TagKeys=tags_to_remove) if tags_to_add: - connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) + connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to set tags for user %s' % user_name) From 4df6cc00779b87bf71c2cfa3f77780fd1df5051b Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 13:17:32 +0200 Subject: [PATCH 284/683] Convert tags when using iam_user_info. 
--- iam_user_info.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/iam_user_info.py b/iam_user_info.py index 1662a66fc30..6984aca77f2 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -121,6 +121,15 @@ def list_iam_users_with_backoff(client, operation, **kwargs): return paginator.paginate(**kwargs).build_full_result() +def describe_user(user): + try: + user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) + del user['Tags'] + except KeyError: + user['tags'] = {} + return user + + def list_iam_users(connection, module): name = module.params.get('name') @@ -134,13 +143,7 @@ def list_iam_users(connection, module): if name: params['UserName'] = name try: - user = connection.get_user(**params)['User'] - try: - user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) - del user['Tags'] - except KeyError: - user['tags'] = {} - iam_users.append(user) + iam_users.append(connection.get_user(**params)['User']) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name) @@ -162,7 +165,7 @@ def list_iam_users(connection, module): if name: iam_users = [user for user in iam_users if user['UserName'] == name] - module.exit_json(iam_users=[camel_dict_to_snake_dict(user, ignore_list=['tags']) for user in iam_users]) + module.exit_json(iam_users=[camel_dict_to_snake_dict(describe_user(user), ignore_list=['tags']) for user in iam_users]) def main(): From 37bd9743766a6661933858c8db62e6300a1512cd Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 13:44:42 +0200 Subject: [PATCH 285/683] Moved tag creation to get_user. Reduce AWS calls by using retrieved user object. Minor iam_user_info fixes. --- iam_user.py | 31 ++++++++++++++----------------- iam_user_info.py | 6 +++--- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/iam_user.py b/iam_user.py index ee1f9e95671..2f2f36c0aa6 100644 --- a/iam_user.py +++ b/iam_user.py @@ -204,7 +204,7 @@ def create_or_update_user(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create user") else: - changed = update_user_tags(connection, module, params['UserName']) + changed = update_user_tags(connection, module, params, user) # Manage managed policies current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) @@ -243,7 +243,6 @@ def create_or_update_user(connection, module): # Get the user again user = get_user(connection, module, params['UserName']) - user['tags'] = get_user_tags(connection, module, params['UserName']) module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user, ignore_list=["tags"])) @@ -326,18 +325,18 @@ def get_user(connection, module, name): params['UserName'] = name try: - return connection.get_user(**params) + user = connection.get_user(**params) except is_boto3_error_code('NoSuchEntity'): return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) - - -def get_user_tags(connection, module, user_name): + try: - return boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name)['Tags']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list tags for user {0}".format(user_name)) + user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) + del user['Tags'] 
+ except KeyError: + user['tags'] = {} + return user def get_attached_policy_list(connection, module, name): @@ -360,18 +359,16 @@ def delete_user_login_profile(connection, module, user_name): module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) -def update_user_tags(connection, module, user_name): - new_tags = module.params.get('tags') +def update_user_tags(connection, module, params, user): + user_name = params['UserName'] + existing_tags = user['tags'] + new_tags = params.get('Tags') if new_tags is None: return False - + new_tags = boto3_tag_list_to_ansible_dict(new_tags) + purge_tags = module.params.get('purge_tags') - try: - existing_tags = boto3_tag_list_to_ansible_dict(connection.list_user_tags(UserName=user_name)['Tags']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): - existing_tags = {} - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) if not module.check_mode: diff --git a/iam_user_info.py b/iam_user_info.py index 6984aca77f2..457e05bc5a4 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -99,7 +99,7 @@ tags: description: User tags. type: dict - returned: always + returned: if user exists sample: '{"Env": "Prod"}' ''' @@ -121,7 +121,7 @@ def list_iam_users_with_backoff(client, operation, **kwargs): return paginator.paginate(**kwargs).build_full_result() -def describe_user(user): +def describe_iam_user(user): try: user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) del user['Tags'] @@ -165,7 +165,7 @@ def list_iam_users(connection, module): if name: iam_users = [user for user in iam_users if user['UserName'] == name] - module.exit_json(iam_users=[camel_dict_to_snake_dict(describe_user(user), ignore_list=['tags']) for user in iam_users]) + module.exit_json(iam_users=[camel_dict_to_snake_dict(describe_iam_user(user), ignore_list=['tags']) for user in iam_users]) def main(): From 78956261e9b8e6d75a3955607a24f9eb15b52342 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 13:56:40 +0200 Subject: [PATCH 286/683] User contents are inside 'User' key. --- iam_user.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/iam_user.py b/iam_user.py index 2f2f36c0aa6..874d25536ce 100644 --- a/iam_user.py +++ b/iam_user.py @@ -332,10 +332,10 @@ def get_user(connection, module, name): module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) try: - user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) - del user['Tags'] + user['User']['tags'] = boto3_tag_list_to_ansible_dict(user['User']['Tags']) + del user['User']['Tags'] except KeyError: - user['tags'] = {} + user['User']['tags'] = {} return user @@ -361,7 +361,7 @@ def delete_user_login_profile(connection, module, user_name): def update_user_tags(connection, module, params, user): user_name = params['UserName'] - existing_tags = user['tags'] + existing_tags = user['User']['tags'] new_tags = params.get('Tags') if new_tags is None: return False From 1157f899dac80d2363c2b4ff03e8707a95e0dc77 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 14:17:28 +0200 Subject: [PATCH 287/683] Fixed snake casing of tags. Added additional test for None tags. 
--- iam_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iam_user.py b/iam_user.py index 874d25536ce..06f396d2eb2 100644 --- a/iam_user.py +++ b/iam_user.py @@ -244,7 +244,7 @@ def create_or_update_user(connection, module): # Get the user again user = get_user(connection, module, params['UserName']) - module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user, ignore_list=["tags"])) + module.exit_json(changed=changed, iam_user={'user': camel_dict_to_snake_dict(user['User'], ignore_list=["tags"])}) def destroy_user(connection, module): From 367d370d01bdc1655a11357fb3330aa23bb4e7f1 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 14:43:47 +0200 Subject: [PATCH 288/683] Fixed whitespaces. --- iam_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iam_user.py b/iam_user.py index 06f396d2eb2..47ca325fa1e 100644 --- a/iam_user.py +++ b/iam_user.py @@ -330,7 +330,7 @@ def get_user(connection, module, name): return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) - + try: user['User']['tags'] = boto3_tag_list_to_ansible_dict(user['User']['Tags']) del user['User']['Tags'] @@ -366,7 +366,7 @@ def update_user_tags(connection, module, params, user): if new_tags is None: return False new_tags = boto3_tag_list_to_ansible_dict(new_tags) - + purge_tags = module.params.get('purge_tags') tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) From 994818a4074ad3b7e063ed936262e32457ccc806 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 14:58:44 +0200 Subject: [PATCH 289/683] Apply suggestions from code review pt 1 Co-authored-by: Mark Chappell --- iam_user_info.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/iam_user_info.py b/iam_user_info.py index 457e05bc5a4..10a6f2bffdc 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -122,11 +122,9 @@ def list_iam_users_with_backoff(client, operation, **kwargs): def describe_iam_user(user): - try: - user['tags'] = boto3_tag_list_to_ansible_dict(user['Tags']) - del user['Tags'] - except KeyError: - user['tags'] = {} + tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', [])) + user = camel_dict_to_snake_dict(user) + user['tags'] = tags return user @@ -165,7 +163,7 @@ def list_iam_users(connection, module): if name: iam_users = [user for user in iam_users if user['UserName'] == name] - module.exit_json(iam_users=[camel_dict_to_snake_dict(describe_iam_user(user), ignore_list=['tags']) for user in iam_users]) + module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users]) def main(): From 7b113030565bd9b38259874aadd6728b62a74208 Mon Sep 17 00:00:00 2001 From: szb640 Date: Fri, 27 Aug 2021 15:06:51 +0200 Subject: [PATCH 290/683] Cleaned up case transformation and tag assignment. 
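
As a reading aid, the pop/convert/reattach pattern used by this cleanup can be
sketched standalone (helper imports as used elsewhere in these modules; the sample
user dict is invented):

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict

    user = {'User': {'UserName': 'example', 'Tags': [{'Key': 'Env', 'Value': 'Prod'}]}}
    # Pull the boto3 tag list out first so snake-casing cannot mangle tag keys...
    tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', []))  # -> {'Env': 'Prod'}
    user = camel_dict_to_snake_dict(user)   # -> {'user': {'user_name': 'example'}}
    user['user']['tags'] = tags             # ...then reattach them unchanged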
--- iam_user.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/iam_user.py b/iam_user.py index 47ca325fa1e..44efaa2bad9 100644 --- a/iam_user.py +++ b/iam_user.py @@ -244,7 +244,7 @@ def create_or_update_user(connection, module): # Get the user again user = get_user(connection, module, params['UserName']) - module.exit_json(changed=changed, iam_user={'user': camel_dict_to_snake_dict(user['User'], ignore_list=["tags"])}) + module.exit_json(changed=changed, iam_user=user) def destroy_user(connection, module): @@ -331,11 +331,9 @@ def get_user(connection, module, name): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) - try: - user['User']['tags'] = boto3_tag_list_to_ansible_dict(user['User']['Tags']) - del user['User']['Tags'] - except KeyError: - user['User']['tags'] = {} + tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', [])) + user = camel_dict_to_snake_dict(user) + user['user']['tags'] = tags return user @@ -361,7 +359,7 @@ def delete_user_login_profile(connection, module, user_name): def update_user_tags(connection, module, params, user): user_name = params['UserName'] - existing_tags = user['User']['tags'] + existing_tags = user['user']['tags'] new_tags = params.get('Tags') if new_tags is None: return False From 253921ab90904d972818444d58cac3372522db44 Mon Sep 17 00:00:00 2001 From: szb640 Date: Wed, 15 Sep 2021 14:50:04 +0200 Subject: [PATCH 291/683] Added version_added tags to documentation. --- iam_user.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/iam_user.py b/iam_user.py index 44efaa2bad9..d195a9d7758 100644 --- a/iam_user.py +++ b/iam_user.py @@ -46,11 +46,13 @@ - Tag dict to apply to the user. required: false type: dict + version_added: 2.0.0 purge_tags: description: - Remove tags not listed in I(tags) when tags is specified. default: true type: bool + version_added: 2.0.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 From cfdd89417bfe3ce19c895fda8986d75c27decc58 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 13 Oct 2021 11:25:39 +0200 Subject: [PATCH 292/683] Update version_added - we missed 2.0.0 --- iam_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iam_user.py b/iam_user.py index d195a9d7758..2a7998a6d10 100644 --- a/iam_user.py +++ b/iam_user.py @@ -46,13 +46,13 @@ - Tag dict to apply to the user. required: false type: dict - version_added: 2.0.0 + version_added: 2.1.0 purge_tags: description: - Remove tags not listed in I(tags) when tags is specified. default: true type: bool - version_added: 2.0.0 + version_added: 2.1.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 From 6cce19415423a97a4fb9976871c5e85d61266787 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 12 Oct 2021 09:55:24 +0200 Subject: [PATCH 293/683] Add support for disabling route53 health checks --- route53_health_check.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/route53_health_check.py b/route53_health_check.py index bcf7357c0ef..e38e9adc053 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -22,6 +22,13 @@ choices: [ 'present', 'absent' ] type: str default: 'present' + disabled: + description: + - Stops Route 53 from performing health checks. + - See the AWS documentation for more details on the exact implications. 
+        U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html)
+      - Defaults to C(false) when creating a new health check.
+    type: bool
   ip_address:
     description:
       - IP address of the end-point to check. Either this or I(fqdn) has to be provided.
@@ -185,6 +192,11 @@
     type: str
     returned: When the health check exists and a search string has been configured.
     sample: 'ALIVE'
+disabled:
+    description: Whether the health check has been disabled or not.
+    type: bool
+    returned: When the health check exists.
+    sample: false
 '''

 import uuid

@@ -278,6 +290,8 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_
         RequestInterval=request_interval_in,
         Port=port_in,
     )
+    if module.params.get('disabled') is not None:
+        health_check['Disabled'] = module.params.get('disabled')
     if ip_addr_in:
         health_check['IPAddress'] = ip_addr_in
     if fqdn_in:
@@ -341,6 +355,10 @@ def update_health_check(existing_check):
     if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'):
         changes['FailureThreshold'] = failure_threshold

+    disabled = module.params.get('disabled', None)
+    if disabled is not None and disabled != existing_config.get('Disabled'):
+        changes['Disabled'] = module.params.get('disabled')
+
     # No changes...
     if not changes:
         return False, None
@@ -383,6 +401,7 @@ def describe_health_check(id):
 def main():
     argument_spec = dict(
         state=dict(choices=['present', 'absent'], default='present'),
+        disabled=dict(type='bool'),
         ip_address=dict(),
         port=dict(type='int'),
         type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),

From 31e26ce9c00fa42b6f5fb830dceb8df50b3f3224 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 12 Oct 2021 10:00:33 +0200
Subject: [PATCH 294/683] changelog

---
 route53_health_check.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/route53_health_check.py b/route53_health_check.py
index e38e9adc053..af482132e56 100644
--- a/route53_health_check.py
+++ b/route53_health_check.py
@@ -29,6 +29,7 @@
         U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html)
       - Defaults to C(false) when creating a new health check.
     type: bool
+    version_added: 2.1.0
   ip_address:
     description:
       - IP address of the end-point to check. Either this or I(fqdn) has to be provided.

From 9b6431f2e95bd960cf15726aa439180b95e71265 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Mon, 27 Sep 2021 10:10:41 +0200
Subject: [PATCH 295/683] Deprecate defaulting to dup_ok being false. This
 will mean that most invocations will only need to query the current cert
 rather than all certificates.

---
 iam_server_certificate.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/iam_server_certificate.py b/iam_server_certificate.py
index 5402b22d126..14cc5cef077 100644
--- a/iam_server_certificate.py
+++ b/iam_server_certificate.py
@@ -78,7 +78,8 @@
     description:
       - By default the module will not upload a certificate that is already uploaded into AWS.
       - If I(dup_ok=True), it will upload the certificate as long as the name is unique.
-      - Defaults to C(false).
+      - Currently defaults to C(false); this will default to C(true) in release
+        4.0.0.
     type: bool
 author: Jonathan I. Davila (@defionscode)
@@ -306,6 +307,12 @@ def main():
     new_name = module.params.get('new_name')
     new_path = module.params.get('new_path')
     dup_ok = module.params.get('dup_ok')
+
+    if dup_ok is None:
+        module.deprecate(
+            'The dup_ok parameter currently defaults to false; this will change in '
+            'release 4.0.0 to true.', version='4.0.0', collection_name='community.aws')
+
     if state == 'present' and not new_name and not new_path:
         cert, key, cert_chain = load_data(cert=module.params.get('cert'),
                                           key=module.params.get('key'),

From af6d0f83919ba52adbb25d537d167b26f46bbcf6 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Mon, 27 Sep 2021 08:57:02 +0200
Subject: [PATCH 296/683] Migrate iam_server_certificate to boto3

---
 iam_server_certificate.py | 375 ++++++++++++++++++++++++--------------
 1 file changed, 241 insertions(+), 134 deletions(-)

diff --git a/iam_server_certificate.py b/iam_server_certificate.py
index 14cc5cef077..1a5df57465c 100644
--- a/iam_server_certificate.py
+++ b/iam_server_certificate.py
@@ -126,114 +126,196 @@
 import os

 try:
-    import boto
-    import boto.iam
-    import boto.ec2
+    import botocore
 except ImportError:
     pass  # Handled by HAS_BOTO

+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
-
-
-def cert_meta(iam, name):
-    certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
-    ocert = certificate.certificate_body
-    opath = certificate.server_certificate_metadata.path
-    ocert_id = certificate.server_certificate_metadata.server_certificate_id
-    upload_date = certificate.server_certificate_metadata.upload_date
-    exp = certificate.server_certificate_metadata.expiration
-    arn = certificate.server_certificate_metadata.arn
-    return opath, ocert, ocert_id, upload_date, exp, arn
-
-
-def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
-    update = False
-
-    # IAM cert names are case insensitive
-    names_lower = [n.lower() for n in [name, new_name] if n is not None]
-    orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]
-
-    if any(ct in orig_cert_names_lower for ct in names_lower):
-        for i_name in names_lower:
-            if cert is not None:
-                try:
-                    c_index = orig_cert_names_lower.index(i_name)
-                except NameError:
-                    continue
-                else:
-                    # NOTE: remove the carriage return to strictly compare the cert bodies.
-                    slug_cert = cert.replace('\r', '')
-                    slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
-                    if slug_orig_cert_bodies == slug_cert:
-                        update = True
-                        break
-                    elif slug_cert.startswith(slug_orig_cert_bodies):
-                        update = True
-                        break
-                    else:
-                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
-                                         ' has a different certificate body associated'
-                                         ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
-            else:
-                update = True
-                break
-    elif cert in orig_cert_bodies and not dup_ok:
-        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
-            if crt_body == cert:
-                module.fail_json(changed=False, msg='This certificate already'
-                                 ' exists under the name %s' % crt_name)
-
-    return update
-
-
-def cert_action(module, iam, name, cpath, new_name, new_path, state,
-                cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
-    if state == 'present':
-        update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
-                           orig_cert_bodies, dup_ok)
-        if update:
-            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
-            changed = True
-            if new_name and new_path:
-                iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
-                module.exit_json(changed=changed, original_name=name, new_name=new_name,
-                                 original_path=opath, new_path=new_path, cert_body=ocert,
-                                 upload_date=upload_date, expiration_date=exp, arn=arn)
-            elif new_name and not new_path:
-                iam.update_server_cert(name, new_cert_name=new_name)
-                module.exit_json(changed=changed, original_name=name, new_name=new_name,
-                                 cert_path=opath, cert_body=ocert,
-                                 upload_date=upload_date, expiration_date=exp, arn=arn)
-            elif not new_name and new_path:
-                iam.update_server_cert(name, new_path=new_path)
-                module.exit_json(changed=changed, name=new_name,
-                                 original_path=opath, new_path=new_path, cert_body=ocert,
-                                 upload_date=upload_date, expiration_date=exp, arn=arn)
-            else:
-                changed = False
-                module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
-                                 upload_date=upload_date, expiration_date=exp, arn=arn,
-                                 msg='No new path or name specified. No changes made')
-        else:
-            changed = True
-            iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
-            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
-            module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
-                             upload_date=upload_date, expiration_date=exp, arn=arn)
-    elif state == 'absent':
-        if name in orig_cert_names:
-            changed = True
-            iam.delete_server_cert(name)
-            module.exit_json(changed=changed, deleted_cert=name)
-        else:
-            changed = False
-            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff()
+def _list_server_certificates():
+    paginator = client.get_paginator('list_server_certificates')
+    return paginator.paginate().build_full_result()['ServerCertificateMetadataList']
+
+
+def check_duplicate_cert(new_cert):
+    orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certificates())
+    for cert_name in orig_cert_names:
+        cert = get_server_certificate(cert_name)
+        if not cert:
+            continue
+        cert_body = cert.get('certificate_body', None)
+        if not _compare_cert(new_cert, cert_body):
+            continue
+        module.fail_json(
+            changed=False,
+            msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name),
+            duplicate_cert=cert,
+        )
+
+
+def _compare_cert(cert_a, cert_b):
+    if not cert_a and not cert_b:
+        return True
+    if not cert_a or not cert_b:
+        return False
+    # Trim out the whitespace before comparing the certs.  While this could mean
+    # an invalid cert 'matches' a valid cert, that's better than some stray
+    # whitespace breaking things
+    cert_a = cert_a.replace('\r', '')
+    cert_a = cert_a.replace('\n', '')
+    cert_a = cert_a.replace(' ', '')
+    cert_b = cert_b.replace('\r', '')
+    cert_b = cert_b.replace('\n', '')
+    cert_b = cert_b.replace(' ', '')
+
+    return cert_a == cert_b
+
+
+def update_server_certificate(current_cert):
+    changed = False
+
+    cert, key, cert_chain = load_data()
+
+    if not _compare_cert(cert, current_cert.get('certificate_body', None)):
+        module.fail_json(msg='Modifying the certificate body is not supported by AWS')
+    if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)):
+        module.fail_json(msg='Modifying the chaining certificate is not supported by AWS')
+    # We can't compare keys.
+
+    if module.check_mode:
+        return changed
+
+    # For now we can't make any changes.  Updates to tagging would go here and
+    # update 'changed'
+
+    return changed
+
+
+def create_server_certificate():
+    cert, key, cert_chain = load_data()
+
+    if not module.params.get('dup_ok'):
+        check_duplicate_cert(cert)
+
+    path = module.params.get('path')
+    name = module.params.get('name')
+
+    params = dict(
+        ServerCertificateName=name,
+        CertificateBody=cert,
+        PrivateKey=key,
+    )
+
+    if cert_chain:
+        params['CertificateChain'] = cert_chain
+    if path:
+        params['Path'] = path
+
+    if module.check_mode:
+        return True
+
+    try:
+        client.upload_server_certificate(
+            aws_retry=True,
+            **params
+        )
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to upload server certificate {0}'.format(name))
+
+    return True
+
+
+def rename_server_certificate(current_cert):
+    name = module.params.get('name')
+    new_name = module.params.get('new_name')
+    new_path = module.params.get('new_path')
+
+    changes = dict()
+
+    # Try to be nice; if we've already been renamed, exit quietly.
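+    # A previous run may already have renamed the certificate; when the lookup
+    # under the old name comes back empty, re-check under new_name so the
+    # module can report 'no changes' instead of failing.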
+ if not current_cert: + current_cert = get_server_certificate(new_name) + else: + if new_name: + changes['NewServerCertificateName'] = new_name + + cert_metadata = current_cert.get('server_certificate_metadata', {}) + + if not current_cert: + module.fail_json(msg='Unable to find certificate {0}'.format(name)) + current_path = cert_metadata.get('path', None) + if new_path and current_path != new_path: + changes['NewPath'] = new_path + + if not changes: + return False + + if module.check_mode: + return True + + try: + client.update_server_certificate( + aws_retry=True, + ServerCertificateName=name, + **changes + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name), + changes=changes) + + return True + + +def delete_server_certificate(current_cert): + if not current_cert: + return False + + if module.check_mode: + return True + + name = module.params.get('name') + + try: + result = client.delete_server_certificate( + aws_retry=True, + ServerCertificateName=name, + ) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name)) + + return True + + +def get_server_certificate(name): + if not name: + return None + try: + result = client.get_server_certificate( + aws_retry=True, + ServerCertificateName=name, + ) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name)) + cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate'))) + return cert + + +def load_data(): + cert = module.params.get('cert') + key = module.params.get('key') + cert_chain = module.params.get('cert_chain') -def load_data(cert, key, cert_chain): # if paths are provided rather than lookups read the files and return the contents if cert and os.path.isfile(cert): with open(cert, 'r') as cert_fh: @@ -259,9 +341,36 @@ def load_data(cert, key, cert_chain): return cert, key, cert_chain +def compatability_results(current_cert): + compat_results = dict() + + if not current_cert: + return compat_results + + metadata = current_cert.get('server_certificate_metadata', {}) + + if current_cert.get('certificate_body', None): + compat_results['cert_body'] = current_cert.get('certificate_body') + if current_cert.get('certificate_chain', None): + compat_results['chain_cert_body'] = current_cert.get('certificate_chain') + if metadata.get('arn', None): + compat_results['arn'] = metadata.get('arn') + if metadata.get('expiration', None): + compat_results['expiration_date'] = metadata.get('expiration') + if metadata.get('path', None): + compat_results['cert_path'] = metadata.get('path') + if metadata.get('server_certificate_name', None): + compat_results['name'] = metadata.get('server_certificate_name') + if metadata.get('upload_date', None): + compat_results['upload_date'] = metadata.get('upload_date') + + return compat_results + + def main(): global module + global client argument_spec = dict( state=dict(required=True, choices=['present', 'absent']), @@ -285,21 +394,9 @@ def main(): ['new_name', 'cert'], ['new_name', 'cert_chain'], ], - check_boto3=False, ) - if not HAS_BOTO: - module.fail_json(msg="Boto is required for 
this module") - - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - - try: - if region: - iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) - else: - iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get('state') name = module.params.get('name') @@ -313,28 +410,38 @@ def main(): 'The dup_ok module currently defaults to false, this will change in ' 'release 4.0.0 to true.', version='4.0.0', collection_name='community.aws') - if state == 'present' and not new_name and not new_path: - cert, key, cert_chain = load_data(cert=module.params.get('cert'), - key=module.params.get('key'), - cert_chain=module.params.get('cert_chain')) + current_cert = get_server_certificate(name) + + results = dict() + if state == 'absent': + changed = delete_server_certificate(current_cert) + if changed: + results['deleted_cert'] = name + else: + msg = 'Certificate with the name {0} already absent'.format(name) + results['msg'] = msg else: - cert = key = cert_chain = None + if new_name or new_path: + changed = rename_server_certificate(current_cert) + if new_name: + name = new_name + updated_cert = get_server_certificate(name) + elif current_cert: + changed = update_server_certificate(current_cert) + updated_cert = get_server_certificate(name) + else: + changed = create_server_certificate() + updated_cert = get_server_certificate(name) - orig_cert_names = [ctb['server_certificate_name'] for ctb in - iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list] - orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body - for thing in orig_cert_names] - if new_name == name: - new_name = None - if new_path == path: - new_path = None + results['server_certificate'] = updated_cert + compat_results = compatability_results(updated_cert) + if compat_results: + results.update(compat_results) - changed = False - try: - cert_action(module, iam, name, path, new_name, new_path, state, - cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok) - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err), debug=[cert, key]) + module.exit_json( + changed=changed, + **results + ) if __name__ == '__main__': From 5adb432127851d742b8f602d74d6b6a2812dc621 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 27 Sep 2021 19:57:10 +0200 Subject: [PATCH 297/683] Add support for check_mode --- iam_server_certificate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 1a5df57465c..b6cad710fb3 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -394,6 +394,7 @@ def main(): ['new_name', 'cert'], ['new_name', 'cert_chain'], ], + supports_check_mode=True, ) client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) From 2c1dbbf32e50e41999cb917accc053f2a99a75bc Mon Sep 17 00:00:00 2001 From: Mauricio Teixeira <1847440+badnetmask@users.noreply.github.com> Date: Mon, 3 May 2021 19:21:46 -0400 Subject: [PATCH 298/683] implement adding tags to brand new zones --- route53_zone.py | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/route53_zone.py b/route53_zone.py index cdc5538c027..6bc2443f2bd 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -5,7 +5,6 @@ 
 from __future__ import absolute_import, division, print_function
 __metaclass__ = type

-
 DOCUMENTATION = '''
 module: route53_zone
 short_description: add or delete Route53 zones
@@ -47,6 +46,18 @@
       - The reusable delegation set ID to be associated with the zone.
       - Note that you can't associate a reusable delegation set with a private hosted zone.
     type: str
+  tags:
+    description:
+      - A hash/dictionary of tags to add to the new zone or to add/remove from an existing one.
+    type: dict
+    version_added: 2.1.0
+  purge_tags:
+    description:
+      - Delete any tags not specified in the task that are on the zone.
+        This means you have to specify all the desired tags on each task affecting a zone.
+    default: false
+    type: bool
+    version_added: 2.1.0
 extends_documentation_fragment:
 - amazon.aws.aws
 - amazon.aws.ec2
@@ -77,6 +88,21 @@
     zone: example.com
     comment: reusable delegation set example
     delegation_set_id: A1BCDEF2GHIJKL
+
+- name: create a public zone with tags
+  community.aws.route53_zone:
+    zone: example.com
+    comment: this is an example
+    tags:
+      Owner: Ansible Team
+
+- name: modify a public zone, removing all previous tags and adding a new one
+  community.aws.route53_zone:
+    zone: example.com
+    comment: this is an example
+    tags:
+      Support: Ansible Community
+    purge_tags: true
 '''

 RETURN = '''
@@ -115,10 +141,15 @@
     returned: for public hosted zones, if they have been associated with a reusable delegation set
     type: str
     sample: "A1BCDEF2GHIJKL"
+tags:
+    description: tags associated with the zone
+    returned: when tags are defined
+    type: dict
 '''

 import time
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags

 try:
     from botocore.exceptions import BotoCoreError, ClientError
@@ -150,6 +181,8 @@ def create(module, client, matching_zones):
     vpc_region = module.params.get('vpc_region')
     comment = module.params.get('comment')
     delegation_set_id = module.params.get('delegation_set_id')
+    tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')

     if not zone_in.endswith('.'):
         zone_in += "."
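For reviewers unfamiliar with the Route 53 tagging API, the boto3 call that the
manage_tags() helper wraps looks roughly like the sketch below (a sketch only;
the zone ID and tag values are placeholders):

    import boto3

    client = boto3.client('route53')
    # A single call both upserts and removes tags on a hosted zone;
    # ResourceId is the zone ID without the '/hostedzone/' prefix.
    client.change_tags_for_resource(
        ResourceType='hostedzone',
        ResourceId='Z3M3LMPEXAMPLE',
        AddTags=[{'Key': 'Owner', 'Value': 'Ansible Team'}],
        RemoveTagKeys=['Support'],
    )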
@@ -164,6 +197,8 @@ def create(module, client, matching_zones): 'name': zone_in, 'delegation_set_id': delegation_set_id, 'zone_id': None, + 'tags': tags, + 'purge_tags': purge_tags, } if private_zone: @@ -287,6 +322,9 @@ def create_or_update_public(module, client, matching_zones, record): record['name'] = zone_details['Name'] record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') + if record['tags'] or record['purge_tags']: + changed = manage_tags(module, client, 'hostedzone', record, zone_details['Id'].replace('/hostedzone/', '')) + return changed, record @@ -394,6 +432,8 @@ def main(): comment=dict(default=''), hosted_zone_id=dict(), delegation_set_id=dict(), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), ) mutually_exclusive = [ From 73692b3951c673d899687c0e5729d22bae441491 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 12 Oct 2021 13:52:39 +0200 Subject: [PATCH 299/683] Rework route53 tagging logic a little --- route53_zone.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/route53_zone.py b/route53_zone.py index 6bc2443f2bd..334e6d62718 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -150,6 +150,7 @@ import time from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags +from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags try: from botocore.exceptions import BotoCoreError, ClientError @@ -197,8 +198,6 @@ def create(module, client, matching_zones): 'name': zone_in, 'delegation_set_id': delegation_set_id, 'zone_id': None, - 'tags': tags, - 'purge_tags': purge_tags, } if private_zone: @@ -206,6 +205,14 @@ def create(module, client, matching_zones): else: changed, result = create_or_update_public(module, client, matching_zones, record) + zone_id = result.get('zone_id') + if zone_id: + if tags is not None: + changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags) + result['tags'] = get_tags(module, client, 'hostedzone', zone_id) + else: + result['tags'] = tags + return changed, result @@ -322,9 +329,6 @@ def create_or_update_public(module, client, matching_zones, record): record['name'] = zone_details['Name'] record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') - if record['tags'] or record['purge_tags']: - changed = manage_tags(module, client, 'hostedzone', record, zone_details['Id'].replace('/hostedzone/', '')) - return changed, record From a84a0e0468cf2c1119fc325cc4730060810d37dc Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 1 Oct 2021 17:16:41 +0200 Subject: [PATCH 300/683] ecs_taskdefinition - add placement_constraints option Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 86319b8bb3c..de02167a050 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -518,6 +518,22 @@ - If I(launch_type=FARGATE), this field is required and is limited by the CPU. required: false type: str + placement_constraints: + version_added: 2.1.0 + description: + - Placement constraint objects to use for the task. + - You can specify a maximum of 10 constraints per task. + - Task placement constraints are not supported for tasks run on Fargate. 
+ required: false + type: list + elements: dict + suboptions: + type: + description: The type of constraint. + type: str + expression: + description: A cluster query language expression to apply to the constraint. + type: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -667,7 +683,7 @@ def describe_task(self, task_name): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: return None - def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory): + def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory, placement_constraints): validated_containers = [] # Ensures the number parameters are int as required by boto @@ -721,6 +737,8 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, params['requiresCompatibilities'] = [launch_type] if execution_role_arn: params['executionRoleArn'] = execution_role_arn + if placement_constraints: + params['placementConstraints'] = placement_constraints try: response = self.ecs.register_task_definition(aws_retry=True, **params) @@ -780,7 +798,8 @@ def main(): volumes=dict(required=False, type='list', elements='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), - memory=dict(required=False, type='str') + memory=dict(required=False, type='str'), + placement_constraints=dict(required=False, type='list', elements='dict') ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -801,8 +820,12 @@ def main(): network_mode = module.params['network_mode'] launch_type = module.params['launch_type'] - if launch_type == 'FARGATE' and network_mode != 'awsvpc': - module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") + placement_constraints = module.params['placement_constraints'] + if launch_type == 'FARGATE': + if network_mode != 'awsvpc': + module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") + if placement_constraints: + module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate") for container in module.params['containers']: if container.get('links') and network_mode == 'awsvpc': @@ -969,7 +992,8 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ volumes, module.params['launch_type'], module.params['cpu'], - module.params['memory']) + module.params['memory'], + module.params['placement_constraints'],) results['changed'] = True elif module.params['state'] == 'absent': From e5e3699da03a3f1715dc89df371b02d5fe57a354 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 8 Oct 2021 14:19:06 +0200 Subject: [PATCH 301/683] Address reviewer's comments Signed-off-by: Alina Buzachis --- ecs_taskdefinition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index de02167a050..41a8c9e610f 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -799,7 +799,7 @@ def main(): launch_type=dict(required=False, choices=['EC2', 'FARGATE']), cpu=dict(), memory=dict(required=False, type='str'), - placement_constraints=dict(required=False, type='list', elements='dict') + placement_constraints=dict(required=False, type='list', elements='dict', options=dict(type=dict(type='str'), expression=dict(type='str')))), ) module = AnsibleAWSModule(argument_spec=argument_spec, From 9d62f3a313168ba20ee0a191b0c06906d038b9a2 Mon Sep 17 
00:00:00 2001
From: Mark Chappell
Date: Sat, 2 Oct 2021 17:52:09 +0200
Subject: [PATCH 302/683] pep8 fixup - line too long

---
 ecs_taskdefinition.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index 41a8c9e610f..144426670a9 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -683,7 +683,8 @@ def describe_task(self, task_name):
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             return None

-    def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory, placement_constraints):
+    def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions,
+                      volumes, launch_type, cpu, memory, placement_constraints):
         validated_containers = []

         # Ensures the number parameters are int as required by boto
@@ -799,7 +800,8 @@ def main():
         launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
         cpu=dict(),
         memory=dict(required=False, type='str'),
-        placement_constraints=dict(required=False, type='list', elements='dict', options=dict(type=dict(type='str'), expression=dict(type='str')))),
+        placement_constraints=dict(required=False, type='list', elements='dict',
+                                   options=dict(type=dict(type='str'), expression=dict(type='str'))),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec,

From 4dd46c4c978061f66b40c48cfbd1cb61730d552f Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Thu, 14 Oct 2021 10:04:22 +0200
Subject: [PATCH 303/683] docs - Remove references to old (unsupported)
 versions of Ansible

---
 ec2_lc.py             | 5 +----
 ecs_taskdefinition.py | 1 -
 iam_role.py           | 2 +-
 route53.py            | 3 +--
 4 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/ec2_lc.py b/ec2_lc.py
index 9aaa96538db..2cdf0463863 100644
--- a/ec2_lc.py
+++ b/ec2_lc.py
@@ -21,7 +21,6 @@
 notes:
   - Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration after it is changed will not modify the
     launch configuration on AWS. You must create a new config and assign it to the ASG instead.
-  - encrypted volumes are supported on versions >= 2.4

 author:

@@ -188,9 +187,7 @@

 EXAMPLES = r'''

-# create a launch configuration using an AMI image and instance type as a basis
-
-- name: note that encrypted volumes are only supported in >= Ansible 2.4
+- name: create a launch configuration with an encrypted volume
   community.aws.ec2_lc:
     name: special
     image_id: ami-XXX

diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index 144426670a9..505a4207117 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -471,7 +471,6 @@
   network_mode:
     description:
       - The Docker networking mode to use for the containers in the task.
-      - C(awsvpc) mode was added in Ansible 2.5
       - Windows containers must use I(network_mode=default), which will utilize docker NAT networking.
      - Setting I(network_mode=default) for a Linux container will use C(bridge) mode.
     required: false

diff --git a/iam_role.py b/iam_role.py
index f5699edf8b5..356741e4ec5 100644
--- a/iam_role.py
+++ b/iam_role.py
@@ -43,7 +43,7 @@
     type: json
   managed_policies:
     description:
-      - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
+      - A list of managed policy ARNs or friendly names.
       - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
       - To embed an inline policy, use M(community.aws.iam_policy).
aliases: ['managed_policy'] diff --git a/route53.py b/route53.py index d4fe99531c0..964020257db 100644 --- a/route53.py +++ b/route53.py @@ -19,8 +19,7 @@ options: state: description: - - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option has been changed - to I(state) as default and the choices C(present) and C(absent) have been added, but I(command) still works as well. + - Specifies the state of the resource record. required: true aliases: [ 'command' ] choices: [ 'present', 'absent', 'get', 'create', 'delete' ] From 7829830dc51d2d0c5de0c60ca6942e0add01de40 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 14 Oct 2021 10:14:21 +0200 Subject: [PATCH 304/683] replace Ansible version deprecations with date deprecations (in line with the module.deprecate date entries) --- aws_kms.py | 10 +++++----- cloudfront_info.py | 2 +- elb_network_lb.py | 4 ++-- iam_policy.py | 13 +++++++------ iam_role.py | 4 ++-- 5 files changed, 17 insertions(+), 16 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index 10753f63584..05a520ac94a 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -44,7 +44,7 @@ - (deprecated) Grant or deny access. - Used for modifying the Key Policy rather than modifying a grant and only works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. default: grant choices: [ grant, deny ] aliases: @@ -56,7 +56,7 @@ - One of I(policy_role_name) or I(policy_role_arn) are required. - Used for modifying the Key Policy rather than modifying a grant and only works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. required: false aliases: - role_name @@ -67,7 +67,7 @@ - One of I(policy_role_name) or I(policy_role_arn) are required. - Used for modifying the Key Policy rather than modifying a grant and only works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. type: str required: false aliases: @@ -78,7 +78,7 @@ - Required when I(policy_mode=grant). - Used for modifying the Key Policy rather than modifying a grant and only works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. required: false aliases: - grant_types @@ -90,7 +90,7 @@ - Only cleans if changes are being made. - Used for modifying the Key Policy rather than modifying a grant and only works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. 
type: bool default: true aliases: diff --git a/cloudfront_info.py b/cloudfront_info.py index 767557bf6e9..df42ed0d1ac 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -173,7 +173,7 @@ # When the module is called as cloudfront_facts, return values are published # in ansible_facts['cloudfront'][] and can be used as follows. -# Note that this is deprecated and will stop working in Ansible 2.13. +# Note that this is deprecated and will stop working in a release after 2021-12-01. - name: Gather facts community.aws.cloudfront_facts: distribution: true diff --git a/elb_network_lb.py b/elb_network_lb.py index 47ac7b1d0d7..8de4b7692aa 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -108,7 +108,7 @@ description: - Create or destroy the load balancer. - The current default is C(absent). However, this behavior is inconsistent with other modules - and as such the default will change to C(present) in 2.14. + and as such the default will change to C(present) in a release after 2022-06-01. To maintain the existing behavior explicitly set I(state=absent). choices: [ 'present', 'absent' ] type: str @@ -452,7 +452,7 @@ def main(): if state is None: # See below, unless state==present we delete. Ouch. module.deprecate('State currently defaults to absent. This is inconsistent with other modules' - ' and the default will be changed to `present` in Ansible 2.14', + ' and the default will be changed to `present` in a release after 2022-06-01', date='2022-06-01', collection_name='community.aws') # Quick check of listeners parameters diff --git a/iam_policy.py b/iam_policy.py index 819ed369a31..570c37efa1b 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -36,7 +36,7 @@ description: - The path to the properly json formatted policy file. - Mutually exclusive with I(policy_json). - - This option has been deprecated and will be removed in 2.14. The existing behavior can be + - This option has been deprecated and will be removed in a release after 2022-06-01. The existing behavior can be reproduced by using the I(policy_json) option and reading the file using the lookup plugin. type: str policy_json: @@ -53,9 +53,10 @@ type: str skip_duplicates: description: - - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. If there is a match it will not make - a new policy object with the same rules. - - The current default is C(true). However, this behavior can be confusing and as such the default will change to C(false) in 2.14. To maintain + - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. + If there is a match it will not make a new policy object with the same rules. + - The current default is C(true). However, this behavior can be confusing and as such the default will + change to C(false) in a release after 2022-06-01. To maintain the existing behavior explicitly set I(skip_duplicates=true). 
    type: bool

@@ -304,13 +305,13 @@ def main():

     if (skip_duplicates is None):
         module.deprecate('The skip_duplicates behaviour has caused confusion and'
-                         ' will be disabled by default in Ansible 2.14',
+                         ' will be disabled by default in a release after 2022-06-01',
                          date='2022-06-01', collection_name='community.aws')
         skip_duplicates = True

     if module.params.get('policy_document'):
         module.deprecate('The policy_document option has been deprecated and'
-                         ' will be removed in Ansible 2.14',
+                         ' will be removed in a release after 2022-06-01',
                          date='2022-06-01', collection_name='community.aws')

     args = dict(

diff --git a/iam_role.py b/iam_role.py
index 356741e4ec5..948358b184e 100644
--- a/iam_role.py
+++ b/iam_role.py
@@ -57,7 +57,7 @@
   purge_policies:
     description:
       - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
-      - By default I(purge_policies=true).  In Ansible 2.14 this will be changed to I(purge_policies=false).
+      - By default I(purge_policies=true).  In a release after 2022-06-01 this will be changed to I(purge_policies=false).
     type: bool
     aliases: ['purge_policy', 'purge_managed_policies']
   state:
@@ -621,7 +621,7 @@ def main():
                               supports_check_mode=True)

     if module.params.get('purge_policies') is None:
-        module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
+        module.deprecate('After 2022-06-01 the default value of purge_policies will change from true to false.'
                          ' To maintain the existing behaviour explicitly set purge_policies=true',
                          date='2022-06-01', collection_name='community.aws')
     if module.params.get('boundary'):

From 99f0548d7a1db4f97adb98dcf45c1122278ece88 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Thu, 14 Oct 2021 09:27:08 +0200
Subject: [PATCH 305/683] ec2_win_password - migrate to boto3

---
 ec2_win_password.py | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/ec2_win_password.py b/ec2_win_password.py
index 3ed0afb79d4..00bd603ed97 100644
--- a/ec2_win_password.py
+++ b/ec2_win_password.py
@@ -55,7 +55,6 @@

 requirements:
 - cryptography
-- boto >= 2.49.0

 notes:
     - As of Ansible 2.4, this module requires the python cryptography module rather than the
       older pycrypto module.
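A minimal standalone sketch of the boto3 call this migration moves to (the region
and instance ID below are placeholders):

    import boto3

    client = boto3.client('ec2', region_name='us-east-1')
    # PasswordData is base64-encoded and RSA-encrypted with the instance's
    # launch key pair; it stays empty until Windows has generated the password.
    data = client.get_password_data(InstanceId='i-0123456789abcdef0')['PasswordData']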
@@ -110,11 +109,15 @@ except ImportError: HAS_CRYPTOGRAPHY = False +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + from ansible.module_utils._text import to_bytes from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def setup_module_object(): @@ -130,6 +133,14 @@ def setup_module_object(): return module +def _get_password(module, client, instance_id): + try: + data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData'] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg='Failed to get password data') + return data + + def ec2_win_password(module): instance_id = module.params.get('instance_id') key_file = module.params.get('key_file') @@ -144,21 +155,21 @@ def ec2_win_password(module): wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') - ec2 = ec2_connect(module) + client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) if wait: start = datetime.datetime.now() end = start + datetime.timedelta(seconds=wait_timeout) while datetime.datetime.now() < end: - data = ec2.get_password_data(instance_id) + data = _get_password(module, client, instance_id) decoded = b64decode(data) if not decoded: time.sleep(5) else: break else: - data = ec2.get_password_data(instance_id) + data = _get_password(module, client, instance_id) decoded = b64decode(data) if wait and datetime.datetime.now() >= end: @@ -198,9 +209,6 @@ def ec2_win_password(module): def main(): module = setup_module_object() - if not HAS_BOTO: - module.fail_json(msg='Boto required for this module.') - if not HAS_CRYPTOGRAPHY: module.fail_json(msg='cryptography package required for this module.') From 8d81026a78cede10417afb83a5da97847fb72965 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 14 Oct 2021 09:28:28 +0200 Subject: [PATCH 306/683] Clean up docs and make key_file and key_data mutually exclusive as defined by the docs --- ec2_win_password.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ec2_win_password.py b/ec2_win_password.py index 00bd603ed97..7f977360e80 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -10,10 +10,9 @@ --- module: ec2_win_password version_added: 1.0.0 -short_description: Gets the default administrator password for ec2 windows instances +short_description: Gets the default administrator password for EC2 Windows instances description: - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). - - This module has a dependency on python-boto. author: "Rick Mendes (@rickmendes)" options: instance_id: @@ -55,9 +54,6 @@ requirements: - cryptography -notes: - - As of Ansible 2.4, this module requires the python cryptography module rather than the - older pycrypto module. 
 '''

 EXAMPLES = '''

@@ -129,7 +125,8 @@ def setup_module_object():
         wait=dict(type='bool', default=False, required=False),
         wait_timeout=dict(default=120, required=False, type='int'),
     )
-    module = AnsibleAWSModule(argument_spec=argument_spec)
+    mutually_exclusive = [['key_file', 'key_data']]
+    module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)

     return module

From 85a1d6a9957454395105aa53f66b0ba2ba1b0593 Mon Sep 17 00:00:00 2001
From: mark-woolley
Date: Fri, 24 Sep 2021 23:45:23 +0100
Subject: [PATCH 307/683] Add support for billing_mode

---
 dynamodb_table.py | 74 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 54 insertions(+), 20 deletions(-)

diff --git a/dynamodb_table.py b/dynamodb_table.py
index 86c08f90934..8010a0e024b 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -49,6 +49,12 @@
       - Defaults to C('STRING') when creating a new range key.
     choices: ['STRING', 'NUMBER', 'BINARY']
     type: str
+  billing_mode:
+    description:
+      - Controls whether provisioned or on-demand tables are created.
+    choices: ['PROVISIONED', 'PAY_PER_REQUEST']
+    default: 'PROVISIONED'
+    type: str
   read_capacity:
     description:
      - Read throughput capacity (units) to provision.
@@ -165,6 +171,14 @@
     read_capacity: 10
     write_capacity: 10

+- name: Create pay-per-request table
+  community.aws.dynamodb_table:
+    name: my-table
+    region: us-east-1
+    hash_key_name: id
+    hash_key_type: STRING
+    billing_mode: PAY_PER_REQUEST
+
 - name: set index on existing dynamo table
   community.aws.dynamodb_table:
     name: my-table
@@ -367,7 +381,10 @@ def compatability_results(current_table):
     if not current_table:
         return dict()

-    throughput = current_table.get('provisioned_throughput', {})
+    billing_mode = current_table.get('billing_mode')
+
+    if billing_mode == "PROVISIONED":
+        throughput = current_table.get('provisioned_throughput', {})

     primary_indexes = _decode_primary_index(current_table)

@@ -394,12 +411,13 @@
         range_key_name=range_key_name,
         range_key_type=range_key_type,
         indexes=indexes,
+        billing_mode=billing_mode,
         read_capacity=throughput.get('read_capacity_units', None),
+        write_capacity=throughput.get('write_capacity_units', None),
         region=module.region,
         table_name=current_table.get('table_name', None),
         table_status=current_table.get('table_status', None),
         tags=current_table.get('tags', {}),
-        write_capacity=throughput.get('write_capacity_units', None),
     )

     return compat_results

@@ -571,6 +589,13 @@ def _throughput_changes(current_table, params=None):

 def _generate_global_indexes():
     index_exists = dict()
     indexes = list()
+    billing_mode = module.params.get('billing_mode')
+
+    if billing_mode == "PROVISIONED":
+        is_provisioned = True
+    else:
+        is_provisioned = False
+
     for index in module.params.get('indexes'):
         if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']:
             continue
@@ -579,7 +604,7 @@
             module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name))
         # Convert the type name to upper case and remove the global_
         index['type'] = index['type'].upper()[7:]
-        index = _generate_index(index)
+        index = _generate_index(index, include_throughput=is_provisioned)
         index_exists[name] = True
         indexes.append(index)

@@ -589,6 +614,7 @@ def _generate_local_indexes():
     index_exists = dict()
     indexes = list()
+
     for index in module.params.get('indexes'):
         index = dict()
         if index.get('type') not in ['all', 'include', 'keys_only']:

@@ -708,20 +734,22 @@ def
_local_index_changes(current_table): def _update_table(current_table): changes = dict() additional_global_index_changes = list() - - throughput_changes = _throughput_changes(current_table) - if throughput_changes: - changes['ProvisionedThroughput'] = throughput_changes - - global_index_changes = _global_index_changes(current_table) - if global_index_changes: - changes['GlobalSecondaryIndexUpdates'] = global_index_changes - # Only one index can be changed at a time, pass the first during the - # main update and deal with the others on a slow retry to wait for - # completion - if len(global_index_changes) > 1: - changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] - additional_global_index_changes = global_index_changes[1:] + billing_mode = module.params.get('billing_mode') + + if billing_mode == "PROVISIONED": + throughput_changes = _throughput_changes(current_table) + if throughput_changes: + changes['ProvisionedThroughput'] = throughput_changes + + global_index_changes = _global_index_changes(current_table) + if global_index_changes: + changes['GlobalSecondaryIndexUpdates'] = global_index_changes + # Only one index can be changed at a time, pass the first during the + # main update and deal with the others on a slow retry to wait for + # completion + if len(global_index_changes) > 1: + changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] + additional_global_index_changes = global_index_changes[1:] local_index_changes = _local_index_changes(current_table) if local_index_changes: @@ -818,6 +846,7 @@ def update_table(current_table): def create_table(): table_name = module.params.get('name') hash_key_name = module.params.get('hash_key_name') + billing_mode = module.params.get('billing_mode') tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) @@ -827,7 +856,9 @@ def create_table(): if module.check_mode: return True - throughput = _generate_throughput() + if billing_mode == "PROVISIONED": + throughput = _generate_throughput() + attributes = _generate_attributes() key_schema = _generate_schema() local_indexes = _generate_local_indexes() @@ -837,13 +868,15 @@ def create_table(): TableName=table_name, AttributeDefinitions=attributes, KeySchema=key_schema, - ProvisionedThroughput=throughput, Tags=tags, + BillingMode=billing_mode # TODO (future) - # BillingMode, # StreamSpecification, # SSESpecification, ) + + if billing_mode == "PROVISIONED": + params['ProvisionedThroughput'] = throughput if local_indexes: params['LocalSecondaryIndexes'] = local_indexes if global_indexes: @@ -919,6 +952,7 @@ def main(): hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), range_key_name=dict(type='str'), range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), + billing_mode=dict(default='PROVISIONED', type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), read_capacity=dict(type='int'), write_capacity=dict(type='int'), indexes=dict(default=[], type='list', elements='dict', options=index_options), From a7ad73fd02ea61813b897aa6047b97b51adeb526 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Sat, 25 Sep 2021 00:16:05 +0100 Subject: [PATCH 308/683] fix assignment issue --- dynamodb_table.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 8010a0e024b..2cd380f2107 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -412,14 +412,16 @@ def compatability_results(current_table): range_key_type=range_key_type, indexes=indexes, billing_mode=billing_mode, - 
read_capacity=throughput.get('read_capacity_units', None), - write_capacity=throughput.get('write_capacity_units', None), region=module.region, table_name=current_table.get('table_name', None), table_status=current_table.get('table_status', None), tags=current_table.get('tags', {}), ) + if billing_mode == "PROVISIONED": + compat_results['read_capacity'] = throughput.get('read_capacity_units', None) + compat_results['write_capacity'] = throughput.get('write_capacity_units', None) + return compat_results From 7636bb03c009a54375365f8a37f26ac963666cc9 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Mon, 11 Oct 2021 17:32:23 +0100 Subject: [PATCH 309/683] fixes & support move between provisioned & pay-per-request --- dynamodb_table.py | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 2cd380f2107..c0823bfbe8b 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -2,10 +2,6 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - DOCUMENTATION = r''' --- module: dynamodb_table @@ -455,6 +451,12 @@ def get_dynamodb_table(): table['size'] = table['table_size_bytes'] table['tags'] = tags + # billing_mode_summary is only set if the table is already PAY_PER_REQUEST + if 'billing_mode_summary' in table: + table['billing_mode'] = table['billing_mode_summary']['billing_mode'] + else: + table['billing_mode'] = "PROVISIONED" + # convert indexes into something we can easily search against attributes = table['attribute_definitions'] global_index_map = dict() @@ -736,22 +738,23 @@ def _local_index_changes(current_table): def _update_table(current_table): changes = dict() additional_global_index_changes = list() - billing_mode = module.params.get('billing_mode') - if billing_mode == "PROVISIONED": - throughput_changes = _throughput_changes(current_table) - if throughput_changes: - changes['ProvisionedThroughput'] = throughput_changes - - global_index_changes = _global_index_changes(current_table) - if global_index_changes: - changes['GlobalSecondaryIndexUpdates'] = global_index_changes - # Only one index can be changed at a time, pass the first during the - # main update and deal with the others on a slow retry to wait for - # completion - if len(global_index_changes) > 1: - changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] - additional_global_index_changes = global_index_changes[1:] + throughput_changes = _throughput_changes(current_table) + if throughput_changes: + changes['ProvisionedThroughput'] = throughput_changes + + if current_table.get('billing_mode') != module.params.get('billing_mode'): + changes['BillingMode'] = billing_mode + + global_index_changes = _global_index_changes(current_table) + if global_index_changes: + changes['GlobalSecondaryIndexUpdates'] = global_index_changes + # Only one index can be changed at a time, pass the first during the + # main update and deal with the others on a slow retry to wait for + # completion + if len(global_index_changes) > 1: + changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] + additional_global_index_changes = global_index_changes[1:] local_index_changes = _local_index_changes(current_table) if local_index_changes: @@ -961,7 +964,7 @@ def main(): tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), - 
wait_timeout=dict(default=120, type='int', aliases=['wait_for_active_timeout']), + wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']), ) module = AnsibleAWSModule( From 568a3e4bc11c6115e8f4b029e2be90357d5e3e00 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Mon, 11 Oct 2021 17:58:16 +0100 Subject: [PATCH 310/683] fix missed update --- dynamodb_table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index c0823bfbe8b..fe4366e3b6e 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -453,7 +453,7 @@ def get_dynamodb_table(): # billing_mode_summary is only set if the table is already PAY_PER_REQUEST if 'billing_mode_summary' in table: - table['billing_mode'] = table['billing_mode_summary']['billing_mode'] + table['billing_mode'] = "PAY_PER_REQUEST" else: table['billing_mode'] = "PROVISIONED" @@ -744,7 +744,7 @@ def _update_table(current_table): changes['ProvisionedThroughput'] = throughput_changes if current_table.get('billing_mode') != module.params.get('billing_mode'): - changes['BillingMode'] = billing_mode + changes['BillingMode'] = module.params.get('billing_mode') global_index_changes = _global_index_changes(current_table) if global_index_changes: From 6d837c78bff494e3d7a4087ae281877f0f8b7669 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Mon, 11 Oct 2021 20:40:40 +0100 Subject: [PATCH 311/683] fixes --- dynamodb_table.py | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index fe4366e3b6e..b1212cf6647 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -132,7 +132,7 @@ description: - How long (in seconds) to wait for creation / update / deletion to complete. 
aliases: ['wait_for_active_timeout'] - default: 120 + default: 300 type: int wait: description: @@ -593,12 +593,6 @@ def _throughput_changes(current_table, params=None): def _generate_global_indexes(): index_exists = dict() indexes = list() - billing_mode = module.params.get('billing_mode') - - if billing_mode == "PROVISIONED": - is_provisioned = True - else: - is_provisioned = False for index in module.params.get('indexes'): if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: @@ -608,7 +602,7 @@ def _generate_global_indexes(): module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) # Convert the type name to upper case and remove the global_ index['type'] = index['type'].upper()[7:] - index = _generate_index(index, include_throughput=is_provisioned) + index = _generate_index(index) index_exists[name] = True indexes.append(index) @@ -666,7 +660,7 @@ def _generate_local_index_map(current_table): return local_index_map -def _generate_index(index, include_throughput=True): +def _generate_index(index): key_schema = _generate_schema(index) throughput = _generate_throughput(index) non_key_attributes = index['includes'] or [] @@ -689,7 +683,8 @@ def _generate_index(index, include_throughput=True): KeySchema=key_schema, Projection=projection, ) - if include_throughput: + + if module.params.get('billing_mode') == "PROVISIONED": idx['ProvisionedThroughput'] = throughput return idx @@ -717,13 +712,15 @@ def _global_index_changes(current_table): # rather than dropping other changes on the floor _current = current_global_index_map[name] _new = global_index_map[name] - change = dict(_throughput_changes(_current, _new)) - if change: - update = dict( - IndexName=name, - ProvisionedThroughput=change, - ) - index_changes.append(dict(Update=update)) + + if module.params.get('billing_mode') == "PROVISIONED": + change = dict(_throughput_changes(_current, _new)) + if change: + update = dict( + IndexName=name, + ProvisionedThroughput=change, + ) + index_changes.append(dict(Update=update)) return index_changes From 2dc5cefade46c999a40fa428b8fdebd5fa4f7b02 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Mon, 11 Oct 2021 20:47:53 +0100 Subject: [PATCH 312/683] re-add removal of param --- dynamodb_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index b1212cf6647..c353ada60c5 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -660,7 +660,7 @@ def _generate_local_index_map(current_table): return local_index_map -def _generate_index(index): +def _generate_index(index, include_throughput=True): key_schema = _generate_schema(index) throughput = _generate_throughput(index) non_key_attributes = index['includes'] or [] From 499549a503bb67713664ed92f88f527a60910f0b Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Tue, 12 Oct 2021 09:25:09 +0100 Subject: [PATCH 313/683] re-add removed section --- dynamodb_table.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index c353ada60c5..b2621dd79af 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -2,6 +2,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + DOCUMENTATION = r''' --- module: dynamodb_table @@ -684,7 +688,7 @@ def _generate_index(index, include_throughput=True): Projection=projection, ) - if 
module.params.get('billing_mode') == "PROVISIONED":
+    if include_throughput and module.params.get('billing_mode') == "PROVISIONED":
         idx['ProvisionedThroughput'] = throughput

     return idx

From 7540f953725665a4342222df1001d348654eac78 Mon Sep 17 00:00:00 2001
From: mark-woolley
Date: Tue, 12 Oct 2021 11:40:57 +0100
Subject: [PATCH 314/683] multiple indexes can now be updated at once

---
 dynamodb_table.py | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/dynamodb_table.py b/dynamodb_table.py
index b2621dd79af..a0eb632be91 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -455,9 +455,10 @@ def get_dynamodb_table():
         table['size'] = table['table_size_bytes']
         table['tags'] = tags

-        # billing_mode_summary is only set if the table is already PAY_PER_REQUEST
+        # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST
+        # and when updating the billing_mode
         if 'billing_mode_summary' in table:
-            table['billing_mode'] = "PAY_PER_REQUEST"
+            table['billing_mode'] = table['billing_mode_summary']['billing_mode']
         else:
             table['billing_mode'] = "PROVISIONED"

@@ -750,12 +751,6 @@ def _update_table(current_table):
     global_index_changes = _global_index_changes(current_table)
     if global_index_changes:
         changes['GlobalSecondaryIndexUpdates'] = global_index_changes
-        # Only one index can be changed at a time, pass the first during the
-        # main update and deal with the others on a slow retry to wait for
-        # completion
-        if len(global_index_changes) > 1:
-            changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]]
-            additional_global_index_changes = global_index_changes[1:]

     local_index_changes = _local_index_changes(current_table)
     if local_index_changes:

From b477d456c62f361cdcd991a01e5977d833d3a565 Mon Sep 17 00:00:00 2001
From: mark-woolley
Date: Tue, 12 Oct 2021 14:26:26 +0100
Subject: [PATCH 315/683] indexes indexes

---
 dynamodb_table.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/dynamodb_table.py b/dynamodb_table.py
index a0eb632be91..299e684b4a1 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -751,6 +751,14 @@ def _update_table(current_table):
     global_index_changes = _global_index_changes(current_table)
     if global_index_changes:
         changes['GlobalSecondaryIndexUpdates'] = global_index_changes
+        # Only one index can be changed at a time except if changing the billing mode, pass the first during the
+        # main update and deal with the others on a slow retry to wait for
+        # completion
+
+        if current_table.get('billing_mode') == module.params.get('billing_mode'):
+            if len(global_index_changes) > 1:
+                changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]]
+                additional_global_index_changes = global_index_changes[1:]

     local_index_changes = _local_index_changes(current_table)
     if local_index_changes:

From 2b4431e4013d0f4d1815107d0b8138571a7afbbf Mon Sep 17 00:00:00 2001
From: mark-woolley
Date: Tue, 12 Oct 2021 21:33:46 +0100
Subject: [PATCH 316/683] PR feedback

---
 dynamodb_table.py | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/dynamodb_table.py b/dynamodb_table.py
index 299e684b4a1..a9e19268db5 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -53,7 +53,6 @@
     description:
       - Controls whether provisioned or on-demand tables are created.
choices: ['PROVISIONED', 'PAY_PER_REQUEST'] - default: 'PROVISIONED' type: str read_capacity: description: @@ -383,9 +382,6 @@ def compatability_results(current_table): billing_mode = current_table.get('billing_mode') - if billing_mode == "PROVISIONED": - throughput = current_table.get('provisioned_throughput', {}) - primary_indexes = _decode_primary_index(current_table) hash_key_name = primary_indexes.get('hash_key_name') @@ -419,6 +415,7 @@ def compatability_results(current_table): ) if billing_mode == "PROVISIONED": + throughput = current_table.get('provisioned_throughput', {}) compat_results['read_capacity'] = throughput.get('read_capacity_units', None) compat_results['write_capacity'] = throughput.get('write_capacity_units', None) @@ -595,10 +592,15 @@ def _throughput_changes(current_table, params=None): return dict() -def _generate_global_indexes(): +def _generate_global_indexes(billing_mode): index_exists = dict() indexes = list() + include_throughput = True + + if billing_mode == "PAY_PER_REQUEST": + include_throughput = False + for index in module.params.get('indexes'): if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: continue @@ -607,7 +609,7 @@ def _generate_global_indexes(): module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) # Convert the type name to upper case and remove the global_ index['type'] = index['type'].upper()[7:] - index = _generate_index(index) + index = _generate_index(index, include_throughput) index_exists[name] = True indexes.append(index) @@ -689,7 +691,7 @@ def _generate_index(index, include_throughput=True): Projection=projection, ) - if include_throughput and module.params.get('billing_mode') == "PROVISIONED": + if include_throughput: idx['ProvisionedThroughput'] = throughput return idx @@ -704,11 +706,19 @@ def _global_index_changes(current_table): current_global_index_map = current_table['_global_index_map'] global_index_map = _generate_global_index_map(current_table) + current_billing_mode = current_table.get('billing_mode') + + include_throughput = True + + if module.params.get('billing_mode', current_billing_mode) == "PAY_PER_REQUEST": + include_throughput = False + index_changes = list() # TODO (future) it would be nice to add support for deleting an index for name in global_index_map: - idx = dict(_generate_index(global_index_map[name])) + + idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput)) if name not in current_global_index_map: index_changes.append(dict(Create=idx)) else: @@ -718,7 +728,7 @@ def _global_index_changes(current_table): _current = current_global_index_map[name] _new = global_index_map[name] - if module.params.get('billing_mode') == "PROVISIONED": + if include_throughput: change = dict(_throughput_changes(_current, _new)) if change: update = dict( @@ -745,8 +755,11 @@ def _update_table(current_table): if throughput_changes: changes['ProvisionedThroughput'] = throughput_changes - if current_table.get('billing_mode') != module.params.get('billing_mode'): - changes['BillingMode'] = module.params.get('billing_mode') + current_billing_mode = current_table.get('billing_mode') + new_billing_mode = module.params.get('billing_mode', current_billing_mode) + + if current_billing_mode != new_billing_mode: + changes['BillingMode'] = new_billing_mode global_index_changes = _global_index_changes(current_table) if global_index_changes: @@ -755,7 +768,7 @@ def _update_table(current_table): # main update and deal with the others on a slow retry to 
wait for # completion - if current_table.get('billing_mode') == module.params.get('billing_mode'): + if current_billing_mode == new_billing_mode: if len(global_index_changes) > 1: changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] additional_global_index_changes = global_index_changes[1:] @@ -871,7 +884,7 @@ def create_table(): attributes = _generate_attributes() key_schema = _generate_schema() local_indexes = _generate_local_indexes() - global_indexes = _generate_global_indexes() + global_indexes = _generate_global_indexes(billing_mode) params = dict( TableName=table_name, @@ -961,7 +974,7 @@ def main(): hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), range_key_name=dict(type='str'), range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - billing_mode=dict(default='PROVISIONED', type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), + billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), read_capacity=dict(type='int'), write_capacity=dict(type='int'), indexes=dict(default=[], type='list', elements='dict', options=index_options), From a8912ddd5459cb32c78d0872eba34cf29306023d Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Wed, 13 Oct 2021 10:33:07 +0100 Subject: [PATCH 317/683] fix creation when billing_mode is omitted --- dynamodb_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index a9e19268db5..3f19afd4f89 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -868,7 +868,7 @@ def update_table(current_table): def create_table(): table_name = module.params.get('name') hash_key_name = module.params.get('hash_key_name') - billing_mode = module.params.get('billing_mode') + billing_mode = module.params.get('billing_mode', 'PROVISIONED') tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) From 83a702436c2e8f8c26f32d8d787d775986f9650d Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Wed, 13 Oct 2021 17:39:07 +0100 Subject: [PATCH 318/683] handle the fact the param is always set but to None --- dynamodb_table.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 3f19afd4f89..8696161c58e 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -868,7 +868,10 @@ def update_table(current_table): def create_table(): table_name = module.params.get('name') hash_key_name = module.params.get('hash_key_name') - billing_mode = module.params.get('billing_mode', 'PROVISIONED') + billing_mode = module.params.get('billing_mode') + + if billing_mode == None: + billing_mode = "PROVISIONED" tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) From 264b0d718199fa591576b19af85df1545e9bd250 Mon Sep 17 00:00:00 2001 From: mark-woolley Date: Wed, 13 Oct 2021 20:13:27 +0100 Subject: [PATCH 319/683] tests fix --- dynamodb_table.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 8696161c58e..a8725ab7d32 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -708,9 +708,14 @@ def _global_index_changes(current_table): current_billing_mode = current_table.get('billing_mode') + if module.params.get('billing_mode') is None: + billing_mode = current_billing_mode + else: + billing_mode = module.params.get('billing_mode') + include_throughput = True - if module.params.get('billing_mode', current_billing_mode) == "PAY_PER_REQUEST": + if billing_mode == "PAY_PER_REQUEST": include_throughput = False index_changes = list() @@ 
-756,7 +761,10 @@ def _update_table(current_table):
         changes['ProvisionedThroughput'] = throughput_changes

     current_billing_mode = current_table.get('billing_mode')
-    new_billing_mode = module.params.get('billing_mode', current_billing_mode)
+    new_billing_mode = module.params.get('billing_mode')
+
+    if new_billing_mode is None:
+        new_billing_mode = current_billing_mode

     if current_billing_mode != new_billing_mode:
         changes['BillingMode'] = new_billing_mode

From 041fd28c5ec152928df14fd9c839fe272877de47 Mon Sep 17 00:00:00 2001
From: mark-woolley
Date: Wed, 13 Oct 2021 22:20:22 +0100
Subject: [PATCH 320/683] lint fix

---
 dynamodb_table.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dynamodb_table.py b/dynamodb_table.py
index a8725ab7d32..98d6fa632f9 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -878,7 +878,7 @@ def create_table():
     hash_key_name = module.params.get('hash_key_name')
     billing_mode = module.params.get('billing_mode')

-    if billing_mode == None:
+    if billing_mode is None:
         billing_mode = "PROVISIONED"

     tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {})

From 4cb919b3c9dda7944862ccea0181a04056b25256 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Sun, 17 Oct 2021 11:01:37 +0200
Subject: [PATCH 321/683] ec2_eip tagging support (#332)

ec2_eip tagging support

SUMMARY
Add support for tagging EIPs on creation.
Todo:
- EIP Tagging Tests
- Retry decorator
Note: While it is now possible to pass tagging information into the association call, that option was only added in Dec 2020, so it won't work for most folks.
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
ec2_eip
ec2_eip_info
ADDITIONAL INFORMATION
fixes: #331

Reviewed-by: Rick Mendes
Reviewed-by: None
---
 ec2_eip.py      | 22 +++++++++++++++++++++-
 ec2_eip_info.py | 15 +++++++++------
 2 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/ec2_eip.py b/ec2_eip.py
index 927d31551b7..e38e941661f 100644
--- a/ec2_eip.py
+++ b/ec2_eip.py
@@ -64,6 +64,16 @@
       network interface or instance to be re-associated with the specified instance
       or interface.
     default: false
     type: bool
+  tags:
+    description: A dictionary of tags to apply to the EIP.
+    type: dict
+    version_added: 2.1.0
+  purge_tags:
+    description: Whether the I(tags) argument should cause tags not in the
+      dictionary to be removed.
+ default: True + type: bool + version_added: 2.1.0 tag_name: description: - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse @@ -227,6 +237,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): @@ -247,7 +258,7 @@ def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, params['AllocationId'] = address['AllocationId'] else: params['PublicIp'] = address['PublicIp'] - res = ec2.associate_address(**params) + res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) module.fail_json_aws(e, msg=msg) @@ -535,6 +546,8 @@ def main(): allow_reassociation=dict(type='bool', default=False), wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='community.aws'), private_ip_address=dict(), + tags=dict(required=False, type='dict'), + purge_tags=dict(required=False, type='bool', default=True), tag_name=dict(), tag_value=dict(), public_ipv4_pool=dict() @@ -563,6 +576,8 @@ def main(): tag_name = module.params.get('tag_name') tag_value = module.params.get('tag_value') public_ipv4_pool = module.params.get('public_ipv4_pool') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') if instance_id: is_instance = True @@ -575,6 +590,7 @@ def main(): module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") is_instance = False + # Tags for *searching* for an EIP. 
tag_dict = generate_tag_dict(module, tag_name, tag_value)

     try:
@@ -603,6 +619,10 @@ def main():
                     'public_ip': address['PublicIp'],
                     'allocation_id': address['AllocationId']
                 }
+
+            result['changed'] |= ensure_ec2_tags(
+                ec2, module, result['allocation_id'],
+                resource_type='elastic-ip', tags=tags, purge_tags=purge_tags)
         else:
             if device_id:
                 disassociated = ensure_absent(
diff --git a/ec2_eip_info.py b/ec2_eip_info.py
index 553930db67a..e38735c087e 100644
--- a/ec2_eip_info.py
+++ b/ec2_eip_info.py
@@ -97,22 +97,25 @@
 '''

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
-                                                                     boto3_tag_list_to_ansible_dict,
-                                                                     camel_dict_to_snake_dict,
-                                                                     )
 try:
     from botocore.exceptions import (BotoCoreError, ClientError)
 except ImportError:
     pass  # caught by imported AnsibleAWSModule

+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+

 def get_eips_details(module):
-    connection = module.client('ec2')
+    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
     filters = module.params.get("filters")
     try:
         response = connection.describe_addresses(
+            aws_retry=True,
             Filters=ansible_dict_to_boto3_filter_list(filters)
         )
     except (BotoCoreError, ClientError) as e:

From de12a74ec1e8a70899afad252180625fe42f02c3 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Mon, 18 Oct 2021 08:30:22 +0200
Subject: [PATCH 322/683] Bulk update AWSRetry.backoff to AWSRetry.jittered_backoff (#764)

Bulk update AWSRetry.backoff to AWSRetry.jittered_backoff

SUMMARY
CloudRetry.backoff has been deprecated in favour of CloudRetry.{exponential,jittered}_backoff; bulk update AWSRetry.backoff usage.
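For illustration, a minimal before/after sketch of the mechanical substitution this patch applies across the listed modules (the function below is hypothetical, not taken from any one module; note that the keyword argument is renamed from tries to retries):

    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

    # Deprecated pattern being replaced:
    @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
    def describe_widgets(connection):
        return connection.describe_widgets()

    # Replacement pattern used throughout this patch:
    @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
    def describe_widgets(connection):
        return connection.describe_widgets()
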
ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_config_delivery_channel.py plugins/modules/aws_direct_connect_confirm_connection.py plugins/modules/aws_direct_connect_connection.py plugins/modules/aws_direct_connect_link_aggregation_group.py plugins/modules/aws_direct_connect_virtual_interface.py plugins/modules/aws_inspector_target.py plugins/modules/aws_kms.py plugins/modules/aws_kms_info.py plugins/modules/cloudformation_stack_set.py plugins/modules/dms_endpoint.py plugins/modules/dms_replication_subnet_group.py plugins/modules/ec2_asg.py plugins/modules/ec2_elb_info.py plugins/modules/ecs_service_info.py plugins/modules/iam_managed_policy.py plugins/modules/iam_saml_federation.py plugins/modules/rds.py ADDITIONAL INFORMATION Reviewed-by: None Reviewed-by: None --- aws_config_delivery_channel.py | 2 +- aws_direct_connect_confirm_connection.py | 4 +-- aws_direct_connect_connection.py | 10 +++--- aws_direct_connect_link_aggregation_group.py | 2 +- aws_direct_connect_virtual_interface.py | 2 +- aws_inspector_target.py | 2 +- aws_kms.py | 16 ++++----- aws_kms_info.py | 16 ++++----- cloudformation_stack_set.py | 2 +- dms_endpoint.py | 12 +++---- dms_replication_subnet_group.py | 10 +++--- ec2_asg.py | 38 ++++++++++---------- ec2_elb_info.py | 6 ++-- ecs_service_info.py | 4 +-- iam_managed_policy.py | 2 +- iam_saml_federation.py | 10 +++--- rds.py | 4 +-- 17 files changed, 71 insertions(+), 71 deletions(-) diff --git a/aws_config_delivery_channel.py b/aws_config_delivery_channel.py index e6e9d40e62c..fb3851a4ecc 100644 --- a/aws_config_delivery_channel.py +++ b/aws_config_delivery_channel.py @@ -79,7 +79,7 @@ # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid -retry_unavailable_iam_on_put_delivery = AWSRetry.backoff( +retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( catch_extra_error_codes=['InsufficientDeliveryPolicyException'], ) diff --git a/aws_direct_connect_confirm_connection.py b/aws_direct_connect_confirm_connection.py index 7ea8527db72..b583def09d9 100644 --- a/aws_direct_connect_confirm_connection.py +++ b/aws_direct_connect_confirm_connection.py @@ -69,10 +69,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} +retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def describe_connections(client, params): return client.describe_connections(**params) diff --git a/aws_direct_connect_connection.py b/aws_direct_connect_connection.py index 98afd701f3d..3764b1c7802 100644 --- a/aws_direct_connect_connection.py +++ b/aws_direct_connect_connection.py @@ -167,7 +167,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} +retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} def connection_status(client, connection_id): @@ -179,7 +179,7 @@ def 
connection_exists(client, connection_id=None, connection_name=None, verify=T if connection_id: params['connectionId'] = connection_id try: - response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params) + response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: if connection_id: msg = "Failed to describe DirectConnect ID {0}".format(connection_id) @@ -227,7 +227,7 @@ def create_connection(client, location, bandwidth, name, lag_id): params['lagId'] = lag_id try: - connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params) + connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), last_traceback=traceback.format_exc(), @@ -242,7 +242,7 @@ def changed_properties(current_status, location, bandwidth): return current_bandwidth != bandwidth or current_location != location -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def update_associations(client, latest_state, connection_id, lag_id): changed = False if 'lagId' in latest_state and lag_id != latest_state['lagId']: @@ -277,7 +277,7 @@ def ensure_present(client, connection_id, connection_name, location, bandwidth, return False, connection_id -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def ensure_absent(client, connection_id): changed = False if connection_id: diff --git a/aws_direct_connect_link_aggregation_group.py b/aws_direct_connect_link_aggregation_group.py index 7b287bd61f3..0567ba90288 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/aws_direct_connect_link_aggregation_group.py @@ -265,7 +265,7 @@ def delete_lag(client, lag_id): exception=e) -@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) def _update_lag(client, lag_id, lag_name, min_links): params = {} if min_links: diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index d520f0ee84f..d2d199c5527 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -267,7 +267,7 @@ def try_except_ClientError(failure_msg): def wrapper(f): def run_func(*args, **kwargs): try: - result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) + result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) except (ClientError, BotoCoreError) as e: raise DirectConnectError(failure_msg, traceback.format_exc(), e) return result diff --git a/aws_inspector_target.py b/aws_inspector_target.py index ceb4abd63dd..a84e245d152 100644 --- a/aws_inspector_target.py +++ b/aws_inspector_target.py @@ -110,7 +110,7 @@ pass # caught by AnsibleAWSModule -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def main(): argument_spec = dict( name=dict(required=True), diff --git a/aws_kms.py b/aws_kms.py index 05a520ac94a..13bbd7f4619 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -434,19 +434,19 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -@AWSRetry.backoff(tries=5, delay=5, 
backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_iam_roles_with_backoff(connection): paginator = connection.get_paginator('list_roles') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): paginator = connection.get_paginator('list_keys') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): paginator = connection.get_paginator('list_aliases') return paginator.paginate().build_full_result() @@ -465,30 +465,30 @@ def get_kms_aliases_lookup(connection): return _aliases -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_tags_with_backoff(connection, key_id, **kwargs): return connection.list_resource_tags(KeyId=key_id, **kwargs) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id): params = dict(KeyId=key_id) paginator = connection.get_paginator('list_grants') return paginator.paginate(**params).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_metadata_with_backoff(connection, key_id): return connection.describe_key(KeyId=key_id) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): paginator = connection.get_paginator('list_key_policies') return paginator.paginate(KeyId=key_id).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_key_policy_with_backoff(connection, key_id, policy_name): return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) diff --git a/aws_kms_info.py b/aws_kms_info.py index 3e606481e15..a7620dad005 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -261,13 +261,13 @@ _aliases = dict() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): paginator = connection.get_paginator('list_keys') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): paginator = connection.get_paginator('list_aliases') return paginator.paginate().build_full_result() @@ -286,12 +286,12 @@ def get_kms_aliases_lookup(connection): return _aliases -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_tags_with_backoff(connection, key_id, **kwargs): return connection.list_resource_tags(KeyId=key_id, **kwargs) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id, **kwargs): params = dict(KeyId=key_id) if kwargs.get('tokens'): @@ -300,23 +300,23 @@ def get_kms_grants_with_backoff(connection, key_id, **kwargs): return paginator.paginate(**params).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def 
get_kms_metadata_with_backoff(connection, key_id): return connection.describe_key(KeyId=key_id) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): paginator = connection.get_paginator('list_key_policies') return paginator.paginate(KeyId=key_id).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_key_policy_with_backoff(connection, key_id, policy_name): return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index b10addf7485..750dceb2bf7 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -361,7 +361,7 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions): return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) -@AWSRetry.backoff(tries=3, delay=4) +@AWSRetry.jittered_backoff(retries=3, delay=4) def stack_set_facts(cfn, stack_set_name): try: ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] diff --git a/dms_endpoint.py b/dms_endpoint.py index f4ab520903a..6cc3bc3f896 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -175,10 +175,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -backoff_params = dict(tries=5, delay=1, backoff=1.5) +backoff_params = dict(retries=5, delay=1, backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_endpoints(connection, endpoint_identifier): """ checks if the endpoint exists """ try: @@ -189,7 +189,7 @@ def describe_endpoints(connection, endpoint_identifier): return {'Endpoints': []} -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_delete_endpoint(client, **params): """deletes the DMS endpoint based on the EndpointArn""" if module.params.get('wait'): @@ -198,19 +198,19 @@ def dms_delete_endpoint(client, **params): return client.delete_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_create_endpoint(client, **params): """ creates the DMS endpoint""" return client.create_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_modify_endpoint(client, **params): """ updates the endpoint""" return client.modify_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def get_endpoint_deleted_waiter(client): return client.get_waiter('endpoint_deleted') diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 305b6b5a85d..917f27438ff 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -66,10 +66,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -backoff_params = dict(tries=5, delay=1, backoff=1.5) +backoff_params = dict(retries=5, delay=1, 
backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_subnet_group(connection, subnet_group): """checks if instance exists""" try: @@ -80,18 +80,18 @@ def describe_subnet_group(connection, subnet_group): return {'ReplicationSubnetGroups': []} -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_create(connection, **params): """ creates the replication subnet group """ return connection.create_replication_subnet_group(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_modify(connection, **modify_params): return connection.modify_replication_subnet_group(**modify_params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_delete(module, connection): subnetid = module.params.get('identifier') delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) diff --git a/ec2_asg.py b/ec2_asg.py index 662c23873b1..46cdcbf15b8 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -639,21 +639,21 @@ INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') -backoff_params = dict(tries=10, delay=3, backoff=1.5) +backoff_params = dict(retries=10, delay=3, backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): pg = connection.get_paginator('describe_auto_scaling_groups') return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_instance_health(connection, lb_name, instances): params = dict(LoadBalancerName=lb_name) if instances: @@ -661,28 +661,28 @@ def describe_instance_health(connection, lb_name, instances): return connection.describe_instance_health(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_target_health(connection, target_group_arn, instances): return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def suspend_asg_processes(connection, asg_name, processes): connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def resume_asg_processes(connection, asg_name, processes): connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_launch_configurations(connection, launch_config_name): pg = connection.get_paginator('describe_launch_configurations') return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_launch_templates(connection, launch_template): if launch_template['launch_template_id'] is not None: try: @@ -698,12 +698,12 @@ def describe_launch_templates(connection, launch_template): 
module.fail_json(msg="No launch template found matching: %s" % launch_template) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def create_asg(connection, **params): connection.create_auto_scaling_group(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( AutoScalingGroupName=asg_name, @@ -712,7 +712,7 @@ def put_notification_config(connection, asg_name, topic_arn, notification_types) ) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def del_notification_config(connection, asg_name, topic_arn): connection.delete_notification_configuration( AutoScalingGroupName=asg_name, @@ -720,37 +720,37 @@ def del_notification_config(connection, asg_name, topic_arn): ) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def attach_load_balancers(connection, asg_name, load_balancers): connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def detach_load_balancers(connection, asg_name, load_balancers): connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def attach_lb_target_groups(connection, asg_name, target_group_arns): connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def detach_lb_target_groups(connection, asg_name, target_group_arns): connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def update_asg(connection, **params): connection.update_auto_scaling_group(**params) -@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity) diff --git a/ec2_elb_info.py b/ec2_elb_info.py index add102ab87a..8b207111b60 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -109,7 +109,7 @@ def _get_tags(self, elbname): elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key')) - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def _get_elb_connection(self): return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) @@ -158,7 +158,7 @@ def _get_health_check(self, health_check): health_check_dict['ping_path'] = path return health_check_dict - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def _get_elb_info(self, elb): elb_info = { 'name': 
elb.name, @@ -202,7 +202,7 @@ def _get_elb_info(self, elb): def list_elbs(self): elb_array, token = [], None - get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) + get_elb_with_backoff = AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) while True: all_elbs = get_elb_with_backoff(marker=token) token = all_elbs.next_marker diff --git a/ecs_service_info.py b/ecs_service_info.py index 9b47b02a714..79332e55702 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -148,7 +148,7 @@ def __init__(self, module): self.module = module self.ecs = module.client('ecs') - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_services_with_backoff(self, **kwargs): paginator = self.ecs.get_paginator('list_services') try: @@ -156,7 +156,7 @@ def list_services_with_backoff(self, **kwargs): except is_boto3_error_code('ClusterNotFoundException') as e: self.module.fail_json_aws(e, "Could not find cluster to list services") - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def describe_services_with_backoff(self, **kwargs): return self.ecs.describe_services(**kwargs) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index a56e76d037f..d6cdd33525e 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -141,7 +141,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_policies_with_backoff(iam): paginator = iam.get_paginator('list_policies') return paginator.paginate(Scope='Local').build_full_result() diff --git a/iam_saml_federation.py b/iam_saml_federation.py index a78decfe625..4b41f443134 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -123,23 +123,23 @@ def __init__(self, module): self.module.fail_json_aws(e, msg="Unknown boto error") # use retry decorator for boto3 calls - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _list_saml_providers(self): return self.conn.list_saml_providers() - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _get_saml_provider(self, arn): return self.conn.get_saml_provider(SAMLProviderArn=arn) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _update_saml_provider(self, arn, metadata): return self.conn.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _create_saml_provider(self, metadata, name): return self.conn.create_saml_provider(SAMLMetadataDocument=metadata, Name=name) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _delete_saml_provider(self, arn): return self.conn.delete_saml_provider(SAMLProviderArn=arn) diff --git a/rds.py b/rds.py index a59b183925b..bfbf0019f6b 100644 --- a/rds.py +++ b/rds.py @@ -943,13 +943,13 @@ def await_resource(conn, resource, status, module): if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) + resource = 
AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) else: # Temporary until all the rds2 commands have their responses parsed if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name) + resource = AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name) if resource is None: break # Some RDS resources take much longer than others to be ready. Check From ecc22b058e3c941bb2509e0dd4a7459838717eae Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 18 Oct 2021 11:24:09 +0200 Subject: [PATCH 323/683] route53_health_check - add tagging support (#765) route53_health_check - add tagging support SUMMARY add tagging support to route53_health_check ISSUE TYPE Feature Pull Request COMPONENT NAME route53_health_check ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: None --- route53_health_check.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/route53_health_check.py b/route53_health_check.py index af482132e56..382be93ab6d 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -86,6 +86,17 @@ - Will default to C(3) if not specified on creation. choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] type: int + tags: + description: + - A hash/dictionary of tags to set on the health check. + type: dict + version_added: 2.1.0 + purge_tags: + description: + - Delete any tags not specified in I(tags). + default: false + type: bool + version_added: 2.1.0 author: "zimbatm (@zimbatm)" extends_documentation_fragment: - amazon.aws.aws @@ -198,6 +209,11 @@ type: bool returned: When the health check exists. sample: false + tags: + description: A dictionary representing the tags on the health check. + type: dict + returned: When the health check exists. 
+ sample: '{"my_key": "my_value"}' ''' import uuid @@ -212,6 +228,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags +from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags def _list_health_checks(**params): @@ -332,7 +350,8 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check) - return True, 'create', result.get('HealthCheck').get('Id') + check_id = result.get('HealthCheck').get('Id') + return True, 'create', check_id def update_health_check(existing_check): @@ -396,6 +415,8 @@ def describe_health_check(id): health_check = result.get('HealthCheck', {}) health_check = camel_dict_to_snake_dict(health_check) + tags = get_tags(module, client, 'healthcheck', id) + health_check['tags'] = tags return health_check @@ -411,6 +432,8 @@ def main(): string_match=dict(), request_interval=dict(type='int', choices=[10, 30], default=30), failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), ) args_one_of = [ @@ -473,6 +496,9 @@ def main(): changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) else: changed, action = update_health_check(existing_check) + if check_id: + changed |= manage_tags(module, client, 'healthcheck', check_id, + module.params.get('tags'), module.params.get('purge_tags')) health_check = describe_health_check(id=check_id) health_check['action'] = action From 6d39e51f119cc7182b61c74d5508121db2951fb5 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 18 Oct 2021 11:14:32 +0100 Subject: [PATCH 324/683] Add waiter to the iam_role module (#767) Add waiter to the iam_role module SUMMARY This change adds the wait param used in other AWS modules, adding usage of a waiter for the iam_role creation / updates. Currently there is no waiting done to ensure the iam_role has actually created and is available before exiting. The tests have also been split up into separate files to make it a bit more manageable. 
Fixes: #710 ISSUE TYPE Feature Pull Request COMPONENT NAME iam_role ADDITIONAL INFORMATION Successful run completed of the iam_role integration test suite locally: ansible-test integration --docker centos8 -v iam_role --allow-unsupported PLAY RECAP ********************************************************************* testhost : ok=198 changed=46 unreachable=0 failed=0 skipped=0 rescued=0 ignored=7 AWS ACTIONS: ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'iam:CreateInstanceProfile', 'iam:CreatePolicy', 'iam:CreateRole', 'iam:DeleteInstanceProfile', 'iam:DeletePolicy', 'iam:DeleteRole', 'iam:DeleteRolePermissionsBoundary', 'iam:DeleteRolePolicy', 'iam:DetachRolePolicy', 'iam:GetRole', 'iam:GetRolePolicy', 'iam:ListAttachedRolePolicies', 'iam:ListEntitiesForPolicy', 'iam:ListInstanceProfilesForRole', 'iam:ListPolicies', 'iam:ListPolicyVersions', 'iam:ListRolePolicies', 'iam:ListRoleTags', 'iam:ListRoles', 'iam:PutRolePermissionsBoundary', 'iam:PutRolePolicy', 'iam:RemoveRoleFromInstanceProfile', 'iam:TagRole', 'iam:UntagRole', 'iam:UpdateRole'] Run command: docker exec 56cb328c6d9af293d9e820e1f2a94fb8ca87e0769b2b9b6d46bad661f9edde65 tar czf /root/output.tgz --exclude .tmp -C /root/ansible/ansible_collections/community/aws/tests output Run command: docker exec -i 56cb328c6d9af293d9e820e1f2a94fb8ca87e0769b2b9b6d46bad661f9edde65 dd if=/root/output.tgz bs=65536 Run command: tar oxzf /tmp/ansible-result-k2lnga3v.tgz -C /mnt/c/Users/mark.woolley/Documents/GitHub/public/ansible_collections/community/aws/tests Run command: docker rm -f 56cb328c6d9af293d9e820e1f2a94fb8ca87e0769b2b9b6d46bad661f9edde65 Reviewed-by: Mark Chappell Reviewed-by: None --- iam_role.py | 189 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 120 insertions(+), 69 deletions(-) diff --git a/iam_role.py b/iam_role.py index 948358b184e..7ca0d8c4fbb 100644 --- a/iam_role.py +++ b/iam_role.py @@ -87,6 +87,17 @@ - Remove tags not listed in I(tags) when tags is specified. default: true type: bool + wait_timeout: + description: + - How long (in seconds) to wait for creation / update to complete. + default: 120 + type: int + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for IAM role creation before returning. 
+ default: True + type: bool extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -215,16 +226,40 @@ def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): @AWSRetry.jittered_backoff() -def _list_policies(connection): - paginator = connection.get_paginator('list_policies') +def _list_policies(): + paginator = client.get_paginator('list_policies') return paginator.paginate().build_full_result()['Policies'] -def convert_friendly_names_to_arns(connection, module, policy_names): +def wait_iam_exists(): + if module.check_mode: + return + if not module.params.get('wait'): + return + + role_name = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = client.get_waiter('role_exists') + waiter.wait( + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + RoleName=role_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on IAM role creation') + + +def convert_friendly_names_to_arns(policy_names): if not any(not policy.startswith('arn:') for policy in policy_names): return policy_names allpolicies = {} - policies = _list_policies(connection) + policies = _list_policies() for policy in policies: allpolicies[policy['PolicyName']] = policy['Arn'] @@ -235,31 +270,31 @@ def convert_friendly_names_to_arns(connection, module, policy_names): module.fail_json_aws(e, msg="Couldn't find policy") -def attach_policies(connection, module, policies_to_attach, params): +def attach_policies(policies_to_attach, params): changed = False for policy_arn in policies_to_attach: try: if not module.check_mode: - connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True) + client.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName'])) changed = True return changed -def remove_policies(connection, module, policies_to_remove, params): +def remove_policies(policies_to_remove, params): changed = False for policy in policies_to_remove: try: if not module.check_mode: - connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True) + client.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName'])) changed = True return changed -def generate_create_params(module): +def generate_create_params(): params = dict() params['Path'] = module.params.get('path') params['RoleName'] = module.params.get('name') @@ -276,7 +311,7 @@ def generate_create_params(module): return params -def create_basic_role(connection, module, params): +def create_basic_role(params): """ Perform the Role creation. Assumes tests for the role existing have already been performed. 
@@ -284,11 +319,11 @@ def create_basic_role(connection, module, params): try: if not module.check_mode: - role = connection.create_role(aws_retry=True, **params) + role = client.create_role(aws_retry=True, **params) # 'Description' is documented as key of the role returned by create_role # but appears to be an AWS bug (the value is not returned using the AWS CLI either). # Get the role after creating it. - role = get_role_with_backoff(connection, module, params['RoleName']) + role = get_role_with_backoff(params['RoleName']) else: role = {'MadeInCheckMode': True} role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument']) @@ -298,7 +333,7 @@ def create_basic_role(connection, module, params): return role -def update_role_assumed_policy(connection, module, params, role): +def update_role_assumed_policy(params, role): # Check Assumed Policy document if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']): return False @@ -307,7 +342,7 @@ def update_role_assumed_policy(connection, module, params, role): return True try: - connection.update_assume_role_policy( + client.update_assume_role_policy( RoleName=params['RoleName'], PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])), aws_retry=True) @@ -316,7 +351,7 @@ def update_role_assumed_policy(connection, module, params, role): return True -def update_role_description(connection, module, params, role): +def update_role_description(params, role): # Check Description update if params.get('Description') is None: return False @@ -327,13 +362,13 @@ def update_role_description(connection, module, params, role): return True try: - connection.update_role(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) + client.update_role(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName'])) return True -def update_role_max_session_duration(connection, module, params, role): +def update_role_max_session_duration(params, role): # Check MaxSessionDuration update if params.get('MaxSessionDuration') is None: return False @@ -344,13 +379,13 @@ def update_role_max_session_duration(connection, module, params, role): return True try: - connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True) + client.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName'])) return True -def update_role_permissions_boundary(connection, module, params, role): +def update_role_permissions_boundary(params, role): # Check PermissionsBoundary if params.get('PermissionsBoundary') is None: return False @@ -362,18 +397,18 @@ def update_role_permissions_boundary(connection, module, params, role): if params.get('PermissionsBoundary') == '': try: - connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True) + client.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove permission boundary for role 
{0}".format(params['RoleName'])) else: try: - connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True) + client.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName'])) return True -def update_managed_policies(connection, module, params, role, managed_policies, purge_policies): +def update_managed_policies(params, role, managed_policies, purge_policies): # Check Managed Policies if managed_policies is None: return False @@ -384,7 +419,7 @@ def update_managed_policies(connection, module, params, role, managed_policies, return True # Get list of current attached managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['RoleName']) + current_attached_policies = get_attached_policy_list(params['RoleName']) current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] if len(managed_policies) == 1 and managed_policies[0] is None: @@ -396,16 +431,16 @@ def update_managed_policies(connection, module, params, role, managed_policies, changed = False if purge_policies: - changed |= remove_policies(connection, module, policies_to_remove, params) + changed |= remove_policies(policies_to_remove, params) - changed |= attach_policies(connection, module, policies_to_attach, params) + changed |= attach_policies(policies_to_attach, params) return changed -def create_or_update_role(connection, module): +def create_or_update_role(): - params = generate_create_params(module) + params = generate_create_params() role_name = params['RoleName'] create_instance_profile = module.params.get('create_instance_profile') purge_policies = module.params.get('purge_policies') @@ -414,48 +449,59 @@ def create_or_update_role(connection, module): managed_policies = module.params.get('managed_policies') if managed_policies: # Attempt to list the policies early so we don't leave things behind if we can't find them. 
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + managed_policies = convert_friendly_names_to_arns(managed_policies) changed = False # Get role - role = get_role(connection, module, role_name) + role = get_role(role_name) # If role is None, create it if role is None: - role = create_basic_role(connection, module, params) + role = create_basic_role(params) + + if not module.check_mode and module.params.get('wait'): + wait_iam_exists() + changed = True else: - changed |= update_role_tags(connection, module, params, role) - changed |= update_role_assumed_policy(connection, module, params, role) - changed |= update_role_description(connection, module, params, role) - changed |= update_role_max_session_duration(connection, module, params, role) - changed |= update_role_permissions_boundary(connection, module, params, role) + changed |= update_role_tags(params, role) + changed |= update_role_assumed_policy(params, role) + changed |= update_role_description(params, role) + changed |= update_role_max_session_duration(params, role) + changed |= update_role_permissions_boundary(params, role) + + if not module.check_mode and module.params.get('wait'): + wait_iam_exists() if create_instance_profile: - changed |= create_instance_profiles(connection, module, params, role) + changed |= create_instance_profiles(params, role) - changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies) + if not module.check_mode and module.params.get('wait'): + wait_iam_exists() + + changed |= update_managed_policies(params, role, managed_policies, purge_policies) + wait_iam_exists() # Get the role again if not role.get('MadeInCheckMode', False): - role = get_role(connection, module, params['RoleName']) - role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName']) - role['tags'] = get_role_tags(connection, module) + role = get_role(params['RoleName']) + role['AttachedPolicies'] = get_attached_policy_list(params['RoleName']) + role['tags'] = get_role_tags() module.exit_json( changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']), **camel_dict_to_snake_dict(role, ignore_list=['tags'])) -def create_instance_profiles(connection, module, params, role): +def create_instance_profiles(params, role): if role.get('MadeInCheckMode', False): return False # Fetch existing Profiles try: - instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName'])) @@ -468,7 +514,7 @@ def create_instance_profiles(connection, module, params, role): # Make sure an instance profile is created try: - connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True) + client.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True) except is_boto3_error_code('EntityAlreadyExists'): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... 
@@ -478,19 +524,19 @@ def create_instance_profiles(connection, module, params, role): # And attach the role to the profile try: - connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True) + client.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName'])) return True -def remove_instance_profiles(connection, module, role_params, role): +def remove_instance_profiles(role_params, role): role_name = module.params.get('name') delete_profiles = module.params.get("delete_instance_profile") try: - instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) @@ -499,21 +545,21 @@ def remove_instance_profiles(connection, module, role_params, role): profile_name = profile['InstanceProfileName'] try: if not module.check_mode: - connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params) + client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params) if profile_name == role_name: if delete_profiles: try: - connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) + client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) -def destroy_role(connection, module): +def destroy_role(): role_name = module.params.get('name') - role = get_role(connection, module, role_name) + role = get_role(role_name) role_params = dict() role_params['RoleName'] = role_name boundary_params = dict(role_params) @@ -526,51 +572,51 @@ def destroy_role(connection, module): # - attached instance profiles # - attached managed policies # - permissions boundary - remove_instance_profiles(connection, module, role_params, role) - update_managed_policies(connection, module, role_params, role, [], True) - update_role_permissions_boundary(connection, module, boundary_params, role) + remove_instance_profiles(role_params, role) + update_managed_policies(role_params, role, [], True) + update_role_permissions_boundary(boundary_params, role) try: if not module.check_mode: - connection.delete_role(aws_retry=True, **role_params) + client.delete_role(aws_retry=True, **role_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to delete role") module.exit_json(changed=True) -def get_role_with_backoff(connection, module, name): +def get_role_with_backoff(name): try: - return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role'] + return 
AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) -def get_role(connection, module, name): +def get_role(name): try: - return connection.get_role(RoleName=name, aws_retry=True)['Role'] + return client.get_role(RoleName=name, aws_retry=True)['Role'] except is_boto3_error_code('NoSuchEntity'): return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) -def get_attached_policy_list(connection, module, name): +def get_attached_policy_list(name): try: - return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] + return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) -def get_role_tags(connection, module): +def get_role_tags(): role_name = module.params.get('name') try: - return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) -def update_role_tags(connection, module, params, role): +def update_role_tags(params, role): new_tags = params.get('Tags') if new_tags is None: return False @@ -580,7 +626,7 @@ def update_role_tags(connection, module, params, role): purge_tags = module.params.get('purge_tags') try: - existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} @@ -589,9 +635,9 @@ def update_role_tags(connection, module, params, role): if not module.check_mode: try: if tags_to_remove: - connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True) + client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: - connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) + client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name) @@ -601,6 +647,9 @@ def update_role_tags(connection, module, params, role): def main(): + global module + global client + argument_spec = dict( name=dict(type='str', required=True), path=dict(type='str', default="/"), @@ -615,6 +664,8 @@ def main(): purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']), tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=120, type='int'), ) module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[('state', 'present', 
['assume_role_policy_document'])], @@ -638,14 +689,14 @@ def main(): if not path.endswith('/') or not path.startswith('/'): module.fail_json(msg="path must begin and end with /") - connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") if state == 'present': - create_or_update_role(connection, module) + create_or_update_role() else: - destroy_role(connection, module) + destroy_role() if __name__ == '__main__': From 4c2e09baae8e511ccf114e324fd739eabe984259 Mon Sep 17 00:00:00 2001 From: Alan Hohn Date: Tue, 19 Oct 2021 14:16:35 -0400 Subject: [PATCH 325/683] Add network load balancer example for specified internal IP address (#742) Add network load balancer example for specified internal IP address SUMMARY Adds an example to community.aws.elb_network_lb for creating an internally-facing ELB with a specified internal IP address. The subnet_mappings dictionary is passed directly through to the AWS ELB API, so the ability was inherent in the module but wasn't documented. ISSUE TYPE Docs Pull Request COMPONENT NAME community.aws.elb_network_lb ADDITIONAL INFORMATION No module change, just docs. Happy to make any requested changes. Thanks for this module and for looking at the PR. Reviewed-by: Markus Bergholz Reviewed-by: None --- elb_network_lb.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/elb_network_lb.py b/elb_network_lb.py index 8de4b7692aa..2f664c721ee 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -86,7 +86,8 @@ subnet_mappings: description: - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP - to attach to the load balancer. You can specify one Elastic IP address per subnet. + to attach to the load balancer or the internal IP address for an internal load balancer. You can specify one Elastic IP address or internal + address per subnet. - This parameter is mutually exclusive with I(subnets). type: list elements: dict @@ -169,6 +170,21 @@ TargetGroupName: mytargetgroup # Required. The name of the target group state: present +- name: Create an internal ELB with a specified IP address + community.aws.elb_network_lb: + name: myelb + scheme: internal + subnet_mappings: + - SubnetId: subnet-012345678 + PrivateIPv4Address: 192.168.0.1 # Must be an address from within the CIDR of the subnet. + listeners: + - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive). + Port: 80 # Required. The port on which the load balancer is listening. + DefaultActions: + - Type: forward # Required. Only 'forward' is accepted at this time + TargetGroupName: mytargetgroup # Required. 
The name of the target group + state: present + - name: Remove an ELB community.aws.elb_network_lb: name: myelb From a57632d92ff59ae5771bcfe51786159e896a1613 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 20 Oct 2021 14:21:52 +0200 Subject: [PATCH 326/683] elb_instance - initial integration tests (#768) elb_instance - initial integration tests SUMMARY Rewrite elb_instance (ec2_elb) for boto3 ISSUE TYPE Feature Pull Request COMPONENT NAME elb_instance ADDITIONAL INFORMATION fixes: #384 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: None --- elb_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/elb_instance.py b/elb_instance.py index 5759b0b2ccc..b234031ee24 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -271,7 +271,7 @@ def _get_instance_lbs(self, ec2_elbs=None): break if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) + lbs = sorted([lb for lb in elbs if lb.name in ec2_elbs], key=lambda lb: lb.__repr__()) else: lbs = [] for lb in elbs: From 8c958990e0b7d900ab7bf5b700dc13dae6dc3895 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 21 Oct 2021 14:14:37 +0200 Subject: [PATCH 327/683] kms_spec and kms_usage parameter for aws_kms module (#774) kms_spec and kms_usage parameter for aws_kms module SUMMARY Add missing parameters kms_spec and kms_usage for aws_kms module, ISSUE TYPE Feature Pull Request COMPONENT NAME aws_kms Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: None --- aws_kms.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index 13bbd7f4619..41a5ee63c69 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -173,6 +173,24 @@ - policy to apply to the KMS key. - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) type: json + key_spec: + aliases: + - customer_master_key_spec + description: + - Specifies the type of KMS key to create. + - The specification is not changeable once the key is created. + type: str + default: SYMMETRIC_DEFAULT + choices: ['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1'] + version_added: 2.1.0 + key_usage: + description: + - Determines the cryptographic operations for which you can use the KMS key. + - The usage is not changeable once the key is created. 
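As a usage sketch of the two new options (the key alias is hypothetical):

- name: Create an asymmetric KMS key for signing
  community.aws.aws_kms:
    alias: example-signing-key   # hypothetical alias
    key_spec: RSA_4096           # one of the new key_spec choices
    key_usage: SIGN_VERIFY       # replaces the previously hard-coded ENCRYPT_DECRYPT
    state: present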
+ type: str + default: ENCRYPT_DECRYPT + choices: ['ENCRYPT_DECRYPT', 'SIGN_VERIFY'] + version_added: 2.1.0 author: - Ted Timmons (@tedder) - Will Thames (@willthames) @@ -852,9 +870,12 @@ def update_key(connection, module, key): def create_key(connection, module): + key_usage = module.params.get('key_usage') + key_spec = module.params.get('key_spec') params = dict(BypassPolicyLockoutSafetyCheck=False, Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'), - KeyUsage='ENCRYPT_DECRYPT', + KeyUsage=key_usage, + CustomerMasterKeySpec=key_spec, Origin='AWS_KMS') if module.check_mode: @@ -1067,7 +1088,10 @@ def main(): policy=dict(type='json'), purge_grants=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), - enable_key_rotation=(dict(type='bool')) + enable_key_rotation=(dict(type='bool')), + key_spec=dict(type='str', default='SYMMETRIC_DEFAULT', aliases=['customer_master_key_spec'], + choices=['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1']), + key_usage=dict(type='str', default='ENCRYPT_DECRYPT', choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY']), ) module = AnsibleAWSModule( From e1be2b8ee34e84de17772ffa3da1456cf78aa4f8 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 22 Oct 2021 15:10:11 +0100 Subject: [PATCH 328/683] Rds enhanced monitoring bug fix (#747) Rds enhanced monitoring bug fix SUMMARY (a copy of #712 as I messed up my branch by accident) This is a fix for an issue when an RDS instance already exists and you wish to enable enhanced monitoring, for the full details see the linked old reported issue: ansible/ansible#51772 But in summary currently if you enable enhanced monitoring on an RDS instance that already exists where it isn't already enabled then the following is returned: An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'MonitoringRoleArn' fatal: [localhost_eu-west-1-pdv-qa-1 -> 127.0.0.1]: FAILED! => changed=false module_stderr: |- Traceback (most recent call last): File "master:/opt/mitogen/mitogen-0.2.9/ansible_mitogen/runner.py", line 975, in _run self._run_code(code, mod) File "master:/opt/mitogen/mitogen-0.2.9/ansible_mitogen/runner.py", line 939, in _run_code exec(code, vars(mod)) File "master:/tmp/build/4bef5c86/framework/library/cloud/aws/rds_instance.py", line 1245, in File "master:/tmp/build/4bef5c86/framework/library/cloud/aws/rds_instance.py", line 1210, in main File "master:/tmp/build/4bef5c86/framework/library/cloud/aws/rds_instance.py", line 855, in get_parameters File "master:/tmp/build/4bef5c86/framework/library/cloud/aws/rds_instance.py", line 885, in get_options_with_changing_values File "master:/tmp/build/4bef5c86/framework/library/cloud/aws/rds_instance.py", line 983, in get_changing_options_with_consistent_keys KeyError: 'MonitoringRoleArn' module_stdout: '' msg: |- MODULE FAILURE See stdout/stderr for the exact error Originally-Depends-On: mattclay/aws-terminator#164 Other changes A load of issues have surfaced in the integration tests due to how slow RDS is to create / modify etc. I've condensed down the tests where possible reducing the number of inventory jobs to 6 and bumped serial to 6 so that hopefully all tests can run at once and finish within the 1 hr AWS session duration. 
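A hedged sketch of the playbook path this change fixes, assuming the module's monitoring_interval and monitoring_role_arn options (the instance identifier and role ARN are hypothetical):

- name: Enable enhanced monitoring on an existing RDS instance
  community.aws.rds_instance:
    db_instance_identifier: example-db   # hypothetical pre-existing instance
    state: present
    monitoring_interval: 60              # seconds between enhanced monitoring samples
    monitoring_role_arn: arn:aws:iam::123456789012:role/rds-monitoring-role   # hypothetical role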
ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance Reviewed-by: Mark Chappell Reviewed-by: Mark Woolley Reviewed-by: None --- rds_instance.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index c1f118db514..92d5e257cf0 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -989,13 +989,12 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c def get_changing_options_with_consistent_keys(modify_params, instance): - inconsistent_parameters = list(modify_params.keys()) changing_params = {} for param in modify_params: - current_option = instance.get('PendingModifiedValues', {}).get(param) + current_option = instance.get('PendingModifiedValues', {}).get(param, None) if current_option is None: - current_option = instance[param] + current_option = instance.get(param, None) if modify_params[param] != current_option: changing_params[param] = modify_params[param] From 0659e67174132f3c46d6ea9f49a085453ed4de5b Mon Sep 17 00:00:00 2001 From: Asaf Levy Date: Fri, 22 Oct 2021 21:02:14 +0300 Subject: [PATCH 329/683] Allow ECS.Client.delete_service to force delete a service (#228) Allow ECS.Client.delete_service to force delete a service SUMMARY Change allows ansible to forcefully delete a service, required when deleting a service with >0 scale, or no target group ISSUE TYPE #220 COMPONENT NAME ecs_service Reviewed-by: None --- ecs_service.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 590276e0ab1..d43253af386 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -127,6 +127,12 @@ field: description: The field to apply the placement strategy against. type: str + force_deletion: + description: + - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group. + default: False + type: bool + version_added: 2.1.0 network_configuration: description: - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). @@ -631,8 +637,8 @@ def jsonize(self, service): e['createdAt'] = str(e['createdAt']) return service - def delete_service(self, service, cluster=None): - return self.ecs.delete_service(cluster=cluster, service=service) + def delete_service(self, service, cluster=None, force=False): + return self.ecs.delete_service(cluster=cluster, service=service, force=force) def health_check_setable(self, params): load_balancers = params.get('loadBalancers', []) @@ -652,6 +658,7 @@ def main(): delay=dict(required=False, type='int', default=10), repeat=dict(required=False, type='int', default=10), force_new_deployment=dict(required=False, default=False, type='bool'), + force_deletion=dict(required=False, default=False, type='bool'), deployment_configuration=dict(required=False, default={}, type='dict'), placement_constraints=dict( required=False, @@ -810,7 +817,8 @@ def main(): try: service_mgr.delete_service( module.params['name'], - module.params['cluster'] + module.params['cluster'], + module.params['force_deletion'], ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Couldn't delete service") From fe0d1b597e92346115c28eeb5d90a51861fc1b3d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 23 Oct 2021 14:31:17 +0200 Subject: [PATCH 330/683] lambda_info - use a paginator and AWSRetry (#777) lambda_info - use a paginator and AWSRetry SUMMARY Add paginator and AWSRetry to lambda_info.
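A short usage sketch of the paginated behaviour; query=config is one of the module's documented choices, and max_items/next_marker are no longer needed:

- name: Gather configuration for every Lambda function in the account
  community.aws.lambda_info:
    query: config
  register: lambda_config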
It seems to have a half-implemented manual pagination referring to parameters that don't exist. Just use the real pagination. ISSUE TYPE Feature Pull Request COMPONENT NAME lambda_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: None --- lambda_info.py | 54 +++++++++++++------------------------------------- 1 file changed, 14 insertions(+), 40 deletions(-) diff --git a/lambda_info.py b/lambda_info.py index c95c0218132..04cae251be6 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -58,7 +58,6 @@ - name: List all function community.aws.lambda_info: query: all - max_items: 20 register: output - name: show Lambda information ansible.builtin.debug: @@ -89,6 +88,13 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +@AWSRetry.jittered_backoff() +def _paginate(client, function, **params): + paginator = client.get_paginator(function) + return paginator.paginate(**params).build_full_result() def fix_return(node): @@ -127,14 +133,8 @@ def alias_details(client, module): function_name = module.params.get('function_name') if function_name: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') try: - lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) + lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(aliases=[]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -154,9 +154,6 @@ def all_details(client, module): :return dict: """ - if module.params.get('max_items') or module.params.get('next_marker'): - module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') - lambda_info = dict() function_name = module.params.get('function_name') @@ -187,21 +184,14 @@ def config_details(client, module): function_name = module.params.get('function_name') if function_name: try: - lambda_info.update(client.get_function_configuration(FunctionName=function_name)) + lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(function={}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) else: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - try: - lambda_info.update(function_list=client.list_functions(**params)['Functions']) + lambda_info.update(function_list=_paginate(client, 'list_functions')['Functions']) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(function_list=[]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -234,14 +224,8 @@ def mapping_details(client, module): if module.params.get('event_source_arn'): params['EventSourceArn'] = 
module.params.get('event_source_arn') - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - try: - lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) + lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings']) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(mappings=[]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -262,16 +246,13 @@ def policy_details(client, module): :return dict: """ - if module.params.get('max_items') or module.params.get('next_marker'): - module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') - lambda_info = dict() function_name = module.params.get('function_name') if function_name: try: # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(policy={}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -295,15 +276,8 @@ def version_details(client, module): function_name = module.params.get('function_name') if function_name: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - try: - lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) + lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) except is_boto3_error_code('ResourceNotFoundException'): lambda_info.update(versions=[]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -343,7 +317,7 @@ def main(): if len(function_name) > 64: module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - client = module.client('lambda') + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) invocations = dict( aliases='alias_details', From a4e7e52ab654832d446cc663da33867a590ab3f4 Mon Sep 17 00:00:00 2001 From: christophemorio <49184206+christophemorio@users.noreply.github.com> Date: Sun, 24 Oct 2021 11:15:47 +0200 Subject: [PATCH 331/683] ELB info: return all LB if names is not defined (#693) ELB info: return all LB if names is not defined SUMMARY Documentation says options: names: description: - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. But doing this elb_classic_lb_info returns an empty list. 
ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_classic_lb_info ADDITIONAL INFORMATION - hosts: localhost tasks: - community.aws.elb_classic_lb_info: {} register: elb_info - debug: var=elb_info $ ansible-playbook playbook.yaml TASK [community.aws.elb_classic_lb_info] ******** ok: [localhost] TASK [debug] ******** ok: [localhost] => { "elb_info": { "changed": false, "elbs": [], # <-- should return list of all ELB "failed": false } } Reviewed-by: Mark Chappell Reviewed-by: None Reviewed-by: None --- elb_classic_lb_info.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index f57f4dd391c..1afbd49c9dc 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -157,22 +157,36 @@ def list_elbs(connection, load_balancer_names): results = [] + if not load_balancer_names: + for lb in get_all_lb(connection): + results.append(describe_elb(connection, lb)) + for load_balancer_name in load_balancer_names: lb = get_lb(connection, load_balancer_name) if not lb: continue - description = camel_dict_to_snake_dict(lb) - name = lb['LoadBalancerName'] - instances = lb.get('Instances', []) - description['tags'] = get_tags(connection, name) - description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') - description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') - description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') - description['attributes'] = get_lb_attributes(connection, name) - results.append(description) + results.append(describe_elb(connection, lb)) return results +def describe_elb(connection, lb): + description = camel_dict_to_snake_dict(lb) + name = lb['LoadBalancerName'] + instances = lb.get('Instances', []) + description['tags'] = get_tags(connection, name) + description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') + description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') + description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') + description['attributes'] = get_lb_attributes(connection, name) + return description + + +@AWSRetry.jittered_backoff() +def get_all_lb(connection): + paginator = connection.get_paginator('describe_load_balancers') + return paginator.paginate().build_full_result()['LoadBalancerDescriptions'] + + def get_lb(connection, load_balancer_name): try: return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0] From 16d4b9417ba12f3f25457beb2293606ef15bd693 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 25 Oct 2021 16:51:49 +0200 Subject: [PATCH 332/683] rds_option_group (_info) new modules (#517) rds_option_group (_info) new modules SUMMARY Attempt to resurrect ansible/ansible#25290 Fixes: #463 ISSUE TYPE New Module Pull Request COMPONENT NAME rds_option_group rds_option_group_info Requires: mattclay/aws-terminator#144 Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: None --- rds_option_group.py | 671 +++++++++++++++++++++++++++++++++++++++ 
rds_option_group_info.py | 327 +++++++++++++++++++ 2 files changed, 998 insertions(+) create mode 100644 rds_option_group.py create mode 100644 rds_option_group_info.py diff --git a/rds_option_group.py b/rds_option_group.py new file mode 100644 index 00000000000..3b01eaeda82 --- /dev/null +++ b/rds_option_group.py @@ -0,0 +1,671 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: rds_option_group +short_description: rds_option_group module +version_added: 2.1.0 +description: + - Manages the creation, modification, deletion of RDS option groups. +author: + - "Nick Aslanidis (@naslanidis)" + - "Will Thames (@willthames)" + - "Alina Buzachis (@alinabuzachis)" +options: + state: + description: + - Specifies whether the option group should be C(present) or C(absent). + required: true + choices: [ 'present', 'absent' ] + type: str + option_group_name: + description: + - Specifies the name of the option group to be created. + required: true + type: str + engine_name: + description: + - Specifies the name of the engine that this option group should be associated with. + type: str + major_engine_version: + description: + - Specifies the major version of the engine that this option group should be associated with. + type: str + option_group_description: + description: + - The description of the option group. + type: str + apply_immediately: + description: + - Indicates whether the changes should be applied immediately, or during the next maintenance window. + required: false + type: bool + default: false + options: + description: + - Options in this list are added to the option group. + - If already present, the specified configuration is used to update the existing configuration. + - If none are supplied, any existing options are removed. + type: list + elements: dict + suboptions: + option_name: + description: The configuration of options to include in a group. + required: false + type: str + port: + description: The optional port for the option. + required: false + type: int + option_version: + description: The version for the option. + required: false + type: str + option_settings: + description: The option settings to include in an option group. + required: false + type: list + suboptions: + name: + description: The name of the option that has settings that you can set. + required: false + type: str + value: + description: The current value of the option setting. + required: false + type: str + default_value: + description: The default value of the option setting. + required: false + type: str + description: + description: The description of the option setting. + required: false + type: str + apply_type: + description: The DB engine specific parameter type. + required: false + type: str + data_type: + description: The data type of the option setting. + required: false + type: str + allowed_values: + description: The allowed values of the option setting. + required: false + type: str + is_modifiable: + description: A Boolean value that, when C(true), indicates the option setting can be modified from the default. + required: false + type: bool + is_collection: + description: Indicates if the option setting is part of a collection. + required: false + type: bool + db_security_group_memberships: + description: A list of C(DBSecurityGroupMembership) name strings used for this option. 
+ required: false + type: list + vpc_security_group_memberships: + description: A list of C(VpcSecurityGroupMembership) name strings used for this option. + required: false + type: list + tags: + description: + - A dictionary of key value pairs to assign the option group. + - To remove all tags set I(tags={}) and I(purge_tags=true). + type: dict + purge_tags: + description: + - Remove tags not listed in I(tags). + type: bool + default: true + wait: + description: Whether to wait for the cluster to be available or deleted. + type: bool + default: True +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Create an RDS Mysql Option group +- name: Create an RDS Mysql option group + community.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "sg-d188c123" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + register: new_rds_mysql_option_group + +# Remove currently configured options for an option group by removing options argument +- name: Create an RDS Mysql option group + community.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + register: rds_mysql_option_group + +- name: Create an RDS Mysql option group using tags + community.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + tags: + Tag1: tag1 + Tag2: tag2 + register: rds_mysql_option_group + +# Delete an RDS Mysql Option group +- name: Delete an RDS Mysql option group + community.aws.rds_option_group: + state: absent + option_group_name: test-mysql-option-group + register: deleted_rds_mysql_option_group +''' + +RETURN = r''' +allows_vpc_and_non_vpc_instance_memberships: + description: Indicates whether this option group can be applied to both VPC and non-VPC instances. + returned: always + type: bool + sample: false +changed: + description: If the Option Group has changed. + type: bool + returned: always + sample: true +engine_name: + description: Indicates the name of the engine that this option group can be applied to. + returned: always + type: str + sample: "mysql" +major_engine_version: + description: Indicates the major engine version associated with this option group. + returned: always + type: str + sample: "5.6" +option_group_arn: + description: The Amazon Resource Name (ARN) for the option group. + returned: always + type: str + sample: "arn:aws:rds:ap-southeast-2:721066863947:og:ansible-test-option-group" +option_group_description: + description: Provides a description of the option group. + returned: always + type: str + sample: "test mysql option group" +option_group_name: + description: Specifies the name of the option group. + returned: always + type: str + sample: "test-mysql-option-group" +options: + description: Indicates what options are available in the option group. 
+ returned: always + type: list + elements: dict + contains: + db_security_group_memberships: + description: If the option requires access to a port, then this DB security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the DB security group. + returned: always + type: str + sample: "available" + db_security_group_name: + description: The name of the DB security group. + returned: always + type: str + sample: "mydbsecuritygroup" + option_description: + description: The description of the option. + returned: always + type: str + sample: "Innodb Memcached for MySQL" + option_name: + description: The name of the option. + returned: always + type: str + sample: "MEMCACHED" + option_settings: + description: The option settings for this option. + returned: always + type: list + contains: + allowed_values: + description: The allowed values of the option setting. + returned: always + type: str + sample: "1-2048" + apply_type: + description: The DB engine specific parameter type. + returned: always + type: str + sample: "STATIC" + data_type: + description: The data type of the option setting. + returned: always + type: str + sample: "INTEGER" + default_value: + description: The default value of the option setting. + returned: always + type: str + sample: "1024" + description: + description: The description of the option setting. + returned: always + type: str + sample: "Verbose level for memcached." + is_collection: + description: Indicates if the option setting is part of a collection. + returned: always + type: bool + sample: true + is_modifiable: + description: A Boolean value that, when true, indicates the option setting can be modified from the default. + returned: always + type: bool + sample: true + name: + description: The name of the option that has settings that you can set. + returned: always + type: str + sample: "INNODB_API_ENABLE_MDL" + value: + description: The current value of the option setting. + returned: always + type: str + sample: "0" + permanent: + description: Indicates whether this option is permanent. + returned: always + type: bool + sample: true + persistent: + description: Indicates whether this option is persistent. + returned: always + type: bool + sample: true + port: + description: If required, the port configured for this option to use. + returned: always + type: int + sample: 11211 + vpc_security_group_memberships: + description: If the option requires access to a port, then this VPC security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the VPC security group. + returned: always + type: str + sample: "available" + vpc_security_group_id: + description: The ID of the VPC security group. + returned: always + type: str + sample: "sg-0cd636a23ae76e9a4" +vpc_id: + description: If present, this option group can only be applied to instances that are in the VPC indicated by this field. + returned: always + type: str + sample: "vpc-bf07e9d6" +tags: + description: The tags associated with the option group.
+ type: dict + returned: always + sample: { + "Ansible": "Test" + } +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_option_groups(client, **params): + try: + paginator = client.get_paginator('describe_option_groups') + return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0] + except is_boto3_error_code('OptionGroupNotFoundFault'): + return {} + + +def get_option_group(client, module): + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + + try: + result = camel_dict_to_snake_dict(_describe_option_groups(client, **params)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe option groups.") + + if result: + result['tags'] = get_tags(client, module, result['option_group_arn']) + + return result + + +def create_option_group_options(client, module): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + options_to_include = module.params.get('options') + params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True) + + if module.params.get('apply_immediately'): + params['ApplyImmediately'] = module.params.get('apply_immediately') + + if module.check_mode: + return changed + + try: + client.modify_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update Option Group.") + + return changed + + +def remove_option_group_options(client, module, options_to_remove): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + params['OptionsToRemove'] = options_to_remove + + if module.params.get('apply_immediately'): + params['ApplyImmediately'] = module.params.get('apply_immediately') + + if module.check_mode: + return changed + + try: + client.modify_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + return changed + + +def create_option_group(client, module): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + params['EngineName'] = module.params.get('engine_name') + params['MajorEngineVersion'] = str(module.params.get('major_engine_version')) + params['OptionGroupDescription'] = module.params.get('option_group_description') + + if module.params.get('tags'): + params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + else: + params['Tags'] = list() + + if 
module.check_mode: + return changed + try: + client.create_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to create Option Group.') + + return changed + + +def match_option_group_options(client, module): + requires_update = False + new_options = module.params.get('options') + + # Get existing option groups and compare to our new options spec + current_option = get_option_group(client, module) + + if current_option['options'] == [] and new_options: + requires_update = True + else: + for option in current_option['options']: + for setting_name in new_options: + if setting_name['option_name'] == option['option_name']: + + # Security groups need to be handled separately due to different keys on request and what is + # returned by the API + if any( + name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and + setting_name[name] != option[name] + for name in setting_name + ): + requires_update = True + + if any( + name in option and name == 'vpc_security_group_memberships' + for name in setting_name + ): + current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships']) + new_sg = set(setting_name['vpc_security_group_memberships']) + if current_sg != new_sg: + requires_update = True + + if any( + new_option_setting['name'] == current_option_setting['name'] and + new_option_setting['value'] != current_option_setting['value'] + for new_option_setting in setting_name['option_settings'] + for current_option_setting in option['option_settings'] + ): + requires_update = True + else: + requires_update = True + + return requires_update + + +def compare_option_group(client, module): + to_be_added = None + to_be_removed = None + current_option = get_option_group(client, module) + new_options = module.params.get('options') + new_settings = set([item['option_name'] for item in new_options]) + old_settings = set([item['option_name'] for item in current_option['options']]) + + if new_settings != old_settings: + to_be_added = list(new_settings - old_settings) + to_be_removed = list(old_settings - new_settings) + + return to_be_added, to_be_removed + + +def setup_option_group(client, module): + results = [] + changed = False + to_be_added = None + to_be_removed = None + + # Check if there is an existing options group + existing_option_group = get_option_group(client, module) + + if existing_option_group: + results = existing_option_group + + # Check tagging + changed |= update_tags(client, module, existing_option_group) + + if module.params.get('options'): + # Check if existing options require updating + update_required = match_option_group_options(client, module) + + # Check if there are options to be added or removed + if update_required: + to_be_added, to_be_removed = compare_option_group(client, module) + + if to_be_added or update_required: + changed |= create_option_group_options(client, module) + + if to_be_removed: + changed |= remove_option_group_options(client, module, to_be_removed) + + # If changed, get updated version of option group + if changed: + results = get_option_group(client, module) + else: + # No options were supplied. 
If options exist, remove them + current_option_group = get_option_group(client, module) + + if current_option_group['options'] != []: + # Here we would call our remove options function + options_to_remove = [] + + for option in current_option_group['options']: + options_to_remove.append(option['option_name']) + + changed |= remove_option_group_options(client, module, options_to_remove) + + # If changed, get updated version of option group + if changed: + results = get_option_group(client, module) + else: + changed = create_option_group(client, module) + + if module.params.get('options'): + changed = create_option_group_options(client, module) + + results = get_option_group(client, module) + + return changed, results + + +def remove_option_group(client, module): + changed = False + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + + # Check if there is an existing options group + existing_option_group = get_option_group(client, module) + + if existing_option_group: + + if module.check_mode: + return True, {} + + changed = True + try: + client.delete_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete option group.") + + return changed, {} + + +def update_tags(client, module, option_group): + if module.params.get('tags') is None: + return False + + try: + existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain option group tags.") + + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), + module.params['tags'], module.params['purge_tags']) + changed = bool(to_update or to_delete) + + if to_update: + try: + if module.check_mode: + return changed + client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], + Tags=ansible_dict_to_boto3_tag_list(to_update)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't add tags to option group.") + if to_delete: + try: + if module.check_mode: + return changed + client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], + TagKeys=to_delete) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove tags from option group.") + + return changed + + +def main(): + argument_spec = dict( + option_group_name=dict(required=True, type='str'), + engine_name=dict(type='str'), + major_engine_version=dict(type='str'), + option_group_description=dict(type='str'), + options=dict(required=False, type='list', elements='dict'), + apply_immediately=dict(type='bool', default=False), + state=dict(required=True, choices=['present', 'absent']), + tags=dict(required=False, type='dict'), + purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]], + ) + + try: + client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to 
AWS.') + + state = module.params.get('state') + + if state == 'present': + changed, results = setup_option_group(client, module) + else: + changed, results = remove_option_group(client, module) + + module.exit_json(changed=changed, **results) + + +if __name__ == '__main__': + main() diff --git a/rds_option_group_info.py b/rds_option_group_info.py new file mode 100644 index 00000000000..b29479386ff --- /dev/null +++ b/rds_option_group_info.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rds_option_group_info +short_description: Gather information about RDS option groups +version_added: 2.1.0 +description: + - Gather information about RDS option groups. +author: "Alina Buzachis (@alinabuzachis)" +options: + option_group_name: + description: + - The name of the option group to describe. + - Can't be supplied together with I(engine_name) or I(major_engine_version). + default: '' + required: false + type: str + marker: + description: + - If this parameter is specified, the response includes only records beyond the marker, up to the value specified by I(max_records). + default: '' + required: false + type: str + max_records: + description: + - The maximum number of records to include in the response. + - Allowed values are between C(20) and C(100). + type: int + default: 100 + required: false + engine_name: + description: Filters the list of option groups to only include groups associated with a specific database engine. + type: str + default: '' + required: false + major_engine_version: + description: + - Filters the list of option groups to only include groups associated with a specific database engine version. + - If specified, then I(engine_name) must also be specified. + type: str + default: '' + required: false +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: List an option group + community.aws.rds_option_group_info: + option_group_name: test-mysql-option-group + register: option_group + +- name: List all the option groups + community.aws.rds_option_group_info: + region: ap-southeast-2 + profile: production + register: option_group +''' + +RETURN = r''' +changed: + description: Whether anything was changed; always C(false) for this info module. + type: bool + returned: always + sample: false +result: + description: The available RDS option groups. + returned: always + type: complex + contains: + allows_vpc_and_non_vpc_instance_memberships: + description: Indicates whether this option group can be applied to both VPC and non-VPC instances. + returned: always + type: bool + sample: false + engine_name: + description: Indicates the name of the engine that this option group can be applied to. + returned: always + type: str + sample: "mysql" + major_engine_version: + description: Indicates the major engine version associated with this option group. + returned: always + type: str + sample: "5.6" + option_group_arn: + description: The Amazon Resource Name (ARN) for the option group. + returned: always + type: str + sample: "arn:aws:rds:ap-southeast-2:721066863947:og:ansible-test-option-group" + option_group_description: + description: Provides a description of the option group.
+ returned: always + type: str + sample: "test mysql option group" + option_group_name: + description: Specifies the name of the option group. + returned: always + type: str + sample: "test-mysql-option-group" + options: + description: Indicates what options are available in the option group. + returned: always + type: complex + contains: + db_security_group_memberships: + description: If the option requires access to a port, then this DB security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the DB security group. + returned: always + type: str + sample: "available" + db_security_group_name: + description: The name of the DB security group. + returned: always + type: str + sample: "mydbsecuritygroup" + option_description: + description: The description of the option. + returned: always + type: str + sample: "Innodb Memcached for MySQL" + option_name: + description: The name of the option. + returned: always + type: str + sample: "MEMCACHED" + option_settings: + description: The option settings associated with the option. + returned: always + type: complex + contains: + allowed_values: + description: The allowed values of the option setting. + returned: always + type: str + sample: "1-2048" + apply_type: + description: The DB engine specific parameter type. + returned: always + type: str + sample: "STATIC" + data_type: + description: The data type of the option setting. + returned: always + type: str + sample: "INTEGER" + default_value: + description: The default value of the option setting. + returned: always + type: str + sample: "1024" + description: + description: The description of the option setting. + returned: always + type: str + sample: "Verbose level for memcached." + is_collection: + description: Indicates if the option setting is part of a collection. + returned: always + type: bool + sample: true + is_modifiable: + description: A Boolean value that, when true, indicates the option setting can be modified from the default. + returned: always + type: bool + sample: true + name: + description: The name of the option that has settings that you can set. + returned: always + type: str + sample: "INNODB_API_ENABLE_MDL" + value: + description: The current value of the option setting. + returned: always + type: str + sample: "0" + permanent: + description: Indicates whether this option is permanent. + returned: always + type: bool + sample: true + persistent: + description: Indicates whether this option is persistent. + returned: always + type: bool + sample: true + port: + description: If required, the port configured for this option to use. + returned: always + type: int + sample: 11211 + vpc_security_group_memberships: + description: If the option requires access to a port, then this VPC security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the VPC security group. + returned: always + type: str + sample: "available" + vpc_security_group_id: + description: The ID of the VPC security group. + returned: always + type: str + sample: "sg-0cd636a23ae76e9a4" + vpc_id: + description: If present, this option group can only be applied to instances that are in the VPC indicated by this field. + returned: always + type: str + sample: "vpc-bf07e9d6" + tags: + description: The tags associated with the option group.
+ type: dict + returned: always + sample: { + "Ansible": "Test" + } + +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_option_groups(client, **params): + try: + paginator = client.get_paginator('describe_option_groups') + return paginator.paginate(**params).build_full_result() + except is_boto3_error_code('OptionGroupNotFoundFault'): + return {} + + +def list_option_groups(client, module): + option_groups = list() + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + + if module.params.get('marker'): + params['Marker'] = module.params.get('marker') + + if module.params.get('max_records'): + params['MaxRecords'] = module.params.get('max_records') + if params['MaxRecords'] > 100: + module.fail_json(msg="The maximum number of records to include in the response is 100.") + + params['EngineName'] = module.params.get('engine_name') + params['MajorEngineVersion'] = module.params.get('major_engine_version') + + try: + result = _describe_option_groups(client, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe option groups.") + + for option_group in result['OptionGroupsList']: + # Turn the boto3 result into ansible_friendly_snaked_names + converted_option_group = camel_dict_to_snake_dict(option_group) + converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn']) + option_groups.append(converted_option_group) + + return option_groups + + +def main(): + argument_spec = dict( + option_group_name=dict(default='', type='str'), + marker=dict(type='str'), + max_records=dict(type='int', default=100), + engine_name=dict(type='str', default=''), + major_engine_version=dict(type='str', default=''), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['option_group_name', 'engine_name'], + ['option_group_name', 'major_engine_version'], + ], + required_together=[ + ['engine_name', 'major_engine_version'], + ], + ) + + # Validate Requirements + try: + connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + results = list_option_groups(connection, module) + + module.exit_json(result=results) + + +if __name__ == '__main__': + main() From c274ab95f2eb844c58dc7ba88d4c1b2ea5158d59 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 25 Oct 2021 21:32:17 +0100 Subject: [PATCH 333/683] Fix cloudfront_info pagination bug (#780) Fix cloudfront_info pagination bug SUMMARY Currently the cloudfront_info module is using a bespoke paginator function, which is causing some issues
when over 100 distributions exist (see linked issue), when in fact it can be easily switched to a native boto3 paginator that would fix the bug reported in the linked issue. Fixes #769 ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_info ADDITIONAL INFORMATION Pagination docs: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#paginators Tests following steps from linked issue: ran a ansible task: - hosts: localhost connection: local gather_facts: false tasks: - name: "Check if distribution already exists" cloudfront_info: distribution: true domain_name_alias: "XX" register: distribution_data Results: TASK [Check if distribution already exists] ************************************************************************************************************************************************************************************************************************************ task path: /root/test/test.yml:6 <127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: root <127.0.0.1> EXEC /bin/sh -c 'echo ~root && sleep 0' <127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir "` echo /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166 `" && echo ansible-tmp-1635164453.7083595-389-139483031845166="` echo /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166 `" ) && sleep 0' Using module file /root/test/library/cloudfront_info.py <127.0.0.1> PUT /root/.ansible/tmp/ansible-local-3861wn9tjv7/tmp6o0jv2s2 TO /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166/AnsiballZ_cloudfront_info.py <127.0.0.1> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166/ /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166/AnsiballZ_cloudfront_info.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c '/usr/bin/python3 /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166/AnsiballZ_cloudfront_info.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1635164453.7083595-389-139483031845166/ > /dev/null 2>&1 && sleep 0' ok: [localhost] => { "changed": false, "cloudfront": { "XXXXXX": { "Distribution": { "ARN": "arn:aws:cloudfront::XXXXXXXX:distribution/XXXXXX", "ActiveTrustedKeyGroups": { "Enabled": false, "Quantity": 0 }, "ActiveTrustedSigners": { "Enabled": false, "Quantity": 0 }, "AliasICPRecordals": [ { "CNAME": "XXXX", "ICPRecordalStatus": "APPROVED" } ], ... 
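For reference, the native pattern the patch switches to looks roughly like this (a minimal sketch; the client setup and variable names are illustrative, not part of the patch):

import boto3

client = boto3.client('cloudfront')  # assumes credentials and region come from the environment
# A native paginator follows NextMarker automatically, so build_full_result()
# merges every page, including beyond the 100-item page size that the old
# hand-rolled loop mishandled.
paginator = client.get_paginator('list_distributions')
result = paginator.paginate().build_full_result()
distributions = result.get('DistributionList', {}).get('Items', [])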
Reviewed-by: Mark Chappell Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz Reviewed-by: None --- cloudfront_info.py | 111 +++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 60 deletions(-) diff --git a/cloudfront_info.py b/cloudfront_info.py index df42ed0d1ac..be8481a40ae 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -269,6 +269,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class CloudFrontServiceManager: @@ -278,64 +279,72 @@ def __init__(self, module): self.module = module try: - self.client = module.client('cloudfront') + self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') def get_distribution(self, distribution_id): try: - func = partial(self.client.get_distribution, Id=distribution_id) - return self.paginated_response(func) + distribution = self.client.get_distribution(aws_retry=True, Id=distribution_id) + return distribution except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing distribution") def get_distribution_config(self, distribution_id): try: - func = partial(self.client.get_distribution_config, Id=distribution_id) - return self.paginated_response(func) + distribution = self.client.get_distribution_config(aws_retry=True, Id=distribution_id) + return distribution except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing distribution configuration") def get_origin_access_identity(self, origin_access_identity_id): try: - func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id) - return self.paginated_response(func) + origin_access_identity = self.client.get_cloud_front_origin_access_identity(aws_retry=True, Id=origin_access_identity_id) + return origin_access_identity except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing origin access identity") def get_origin_access_identity_config(self, origin_access_identity_id): try: - func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id) - return self.paginated_response(func) + origin_access_identity = self.client.get_cloud_front_origin_access_identity_config(aws_retry=True, Id=origin_access_identity_id) + return origin_access_identity except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") def get_invalidation(self, distribution_id, invalidation_id): try: - func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id) - return self.paginated_response(func) + invalidation = self.client.get_invalidation(aws_retry=True, DistributionId=distribution_id, Id=invalidation_id) + return invalidation except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing invalidation") def get_streaming_distribution(self, distribution_id): try: - func = 
partial(self.client.get_streaming_distribution, Id=distribution_id) - return self.paginated_response(func) + streaming_distribution = self.client.get_streaming_distribution(aws_retry=True, Id=distribution_id) + return streaming_distribution except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing streaming distribution") def get_streaming_distribution_config(self, distribution_id): try: - func = partial(self.client.get_streaming_distribution_config, Id=distribution_id) - return self.paginated_response(func) + streaming_distribution = self.client.get_streaming_distribution_config(aws_retry=True, Id=distribution_id) + return streaming_distribution except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error describing streaming distribution") + # Split out paginator to allow for the backoff decorator to function + @AWSRetry.jittered_backoff() + def _paginated_result(self, paginator_name, **params): + paginator = self.client.get_paginator(paginator_name) + results = paginator.paginate(**params).build_full_result() + return results + def list_origin_access_identities(self): try: - func = partial(self.client.list_cloud_front_origin_access_identities) - origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList') - if origin_access_identity_list['Quantity'] > 0: + results = self._paginated_result('list_cloud_front_origin_access_identities') + origin_access_identity_list = results.get('CloudFrontOriginAccessIdentityList', {'Items': []}) + + if len(origin_access_identity_list['Items']) > 0: return origin_access_identity_list['Items'] return {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -343,12 +352,14 @@ def list_origin_access_identities(self): def list_distributions(self, keyed=True): try: - func = partial(self.client.list_distributions) - distribution_list = self.paginated_response(func, 'DistributionList') - if distribution_list['Quantity'] == 0: - return {} - else: + results = self._paginated_result('list_distributions') + distribution_list = results.get('DistributionList', {'Items': []}) + + if len(distribution_list['Items']) > 0: distribution_list = distribution_list['Items'] + else: + return {} + if not keyed: return distribution_list return self.keyed_list_helper(distribution_list) @@ -357,21 +368,23 @@ def list_distributions(self, keyed=True): def list_distributions_by_web_acl_id(self, web_acl_id): try: - func = partial(self.client.list_distributions_by_web_acl_id, WebAclId=web_acl_id) - distribution_list = self.paginated_response(func, 'DistributionList') - if distribution_list['Quantity'] == 0: - return {} - else: + results = self.client.list_distributions_by_web_acl_id(aws_retry=True, WebACLId=web_acl_id) + distribution_list = results.get('DistributionList', {'Items': []}) + + if len(distribution_list['Items']) > 0: distribution_list = distribution_list['Items'] + else: + return {} return self.keyed_list_helper(distribution_list) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") def list_invalidations(self, distribution_id): try: - func = partial(self.client.list_invalidations, DistributionId=distribution_id) - invalidation_list = self.paginated_response(func, 'InvalidationList') - if invalidation_list['Quantity'] > 0: + results =
self._paginated_result('list_invalidations', DistributionId=distribution_id) + invalidation_list = results.get('InvalidationList', {'Items': []}) + + if len(invalidation_list['Items']) > 0: return invalidation_list['Items'] return {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -379,12 +392,14 @@ def list_invalidations(self, distribution_id): def list_streaming_distributions(self, keyed=True): try: - func = partial(self.client.list_streaming_distributions) - streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList') - if streaming_distribution_list['Quantity'] == 0: - return {} - else: + results = self._paginated_result('list_streaming_distributions') + streaming_distribution_list = results.get('StreamingDistributionList', {'Items': []}) + + if len(streaming_distribution_list['Items']) > 0: streaming_distribution_list = streaming_distribution_list['Items'] + else: + return {} + if not keyed: return streaming_distribution_list return self.keyed_list_helper(streaming_distribution_list) @@ -484,30 +499,6 @@ def get_aliases_from_distribution_id(self, distribution_id): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") - def paginated_response(self, func, result_key=""): - ''' - Returns expanded response for paginated operations. - The 'result_key' is used to define the concatenated results that are combined from each paginated response. - ''' - args = dict() - results = dict() - loop = True - while loop: - response = func(**args) - if result_key == "": - result = response - result.pop('ResponseMetadata', None) - else: - result = response.get(result_key) - results.update(result) - args['Marker'] = response.get('NextMarker') - for key in response.keys(): - if key.endswith('List'): - args['Marker'] = response[key].get('NextMarker') - break - loop = args['Marker'] is not None - return results - def keyed_list_helper(self, list_to_key): keyed_list = dict() for item in list_to_key: From 10348854861f6d10e154a3ead496d5091a506049 Mon Sep 17 00:00:00 2001 From: Khanh Ngo Date: Wed, 27 Oct 2021 07:07:24 +0200 Subject: [PATCH 334/683] Add tag query to lambda_info module (#375) Add tag query to lambda_info module SUMMARY This PR to add tags query into the lambda_info module. It is helpful to be able to query the Lambda function's tag list so we can integrate future actions. This PR also fix some missing parameter ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME lambda_info ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Jill R Reviewed-by: None --- lambda_info.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/lambda_info.py b/lambda_info.py index 04cae251be6..c76ecba3d1e 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -21,7 +21,7 @@ query: description: - Specifies the resource type for which to gather information. Leave blank to retrieve all information. 
- choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] + choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ] default: "all" type: str function_name: @@ -164,6 +164,7 @@ def all_details(client, module): lambda_info[function_name].update(policy_details(client, module)[function_name]) lambda_info[function_name].update(version_details(client, module)[function_name]) lambda_info[function_name].update(mapping_details(client, module)[function_name]) + lambda_info[function_name].update(tags_details(client, module)[function_name]) else: lambda_info.update(config_details(client, module)) @@ -199,6 +200,7 @@ def config_details(client, module): functions = dict() for func in lambda_info.pop('function_list', []): + func['tags'] = client.get_function(FunctionName=func['FunctionName']).get('Tags', {}) functions[func['FunctionName']] = camel_dict_to_snake_dict(func) return functions @@ -288,6 +290,31 @@ def version_details(client, module): return {function_name: camel_dict_to_snake_dict(lambda_info)} +def tags_details(client, module): + """ + Returns tag details for one or all lambda functions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_info = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) + else: + module.fail_json(msg='Parameter function_name required for query=tags.') + + return {function_name: camel_dict_to_snake_dict(lambda_info)} + + def main(): """ Main entry point. @@ -296,8 +323,8 @@ def main(): """ argument_spec = dict( function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), - event_source_arn=dict(required=False, default=None) + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default='all'), + event_source_arn=dict(required=False, default=None), ) module = AnsibleAWSModule( @@ -326,6 +353,7 @@ def main(): mappings='mapping_details', policy='policy_details', versions='version_details', + tags='tags_details', ) this_module_function = globals()[invocations[module.params['query']]] From 0aca75e0a789bee432f5a827f4e2de7c50561dfa Mon Sep 17 00:00:00 2001 From: Berend de Boer Date: Wed, 27 Oct 2021 18:50:00 +1300 Subject: [PATCH 335/683] efs - Add support to specify the number of days to transition to inactive storage (#522) efs - Add support to specify the number of days to transition to inactive storage SUMMARY Support setting EFS lifecycle policy. 
ISSUE TYPE Feature Pull Request COMPONENT NAME community.aws.efs ADDITIONAL INFORMATION This is now possible: - name: Set a lifecycle policy community.aws.efs: state: present name: myTestEFS transition_to_ia: 7 targets: - subnet_id: subnet-7654fdca security_groups: [ "sg-4c5d6f7a" ] - name: Remove a lifecycle policy community.aws.efs: state: present name: myTestEFS transition_to_ia: None targets: - subnet_id: subnet-7654fdca security_groups: [ "sg-4c5d6f7a" ] Reviewed-by: Mark Chappell Reviewed-by: Jill R Reviewed-by: None --- efs.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/efs.py b/efs.py index 0cf4f88c1d7..a67c83be3c7 100644 --- a/efs.py +++ b/efs.py @@ -96,6 +96,15 @@ - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary. default: 0 type: int + transition_to_ia: + description: + - How many days before objects transition to the lower-cost EFS Infrequent Access (IA) storage class. + - If set to the string C(None), any existing lifecyle policy will be removed, and objects will not transition + to an IA storage class. + - If this parameter is absent, any existing lifecycle policy will not be affected. + choices: ['None', '7', '14', '30', '60', '90'] + type: str + version_added: 2.1.0 extends_documentation_fragment: - amazon.aws.aws @@ -125,6 +134,24 @@ - subnet_id: subnet-7654fdca security_groups: [ "sg-4c5d6f7a" ] +- name: Set a lifecycle policy + community.aws.efs: + state: present + name: myTestEFS + transition_to_ia: 7 + targets: + - subnet_id: subnet-7654fdca + security_groups: [ "sg-4c5d6f7a" ] + +- name: Remove a lifecycle policy + community.aws.efs: + state: present + name: myTestEFS + transition_to_ia: None + targets: + - subnet_id: subnet-7654fdca + security_groups: [ "sg-4c5d6f7a" ] + - name: Deleting EFS community.aws.efs: state: absent @@ -459,6 +486,27 @@ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mi self.module.fail_json_aws(e, msg="Unable to update file system.") return changed + def update_lifecycle_policy(self, name, transition_to_ia): + """ + Update filesystem with new lifecycle policy. 
+ """ + changed = False + state = self.get_file_system_state(name) + if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: + fs_id = self.get_file_system_id(name) + current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id) + if transition_to_ia == 'None': + LifecyclePolicies = [] + else: + LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}] + if current_policies.get('LifecyclePolicies') != LifecyclePolicies: + response = self.connection.put_lifecycle_configuration( + FileSystemId=fs_id, + LifecyclePolicies=LifecyclePolicies, + ) + changed = True + return changed + def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps): """ Change attributes (mount targets and tags) of filesystem by name @@ -680,6 +728,7 @@ def main(): tags=dict(required=False, type="dict", default={}), targets=dict(required=False, type="list", default=[], elements='dict'), performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), + transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None), throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None), provisioned_throughput_in_mibps=dict(required=False, type='float'), wait=dict(required=False, type="bool", default=False), @@ -707,6 +756,7 @@ def main(): kms_key_id = module.params.get('kms_key_id') performance_mode = performance_mode_translations[module.params.get('performance_mode')] purge_tags = module.params.get('purge_tags') + transition_to_ia = module.params.get('transition_to_ia') throughput_mode = module.params.get('throughput_mode') provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps') state = str(module.params.get('state')).lower() @@ -720,6 +770,8 @@ def main(): changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets, throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed + if transition_to_ia: + changed |= connection.update_lifecycle_policy(name, transition_to_ia) result = first_or_default(connection.get_file_systems(CreationToken=name)) elif state == 'absent': From d37b8c53f0c2c317480434a8f38abf95a44b3ac3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 27 Oct 2021 12:03:05 +0200 Subject: [PATCH 336/683] elb_instance - boto3 migration (#773) elb_instance - boto3 migration SUMMARY Migrate elb_instance to boto3 ISSUE TYPE Feature Pull Request COMPONENT NAME elb_instance ADDITIONAL INFORMATION Depends-On: ansible/ansible-zuul-jobs#1200 Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: None --- elb_instance.py | 365 ++++++++++++++++++++++++++---------------------- 1 file changed, 197 insertions(+), 168 deletions(-) diff --git a/elb_instance.py b/elb_instance.py index b234031ee24..6116207866b 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -14,8 +14,6 @@ description: - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. 
author: "John Jarvis (@jarv)" options: @@ -27,13 +25,13 @@ type: str instance_id: description: - - EC2 Instance ID + - EC2 Instance ID. required: true type: str ec2_elbs: description: - - List of ELB names, required for registration. - - The ec2_elbs fact should be used if there was a previous de-register. + - List of ELB names + - Required when I(state=present). type: list elements: str enable_availability_zone: @@ -56,11 +54,12 @@ - Ignored when I(wait=no). default: 0 type: int +notes: +- The ec2_elb fact currently set by this module has been deprecated and will no + longer be set after release 4.0.0 of the collection. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto >= 2.49.0 ''' EXAMPLES = r""" @@ -70,6 +69,7 @@ community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" state: absent + register: deregister_instances delegate_to: localhost roles: - myrole @@ -77,90 +77,120 @@ - name: Instance Register community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" + ec2_elbs: "{{ deregister_instances.updated_elbs }}" state: present delegate_to: localhost - loop: "{{ ec2_elbs }}" """ -import time +RETURN = ''' +updated_elbs: + description: A list of ELB names that the instance has been added to or removed from. + returned: always + type: list + elements: str +''' try: - import boto - import boto.ec2 - import boto.ec2.autoscale - import boto.ec2.elb + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class ElbManager: """Handles EC2 instance ELB registration and de-registration""" - def __init__(self, module, instance_id=None, ec2_elbs=None, - region=None, **aws_connect_params): + def __init__(self, module, instance_id=None, ec2_elbs=None): + retry_decorator = AWSRetry.jittered_backoff() self.module = module + self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator) + self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator) + self.client_elb = module.client('elb', retry_decorator=retry_decorator) self.instance_id = instance_id - self.region = region - self.aws_connect_params = aws_connect_params self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False + self.updated_elbs = set() def deregister(self, wait, timeout): """De-register the instance from all ELBs and wait for the ELB to report it out-of-service""" for lb in self.lbs: - initial_state = self._get_instance_health(lb) - if initial_state is None: - # Instance isn't registered with this load - # balancer. Ignore it and try the next one. 
+ instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id not in instance_ids: continue - # The instance is not associated with any load balancer so nothing to do - if not self._get_instance_lbs(): - return + self.updated_elbs.add(lb['LoadBalancerName']) + + if self.module.check_mode: + self.changed = True + continue - lb.deregister_instances([self.instance_id]) + try: + self.client_elb.deregister_instances_from_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer', + load_balancer=lb, instance=self.instance_id) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's # already OutOfService is being deregistered. self.changed = True - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) + for lb in self.lbs: + self._await_elb_instance_state(lb, 'Deregistered', timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - initial_state = self._get_instance_health(lb) + instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id in instance_ids: + continue - if enable_availability_zone: - self._enable_availailability_zone(lb) + self.updated_elbs.add(lb['LoadBalancerName']) - lb.register_instances([self.instance_id]) + if enable_availability_zone: + self.changed |= self._enable_availailability_zone(lb) - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state, timeout) - else: - # We cannot assume no change was made if we don't wait - # to find out + if self.module.check_mode: self.changed = True + continue + + try: + self.client_elb.register_instances_with_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to register instance with load balancer', + load_balancer=lb, instance=self.instance_id) + + self.changed = True + + for lb in self.lbs: + self._await_elb_instance_state(lb, 'InService', timeout) + + @AWSRetry.jittered_backoff() + def _describe_elbs(self, **params): + paginator = self.client_elb.get_paginator('describe_load_balancers') + results = paginator.paginate(**params).build_full_result() + return results['LoadBalancerDescriptions'] def exists(self, lbtest): """ Verify that the named ELB actually exists """ found = False for lb in self.lbs: - if lb.name == lbtest: + if lb['LoadBalancerName'] == lbtest: found = True break return found @@ -170,63 +200,59 @@ def _enable_availailability_zone(self, lb): Returns True if the zone was enabled or False if no change was made. 
lb: load balancer""" instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False + desired_zone = instance['Placement']['AvailabilityZone'] - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" + if desired_zone in lb['AvailabilityZones']: + return False - wait_timeout = time.time() + timeout - while True: - instance_state = self._get_instance_health(lb) + if self.module.check_mode: + return True - if not instance_state: - msg = ("The instance %s could not be put in service on %s." - " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) + try: + self.client_elb.enable_availability_zones_for_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + AvailabilityZones=[desired_zone], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers', + load_balancer=lb, zone=desired_zone) + + return True + + def _await_elb_instance_state(self, lb, awaited_state, timeout): + """Wait for an ELB to change state""" + if self.module.check_mode: + return + + initial_state = self._get_instance_health(lb) + + if awaited_state == initial_state: + return + + if awaited_state == 'InService': + waiter = self.client_elb.get_waiter('instance_in_service') + elif awaited_state == 'Deregistered': + waiter = self.client_elb.get_waiter('instance_deregistered') + elif awaited_state == 'OutOfService': + waiter = self.client_elb.get_waiter('instance_deregistered') + else: + self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state) - if instance_state.state == awaited_state: - # Check the current state against the initial state, and only set - # changed if they are different. - if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks and continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance" - and time.time() >= wait_timeout): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availability zone enabled. The exact reason why is - # described in InstantState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. 
- return (instance_state and 'pending' in instance_state.description) + try: + waiter.wait( + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + WaiterConfig={'Delay': 1, 'MaxAttempts': timeout}, + ) + except botocore.exceptions.WaiterError as e: + self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state', + awaited_state=awaited_state) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state', + awaited_state=awaited_state) + + return def _get_instance_health(self, lb): """ @@ -234,13 +260,20 @@ def _get_instance_health(self, lb): certain error conditions. """ try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError as e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status + status = self.client_elb.describe_instance_health( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{'InstanceId': self.instance_id}], + )['InstanceStates'] + except is_boto3_error_code('InvalidInstance'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg='Failed to get instance health') + + if not status: + return None + + return status[0]['State'] def _get_instance_lbs(self, ec2_elbs=None): """Returns a list of ELBs attached to self.instance_id @@ -248,36 +281,29 @@ def _get_instance_lbs(self, ec2_elbs=None): for elb lookup instead of returning what elbs are attached to self.instance_id""" + list_params = dict() if not ec2_elbs: ec2_elbs = self._get_auto_scaling_group_lbs() - try: - elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) + if ec2_elbs: + list_params['LoadBalancerNames'] = ec2_elbs - elbs = [] - marker = None - while True: - try: - newelbs = elb.get_all_load_balancers(marker=marker) - marker = newelbs.next_marker - elbs.extend(newelbs) - if not marker: - break - except TypeError: - # Older version of boto do not allow for params - elbs = elb.get_all_load_balancers() - break + try: + elbs = self._describe_elbs(**list_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to describe load balancers') if ec2_elbs: - lbs = sorted([lb for lb in elbs if lb.name in ec2_elbs], key=lambda lb: lb.__repr__()) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) + return elbs + + # If ec2_elbs wasn't specified, then filter out LBs we're not a member + # of. 
+ lbs = [] + for lb in elbs: + instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id in instance_ids: + lbs.append(lb) + return lbs def _get_auto_scaling_group_lbs(self): @@ -285,34 +311,42 @@ def _get_auto_scaling_group_lbs(self): indirectly through its auto scaling group membership""" try: - asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) + asg_instances = self.client_asg.describe_auto_scaling_instances( + aws_retry=True, + InstanceIds=[self.instance_id])['AutoScalingInstances'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) if len(asg_instances) > 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") if not asg_instances: - asg_elbs = [] - else: - asg_name = asg_instances[0].group_name + # Instance isn't a member of an ASG + return [] + + asg_name = asg_instances[0]['AutoScalingGroupName'] - asgs = asg.get_all_groups([asg_name]) - if len(asg_instances) != 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group.") + try: + asg_instances = self.client_asg.describe_auto_scaling_groups( + aws_retry=True, + AutoScalingGroupNames=[asg_name])['AutoScalingGroups'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - asg_elbs = asgs[0].load_balancers + if len(asg_instances) != 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - return asg_elbs + return asg_instances[0]['LoadBalancerNames'] def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" + """Returns the description of an instance""" try: - ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - return ec2.get_only_instances(instance_ids=[self.instance_id])[0] + result = self.client_ec2.describe_instances( + aws_retry=True, + InstanceIds=[self.instance_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + return result['Reservations'][0]['Instances'][0] def main(): @@ -324,48 +358,43 @@ def main(): wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'required': False, 'default': 0, 'type': 'int'}, ) + required_if = [ + ('state', 'present', ['ec2_elbs']), + ] module = AnsibleAWSModule( argument_spec=argument_spec, + required_if=required_if, supports_check_mode=True, - check_boto3=False, ) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - ec2_elbs = module.params['ec2_elbs'] wait = module.params['wait'] enable_availability_zone = module.params['enable_availability_zone'] timeout = module.params['wait_timeout'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - instance_id = 
module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) + + elb_man = ElbManager(module, instance_id, ec2_elbs) if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): - msg = "ELB %s does not exist" % elb - module.fail_json(msg=msg) + module.fail_json(msg="ELB {0} does not exist".format(elb)) - if not module.check_mode: - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': - elb_man.deregister(wait, timeout) + if module.params['state'] == 'present': + elb_man.register(wait, enable_availability_zone, timeout) + elif module.params['state'] == 'absent': + elb_man.deregister(wait, timeout) - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) + # XXX We're not a _facts module, we shouldn't be returning a fact and polluting + # the namespace + ansible_facts = {'ec2_elbs': [lb['LoadBalancerName'] for lb in elb_man.lbs]} - module.exit_json(**ec2_facts_result) + module.exit_json( + changed=elb_man.changed, + ansible_facts=ansible_facts, + updated_elbs=list(elb_man.updated_elbs), + ) if __name__ == '__main__': From ef29d8ec46c98550c5ae8f189624c1fdf054c16f Mon Sep 17 00:00:00 2001 From: James McClune Date: Wed, 27 Oct 2021 13:57:26 -0400 Subject: [PATCH 337/683] reflected docs change in sqs_queue.py (#782) changed `kms_master_key_id` return value from `always` SUMMARY Changed the kms_master_key_id return value from always to if value exists. The boto3 response for sqs_queue should only return a kms_master_key_id if a value for that key exists. ISSUE TYPE Docs Pull Request COMPONENT NAME sqs_queue Reviewed-by: None Reviewed-by: Mark Chappell --- sqs_queue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqs_queue.py b/sqs_queue.py index 45a8ccfc079..a7cd9b8ada3 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -119,7 +119,7 @@ kms_master_key_id: description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. type: str - returned: always + returned: if value exists sample: alias/MyAlias kms_data_key_reuse_period_seconds: description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. From 2a4aea7eab529a7fd1c8b995447a7014556976b3 Mon Sep 17 00:00:00 2001 From: James McClune Date: Wed, 27 Oct 2021 13:57:31 -0400 Subject: [PATCH 338/683] add `KmsMasterKeyId` as attribute option for boto3 call (#762) add `KmsMasterKeyId` as attribute option for boto3 call SUMMARY When creating a SQS queue, passing a value for KmsMasterKeyId does not enable SSE. This PR fixes how attributes, like KmsMasterKeyId, are passed into the boto3 invocation.
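The boto3 call this boils down to looks roughly like the following minimal sketch (queue name and key alias are illustrative):

import boto3

sqs = boto3.client('sqs')
# All attributes must be assembled into a single dict before the call;
# including KmsMasterKeyId here is what enables server-side encryption.
attributes = {'KmsMasterKeyId': 'alias/aws/sqs'}
response = sqs.create_queue(QueueName='example-queue', Attributes=attributes)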
Fixes: #698 ISSUE TYPE Bugfix Pull Request COMPONENT NAME sqs_queue.py Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: James McClune Reviewed-by: None --- sqs_queue.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/sqs_queue.py b/sqs_queue.py index a7cd9b8ada3..2a1de9d0bd0 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -290,6 +290,7 @@ def describe_queue(client, queue_url): def create_or_update_sqs_queue(client, module): is_fifo = (module.params.get('queue_type') == 'fifo') + kms_master_key_id = module.params.get('kms_master_key_id') queue_name = get_queue_name(module, is_fifo) result = dict( name=queue_name, @@ -300,8 +301,14 @@ def create_or_update_sqs_queue(client, module): queue_url = get_queue_url(client, queue_name) result['queue_url'] = queue_url + # Create a dict() to hold attributes that will be passed to boto3 + create_attributes = {} + if not queue_url: - create_attributes = {'FifoQueue': 'true'} if is_fifo else {} + if is_fifo: + create_attributes['FifoQueue'] = "True" + if kms_master_key_id: + create_attributes['KmsMasterKeyId'] = kms_master_key_id result['changed'] = True if module.check_mode: return result @@ -385,7 +392,7 @@ def update_sqs_queue(module, client, queue_url): if changed and not check_mode: client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True) - return changed, existing_attributes.get('queue_arn'), + return changed, existing_attributes.get('queue_arn') def delete_sqs_queue(client, module): From 95cb587c47355d9cd75f0122bc3c8b1136233e32 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 28 Oct 2021 12:58:02 +0200 Subject: [PATCH 339/683] Rename rds_snapshot to rds_instance_snapshot (#783) Rename rds_snapshot to rds_instance_snapshot SUMMARY Rename rds_snapshot to rds_instance_snapshot since rds_snapshot only handles snapshotting of DB instances. A new module for snapshotting RDS clusters will be added in a future PR. ISSUE TYPE New Module Pull Request COMPONENT NAME rds_snapshot Reviewed-by: Mark Chappell Reviewed-by: None --- rds_snapshot.py => rds_instance_snapshot.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename rds_snapshot.py => rds_instance_snapshot.py (99%) diff --git a/rds_snapshot.py b/rds_instance_snapshot.py similarity index 99% rename from rds_snapshot.py rename to rds_instance_snapshot.py index 0ca957c1b68..45915082739 100644 --- a/rds_snapshot.py +++ b/rds_instance_snapshot.py @@ -10,7 +10,7 @@ DOCUMENTATION = ''' --- -module: rds_snapshot +module: rds_instance_snapshot version_added: 1.0.0 short_description: manage Amazon RDS snapshots. description: @@ -66,12 +66,12 @@ EXAMPLES = ''' - name: Create snapshot - community.aws.rds_snapshot: + community.aws.rds_instance_snapshot: db_instance_identifier: new-database db_snapshot_identifier: new-database-snapshot - name: Delete snapshot - community.aws.rds_snapshot: + community.aws.rds_instance_snapshot: db_snapshot_identifier: new-database-snapshot state: absent ''' From 87db3384473916126f3102922251a9186d7b07ea Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 10 Nov 2021 08:10:31 +0100 Subject: [PATCH 340/683] Fix docs issues. (#797) Fix docs issues. SUMMARY Fix some docs issues exhibited by ansible/ansible#76262. 
ISSUE TYPE Docs Pull Request COMPONENT NAME some modules Reviewed-by: Brian Scholer Reviewed-by: Mark Chappell Reviewed-by: None --- ec2_elb_info.py | 2 +- iam.py | 4 +++- rds.py | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ec2_elb_info.py b/ec2_elb_info.py index 8b207111b60..ab74e11e0d6 100644 --- a/ec2_elb_info.py +++ b/ec2_elb_info.py @@ -24,7 +24,7 @@ deprecated: removed_in: 3.0.0 why: The ec2_elb_info is based upon a deprecated version of the AWS SDK. - alternative: Use M(elb_classic_lb_info). + alternative: Use M(community.aws.elb_classic_lb_info). short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - Gather information about EC2 Elastic Load Balancers in AWS diff --git a/iam.py b/iam.py index 4dd11aa0672..52aca2650ab 100644 --- a/iam.py +++ b/iam.py @@ -13,7 +13,9 @@ deprecated: removed_in: 3.0.0 why: The iam module is based upon a deprecated version of the AWS SDK. - alternative: Use M(iam_user), M(iam_group), M(iam_role), M(iam_policy) and M(iam_managed_policy) modules. + alternative: >- + Use M(community.aws.iam_user), M(community.aws.iam_group), M(community.aws.iam_role), M(community.aws.iam_policy) + and M(community.aws.iam_managed_policy) modules. short_description: Manage IAM users, groups, roles and keys description: diff --git a/rds.py b/rds.py index bfbf0019f6b..08e158c9395 100644 --- a/rds.py +++ b/rds.py @@ -13,7 +13,7 @@ deprecated: removed_in: 3.0.0 why: The rds module is based upon a deprecated version of the AWS SDK. - alternative: Use M(rds_instance), M(rds_instance_info), and M(rds_snapshot). + alternative: Use M(community.aws.rds_instance), M(community.aws.rds_instance_info), and M(community.aws.rds_instance_snapshot). short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts description: - Creates, deletes, or modifies rds resources. From d969b3759abd6b59ad79dcdc30e30da486d359d2 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 12 Nov 2021 20:31:02 +0100 Subject: [PATCH 341/683] route53: fix empty result set (#799) route53: fix empty result set SUMMARY Closes: #798 Using state: get on none existing records results in an error. ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53 ADDITIONAL INFORMATION --- - hosts: localhost connection: local tasks: - community.aws.route53: state: get zone: xn--mitlinuxwrdasnichtpassiert-ohc.de record: doesnotexist.xn--mitlinuxwrdasnichtpassiert-ohc.de. 
type: A register: test - debug: var: test results in TASK [community.aws.route53] ******************************************************************************************************************************************************************************************************** task path: /home/m/git/lekker/iac/798.yml:6 <127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: m <127.0.0.1> EXEC /bin/sh -c 'echo ~m && sleep 0' <127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /home/m/.ansible/tmp `"&& mkdir "` echo /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765 `" && echo ansible-tmp-1636703337.6553104-21549-57181038135765="` echo /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765 `" ) && sleep 0' Using module file /home/m/.local/lib/python3.9/site-packages/ansible_collections/community/aws/plugins/modules/route53.py <127.0.0.1> PUT /home/m/.ansible/tmp/ansible-local-21466oog6jc4o/tmpeo_xv0k3 TO /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py <127.0.0.1> EXEC /bin/sh -c 'chmod u+x /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/ /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c '/usr/bin/python /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c 'rm -f -r /home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/ > /dev/null 2>&1 && sleep 0' The full traceback is: Traceback (most recent call last): File "/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py", line 100, in _ansiballz_main() File "/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py", line 92, in _ansiballz_main invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS) File "/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py", line 40, in invoke_module runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.route53', init_globals=dict(_module_fqn='ansible_collections.community.aws.plugins.modules.route53', _modlib_path=modlib_path), File "/usr/lib/python3.9/runpy.py", line 210, in run_module return _run_module_code(code, init_globals, run_name, mod_spec) File "/usr/lib/python3.9/runpy.py", line 97, in _run_module_code _run_code(code, mod_globals, init_globals, File "/usr/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible_collections/community/aws/plugins/modules/route53.py", line 698, in File "/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible_collections/community/aws/plugins/modules/route53.py", line 636, in main File "/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible/module_utils/common/dict_transformations.py", line 42, in camel_dict_to_snake_dict AttributeError: 'NoneType' object has no attribute 'items' fatal: [localhost]: FAILED! 
=> { "changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py\", line 100, in \n _ansiballz_main()\n File \"/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py\", line 92, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/home/m/.ansible/tmp/ansible-tmp-1636703337.6553104-21549-57181038135765/AnsiballZ_route53.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.route53', init_globals=dict(_module_fqn='ansible_collections.community.aws.plugins.modules.route53', _modlib_path=modlib_path),\n File \"/usr/lib/python3.9/runpy.py\", line 210, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/lib/python3.9/runpy.py\", line 97, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/usr/lib/python3.9/runpy.py\", line 87, in _run_code\n exec(code, run_globals)\n File \"/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible_collections/community/aws/plugins/modules/route53.py\", line 698, in \n File \"/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible_collections/community/aws/plugins/modules/route53.py\", line 636, in main\n File \"/tmp/ansible_community.aws.route53_payload_aekaeeo1/ansible_community.aws.route53_payload.zip/ansible/module_utils/common/dict_transformations.py\", line 42, in camel_dict_to_snake_dict\nAttributeError: 'NoneType' object has no attribute 'items'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } Reviewed-by: Mark Chappell Reviewed-by: None --- route53.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/route53.py b/route53.py index 964020257db..c60886d3c99 100644 --- a/route53.py +++ b/route53.py @@ -637,6 +637,11 @@ def main(): ns = get_hosted_zone_nameservers(route53, zone_id) formatted_aws = format_record(aws_record, zone_in, zone_id) + + if formatted_aws is None: + # record does not exist + module.exit_json(changed=False, set=[], nameservers=ns, resource_record_sets=[]) + rr_sets = [camel_dict_to_snake_dict(aws_record)] module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets) From 59926c5185987dee4d6b0d10b595896f26c3d754 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 12 Nov 2021 21:01:50 +0100 Subject: [PATCH 342/683] fix diff mode (#802) Route53: fix diff mode when state: absent SUMMARY Fix diff mode when state: absend ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53 Reviewed-by: Felix Fontein Reviewed-by: None --- route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route53.py b/route53.py index c60886d3c99..17ed261aa14 100644 --- a/route53.py +++ b/route53.py @@ -697,7 +697,7 @@ def main(): changed=True, diff=dict( before=formatted_aws, - after=formatted_record if command != 'delete' else {}, + after=formatted_record if command_in != 'delete' else {}, resource_record_sets=rr_sets, ), ) From 88694a99a06700db162a0719da6ea6e69e9ebdb3 Mon Sep 17 00:00:00 2001 From: James McClune Date: Fri, 19 Nov 2021 13:42:59 -0500 Subject: [PATCH 343/683] added clarification on SSE and `kms_master_key_id` (#785) added clarification on SSE and `kms_master_key_id` SUMMARY Added note about kms_master_key_id and how SSE is enabled automatically. 
ISSUE TYPE Docs Pull Request COMPONENT NAME sqs_queue Reviewed-by: Mark Chappell Reviewed-by: James McClune --- sqs_queue.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sqs_queue.py b/sqs_queue.py index 2a1de9d0bd0..79e19cbda9d 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -73,6 +73,7 @@ kms_master_key_id: description: - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. + - Specifying a valid I(kms_master_key_id) will enable encryption automatically. type: str kms_data_key_reuse_period_seconds: description: From da2f9802f8566e820ac41074b74b7f256bf39d11 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Mon, 22 Nov 2021 17:37:43 +0100 Subject: [PATCH 344/683] fix delete records without TTL (#801) fix delete records without TTL SUMMARY Closes #800 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53 Reviewed-by: Mark Chappell Reviewed-by: Felix Fontein Reviewed-by: Tiger Kaovilai Reviewed-by: Markus Bergholz Reviewed-by: None --- route53.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/route53.py b/route53.py index 17ed261aa14..4275d65b684 100644 --- a/route53.py +++ b/route53.py @@ -69,7 +69,6 @@ value: description: - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records. - - When deleting a record all values for the record must be specified or Route 53 will not delete it. type: list elements: str overwrite: @@ -513,8 +512,6 @@ def main(): required_if=( ('state', 'present', ['value']), ('state', 'create', ['value']), - ('state', 'absent', ['value']), - ('state', 'delete', ['value']), ), # failover, region and weight are mutually exclusive mutually_exclusive=[ @@ -607,6 +604,10 @@ def main(): 'HealthCheckId': health_check_in, 'SetIdentifier': identifier_in, }) + if command_in == 'delete' and aws_record is not None: + resource_record_set['TTL'] = aws_record.get('TTL') + if not resource_record_set['ResourceRecords']: + resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords') if alias_in: resource_record_set['AliasTarget'] = dict( From 1f93316b24d461e2f154d5fcbfad8bd33e81ea33 Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Thu, 25 Nov 2021 15:27:46 -0500 Subject: [PATCH 345/683] Add Python and Glue version parameters, add check mode (#480) Add Python and Glue version parameters, add check mode SUMMARY Add parameters for Python version and Glue version. The available Python and Glue versions can be found here: https://docs.aws.amazon.com/glue/latest/dg/add-job.html ISSUE TYPE Feature Pull Request COMPONENT NAME aws_glue_job ADDITIONAL INFORMATION Example: community.aws.aws_glue_job: - name: my-job description: My test job command_script_location: my-s3-bucket/script.py command_python_version: 3 glue_version: "2.0" role: MyGlueJobRole state: present Reviewed-by: Mark Chappell Reviewed-by: Ivan Chekaldin Reviewed-by: Jill R Reviewed-by: Alina Buzachis Reviewed-by: None --- aws_glue_job.py | 151 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 121 insertions(+), 30 deletions(-) diff --git a/aws_glue_job.py b/aws_glue_job.py index dac91ecc794..edca5d051d5 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -28,6 +28,12 @@ - The name of the job command. This must be 'glueetl'. default: glueetl type: str + command_python_version: + description: + - Python version being used to execute a Python shell job. + - AWS currently supports C('2') or C('3').
+ type: str + version_added: 2.2.0 command_script_location: description: - The S3 path to a script that executes a job. @@ -47,6 +53,11 @@ description: - Description of the job being defined. type: str + glue_version: + description: + - Glue version determines the versions of Apache Spark and Python that AWS Glue supports. + type: str + version_added: 1.5.0 max_concurrent_runs: description: - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when @@ -61,6 +72,18 @@ - The name you assign to this job definition. It must be unique in your account. required: true type: str + number_of_workers: + description: + - The number of workers of a defined workerType that are allocated when a job runs. + type: int + version_added: 1.5.0 + purge_tags: + description: + - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + - If the I(tags) parameter is not set then tags will not be modified. + default: true + type: bool + version_added: 2.2.0 role: description: - The name or ARN of the IAM role associated with this job. @@ -72,26 +95,22 @@ required: true choices: [ 'present', 'absent' ] type: str + tags: + description: + - A hash/dictionary of tags to be applied to the job. + - Remove completely or specify an empty dictionary to remove all tags. + type: dict + version_added: 2.2.0 timeout: description: - The job timeout in minutes. type: int - glue_version: - description: - - Glue version determines the versions of Apache Spark and Python that AWS Glue supports. - type: str - version_added: 1.5.0 worker_type: description: - The type of predefined worker that is allocated when a job runs. choices: [ 'Standard', 'G.1X', 'G.2X' ] type: str version_added: 1.5.0 - number_of_workers: - description: - - The number of workers of a defined workerType that are allocated when a job runs. - type: int - version_added: 1.5.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -103,7 +122,10 @@ # Create an AWS Glue job - community.aws.aws_glue_job: - command_script_location: s3bucket/script.py + command_script_location: "s3://s3bucket/script.py" + default_arguments: + "--extra-py-files": s3://s3bucket/script-package.zip + "--TempDir": "s3://s3bucket/temp/" name: my-glue-job role: my-iam-role state: present @@ -138,6 +160,11 @@ returned: when state is present type: str sample: mybucket/myscript.py + python_version: + description: Specifies the Python version. + returned: when state is present + type: str + sample: 3 connections: description: The connections used for this job. returned: when state is present @@ -158,6 +185,11 @@ returned: when state is present type: str sample: My first Glue job +glue_version: + description: Glue version. + returned: when state is present + type: str + sample: 2.0 job_name: description: The name of the AWS Glue job. 
returned: always @@ -213,6 +245,11 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info def _get_glue_job(connection, module, glue_job_name): @@ -224,9 +261,8 @@ def _get_glue_job(connection, module, glue_job_name): :param glue_job_name: Name of Glue job to get :return: boto3 Glue job dict or None if not found """ - try: - return connection.get_job(JobName=glue_job_name)['Job'] + return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job'] except is_boto3_error_code('EntityNotFoundException'): return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -241,7 +277,6 @@ def _compare_glue_job_params(user_params, current_params): :param current_params: the Glue job parameters currently configured :return: True if any parameter is mismatched else False """ - # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description # To counter this, add the key if it's missing with a blank value @@ -252,18 +287,25 @@ def _compare_glue_job_params(user_params, current_params): if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']: return True - if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: - return True - if 'Connections' in user_params and set(user_params['Connections']) != set(current_params['Connections']): + if 'Command' in user_params: + if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: + return True + if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']: + return True + if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']: return True - if 'DefaultArguments' in user_params and set(user_params['DefaultArguments']) != set(current_params['DefaultArguments']): + if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']: return True if 'Description' in user_params and user_params['Description'] != current_params['Description']: return True if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']: return True + if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + return True if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']: return True + if 'Role' in user_params and user_params['Role'] != current_params['Role']: + return True if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: return True if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: @@ -276,6 +318,44 @@ def _compare_glue_job_params(user_params, current_params): return False +def ensure_tags(connection, module, 
glue_job): + changed = False + + if module.params.get('tags') is None: + return False + + account_id, partition = get_aws_account_info(module) + arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name')) + + try: + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + existing_tags = {} + else: + module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name')) + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + + if tags_to_remove: + changed = True + if not module.check_mode: + try: + connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + + if tags_to_add: + changed = True + if not module.check_mode: + try: + connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + + return changed + + def create_or_update_glue_job(connection, module, glue_job): """ Create or update an AWS Glue job @@ -294,12 +374,16 @@ def create_or_update_glue_job(connection, module, glue_job): params['AllocatedCapacity'] = module.params.get("allocated_capacity") if module.params.get("command_script_location") is not None: params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")} + if module.params.get("command_python_version") is not None: + params['Command']['PythonVersion'] = module.params.get("command_python_version") if module.params.get("connections") is not None: params['Connections'] = {'Connections': module.params.get("connections")} if module.params.get("default_arguments") is not None: params['DefaultArguments'] = module.params.get("default_arguments") if module.params.get("description") is not None: params['Description'] = module.params.get("description") + if module.params.get("glue_version") is not None: + params['GlueVersion'] = module.params.get("glue_version") if module.params.get("max_concurrent_runs") is not None: params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")} if module.params.get("max_retries") is not None: @@ -320,22 +404,24 @@ def create_or_update_glue_job(connection, module, glue_job): # Update job needs slightly modified params update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)} del update_params['JobUpdate']['Name'] - connection.update_job(**update_params) + if not module.check_mode: + connection.update_job(aws_retry=True, **update_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) else: try: - connection.create_job(**params) + if not module.check_mode: + connection.create_job(aws_retry=True, **params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - # If changed, get the Glue job again - if changed: - glue_job = _get_glue_job(connection, module, params['Name']) + glue_job = _get_glue_job(connection, 
module, params['Name']) + + changed |= ensure_tags(connection, module, glue_job) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job)) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments'])) def delete_glue_job(connection, module, glue_job): @@ -347,12 +433,12 @@ def delete_glue_job(connection, module, glue_job): :param glue_job: a dict of AWS Glue job parameters or None :return: """ - changed = False if glue_job: try: - connection.delete_job(JobName=glue_job['Name']) + if not module.check_mode: + connection.delete_job(aws_retry=True, JobName=glue_job['Name']) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -366,29 +452,34 @@ def main(): dict( allocated_capacity=dict(type='int'), command_name=dict(type='str', default='glueetl'), + command_python_version=dict(type='str'), command_script_location=dict(type='str'), connections=dict(type='list', elements='str'), default_arguments=dict(type='dict'), description=dict(type='str'), + glue_version=dict(type='str'), max_concurrent_runs=dict(type='int'), max_retries=dict(type='int'), name=dict(required=True, type='str'), + number_of_workers=dict(type='int'), + purge_tags=dict(type='bool', default=True), role=dict(type='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), + tags=dict(type='dict'), timeout=dict(type='int'), - glue_version=dict(type='str'), worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), - number_of_workers=dict(type='int'), ) ) module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[ ('state', 'present', ['role', 'command_script_location']) - ] + ], + supports_check_mode=True ) - connection = module.client('glue') + retry_decorator = AWSRetry.jittered_backoff(retries=10) + connection = module.client('glue', retry_decorator=retry_decorator) state = module.params.get("state") From c54d2119415e2f6a2ce2b45510683afeef80fe04 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 26 Nov 2021 22:04:57 +0100 Subject: [PATCH 346/683] [3.0.0] Bump minimal botocore version to 1.19.0 (#809) [3.0.0] Bump minimal botocore version to 1.19.0 SUMMARY In preparation for release 3.0.0, bump the minimal botocore version ISSUE TYPE Feature Pull Request COMPONENT NAME README.md plugins/modules/aws_s3_bucket_info.py requirements.txt tests/integration/constraints.txt tests/integration/targets/setup_botocore_pip/defaults/main.yml tests/unit/constraints.txt ADDITIONAL INFORMATION Depends-On: ansible-collections/amazon.aws#574 Reviewed-by: None --- aws_s3_bucket_info.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index f5b9c44f04c..74b16271a8a 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -80,7 +80,6 @@ bucket_ownership_controls: description: - Retrive S3 ownership controls. - - Access to bucket ownership controls requires botocore>=1.18.11. 
type: bool default: False bucket_website: @@ -595,9 +594,6 @@ def main(): module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') - if module.params.get("bucket_ownership_controls"): - module.require_botocore_at_least('1.18.11', reason='to retreive bucket ownership controls') - # Get parameters name = module.params.get("name") name_filter = module.params.get("name_filter") From b06e75fbf398ec32a14a0362aa0d7e96163e6c0e Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Tue, 30 Nov 2021 18:09:50 +0000 Subject: [PATCH 347/683] Fix route53_info max_items / type being ignored (#813) Fix route53_info max_items / type being ignored SUMMARY Currently, if max_items is set on the route53_info module it is ignored, meaning all items are returned. type is also ignored, due to an incorrect if statement. It looks like it was a regression introduced here: ansible/ansible@6075536#diff-23a0c9250633162d50c3f06442b7a552a5ae0659a24dd01a328c0e165e473616 The tests have been updated to add a check on max_items and type. Fixes: #529 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_info ADDITIONAL INFORMATION The problem with max_items being ignored is resolved by adding a PaginationConfig and moving MaxItems into it. The problem with type being ignored is resolved by fixing the if statement. Boto3 docs: https://boto3.amazonaws.com/v1/documentation/api/1.18.7/reference/services/route53.html#Route53.Paginator.ListResourceRecordSets Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: None --- route53_info.py | 49 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/route53_info.py b/route53_info.py index abdf7e44709..322ce7b0523 100644 --- a/route53_info.py +++ b/route53_info.py @@ -47,7 +47,7 @@ description: - Maximum number of items to return for various get/list requests. required: false - type: str + type: int next_marker: description: - "Some requests such as list_command: hosted_zones will return a maximum @@ -72,7 +72,7 @@ description: - The type of DNS record.
required: false - choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ] + choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' ] type: str dns_name: description: @@ -228,9 +228,13 @@ def get_hosted_zone(client, module): def reusable_delegation_set_details(client, module): params = dict() + if not module.params.get('delegation_set_id'): + # Set PaginationConfig with max_items if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') @@ -246,8 +250,11 @@ def reusable_delegation_set_details(client, module): def list_hosted_zones(client, module): params = dict() + # Set PaginationConfig with max_items if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') @@ -272,8 +279,11 @@ def list_hosted_zones_by_name(client, module): if module.params.get('dns_name'): params['DNSName'] = module.params.get('dns_name') + # Set PaginationConfig with max_items if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) return client.list_hosted_zones_by_name(**params) @@ -340,12 +350,15 @@ def get_resource_tags(client, module): def list_health_checks(client, module): params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') + # Set PaginationConfig with max_items + if module.params.get('max_items'): + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) + paginator = client.get_paginator('list_health_checks') health_checks = paginator.paginate(**params).build_full_result()['HealthChecks'] return { @@ -362,19 +375,25 @@ def record_sets_details(client, module): else: module.fail_json(msg="Hosted Zone Id is required") - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - if module.params.get('start_record_name'): params['StartRecordName'] = module.params.get('start_record_name') + # Check that both params are set if type is applied if module.params.get('type') and not module.params.get('start_record_name'): module.fail_json(msg="start_record_name must be specified if type is set") - elif module.params.get('type'): + + if module.params.get('type'): params['StartRecordType'] = module.params.get('type') + # Set PaginationConfig with max_items + if module.params.get('max_items'): + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) + paginator = client.get_paginator('list_resource_record_sets') record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets'] + return { "ResourceRecordSets": record_sets, "list": record_sets, @@ -420,12 +439,12 @@ def main(): ], required=True), change_id=dict(), hosted_zone_id=dict(), - max_items=dict(), + max_items=dict(type='int'), next_marker=dict(), delegation_set_id=dict(), start_record_name=dict(), - type=dict(choices=[ - 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' + type=dict(type='str', choices=[ + 'A', 'CNAME', 'MX', 
'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' ]), dns_name=dict(), resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), From 142e6fc4b54b1574ead4bf5467f9ca5e0e88647d Mon Sep 17 00:00:00 2001 From: Jill R <4121322+jillr@users.noreply.github.com> Date: Thu, 2 Dec 2021 02:58:06 -0700 Subject: [PATCH 348/683] Remove deprecated "facts" aliases (#814) Remove deprecated "facts" aliases SUMMARY Modules named "facts.py" that do not return ansible_facts were renamed to "info.py" in 2.9. Remove these aliases now that the deprecation period is over. This PR should be included in 3.0.0 of the collection. ISSUE TYPE Bugfix Pull Request COMPONENT NAME *_facts.py Reviewed-by: Mark Chappell Reviewed-by: Jill R Reviewed-by: None --- aws_acm_facts.py | 1 - aws_acm_info.py | 4 - aws_kms_facts.py | 1 - aws_kms_info.py | 3 - aws_region_facts.py | 1 - aws_region_info.py | 3 - aws_s3_bucket_facts.py | 1 - aws_s3_bucket_info.py | 12 +- aws_sgw_facts.py | 1 - aws_sgw_info.py | 3 - aws_waf_facts.py | 1 - aws_waf_info.py | 3 - cloudfront_facts.py | 1 - cloudfront_info.py | 28 +-- cloudwatchlogs_log_group_facts.py | 1 - cloudwatchlogs_log_group_info.py | 4 - ec2_asg_facts.py | 1 - ec2_asg_info.py | 4 - ec2_customer_gateway_facts.py | 1 - ec2_customer_gateway_info.py | 4 - ec2_eip_facts.py | 1 - ec2_eip_info.py | 3 - ec2_elb_facts.py | 1 - ec2_elb_info.py | 264 --------------------- ec2_lc_facts.py | 1 - ec2_lc_info.py | 4 - ec2_placement_group_facts.py | 1 - ec2_placement_group_info.py | 4 - ec2_vpc_nacl_facts.py | 1 - ec2_vpc_nacl_info.py | 3 - ec2_vpc_peering_facts.py | 1 - ec2_vpc_peering_info.py | 3 - ec2_vpc_route_table_facts.py | 1 - ec2_vpc_route_table_info.py | 4 - ec2_vpc_vgw_facts.py | 1 - ec2_vpc_vgw_info.py | 3 - ec2_vpc_vpn_facts.py | 1 - ec2_vpc_vpn_info.py | 3 - ecs_service_facts.py | 1 - ecs_service_info.py | 11 +- ecs_taskdefinition_facts.py | 1 - ecs_taskdefinition_info.py | 4 - efs_facts.py | 1 - efs_info.py | 10 +- elasticache_facts.py | 1 - elasticache_info.py | 3 - elb_application_lb_facts.py | 1 - elb_application_lb_info.py | 4 - elb_classic_lb_facts.py | 1 - elb_classic_lb_info.py | 3 - elb_target_facts.py | 1 - elb_target_group_facts.py | 1 - elb_target_group_info.py | 3 - elb_target_info.py | 3 - iam_cert_facts.py | 1 - iam_mfa_device_facts.py | 1 - iam_mfa_device_info.py | 3 - iam_role_facts.py | 1 - iam_role_info.py | 3 - iam_server_certificate_facts.py | 1 - iam_server_certificate_info.py | 5 - iam_user_info.py | 6 +- lambda_facts.py | 382 ------------------------------ rds_instance_facts.py | 1 - rds_instance_info.py | 3 - rds_snapshot_facts.py | 1 - rds_snapshot_info.py | 3 - redshift_facts.py | 1 - redshift_info.py | 3 - route53_facts.py | 1 - route53_info.py | 3 - 71 files changed, 8 insertions(+), 838 deletions(-) delete mode 120000 aws_acm_facts.py delete mode 120000 aws_kms_facts.py delete mode 120000 aws_region_facts.py delete mode 120000 aws_s3_bucket_facts.py delete mode 120000 aws_sgw_facts.py delete mode 120000 aws_waf_facts.py delete mode 120000 cloudfront_facts.py delete mode 120000 cloudwatchlogs_log_group_facts.py delete mode 120000 ec2_asg_facts.py delete mode 120000 ec2_customer_gateway_facts.py delete mode 120000 ec2_eip_facts.py delete mode 120000 ec2_elb_facts.py delete mode 100644 ec2_elb_info.py delete mode 120000 ec2_lc_facts.py delete mode 120000 ec2_placement_group_facts.py delete mode 120000 ec2_vpc_nacl_facts.py delete mode 120000 ec2_vpc_peering_facts.py delete mode 120000 ec2_vpc_route_table_facts.py delete mode 
120000 ec2_vpc_vgw_facts.py delete mode 120000 ec2_vpc_vpn_facts.py delete mode 120000 ecs_service_facts.py delete mode 120000 ecs_taskdefinition_facts.py delete mode 120000 efs_facts.py delete mode 120000 elasticache_facts.py delete mode 120000 elb_application_lb_facts.py delete mode 120000 elb_classic_lb_facts.py delete mode 120000 elb_target_facts.py delete mode 120000 elb_target_group_facts.py delete mode 120000 iam_cert_facts.py delete mode 120000 iam_mfa_device_facts.py delete mode 120000 iam_role_facts.py delete mode 120000 iam_server_certificate_facts.py delete mode 100644 lambda_facts.py delete mode 120000 rds_instance_facts.py delete mode 120000 rds_snapshot_facts.py delete mode 120000 redshift_facts.py delete mode 120000 route53_facts.py diff --git a/aws_acm_facts.py b/aws_acm_facts.py deleted file mode 120000 index 42dbcf0df95..00000000000 --- a/aws_acm_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_acm_info.py \ No newline at end of file diff --git a/aws_acm_info.py b/aws_acm_info.py index f0b77b8958f..8d61dde4d3c 100644 --- a/aws_acm_info.py +++ b/aws_acm_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Retrieve information for ACM certificates - - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change. - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API. options: certificate_arn: @@ -275,9 +274,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) acm_info = ACMServiceManager(module) - if module._name == 'aws_acm_facts': - module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", date='2021-12-01', collection_name='community.aws') - client = module.client('acm') certificates = acm_info.get_certificates(client, module, diff --git a/aws_kms_facts.py b/aws_kms_facts.py deleted file mode 120000 index ccd052f5199..00000000000 --- a/aws_kms_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_kms_info.py \ No newline at end of file diff --git a/aws_kms_info.py b/aws_kms_info.py index a7620dad005..5b92b39dba2 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -13,7 +13,6 @@ short_description: Gather information about AWS KMS keys description: - Gather information about AWS KMS keys including tags and grants - - This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change. author: "Will Thames (@willthames)" options: alias: @@ -458,8 +457,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=[['alias', 'filters', 'key_id']], supports_check_mode=True) - if module._name == 'aws_kms_facts': - module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", date='2021-12-01', collection_name='community.aws') try: connection = module.client('kms') diff --git a/aws_region_facts.py b/aws_region_facts.py deleted file mode 120000 index 03b0d29932e..00000000000 --- a/aws_region_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_region_info.py \ No newline at end of file diff --git a/aws_region_info.py b/aws_region_info.py index 67b71d6f868..66349e318a8 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Gather information about AWS regions. - - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change. 
author: 'Henrique Rodrigues (@Sodki)' options: filters: @@ -72,8 +71,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'aws_region_facts': - module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/aws_s3_bucket_facts.py b/aws_s3_bucket_facts.py deleted file mode 120000 index 88f68b437a0..00000000000 --- a/aws_s3_bucket_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_s3_bucket_info.py \ No newline at end of file diff --git a/aws_s3_bucket_info.py b/aws_s3_bucket_info.py index 74b16271a8a..03da910549a 100644 --- a/aws_s3_bucket_info.py +++ b/aws_s3_bucket_info.py @@ -16,8 +16,6 @@ short_description: lists S3 buckets in AWS description: - Lists S3 buckets and details about those buckets. - - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.aws.aws_s3_bucket_info) module no longer returns C(ansible_facts)! options: name: description: @@ -589,10 +587,6 @@ def main(): # Including ec2 argument spec module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) - is_old_facts = module._name == 'aws_s3_bucket_facts' - if is_old_facts: - module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', " - "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') # Get parameters name = module.params.get("name") @@ -623,11 +617,7 @@ def main(): else: result['buckets'] = bucket_list - # Send exit - if is_old_facts: - module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result) - else: - module.exit_json(msg="Retrieved s3 info.", **result) + module.exit_json(msg="Retrieved s3 info.", **result) # MAIN diff --git a/aws_sgw_facts.py b/aws_sgw_facts.py deleted file mode 120000 index 0af0560a3b2..00000000000 --- a/aws_sgw_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_sgw_info.py \ No newline at end of file diff --git a/aws_sgw_info.py b/aws_sgw_info.py index e59f8ecf9f1..f49299947be 100644 --- a/aws_sgw_info.py +++ b/aws_sgw_info.py @@ -15,7 +15,6 @@ short_description: Fetch AWS Storage Gateway information description: - Fetch AWS Storage Gateway information - - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change. author: Loic Blot (@nerzhul) options: gather_local_disks: @@ -348,8 +347,6 @@ def main(): supports_check_mode=True, ) - if module._name == 'aws_sgw_facts': - module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", date='2021-12-01', collection_name='community.aws') client = module.client('storagegateway') if client is None: # this should never happen diff --git a/aws_waf_facts.py b/aws_waf_facts.py deleted file mode 120000 index 3fd538387ac..00000000000 --- a/aws_waf_facts.py +++ /dev/null @@ -1 +0,0 @@ -aws_waf_info.py \ No newline at end of file diff --git a/aws_waf_info.py b/aws_waf_info.py index 9a1015d6858..838f9491dfd 100644 --- a/aws_waf_info.py +++ b/aws_waf_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Retrieve information for WAF ACLs, Rule , Conditions and Filters. - - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change. 
options: name: description: @@ -125,8 +124,6 @@ def main(): waf_regional=dict(type='bool', default=False) ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'aws_waf_facts': - module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", date='2021-12-01', collection_name='community.aws') resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' client = module.client(resource) diff --git a/cloudfront_facts.py b/cloudfront_facts.py deleted file mode 120000 index 700056e714c..00000000000 --- a/cloudfront_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloudfront_info.py \ No newline at end of file diff --git a/cloudfront_info.py b/cloudfront_info.py index be8481a40ae..e9136341c9f 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -13,8 +13,6 @@ short_description: Obtain facts about an AWS CloudFront distribution description: - Gets information about an AWS CloudFront distribution. - - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.aws.cloudfront_info) module no longer returns C(ansible_facts)! author: Willem van Ketwich (@wilvk) options: distribution_id: @@ -171,22 +169,6 @@ - ansible.builtin.debug: msg: "{{ result_website['cloudfront']['www.my-website.com'] }}" -# When the module is called as cloudfront_facts, return values are published -# in ansible_facts['cloudfront'][] and can be used as follows. -# Note that this is deprecated and will stop working in a release after 2021-12-01. -- name: Gather facts - community.aws.cloudfront_facts: - distribution: true - distribution_id: my-cloudfront-distribution-id -- ansible.builtin.debug: - msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}" - -- community.aws.cloudfront_facts: - distribution: true - domain_name_alias: www.my-website.com -- ansible.builtin.debug: - msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}" - - name: Get all information about an invalidation for a distribution. community.aws.cloudfront_info: invalidation: true @@ -545,10 +527,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - is_old_facts = module._name == 'cloudfront_facts' - if is_old_facts: - module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', " - "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') service_mgr = CloudFrontServiceManager(module) @@ -658,10 +636,8 @@ def main(): result['changed'] = False result['cloudfront'].update(facts) - if is_old_facts: - module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result) - else: - module.exit_json(msg="Retrieved CloudFront info.", **result) + + module.exit_json(msg="Retrieved CloudFront info.", **result) if __name__ == '__main__': diff --git a/cloudwatchlogs_log_group_facts.py b/cloudwatchlogs_log_group_facts.py deleted file mode 120000 index 402937478ad..00000000000 --- a/cloudwatchlogs_log_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloudwatchlogs_log_group_info.py \ No newline at end of file diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index c53e501717f..ff80191790d 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -14,7 +14,6 @@ short_description: Get information about log_group in CloudWatchLogs description: - Lists the specified log groups. 
You can list all your log groups or filter the results by prefix. - - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change. author: - Willian Ricardo (@willricardo) options: @@ -98,9 +97,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'cloudwatchlogs_log_group_facts': - module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", - date='2021-12-01', collection_name='community.aws') try: logs = module.client('logs') diff --git a/ec2_asg_facts.py b/ec2_asg_facts.py deleted file mode 120000 index 88ec9524588..00000000000 --- a/ec2_asg_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_asg_info.py \ No newline at end of file diff --git a/ec2_asg_info.py b/ec2_asg_info.py index 2b8cf4bc90c..55d463e096b 100644 --- a/ec2_asg_info.py +++ b/ec2_asg_info.py @@ -13,7 +13,6 @@ short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS description: - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS - - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change. author: "Rob White (@wimnat)" options: name: @@ -444,9 +443,6 @@ def main(): supports_check_mode=True, ) - if module._name == 'ec2_asg_facts': - module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", date='2021-12-01', collection_name='community.aws') - asg_name = module.params.get('name') asg_tags = module.params.get('tags') diff --git a/ec2_customer_gateway_facts.py b/ec2_customer_gateway_facts.py deleted file mode 120000 index 2e1aec0aba5..00000000000 --- a/ec2_customer_gateway_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_customer_gateway_info.py \ No newline at end of file diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index ab8ac41e505..7b55d433b99 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -13,7 +13,6 @@ short_description: Gather information about customer gateways in AWS description: - Gather information about customer gateways in AWS. - - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change. author: Madhura Naniwadekar (@Madhura-CSI) options: filters: @@ -125,9 +124,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=[['customer_gateway_ids', 'filters']], supports_check_mode=True) - if module._module._name == 'ec2_customer_gateway_facts': - module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'", - date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ec2_eip_facts.py b/ec2_eip_facts.py deleted file mode 120000 index 0ba519697bd..00000000000 --- a/ec2_eip_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_eip_info.py \ No newline at end of file diff --git a/ec2_eip_info.py b/ec2_eip_info.py index e38735c087e..4f560429e12 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -13,7 +13,6 @@ short_description: List EC2 EIP details description: - List details of EC2 Elastic IP addresses. - - This module was called C(ec2_eip_facts) before Ansible 2.9. The usage did not change. 
author: "Brad Macpherson (@iiibrad)" options: filters: @@ -137,8 +136,6 @@ def main(): ), supports_check_mode=True ) - if module._module._name == 'ec2_eip_facts': - module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", date='2021-12-01', collection_name='community.aws') module.exit_json(changed=False, addresses=get_eips_details(module)) diff --git a/ec2_elb_facts.py b/ec2_elb_facts.py deleted file mode 120000 index a029c6d0b08..00000000000 --- a/ec2_elb_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_elb_info.py \ No newline at end of file diff --git a/ec2_elb_info.py b/ec2_elb_info.py deleted file mode 100644 index ab74e11e0d6..00000000000 --- a/ec2_elb_info.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/python -# -# This is a free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This Ansible library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this library. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_elb_info -version_added: 1.0.0 -deprecated: - removed_in: 3.0.0 - why: The ec2_elb_info is based upon a deprecated version of the AWS SDK. - alternative: Use M(community.aws.elb_classic_lb_info). -short_description: Gather information about EC2 Elastic Load Balancers in AWS -description: - - Gather information about EC2 Elastic Load Balancers in AWS - - This module was called C(ec2_elb_facts) before Ansible 2.9. The usage did not change. -author: - - "Michael Schultz (@mjschultz)" - - "Fernando Jose Pando (@nand0p)" -options: - names: - description: - - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. - type: list - elements: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -requirements: -- boto >= 2.49.0 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Output format tries to match amazon.aws.ec2_elb_lb module input parameters - -- name: Gather information about all ELBs - community.aws.ec2_elb_info: - register: elb_info -- ansible.builtin.debug: - msg: "{{ item.dns_name }}" - loop: "{{ elb_info.elbs }}" - -- name: Gather information about a particular ELB - community.aws.ec2_elb_info: - names: frontend-prod-elb - register: elb_info - -- ansible.builtin.debug: - msg: "{{ elb_info.elbs.0.dns_name }}" - -- name: Gather information about a set of ELBs - community.aws.ec2_elb_info: - names: - - frontend-prod-elb - - backend-prod-elb - register: elb_info - -- ansible.builtin.debug: - msg: "{{ item.dns_name }}" - loop: "{{ elb_info.elbs }}" - -''' - -import traceback - -try: - import boto.ec2.elb - from boto.ec2.tag import Tag - from boto.exception import BotoServerError -except ImportError: - pass # Handled by ec2.HAS_BOTO - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO - - -class ElbInformation(object): - """Handles ELB information.""" - - def __init__(self, module, names, region, **aws_connect_params): - - self.module = module - self.names = names - self.region = region - self.aws_connect_params = aws_connect_params - self.connection = self._get_elb_connection() - - def _get_tags(self, elbname): - params = {'LoadBalancerNames.member.1': elbname} - elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) - return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key')) - - @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) - def _get_elb_connection(self): - return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - - def _get_elb_listeners(self, listeners): - listener_list = [] - - for listener in listeners: - listener_dict = { - 'load_balancer_port': listener[0], - 'instance_port': listener[1], - 'protocol': listener[2], - 'instance_protocol': listener[3] - } - - try: - ssl_certificate_id = listener[4] - except IndexError: - pass - else: - if ssl_certificate_id: - listener_dict['ssl_certificate_id'] = ssl_certificate_id - - listener_list.append(listener_dict) - - return listener_list - - def _get_health_check(self, health_check): - protocol, port_path = health_check.target.split(':') - try: - port, path = port_path.split('/', 1) - path = '/{0}'.format(path) - except ValueError: - port = port_path - path = None - - health_check_dict = { - 'ping_protocol': protocol.lower(), - 'ping_port': int(port), - 'response_timeout': health_check.timeout, - 'interval': health_check.interval, - 'unhealthy_threshold': health_check.unhealthy_threshold, - 'healthy_threshold': health_check.healthy_threshold, - } - - if path: - health_check_dict['ping_path'] = path - return health_check_dict - - @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) - def _get_elb_info(self, elb): - elb_info = { - 'name': elb.name, - 'zones': elb.availability_zones, - 'dns_name': elb.dns_name, - 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name, - 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id, - 'hosted_zone_name': elb.canonical_hosted_zone_name, - 'hosted_zone_id': elb.canonical_hosted_zone_name_id, - 
'instances': [instance.id for instance in elb.instances], - 'listeners': self._get_elb_listeners(elb.listeners), - 'scheme': elb.scheme, - 'security_groups': elb.security_groups, - 'health_check': self._get_health_check(elb.health_check), - 'subnets': elb.subnets, - 'instances_inservice': [], - 'instances_inservice_count': 0, - 'instances_outofservice': [], - 'instances_outofservice_count': 0, - 'instances_inservice_percent': 0.0, - 'tags': self._get_tags(elb.name) - } - - if elb.vpc_id: - elb_info['vpc_id'] = elb.vpc_id - - if elb.instances: - instance_health = self.connection.describe_instance_health(elb.name) - elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService'] - elb_info['instances_inservice_count'] = len(elb_info['instances_inservice']) - elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService'] - elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice']) - try: - elb_info['instances_inservice_percent'] = ( - float(elb_info['instances_inservice_count']) / - float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count']) - ) * 100. - except ZeroDivisionError: - elb_info['instances_inservice_percent'] = 0. - return elb_info - - def list_elbs(self): - elb_array, token = [], None - get_elb_with_backoff = AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) - while True: - all_elbs = get_elb_with_backoff(marker=token) - token = all_elbs.next_marker - - if all_elbs: - if self.names: - for existing_lb in all_elbs: - if existing_lb.name in self.names: - elb_array.append(existing_lb) - else: - elb_array.extend(all_elbs) - else: - break - - if token is None: - break - - return list(map(self._get_elb_info, elb_array)) - - -def main(): - argument_spec = dict( - names={'default': [], 'type': 'list', 'elements': 'str'} - ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - - if module._name == 'ec2_elb_facts': - # The ec2_elb_facts alias was already deprecated - module.deprecate("The 'ec2_elb_facts' module has been deprecated and replaced by the 'elb_classic_lb_info' module'", - version='3.0.0', collection_name='community.aws') - if module._name == 'ec2_elb_info': - module.deprecate("The 'ec2_elb_info' module has been deprecated and replaced by the 'elb_classic_lb_info' module'", - version='3.0.0', collection_name='community.aws') - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - try: - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="region must be specified") - - names = module.params['names'] - elb_information = ElbInformation( - module, names, region, **aws_connect_params) - - ec2_info_result = dict(changed=False, - elbs=elb_information.list_elbs()) - - except BotoServerError as err: - module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message), - exception=traceback.format_exc()) - - module.exit_json(**ec2_info_result) - - -if __name__ == '__main__': - main() diff --git a/ec2_lc_facts.py b/ec2_lc_facts.py deleted file mode 120000 index cb62597c074..00000000000 --- a/ec2_lc_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_lc_info.py \ No newline at end of file diff --git a/ec2_lc_info.py b/ec2_lc_info.py index ea3832e1234..d05bf9876b4 100644 --- a/ec2_lc_info.py +++ b/ec2_lc_info.py @@ -14,7 +14,6 @@ short_description: Gather information about AWS 
Autoscaling Launch Configurations. description: - Gather information about AWS Autoscaling Launch Configurations. - - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change. author: "Loïc Latreille (@psykotox)" options: name: @@ -212,9 +211,6 @@ def main(): supports_check_mode=True, ) - if module._name == 'ec2_lc_facts': - module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws') - try: connection = module.client('autoscaling') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/ec2_placement_group_facts.py b/ec2_placement_group_facts.py deleted file mode 120000 index 7d33ef0167f..00000000000 --- a/ec2_placement_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_placement_group_info.py \ No newline at end of file diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 354d3eb3276..6a344f1d8d9 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -13,7 +13,6 @@ short_description: List EC2 Placement Group(s) details description: - List details of EC2 Placement Group(s). - - This module was called C(ec2_placement_group_facts) before Ansible 2.9. The usage did not change. author: "Brad Macpherson (@iiibrad)" options: names: @@ -114,9 +113,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True ) - if module._module._name == 'ec2_placement_group_facts': - module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'", - date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ec2_vpc_nacl_facts.py b/ec2_vpc_nacl_facts.py deleted file mode 120000 index a88962d88f4..00000000000 --- a/ec2_vpc_nacl_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_nacl_info.py \ No newline at end of file diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 88786bf76ca..b8d256470ee 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -12,7 +12,6 @@ short_description: Gather information about Network ACLs in an AWS VPC description: - Gather information about Network ACLs in an AWS VPC - - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change. author: "Brad Davidson (@brandond)" options: nacl_ids: @@ -207,8 +206,6 @@ def main(): filters=dict(default={}, type='dict')) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ec2_vpc_nacl_facts': - module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/ec2_vpc_peering_facts.py b/ec2_vpc_peering_facts.py deleted file mode 120000 index 074baf65a0f..00000000000 --- a/ec2_vpc_peering_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_peering_info.py \ No newline at end of file diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index f43d1378aa8..e7d8264c8d5 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Gets various details related to AWS VPC Peers - - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change. 
options: peer_connection_ids: description: @@ -235,8 +234,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,) - if module._name == 'ec2_vpc_peering_facts': - module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", date='2021-12-01', collection_name='community.aws') try: ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/ec2_vpc_route_table_facts.py b/ec2_vpc_route_table_facts.py deleted file mode 120000 index ed0f72a1aa3..00000000000 --- a/ec2_vpc_route_table_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_route_table_info.py \ No newline at end of file diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py index 2e4dd384930..a84245d47ee 100644 --- a/ec2_vpc_route_table_info.py +++ b/ec2_vpc_route_table_info.py @@ -13,7 +13,6 @@ short_description: Gather information about ec2 VPC route tables in AWS description: - Gather information about ec2 VPC route tables in AWS - - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change. author: - "Rob White (@wimnat)" - "Mark Chappell (@tremble)" @@ -270,9 +269,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ec2_vpc_route_table_facts': - module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", - date='2021-12-01', collection_name='community.aws') connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) diff --git a/ec2_vpc_vgw_facts.py b/ec2_vpc_vgw_facts.py deleted file mode 120000 index bbcf44bef40..00000000000 --- a/ec2_vpc_vgw_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_vgw_info.py \ No newline at end of file diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 7cb7b15798e..aa4a4719ffe 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -13,7 +13,6 @@ short_description: Gather information about virtual gateways in AWS description: - Gather information about virtual gateways in AWS. - - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change. options: filters: description: @@ -134,8 +133,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ec2_vpc_vgw_facts': - module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", date='2021-12-01', collection_name='community.aws') try: connection = module.client('ec2') diff --git a/ec2_vpc_vpn_facts.py b/ec2_vpc_vpn_facts.py deleted file mode 120000 index 671a1a30341..00000000000 --- a/ec2_vpc_vpn_facts.py +++ /dev/null @@ -1 +0,0 @@ -ec2_vpc_vpn_info.py \ No newline at end of file diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 31fe02621b4..57ebb17e852 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -13,7 +13,6 @@ short_description: Gather information about VPN Connections in AWS. description: - Gather information about VPN Connections in AWS. - - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change. 
author: Madhura Naniwadekar (@Madhura-CSI) options: filters: @@ -204,8 +203,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=[['vpn_connection_ids', 'filters']], supports_check_mode=True) - if module._module._name == 'ec2_vpc_vpn_facts': - module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('ec2') diff --git a/ecs_service_facts.py b/ecs_service_facts.py deleted file mode 120000 index fead2dab76b..00000000000 --- a/ecs_service_facts.py +++ /dev/null @@ -1 +0,0 @@ -ecs_service_info.py \ No newline at end of file diff --git a/ecs_service_info.py b/ecs_service_info.py index 79332e55702..e6167afd09c 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -13,8 +13,6 @@ short_description: List or describe services in ECS description: - Lists or describes services in ECS. - - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.aws.ecs_service_info) module no longer returns C(ansible_facts)! author: - "Mark Chance (@Java1Guy)" - "Darek Kaczynski (@kaczynskid)" @@ -220,10 +218,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - is_old_facts = module._name == 'ecs_service_facts' - if is_old_facts: - module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', " - "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') show_details = module.params.get('details') @@ -241,10 +235,7 @@ def main(): else: ecs_info = task_mgr.list_services(module.params['cluster']) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=ecs_info, **ecs_info) - else: - module.exit_json(changed=False, **ecs_info) + module.exit_json(changed=False, **ecs_info) if __name__ == '__main__': diff --git a/ecs_taskdefinition_facts.py b/ecs_taskdefinition_facts.py deleted file mode 120000 index 0eb6f10b8ff..00000000000 --- a/ecs_taskdefinition_facts.py +++ /dev/null @@ -1 +0,0 @@ -ecs_taskdefinition_info.py \ No newline at end of file diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index ad351576dca..b3772a7f7e3 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -14,7 +14,6 @@ notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition) - - This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change. description: - Describes a task definition in ECS. author: @@ -313,9 +312,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'ecs_taskdefinition_facts': - module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", - date='2021-12-01', collection_name='community.aws') ecs = module.client('ecs') diff --git a/efs_facts.py b/efs_facts.py deleted file mode 120000 index 781c362da4b..00000000000 --- a/efs_facts.py +++ /dev/null @@ -1 +0,0 @@ -efs_info.py \ No newline at end of file diff --git a/efs_info.py b/efs_info.py index 9a6ce1786fc..8f616d15dda 100644 --- a/efs_info.py +++ b/efs_info.py @@ -13,7 +13,6 @@ short_description: Get information about Amazon EFS file systems description: - This module can be used to search Amazon EFS file systems. 
- - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts). Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! author: - "Ryan Sydnor (@ryansydnor)" @@ -363,10 +362,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - is_old_facts = module._name == 'efs_facts' - if is_old_facts: - module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', " - "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws') connection = EFSConnection(module) @@ -387,10 +382,7 @@ def main(): targets = [(item, prefix_to_attr(item)) for item in targets] file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)] - if is_old_facts: - module.exit_json(changed=False, ansible_facts={'efs': file_systems_info}) - else: - module.exit_json(changed=False, efs=file_systems_info) + module.exit_json(changed=False, efs=file_systems_info) if __name__ == '__main__': diff --git a/elasticache_facts.py b/elasticache_facts.py deleted file mode 120000 index d6cd32eb0c5..00000000000 --- a/elasticache_facts.py +++ /dev/null @@ -1 +0,0 @@ -elasticache_info.py \ No newline at end of file diff --git a/elasticache_info.py b/elasticache_info.py index 026337e3350..c9fa9cc4502 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Retrieve information from AWS ElastiCache clusters - - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change. options: name: description: @@ -294,8 +293,6 @@ def main(): name=dict(required=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'elasticache_facts': - module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", date='2021-12-01', collection_name='community.aws') client = module.client('elasticache') diff --git a/elb_application_lb_facts.py b/elb_application_lb_facts.py deleted file mode 120000 index c5ee0eaca83..00000000000 --- a/elb_application_lb_facts.py +++ /dev/null @@ -1 +0,0 @@ -elb_application_lb_info.py \ No newline at end of file diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index d04bd0d8261..ddac4fe9629 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -13,7 +13,6 @@ short_description: Gather information about application ELBs in AWS description: - Gather information about application ELBs in AWS - - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change. 
author: Rob White (@wimnat) options: load_balancer_arns: @@ -254,9 +253,6 @@ def main(): mutually_exclusive=[['load_balancer_arns', 'names']], supports_check_mode=True, ) - if module._name == 'elb_application_lb_facts': - module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", - date='2021-12-01', collection_name='community.aws') try: connection = module.client('elbv2') diff --git a/elb_classic_lb_facts.py b/elb_classic_lb_facts.py deleted file mode 120000 index d182d5e1441..00000000000 --- a/elb_classic_lb_facts.py +++ /dev/null @@ -1 +0,0 @@ -elb_classic_lb_info.py \ No newline at end of file diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 1afbd49c9dc..25d4eadbf63 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -24,7 +24,6 @@ short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - Gather information about EC2 Elastic Load Balancers in AWS - - This module was called C(elb_classic_lb_facts) before Ansible 2.9. The usage did not change. author: - "Michael Schultz (@mjschultz)" - "Fernando Jose Pando (@nand0p)" @@ -218,8 +217,6 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module._name == 'elb_classic_lb_facts': - module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", date='2021-12-01', collection_name='community.aws') connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)) diff --git a/elb_target_facts.py b/elb_target_facts.py deleted file mode 120000 index 897c23897de..00000000000 --- a/elb_target_facts.py +++ /dev/null @@ -1 +0,0 @@ -elb_target_info.py \ No newline at end of file diff --git a/elb_target_group_facts.py b/elb_target_group_facts.py deleted file mode 120000 index 3abd2ee5a65..00000000000 --- a/elb_target_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -elb_target_group_info.py \ No newline at end of file diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 59bfbbc66ff..5abe8d34210 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -13,7 +13,6 @@ short_description: Gather information about ELB target groups in AWS description: - Gather information about ELB target groups in AWS - - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change. author: Rob White (@wimnat) options: load_balancer_arn: @@ -299,8 +298,6 @@ def main(): mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], supports_check_mode=True, ) - if module._name == 'elb_target_group_facts': - module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", date='2021-12-01', collection_name='community.aws') try: connection = module.client('elbv2') diff --git a/elb_target_info.py b/elb_target_info.py index 507380c9717..dc71adbc72f 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -12,7 +12,6 @@ description: - This module will search through every target group in a region to find which ones have registered a given instance ID or IP. - - This module was called C(elb_target_facts) before Ansible 2.9. The usage did not change. 
author: "Yaakov Kuperman (@yaakov-github)" options: @@ -412,8 +411,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) - if module._name == 'elb_target_facts': - module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", date='2021-12-01', collection_name='community.aws') instance_id = module.params["instance_id"] get_unused_target_groups = module.params["get_unused_target_groups"] diff --git a/iam_cert_facts.py b/iam_cert_facts.py deleted file mode 120000 index 63244caa58d..00000000000 --- a/iam_cert_facts.py +++ /dev/null @@ -1 +0,0 @@ -iam_server_certificate_info.py \ No newline at end of file diff --git a/iam_mfa_device_facts.py b/iam_mfa_device_facts.py deleted file mode 120000 index 63be2b059fd..00000000000 --- a/iam_mfa_device_facts.py +++ /dev/null @@ -1 +0,0 @@ -iam_mfa_device_info.py \ No newline at end of file diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 78cfe8249d0..4cd27875273 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -13,7 +13,6 @@ short_description: List the MFA (Multi-Factor Authentication) devices registered for a user description: - List the MFA (Multi-Factor Authentication) devices registered for a user - - This module was called C(iam_mfa_device_facts) before Ansible 2.9. The usage did not change. author: Victor Costan (@pwnall) options: user_name: @@ -91,8 +90,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) - if module._name == 'iam_mfa_device_facts': - module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", date='2021-12-01', collection_name='community.aws') try: connection = module.client('iam') diff --git a/iam_role_facts.py b/iam_role_facts.py deleted file mode 120000 index e15c454b71c..00000000000 --- a/iam_role_facts.py +++ /dev/null @@ -1 +0,0 @@ -iam_role_info.py \ No newline at end of file diff --git a/iam_role_info.py b/iam_role_info.py index a08df455fad..561b9f92d70 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -13,7 +13,6 @@ short_description: Gather information on IAM roles description: - Gathers information about IAM roles. - - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change. author: - "Will Thames (@willthames)" options: @@ -242,8 +241,6 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[['name', 'path_prefix']]) - if module._name == 'iam_role_facts': - module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", date='2021-12-01', collection_name='community.aws') client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) diff --git a/iam_server_certificate_facts.py b/iam_server_certificate_facts.py deleted file mode 120000 index 63244caa58d..00000000000 --- a/iam_server_certificate_facts.py +++ /dev/null @@ -1 +0,0 @@ -iam_server_certificate_info.py \ No newline at end of file diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index a37c9e88c83..1356a5ec15e 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -13,7 +13,6 @@ short_description: Retrieve the information of a server certificate description: - Retrieve the attributes of a server certificate. - - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change. 
author: "Allen Sanabria (@linuxdynasty)" options: name: @@ -147,10 +146,6 @@ def main(): supports_check_mode=True, ) - if module._name == 'iam_server_certificate_facts': - module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", - date='2021-12-01', collection_name='community.aws') - try: iam = module.client('iam') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/iam_user_info.py b/iam_user_info.py index 10a6f2bffdc..e8fa1ac028a 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -45,17 +45,17 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather facts about "test" user. -- name: Get IAM user facts +- name: Get IAM user info community.aws.iam_user_info: name: "test" # Gather facts about all users in the "dev" group. -- name: Get IAM user facts +- name: Get IAM user info community.aws.iam_user_info: group: "dev" # Gather facts about all users with "/division_abc/subdivision_xyz/" path. -- name: Get IAM user facts +- name: Get IAM user info community.aws.iam_user_info: path: "/division_abc/subdivision_xyz/" ''' diff --git a/lambda_facts.py b/lambda_facts.py deleted file mode 100644 index 010add7985d..00000000000 --- a/lambda_facts.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_facts -version_added: 1.0.0 -deprecated: - removed_at_date: '2021-12-01' - removed_from_collection: 'community.aws' - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.aws.lambda_info) instead. -short_description: Gathers AWS Lambda function details as Ansible facts -description: - - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases and - M(community.aws.lambda_event) to manage lambda event source mappings. - - -options: - query: - description: - - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts. - choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] - default: "all" - type: str - function_name: - description: - - The name of the lambda function for which facts are requested. - aliases: [ "function", "name"] - type: str - event_source_arn: - description: - - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. 
- type: str -author: Pierre Jodouin (@pjodouin) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' ---- -# Simple example of listing all info for a function -- name: List all for a specific function - community.aws.lambda_facts: - query: all - function_name: myFunction - register: my_function_details - -# List all versions of a function -- name: List function versions - community.aws.lambda_facts: - query: versions - function_name: myFunction - register: my_function_versions - -# List all lambda function versions -- name: List all function - community.aws.lambda_facts: - query: all - max_items: 20 -- name: show Lambda facts - ansible.builtin.debug: - var: lambda_facts -''' - -RETURN = ''' ---- -lambda_facts: - description: lambda facts - returned: success - type: dict -lambda_facts.function: - description: lambda function list - returned: success - type: dict -lambda_facts.function.TheName: - description: lambda function information, including event, mapping, and version information - returned: success - type: dict -''' -import json -import datetime -import sys -import re - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - - -def fix_return(node): - """ - fixup returned dictionary - - :param node: - :return: - """ - - if isinstance(node, datetime.datetime): - node_value = str(node) - - elif isinstance(node, list): - node_value = [fix_return(item) for item in node] - - elif isinstance(node, dict): - node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) - - else: - node_value = node - - return node_value - - -def alias_details(client, module): - """ - Returns list of aliases for a specified function. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - lambda_facts = dict() - - function_name = module.params.get('function_name') - if function_name: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - try: - lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get aliases") - else: - module.fail_json(msg='Parameter function_name required for query=aliases.') - - return {function_name: camel_dict_to_snake_dict(lambda_facts)} - - -def all_details(client, module): - """ - Returns all lambda related facts. 
- - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - if module.params.get('max_items') or module.params.get('next_marker'): - module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') - - lambda_facts = dict() - - function_name = module.params.get('function_name') - if function_name: - lambda_facts[function_name] = {} - lambda_facts[function_name].update(config_details(client, module)[function_name]) - lambda_facts[function_name].update(alias_details(client, module)[function_name]) - lambda_facts[function_name].update(policy_details(client, module)[function_name]) - lambda_facts[function_name].update(version_details(client, module)[function_name]) - lambda_facts[function_name].update(mapping_details(client, module)[function_name]) - else: - lambda_facts.update(config_details(client, module)) - - return lambda_facts - - -def config_details(client, module): - """ - Returns configuration details for one or all lambda functions. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - lambda_facts = dict() - - function_name = module.params.get('function_name') - if function_name: - try: - lambda_facts.update(client.get_function_configuration(FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - else: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - try: - lambda_facts.update(function_list=client.list_functions(**params)['Functions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(function_list=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get function list") - - functions = dict() - for func in lambda_facts.pop('function_list', []): - functions[func['FunctionName']] = camel_dict_to_snake_dict(func) - return functions - - return {function_name: camel_dict_to_snake_dict(lambda_facts)} - - -def mapping_details(client, module): - """ - Returns all lambda event source mappings. 
- - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - lambda_facts = dict() - params = dict() - function_name = module.params.get('function_name') - - if function_name: - params['FunctionName'] = module.params.get('function_name') - - if module.params.get('event_source_arn'): - params['EventSourceArn'] = module.params.get('event_source_arn') - - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - try: - lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(mappings=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get source event mappings") - - if function_name: - return {function_name: camel_dict_to_snake_dict(lambda_facts)} - - return camel_dict_to_snake_dict(lambda_facts) - - -def policy_details(client, module): - """ - Returns policy attached to a lambda function. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - if module.params.get('max_items') or module.params.get('next_marker'): - module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') - - lambda_facts = dict() - - function_name = module.params.get('function_name') - if function_name: - try: - # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=policy.') - - return {function_name: camel_dict_to_snake_dict(lambda_facts)} - - -def version_details(client, module): - """ - Returns all lambda function versions. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :return dict: - """ - - lambda_facts = dict() - - function_name = module.params.get('function_name') - if function_name: - params = dict() - if module.params.get('max_items'): - params['MaxItems'] = module.params.get('max_items') - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - try: - lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_facts.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=versions.') - - return {function_name: camel_dict_to_snake_dict(lambda_facts)} - - -def main(): - """ - Main entry point. 
- - :return dict: ansible facts - """ - argument_spec = dict( - function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), - event_source_arn=dict(required=False, default=None) - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[], - required_together=[] - ) - - # validate function_name if present - function_name = module.params['function_name'] - if function_name: - if not re.search(r"^[\w\-:]+$", function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) - ) - if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - client = module.client('lambda') - - this_module = sys.modules[__name__] - - invocations = dict( - aliases='alias_details', - all='all_details', - config='config_details', - mappings='mapping_details', - policy='policy_details', - versions='version_details', - ) - - this_module_function = getattr(this_module, invocations[module.params['query']]) - all_facts = fix_return(this_module_function(client, module)) - - results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False) - - if module.check_mode: - results['msg'] = 'Check mode set but ignored for fact gathering only.' - - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/rds_instance_facts.py b/rds_instance_facts.py deleted file mode 120000 index f3dda867271..00000000000 --- a/rds_instance_facts.py +++ /dev/null @@ -1 +0,0 @@ -rds_instance_info.py \ No newline at end of file diff --git a/rds_instance_info.py b/rds_instance_info.py index 13609972c17..22a10a081ed 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -15,7 +15,6 @@ short_description: obtain information about one or more RDS instances description: - Obtain information about one or more RDS instances. - - This module was called C(rds_instance_facts) before Ansible 2.9. The usage did not change. options: db_instance_identifier: description: @@ -398,8 +397,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) - if module._name == 'rds_instance_facts': - module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", date='2021-12-01', collection_name='community.aws') conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) diff --git a/rds_snapshot_facts.py b/rds_snapshot_facts.py deleted file mode 120000 index 7281d3b696f..00000000000 --- a/rds_snapshot_facts.py +++ /dev/null @@ -1 +0,0 @@ -rds_snapshot_info.py \ No newline at end of file diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py index d374520ab89..7abc0cae675 100644 --- a/rds_snapshot_info.py +++ b/rds_snapshot_info.py @@ -16,7 +16,6 @@ description: - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora). - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters are passed. - - This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change. 
options: db_snapshot_identifier: description: @@ -373,8 +372,6 @@ def main(): supports_check_mode=True, mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] ) - if module._name == 'rds_snapshot_facts': - module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", date='2021-12-01', collection_name='community.aws') conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) results = dict() diff --git a/redshift_facts.py b/redshift_facts.py deleted file mode 120000 index 40a774faad8..00000000000 --- a/redshift_facts.py +++ /dev/null @@ -1 +0,0 @@ -redshift_info.py \ No newline at end of file diff --git a/redshift_info.py b/redshift_info.py index bc4cb021840..b79b28b3074 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -15,7 +15,6 @@ short_description: Gather information about Redshift cluster(s) description: - Gather information about Redshift cluster(s). - - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change. options: cluster_identifier: description: @@ -334,8 +333,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True ) - if module._name == 'redshift_facts': - module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", date='2021-12-01', collection_name='community.aws') cluster_identifier = module.params.get('cluster_identifier') cluster_tags = module.params.get('tags') diff --git a/route53_facts.py b/route53_facts.py deleted file mode 120000 index 6b40f0529b0..00000000000 --- a/route53_facts.py +++ /dev/null @@ -1 +0,0 @@ -route53_info.py \ No newline at end of file diff --git a/route53_info.py b/route53_info.py index 322ce7b0523..e2f1cd686ff 100644 --- a/route53_info.py +++ b/route53_info.py @@ -12,7 +12,6 @@ version_added: 1.0.0 description: - Gets various details related to Route53 zone, record set or health check details. - - This module was called C(route53_facts) before Ansible 2.9. The usage did not change. options: query: description: @@ -474,8 +473,6 @@ def main(): ], check_boto3=False, ) - if module._name == 'route53_facts': - module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", date='2021-12-01', collection_name='community.aws') try: route53 = module.client('route53') From 1d456479ab1c9ee70b3623de920d969696e55df6 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Thu, 2 Dec 2021 19:47:29 +0000 Subject: [PATCH 349/683] Create ec2_asg_scheduled_action module (#779) Create ec2_asg_scheduled_action module SUMMARY This creates a new module ec2_asg_scheduled_action to create scheduled actions on Auto Scaling Groups. 
It was based on and modified from: https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py Requires: mattclay/aws-terminator#179 ISSUE TYPE New Module Pull Request COMPONENT NAME ec2_asg_scheduled_action ADDITIONAL INFORMATION Actions can be created like so: - name: Create a minimal scheduled action for an autoscaling group community.aws.ec2_asg_scheduled_action: autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action start_time: 2021 October 25 08:00 UTC recurrence: 40 22 * * 1-5 desired_capacity: 10 state: present Actions can be deleted like so: - name: Delete scheduled action community.aws.ec2_asg_scheduled_action: autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action state: absent Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: None --- ec2_asg_scheduled_action.py | 320 ++++++++++++++++++++++++++++++ 1 file changed, 320 insertions(+) create mode 100644 ec2_asg_scheduled_action.py diff --git a/ec2_asg_scheduled_action.py b/ec2_asg_scheduled_action.py new file mode 100644 index 00000000000..5f41dc31b05 --- /dev/null +++ b/ec2_asg_scheduled_action.py @@ -0,0 +1,320 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py +# (c) 2016, Mike Mochan <@mmochan> + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ec2_asg_scheduled_action +version_added: 2.2.0 +short_description: Create, modify and delete ASG scheduled scaling actions. +description: + - The module will create a new scheduled action when I(state=present) and no given action is found. + - The module will update an existing scheduled action when I(state=present) and the given action is found. + - The module will delete an existing scheduled action when I(state=absent) and the given action is found. +options: + autoscaling_group_name: + description: + - The name of the autoscaling group to add a scheduled action to. + type: str + required: true + scheduled_action_name: + description: + - The name of the scheduled action. + type: str + required: true + start_time: + description: + - Start time for the action. + type: str + end_time: + description: + - End time for the action. + type: str + time_zone: + description: + - Time zone to run against. + type: str + recurrence: + description: + - Cron style schedule to repeat the action on. + - Required when I(state=present). + type: str + min_size: + description: + - ASG min capacity. + type: int + max_size: + description: + - ASG max capacity. + type: int + desired_capacity: + description: + - ASG desired capacity. + type: int + state: + description: + - Create / update or delete scheduled action. + type: str + required: false + default: present + choices: ['present', 'absent'] +author: Mark Woolley (@marknet15) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Create a scheduled action for an autoscaling group.
+- name: Create a minimal scheduled action for an autoscaling group + community.aws.ec2_asg_scheduled_action: + region: eu-west-1 + autoscaling_group_name: test_asg + scheduled_action_name: test_scheduled_action + start_time: 2021 October 25 08:00 UTC + recurrence: 40 22 * * 1-5 + desired_capacity: 10 + state: present + register: scheduled_action + +- name: Create a scheduled action for an autoscaling group + community.aws.ec2_asg_scheduled_action: + region: eu-west-1 + autoscaling_group_name: test_asg + scheduled_action_name: test_scheduled_action + start_time: 2021 October 25 08:00 UTC + end_time: 2021 October 25 08:00 UTC + time_zone: Europe/London + recurrence: 40 22 * * 1-5 + min_size: 10 + max_size: 15 + desired_capacity: 10 + state: present + register: scheduled_action + +- name: Delete scheduled action + community.aws.ec2_asg_scheduled_action: + region: eu-west-1 + autoscaling_group_name: test_asg + scheduled_action_name: test_scheduled_action + state: absent +''' +RETURN = r''' +scheduled_action_name: + description: The name of the scheduled action. + returned: when I(state=present) + type: str + sample: test_scheduled_action +start_time: + description: Start time for the action. + returned: when I(state=present) + type: str + sample: '2021 October 25 08:00 UTC' +end_time: + description: End time for the action. + returned: when I(state=present) + type: str + sample: '2021 October 25 08:00 UTC' +time_zone: + description: The time zone of the scheduled action. + returned: when I(state=present) + type: str + sample: Europe/London +recurrence: + description: Cron style schedule to repeat the action on. + returned: when I(state=present) + type: str + sample: '40 22 * * 1-5' +min_size: + description: ASG min capacity. + returned: when I(state=present) + type: int + sample: 1 +max_size: + description: ASG max capacity. + returned: when I(state=present) + type: int + sample: 2 +desired_capacity: + description: ASG desired capacity.
+ returned: when I(state=present) + type: int + sample: 1 +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +try: + from dateutil.parser import parse as timedate_parse + HAS_DATEUTIL = True +except ImportError: + HAS_DATEUTIL = False + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def format_request(): + params = dict( + AutoScalingGroupName=module.params.get('autoscaling_group_name'), + ScheduledActionName=module.params.get('scheduled_action_name'), + Recurrence=module.params.get('recurrence') + ) + + # Some of these params are optional + if module.params.get('desired_capacity') is not None: + params['DesiredCapacity'] = module.params.get('desired_capacity') + + if module.params.get('min_size') is not None: + params['MinSize'] = module.params.get('min_size') + + if module.params.get('max_size') is not None: + params['MaxSize'] = module.params.get('max_size') + + if module.params.get('time_zone') is not None: + params['TimeZone'] = module.params.get('time_zone') + + if module.params.get('start_time') is not None: + params['StartTime'] = module.params.get('start_time') + + if module.params.get('end_time') is not None: + params['EndTime'] = module.params.get('end_time') + + return params + + +def delete_scheduled_action(current_actions): + if current_actions == []: + return False + + if module.check_mode: + return True + + params = dict( + AutoScalingGroupName=module.params.get('autoscaling_group_name'), + ScheduledActionName=module.params.get('scheduled_action_name') + ) + + try: + client.delete_scheduled_action(aws_retry=True, **params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + return True + + +def get_scheduled_actions(): + params = dict( + AutoScalingGroupName=module.params.get('autoscaling_group_name'), + ScheduledActionNames=[module.params.get('scheduled_action_name')] + ) + + try: + actions = client.describe_scheduled_actions(aws_retry=True, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + current_actions = actions.get("ScheduledUpdateGroupActions") + + return current_actions + + +def put_scheduled_update_group_action(current_actions): + changed = False + changes = dict() + params = format_request() + + if len(current_actions) < 1: + changed = True + else: + # To correctly detect changes convert the start_time & end_time to datetime object + if "StartTime" in params: + params["StartTime"] = timedate_parse(params["StartTime"]) + if "EndTime" in params: + params["EndTime"] = timedate_parse(params["EndTime"]) + + for k, v in params.items(): + if current_actions[0].get(k) != v: + changes[k] = v + + if changes: + changed = True + + if module.check_mode: + return changed + + try: + client.put_scheduled_update_group_action(aws_retry=True, **params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + return changed + + +def main(): + global module + global client + + argument_spec = dict( + autoscaling_group_name=dict(required=True, type='str'), + scheduled_action_name=dict(required=True, type='str'), + start_time=dict(default=None, type='str'), + end_time=dict(default=None, type='str'), + time_zone=dict(default=None, type='str'), + recurrence=dict(type='str'), + min_size=dict(default=None, type='int'), + max_size=dict(default=None, type='int'), + desired_capacity=dict(default=None, type='int'), + state=dict(default='present', 
choices=['present', 'absent']) ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['recurrence']]], + supports_check_mode=True + ) + + if not HAS_DATEUTIL: + module.fail_json(msg='dateutil is required for this module') + + if not module.botocore_at_least("1.20.24"): + module.fail_json(msg='botocore version >= 1.20.24 is required for this module') + + client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) + current_actions = get_scheduled_actions() + state = module.params.get('state') + results = dict() + + if state == 'present': + changed = put_scheduled_update_group_action(current_actions) + if not module.check_mode: + updated_action = get_scheduled_actions()[0] + results = dict( + scheduled_action_name=updated_action.get('ScheduledActionName'), + start_time=updated_action.get('StartTime'), + end_time=updated_action.get('EndTime'), + time_zone=updated_action.get('TimeZone'), + recurrence=updated_action.get('Recurrence'), + min_size=updated_action.get('MinSize'), + max_size=updated_action.get('MaxSize'), + desired_capacity=updated_action.get('DesiredCapacity') + ) + else: + changed = delete_scheduled_action(current_actions) + + results['changed'] = changed + module.exit_json(**results) + + +if __name__ == '__main__': + main() From 0a72531a17485500bb95df1214badec1985dda57 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 10 Dec 2021 20:49:07 +0100 Subject: [PATCH 350/683] more examples (#821) wafv2_web_acl: more examples SUMMARY Closes: #819 More examples Reference to the official documentation for managed rules ISSUE TYPE Docs Pull Request COMPONENT NAME wafv2_web_acl Reviewed-by: Alina Buzachis Reviewed-by: None --- wafv2_web_acl.py | 56 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 1476b1d48d0..5306c2e047f 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -63,6 +63,7 @@ rules: description: - The Rule statements used to identify the web requests that you want to allow, block, or count. + - For a list of managed rules see U(https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html). type: list elements: dict suboptions: @@ -111,7 +112,7 @@ metric_name: blub rules: - name: zwei - priority: 2 + priority: 0 action: block: {} visibility_config: @@ -137,6 +138,59 @@ managed_rule_group_statement: vendor_name: AWS name: AWSManagedRulesAdminProtectionRuleSet + + # AWS Managed Bad Input Rule Set + # but allow PROPFIND_METHOD used e.g. by webdav + - name: bad_input_protect_whitelist_webdav + priority: 2 + override_action: + none: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: bad_input_protect + statement: + managed_rule_group_statement: + vendor_name: AWS + name: AWSManagedRulesKnownBadInputsRuleSet + excluded_rules: + - name: PROPFIND_METHOD + + # Rate Limit example. 1500 req/5min + # counted for two domains via or_statement.
login.mydomain.tld and api.mydomain.tld + - name: rate_limit_example + priority: 3 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: mydomain-ratelimit + statement: + rate_based_statement: + limit: 1500 + aggregate_key_type: IP + scope_down_statement: + or_statement: + statements: + - byte_match_statement: + search_string: login.mydomain.tld + positional_constraint: CONTAINS + field_to_match: + single_header: + name: host + text_transformations: + - type: LOWERCASE + priority: 0 + - byte_match_statement: + search_string: api.mydomain.tld + positional_constraint: CONTAINS + field_to_match: + single_header: + name: host + text_transformations: + - type: LOWERCASE + priority: 0 tags: A: B C: D From c1ea5385f88f826216e77bfe41db71a3e390998d Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 13 Dec 2021 09:59:52 +0000 Subject: [PATCH 351/683] iam_user password management support (#822) iam_user password management support SUMMARY The iam module currently supports password management for IAM users, but the newer iam_user module currently does not. This PR adds the password management functionality to bring parity with the old module. To ensure the IAM user is properly created before adding a login profile, the waiter for the IAM creation has also been added. ISSUE TYPE Feature Pull Request COMPONENT NAME iam_user ADDITIONAL INFORMATION The added functionality uses the create_login_profile and update_login_profile methods: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_login_profile https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.update_login_profile Local integration tests run: ansible-test integration --docker centos8 -vv iam_user --allow-unsupported ... PLAY RECAP ********************************************************************* testhost : ok=92 changed=24 unreachable=0 failed=0 skipped=0 rescued=0 ignored=2 AWS ACTIONS: ['iam:AddUserToGroup', 'iam:AttachUserPolicy', 'iam:CreateGroup', 'iam:CreateLoginProfile', 'iam:CreateUser', 'iam:DeleteGroup', 'iam:DeleteLoginProfile', 'iam:DeleteUser', 'iam:DetachUserPolicy', 'iam:GetGroup', 'iam:GetUser', 'iam:ListAccessKeys', 'iam:ListAttachedGroupPolicies', 'iam:ListAttachedUserPolicies', 'iam:ListGroupsForUser', 'iam:ListMFADevices', 'iam:ListPolicies', 'iam:ListSSHPublicKeys', 'iam:ListServiceSpecificCredentials', 'iam:ListSigningCertificates', 'iam:ListUserPolicies', 'iam:ListUsers', 'iam:RemoveUserFromGroup', 'iam:TagUser', 'iam:UntagUser', 'iam:UpdateLoginProfile'] Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: None --- iam_user.py | 130 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 126 insertions(+), 4 deletions(-) diff --git a/iam_user.py b/iam_user.py index 2a7998a6d10..e308c3cf0cb 100644 --- a/iam_user.py +++ b/iam_user.py @@ -12,7 +12,8 @@ version_added: 1.0.0 short_description: Manage AWS IAM users description: - - Manage AWS IAM users. + - A module to manage AWS IAM users. + - The module does not manage groups that users belong to, group memberships can be managed using `iam_group`. author: Josh Souza (@joshsouza) options: name: @@ -20,6 +21,27 @@ - The name of the user to create. required: true type: str + password: + description: + - The password to apply to the user.
+ required: false + type: str + version_added: 2.2.0 + update_password: + default: always + choices: ['always', 'on_create'] + description: + - When to update user passwords. + - I(update_password=always) will ensure the password is set to I(password). + - I(update_password=on_create) will only set the password for newly created users. + type: str + version_added: 2.2.0 + remove_password: + description: + - Option to delete user login passwords. + - This field is mutually exclusive to I(password). + type: 'bool' + version_added: 2.2.0 managed_policies: description: - A list of managed policy ARNs or friendly names to attach to the user. @@ -36,7 +58,7 @@ type: str purge_policies: description: - - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detatched. + - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. required: false default: false type: bool @@ -53,6 +75,19 @@ default: true type: bool version_added: 2.1.0 + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for IAM user creation before returning. + default: True + type: bool + version_added: 2.2.0 + wait_timeout: + description: + - How long (in seconds) to wait for creation / updates to complete. + default: 120 + type: int + version_added: 2.2.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -70,6 +105,12 @@ name: testuser1 state: present +- name: Create a user with a password + community.aws.iam_user: + name: testuser1 + password: SomeSecurePassword + state: present + - name: Create a user and attach a managed policy using its ARN community.aws.iam_user: name: testuser1 @@ -179,15 +220,75 @@ def convert_friendly_names_to_arns(connection, module, policy_names): module.fail_json(msg="Couldn't find policy: " + str(e)) +def wait_iam_exists(connection, module): + if module.check_mode: + return + if not module.params.get('wait'): + return + + user_name = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = connection.get_waiter('user_exists') + waiter.wait( + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + UserName=user_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on IAM user creation') + + +def create_or_update_login_profile(connection, module): + + # Apply new password / update password for the user + user_params = dict() + user_params['UserName'] = module.params.get('name') + user_params['Password'] = module.params.get('password') + + try: + connection.update_login_profile(**user_params) + except is_boto3_error_code('NoSuchEntity'): + try: + connection.create_login_profile(**user_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create user login profile") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to update user login profile") + + return True + + +def delete_login_profile(connection, module): + + user_params = dict() + user_params['UserName'] = module.params.get('name') + + try: + connection.delete_login_profile(**user_params) + 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete user login profile") + + return True + + def create_or_update_user(connection, module): params = dict() params['UserName'] = module.params.get('name') managed_policies = module.params.get('managed_policies') purge_policies = module.params.get('purge_policies') + if module.params.get('tags') is not None: params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + changed = False + if managed_policies: managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) @@ -205,8 +306,23 @@ def create_or_update_user(connection, module): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create user") + + # Wait for user to be fully available before continuing + if module.params.get('wait'): + wait_iam_exists(connection, module) + + if module.params.get('password') is not None: + create_or_update_login_profile(connection, module) else: - changed = update_user_tags(connection, module, params, user) + login_profile_result = None + update_result = update_user_tags(connection, module, params, user) + + if module.params['update_password'] == "always" and module.params.get('password') is not None: + login_profile_result = create_or_update_login_profile(connection, module) + elif module.params.get('remove_password'): + login_profile_result = delete_login_profile(connection, module) + + changed = bool(update_result) or bool(login_profile_result) # Manage managed policies current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) @@ -388,16 +504,22 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), + password=dict(type='str', no_log=True), + update_password=dict(default='always', choices=['always', 'on_create'], no_log=False), + remove_password=dict(type='bool'), managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), state=dict(choices=['present', 'absent'], required=True), purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']), tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=120, type='int'), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, + mutually_exclusive=[['password', 'remove_password']] ) connection = module.client('iam') From 4413bc66cf2a8f0008963bf4dd432a9a26e723d0 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Sat, 18 Dec 2021 17:50:39 +0000 Subject: [PATCH 352/683] Make deprecated updates in dynamodb_table module fail (#837) Make deprecated updates in dynamodb_table module fail SUMMARY Make deprecated primary key updates and 'all' type index includes fail in the dynamodb_table module for 3.0.0 ISSUE TYPE Feature Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION Both parts were previously deprecated and silently ignored; this PR makes those updates fail outright when the bad config is passed.
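For illustration, a hypothetical task along the following lines (the table and key names are invented) would previously emit a deprecation warning and silently keep the old key; it now fails because it attempts to redefine the hash key of an existing table:

- name: Attempt to change the primary key of an existing table (now fails)
  community.aws.dynamodb_table:
    name: my-existing-table        # assumes a table originally created with a different hash key
    hash_key_name: new_id
    hash_key_type: STRING
    read_capacity: 1
    write_capacity: 1
    state: present

Likewise, an index of type 'all' that lists non-key attributes under includes is a combination DynamoDB itself rejects, so the module now reports it as an error instead of quietly dropping the attribute list.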
Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: None --- dynamodb_table.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 98d6fa632f9..1ea4391223c 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -678,12 +678,9 @@ def _generate_index(index, include_throughput=True): projection['NonKeyAttributes'] = non_key_attributes else: if non_key_attributes: - module.deprecate( + module.fail_json( "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Attempts to set this attributes are currently " - "ignored, but in future will result in a failure. " - "Index name: {0}".format(index['name']), - version='3.0.0', collection_name='community.aws') + "indexes of type 'all'. Index name: {0}".format(index['name'])) idx = dict( IndexName=index['name'], @@ -857,11 +854,7 @@ def _update_tags(current_table): def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: - module.deprecate("DynamoDB does not support updating the Primary keys on a table. " - "Attempts to change the keys are currently ignored, but in future will " - "result in a failure. " - "Changed paramters are {0}".format(primary_index_changes), - version='3.0.0', collection_name='community.aws') + module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed parameters are: {0}".format(primary_index_changes)) changed = False changed |= _update_table(current_table) From 888000a53356c1042c7de65d9a3252a483e99538 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 20 Dec 2021 14:34:23 +0000 Subject: [PATCH 353/683] Make deprecated keys_attr param ignored in aws_kms_info (#838) Make deprecated keys_attr param ignored in aws_kms_info SUMMARY Make deprecated keys_attr param ignored in aws_kms_info as planned for 3.0.0 ISSUE TYPE Feature Pull Request COMPONENT NAME aws_kms_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: None --- aws_kms_info.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index 5b92b39dba2..ee1649984f1 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -470,10 +470,9 @@ def main(): # We originally returned "keys" if module.params['keys_attr']: module.deprecate("Returning results in the 'keys' attribute conflicts with the builtin keys() method on " - "dicts and as such is deprecated. Please use the kms_keys attribute. This warning can be " + "dicts and as such is deprecated and is now ignored. Please use the kms_keys attribute. 
This warning can be " "silenced by setting keys_attr to False.", version='3.0.0', collection_name='community.aws') - ret_params.update(dict(keys=filtered_keys)) module.exit_json(**ret_params) From 5368ec6765de734c5348a90ce9bb8d6c73b66779 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 5 Jan 2022 11:01:34 +0100 Subject: [PATCH 354/683] Fix short EKS cluster names (#818) Fix short EKS cluster names SUMMARY Fixes #817 ISSUE TYPE Bugfix Pull Request COMPONENT NAME aws_eks ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: None --- aws_eks_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_eks_cluster.py b/aws_eks_cluster.py index 64627377c41..895758f0cf5 100644 --- a/aws_eks_cluster.py +++ b/aws_eks_cluster.py @@ -204,7 +204,7 @@ def ensure_present(client, module): resourcesVpcConfig=dict( subnetIds=subnets, securityGroupIds=groups), - clientRequestToken='ansible-create-%s' % name) + ) if module.params['version']: params['version'] = module.params['version'] cluster = client.create_cluster(**params)['cluster'] From c1ea5385f88f826216e77bfe41db71a3e390998d Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 5 Jan 2022 21:13:08 +0100 Subject: [PATCH 355/683] Remove deprecated Modules (#839) Remove deprecated Modules SUMMARY Remove deprecated modules for 3.0.0 release ISSUE TYPE Feature Pull Request COMPONENT NAME community.aws.iam community.aws.rds Reviewed-by: Alina Buzachis Reviewed-by: Jill R Reviewed-by: None --- iam.py | 880 ----------------------------------- rds.py | 1400 -------------------------------------------------------- 2 files changed, 2280 deletions(-) delete mode 100644 iam.py delete mode 100644 rds.py diff --git a/iam.py b/iam.py deleted file mode 100644 index 52aca2650ab..00000000000 --- a/iam.py +++ /dev/null @@ -1,880 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam -version_added: 1.0.0 -deprecated: - removed_in: 3.0.0 - why: The iam module is based upon a deprecated version of the AWS SDK. - alternative: >- - Use M(community.aws.iam_user), M(community.aws.iam_group), M(community.aws.iam_role), M(community.aws.iam_policy) - and M(community.aws.iam_managed_policy) modules. - -short_description: Manage IAM users, groups, roles and keys -description: - - Allows for the management of IAM users, user API keys, groups, roles. -options: - iam_type: - description: - - Type of IAM resource. - choices: ["user", "group", "role"] - type: str - required: true - name: - description: - - Name of IAM resource to create or identify. - required: true - type: str - new_name: - description: - - When I(state=update), will replace I(name) with I(new_name) on IAM resource. - type: str - new_path: - description: - - When I(state=update), will replace the path with new_path on the IAM resource. - type: str - state: - description: - - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. - required: true - choices: [ "present", "absent", "update" ] - type: str - path: - description: - - When creating or updating, specify the desired path of the resource. - - If I(state=present), it will replace the current path to match what is passed in when they do not match. 
- default: "/" - type: str - trust_policy: - description: - - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. - - Mutually exclusive with I(trust_policy_filepath). - type: dict - trust_policy_filepath: - description: - - The path to the trust policy document that grants an entity permission to assume the role. - - Mutually exclusive with I(trust_policy). - type: str - access_key_state: - description: - - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. - choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"] - type: str - key_count: - description: - - When I(access_key_state=create) it will ensure this quantity of keys are present. - default: 1 - type: int - access_key_ids: - description: - - A list of the keys that you want affected by the I(access_key_state) parameter. - type: list - elements: str - groups: - description: - - A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed. - type: list - elements: str - password: - description: - - When I(type=user) and either I(state=present) or I(state=update), define the users login password. - - Note that this will always return 'changed'. - type: str - update_password: - default: always - choices: ['always', 'on_create'] - description: - - When to update user passwords. - - I(update_password=always) will ensure the password is set to I(password). - - I(update_password=on_create) will only set the password for newly created users. - type: str -notes: - - 'Currently boto does not support the removal of Managed Policies, the module will error out if your - user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' -author: - - "Jonathan I. Davila (@defionscode)" - - "Paul Seiffert (@seiffert)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -requirements: -- boto >= 2.49.0 -''' - -EXAMPLES = r''' -# Basic user creation example -- name: Create two new IAM users with API keys - community.aws.iam: - iam_type: user - name: "{{ item }}" - state: present - password: "{{ temp_pass }}" - access_key_state: create - loop: - - jcleese - - mpython - -# Advanced example, create two new groups and add the pre-existing user -# jdavila to both groups. 
-- name: Create Two Groups, Mario and Luigi - community.aws.iam: - iam_type: group - name: "{{ item }}" - state: present - loop: - - Mario - - Luigi - register: new_groups - -- name: Update user - community.aws.iam: - iam_type: user - name: jdavila - state: update - groups: "{{ item.created_group.group_name }}" - loop: "{{ new_groups.results }}" - -# Example of role with custom trust policy for Lambda service -- name: Create IAM role with custom trust relationship - community.aws.iam: - iam_type: role - name: AAALambdaTestRole - state: present - trust_policy: - Version: '2012-10-17' - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: lambda.amazonaws.com - -''' -RETURN = r''' -role_result: - description: the IAM.role dict returned by Boto - type: str - returned: if iam_type=role and state=present - sample: { - "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role", - "assume_role_policy_document": "...truncated...", - "create_date": "2017-09-02T14:32:23Z", - "path": "/", - "role_id": "AROAA1B2C3D4E5F6G7H8I", - "role_name": "my-new-role" - } -roles: - description: a list containing the name of the currently defined roles - type: list - returned: if iam_type=role and state=present - sample: [ - "my-new-role", - "my-existing-role-1", - "my-existing-role-2", - "my-existing-role-3", - "my-existing-role-...", - ] -''' - -import json -import traceback - -try: - import boto.exception - import boto.iam - import boto.iam.connection -except ImportError: - pass # Taken care of by ec2.HAS_BOTO - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - - -def _paginate(func, attr): - ''' - paginates the results from func by continuously passing in - the returned marker if the results were truncated. this returns - an iterator over the items in the returned response. `attr` is - the name of the attribute to iterate over in the response. - ''' - finished, marker = False, None - while not finished: - res = func(marker=marker) - for item in getattr(res, attr): - yield item - - finished = res.is_truncated == 'false' - if not finished: - marker = res.marker - - -def list_all_groups(iam): - return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')] - - -def list_all_users(iam): - return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')] - - -def list_all_roles(iam): - return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')] - - -def list_all_instance_profiles(iam): - return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')] - - -def create_user(module, iam, name, pwd, path, key_state, key_count): - key_qty = 0 - keys = [] - try: - user_meta = iam.create_user( - name, path).create_user_response.create_user_result.user - changed = True - if pwd is not None: - pwd = iam.create_login_profile(name, pwd) - if key_state in ['create']: - if key_count: - while key_count > key_qty: - keys.append(iam.create_access_key( - user_name=name).create_access_key_response. - create_access_key_result. 
- access_key) - key_qty += 1 - else: - keys = None - except boto.exception.BotoServerError as err: - module.fail_json(changed=False, msg=str(err)) - else: - user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) - return (user_info, changed) - - -def delete_dependencies_first(module, iam, name): - changed = False - # try to delete any keys - try: - current_keys = [ck['access_key_id'] for ck in - iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] - for key in current_keys: - iam.delete_access_key(key, name) - changed = True - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc()) - - # try to delete login profiles - try: - login_profile = iam.get_login_profiles(name).get_login_profile_response - iam.delete_login_profile(name) - changed = True - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg: - module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc()) - - # try to detach policies - try: - for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: - iam.delete_user_policy(name, policy) - changed = True - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if 'must detach all policies first' in error_msg: - module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears" - "that %s has Managed Polices. This is not " - "currently supported by boto. Please detach the policies " - "through the console and try again." % name) - module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc()) - - # try to deactivate associated MFA devices - try: - mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', []) - for device in mfa_devices: - iam.deactivate_mfa_device(name, device['serial_number']) - changed = True - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc()) - - return changed - - -def delete_user(module, iam, name): - changed = delete_dependencies_first(module, iam, name) - try: - iam.delete_user(name) - except boto.exception.BotoServerError as ex: - module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc()) - else: - changed = True - return name, changed - - -def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): - changed = False - name_change = False - if updated and new_name: - name = new_name - try: - current_keys = [ck['access_key_id'] for ck in - iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] - status = [ck['status'] for ck in - iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] - key_qty = len(current_keys) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if 'cannot be found' in error_msg and updated: - current_keys = [ck['access_key_id'] for ck in - iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] - status = [ck['status'] for ck in - iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] - name = new_name - 
else: - module.fail_json(changed=False, msg=str(err)) - - updated_key_list = {} - - if new_name or new_path: - c_path = iam.get_user(name).get_user_result.user['path'] - if (name != new_name) or (c_path != new_path): - changed = True - try: - if not updated: - user = iam.update_user( - name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata - else: - user = iam.update_user( - name, new_path=new_path).update_user_response.response_metadata - user['updates'] = dict( - old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - module.fail_json(changed=False, msg=str(err)) - else: - if not updated: - name_change = True - - if pwd: - try: - iam.update_login_profile(name, pwd) - changed = True - except boto.exception.BotoServerError: - try: - iam.create_login_profile(name, pwd) - changed = True - except boto.exception.BotoServerError as err: - error_msg = boto_exception(str(err)) - if 'Password does not conform to the account password policy' in error_msg: - module.fail_json(changed=False, msg="Password doesn't conform to policy") - else: - module.fail_json(msg=error_msg) - - try: - current_keys = [ck['access_key_id'] for ck in - iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] - status = [ck['status'] for ck in - iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] - key_qty = len(current_keys) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if 'cannot be found' in error_msg and updated: - current_keys = [ck['access_key_id'] for ck in - iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] - status = [ck['status'] for ck in - iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] - name = new_name - else: - module.fail_json(changed=False, msg=str(err)) - - new_keys = [] - if key_state == 'create': - try: - while key_count > key_qty: - new_keys.append(iam.create_access_key( - user_name=name).create_access_key_response.create_access_key_result.access_key) - key_qty += 1 - changed = True - - except boto.exception.BotoServerError as err: - module.fail_json(changed=False, msg=str(err)) - - if keys and key_state: - for access_key in keys: - if key_state in ('active', 'inactive'): - if access_key in current_keys: - for current_key, current_key_state in zip(current_keys, status): - if key_state != current_key_state.lower(): - try: - iam.update_access_key(access_key, key_state.capitalize(), user_name=name) - changed = True - except boto.exception.BotoServerError as err: - module.fail_json(changed=False, msg=str(err)) - else: - module.fail_json(msg="Supplied keys not found for %s. " - "Current keys: %s. " - "Supplied key(s): %s" % - (name, current_keys, keys) - ) - - if key_state == 'remove': - if access_key in current_keys: - try: - iam.delete_access_key(access_key, user_name=name) - except boto.exception.BotoServerError as err: - module.fail_json(changed=False, msg=str(err)) - else: - changed = True - - try: - final_keys, final_key_status = \ - [ck['access_key_id'] for ck in - iam.get_all_access_keys(name). - list_access_keys_result. - access_key_metadata],\ - [ck['status'] for ck in - iam.get_all_access_keys(name). - list_access_keys_result. 
- access_key_metadata] - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - - for fk, fks in zip(final_keys, final_key_status): - updated_key_list.update({fk: fks}) - - return name_change, updated_key_list, changed, new_keys - - -def set_users_groups(module, iam, name, groups, updated=None, - new_name=None): - """ Sets groups for a user, will purge groups not explicitly passed, while - retaining pre-existing groups that also are in the new list. - """ - changed = False - - if updated: - name = new_name - - try: - orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user( - name).list_groups_for_user_result.groups] - remove_groups = [ - rg for rg in frozenset(orig_users_groups).difference(groups)] - new_groups = [ - ng for ng in frozenset(groups).difference(orig_users_groups)] - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - else: - if len(orig_users_groups) > 0: - for new in new_groups: - iam.add_user_to_group(new, name) - for rm in remove_groups: - iam.remove_user_from_group(rm, name) - else: - for group in groups: - try: - iam.add_user_to_group(group, name) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if ('The group with name %s cannot be found.' % group) in error_msg: - module.fail_json(changed=False, msg="Group %s doesn't exist" % group) - - if len(remove_groups) > 0 or len(new_groups) > 0: - changed = True - - return (groups, changed) - - -def create_group(module=None, iam=None, name=None, path=None): - changed = False - try: - iam.create_group( - name, path).create_group_response.create_group_result.group - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - else: - changed = True - return name, changed - - -def delete_group(module=None, iam=None, name=None): - changed = False - try: - iam.delete_group(name) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if ('must delete policies first') in error_msg: - for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: - iam.delete_group_policy(name, policy) - try: - iam.delete_group(name) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if ('must delete policies first') in error_msg: - module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears" - "that %s has Managed Polices. This is not " - "currently supported by boto. Please detach the policies " - "through the console and try again." 
% name) - else: - module.fail_json(changed=changed, msg=str(error_msg)) - else: - changed = True - else: - module.fail_json(changed=changed, msg=str(error_msg)) - else: - changed = True - return changed, name - - -def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): - changed = False - try: - current_group_path = iam.get_group( - name).get_group_response.get_group_result.group['path'] - if new_path: - if current_group_path != new_path: - iam.update_group(name, new_path=new_path) - changed = True - if new_name: - if name != new_name: - iam.update_group(name, new_group_name=new_name, new_path=new_path) - changed = True - name = new_name - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - - return changed, name, new_path, current_group_path - - -def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc): - changed = False - iam_role_result = None - instance_profile_result = None - try: - if name not in role_list: - changed = True - iam_role_result = iam.create_role(name, - assume_role_policy_document=trust_policy_doc, - path=path).create_role_response.create_role_result.role - - if name not in prof_list: - instance_profile_result = iam.create_instance_profile(name, path=path) \ - .create_instance_profile_response.create_instance_profile_result.instance_profile - iam.add_role_to_instance_profile(name, name) - else: - instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - else: - updated_role_list = list_all_roles(iam) - iam_role_result = iam.get_role(name).get_role_response.get_role_result.role - return changed, updated_role_list, iam_role_result, instance_profile_result - - -def delete_role(module, iam, name, role_list, prof_list): - changed = False - iam_role_result = None - instance_profile_result = None - try: - if name in role_list: - cur_ins_prof = [rp['instance_profile_name'] for rp in - iam.list_instance_profiles_for_role(name). - list_instance_profiles_for_role_result. - instance_profiles] - for profile in cur_ins_prof: - iam.remove_role_from_instance_profile(profile, name) - try: - iam.delete_role(name) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if ('must detach all policies first') in error_msg: - for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: - iam.delete_role_policy(name, policy) - try: - iam_role_result = iam.delete_role(name) - except boto.exception.BotoServerError as err: - error_msg = boto_exception(err) - if ('must detach all policies first') in error_msg: - module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears" - "that %s has Managed Polices. This is not " - "currently supported by boto. Please detach the policies " - "through the console and try again." 
% name) - else: - module.fail_json(changed=changed, msg=str(err)) - else: - changed = True - - else: - changed = True - - for prof in prof_list: - if name == prof: - instance_profile_result = iam.delete_instance_profile(name) - except boto.exception.BotoServerError as err: - module.fail_json(changed=changed, msg=str(err)) - else: - updated_role_list = list_all_roles(iam) - return changed, updated_role_list, iam_role_result, instance_profile_result - - -def main(): - argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), - groups=dict(type='list', default=None, required=False, elements='str'), - state=dict(required=True, choices=['present', 'absent', 'update']), - password=dict(default=None, required=False, no_log=True), - # setting no_log=False on update_password avoids a false positive warning about not setting no_log - update_password=dict(default='always', required=False, choices=['always', 'on_create'], no_log=False), - access_key_state=dict(default=None, required=False, choices=[ - 'active', 'inactive', 'create', 'remove', - 'Active', 'Inactive', 'Create', 'Remove']), - access_key_ids=dict(type='list', default=None, required=False, elements='str', no_log=False), - key_count=dict(type='int', default=1, required=False), - name=dict(required=True), - trust_policy_filepath=dict(default=None, required=False), - trust_policy=dict(type='dict', default=None, required=False), - new_name=dict(default=None, required=False), - path=dict(default='/', required=False), - new_path=dict(default=None, required=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[['trust_policy', 'trust_policy_filepath']], - check_boto3=False, - ) - - module.deprecate("The 'iam' module has been deprecated and replaced by the 'iam_user', 'iam_group'" - " and 'iam_role' modules'", version='3.0.0', collection_name='community.aws') - - if not HAS_BOTO: - module.fail_json(msg='This module requires boto, please install it') - - state = module.params.get('state').lower() - iam_type = module.params.get('iam_type').lower() - groups = module.params.get('groups') - name = module.params.get('name') - new_name = module.params.get('new_name') - password = module.params.get('password') - update_pw = module.params.get('update_password') - path = module.params.get('path') - new_path = module.params.get('new_path') - key_count = module.params.get('key_count') - key_state = module.params.get('access_key_state') - trust_policy = module.params.get('trust_policy') - trust_policy_filepath = module.params.get('trust_policy_filepath') - key_ids = module.params.get('access_key_ids') - - if key_state: - key_state = key_state.lower() - if any(n in key_state for n in ['active', 'inactive']) and not key_ids: - module.fail_json(changed=False, msg="At least one access key has to be defined in order" - " to use 'active' or 'inactive'") - - if iam_type == 'user' and module.params.get('password') is not None: - pwd = module.params.get('password') - elif iam_type != 'user' and module.params.get('password') is not None: - module.fail_json(msg="a password is being specified when the iam_type " - "is not user. Check parameters") - else: - pwd = None - - if iam_type != 'user' and (module.params.get('access_key_state') is not None or - module.params.get('access_key_id') is not None): - module.fail_json(msg="the IAM type must be user, when IAM access keys " - "are being modified. 
Check parameters") - - if iam_type == 'role' and state == 'update': - module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " - "please specify present or absent") - - # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file - if trust_policy_filepath: - try: - with open(trust_policy_filepath, 'r') as json_data: - trust_policy_doc = json.dumps(json.load(json_data)) - except Exception as e: - module.fail_json(msg=str(e) + ': ' + trust_policy_filepath) - elif trust_policy: - try: - trust_policy_doc = json.dumps(trust_policy) - except Exception as e: - module.fail_json(msg=str(e) + ': ' + trust_policy) - else: - trust_policy_doc = None - - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - - try: - if region: - iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) - else: - iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) - - result = {} - changed = False - - try: - orig_group_list = list_all_groups(iam) - - orig_user_list = list_all_users(iam) - - orig_role_list = list_all_roles(iam) - - orig_prof_list = list_all_instance_profiles(iam) - except boto.exception.BotoServerError as err: - module.fail_json(msg=err.message) - - if iam_type == 'user': - been_updated = False - user_groups = None - user_exists = any(n in [name, new_name] for n in orig_user_list) - if user_exists: - current_path = iam.get_user(name).get_user_result.user['path'] - if not new_path and current_path != path: - new_path = path - path = current_path - - if state == 'present' and not user_exists and not new_name: - (meta, changed) = create_user( - module, iam, name, password, path, key_state, key_count) - keys = iam.get_all_access_keys(name).list_access_keys_result.\ - access_key_metadata - if groups: - (user_groups, changed) = set_users_groups( - module, iam, name, groups, been_updated, new_name) - module.exit_json( - user_meta=meta, groups=user_groups, user_name=meta['created_user']['user_name'], keys=keys, changed=changed) - - elif state in ['present', 'update'] and user_exists: - if update_pw == 'on_create': - password = None - if name not in orig_user_list and new_name in orig_user_list: - been_updated = True - name_change, key_list, user_changed, new_key = update_user( - module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated) - if new_key: - user_meta = {'access_keys': list(new_key)} - user_meta['access_keys'].extend( - [{'access_key_id': key, 'status': value} for key, value in key_list.items() if - key not in [it['access_key_id'] for it in new_key]]) - else: - user_meta = { - 'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]} - - if name_change and new_name: - orig_name = name - name = new_name - if isinstance(groups, list): - user_groups, groups_changed = set_users_groups( - module, iam, name, groups, been_updated, new_name) - if groups_changed == user_changed: - changed = groups_changed - else: - changed = True - else: - changed = user_changed - if new_name and new_path: - module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name, - new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list, - created_keys=new_key, user_meta=user_meta) - elif new_name and not new_path and not been_updated: - module.exit_json( - changed=changed, groups=user_groups, old_user_name=orig_name, user_name=new_name, new_user_name=new_name, 
keys=key_list, - created_keys=new_key, user_meta=user_meta) - elif new_name and not new_path and been_updated: - module.exit_json( - changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state, - created_keys=new_key, user_meta=user_meta) - elif not new_name and new_path: - module.exit_json( - changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, - keys=key_list, created_keys=new_key, user_meta=user_meta) - else: - module.exit_json( - changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key, - user_meta=user_meta) - - elif state == 'update' and not user_exists: - module.fail_json( - msg="The user %s does not exist. No update made." % name) - - elif state == 'absent': - if user_exists: - try: - set_users_groups(module, iam, name, '') - name, changed = delete_user(module, iam, name) - module.exit_json(deleted_user=name, user_name=name, changed=changed) - - except Exception as ex: - module.fail_json(changed=changed, msg=str(ex)) - else: - module.exit_json( - changed=False, msg="User %s is already absent from your AWS IAM users" % name) - - elif iam_type == 'group': - group_exists = name in orig_group_list - - if state == 'present' and not group_exists: - new_group, changed = create_group(module=module, iam=iam, name=name, path=path) - module.exit_json(changed=changed, group_name=new_group) - elif state in ['present', 'update'] and group_exists: - changed, updated_name, updated_path, cur_path = update_group( - module=module, iam=iam, name=name, new_name=new_name, - new_path=new_path) - - if new_path and new_name: - module.exit_json(changed=changed, old_group_name=name, - new_group_name=updated_name, old_path=cur_path, - new_group_path=updated_path) - - if new_path and not new_name: - module.exit_json(changed=changed, group_name=name, - old_path=cur_path, - new_group_path=updated_path) - - if not new_path and new_name: - module.exit_json(changed=changed, old_group_name=name, - new_group_name=updated_name, group_path=cur_path) - - if not new_path and not new_name: - module.exit_json( - changed=changed, group_name=name, group_path=cur_path) - - elif state == 'update' and not group_exists: - module.fail_json( - changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" 
% name) - - elif state == 'absent': - if name in orig_group_list: - removed_group, changed = delete_group(module=module, iam=iam, name=name) - module.exit_json(changed=changed, delete_group=removed_group) - else: - module.exit_json(changed=changed, msg="Group already absent") - - elif iam_type == 'role': - role_list = [] - if state == 'present': - changed, role_list, role_result, instance_profile_result = create_role( - module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc) - elif state == 'absent': - changed, role_list, role_result, instance_profile_result = delete_role( - module, iam, name, orig_role_list, orig_prof_list) - elif state == 'update': - module.fail_json( - changed=False, msg='Role update not currently supported by boto.') - module.exit_json(changed=changed, roles=role_list, role_result=role_result, - instance_profile_result=instance_profile_result) - - -if __name__ == '__main__': - main() diff --git a/rds.py b/rds.py deleted file mode 100644 index 08e158c9395..00000000000 --- a/rds.py +++ /dev/null @@ -1,1400 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds -version_added: 1.0.0 -deprecated: - removed_in: 3.0.0 - why: The rds module is based upon a deprecated version of the AWS SDK. - alternative: Use M(community.aws.rds_instance), M(community.aws.rds_instance_info), and M(community.aws.rds_instance_snapshot). -short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts -description: - - Creates, deletes, or modifies rds resources. - - When creating an instance it can be either a new instance or a read-only replica of an existing instance. - - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0). - - Please use the boto3 based M(community.aws.rds_instance) instead. -options: - command: - description: - - Specifies the action to take. The 'reboot' option is available starting at version 2.0. - required: true - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] - type: str - instance_name: - description: - - Database instance identifier. - - Required except when using I(command=facts) or I(command=delete) on just a snapshot. - type: str - source_instance: - description: - - Name of the database to replicate. - - Used only when I(command=replicate). - type: str - db_engine: - description: - - The type of database. - - Used only when I(command=create). - - mariadb was added in version 2.2. - choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', - 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'] - type: str - size: - description: - - Size in gigabytes of the initial storage for the DB instance. - - Used only when I(command=create) or I(command=modify). - type: str - instance_type: - description: - - The instance type of the database. - - If not specified then the replica inherits the same instance type as the source instance. - - Required when I(command=create). - - Optional when I(command=replicate), I(command=modify) or I(command=restore). - aliases: ['type'] - type: str - username: - description: - - Master database username. - - Used only when I(command=create). 
- type: str - password: - description: - - Password for the master database username. - - Used only when I(command=create) or I(command=modify). - type: str - db_name: - description: - - Name of a database to create within the instance. - - If not specified then no database is created. - - Used only when I(command=create). - type: str - engine_version: - description: - - Version number of the database engine to use. - - If not specified then the current Amazon RDS default engine version is used - - Used only when I(command=create). - type: str - parameter_group: - description: - - Name of the DB parameter group to associate with this instance. - - If omitted then the RDS default DBParameterGroup will be used. - - Used only when I(command=create) or I(command=modify). - type: str - license_model: - description: - - The license model for this DB instance. - - Used only when I(command=create) or I(command=restore). - choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] - type: str - multi_zone: - description: - - Specifies if this is a Multi-availability-zone deployment. - - Can not be used in conjunction with I(zone) parameter. - - Used only when I(command=create) or I(command=modify). - type: bool - iops: - description: - - Specifies the number of IOPS for the instance. - - Used only when I(command=create) or I(command=modify). - - Must be an integer greater than 1000. - type: str - security_groups: - description: - - Comma separated list of one or more security groups. - - Used only when I(command=create) or I(command=modify). - type: str - vpc_security_groups: - description: - - Comma separated list of one or more vpc security group ids. - - Also requires I(subnet) to be specified. - - Used only when I(command=create) or I(command=modify). - type: list - elements: str - port: - description: - - Port number that the DB instance uses for connections. - - Used only when I(command=create) or I(command=replicate). - - 'Defaults to the standard ports for each I(db_engine): C(3306) for MySQL and MariaDB, C(1521) for Oracle - C(1433) for SQL Server, C(5432) for PostgreSQL.' - type: int - upgrade: - description: - - Indicates that minor version upgrades should be applied automatically. - - Used only when I(command=create) or I(command=modify) or I(command=restore) or I(command=replicate). - type: bool - default: false - option_group: - description: - - The name of the option group to use. - - If not specified then the default option group is used. - - Used only when I(command=create). - type: str - maint_window: - description: - - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))' - - Times are specified in UTC. - - If not specified then a random maintenance window is assigned. - - Used only when I(command=create) or I(command=modify). - type: str - backup_window: - description: - - 'Backup window in format of C(hh24:mi-hh24:mi). (Example: C(18:00-20:30))' - - Times are specified in UTC. - - If not specified then a random backup window is assigned. - - Used only when command=create or command=modify. - type: str - backup_retention: - description: - - Number of days backups are retained. - - Set to 0 to disable backups. - - Default is 1 day. - - 'Valid range: 0-35.' - - Used only when I(command=create) or I(command=modify). - type: str - zone: - description: - - availability zone in which to launch the instance. - - Used only when I(command=create), I(command=replicate) or I(command=restore). 
- - Can not be used in conjunction with I(multi_zone) parameter. - aliases: ['aws_zone', 'ec2_zone'] - type: str - subnet: - description: - - VPC subnet group. - - If specified then a VPC instance is created. - - Used only when I(command=create). - type: str - snapshot: - description: - - Name of snapshot to take. - - When I(command=delete), if no I(snapshot) name is provided then no snapshot is taken. - - When I(command=delete), if no I(instance_name) is provided the snapshot is deleted. - - Used with I(command=facts), I(command=delete) or I(command=snapshot). - type: str - wait: - description: - - When I(command=create), replicate, modify or restore then wait for the database to enter the 'available' state. - - When I(command=delete), wait for the database to be terminated. - type: bool - default: false - wait_timeout: - description: - - How long before wait gives up, in seconds. - - Used when I(wait=true). - default: 300 - type: int - apply_immediately: - description: - - When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the - next preferred maintenance window. - - Used only when I(command=modify). - type: bool - default: false - force_failover: - description: - - If enabled, the reboot is done using a MultiAZ failover. - - Used only when I(command=reboot). - type: bool - default: false - new_instance_name: - description: - - Name to rename an instance to. - - Used only when I(command=modify). - type: str - character_set_name: - description: - - Associate the DB instance with a specified character set. - - Used with I(command=create). - type: str - publicly_accessible: - description: - - Explicitly set whether the resource should be publicly accessible or not. - - Used with I(command=create), I(command=replicate). - - Requires boto >= 2.26.0 - type: str - tags: - description: - - tags dict to apply to a resource. - - Used with I(command=create), I(command=replicate), I(command=restore). 
- - Requires boto >= 2.26.0 - type: dict -author: - - "Bruce Pennypacker (@bpennypacker)" - - "Will Thames (@willthames)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -requirements: -- boto >= 2.49.0 -''' - -# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD - -EXAMPLES = r''' -- name: Basic mysql provisioning example - community.aws.rds: - command: create - instance_name: new-database - db_engine: MySQL - size: 10 - instance_type: db.m1.small - username: mysql_admin - password: 1nsecure - tags: - Environment: testing - Application: cms - -- name: Create a read-only replica and wait for it to become available - community.aws.rds: - command: replicate - instance_name: new-database-replica - source_instance: new_database - wait: yes - wait_timeout: 600 - -- name: Delete an instance, but create a snapshot before doing so - community.aws.rds: - command: delete - instance_name: new-database - snapshot: new_database_snapshot - -- name: Get facts about an instance - community.aws.rds: - command: facts - instance_name: new-database - register: new_database_facts - -- name: Rename an instance and wait for the change to take effect - community.aws.rds: - command: modify - instance_name: new-database - new_instance_name: renamed-database - wait: yes - -- name: Reboot an instance and wait for it to become available again - community.aws.rds: - command: reboot - instance_name: database - wait: yes - -# Restore a Postgres db instance from a snapshot, wait for it to become available again, and -# then modify it to add your security group. Also, display the new endpoint. -# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI -- community.aws.rds: - command: restore - snapshot: mypostgres-snapshot - instance_name: MyNewInstanceName - region: us-west-2 - zone: us-west-2b - subnet: default-vpc-xx441xxx - publicly_accessible: yes - wait: yes - wait_timeout: 600 - tags: - Name: pg1_test_name_tag - register: rds - -- community.aws.rds: - command: modify - instance_name: MyNewInstanceName - region: us-west-2 - vpc_security_groups: sg-xxx945xx - -- ansible.builtin.debug: - msg: "The new db endpoint is {{ rds.instance.endpoint }}" -''' - -RETURN = r''' -instance: - description: the rds instance - returned: always - type: complex - contains: - engine: - description: the name of the database engine - returned: when RDS instance exists - type: str - sample: "oracle-se" - engine_version: - description: the version of the database engine - returned: when RDS instance exists - type: str - sample: "11.2.0.4.v6" - license_model: - description: the license model information - returned: when RDS instance exists - type: str - sample: "bring-your-own-license" - character_set_name: - description: the name of the character set that this instance is associated with - returned: when RDS instance exists - type: str - sample: "AL32UTF8" - allocated_storage: - description: the allocated storage size in gigabytes (GB) - returned: when RDS instance exists - type: str - sample: "100" - publicly_accessible: - description: the accessibility options for the DB instance - returned: when RDS instance exists - type: bool - sample: "true" - latest_restorable_time: - description: the latest time to which a database can be restored with point-in-time restore - returned: when RDS instance exists - type: str - sample: "1489707802.0" - secondary_availability_zone: - description: the name of the secondary AZ for a DB instance with multi-AZ support - 
returned: when RDS instance exists and is multi-AZ - type: str - sample: "eu-west-1b" - backup_window: - description: the daily time range during which automated backups are created if automated backups are enabled - returned: when RDS instance exists and automated backups are enabled - type: str - sample: "03:00-03:30" - auto_minor_version_upgrade: - description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window - returned: when RDS instance exists - type: bool - sample: "true" - read_replica_source_dbinstance_identifier: - description: the identifier of the source DB instance if this RDS instance is a read replica - returned: when read replica RDS instance exists - type: str - sample: "null" - db_name: - description: the name of the database to create when the DB instance is created - returned: when RDS instance exists - type: str - sample: "ASERTG" - endpoint: - description: the endpoint uri of the database instance - returned: when RDS instance exists - type: str - sample: "my-ansible-database.asdfaosdgih.us-east-1.rds.amazonaws.com" - port: - description: the listening port of the database instance - returned: when RDS instance exists - type: int - sample: 3306 - parameter_groups: - description: the list of DB parameter groups applied to this RDS instance - returned: when RDS instance exists and parameter groups are defined - type: complex - contains: - parameter_apply_status: - description: the status of parameter updates - returned: when RDS instance exists - type: str - sample: "in-sync" - parameter_group_name: - description: the name of the DP parameter group - returned: when RDS instance exists - type: str - sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz" - option_groups: - description: the list of option group memberships for this RDS instance - returned: when RDS instance exists - type: complex - contains: - option_group_name: - description: the option group name for this RDS instance - returned: when RDS instance exists - type: str - sample: "default:oracle-se-11-2" - status: - description: the status of the RDS instance's option group membership - returned: when RDS instance exists - type: str - sample: "in-sync" - pending_modified_values: - description: a dictionary of changes to the RDS instance that are pending - returned: when RDS instance exists - type: complex - contains: - db_instance_class: - description: the new DB instance class for this RDS instance that will be applied or is in progress - returned: when RDS instance exists - type: str - sample: "null" - db_instance_identifier: - description: the new DB instance identifier this RDS instance that will be applied or is in progress - returned: when RDS instance exists - type: str - sample: "null" - allocated_storage: - description: the new allocated storage size for this RDS instance that will be applied or is in progress - returned: when RDS instance exists - type: str - sample: "null" - backup_retention_period: - description: the pending number of days for which automated backups are retained - returned: when RDS instance exists - type: str - sample: "null" - engine_version: - description: indicates the database engine version - returned: when RDS instance exists - type: str - sample: "null" - iops: - description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied - returned: when RDS instance exists - type: str - sample: "null" - master_user_password: - description: the pending or in-progress change of the 
master credentials for this RDS instance - returned: when RDS instance exists - type: str - sample: "null" - multi_az: - description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment - returned: when RDS instance exists - type: str - sample: "null" - port: - description: specifies the pending port for this RDS instance - returned: when RDS instance exists - type: str - sample: "null" - db_subnet_groups: - description: information on the subnet group associated with this RDS instance - returned: when RDS instance exists - type: complex - contains: - description: - description: the subnet group associated with the DB instance - returned: when RDS instance exists - type: str - sample: "Subnets for the UAT RDS SQL DB Instance" - name: - description: the name of the DB subnet group - returned: when RDS instance exists - type: str - sample: "samplesubnetgrouprds-j6paiqkxqp4z" - status: - description: the status of the DB subnet group - returned: when RDS instance exists - type: str - sample: "complete" - subnets: - description: the description of the DB subnet group - returned: when RDS instance exists - type: complex - contains: - availability_zone: - description: subnet availability zone information - returned: when RDS instance exists - type: complex - contains: - name: - description: availability zone - returned: when RDS instance exists - type: str - sample: "eu-west-1b" - provisioned_iops_capable: - description: whether provisioned iops are available in AZ subnet - returned: when RDS instance exists - type: bool - sample: "false" - identifier: - description: the identifier of the subnet - returned: when RDS instance exists - type: str - sample: "subnet-3fdba63e" - status: - description: the status of the subnet - returned: when RDS instance exists - type: str - sample: "active" -''' - -import time - -try: - import boto.rds - import boto.exception -except ImportError: - pass # Taken care of by ec2.HAS_BOTO - -try: - import boto.rds2 - import boto.rds2.exceptions - HAS_RDS2 = True -except ImportError: - HAS_RDS2 = False - -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - - -DEFAULT_PORTS = { - 'aurora': 3306, - 'mariadb': 3306, - 'mysql': 3306, - 'oracle': 1521, - 'sqlserver': 1433, - 'postgres': 5432, -} - - -class RDSException(Exception): - def __init__(self, exc): - if hasattr(exc, 'error_message') and exc.error_message: - self.message = exc.error_message - self.code = exc.error_code - elif hasattr(exc, 'body') and 'Error' in exc.body: - self.message = exc.body['Error']['Message'] - self.code = exc.body['Error']['Code'] - else: - self.message = str(exc) - self.code = 'Unknown Error' - - -class RDSConnection: - def __init__(self, module, region, **aws_connect_params): - try: - self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError as e: - module.fail_json(msg=e.error_message) - - def get_db_instance(self, instancename): - try: - return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) - except boto.exception.BotoServerError: - return None - - def get_db_snapshot(self, 
snapshotid): - try: - return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) - except boto.exception.BotoServerError: - return None - - def create_db_instance(self, instance_name, size, instance_class, db_engine, - username, password, **params): - params['engine'] = db_engine - try: - result = self.connection.create_dbinstance(instance_name, size, instance_class, - username, password, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def create_db_instance_read_replica(self, instance_name, source_instance, **params): - try: - result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def delete_db_instance(self, instance_name, **params): - try: - result = self.connection.delete_dbinstance(instance_name, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def delete_db_snapshot(self, snapshot): - try: - result = self.connection.delete_dbsnapshot(snapshot) - return RDSSnapshot(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def modify_db_instance(self, instance_name, **params): - try: - result = self.connection.modify_dbinstance(instance_name, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def reboot_db_instance(self, instance_name, **params): - try: - result = self.connection.reboot_dbinstance(instance_name) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): - try: - result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def create_db_snapshot(self, snapshot, instance_name, **params): - try: - result = self.connection.create_dbsnapshot(snapshot, instance_name) - return RDSSnapshot(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def promote_read_replica(self, instance_name, **params): - try: - result = self.connection.promote_read_replica(instance_name, **params) - return RDSDBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - -class RDS2Connection: - def __init__(self, module, region, **aws_connect_params): - try: - self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) - except boto.exception.BotoServerError as e: - module.fail_json(msg=e.error_message) - - def get_db_instance(self, instancename): - try: - dbinstances = self.connection.describe_db_instances( - db_instance_identifier=instancename - )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] - result = RDS2DBInstance(dbinstances[0]) - return result - except boto.rds2.exceptions.DBInstanceNotFound as e: - return None - except Exception as e: - raise e - - def get_db_snapshot(self, snapshotid): - try: - snapshots = self.connection.describe_db_snapshots( - db_snapshot_identifier=snapshotid, - snapshot_type='manual' - )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] - result = RDS2Snapshot(snapshots[0]) - return result - except boto.rds2.exceptions.DBSnapshotNotFound: - return None - - def 
create_db_instance(self, instance_name, size, instance_class, db_engine, - username, password, **params): - try: - result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password, - **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def create_db_instance_read_replica(self, instance_name, source_instance, **params): - try: - result = self.connection.create_db_instance_read_replica( - instance_name, - source_instance, - **params - )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def delete_db_instance(self, instance_name, **params): - try: - result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def delete_db_snapshot(self, snapshot): - try: - result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] - return RDS2Snapshot(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def modify_db_instance(self, instance_name, **params): - try: - result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def reboot_db_instance(self, instance_name, **params): - try: - result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): - try: - result = self.connection.restore_db_instance_from_db_snapshot( - instance_name, - snapshot, - **params - )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def create_db_snapshot(self, snapshot, instance_name, **params): - try: - result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] - return RDS2Snapshot(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - def promote_read_replica(self, instance_name, **params): - try: - result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] - return RDS2DBInstance(result) - except boto.exception.BotoServerError as e: - raise RDSException(e) - - -class RDSDBInstance: - def __init__(self, dbinstance): - self.instance = dbinstance - self.name = dbinstance.id - self.status = dbinstance.status - - def get_data(self): - d = { - 'id': self.name, - 'create_time': self.instance.create_time, - 'status': self.status, - 'availability_zone': self.instance.availability_zone, - 'backup_retention': self.instance.backup_retention_period, - 'backup_window': self.instance.preferred_backup_window, - 'maintenance_window': 
self.instance.preferred_maintenance_window, - 'multi_zone': self.instance.multi_az, - 'instance_type': self.instance.instance_class, - 'username': self.instance.master_username, - 'iops': self.instance.iops - } - - # Only assign an Endpoint if one is available - if hasattr(self.instance, 'endpoint'): - d["endpoint"] = self.instance.endpoint[0] - d["port"] = self.instance.endpoint[1] - if self.instance.vpc_security_groups is not None: - d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) - else: - d["vpc_security_groups"] = None - else: - d["endpoint"] = None - d["port"] = None - d["vpc_security_groups"] = None - d['DBName'] = self.instance.DBName if hasattr(self.instance, 'DBName') else None - # ReadReplicaSourceDBInstanceIdentifier may or may not exist - try: - d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier - except Exception: - d["replication_source"] = None - return d - - -class RDS2DBInstance: - def __init__(self, dbinstance): - self.instance = dbinstance - if 'DBInstanceIdentifier' not in dbinstance: - self.name = None - else: - self.name = self.instance.get('DBInstanceIdentifier') - self.status = self.instance.get('DBInstanceStatus') - - def get_data(self): - d = { - 'id': self.name, - 'create_time': self.instance['InstanceCreateTime'], - 'engine': self.instance['Engine'], - 'engine_version': self.instance['EngineVersion'], - 'license_model': self.instance['LicenseModel'], - 'character_set_name': self.instance['CharacterSetName'], - 'allocated_storage': self.instance['AllocatedStorage'], - 'publicly_accessible': self.instance['PubliclyAccessible'], - 'latest_restorable_time': self.instance['LatestRestorableTime'], - 'status': self.status, - 'availability_zone': self.instance['AvailabilityZone'], - 'secondary_availability_zone': self.instance['SecondaryAvailabilityZone'], - 'backup_retention': self.instance['BackupRetentionPeriod'], - 'backup_window': self.instance['PreferredBackupWindow'], - 'maintenance_window': self.instance['PreferredMaintenanceWindow'], - 'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'], - 'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'], - 'multi_zone': self.instance['MultiAZ'], - 'instance_type': self.instance['DBInstanceClass'], - 'username': self.instance['MasterUsername'], - 'db_name': self.instance['DBName'], - 'iops': self.instance['Iops'], - 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] - } - if self.instance['DBParameterGroups'] is not None: - parameter_groups = [] - for x in self.instance['DBParameterGroups']: - parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']}) - d['parameter_groups'] = parameter_groups - if self.instance['OptionGroupMemberships'] is not None: - option_groups = [] - for x in self.instance['OptionGroupMemberships']: - option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']}) - d['option_groups'] = option_groups - if self.instance['PendingModifiedValues'] is not None: - pdv = self.instance['PendingModifiedValues'] - d['pending_modified_values'] = { - 'multi_az': pdv['MultiAZ'], - 'master_user_password': pdv['MasterUserPassword'], - 'port': pdv['Port'], - 'iops': pdv['Iops'], - 'allocated_storage': pdv['AllocatedStorage'], - 'engine_version': pdv['EngineVersion'], - 'backup_retention_period': pdv['BackupRetentionPeriod'], - 'db_instance_class': 
pdv['DBInstanceClass'], - 'db_instance_identifier': pdv['DBInstanceIdentifier'] - } - if self.instance["DBSubnetGroup"] is not None: - dsg = self.instance["DBSubnetGroup"] - db_subnet_groups = {} - db_subnet_groups['vpc_id'] = dsg['VpcId'] - db_subnet_groups['name'] = dsg['DBSubnetGroupName'] - db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower() - db_subnet_groups['description'] = dsg['DBSubnetGroupDescription'] - db_subnet_groups['subnets'] = [] - for x in dsg["Subnets"]: - db_subnet_groups['subnets'].append({ - 'status': x['SubnetStatus'].lower(), - 'identifier': x['SubnetIdentifier'], - 'availability_zone': { - 'name': x['SubnetAvailabilityZone']['Name'], - 'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable'] - } - }) - d['db_subnet_groups'] = db_subnet_groups - if self.instance["VpcSecurityGroups"] is not None: - d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) - if "Endpoint" in self.instance and self.instance["Endpoint"] is not None: - d['endpoint'] = self.instance["Endpoint"].get('Address', None) - d['port'] = self.instance["Endpoint"].get('Port', None) - else: - d['endpoint'] = None - d['port'] = None - d['DBName'] = self.instance['DBName'] if hasattr(self.instance, 'DBName') else None - return d - - -class RDSSnapshot: - def __init__(self, snapshot): - self.snapshot = snapshot - self.name = snapshot.id - self.status = snapshot.status - - def get_data(self): - d = { - 'id': self.name, - 'create_time': self.snapshot.snapshot_create_time, - 'status': self.status, - 'availability_zone': self.snapshot.availability_zone, - 'instance_id': self.snapshot.instance_id, - 'instance_created': self.snapshot.instance_create_time, - } - # needs boto >= 2.21.0 - if hasattr(self.snapshot, 'snapshot_type'): - d["snapshot_type"] = self.snapshot.snapshot_type - if hasattr(self.snapshot, 'iops'): - d["iops"] = self.snapshot.iops - return d - - -class RDS2Snapshot: - def __init__(self, snapshot): - if 'DeleteDBSnapshotResponse' in snapshot: - self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] - else: - self.snapshot = snapshot - self.name = self.snapshot.get('DBSnapshotIdentifier') - self.status = self.snapshot.get('Status') - - def get_data(self): - d = { - 'id': self.name, - 'create_time': self.snapshot['SnapshotCreateTime'], - 'status': self.status, - 'availability_zone': self.snapshot['AvailabilityZone'], - 'instance_id': self.snapshot['DBInstanceIdentifier'], - 'instance_created': self.snapshot['InstanceCreateTime'], - 'snapshot_type': self.snapshot['SnapshotType'], - 'iops': self.snapshot['Iops'], - } - return d - - -def await_resource(conn, resource, status, module): - start_time = time.time() - wait_timeout = module.params.get('wait_timeout') + start_time - check_interval = 5 - while wait_timeout > time.time() and resource.status != status: - time.sleep(check_interval) - if wait_timeout <= time.time(): - module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name) - if module.params.get('command') == 'snapshot': - # Temporary until all the rds2 commands have their responses parsed - if resource.name is None: - module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) - # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) - else: - # Temporary until all the rds2 commands have their 
responses parsed - if resource.name is None: - module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) - # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name) - if resource is None: - break - # Some RDS resources take much longer than others to be ready. Check - # less aggressively for slow ones to avoid throttling. - if time.time() > start_time + 90: - check_interval = 20 - return resource - - -def create_db_instance(module, conn): - required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password'] - valid_vars = ['backup_retention', 'backup_window', - 'character_set_name', 'db_name', 'engine_version', - 'instance_type', 'iops', 'license_model', 'maint_window', - 'multi_zone', 'option_group', 'parameter_group', 'port', - 'subnet', 'upgrade', 'zone'] - if module.params.get('subnet'): - valid_vars.append('vpc_security_groups') - else: - valid_vars.append('security_groups') - if HAS_RDS2: - valid_vars.extend(['publicly_accessible', 'tags']) - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - - result = conn.get_db_instance(instance_name) - if result: - changed = False - else: - try: - result = conn.create_db_instance(instance_name, module.params.get('size'), - module.params.get('instance_type'), module.params.get('db_engine'), - module.params.get('username'), module.params.get('password'), **params) - changed = True - except RDSException as e: - module.fail_json(msg="Failed to create instance: %s" % to_native(e)) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_instance(instance_name) - - module.exit_json(changed=changed, instance=resource.get_data()) - - -def replicate_db_instance(module, conn): - required_vars = ['instance_name', 'source_instance'] - valid_vars = ['instance_type', 'port', 'upgrade', 'zone'] - if HAS_RDS2: - valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags']) - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - source_instance = module.params.get('source_instance') - - result = conn.get_db_instance(instance_name) - if result: - changed = False - else: - try: - result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) - changed = True - except RDSException as e: - module.fail_json(msg="Failed to create replica instance: %s " % to_native(e)) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_instance(instance_name) - - module.exit_json(changed=changed, instance=resource.get_data()) - - -def delete_db_instance_or_snapshot(module, conn): - required_vars = [] - valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot'] - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - snapshot = module.params.get('snapshot') - - if not instance_name: - result = conn.get_db_snapshot(snapshot) - else: - result = conn.get_db_instance(instance_name) - if not result: - module.exit_json(changed=False) - if result.status == 'deleting': - module.exit_json(changed=False) - try: - if instance_name: - if snapshot: - params["skip_final_snapshot"] = False - if HAS_RDS2: - 
params["final_db_snapshot_identifier"] = snapshot - else: - params["final_snapshot_id"] = snapshot - else: - params["skip_final_snapshot"] = True - result = conn.delete_db_instance(instance_name, **params) - else: - result = conn.delete_db_snapshot(snapshot) - except RDSException as e: - module.fail_json(msg="Failed to delete instance: %s" % to_native(e)) - - # If we're not waiting for a delete to complete then we're all done - # so just return - if not module.params.get('wait'): - module.exit_json(changed=True) - try: - await_resource(conn, result, 'deleted', module) - module.exit_json(changed=True) - except RDSException as e: - if e.code == 'DBInstanceNotFound': - module.exit_json(changed=True) - else: - module.fail_json(msg=to_native(e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def facts_db_instance_or_snapshot(module, conn): - instance_name = module.params.get('instance_name') - snapshot = module.params.get('snapshot') - - if instance_name and snapshot: - module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") - if instance_name: - resource = conn.get_db_instance(instance_name) - if not resource: - module.fail_json(msg="DB instance %s does not exist" % instance_name) - if snapshot: - resource = conn.get_db_snapshot(snapshot) - if not resource: - module.fail_json(msg="DB snapshot %s does not exist" % snapshot) - - module.exit_json(changed=False, instance=resource.get_data()) - - -def modify_db_instance(module, conn): - required_vars = ['instance_name'] - valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', - 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', - 'maint_window', 'multi_zone', 'new_instance_name', - 'option_group', 'parameter_group', 'password', 'size', 'upgrade'] - - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - new_instance_name = module.params.get('new_instance_name') - - try: - result = conn.modify_db_instance(instance_name, **params) - except RDSException as e: - module.fail_json(msg=to_native(e)) - if params.get('apply_immediately'): - if new_instance_name: - # Wait until the new instance name is valid - new_instance = None - while not new_instance: - new_instance = conn.get_db_instance(new_instance_name) - time.sleep(5) - - # Found instance but it briefly flicks to available - # before rebooting so let's wait until we see it rebooting - # before we check whether to 'wait' - result = await_resource(conn, new_instance, 'rebooting', module) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_instance(instance_name) - - # guess that this changed the DB, need a way to check - module.exit_json(changed=True, instance=resource.get_data()) - - -def promote_db_instance(module, conn): - required_vars = ['instance_name'] - valid_vars = ['backup_retention', 'backup_window'] - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - - result = conn.get_db_instance(instance_name) - if not result: - module.fail_json(msg="DB Instance %s does not exist" % instance_name) - - if result.get_data().get('replication_source'): - try: - result = conn.promote_read_replica(instance_name, **params) - changed = True - except RDSException as e: - module.fail_json(msg=to_native(e)) - else: - changed = False - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', 
module) - else: - resource = conn.get_db_instance(instance_name) - - module.exit_json(changed=changed, instance=resource.get_data()) - - -def snapshot_db_instance(module, conn): - required_vars = ['instance_name', 'snapshot'] - valid_vars = ['tags'] - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - snapshot = module.params.get('snapshot') - changed = False - result = conn.get_db_snapshot(snapshot) - if not result: - try: - result = conn.create_db_snapshot(snapshot, instance_name, **params) - changed = True - except RDSException as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_snapshot(snapshot) - - module.exit_json(changed=changed, snapshot=resource.get_data()) - - -def reboot_db_instance(module, conn): - required_vars = ['instance_name'] - valid_vars = [] - - if HAS_RDS2: - valid_vars.append('force_failover') - - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - result = conn.get_db_instance(instance_name) - changed = False - try: - result = conn.reboot_db_instance(instance_name, **params) - changed = True - except RDSException as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_instance(instance_name) - - module.exit_json(changed=changed, instance=resource.get_data()) - - -def restore_db_instance(module, conn): - required_vars = ['instance_name', 'snapshot'] - valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', - 'option_group', 'port', 'publicly_accessible', - 'subnet', 'tags', 'upgrade', 'zone'] - if HAS_RDS2: - valid_vars.append('instance_type') - else: - required_vars.append('instance_type') - params = validate_parameters(required_vars, valid_vars, module) - instance_name = module.params.get('instance_name') - instance_type = module.params.get('instance_type') - snapshot = module.params.get('snapshot') - - changed = False - result = conn.get_db_instance(instance_name) - if not result: - try: - result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) - changed = True - except RDSException as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('wait'): - resource = await_resource(conn, result, 'available', module) - else: - resource = conn.get_db_instance(instance_name) - - module.exit_json(changed=changed, instance=resource.get_data()) - - -def validate_parameters(required_vars, valid_vars, module): - command = module.params.get('command') - for v in required_vars: - if not module.params.get(v): - module.fail_json(msg="Parameter %s required for %s command" % (v, command)) - - # map to convert rds module options to boto rds and rds2 options - optional_params = { - 'port': 'port', - 'db_name': 'db_name', - 'zone': 'availability_zone', - 'maint_window': 'preferred_maintenance_window', - 'backup_window': 'preferred_backup_window', - 'backup_retention': 'backup_retention_period', - 'multi_zone': 'multi_az', - 'engine_version': 'engine_version', - 'upgrade': 'auto_minor_version_upgrade', - 'subnet': 'db_subnet_group_name', - 'license_model': 'license_model', - 'option_group': 'option_group_name', - 'size': 'allocated_storage', - 'iops': 'iops', - 'new_instance_name': 'new_instance_id', - 'apply_immediately': 'apply_immediately', - } - # map to 
convert rds module options to boto rds options - optional_params_rds = { - 'db_engine': 'engine', - 'password': 'master_password', - 'parameter_group': 'param_group', - 'instance_type': 'instance_class', - } - # map to convert rds module options to boto rds2 options - optional_params_rds2 = { - 'tags': 'tags', - 'publicly_accessible': 'publicly_accessible', - 'parameter_group': 'db_parameter_group_name', - 'character_set_name': 'character_set_name', - 'instance_type': 'db_instance_class', - 'password': 'master_user_password', - 'new_instance_name': 'new_db_instance_identifier', - 'force_failover': 'force_failover', - } - if HAS_RDS2: - optional_params.update(optional_params_rds2) - sec_group = 'db_security_groups' - else: - optional_params.update(optional_params_rds) - sec_group = 'security_groups' - # Check for options only supported with rds2 - for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()): - if module.params.get(k): - module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k) - - params = {} - for (k, v) in optional_params.items(): - if module.params.get(k) is not None and k not in required_vars: - if k in valid_vars: - params[v] = module.params[k] - else: - if module.params.get(k) is False: - pass - else: - module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command)) - - if module.params.get('security_groups'): - params[sec_group] = module.params.get('security_groups').split(',') - - vpc_groups = module.params.get('vpc_security_groups') - if vpc_groups: - if HAS_RDS2: - params['vpc_security_group_ids'] = vpc_groups - else: - groups_list = [] - for x in vpc_groups: - groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) - params['vpc_security_groups'] = groups_list - - # Convert tags dict to list of tuples that rds2 expects - if 'tags' in params: - params['tags'] = module.params['tags'].items() - return params - - -def main(): - argument_spec = dict( - command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), - instance_name=dict(required=False), - source_instance=dict(required=False), - db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', - 'sqlserver-web', 'postgres', 'aurora'], required=False), - size=dict(required=False), - instance_type=dict(aliases=['type'], required=False), - username=dict(required=False), - password=dict(no_log=True, required=False), - db_name=dict(required=False), - engine_version=dict(required=False), - parameter_group=dict(required=False), - license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False), - multi_zone=dict(type='bool', required=False), - iops=dict(required=False), - security_groups=dict(required=False), - vpc_security_groups=dict(type='list', required=False, elements='str'), - port=dict(required=False, type='int'), - upgrade=dict(type='bool', default=False), - option_group=dict(required=False), - maint_window=dict(required=False), - backup_window=dict(required=False), - backup_retention=dict(required=False), - zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False), - subnet=dict(required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - snapshot=dict(required=False), - apply_immediately=dict(type='bool', default=False), - new_instance_name=dict(required=False), - 
tags=dict(type='dict', required=False), - publicly_accessible=dict(required=False), - character_set_name=dict(required=False), - force_failover=dict(type='bool', required=False, default=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - check_boto3=False, - ) - - module.deprecate("The 'rds' module has been deprecated and replaced by the 'rds_instance' module'", - version='3.0.0', collection_name='community.aws') - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - invocations = { - 'create': create_db_instance, - 'replicate': replicate_db_instance, - 'delete': delete_db_instance_or_snapshot, - 'facts': facts_db_instance_or_snapshot, - 'modify': modify_db_instance, - 'promote': promote_db_instance, - 'snapshot': snapshot_db_instance, - 'reboot': reboot_db_instance, - 'restore': restore_db_instance, - } - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - # set port to per db defaults if not specified - if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create': - if '-' in module.params['db_engine']: - engine = module.params['db_engine'].split('-')[0] - else: - engine = module.params['db_engine'] - module.params['port'] = DEFAULT_PORTS[engine.lower()] - - # connect to the rds endpoint - if HAS_RDS2: - conn = RDS2Connection(module, region, **aws_connect_params) - else: - conn = RDSConnection(module, region, **aws_connect_params) - - invocations[module.params.get('command')](module, conn) - - -if __name__ == '__main__': - main() From 294c7a5b9a7e2efc252ee2072ad03b8a8a4b7a2c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 7 Jan 2022 21:38:17 +0100 Subject: [PATCH 356/683] aws_kms_info: Correct deprecation of keys_attr (#851) aws_kms_info: Correct deprecation of keys_attr SUMMARY Fix up the deprecation of keys_attr. ISSUE TYPE Feature Pull Request COMPONENT NAME aws_kms_info ADDITIONAL INFORMATION #838 Reviewed-by: Markus Bergholz Reviewed-by: Jill R Reviewed-by: None --- aws_kms_info.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index ee1649984f1..671bf6f7447 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -47,14 +47,11 @@ type: bool keys_attr: description: - - Whether to return the results in the C(keys) attribute as well as the - C(kms_keys) attribute. - - Returning the C(keys) attribute conflicts with the builtin keys() - method on dictionaries and as such has been deprecated. - - After version C(3.0.0) this parameter will do nothing, and after - version C(4.0.0) this parameter will be removed. + - Returning the C(keys) attribute conflicted with the builtin keys() + method on dictionaries and as such was deprecated. + - This parameter now does nothing, and after version C(4.0.0) this + parameter will be removed.
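For illustration, a playbook consuming this module would now read the kms_keys return value instead — a minimal sketch, assuming the documented kms_keys list and its key_arn field:

- name: Gather information about all KMS keys
  community.aws.aws_kms_info:
  register: kms_facts

- name: Show each key ARN (read kms_keys, not the deprecated keys attribute)
  ansible.builtin.debug:
    msg: "{{ item.key_arn }}"
  loop: "{{ kms_facts.kms_keys }}"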
type: bool - default: True version_added: 2.0.0 extends_documentation_fragment: - amazon.aws.aws @@ -451,7 +448,7 @@ def main(): key_id=dict(aliases=['key_arn']), filters=dict(type='dict'), pending_deletion=dict(type='bool', default=False), - keys_attr=dict(type='bool', default=True), + keys_attr=dict(type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -468,11 +465,11 @@ def main(): ret_params = dict(kms_keys=filtered_keys) # We originally returned "keys" - if module.params['keys_attr']: + if module.params.get('keys_attr') is not None: module.deprecate("Returning results in the 'keys' attribute conflicts with the builtin keys() method on " - "dicts and as such is deprecated and is now ignored. Please use the kms_keys attribute. This warning can be " - "silenced by setting keys_attr to False.", - version='3.0.0', collection_name='community.aws') + "dicts and as such was removed in version 3.0.0. Please use the kms_keys attribute. " + "This parameter is now ignored and will be removed in version 4.0.0.", + version='4.0.0', collection_name='community.aws') module.exit_json(**ret_params) From 7791c46b94d946f00358958b51c4ec3a85fbacb3 Mon Sep 17 00:00:00 2001 From: Matthew Davis <7035647+mdavis-xyz@users.noreply.github.com> Date: Fri, 14 Jan 2022 23:06:46 +1100 Subject: [PATCH 357/683] Add abort multipart upload and expire obj del markers to s3 lifecycle (#794) Add abort multipart upload and expire obj del markers to s3 lifecycle Depends-On: ansible/ansible-zuul-jobs#1247 SUMMARY Fixes #365 #796 ISSUE TYPE Feature Pull Request COMPONENT NAME s3_lifecycle ADDITIONAL INFORMATION I have not run integration tests yet because of #793. I'm unsure about how to name and structure the new arguments. Do I nest them to match the API, or flatten them to match existing arguments? Reviewed-by: Alina Buzachis Reviewed-by: Matthew Davis Reviewed-by: Mark Chappell Reviewed-by: None Reviewed-by: Markus Bergholz --- s3_lifecycle.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 9cec1402eb1..c12ce6b0897 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -23,16 +23,30 @@ - Name of the S3 bucket. required: true type: str + abort_incomplete_multipart_upload_days: + description: + - Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. + type: int + version_added: 2.2.0 expiration_date: description: - Indicates the lifetime of the objects that are subject to the rule by the date they will expire. - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. + - This cannot be specified with I(expire_object_delete_marker) type: str expiration_days: description: - Indicates the lifetime, in days, of the objects that are subject to the rule. - The value must be a non-zero positive integer. + - This cannot be specified with I(expire_object_delete_marker) type: int + expire_object_delete_marker: + description: + - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. + - If set to C(true), the delete marker will be expired; if set to C(false) the policy takes no action. + - This cannot be specified with I(expiration_days) or I(expiration_date). + type: bool + version_added: 2.2.0 prefix: description: - Prefix identifying one or more objects to which the rule applies. 
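To illustrate the intent of the two new options, a minimal usage sketch (the bucket name and rule IDs are placeholders):

- name: Abort incomplete multipart uploads after 7 days
  community.aws.s3_lifecycle:
    name: example-bucket
    rule_id: abort-incomplete-uploads
    abort_incomplete_multipart_upload_days: 7
    status: enabled
    state: present

- name: Expire delete markers that have no remaining noncurrent versions
  community.aws.s3_lifecycle:
    name: example-bucket
    rule_id: expire-delete-markers
    expire_object_delete_marker: true
    status: enabled
    state: present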
@@ -250,8 +264,10 @@ def fetch_rules(client, module, name): def build_rule(client, module): name = module.params.get("name") + abort_incomplete_multipart_upload_days = module.params.get("abort_incomplete_multipart_upload_days") expiration_date = parse_date(module.params.get("expiration_date")) expiration_days = module.params.get("expiration_days") + expire_object_delete_marker = module.params.get("expire_object_delete_marker") noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days") noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days") noncurrent_version_transitions = module.params.get("noncurrent_version_transitions") @@ -268,11 +284,19 @@ def build_rule(client, module): rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) if rule_id is not None: rule['ID'] = rule_id + + if abort_incomplete_multipart_upload_days: + rule['AbortIncompleteMultipartUpload'] = { + 'DaysAfterInitiation': abort_incomplete_multipart_upload_days + } + # Create expiration if expiration_days is not None: rule['Expiration'] = dict(Days=expiration_days) elif expiration_date is not None: rule['Expiration'] = dict(Date=expiration_date.isoformat()) + elif expire_object_delete_marker is not None: + rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) if noncurrent_version_expiration_days is not None: rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days) @@ -525,8 +549,10 @@ def main(): s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] argument_spec = dict( name=dict(required=True, type='str'), + abort_incomplete_multipart_upload_days=dict(type='int'), expiration_days=dict(type='int'), expiration_date=dict(), + expire_object_delete_marker=dict(type='bool'), noncurrent_version_expiration_days=dict(type='int'), noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), noncurrent_version_transition_days=dict(type='int'), @@ -546,7 +572,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=[ - ['expiration_days', 'expiration_date'], + ['expiration_days', 'expiration_date', 'expire_object_delete_marker'], ['expiration_days', 'transition_date'], ['transition_days', 'transition_date'], ['transition_days', 'expiration_date'], @@ -563,8 +589,10 @@ def main(): if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix - required_when_present = ('expiration_date', 'expiration_days', 'transition_date', - 'transition_days', 'transitions', 'noncurrent_version_expiration_days', + required_when_present = ('abort_incomplete_multipart_upload_days', + 'expiration_date', 'expiration_days', 'expire_object_delete_marker', + 'transition_date', 'transition_days', 'transitions', + 'noncurrent_version_expiration_days', 'noncurrent_version_transition_days', 'noncurrent_version_transitions') for param in required_when_present: From 285e2188673fa1d96aafd7e886c4569de42eefdc Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Tue, 18 Jan 2022 07:27:13 +0100 Subject: [PATCH 358/683] docs fix. clean up duplicated and unspecific requirements (#863) docs fix. clean up duplicated and unspecific requirements SUMMARY remove unspecific/duplicated requirements ISSUE TYPE Docs Pull Request COMPONENT NAME aws_msk_config ecs_taskdefinition ADDITIONAL INFORMATION I don't see the json library explicitly used in aws_msk_config.
Furthermore, it's a standard Python library. Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- aws_msk_config.py | 3 --- ecs_taskdefinition.py | 1 - 2 files changed, 4 deletions(-) diff --git a/aws_msk_config.py b/aws_msk_config.py index f1966847422..afaea513937 100644 --- a/aws_msk_config.py +++ b/aws_msk_config.py @@ -12,9 +12,6 @@ module: aws_msk_config short_description: Manage Amazon MSK cluster configurations. version_added: "2.0.0" -requirements: - - botocore >= 1.17.48 - - boto3 description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations. author: diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 505a4207117..ab3a47d176e 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -16,7 +16,6 @@ author: - Mark Chance (@Java1Guy) - Alina Buzachis (@alinabuzachis) -requirements: [ json, botocore, boto3 ] options: state: description: From aa09a6e6121abbb509c26652d705cc41a462cc00 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Tue, 18 Jan 2022 13:39:27 +0100 Subject: [PATCH 359/683] try docs fix (#871) wafv2_web_acl_info doc fix SUMMARY 10:5 error wrong indentation: expected 2 but found 4 (indentation) 19:30 error too few spaces after comma (commas) 23:1 error wrong indentation: expected 2 but found 0 (indentation) ISSUE TYPE Docs Pull Request COMPONENT NAME wafv2_web_acl_info Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley --- wafv2_web_acl_info.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index a0de1131cf6..3fb91fbd802 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -15,21 +15,21 @@ description: - Info about web acl options: - name: - description: - - The name of the web acl. - required: true - type: str - scope: - description: - - Scope of wafv2 web acl. - required: true - choices: ["CLOUDFRONT","REGIONAL"] - type: str + name: + description: + - The name of the web acl. + required: true + type: str + scope: + description: + - Scope of wafv2 web acl. + required: true + choices: ["CLOUDFRONT", "REGIONAL"] + type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' From f61b4e00cb1029993df06437d2f8387c9e04d782 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 20 Jan 2022 12:02:30 +0100 Subject: [PATCH 360/683] add missing password_reset_required parameter (#860) add missing password_reset_required parameter SUMMARY The password_reset_required parameter is missing from the iam_user module. ISSUE TYPE Feature Pull Request COMPONENT NAME iam_user ADDITIONAL INFORMATION Sadly, LoginProfile is only returned on create_login_profile and not on update_login_profile. Therefore, the functionality can only be verified when the user is created, not when the user is updated.
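A minimal usage sketch of the new parameter (the user name and password variable are placeholders):

- name: Create a user who must set a new password on first login
  community.aws.iam_user:
    name: example_user
    password: "{{ initial_password }}"
    password_reset_required: true
    state: present

The boto3 API documentation referenced: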
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_login_profile https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.update_login_profile retval of update_login_profile is just 0.0s create_or_update_user: {'ResponseMetadata': {'HTTPHeaders': {'content-length': '216', 'content-type': 'text/xml', 'date': 'Wed, 12 Jan 2022 20:18:08 GMT', 'x-amzn-requestid': '11b6fde3-9f28-4265-8fac-88e3f5a238d3'}, 'HTTPStatusCode': 200, 'RequestId': '11b6fde3-9f28-4265-8fac-88e3f5a238d3', 'RetryAttempts': 0}} Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- iam_user.py | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/iam_user.py b/iam_user.py index e308c3cf0cb..7ec3901aa45 100644 --- a/iam_user.py +++ b/iam_user.py @@ -27,6 +27,13 @@ required: false type: str version_added: 2.2.0 + password_reset_required: + description: + - Defines if the user is required to set a new password after login. + required: false + type: bool + default: false + version_added: 3.1.0 update_password: default: always choices: ['always', 'on_create'] @@ -250,18 +257,20 @@ def create_or_update_login_profile(connection, module): user_params = dict() user_params['UserName'] = module.params.get('name') user_params['Password'] = module.params.get('password') + user_params['PasswordResetRequired'] = module.params.get('password_reset_required') + retval = {} try: - connection.update_login_profile(**user_params) + retval = connection.update_login_profile(**user_params) except is_boto3_error_code('NoSuchEntity'): try: - connection.create_login_profile(**user_params) + retval = connection.create_login_profile(**user_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create user login profile") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to update user login profile") - return True + return True, retval def delete_login_profile(connection, module): @@ -296,6 +305,7 @@ def create_or_update_user(connection, module): user = get_user(connection, module, params['UserName']) # If user is None, create it + new_login_profile = False if user is None: # Check mode means we would create the user if module.check_mode: @@ -312,13 +322,20 @@ def create_or_update_user(connection, module): wait_iam_exists(connection, module) if module.params.get('password') is not None: - create_or_update_login_profile(connection, module) + login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + + if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): + new_login_profile = True else: login_profile_result = None update_result = update_user_tags(connection, module, params, user) if module.params['update_password'] == "always" and module.params.get('password') is not None: - login_profile_result = create_or_update_login_profile(connection, module) + login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + + if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): + new_login_profile = True + elif module.params.get('remove_password'): login_profile_result = delete_login_profile(connection, module) @@ -361,6 +378,9 @@ def create_or_update_user(connection, module): # Get the user again 
user = get_user(connection, module, params['UserName']) + if changed and new_login_profile: + # `LoginProfile` is only returned on `create_login_profile` method + user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) module.exit_json(changed=changed, iam_user=user) @@ -505,8 +525,9 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), password=dict(type='str', no_log=True), + password_reset_required=dict(type='bool', default=False, no_log=False), update_password=dict(default='always', choices=['always', 'on_create'], no_log=False), - remove_password=dict(type='bool'), + remove_password=dict(type='bool', no_log=False), managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), state=dict(choices=['present', 'absent'], required=True), purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']), @@ -519,7 +540,7 @@ def main(): module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['password', 'remove_password']] + mutually_exclusive=[['password', 'remove_password']], ) connection = module.client('iam') From 21e4dfedeb395032baa46741724f4b568deb2670 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Mon, 24 Jan 2022 16:31:40 -0800 Subject: [PATCH 361/683] Add ec2_lc* integration tests (#824) Add ec2_lc* integration tests SUMMARY Added integration tests which exercise ec2_lc, ec2_lc_find, and ec2_lc_info. ISSUE TYPE Feature Pull Request ADDITIONAL INFORMATION ec2_lc (launch configurations) works with ASGs (auto-scaling groups) to define the launch config for instances in the ASG. We have tests for ec2_asg that make use of ec2_lc, but as that suite is already slow, it makes sense to have a dedicated test suite for ec2_lc. Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Jill R Reviewed-by: Markus Bergholz --- ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_lc.py b/ec2_lc.py index 2cdf0463863..19f8dfe2972 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -478,7 +478,7 @@ def create_block_device_meta(module, volume): if 'no_device' in volume: return_object['NoDevice'] = volume.get('no_device') - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'ips', 'encrypted']): + if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']): return_object['Ebs'] = {} if 'snapshot' in volume: From 827238fbe535c8896bec1aebd6f2d28c17e0d397 Mon Sep 17 00:00:00 2001 From: Yuri Krysko Date: Tue, 25 Jan 2022 08:05:22 -0500 Subject: [PATCH 362/683] Add ability to manage resource policy for AWS Secrets Manager secrets (#843) Add ability to manage resource policy for AWS Secrets Manager secrets SUMMARY AWS Secrets Manager secrets support attaching a resource policy. The benefit is huge when it is necessary to access secrets from other AWS accounts. This pull request adds the ability to manage (add new/remove or modify existing) secret resource policies.
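As an illustrative sketch, a cross-account grant could be passed inline rather than from a template file (the account ID and all names are placeholders):

- name: Share a secret with another AWS account
  community.aws.aws_secret:
    name: shared_secret
    state: present
    secret_type: 'string'
    secret: "{{ super_secret_string }}"
    resource_policy: "{{ cross_account_policy | to_json }}"
  vars:
    cross_account_policy:
      Version: '2012-10-17'
      Statement:
        - Effect: Allow
          Principal:
            AWS: 'arn:aws:iam::123456789012:root'
          Action: 'secretsmanager:GetSecretValue'
          Resource: '*'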
ISSUE TYPE Feature Pull Request COMPONENT NAME module: aws_secret ADDITIONAL INFORMATION Reviewed-by: Mark Woolley Reviewed-by: Yuri Krysko Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- aws_secret.py | 78 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/aws_secret.py b/aws_secret.py index dfe1013194d..050b00f5ae8 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type - DOCUMENTATION = r''' --- module: aws_secret @@ -54,6 +53,13 @@ - Specifies string or binary data that you want to encrypt and store in the new version of the secret. default: "" type: str + resource_policy: + description: + - Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access + to secrets. + required: false + type: json + version_added: 3.1.0 tags: description: - Specifies a list of user-defined tags that are attached to the secret. @@ -73,7 +79,6 @@ ''' - EXAMPLES = r''' - name: Add string to AWS Secrets Manager community.aws.aws_secret: @@ -82,6 +87,14 @@ secret_type: 'string' secret: "{{ super_secret_string }}" +- name: Add a secret with resource policy attached + community.aws.aws_secret: + name: 'test_secret_string' + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}" + - name: remove string from AWS Secrets Manager community.aws.aws_secret: name: 'test_secret_string' @@ -90,7 +103,6 @@ secret: "{{ super_secret_string }}" ''' - RETURN = r''' secret: description: The secret information @@ -133,6 +145,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from traceback import format_exc +import json try: from botocore.exceptions import BotoCoreError, ClientError @@ -142,7 +157,7 @@ class Secret(object): """An object representation of the Secret described by the self.module args""" - def __init__(self, name, secret_type, secret, description="", kms_key_id=None, + def __init__(self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, tags=None, lambda_arn=None, rotation_interval=None): self.name = name self.description = description @@ -152,6 +167,7 @@ def __init__(self, name, secret_type, secret, description="", kms_key_id=None, else: self.secret_type = "SecretString" self.secret = secret + self.resource_policy = resource_policy self.tags = tags or {} self.rotation_enabled = False if lambda_arn: @@ -185,6 +201,15 @@ def update_args(self): args[self.secret_type] = self.secret return args + @property + def secret_resource_policy_args(self): + args = { + "SecretId": self.name + } + if self.resource_policy: + args["ResourcePolicy"] = self.resource_policy + return args + @property def boto3_tags(self): return ansible_dict_to_boto3_tag_list(self.Tags) @@ -211,6 +236,15 @@ def get_secret(self, name): self.module.fail_json_aws(e, msg="Failed to describe secret") return secret + def get_resource_policy(self, name): + try: + resource_policy = 
self.client.get_resource_policy(SecretId=name) + except self.client.exceptions.ResourceNotFoundException: + resource_policy = None + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to get secret resource policy") + return resource_policy + def create_secret(self, secret): if self.module.check_mode: self.module.exit_json(changed=True) @@ -227,13 +261,26 @@ def create_secret(self, secret): def update_secret(self, secret): if self.module.check_mode: self.module.exit_json(changed=True) - try: response = self.client.update_secret(**secret.update_args) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to update secret") return response + def put_resource_policy(self, secret): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + json.loads(secret.secret_resource_policy_args.get("ResourcePolicy")) + except (TypeError, ValueError) as e: + self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc()) + + try: + response = self.client.put_resource_policy(**secret.secret_resource_policy_args) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to update secret resource policy") + return response + def restore_secret(self, name): if self.module.check_mode: self.module.exit_json(changed=True) @@ -255,6 +302,15 @@ def delete_secret(self, name, recovery_window): self.module.fail_json_aws(e, msg="Failed to delete secret") return response + def delete_resource_policy(self, name): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + response = self.client.delete_resource_policy(SecretId=name) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to delete secret resource policy") + return response + def update_rotation(self, secret): if secret.rotation_enabled: try: @@ -334,6 +390,7 @@ def main(): 'kms_key_id': dict(), 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), + 'resource_policy': dict(type='json', default=None), 'tags': dict(type='dict', default={}), 'rotation_lambda': dict(), 'rotation_interval': dict(type='int', default=30), @@ -352,6 +409,7 @@ def main(): module.params.get('secret'), description=module.params.get('description'), kms_key_id=module.params.get('kms_key_id'), + resource_policy=module.params.get('resource_policy'), tags=module.params.get('tags'), lambda_arn=module.params.get('rotation_lambda'), rotation_interval=module.params.get('rotation_interval') @@ -374,6 +432,8 @@ def main(): if state == 'present': if current_secret is None: result = secrets_mgr.create_secret(secret) + if secret.resource_policy and result.get("ARN"): + result = secrets_mgr.put_resource_policy(secret) changed = True else: if current_secret.get("DeletedDate"): @@ -385,6 +445,14 @@ def main(): if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True + current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name) + current_resource_policy = current_resource_policy_response.get("ResourcePolicy") + if compare_policies(secret.resource_policy, current_resource_policy): + if secret.resource_policy is None and current_resource_policy: + result = secrets_mgr.delete_resource_policy(secret.name) + else: + result = secrets_mgr.put_resource_policy(secret) + changed = True current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) tags_to_add, tags_to_remove 
= compare_aws_tags(current_tags, secret.tags) if tags_to_add: From 8507c3121215976b0c6f83d2fd870f5e0c948f95 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Tue, 25 Jan 2022 13:36:58 -0800 Subject: [PATCH 363/683] ec2_lc: add volume throughput parameter support (#790) ec2_lc: add volume throughput parameter support SUMMARY Adding throughput parameter support to ec2_lc. Fixes #784. ISSUE TYPE Feature Pull Request COMPONENT NAME community.aws.ec2_lc UPDATE: Integration tests being added in a separate PR: #824 Reviewed-by: Alina Buzachis Reviewed-by: Jill R --- ec2_lc.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ec2_lc.py b/ec2_lc.py index 19f8dfe2972..de3a7a5443f 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -107,6 +107,12 @@ description: - The number of IOPS per second to provision for the volume. - Required when I(volume_type=io1). + throughput: + type: int + description: + - The throughput to provision for a gp3 volume. + - Valid Range is a minimum value of 125 and a maximum value of 1000. + version_added: 3.1.0 encrypted: type: bool default: false @@ -478,7 +484,7 @@ def create_block_device_meta(module, volume): if 'no_device' in volume: return_object['NoDevice'] = volume.get('no_device') - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']): + if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']): return_object['Ebs'] = {} if 'snapshot' in volume: @@ -496,6 +502,11 @@ def create_block_device_meta(module, volume): if 'iops' in volume: return_object['Ebs']['Iops'] = volume.get('iops') + if 'throughput' in volume: + if volume.get('volume_type') != 'gp3': + module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.') + return_object['Ebs']['Throughput'] = volume.get('throughput') + if 'encrypted' in volume: return_object['Ebs']['Encrypted'] = volume.get('encrypted') From 97044d49d3c618352cf8fe2f77f546a71701c8db Mon Sep 17 00:00:00 2001 From: Hugh Saunders Date: Wed, 26 Jan 2022 11:39:09 +0000 Subject: [PATCH 364/683] Respect wait parameter in elb_instance when adding/removing instances (#826) Respect wait parameter in elb_instance when adding/removing instances SUMMARY The wait parameter is currently ignored when registering or deregistering an instance with an ELB. Looks like this was lost in the boto3 migration: 96f1518 Related: #825 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_instance ADDITIONAL INFORMATION See #825 Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- elb_instance.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/elb_instance.py b/elb_instance.py index 6116207866b..51ec03d5702 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -144,8 +144,9 @@ def deregister(self, wait, timeout): # already OutOfService is being deregistered. 
self.changed = True - for lb in self.lbs: - self._await_elb_instance_state(lb, 'Deregistered', timeout) + if wait: + for lb in self.lbs: + self._await_elb_instance_state(lb, 'Deregistered', timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB @@ -176,8 +177,9 @@ def register(self, wait, enable_availability_zone, timeout): self.changed = True - for lb in self.lbs: - self._await_elb_instance_state(lb, 'InService', timeout) + if wait: + for lb in self.lbs: + self._await_elb_instance_state(lb, 'InService', timeout) @AWSRetry.jittered_backoff() def _describe_elbs(self, **params): From 05ea1f59f2077224361204531b6281f4aa132123 Mon Sep 17 00:00:00 2001 From: Andreas Jonsson Date: Sat, 29 Jan 2022 17:11:14 -0800 Subject: [PATCH 365/683] Lambda - Wait before updating (#857) Lambda - Wait before updating SUMMARY Updated lambda module to wait for State = Active & LastUpdateStatus = Successful based on https://aws.amazon.com/blogs/compute/coming-soon-expansion-of-aws-lambda-states-to-all-functions/ Fixes #830 ISSUE TYPE Bugfix Pull Request COMPONENT NAME module: lambda ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- execute_lambda.py | 13 +++++++++++++ lambda.py | 20 +++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/execute_lambda.py b/execute_lambda.py index 7af644810a8..b4cbb4a53de 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -202,6 +202,9 @@ def main(): elif name: invoke_params['FunctionName'] = name + if not module.check_mode: + wait_for_lambda(client, module, name) + try: response = client.invoke(**invoke_params) except is_boto3_error_code('ResourceNotFoundException') as nfe: @@ -255,5 +258,15 @@ def main(): module.exit_json(changed=True, result=results) +def wait_for_lambda(client, module, name): + try: + waiter = client.get_waiter('function_active') + waiter.wait(FunctionName=name) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') + + if __name__ == '__main__': main() diff --git a/lambda.py b/lambda.py index 1605d6497db..923b1646c3d 100644 --- a/lambda.py +++ b/lambda.py @@ -216,7 +216,7 @@ import re try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import ClientError, BotoCoreError, WaiterError except ImportError: pass # protected by AnsibleAWSModule @@ -320,6 +320,18 @@ def set_tag(client, module, tags, function): return changed +def wait_for_lambda(client, module, name): + try: + client_active_waiter = client.get_waiter('function_active') + client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter.wait(FunctionName=name) + client_updated_waiter.wait(FunctionName=name) + except WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') + + def main(): argument_spec = dict( name=dict(required=True), @@ -453,6 +465,9 @@ def main(): # Upload new configuration if configuration has changed if len(func_kwargs) > 1: + if not check_mode: + wait_for_lambda(client, module, name) + try: if not check_mode: response = client.update_function_configuration(aws_retry=True, **func_kwargs) 
@@ -494,6 +509,9 @@ def main(): # Upload new code if needed (e.g. code checksum has changed) if len(code_kwargs) > 2: + if not check_mode: + wait_for_lambda(client, module, name) + try: if not check_mode: response = client.update_function_code(aws_retry=True, **code_kwargs) From 18e757434fb71f02d5dd9edd2b62c4f1e66fc36e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 30 Jan 2022 02:16:55 +0100 Subject: [PATCH 366/683] Cleanup unused imports (#896) Cleanup unused imports (#852 / #892) Let's try once more... SUMMARY My local tests are flagging that we've picked up some unused imports again. ISSUE TYPE Feature Pull Request COMPONENT NAME aws_glue_job cloudfront_info rds_option_group_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- aws_glue_job.py | 2 -- cloudfront_info.py | 1 - rds_option_group_info.py | 1 - 3 files changed, 4 deletions(-) diff --git a/aws_glue_job.py b/aws_glue_job.py index edca5d051d5..4e278c81734 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -245,9 +245,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info diff --git a/cloudfront_info.py b/cloudfront_info.py index e9136341c9f..b7914dcceb4 100644 --- a/cloudfront_info.py +++ b/cloudfront_info.py @@ -241,7 +241,6 @@ type: dict ''' -from functools import partial import traceback try: diff --git a/rds_option_group_info.py b/rds_option_group_info.py index b29479386ff..37e848032c8 100644 --- a/rds_option_group_info.py +++ b/rds_option_group_info.py @@ -244,7 +244,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags From bd3fd0e4b8bb88738adfc8b258cc03818e5d73bf Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Sun, 30 Jan 2022 13:05:46 +0100 Subject: [PATCH 367/683] fix yamllint errors (#903) aws_acm: fix yamllint errors in docs section SUMMARY Closes: #902 ISSUE TYPE Docs Pull Request COMPONENT NAME aws_acm ADDITIONAL INFORMATION 1:1 warning missing document start "---" (document-start) 2:81 error line too long (88 > 80 characters) (line-length) 5:81 error line too long (89 > 80 characters) (line-length) 9:81 error line too long (86 > 80 characters) (line-length) 11:81 error line too long (100 > 80 characters) (line-length) 32:81 error line too long (87 > 80 characters) (line-length) 36:81 error line too long (98 > 80 characters) (line-length) 41:81 error line too long (98 > 80 characters) (line-length) 43:81 error line too long (105 > 80 characters) (line-length) 49:81 error line too long (84 > 80 characters) (line-length) 56:81 error line 
too long (102 > 80 characters) (line-length) 61:81 error line too long (87 > 80 characters) (line-length) 69:81 error line too long (91 > 80 characters) (line-length) 79:81 error line too long (92 > 80 characters) (line-length) 88:81 error line too long (85 > 80 characters) (line-length) 105:81 error line too long (83 > 80 characters) (line-length) 123:1 error wrong indentation: expected 2 but found 0 (indentation) 124:17 error no new line character at the end of file (new-line-at-end-of-file) Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- aws_acm.py | 71 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index 65c95212170..d28301e9160 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -25,18 +25,22 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' +--- module: aws_acm -short_description: Upload and delete certificates in the AWS Certificate Manager service +short_description: > + Upload and delete certificates in the AWS Certificate Manager service version_added: 1.0.0 description: - - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM). + - > + Import and delete certificates in Amazon Web Service's Certificate + Manager (AWS ACM). - > This module does not currently interact with AWS-provided certificates. It currently only manages certificates provided to AWS by the user. - - The ACM API allows users to upload multiple certificates for the same domain name, - and even multiple identical certificates. - This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy. + - The ACM API allows users to upload multiple certificates for the same domain + name, and even multiple identical certificates. This module attempts to + restrict such freedoms, to be idempotent, as per the Ansible philosophy. It does this through applying AWS resource "Name" tags to ACM certificates. - > When I(state=present), @@ -57,63 +61,71 @@ this task will fail. - > When I(state=absent) and I(certificate_arn) is defined, - this module will delete the ACM resource with that ARN if it exists in this region, - and succeed without effect if it doesn't exist. + this module will delete the ACM resource with that ARN if it exists in this + region, and succeed without effect if it doesn't exist. - > - When I(state=absent) and I(domain_name) is defined, - this module will delete all ACM resources in this AWS region with a corresponding domain name. + When I(state=absent) and I(domain_name) is defined, this module will delete + all ACM resources in this AWS region with a corresponding domain name. If there are none, it will succeed without effect. - > When I(state=absent) and I(certificate_arn) is not defined, - and I(domain_name) is not defined, - this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag. + and I(domain_name) is not defined, this module will delete all ACM resources + in this AWS region with a corresponding I(Name) tag. If there are none, it will succeed without effect. - - Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API. + - > + Note that this may not work properly with keys of size 4096 bits, due to a + limitation of the ACM API. options: certificate: description: - The body of the PEM encoded public certificate. - Required when I(state) is not C(absent). - - If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')). 
+ - > + If your certificate is in a file, + use C(lookup('file', 'path/to/cert.pem')). type: str - certificate_arn: description: - The ARN of a certificate in ACM to delete - Ignored when I(state=present). - - If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag). + - > + If I(state=absent), you must provide one of + I(certificate_arn), I(domain_name) or I(name_tag). - > If I(state=absent) and no resource exists with this ARN in this region, the task will succeed with no effect. - > - If I(state=absent) and the corresponding resource exists in a different region, - this task may report success without deleting that resource. + If I(state=absent) and the corresponding resource exists in a different + region, this task may report success without deleting that resource. type: str aliases: [arn] - certificate_chain: description: - The body of the PEM encoded chain for your certificate. - - If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')). + - > + If your certificate chain is in a file, + use C(lookup('file', 'path/to/chain.pem')). - Ignored when I(state=absent) type: str - domain_name: description: - The domain name of the certificate. - > If I(state=absent) and I(domain_name) is specified, this task will delete all ACM certificates with this domain. - - Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided. + - > + Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) + must be provided. - > If I(state=present) this must not be specified. (Since the domain name is encoded within the public certificate's body.) type: str aliases: [domain] - name_tag: description: - - The unique identifier for tagging resources using AWS tags, with key I(Name). + - > + The unique identifier for tagging resources using AWS tags, + with key I(Name). - This can be any set of characters accepted by AWS for tag values. - > This is to ensure Ansible can treat certificates idempotently, @@ -124,15 +136,15 @@ I(certificate_arn), I(domain_name) or I(name_tag). type: str aliases: [name] - private_key: description: - The body of the PEM encoded private key. - Required when I(state=present). - Ignored when I(state=absent). - - If your private key is in a file, use C(lookup('file', 'path/to/key.pem')). + - > + If your private key is in a file, + use C(lookup('file', 'path/to/key.pem')). type: str - state: description: - > @@ -148,8 +160,9 @@ author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + ''' EXAMPLES = ''' From f1ce59e3c8e811e224f65969f912403a44533c4f Mon Sep 17 00:00:00 2001 From: Sebastien Rosset Date: Sun, 30 Jan 2022 04:12:43 -0800 Subject: [PATCH 368/683] improve doc (#901) Improve doc of ec2_launch_template module SUMMARY Add information about how the ec2_launch_template handles initial creation of the launch template, or creation of a new version of the launch template. ISSUE TYPE Docs Pull Request COMPONENT NAME ec2_launch_template ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- ec2_launch_template.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index e96049fa347..1051c1b7c66 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -31,6 +31,11 @@ template_name: description: - The template name. 
This must be unique in the region-account combination you are using. + - If no launch template exists with the specified name, a new launch template is created. + - If a launch template with the specified name already exists and the configuration has not changed, + nothing happens. + - If a launch template with the specified name already exists and the configuration has changed, + a new version of the launch template is created. aliases: [name] type: str default_version: From 7247b4a4b81d7671ede4d182a7600b3a0a86f4ee Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 31 Jan 2022 10:29:42 +0000 Subject: [PATCH 369/683] Fix cloudfront_distribution s3_origin_access_identity_enabled bug (#881) Fix cloudfront_distribution s3_origin_access_identity_enabled bug SUMMARY If s3_origin_access_identity_enabled is set to True but no s3_origin_config then a default origin config is applied however it also picks up s3_origin_access_identity_enabled as S3OriginAccessIdentityEnabled and passes it to the API request which is not a valid option to be passed and then fails validation. Fixes: #749 ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution ADDITIONAL INFORMATION The option mention is not valid for the API request: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_distribution Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- cloudfront_distribution.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 80ac6dcec4b..946b93e2041 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1686,9 +1686,6 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if not origin['s3_origin_access_identity_enabled']: - return None - if origin.get('s3_origin_config', {}).get('origin_access_identity'): return origin['s3_origin_config']['origin_access_identity'] @@ -1719,13 +1716,20 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin['custom_headers'] = ansible_list_to_cloudfront_list() if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): if origin.get("s3_origin_access_identity_enabled") is not None: - s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) + if origin['s3_origin_access_identity_enabled']: + s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) + else: + s3_origin_config = None + + del(origin["s3_origin_access_identity_enabled"]) + if s3_origin_config: oai = s3_origin_config else: oai = "" + origin["s3_origin_config"] = dict(origin_access_identity=oai) - del(origin["s3_origin_access_identity_enabled"]) + if 'custom_origin_config' in origin: self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") else: From 6f30968a4f9af884a4fe27a65165d33358eac0ff Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 31 Jan 2022 12:26:36 +0100 Subject: [PATCH 370/683] [Breaking Change] Final removal of original boto SDK (#898) [Breaking Change] Final removal of original boto SDK SUMMARY Remove old boto based inventory script Clean up requirements Clean up random comments in docs/comments ISSUE TYPE Feature Pull Request COMPONENT NAME 
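A minimal task sketch (bucket and reference values are hypothetical) of the cloudfront_distribution case fixed above: enabling s3_origin_access_identity_enabled without an explicit s3_origin_config now yields a default origin config instead of leaking the flag into the API request as S3OriginAccessIdentityEnabled:

- name: S3 origin with an auto-managed origin access identity
  community.aws.cloudfront_distribution:
    state: present
    caller_reference: my-distribution
    origins:
      - id: my-s3-origin
        domain_name: example-bucket.s3.amazonaws.com
        s3_origin_access_identity_enabled: true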
scripts/inventory/ec2.py requirements.txt test-requirements.txt tests/integration/requirements.txt tests/unit/requirements.txt ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_direct_connect_virtual_interface.py | 2 +- ec2_eip.py | 2 +- ecs_service.py | 2 +- ecs_taskdefinition.py | 2 +- iam_saml_federation.py | 2 +- iam_server_certificate.py | 2 -- rds_param_group.py | 2 +- route53.py | 2 +- 8 files changed, 7 insertions(+), 9 deletions(-) diff --git a/aws_direct_connect_virtual_interface.py b/aws_direct_connect_virtual_interface.py index d2d199c5527..f0c1b7f7800 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/aws_direct_connect_virtual_interface.py @@ -404,7 +404,7 @@ def create_vi(client, public, associated_id, creation_params): :param public: a boolean :param associated_id: a link aggregation group ID or connection ID to associate with the virtual interface. - :param creation_params: a dict of parameters to use in the boto call + :param creation_params: a dict of parameters to use in the AWS SDK call :return The ID of the created virtual interface ''' err_msg = "Failed to create virtual interface" diff --git a/ec2_eip.py b/ec2_eip.py index e38e941661f..ca883e5f715 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -494,7 +494,7 @@ def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True) def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): # type: (EC2Connection, str, bool, str) -> Address - """ Overrides boto's allocate_address function to support BYOIP """ + """ Overrides botocore's allocate_address function to support BYOIP """ params = {} if domain is not None: diff --git a/ecs_service.py b/ecs_service.py index d43253af386..8e7adbcacc2 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -752,7 +752,7 @@ def main(): loadBalancer['containerPort'] = int(loadBalancer['containerPort']) if update: - # check various parameters and boto versions and give a helpful error in boto is not new enough for feature + # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature if module.params['scheduling_strategy']: if (existing['schedulingStrategy']) != module.params['scheduling_strategy']: diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index ab3a47d176e..f99db8b9659 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -685,7 +685,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, volumes, launch_type, cpu, memory, placement_constraints): validated_containers = [] - # Ensures the number parameters are int as required by boto + # Ensures the number parameters are int as required by the AWS SDK for container in container_definitions: for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'): if param in container: diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 4b41f443134..70bd4461d10 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -120,7 +120,7 @@ def __init__(self, module): try: self.conn = module.client('iam') except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Unknown boto error") + self.module.fail_json_aws(e, msg="Unknown AWS SDK error") # use retry decorator for boto3 calls @AWSRetry.jittered_backoff(retries=3, delay=5) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index b6cad710fb3..142d391ac06 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -86,8 +86,6 @@ 
extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto >= 2.49.0 ''' EXAMPLES = ''' diff --git a/rds_param_group.py b/rds_param_group.py index 76e6138b466..7d5d216d092 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -159,7 +159,7 @@ def convert_parameter(param, value): converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] except ValueError: # may be based on a variable (ie. {foo*3/4}) so - # just pass it on through to boto + # just pass it on through to the AWS SDK pass elif isinstance(value, bool): converted_value = 1 if value else 0 diff --git a/route53.py b/route53.py index 4275d65b684..4ddacdca09e 100644 --- a/route53.py +++ b/route53.py @@ -413,7 +413,7 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): if private_zone == want_private and zone['Name'] == zone_name: if want_vpc_id: - # NOTE: These details aren't available in other boto methods, hence the necessary + # NOTE: These details aren't available in other boto3 methods, hence the necessary # extra API call hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]: From 2d6971763652f55402a9bb7d076ba6101229325c Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 2 Feb 2022 11:41:14 +0100 Subject: [PATCH 371/683] Move some shared logic in module utils/sns (#879) Move some shared logic in module utils/sns SUMMARY Move some shared logic in module utils/sns ISSUE TYPE Feature Pull Request COMPONENT NAME sns_topic sns sns_topic_info Reviewed-by: Mark Chappell Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz --- sns.py | 19 +-------- sns_topic.py | 117 +++++++-------------------------------------------- 2 files changed, 17 insertions(+), 119 deletions(-) diff --git a/sns.py b/sns.py index a18c3279173..fc400bac5e0 100644 --- a/sns.py +++ b/sns.py @@ -134,22 +134,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - - -def arn_topic_lookup(module, client, short_topic): - lookup_topic = ':{0}'.format(short_topic) - - try: - paginator = client.get_paginator('list_topics') - topic_iterator = paginator.paginate() - for response in topic_iterator: - for topic in response['Topics']: - if topic['TopicArn'].endswith(lookup_topic): - return topic['TopicArn'] - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to look up topic ARN') - - return None +from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup def main(): @@ -205,7 +190,7 @@ def main(): # Short names can't contain ':' so we'll assume this is the full ARN sns_kwargs['TopicArn'] = topic else: - sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic) + sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic) if not sns_kwargs['TopicArn']: module.fail_json(msg='Could not find topic: {0}'.format(topic)) diff --git a/sns_topic.py b/sns_topic.py index 37cf573ce58..817729c33e8 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -284,8 +284,6 @@ ''' import json -import re -import copy try: import botocore @@ -293,11 +291,14 @@ pass # handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.sns import list_topics +from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup +from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies +from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions +from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint +from ansible_collections.community.aws.plugins.module_utils.sns import get_info class SnsTopicManager(object): @@ -334,36 +335,6 @@ def __init__(self, self.topic_arn = None self.attributes_set = [] - @AWSRetry.jittered_backoff() - def _list_topics_with_backoff(self): - paginator = self.connection.get_paginator('list_topics') - return paginator.paginate().build_full_result()['Topics'] - - @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) - def _list_topic_subscriptions_with_backoff(self): - paginator = self.connection.get_paginator('list_subscriptions_by_topic') - return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions'] - - @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) - def _list_subscriptions_with_backoff(self): - paginator = self.connection.get_paginator('list_subscriptions') - return paginator.paginate().build_full_result()['Subscriptions'] - - def _list_topics(self): - try: - topics = self._list_topics_with_backoff() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get topic list") - return [t['TopicArn'] for t in topics] - - def _topic_arn_lookup(self): - # topic names cannot have colons, so this captures the full topic name - all_topics = self._list_topics() - lookup_topic = ':%s' % self.name - for topic in all_topics: - if topic.endswith(lookup_topic): - return topic - def _create_topic(self): attributes = {'FifoTopic': 'false'} tags = [] @@ -381,20 +352,6 @@ def _create_topic(self): self.topic_arn = response['TopicArn'] return True - def _compare_delivery_policies(self, policy_a, policy_b): - _policy_a = copy.deepcopy(policy_a) - _policy_b = copy.deepcopy(policy_b) - # AWS automatically injects disableSubscriptionOverrides if you set an - # http policy - if 'http' in policy_a: - if 'disableSubscriptionOverrides' not in policy_a['http']: - _policy_a['http']['disableSubscriptionOverrides'] = False - if 'http' in policy_b: - if 'disableSubscriptionOverrides' not in policy_b['http']: - _policy_b['http']['disableSubscriptionOverrides'] = False - comparison = (_policy_a != _policy_b) - return comparison - def _set_topic_attrs(self): changed = False try: @@ -423,7 +380,7 @@ def _set_topic_attrs(self): self.module.fail_json_aws(e, msg="Couldn't set topic policy") if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or - self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): changed = True self.attributes_set.append('delivery_policy') if not self.check_mode: @@ -434,22 +391,14 @@ def _set_topic_attrs(self): self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") return changed - def _canonicalize_endpoint(self, protocol, endpoint): 
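For reference, a sketch of the shared topic_arn_lookup() helper these modules now import from module_utils/sns, reconstructed from the lookup code removed above (assumed to match the packaged version in outline only):

import botocore.exceptions

def topic_arn_lookup(client, module, name):
    # Topic names cannot contain ':', so matching on the ':<name>' suffix of
    # each ARN is unambiguous.
    try:
        paginator = client.get_paginator('list_topics')
        for page in paginator.paginate():
            for topic in page['Topics']:
                if topic['TopicArn'].endswith(':%s' % name):
                    return topic['TopicArn']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't look up topic ARN")
    return None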
- # AWS SNS expects phone numbers in - # and canonicalizes to E.164 format - # See - if protocol == 'sms': - return re.sub('[^0-9+]*', '', endpoint) - return endpoint - def _set_topic_subs(self): changed = False subscriptions_existing_list = set() desired_subscriptions = [(sub['protocol'], - self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in + canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in self.subscriptions] - for sub in self._list_topic_subscriptions(): + for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): sub_key = (sub['Protocol'], sub['Endpoint']) subscriptions_existing_list.add(sub_key) if (self.purge_subscriptions and sub_key not in desired_subscriptions and @@ -472,23 +421,10 @@ def _set_topic_subs(self): self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) return changed - def _list_topic_subscriptions(self): - try: - return self._list_topic_subscriptions_with_backoff() - except is_boto3_error_code('AuthorizationError'): - try: - # potentially AuthorizationError when listing subscriptions for third party topic - return [sub for sub in self._list_subscriptions_with_backoff() - if sub['TopicArn'] == self.topic_arn] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) - def _delete_subscriptions(self): # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days # https://forums.aws.amazon.com/thread.jspa?threadID=85993 - subscriptions = self._list_topic_subscriptions() + subscriptions = list_topic_subscriptions(self.connection, self.module, self.topic_arn) if not subscriptions: return False for sub in subscriptions: @@ -518,10 +454,10 @@ def ensure_ok(self): if self._name_is_arn(): self.topic_arn = self.name else: - self.topic_arn = self._topic_arn_lookup() + self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) if not self.topic_arn: changed = self._create_topic() - if self.topic_arn in self._list_topics(): + if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_attrs() elif self.display_name or self.policy or self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") @@ -533,37 +469,14 @@ def ensure_gone(self): if self._name_is_arn(): self.topic_arn = self.name else: - self.topic_arn = self._topic_arn_lookup() + self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) if self.topic_arn: - if self.topic_arn not in self._list_topics(): + if self.topic_arn not in list_topics(self.connection, self.module): self.module.fail_json(msg="Cannot use state=absent with third party ARN. 
Use subscribers=[] to unsubscribe") changed = self._delete_subscriptions() changed |= self._delete_topic() return changed - def get_info(self): - info = { - 'name': self.name, - 'topic_type': self.topic_type, - 'state': self.state, - 'subscriptions_new': self.subscriptions, - 'subscriptions_existing': self.subscriptions_existing, - 'subscriptions_deleted': self.subscriptions_deleted, - 'subscriptions_added': self.subscriptions_added, - 'subscriptions_purge': self.purge_subscriptions, - 'check_mode': self.check_mode, - 'topic_created': self.topic_created, - 'topic_deleted': self.topic_deleted, - 'attributes_set': self.attributes_set, - } - if self.state != 'absent': - if self.topic_arn in self._list_topics(): - info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'])) - info['delivery_policy'] = info.pop('effective_delivery_policy') - info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()] - - return info - def main(): @@ -635,7 +548,7 @@ def main(): sns_facts = dict(changed=changed, sns_arn=sns_topic.topic_arn, - sns_topic=sns_topic.get_info()) + sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn)) module.exit_json(**sns_facts) From edf65bce9e20b02a3ba0f6f050879b02396fcba1 Mon Sep 17 00:00:00 2001 From: Priyadarshini Chettiar <45838555+priyadarshu@users.noreply.github.com> Date: Thu, 3 Feb 2022 02:22:27 +0530 Subject: [PATCH 372/683] Update the name attribute value in the examples (#918) Update the name attribute value in the examples SUMMARY Problem: all the examples used the same name value regardless of each task's purpose. Action taken: renamed the tasks under EXAMPLES so that each name matches the comment describing the task. ISSUE TYPE Docs Pull Request COMPONENT NAME iam_managed_policy ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Markus Bergholz --- iam_managed_policy.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index d6cdd33525e..2b33d711e71 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -65,14 +65,14 @@ state: present # Update a policy with a new default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with new default version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: "{{ lookup('file', 'managed_policy_update.json') }}" state: present # Update a policy with a new non default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with a non default version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: @@ -85,7 +85,7 @@ state: present # Update a policy and make it the only version and the default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with default version as the only version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: | @@ -101,7 +101,7 @@ state: present # Remove a policy -- name: Create IAM Managed Policy +- name: Remove an existing IAM Managed Policy community.aws.iam_managed_policy: policy_name: "ManagedPolicy" state: absent From 8a151d61cd98455680fe40d3455e2cfdd1551ec7 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 3 Feb 2022 13:51:25 +0100 Subject: [PATCH 373/683] ec2_launch_template: implement missing metadata options (#917) ec2_launch_template: implement missing metadata options SUMMARY Add missing metadata options
instance_metadata_tags http_protocol_ipv6 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_launch_template Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- ec2_launch_template.py | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 1051c1b7c66..fab3c4100bd 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -353,6 +353,22 @@ The state of token usage for your instance metadata requests. choices: [optional, required] default: 'optional' + http_protocol_ipv6: + version_added: 3.1.0 + type: str + description: + - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). + - Requires boto3 >= 1.18.29 + choices: [enabled, disabled] + default: 'disabled' + instance_metadata_tags: + version_added: 3.1.0 + type: str + description: + - Whether the instance tags are available (C(enabled)) via the metadata endpoint or not (C(disabled)). + - Requires boto3 >= 1.20.30 + choices: [enabled, disabled] + default: 'disabled' ''' EXAMPLES = ''' @@ -516,6 +532,24 @@ def create_or_update(module, template_options): out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) + + if lt_data.get('MetadataOptions'): + if not module.boto3_at_least('1.20.30'): + # fail only if enabled is requested + if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': + module.require_boto3_at_least('1.20.30', reason='to set instance_metadata_tags') + # pop it if it's not requested, to keep backwards compatibility; + # otherwise the module fails because the parameters are set due to default values + lt_data['MetadataOptions'].pop('InstanceMetadataTags') + + if not module.boto3_at_least('1.18.29'): + # fail only if enabled is requested + if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': + module.require_boto3_at_least('1.18.29', reason='to set http_protocol_ipv6') + # pop it if it's not requested, to keep backwards compatibility; + # otherwise the module fails because the parameters are set due to default values + lt_data['MetadataOptions'].pop('HttpProtocolIpv6') + if not (template or template_versions): # create a full new one try: @@ -671,7 +705,9 @@ def main(): options=dict( http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional') + http_tokens=dict(choices=['optional', 'required'], default='optional'), + http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), + instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), ) ), network_interfaces=dict( From f298d0e15ebe9dfa30a5b3332d209559c54c7e1c Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 4 Feb 2022 12:03:55 +0000 Subject: [PATCH 374/683] Add AWSRetry backoff logic to route53_zone and route53_info (#865) Add AWSRetry backoff logic to route53_zone and route53_info SUMMARY Add AWSRetry backoff logic to route53_zone and route53_info. Currently from time to time I've been hitting AWS throttling errors leading to ansible failures: An exception occurred during task execution. To see the full traceback, use -vvv.
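A hypothetical task exercising the two new launch-template options above (all values illustrative; http_protocol_ipv6 requires boto3 >= 1.18.29 and instance_metadata_tags requires boto3 >= 1.20.30):

- name: Launch template that requires IMDSv2 and exposes tags via metadata
  community.aws.ec2_launch_template:
    name: my-template
    image_id: ami-0123456789abcdef0
    instance_type: t3.micro
    metadata_options:
      http_endpoint: enabled
      http_tokens: required
      http_protocol_ipv6: enabled
      instance_metadata_tags: enabled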
The error was: botocore.exceptions.ClientError: An error occurred (Throttling) when calling the ListHostedZones operation (reached max retries: 4): Rate exceeded fatal: [localhost_staging -> 127.0.0.1]: FAILED! => changed=false boto3_version: 1.20.34 botocore_version: 1.23.34 error: code: Throttling message: Rate exceeded type: Sender msg: 'Could not list current hosted zones: An error occurred (Throttling) when calling the ListHostedZones operation (reached max retries: 4): Rate exceeded' response_metadata: http_headers: connection: close content-length: '255' content-type: text/xml date: Fri, 14 Jan 2022 12:09:35 GMT x-amzn-requestid: xxxxxxx http_status_code: 400 max_attempts_reached: true request_id: xxxxxxx retry_attempts: 4 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_zone route53_info ADDITIONAL INFORMATION I've added the standard backoff retry logic and split out the paginators. Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- route53_info.py | 56 +++++++++++++++++++++++++++++-------------------- route53_zone.py | 53 +++++++++++++++++++++++++++------------------- 2 files changed, 64 insertions(+), 45 deletions(-) diff --git a/route53_info.py b/route53_info.py index e2f1cd686ff..7622113c25e 100644 --- a/route53_info.py +++ b/route53_info.py @@ -212,9 +212,17 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def get_hosted_zone(client, module): +# Split out paginator to allow for the backoff decorator to function +@AWSRetry.jittered_backoff() +def _paginated_result(paginator_name, **params): + paginator = client.get_paginator(paginator_name) + return paginator.paginate(**params).build_full_result() + + +def get_hosted_zone(): params = dict() if module.params.get('hosted_zone_id'): @@ -225,7 +233,7 @@ def get_hosted_zone(client, module): return client.get_hosted_zone(**params) -def reusable_delegation_set_details(client, module): +def reusable_delegation_set_details(): params = dict() if not module.params.get('delegation_set_id'): @@ -246,7 +254,7 @@ def reusable_delegation_set_details(client, module): return results -def list_hosted_zones(client, module): +def list_hosted_zones(): params = dict() # Set PaginationConfig with max_items @@ -261,15 +269,15 @@ def list_hosted_zones(client, module): if module.params.get('delegation_set_id'): params['DelegationSetId'] = module.params.get('delegation_set_id') - paginator = client.get_paginator('list_hosted_zones') - zones = paginator.paginate(**params).build_full_result()['HostedZones'] + zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + return { "HostedZones": zones, "list": zones, } -def list_hosted_zones_by_name(client, module): +def list_hosted_zones_by_name(): params = dict() if module.params.get('hosted_zone_id'): @@ -287,7 +295,7 @@ def list_hosted_zones_by_name(client, module): return client.list_hosted_zones_by_name(**params) -def change_details(client, module): +def change_details(): params = dict() if module.params.get('change_id'): @@ -299,11 +307,11 @@ def change_details(client, module): return results -def checker_ip_range_details(client, module): +def checker_ip_range_details(): return client.get_checker_ip_ranges() -def get_count(client, module): +def get_count(): if module.params.get('query') == 'health_check': results = client.get_health_check_count() else: @@ -312,7 +320,7 @@ def get_count(client, module): return 
results -def get_health_check(client, module): +def get_health_check(): params = dict() if not module.params.get('health_check_id'): @@ -330,7 +338,7 @@ def get_health_check(client, module): return results -def get_resource_tags(client, module): +def get_resource_tags(): params = dict() if module.params.get('resource_id'): @@ -346,7 +354,7 @@ def get_resource_tags(client, module): return client.list_tags_for_resources(**params) -def list_health_checks(client, module): +def list_health_checks(): params = dict() if module.params.get('next_marker'): @@ -358,15 +366,15 @@ def list_health_checks(client, module): MaxItems=module.params.get('max_items') ) - paginator = client.get_paginator('list_health_checks') - health_checks = paginator.paginate(**params).build_full_result()['HealthChecks'] + health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + return { "HealthChecks": health_checks, "list": health_checks, } -def record_sets_details(client, module): +def record_sets_details(): params = dict() if module.params.get('hosted_zone_id'): @@ -390,8 +398,7 @@ def record_sets_details(client, module): MaxItems=module.params.get('max_items') ) - paginator = client.get_paginator('list_resource_record_sets') - record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets'] + record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] return { "ResourceRecordSets": record_sets, @@ -399,7 +406,7 @@ def record_sets_details(client, module): } -def health_check_details(client, module): +def health_check_details(): health_check_invocations = { 'list': list_health_checks, 'details': get_health_check, @@ -409,11 +416,11 @@ def health_check_details(client, module): 'tags': get_resource_tags, } - results = health_check_invocations[module.params.get('health_check_method')](client, module) + results = health_check_invocations[module.params.get('health_check_method')]() return results -def hosted_zone_details(client, module): +def hosted_zone_details(): hosted_zone_invocations = { 'details': get_hosted_zone, 'list': list_hosted_zones, @@ -422,11 +429,14 @@ def hosted_zone_details(client, module): 'tags': get_resource_tags, } - results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module) + results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() return results def main(): + global module + global client + argument_spec = dict( query=dict(choices=[ 'change', @@ -475,7 +485,7 @@ def main(): ) try: - route53 = module.client('route53') + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') @@ -490,7 +500,7 @@ def main(): results = dict(changed=False) try: - results = invocations[module.params.get('query')](route53, module) + results = invocations[module.params.get('query')]() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg=to_native(e)) diff --git a/route53_zone.py b/route53_zone.py index 334e6d62718..ba51fcbb9e2 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: route53_zone short_description: add or delete Route53 zones version_added: 1.0.0 @@ -65,7 +65,7 @@ author: "Christopher Troup (@minichate)" ''' -EXAMPLES = ''' 
+EXAMPLES = r''' - name: create a public zone community.aws.route53_zone: zone: example.com @@ -105,7 +105,7 @@ purge_tags: true ''' -RETURN = ''' +RETURN = r''' comment: description: optional hosted zone comment returned: when hosted zone exists @@ -149,6 +149,7 @@ import time from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags @@ -158,10 +159,15 @@ pass # caught by AnsibleAWSModule -def find_zones(module, client, zone_in, private_zone): +@AWSRetry.jittered_backoff() +def _list_zones(): + paginator = client.get_paginator('list_hosted_zones') + return paginator.paginate().build_full_result() + + +def find_zones(zone_in, private_zone): try: - paginator = client.get_paginator('list_hosted_zones') - results = paginator.paginate().build_full_result() + results = _list_zones() except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not list current hosted zones") zones = [] @@ -176,7 +182,7 @@ def find_zones(module, client, zone_in, private_zone): return zones -def create(module, client, matching_zones): +def create(matching_zones): zone_in = module.params.get('zone').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') @@ -201,9 +207,9 @@ def create(module, client, matching_zones): } if private_zone: - changed, result = create_or_update_private(module, client, matching_zones, record) + changed, result = create_or_update_private(matching_zones, record) else: - changed, result = create_or_update_public(module, client, matching_zones, record) + changed, result = create_or_update_public(matching_zones, record) zone_id = result.get('zone_id') if zone_id: @@ -216,7 +222,7 @@ def create(module, client, matching_zones): return changed, result -def create_or_update_private(module, client, matching_zones, record): +def create_or_update_private(matching_zones, record): for z in matching_zones: try: result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids @@ -275,7 +281,7 @@ def create_or_update_private(module, client, matching_zones, record): return changed, record -def create_or_update_public(module, client, matching_zones, record): +def create_or_update_public(matching_zones, record): zone_details, zone_delegation_set_details = None, {} for matching_zone in matching_zones: try: @@ -332,7 +338,7 @@ def create_or_update_public(module, client, matching_zones, record): return changed, record -def delete_private(module, client, matching_zones, vpc_id, vpc_region): +def delete_private(matching_zones, vpc_id, vpc_region): for z in matching_zones: try: result = client.get_hosted_zone(Id=z['Id']) @@ -360,7 +366,7 @@ def delete_private(module, client, matching_zones, vpc_id, vpc_region): return False, "The vpc_id and the vpc_region do not match a private hosted zone." -def delete_public(module, client, matching_zones): +def delete_public(matching_zones): if len(matching_zones) > 1: changed = False msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone." 
@@ -375,7 +381,7 @@ def delete_public(module, client, matching_zones): return changed, msg -def delete_hosted_id(module, client, hosted_zone_id, matching_zones): +def delete_hosted_id(hosted_zone_id, matching_zones): if hosted_zone_id == "all": deleted = [] for z in matching_zones: @@ -401,7 +407,7 @@ def delete_hosted_id(module, client, hosted_zone_id, matching_zones): return changed, msg -def delete(module, client, matching_zones): +def delete(matching_zones): zone_in = module.params.get('zone').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') @@ -414,12 +420,12 @@ def delete(module, client, matching_zones): if zone_in in [z['Name'] for z in matching_zones]: if hosted_zone_id: - changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones) + changed, result = delete_hosted_id(hosted_zone_id, matching_zones) else: if private_zone: - changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region) + changed, result = delete_private(matching_zones, vpc_id, vpc_region) else: - changed, result = delete_public(module, client, matching_zones) + changed, result = delete_public(matching_zones) else: changed = False result = "No zone to delete." @@ -428,6 +434,9 @@ def delete(module, client, matching_zones): def main(): + global module + global client + argument_spec = dict( zone=dict(required=True), state=dict(default='present', choices=['present', 'absent']), @@ -461,13 +470,13 @@ def main(): private_zone = bool(vpc_id and vpc_region) - client = module.client('route53') + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) - zones = find_zones(module, client, zone_in, private_zone) + zones = find_zones(zone_in, private_zone) if state == 'present': - changed, result = create(module, client, matching_zones=zones) + changed, result = create(matching_zones=zones) elif state == 'absent': - changed, result = delete(module, client, matching_zones=zones) + changed, result = delete(matching_zones=zones) if isinstance(result, dict): module.exit_json(changed=changed, result=result, **result) From f924ea092af26b29e197bfbdb6dc3746e1456577 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 4 Feb 2022 16:08:05 +0000 Subject: [PATCH 375/683] Add deregistration_connection_termination to elb_target_group (#913) Add deregistration_connection_termination to elb_target_group SUMMARY Adding support for the deregistration_connection_termination param in the elb_target_group module. Along with this I've enabled and fixed up the integration tests. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_target_group ADDITIONAL INFORMATION The API param is deregistration_delay.connection_termination.enabled https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_group_attributes Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- elb_target_group.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/elb_target_group.py b/elb_target_group.py index 9a740422293..20e9c2b19da 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -22,6 +22,13 @@ - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. 
type: int + deregistration_connection_termination: + description: + - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. + type: bool + default: false + required: false + version_added: 3.1.0 health_check_protocol: description: - The protocol the load balancer uses when performing health checks on targets. @@ -305,6 +312,11 @@ returned: when state present type: int sample: 300 +deregistration_connection_termination: + description: Indicates whether the load balancer terminates connections at the end of the deregistration timeout. + returned: when state present + type: bool + sample: True health_check_interval_seconds: description: The approximate amount of time, in seconds, between health checks of an individual target. returned: when state present @@ -425,7 +437,7 @@ def get_tg_attributes(connection, module, tg_arn): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") - # Replace '.' with '_' in attribute key names to make it more Ansibley + # Replace '.' with '_' in attribute key names to make it more Ansible friendly return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items()) @@ -486,6 +498,7 @@ def create_or_update_target_group(connection, module): tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") + deregistration_connection_termination = module.params.get("deregistration_connection_termination") stickiness_enabled = module.params.get("stickiness_enabled") stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") stickiness_type = module.params.get("stickiness_type") @@ -767,6 +780,9 @@ def create_or_update_target_group(connection, module): if deregistration_delay_timeout is not None: if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if deregistration_connection_termination is not None: + if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": + update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) if stickiness_enabled is not None: if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) @@ -855,6 +871,7 @@ def main(): 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] argument_spec = dict( deregistration_delay_timeout=dict(type='int'), + deregistration_connection_termination=dict(type='bool', default=False), health_check_protocol=dict(choices=protocols_list), health_check_port=dict(), health_check_path=dict(), @@ -897,6 +914,9 @@ def main(): connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) if module.params.get('state') == 'present': + if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None): + module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination") + create_or_update_target_group(connection, module) else: delete_target_group(connection, module) From 9b6c20268ea7f618c4372e4e2c9f45576574ce1a Mon Sep 17 00:00:00 2001 From: 
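A hypothetical task using the new elb_target_group flag above; note that the module now rejects the combination of an HTTP/HTTPS protocol with deregistration_connection_termination:

- name: TCP target group that drops connections once deregistration completes
  community.aws.elb_target_group:
    name: my-tcp-tg
    protocol: tcp
    port: 5432
    vpc_id: vpc-0123456789abcdef0
    deregistration_delay_timeout: 60
    deregistration_connection_termination: true
    state: present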
Mark Woolley Date: Fri, 4 Feb 2022 16:10:43 +0000 Subject: [PATCH 376/683] Fix IOPs io1 DB instance updates and integration tests also (#878) Fix IOPs io1 DB instance updates and integration tests also SUMMARY Primarily, this PR fixes updates of iops or allocated_storage on io1 DB instances when only one of the two parameters is changing. Secondarily, it fixes up the integration tests again, tested against some improvements to the waiter configuration (see the linked PR). The IOPs error on update attempts when only one parameter is updated: error: code: InvalidParameterCombination message: You must specify both the storage size and iops when modifying the storage size or iops on a DB instance that has iops. type: Sender msg: 'Unable to modify DB instance: An error occurred (InvalidParameterCombination) when calling the ModifyDBInstance operation: You must specify both the storage size and iops when modifying the storage size or iops on a DB instance that has iops.' ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION These tests are very slow and still a little flaky, but generally all pass as expected now locally. Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- rds_instance.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/rds_instance.py b/rds_instance.py index 92d5e257cf0..742a7266c5e 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -467,10 +467,15 @@ RETURN = r''' allocated_storage: - description: The allocated storage size in gibibytes. This is always 1 for aurora database engines. + description: The allocated storage size in gigabytes. This is always 1 for aurora database engines. returned: always type: int sample: 20 +associated_roles: + description: The list of currently associated roles. + returned: always + type: list + sample: [] auto_minor_version_upgrade: description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
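A hypothetical io1 update illustrating the fix described above: the module now requires both iops and allocated_storage for io1 storage and re-sends the unchanged value when only one of them differs, avoiding the InvalidParameterCombination error:

- name: Grow storage on an io1 RDS instance
  community.aws.rds_instance:
    id: my-db-instance
    state: present
    engine: postgres
    storage_type: io1
    allocated_storage: 200
    iops: 3000
    apply_immediately: true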
returned: always @@ -890,6 +895,17 @@ def get_options_with_changing_values(client, module, parameters): updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) parameters = updated_parameters + if instance.get('StorageType') == 'io1': + # Bundle Iops and AllocatedStorage while updating io1 RDS Instance + current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) + current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) + new_iops = module.params.get('iops') + new_allocated_storage = module.params.get('allocated_storage') + + if current_iops != new_iops or current_allocated_storage != new_allocated_storage: + parameters['AllocatedStorage'] = new_allocated_storage + parameters['Iops'] = new_iops + if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: parameters.pop('NewDBInstanceIdentifier') @@ -1179,6 +1195,7 @@ def main(): ('engine', 'aurora', ('db_cluster_identifier',)), ('engine', 'aurora-mysql', ('db_cluster_identifier',)), ('engine', 'aurora-postresql', ('db_cluster_identifier',)), + ('storage_type', 'io1', ('iops', 'allocated_storage')), ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), ('creation_source', 's3', ( 's3_bucket_name', 'engine', 'master_username', 'master_user_password', From d7cddcff2400af30265c29d1111b51d641d20a6e Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Fri, 4 Feb 2022 11:33:23 -0800 Subject: [PATCH 377/683] ec2_placement_group: Add partition strategy and partition count (#872) ec2_placement_group: Add partition strategy and partition count SUMMARY Add partition as a strategy and an option, partition_count to choose the actual number of partitions for the community.aws.ec2_placement_group module. Fixes #808 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_placement_group ADDITIONAL INFO Tested locally with - name: Create a partition placement group with partition count 4. ec2_placement_group: name: my-cluster state: present strategy: partition partition_count: 4 Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Mark Woolley --- ec2_placement_group.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 3ccb2c00802..9ca3bb02ab9 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -23,6 +23,13 @@ - The name for the placement group. required: true type: str + partition_count: + description: + - The number of partitions. + - Valid only when I(Strategy) is set to C(partition). + - Must be a value between C(1) and C(7). + type: int + version_added: 3.1.0 state: description: - Create or delete placement group. @@ -35,7 +42,7 @@ low-latency group in a single Availability Zone, while Spread spreads instances across underlying hardware. default: cluster - choices: [ 'cluster', 'spread' ] + choices: [ 'cluster', 'spread', 'partition' ] type: str extends_documentation_fragment: - amazon.aws.aws @@ -58,6 +65,13 @@ state: present strategy: spread +- name: Create a Partition strategy placement group. + community.aws.ec2_placement_group: + name: my-cluster + state: present + strategy: partition + partition_count: 3 + - name: Delete a placement group. 
community.aws.ec2_placement_group: name: my-cluster @@ -126,10 +140,21 @@ def get_placement_group_details(connection, module): def create_placement_group(connection, module): name = module.params.get("name") strategy = module.params.get("strategy") + partition_count = module.params.get("partition_count") + + if strategy != 'partition' and partition_count: + module.fail_json( + msg="'partition_count' can only be set when strategy is set to 'partition'.") + + params = {} + params['GroupName'] = name + params['Strategy'] = strategy + if partition_count: + params['PartitionCount'] = partition_count + params['DryRun'] = module.check_mode try: - connection.create_placement_group( - GroupName=name, Strategy=strategy, DryRun=module.check_mode) + connection.create_placement_group(**params) except is_boto3_error_code('DryRunOperation'): module.exit_json(changed=True, placement_group={ "name": name, @@ -165,8 +190,9 @@ def delete_placement_group(connection, module): def main(): argument_spec = dict( name=dict(required=True, type='str'), + partition_count=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), - strategy=dict(default='cluster', choices=['cluster', 'spread']) + strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition']) ) module = AnsibleAWSModule( From facc648e81c24d30213efb910fd348aba004756a Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 7 Feb 2022 17:40:48 +0000 Subject: [PATCH 378/683] Add dynamodb table class support (#880) Add dynamodb table class support SUMMARY Add support for defining a TableClass on DynamoDB tables. TableClass was introduced as part of botocore version 1.23.18 https://github.com/boto/botocore/blob/develop/CHANGELOG.rst#12318 Fixes: #829 ISSUE TYPE Feature Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.create_table https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.update_table Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Jill R --- dynamodb_table.py | 70 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 66 insertions(+), 4 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 1ea4391223c..839178256aa 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -121,6 +121,13 @@ default: [] type: list elements: dict + table_class: + description: + - The class of the table. + - Requires at least botocore version 1.23.18. + choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS'] + type: str + version_added: 3.1.0 tags: description: - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag. @@ -201,11 +208,49 @@ ''' RETURN = r''' +table: + description: The returned table params from the describe API call. 
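A hypothetical table using the new table_class option documented above (illustrative names; requires botocore >= 1.23.18):

- name: Create an infrequent-access DynamoDB table
  community.aws.dynamodb_table:
    name: my-archive-table
    hash_key_name: id
    hash_key_type: NUMBER
    table_class: STANDARD_INFREQUENT_ACCESS
    state: present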
+ returned: success + type: complex + contains: {} + sample: { + "arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", + "attribute_definitions": [ + { + "attribute_name": "id", + "attribute_type": "N" + } + ], + "billing_mode": "PROVISIONED", + "creation_date_time": "2022-02-04T13:36:01.578000+00:00", + "id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", + "item_count": 0, + "key_schema": [ + { + "attribute_name": "id", + "key_type": "HASH" + } + ], + "name": "ansible-test-14482047-alinas-mbp", + "provisioned_throughput": { + "number_of_decreases_today": 0, + "read_capacity_units": 1, + "write_capacity_units": 1 + }, + "size": 0, + "status": "ACTIVE", + "table_arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", + "table_id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", + "table_name": "ansible-test-table", + "table_size_bytes": 0, + "table_status": "ACTIVE", + "tags": {} + } table_status: - description: The current status of the table. - returned: success - type: str - sample: ACTIVE + description: The current status of the table. + returned: success + type: str + sample: ACTIVE ''' try: @@ -410,6 +455,7 @@ def compatability_results(current_table): billing_mode=billing_mode, region=module.region, table_name=current_table.get('table_name', None), + table_class=current_table.get('table_class_summary', {}).get('table_class', None), table_status=current_table.get('table_status', None), tags=current_table.get('tags', {}), ) @@ -452,6 +498,9 @@ def get_dynamodb_table(): table['size'] = table['table_size_bytes'] table['tags'] = tags + if 'table_class_summary' in table: + table['table_class'] = table['table_class_summary']['table_class'] + # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST # and when updating the billing_mode if 'billing_mode_summary' in table: @@ -753,6 +802,7 @@ def _update_table(current_table): changes = dict() additional_global_index_changes = list() + # Get throughput / billing_mode changes throughput_changes = _throughput_changes(current_table) if throughput_changes: changes['ProvisionedThroughput'] = throughput_changes @@ -766,6 +816,11 @@ def _update_table(current_table): if current_billing_mode != new_billing_mode: changes['BillingMode'] = new_billing_mode + # Update table_class use exisiting if none is defined + if module.params.get('table_class'): + if module.params.get('table_class') != current_table.get('table_class'): + changes['TableClass'] = module.params.get('table_class') + global_index_changes = _global_index_changes(current_table) if global_index_changes: changes['GlobalSecondaryIndexUpdates'] = global_index_changes @@ -868,6 +923,7 @@ def update_table(current_table): def create_table(): table_name = module.params.get('name') + table_class = module.params.get('table_class') hash_key_name = module.params.get('hash_key_name') billing_mode = module.params.get('billing_mode') @@ -901,6 +957,8 @@ def create_table(): # SSESpecification, ) + if table_class: + params['TableClass'] = table_class if billing_mode == "PROVISIONED": params['ProvisionedThroughput'] = throughput if local_indexes: @@ -982,6 +1040,7 @@ def main(): read_capacity=dict(type='int'), write_capacity=dict(type='int'), indexes=dict(default=[], type='list', elements='dict', options=index_options), + table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), @@ -999,6 +1058,9 @@ def main(): ) client = 
module.client('dynamodb', retry_decorator=retry_decorator) + if module.params.get('table_class'): + module.require_botocore_at_least('1.23.18', reason='to set table_class') + current_table = get_dynamodb_table() changed = False table = None From a353ab3a759c5c8b7aaa9fc3c25e967144c03762 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 9 Feb 2022 17:44:52 -0500 Subject: [PATCH 379/683] add check_mode for elb_application_lb* & refactor integration tests (#894) add check_mode for elb_application_lb* & refactor integration tests SUMMARY Add check_mode support for elb_application_lb* & refactor integration tests. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_application_lb elb_application_lb_info Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Jill R Reviewed-by: Mark Woolley --- elb_application_lb.py | 252 +++++++++++++++++++++++-------------- elb_application_lb_info.py | 135 +++++++++++++++----- 2 files changed, 262 insertions(+), 125 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index 4b547ace1c2..32c0f28bd95 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -48,7 +48,7 @@ type: str deletion_protection: description: - - Indicates whether deletion protection for the ELB is enabled. + - Indicates whether deletion protection for the ALB is enabled. - Defaults to C(false). type: bool http2: @@ -62,7 +62,7 @@ type: int listeners: description: - - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys + - A list of dicts containing listeners to attach to the ALB. See examples for detail of the dict required. Note that listener keys are CamelCased. type: list elements: dict @@ -123,7 +123,7 @@ type: str purge_listeners: description: - - If C(yes), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. + - If C(yes), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter. - If the I(listeners) parameter is not set then listeners will not be modified. default: yes type: bool @@ -149,7 +149,7 @@ elements: str scheme: description: - - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. + - Internet-facing or internal load balancer. An ALB scheme can not be modified after creation. default: internet-facing choices: [ 'internet-facing', 'internal' ] type: str @@ -195,9 +195,9 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Create an ELB and attach a listener +# Create an ALB and attach a listener - community.aws.elb_application_lb: - name: myelb + name: myalb security_groups: - sg-12345678 - my-sec-group @@ -216,12 +216,12 @@ TargetGroupName: # Required. The name of the target group state: present -# Create an ELB and attach a listener with logging enabled +# Create an ALB and attach a listener with logging enabled - community.aws.elb_application_lb: access_logs_enabled: yes access_logs_s3_bucket: mybucket access_logs_s3_prefix: "logs" - name: myelb + name: myalb security_groups: - sg-12345678 - my-sec-group @@ -303,9 +303,9 @@ Type: forward state: present -# Remove an ELB +# Remove an ALB - community.aws.elb_application_lb: - name: myelb + name: myalb state: absent ''' @@ -315,27 +315,32 @@ description: The name of the S3 bucket for the access logs. 
returned: when state is present type: str - sample: mys3bucket + sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. returned: when state is present - type: str + type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. returned: when state is present type: str - sample: my/logs + sample: "my/logs" availability_zones: description: The Availability Zones for the load balancer. returned: when state is present type: list - sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. returned: when state is present type: str - sample: ABCDEF12345678 + sample: "ABCDEF12345678" +changed: + description: Whether an ALB was created/updated/deleted + returned: always + type: bool + sample: true created_time: description: The date and time the load balancer was created. returned: when state is present @@ -344,23 +349,23 @@ deletion_protection_enabled: description: Indicates whether deletion protection is enabled. returned: when state is present - type: str + type: bool sample: true dns_name: description: The public DNS name of the load balancer. returned: when state is present type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com + sample: "internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. returned: when state is present type: int sample: 60 ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. + description: The type of IP addresses used by the subnets for the load balancer. returned: when state is present type: str - sample: ipv4 + sample: "ipv4" listeners: description: Information about the listeners. returned: when state is present @@ -385,7 +390,7 @@ description: The protocol for connections from clients to the load balancer. returned: when state is present type: str - sample: HTTPS + sample: "HTTPS" certificates: description: The SSL server certificate. returned: when state is present @@ -420,22 +425,42 @@ description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" load_balancer_name: description: The name of the load balancer. returned: when state is present type: str - sample: my-elb + sample: "my-alb" routing_http2_enabled: description: Indicates whether HTTP/2 is enabled. returned: when state is present - type: str + type: bool sample: true +routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application. + returned: when state is present + type: str + sample: "defensive" +routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). 
+ returned: when state is present + type: bool + sample: false +routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite) are added to the client request before sending it to the target. + returned: when state is present + type: bool + sample: false +routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + returned: when state is present + type: bool + sample: false scheme: description: Internet-facing or internal load balancer. returned: when state is present type: str - sample: internal + sample: "internal" security_groups: description: The IDs of the security groups for the load balancer. returned: when state is present @@ -445,29 +470,35 @@ description: The state of the load balancer. returned: when state is present type: dict - sample: "{'code': 'active'}" + sample: {'code': 'active'} tags: description: The tags attached to the load balancer. returned: when state is present type: dict - sample: "{ + sample: { 'Tag': 'Example' - }" + } type: description: The type of load balancer. returned: when state is present type: str - sample: application + sample: "application" vpc_id: description: The ID of the VPC for the load balancer. returned: when state is present type: str - sample: vpc-0011223344 + sample: "vpc-0011223344" +waf_fail_open_enabled: + description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + returned: when state is present + type: bool + sample: false ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags - +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( ApplicationLoadBalancer, ELBListener, @@ -478,134 +509,170 @@ from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules -def create_or_update_elb(elb_obj): - """Create ELB or modify main attributes. json_exit here""" - if elb_obj.elb: - # ELB exists so check subnets, security groups and tags match what has been passed - +def create_or_update_alb(alb_obj): + """Create ALB or modify main attributes. 
json_exit here""" + if alb_obj.elb: + # ALB exists so check subnets, security groups and tags match what has been passed # Subnets - if not elb_obj.compare_subnets(): - elb_obj.modify_subnets() + if not alb_obj.compare_subnets(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_subnets() # Security Groups - if not elb_obj.compare_security_groups(): - elb_obj.modify_security_groups() + if not alb_obj.compare_security_groups(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_security_groups() # Tags - only need to play with tags if tags parameter has been set to something - if elb_obj.tags is not None: + if alb_obj.tags is not None: + + tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), + boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) + + # Exit on check_mode + if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) if tags_to_delete: - elb_obj.delete_tags(tags_to_delete) + alb_obj.delete_tags(tags_to_delete) # Add/update tags if tags_need_modify: - elb_obj.modify_tags() + alb_obj.modify_tags() else: # Create load balancer - elb_obj.create_elb() + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.') + alb_obj.create_elb() - # ELB attributes - elb_obj.update_elb_attributes() - elb_obj.modify_elb_attributes() + # ALB attributes + alb_obj.update_elb_attributes() + alb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) - + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() + # Exit on check_mode + if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) listener_obj.modify() listeners_obj.changed = True - # If listeners changed, mark ELB as 
changed + # If listeners changed, mark ALB as changed if listeners_obj.changed: - elb_obj.changed = True + alb_obj.changed = True # Rules of each listener for listener in listeners_obj.listeners: if 'Rules' in listener: - rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) - + rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() + # Exit on check_mode + if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + # Delete rules - if elb_obj.module.params['purge_rules']: + if alb_obj.module.params['purge_rules']: for rule in rules_to_delete: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) rule_obj.delete() - elb_obj.changed = True + alb_obj.changed = True # Add rules for rule in rules_to_add: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) rule_obj.create() - elb_obj.changed = True + alb_obj.changed = True # Modify rules for rule in rules_to_modify: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) rule_obj.modify() - elb_obj.changed = True + alb_obj.changed = True + + # Update ALB ip address type only if option has been provided + if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) - # Get the ELB again - elb_obj.update() + alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) - # Get the ELB listeners again + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') + + # Get the ALB again + alb_obj.update() + + # Get the ALB listeners again listeners_obj.update() - # Update the ELB attributes - elb_obj.update_elb_attributes() + # Update the ALB attributes + alb_obj.update_elb_attributes() # Convert to snake_case and merge in everything we want to return to the user - snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) - snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb['listeners'] = [] + snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) + snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) + snaked_alb['listeners'] = [] for listener in listeners_obj.current_listeners: # For each listener, get listener rules - listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn']) - snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + 
listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) + snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) # ip address type - snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() + snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() + + alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) - elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) +def delete_alb(alb_obj): -def delete_elb(elb_obj): + if alb_obj.elb: - if elb_obj.elb: - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') + + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) listener_obj.delete() - elb_obj.delete() + alb_obj.delete() - elb_obj.module.exit_json(changed=elb_obj.changed) + else: + + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') + + alb_obj.module.exit_json(changed=alb_obj.changed) def main(): @@ -648,7 +715,8 @@ def main(): ], required_together=[ ['access_logs_enabled', 'access_logs_s3_bucket'] - ] + ], + supports_check_mode=True, ) # Quick check of listeners parameters @@ -668,12 +736,12 @@ def main(): state = module.params.get("state") - elb = ApplicationLoadBalancer(connection, connection_ec2, module) + alb = ApplicationLoadBalancer(connection, connection_ec2, module) if state == 'present': - create_or_update_elb(elb) - else: - delete_elb(elb) + create_or_update_alb(alb) + elif state == 'absent': + delete_alb(alb) if __name__ == '__main__': diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index ddac4fe9629..d1de312df11 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -10,9 +10,9 @@ --- module: elb_application_lb_info version_added: 1.0.0 -short_description: Gather information about application ELBs in AWS +short_description: Gather information about Application Load Balancers in AWS description: - - Gather information about application ELBs in AWS + - Gather information about Application Load Balancers in AWS author: Rob White (@wimnat) options: load_balancer_arns: @@ -37,19 +37,19 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
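As a hedged orientation sketch (not part of this patch), registering the module's result and reading one value back typically looks like the tasks below — the top-level load_balancers return key and the my-alb name are assumptions for illustration:

- name: Store details of a single ALB for later use
  community.aws.elb_application_lb_info:
    names:
      - my-alb
  register: alb_info

- name: Show the DNS name of the first matching ALB
  ansible.builtin.debug:
    msg: "{{ alb_info.load_balancers[0].dns_name }}"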
-- name: Gather information about all target groups +- name: Gather information about all ALBs community.aws.elb_application_lb_info: -- name: Gather information about the target group attached to a particular ELB +- name: Gather information about a particular ALB given its ARN community.aws.elb_application_lb_info: load_balancer_arns: - - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff" + - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-alb/aabbccddeeff" -- name: Gather information about a target groups named 'tg1' and 'tg2' +- name: Gather information about ALBs named 'alb1' and 'alb2' community.aws.elb_application_lb_info: names: - - elb1 - - elb2 + - alb1 + - alb2 - name: Gather information about specific ALB community.aws.elb_application_lb_info: @@ -69,55 +69,119 @@ access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. type: str - sample: mys3bucket + sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. - type: str + type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. type: str - sample: /my/logs + sample: "my/logs" availability_zones: description: The Availability Zones for the load balancer. type: list - sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. type: str - sample: ABCDEF12345678 + sample: "ABCDEF12345678" created_time: description: The date and time the load balancer was created. type: str sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. - type: str + type: bool sample: true dns_name: description: The public DNS name of the load balancer. type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com + sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. - type: str + type: int sample: 60 ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. + description: The type of IP addresses used by the subnets for the load balancer. type: str - sample: ipv4 + sample: "ipv4" + listeners: + description: Information about the listeners. + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + type: str + sample: "" + port: + description: The port on which the load balancer is listening. + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + type: str + sample: "HTTPS" + certificates: + description: The SSL server certificate. + type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + sample: "" + default_actions: + description: The default actions for the listener. + type: complex + contains: + type: + description: The type of action. 
+ type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + type: str + sample: "" load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" load_balancer_name: description: The name of the load balancer. type: str - sample: my-elb + sample: "my-alb" + routing_http2_enabled: + description: Indicates whether HTTP/2 is enabled. + type: bool + sample: true + routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application. + type: str + sample: "defensive" + routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + type: bool + sample: false + routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite) are added to the client request before sending it to the target. + type: bool + sample: false + routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + type: bool + sample: false scheme: description: Internet-facing or internal load balancer. type: str - sample: internal + sample: "internal" security_groups: description: The IDs of the security groups for the load balancer. type: list @@ -125,21 +189,26 @@ state: description: The state of the load balancer. type: dict - sample: "{'code': 'active'}" + sample: {'code': 'active'} tags: description: The tags attached to the load balancer. type: dict - sample: "{ + sample: { 'Tag': 'Example' - }" + } type: description: The type of load balancer. type: str - sample: application + sample: "application" vpc_id: description: The ID of the VPC for the load balancer. type: str - sample: vpc-0011223344 + sample: "vpc-0011223344" + waf_fail_open_enabled: + description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets + if it is unable to forward the request to AWS WAF. 
+ type: bool + sample: false ''' try: @@ -154,12 +223,12 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -def get_elb_listeners(connection, module, elb_arn): +def get_alb_listeners(connection, module, alb_arn): try: - return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners'] + return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe elb listeners") + module.fail_json_aws(e, msg="Failed to describe alb listeners") def get_listener_rules(connection, module, listener_arn): @@ -218,17 +287,17 @@ def list_load_balancers(connection, module): module.fail_json_aws(e, msg="Failed to list load balancers") for load_balancer in load_balancers['LoadBalancers']: - # Get the attributes for each elb + # Get the attributes for each alb load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn'])) - # Get the listeners for each elb - load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn']) + # Get the listeners for each alb + load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) # For each listener, get listener rules for listener in load_balancer['listeners']: listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) - # Get ELB ip address type + # Get ALB ip address type load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) # Turn the boto3 result in to ansible_friendly_snaked_names From b057d51e9bcf37564022463f3c28b29f235fe830 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Thu, 10 Feb 2022 11:36:13 +0100 Subject: [PATCH 380/683] Extended the wafv2_web_acl module with custom_response_bodies argument (#721) Extended the wafv2_web_acl module with custom_response_bodies argument SUMMARY Extended the wafv2_web_acl module to also take the custom_response_bodies argument, improved docs and extended tests ISSUE TYPE Feature Pull Request COMPONENT NAME wafv2_web_acl ADDITIONAL INFORMATION Also touched docs of aws_waf_web_acl to make it easier to find the WAF v2 modules as I had trouble finding that at first. Reviewed-by: Markus Bergholz Reviewed-by: Stefan Horning Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- aws_waf_web_acl.py | 6 +- wafv2_web_acl.py | 144 ++++++++++++++++++++++++++++++++++++--------- 2 files changed, 119 insertions(+), 31 deletions(-) diff --git a/aws_waf_web_acl.py b/aws_waf_web_acl.py index 7cdf770aa38..609df528a0a 100644 --- a/aws_waf_web_acl.py +++ b/aws_waf_web_acl.py @@ -8,11 +8,11 @@ DOCUMENTATION = r''' module: aws_waf_web_acl -short_description: Create and delete WAF Web ACLs. +short_description: Create and delete WAF Web ACLs version_added: 1.0.0 description: - - Read the AWS documentation for WAF - U(https://aws.amazon.com/documentation/waf/). + - Module for WAF classic; for WAF v2 use the I(wafv2_*) modules. + - Read the AWS documentation for WAF U(https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html). 
author: - Mike Mochan (@mmochan) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 5306c2e047f..b11b0872b0e 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -11,9 +11,10 @@ version_added: 1.5.0 author: - "Markus Bergholz (@markuman)" -short_description: wafv2_web_acl +short_description: Create and delete WAF Web ACLs description: - - Create, modify or delete a wafv2 web acl. + - Create, modify or delete AWS WAF v2 web ACLs (not for classic WAF). + - See docs at U(https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) options: state: description: @@ -28,9 +29,9 @@ type: str scope: description: - - Scope of wafv2 web acl. + - Geographical scope of the web acl. required: true - choices: ["CLOUDFRONT","REGIONAL"] + choices: ["CLOUDFRONT", "REGIONAL"] type: str description: description: @@ -39,7 +40,7 @@ default_action: description: - Default action of the wafv2 web acl. - choices: ["Block","Allow"] + choices: ["Block", "Allow"] type: str sampled_requests: description: @@ -87,6 +88,14 @@ description: - Rule configuration. type: dict + custom_response_bodies: + description: + - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules by providing + - the key of the body dictionary element. + - Each element must have a unique dict key and in the dict two keys for I(content_type) and I(content). + - Requires botocore >= 1.21.0 + type: dict + version_added: 3.1.0 purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. @@ -100,16 +109,15 @@ ''' EXAMPLES = ''' -- name: create web acl +- name: Create test web acl community.aws.wafv2_web_acl: name: test05 - state: present description: hallo eins scope: REGIONAL default_action: Allow sampled_requests: no cloudwatch_metrics: yes - metric_name: blub + metric_name: test05-acl-metric rules: - name: zwei priority: 0 @@ -191,10 +199,56 @@ text_transformations: - type: LOWERCASE priority: 0 + purge_rules: yes tags: A: B C: D - register: out + state: present + +- name: Create IP filtering web ACL + community.aws.wafv2_web_acl: + name: ip-filtering-traffic + description: ACL that filters web traffic based on rate limits and whitelists some IPs + scope: REGIONAL + default_action: Allow + sampled_requests: yes + cloudwatch_metrics: yes + metric_name: ip-filtering-traffic + rules: + - name: whitelist-own-IPs + priority: 0 + action: + allow: {} + statement: + ip_set_reference_statement: + arn: 'arn:aws:wafv2:us-east-1:520789123123:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-whitelist-own-IPs + - name: rate-limit-per-IP + priority: 1 + action: + block: + custom_response: + response_code: 429 + custom_response_body_key: too_many_requests + statement: + rate_based_statement: + limit: 5000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-rate-limit-per-IP + purge_rules: yes + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + region: us-east-1 + state: present + ''' RETURN = """ @@ -218,6 +272,12 @@ sample: test02 returned: Always, as long as the web acl exists type: str +default_action: + description: Default action of ACL + returned: Always, as long 
as the web acl exists + sample: + allow: {} + type: dict rules: description: Current rules of the web acl returned: Always, as long as the web acl exists @@ -235,6 +295,14 @@ cloud_watch_metrics_enabled: true metric_name: admin_protect sampled_requests_enabled: true +custom_response_bodies: + description: Custom response body configurations to be used in rules + type: dict + sample: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + returned: Always, as long as the web acl exists visibility_config: description: Visibility config of the web acl returned: Always, as long as the web acl exists @@ -267,22 +335,27 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.fail_json_aws = fail_json_aws self.existing_acl, self.id, self.locktoken = self.get_web_acl() - def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name): + def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'Id': self.id, + 'DefaultAction': default_action, + 'Description': description, + 'Rules': rules, + 'VisibilityConfig': { + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': cloudwatch_metrics, + 'MetricName': metric_name + }, + 'LockToken': self.locktoken + } + + if custom_response_bodies: + req_obj['CustomResponseBodies'] = custom_response_bodies + try: - response = self.wafv2.update_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - DefaultAction=default_action, - Description=description, - Rules=rules, - VisibilityConfig={ - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - }, - LockToken=self.locktoken - ) + response = self.wafv2.update_web_acl(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") return response @@ -331,7 +404,7 @@ def get_web_acl(self): def list(self): return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) - def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description): + def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies): req_obj = { 'Name': self.name, 'Scope': self.scope, @@ -343,6 +416,9 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me 'MetricName': metric_name } } + + if custom_response_bodies: + req_obj['CustomResponseBodies'] = custom_response_bodies if description: req_obj['Description'] = description if tags: @@ -370,6 +446,7 @@ def main(): cloudwatch_metrics=dict(type='bool', default=True), metric_name=dict(type='str'), tags=dict(type='dict'), + custom_response_bodies=dict(type='dict'), purge_rules=dict(default=True, type='bool') ) @@ -392,6 +469,14 @@ def main(): purge_rules = module.params.get("purge_rules") check_mode = module.check_mode + custom_response_bodies = module.params.get("custom_response_bodies") + if custom_response_bodies: + module.require_botocore_at_least('1.21.0', reason='to set custom response bodies') + custom_response_bodies = {} + + for custom_name, body in module.params.get("custom_response_bodies").items(): + custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True) + if default_action == 
'Block': default_action = {'Block': {}} elif default_action == 'Allow': @@ -422,7 +507,8 @@ def main(): rules, sampled_requests, cloudwatch_metrics, - metric_name + metric_name, + custom_response_bodies ) else: @@ -438,7 +524,8 @@ def main(): cloudwatch_metrics, metric_name, tags, - description + description, + custom_response_bodies ) elif state == 'absent': @@ -453,7 +540,8 @@ def main(): rules, sampled_requests, cloudwatch_metrics, - metric_name + metric_name, + custom_response_bodies ) else: change = True From 7712fdb8f54570d77f53c55b3320df0c4367d4a0 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Thu, 10 Feb 2022 12:26:27 +0000 Subject: [PATCH 381/683] Refactor iam_managed_policy module and add integration tests (#893) Refactor iam_managed_policy module and add integration tests SUMMARY Refactor iam_managed_policy module to: Improve AWS retry backoff logic Add check_mode support Fix module exit on updates to policies when no changes are present Other changes: Add disabled integration tests ISSUE TYPE Bugfix Pull Request COMPONENT NAME iam_managed_policy ADDITIONAL INFORMATION Backoff logic only partially covered the module, and it didn't support check_mode or have any integration tests. Due to the nature of the IAM based modules the tests are intentionally disabled but have been run locally: ansible-test integration iam_managed_policy --allow-unsupported --docker PLAY RECAP ********************************************************************* testhost : ok=20 changed=6 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 AWS ACTIONS: ['iam:CreatePolicy', 'iam:CreatePolicyVersion', 'iam:DeletePolicy', 'iam:DeletePolicyVersion', 'iam:GetPolicy', 'iam:GetPolicyVersion', 'iam:ListEntitiesForPolicy', 'iam:ListPolicies', 'iam:ListPolicyVersions', 'iam:SetDefaultPolicyVersion'] Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- iam_managed_policy.py | 201 ++++++++++++++++++++++++------------------ 1 file changed, 114 insertions(+), 87 deletions(-) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 2b33d711e71..403b4720d50 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam_managed_policy version_added: 1.0.0 @@ -55,7 +55,7 @@ - amazon.aws.ec2 ''' -EXAMPLES = ''' +EXAMPLES = r''' # Create Policy ex nihilo - name: Create IAM Managed Policy community.aws.iam_managed_policy: @@ -107,11 +107,12 @@ state: absent ''' -RETURN = ''' +RETURN = r''' policy: description: Returns the policy json structure, when state == absent this will return the value of the removed policy. 
returned: success - type: str + type: complex + contains: {} sample: '{ "arn": "arn:aws:iam::aws:policy/AdministratorAccess " "attachment_count": 0, @@ -142,14 +143,14 @@ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_policies_with_backoff(iam): - paginator = iam.get_paginator('list_policies') +def list_policies_with_backoff(): + paginator = client.get_paginator('list_policies') return paginator.paginate(Scope='Local').build_full_result() -def get_policy_by_name(module, iam, name): +def get_policy_by_name(name): try: - response = list_policies_with_backoff(iam) + response = list_policies_with_backoff() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policies") for policy in response['Policies']: @@ -158,32 +159,36 @@ def get_policy_by_name(module, iam, name): return None -def delete_oldest_non_default_version(module, iam, policy): +def delete_oldest_non_default_version(policy): try: - versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] if not v['IsDefaultVersion']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") versions.sort(key=lambda v: v['CreateDate'], reverse=True) for v in versions[-1:]: try: - iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") # This needs to return policy_version, changed -def get_or_create_policy_version(module, iam, policy, policy_document): +def get_or_create_policy_version(policy, policy_document): try: - versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") + for v in versions: try: - document = iam.get_policy_version(PolicyArn=policy['Arn'], - VersionId=v['VersionId'])['PolicyVersion']['Document'] + document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) + + if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): + return v, True + # If the current policy matches the existing one if not compare_policies(document, json.loads(to_native(policy_document))): return v, False @@ -195,12 +200,12 @@ def get_or_create_policy_version(module, iam, policy, policy_document): # and if that doesn't work, delete the oldest non default policy version # and try again. 
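# A note on why this retry works: IAM keeps at most five versions of a
# managed policy, so once that limit is reached create_policy_version
# raises LimitExceeded; deleting the oldest non-default version frees a
# slot, after which the create call below is retried exactly once.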
try: - version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True except is_boto3_error_code('LimitExceeded'): - delete_oldest_non_default_version(module, iam, policy) + delete_oldest_non_default_version(policy) try: - version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: module.fail_json_aws(second_e, msg="Couldn't create policy version") @@ -208,58 +213,132 @@ def get_or_create_policy_version(module, iam, policy, policy_document): module.fail_json_aws(e, msg="Couldn't create policy version") -def set_if_default(module, iam, policy, policy_version, is_default): +def set_if_default(policy, policy_version, is_default): if is_default and not policy_version['IsDefaultVersion']: try: - iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) + client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't set default policy version") return True return False -def set_if_only(module, iam, policy, policy_version, is_only): +def set_if_only(policy, policy_version, is_only): if is_only: try: - versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[ + versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[ 'Versions'] if not v['IsDefaultVersion']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") return len(versions) > 0 return False -def detach_all_entities(module, iam, policy, **kwargs): +def detach_all_entities(policy, **kwargs): try: - entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) + entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy['PolicyName'])) for g in entities['PolicyGroups']: try: - iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) + client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) for u in entities['PolicyUsers']: try: - iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) + client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) for r in 
entities['PolicyRoles']: try: - iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) + client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) if entities['IsTruncated']: - detach_all_entities(module, iam, policy, marker=entities['Marker']) + detach_all_entities(policy, marker=entities['Marker']) + + +def create_or_update_policy(existing_policy): + name = module.params.get('policy_name') + description = module.params.get('policy_description') + default = module.params.get('make_default') + only = module.params.get('only_version') + + policy = None + + if module.params.get('policy') is not None: + policy = json.dumps(json.loads(module.params.get('policy'))) + + if existing_policy is None: + if module.check_mode: + module.exit_json(changed=True) + + # Create policy when none already exists + try: + rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) + + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) + else: + policy_version, changed = get_or_create_policy_version(existing_policy, policy) + changed = set_if_default(existing_policy, policy_version, default) or changed + changed = set_if_only(existing_policy, policy_version, only) or changed + + # If anything has changed we need to refresh the policy + if changed: + try: + updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Couldn't get policy") + + module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy)) + else: + module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy)) + + +def delete_policy(existing_policy): + # Check for existing policy + if existing_policy: + if module.check_mode: + module.exit_json(changed=True) + + # Detach policy + detach_all_entities(existing_policy) + # Delete Versions + try: + versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") + for v in versions: + if not v['IsDefaultVersion']: + try: + client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) + # Delete policy + try: + client.delete_policy(PolicyArn=existing_policy['Arn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName'])) + + # This is the one case where we will return the old policy + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) + else: + module.exit_json(changed=False, policy=None) def main(): + global module + global client + argument_spec = dict( policy_name=dict(required=True), policy_description=dict(default=''), @@ -273,75 +352,23 @@ def main(): module = AnsibleAWSModule( 
argument_spec=argument_spec, required_if=[['state', 'present', ['policy']]], + supports_check_mode=True ) name = module.params.get('policy_name') - description = module.params.get('policy_description') state = module.params.get('state') - default = module.params.get('make_default') - only = module.params.get('only_version') - - policy = None - - if module.params.get('policy') is not None: - policy = json.dumps(json.loads(module.params.get('policy'))) try: - iam = module.client('iam') + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') - p = get_policy_by_name(module, iam, name) - if state == 'present': - if p is None: - # No Policy so just create one - try: - rvalue = iam.create_policy(PolicyName=name, Path='/', - PolicyDocument=policy, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) - - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) - else: - policy_version, changed = get_or_create_policy_version(module, iam, p, policy) - changed = set_if_default(module, iam, p, policy_version, default) or changed - changed = set_if_only(module, iam, p, policy_version, only) or changed - # If anything has changed we needto refresh the policy - if changed: - try: - p = iam.get_policy(PolicyArn=p['Arn'])['Policy'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Couldn't get policy") + existing_policy = get_policy_by_name(name) - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p)) + if state == 'present': + create_or_update_policy(existing_policy) else: - # Check for existing policy - if p: - # Detach policy - detach_all_entities(module, iam, p) - # Delete Versions - try: - versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - if not v['IsDefaultVersion']: - try: - iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) - # Delete policy - try: - iam.delete_policy(PolicyArn=p['Arn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(p['PolicyName'])) - - # This is the one case where we will return the old policy - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p)) - else: - module.exit_json(changed=False, policy=None) -# end main + delete_policy(existing_policy) if __name__ == '__main__': From 8e0f2fafbcc69e4ab1b56ec50be9c46e498c8333 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 16 Feb 2022 12:04:20 -0500 Subject: [PATCH 382/683] Stabilize ec2_eip module (#936) Stabilize ec2_eip module SUMMARY fixed check_mode issues added integration tests for check_mode / idempotency updated json returned when state = absent for clarity removed json_query references fixes #159 Depends-On: ansible-collections/amazon.aws#672 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_eip 
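ADDITIONAL INFORMATION A minimal sketch (illustrative only; the instance ID is a made-up placeholder) of the allocate/release round-trip that the new check_mode handling and the boolean disassociated/released return values are meant to cover:

- name: Allocate an EIP and associate it with an instance (safe under --check)
  community.aws.ec2_eip:
    device_id: i-0123456789abcdef0
    state: present
  register: eip

- name: Release the address again; returns boolean disassociated/released flags
  community.aws.ec2_eip:
    public_ip: "{{ eip.public_ip }}"
    state: absent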
Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Jill R --- ec2_eip.py | 54 ++++++++++++++++++++++++++++++++----------------- ec2_eip_info.py | 2 +- 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/ec2_eip.py b/ec2_eip.py index ca883e5f715..e0031eaf10a 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -27,8 +27,8 @@ public_ip: description: - The IP address of a previously allocated EIP. - - When I(public_ip=present) and device is specified, the EIP is associated with the device. - - When I(public_ip=absent) and device is specified, the EIP is disassociated from the device. + - When I(state=present) and device is specified, the EIP is associated with the device. + - When I(state=absent) and device is specified, the EIP is disassociated from the device. aliases: [ ip ] type: str state: @@ -328,7 +328,7 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): except is_boto3_error_code('InvalidAddress.NotFound') as e: # If we're releasing and we can't find it, it's already gone... if module.params.get('state') == 'absent': - module.exit_json(changed=False) + module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") addresses = addresses["Addresses"] @@ -385,6 +385,8 @@ def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True try: + if check_mode: + return None, True result = ec2.allocate_address(Domain=domain, aws_retry=True), True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") @@ -493,8 +495,11 @@ def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True) def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): - # type: (EC2Connection, str, bool, str) -> Address + # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address """ Overrides botocore's allocate_address function to support BYOIP """ + if check_mode: + return None + params = {} if domain is not None: @@ -503,9 +508,6 @@ def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool if public_ipv4_pool is not None: params['PublicIpv4Pool'] = public_ipv4_pool - if check_mode: - params['DryRun'] = 'true' - try: result = ec2.allocate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -606,19 +608,33 @@ def main(): reuse_existing_ip_allowed, allow_reassociation, module.check_mode, is_instance=is_instance ) + if 'allocation_id' not in result: + # Don't check tags on check_mode here - no EIP to pass through + module.exit_json(**result) else: if address: - changed = False + result = { + 'changed': False, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } else: address, changed = allocate_address( ec2, module, domain, reuse_existing_ip_allowed, module.check_mode, tag_dict, public_ipv4_pool ) - result = { - 'changed': changed, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] - } + if address: + result = { + 'changed': changed, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } + else: + # Don't check tags on check_mode here - no EIP to pass through + result = { + 'changed': changed + } + 
module.exit_json(**result) result['changed'] |= ensure_ec2_tags( ec2, module, result['allocation_id'], @@ -633,21 +649,21 @@ def main(): released = release_address(ec2, module, address, module.check_mode) result = { 'changed': True, - 'disassociated': disassociated, - 'released': released + 'disassociated': disassociated['changed'], + 'released': released['changed'] } else: result = { 'changed': disassociated['changed'], - 'disassociated': disassociated, - 'released': {'changed': False} + 'disassociated': disassociated['changed'], + 'released': False } else: released = release_address(ec2, module, address, module.check_mode) result = { 'changed': released['changed'], - 'disassociated': {'changed': False}, - 'released': released + 'disassociated': False, + 'released': released['changed'] } except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: diff --git a/ec2_eip_info.py b/ec2_eip_info.py index 4f560429e12..31d8145742b 100644 --- a/ec2_eip_info.py +++ b/ec2_eip_info.py @@ -44,7 +44,7 @@ register: my_vm_eips - ansible.builtin.debug: - msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}" + msg: "{{ my_vm_eips.addresses | selectattr('private_ip_address', 'equalto', '10.0.0.5') }}" - name: List all EIP addresses for several VMs. community.aws.ec2_eip_info: From 3fd431889f2e894cd018721b6bc0977338169300 Mon Sep 17 00:00:00 2001 From: Francesc Navarro Date: Wed, 23 Feb 2022 21:35:13 +0100 Subject: [PATCH 383/683] Awsretry/cloudfront distribution (#297) Awsretry/cloudfront distribution SUMMARY Adding AWSRetry.exponential_backoff when updating a cloudfront distribution. Fixes #296 ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution Reviewed-by: matej Reviewed-by: Mark Chappell Reviewed-by: Francesc Navarro Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- cloudfront_distribution.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 946b93e2041..486da18b461 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1378,7 +1378,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager from ansible.module_utils.common.dict_transformations import recursive_diff -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict import datetime @@ -1433,7 +1433,7 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): def create_distribution(client, module, config, tags): try: if not tags: - return client.create_distribution(DistributionConfig=config)['Distribution'] + return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution'] else: distribution_config_with_tags = { 'DistributionConfig': config, @@ -1441,42 +1441,42 @@ def create_distribution(client, module, config, tags): 'Items': tags } } - return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] + return 
client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating distribution") def delete_distribution(client, module, distribution): try: - return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) + return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) def update_distribution(client, module, config, distribution_id, e_tag): try: - return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] + return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) def tag_resource(client, module, arn, tags): try: - return client.tag_resource(Resource=arn, Tags=dict(Items=tags)) + return client.tag_resource(aws_retry=True, Resource=arn, Tags=dict(Items=tags)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error tagging resource") def untag_resource(client, module, arn, tag_keys): try: - return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys)) + return client.untag_resource(aws_retry=True, Resource=arn, TagKeys=dict(Items=tag_keys)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error untagging resource") def list_tags_for_resource(client, module, arn): try: - response = client.list_tags_for_resource(Resource=arn) + response = client.list_tags_for_resource(aws_retry=True, Resource=arn) return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error listing tags for resource") @@ -2152,7 +2152,7 @@ def main(): ] ) - client = module.client('cloudfront') + client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) validation_mgr = CloudFrontValidationManager(module) From f526547a42b092df2fbbdac8297f0a40e4d5942d Mon Sep 17 00:00:00 2001 From: greenflowers <46848026+greenflowers@users.noreply.github.com> Date: Thu, 24 Feb 2022 10:10:37 +0000 Subject: [PATCH 384/683] Remove string of iam_managed_policy module docs (#952) Remove string of iam_managed_policy module docs SUMMARY Remove "ex nihilo" docs of iam_managed_policy module ISSUE TYPE Docs Pull Request COMPONENT NAME iam_managed_policy Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- iam_managed_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 403b4720d50..4c02054db21 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -56,7 +56,7 @@ ''' EXAMPLES = r''' -# Create Policy ex nihilo +# Create a policy - name: Create IAM Managed Policy community.aws.iam_managed_policy: policy_name: "ManagedPolicy" From aaed7863cd19f60b37aaf49a7b2b6d3eaf76dbe3 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Sat, 26 Feb 2022 
03:22:04 -0800 Subject: [PATCH 385/683] ec2_asg: Add functionality to detach specified instances from ASG (#933) ec2_asg: Add functionality to detach specified instances from ASG SUMMARY Adds feature to detach specified instances from a AutoScalingGroup rather than terminating them directly. Detached instances are not terminated and can be managed independently. Implements #649 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_asg ADDITIONAL INFORMATION Makes use of https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/autoscaling.html#AutoScaling.Client.detach_instances Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Jill R Reviewed-by: Joseph Torcasso --- ec2_asg.py | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 3 deletions(-) diff --git a/ec2_asg.py b/ec2_asg.py index 46cdcbf15b8..8dc7cd783f2 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -182,6 +182,21 @@ matching the current launch configuration. type: list elements: str + detach_instances: + description: + - Removes one or more instances from the specified AutoScalingGroup. + - If I(decrement_desired_capacity) flag is not set, new instance(s) are launched to replace the detached instance(s). + - If a Classic Load Balancer is attached to the AutoScalingGroup, the instances are also deregistered from the load balancer. + - If there are target groups attached to the AutoScalingGroup, the instances are also deregistered from the target groups. + type: list + elements: str + version_added: 3.2.0 + decrement_desired_capacity: + description: + - Indicates whether the AutoScalingGroup decrements the desired capacity value by the number of instances detached. + default: false + type: bool + version_added: 3.2.0 lc_check: description: - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config). 
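To make the detach workflow documented above concrete, a minimal playbook sketch of how the two new options combine (the group name, launch configuration name and instance IDs are illustrative placeholders, not values taken from this patch):

- name: Detach two instances from an Auto Scaling group without replacing them
  community.aws.ec2_asg:
    name: my-asg                             # hypothetical ASG name
    launch_config_name: my-launch-config     # detach only runs when a launch config or template is set
    detach_instances:
      - i-0123456789abcdef0
      - i-0fedcba9876543210
    decrement_desired_capacity: true         # shrink desired capacity instead of launching replacements
  register: asg_detach

Per the detach() implementation below, the module fails up front if decrementing would push the desired capacity under the group's min_size, so lower min_size first when detaching many instances at once.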
@@ -756,6 +771,12 @@ def terminate_asg_instance(connection, instance_id, decrement_capacity): ShouldDecrementDesiredCapacity=decrement_capacity) +@AWSRetry.jittered_backoff(**backoff_params) +def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity): + connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, + ShouldDecrementDesiredCapacity=decrement_capacity) + + def enforce_required_arguments_for_create(): ''' As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce @@ -1523,6 +1544,40 @@ def replace(connection): return changed, asg_properties +def detach(connection): + group_name = module.params.get('name') + detach_instances = module.params.get('detach_instances') + as_group = describe_autoscaling_groups(connection, group_name)[0] + decrement_desired_capacity = module.params.get('decrement_desired_capacity') + min_size = module.params.get('min_size') + props = get_properties(as_group) + instances = props['instances'] + + # check if provided instance exists in asg, create list of instances to detach which exist in asg + instances_to_detach = [] + for instance_id in detach_instances: + if instance_id in instances: + instances_to_detach.append(instance_id) + + # check if setting decrement_desired_capacity will make desired_capacity smaller + # than the currently set minimum size in ASG configuration + if decrement_desired_capacity: + decremented_desired_capacity = len(instances) - len(instances_to_detach) + if min_size and min_size > decremented_desired_capacity: + module.fail_json( + msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ + which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size)) + + if instances_to_detach: + try: + detach_asg_instances(connection, instances_to_detach, group_name, decrement_desired_capacity) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to detach instances from AutoScaling Group") + + asg_properties = get_properties(as_group) + return True, asg_properties + + def get_instances_by_launch_config(props, lc_check, initial_instances): new_instances = [] old_instances = [] @@ -1776,6 +1831,8 @@ def main(): replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[], elements='str'), + detach_instances=dict(type='list', default=[], elements='str'), + decrement_desired_capacity=dict(type='bool', default=False), lc_check=dict(type='bool', default=True), lt_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), @@ -1821,16 +1878,18 @@ def main(): argument_spec=argument_spec, mutually_exclusive=[ ['replace_all_instances', 'replace_instances'], - ['launch_config_name', 'launch_template'] + ['replace_all_instances', 'detach_instances'], + ['launch_config_name', 'launch_template'], ] ) state = module.params.get('state') replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') + detach_instances = module.params.get('detach_instances') connection = module.client('autoscaling') - changed = create_changed = replace_changed = False + changed = create_changed = replace_changed = detach_changed = False exists = asg_exists(connection) if state == 'present': @@ 
-1847,7 +1906,15 @@ def main(): ): replace_changed, asg_properties = replace(connection) - if create_changed or replace_changed: + # Only detach instances if asg existed at start of call + if ( + exists + and (detach_instances) + and (module.params.get('launch_config_name') or module.params.get('launch_template')) + ): + detach_changed, asg_properties = detach(connection) + + if create_changed or replace_changed or detach_changed: changed = True module.exit_json(changed=changed, **asg_properties) From 7190bdb25d732d8cfd9cedcc072adcf551267c1c Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Tue, 1 Mar 2022 11:55:31 +0100 Subject: [PATCH 386/683] add missing documentation (#945) cloudfront_distribution: add missing documentation SUMMARY Closes #877 The modul resprects this parameter already. ISSUE TYPE Docs Pull Request COMPONENT NAME cloudfront_distribution Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- cloudfront_distribution.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 486da18b461..332298a8fcd 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -200,6 +200,10 @@ - The ID of the origin that you want CloudFront to route requests to by default. type: str + response_headers_policy_id: + description: + - The ID of the header policy that CloudFront adds to responses that it sends to viewers. + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. @@ -317,6 +321,10 @@ - The ID of the origin that you want CloudFront to route requests to by default. type: str + response_headers_policy_id: + description: + - The ID of the header policy that CloudFront adds to responses that it sends to viewers. + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. From 3283aaa507e60e1c0c337de4dba0e48858f1e2de Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 2 Mar 2022 12:13:44 +0100 Subject: [PATCH 387/683] [4.0.0] Bump minimum botocore version to 1.20.0 (#956) [4.0.0] Bump minimum botocore version to 1.20.0 SUMMARY With the next major version we can bump botocore/boto3 again. Since 1.20.0 is now over a year old, we can bump the minimum version in preparation for 4.0.0. CI should still test backports against the relevant versions for the backported release. 1.20.0 was released 2021-02-02. 1.21.0 was released 2021-07-15, hopefully we'll release 4.0.0 before July. Should we release after mid-July we can always bump again. ( Follow up to ansible-collections/amazon.aws#692 ) ISSUE TYPE Feature Pull Request COMPONENT NAME requirements.txt ADDITIONAL INFORMATION botocore] $ git show 1.20.0 tag 1.20.0 Tagger: aws-sdk-python-automation Date: Tue Feb 2 19:11:44 2021 +0000 Tagging 1.20.0 release. commit b7d27dc39aea82e22e2c11443fbd02a4904367cd (tag: 1.20.0) Merge: cc497a593 27ebea65f Author: aws-sdk-python-automation Date: Tue Feb 2 19:11:44 2021 +0000 Merge branch 'release-1.20.0' * release-1.20.0: Bumping version to 1.20.0 Update to latest models Add changelog for custom endpoints and ARN resources Allow custom endpoints when addressing ARN resources Add changes for Python 3.4/3.5 removal Fall back to Transfer-Encoding 'chunked' if AWSRequest body is not seekable stream boto3] $ git show 1.17.0 tag 1.17.0 Tagger: aws-sdk-python-automation Date: Tue Feb 2 19:11:35 2021 +0000 Tagging 1.17.0 release. 
commit 1a35ed1ab41f967ea43420650075f2693cbbe08b (tag: 1.17.0) Merge: d77d24f9 35454bc5 Author: aws-sdk-python-automation Date: Tue Feb 2 19:11:35 2021 +0000 Merge branch 'release-1.17.0' * release-1.17.0: Bumping version to 1.17.0 Add changelog entries from botocore Add S3 VPCE examples Add changes for Python 3.4/3.5 removal Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- aws_msk_cluster.py | 2 -- ec2_launch_template.py | 12 ++++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index d6cf35d3ba3..320b867680b 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -50,7 +50,6 @@ instance_type: description: - The type of Amazon EC2 instances to use for Kafka brokers. - - Update operation requires botocore version >= 1.19.58. choices: - kafka.t3.small - kafka.m5.large @@ -520,7 +519,6 @@ def create_or_update_cluster(client, module): } }, "broker_type": { - "botocore_version": "1.19.58", "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], "target_value": module.params.get("instance_type"), "update_params": { diff --git a/ec2_launch_template.py b/ec2_launch_template.py index fab3c4100bd..bccc79eddb7 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -358,7 +358,7 @@ type: str description: > - Wether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). - - Requires boto3 >= 1.18.29 + - Requires botocore >= 1.21.29 choices: [enabled, disabled] default: 'disabled' instance_metadata_tags: @@ -366,7 +366,7 @@ type: str description: - Wether the instance tags are availble (C(enabled)) via metadata endpoint or not (C(disabled)). - - Requires boto3 >= 1.20.30 + - Requires botocore >= 1.23.30 choices: [enabled, disabled] default: 'disabled' ''' @@ -534,18 +534,18 @@ def create_or_update(module, template_options): lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) if lt_data.get('MetadataOptions'): - if not module.boto3_at_least('1.20.30'): + if not module.botocore_at_least('1.23.30'): # fail only if enabled is requested if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': - module.require_boto3_at_least('1.20.30', reason='to set instance_metadata_tags') + module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') # pop if it's not requested to keep backwards compatibility. # otherwise the modules failes because parameters are set due default values lt_data['MetadataOptions'].pop('InstanceMetadataTags') - if not module.boto3_at_least('1.18.29'): + if not module.botocore_at_least('1.21.29'): # fail only if enabled is requested if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': - module.require_boto3_at_least('1.18.29', reason='to set http_protocol_ipv6') + module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') # pop if it's not requested to keep backwards compatibility. # otherwise the modules failes because parameters are set due default values lt_data['MetadataOptions'].pop('HttpProtocolIpv6') From eee3703fa5bf4bdee2fd63c1aa26a94533b10710 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 10 Mar 2022 18:52:45 -0800 Subject: [PATCH 388/683] ec2_asg: Add purge_tags to AutoScalingGroups. (#960) ec2_asg: Add purge_tags to AutoScalingGroups. SUMMARY Add purge_tags to ec2_asg module. Fixes #481. 
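As a usage illustration of the purge_tags semantics this summary describes (group name and tag values are hypothetical): with purge_tags enabled, the tag list in the task becomes authoritative and any other tags on the group are deleted.

- name: Enforce an exact tag set on an existing Auto Scaling group
  community.aws.ec2_asg:
    name: my-asg                  # hypothetical ASG name
    purge_tags: true              # delete any tags not listed under 'tags'
    tags:
      - environment: production   # ec2_asg tags are a list of single-key dicts
        propagate_at_launch: true
  register: asg_tags

The default of purge_tags=true follows the convention used by other modules in the collection.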
ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_asg ADDITIONAL INFORMATION There was another PR (currently closed) #482 - with similar functionality but I'm not sure if having modules to handle tags for individual services/modules is a good way to have this functionality. It will certainly cause increase in number of modules. Hence tried modifying existing ec2_asg module to be able to do this. This utilizes underlying API calls to: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeTags.html https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateOrUpdateTags.html https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DeleteTags.html Reviewed-by: Alina Buzachis Reviewed-by: Jill R Reviewed-by: Mandar Kulkarni --- ec2_asg.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/ec2_asg.py b/ec2_asg.py index 8dc7cd783f2..fa91232cbe6 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -220,6 +220,13 @@ - When I(propagate_at_launch) is true the tags will be propagated to the Instances created. type: list elements: dict + purge_tags: + description: + - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + - If the I(tags) parameter is not set then tags will not be modified. + default: true + type: bool + version_added: 3.2.0 health_check_period: description: - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. @@ -645,6 +652,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', @@ -1097,6 +1105,7 @@ def create_autoscaling_group(connection): desired_capacity = module.params.get('desired_capacity') vpc_zone_identifier = module.params.get('vpc_zone_identifier') set_tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') default_cooldown = module.params.get('default_cooldown') @@ -1205,9 +1214,12 @@ def create_autoscaling_group(connection): changed = True # process tag changes + have_tags = as_group.get('Tags') + want_tags = asg_tags + if purge_tags and not want_tags and have_tags: + connection.delete_tags(Tags=list(have_tags)) + if len(set_tags) > 0: - have_tags = as_group.get('Tags') - want_tags = asg_tags if have_tags: have_tags.sort(key=lambda x: x["Key"]) if want_tags: @@ -1218,9 +1230,11 @@ def create_autoscaling_group(connection): for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals): changed = True - dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'], - ResourceType='auto-scaling-group', Key=dead_tag)) + if purge_tags: + dead_tags.append(dict( + ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag)) have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag] + if dead_tags: connection.delete_tags(Tags=dead_tags) @@ -1838,6 +1852,7 @@ def main(): wait_timeout=dict(type='int', default=300), 
state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list', default=[], elements='dict'), + purge_tags=dict(type='bool', default=True), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), From 88251e915c8f42fe3164e075616af9375c534859 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Fri, 11 Mar 2022 04:33:02 -0500 Subject: [PATCH 389/683] elb_target_group - support target_type alb (#966) elb_target_group - support target_type alb SUMMARY Add support for target_type alb and integration tests Update documentation for clarity Fixes #891 ISSUE TYPE Feature Pull Request COMPONENT NAME elb_target_group Reviewed-by: Mark Woolley Reviewed-by: Mandar Kulkarni Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- elb_target_group.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 20e9c2b19da..229e2129bfe 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -76,13 +76,14 @@ type: str port: description: - - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. Required if - I(state) is C(present). + - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. + - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb). required: false type: int protocol: description: - - The protocol to use for routing traffic to the targets. Required when I(state) is C(present). + - The protocol to use for routing traffic to the targets. + - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb). required: false choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] type: str @@ -141,15 +142,16 @@ target_type: description: - The type of target that you must specify when registering targets with this target group. The possible values are - C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address) or C(lambda) (target is specified by ARN). - Note that you can't specify targets for a target group using more than one type. Target type lambda only accept one target. When more than + C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address), C(lambda) (target is specified by ARN), + or C(alb) (target is specified by ARN). + Note that you can't specify targets for a target group using more than one type. Target types lambda and alb only accept one target. When more than one target is specified, only the first one is used. All additional targets are ignored. If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses. - The default behavior is C(instance). required: false - choices: ['instance', 'ip', 'lambda'] + choices: ['instance', 'ip', 'lambda', 'alb'] type: str targets: description: @@ -165,7 +167,8 @@ type: int vpc_id: description: - - The identifier of the virtual private cloud (VPC). Required when I(state) is C(present). + - The identifier of the virtual private cloud (VPC). 
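To illustrate the new C(alb) target type this patch adds (a target group whose target is an Application Load Balancer, specified by ARN), a hedged sketch; the name, VPC ID and ARN below are placeholders:

- name: Create a target group whose single target is an ALB
  community.aws.elb_target_group:
    name: my-alb-tg                    # hypothetical
    target_type: alb
    protocol: tcp                      # protocol, port and vpc_id are required for target_type alb
    port: 80
    vpc_id: vpc-0123456789abcdef0      # hypothetical
    targets:
      - Id: "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/0123456789abcdef"  # placeholder ARN
    state: present

As the documentation above notes, C(alb) (like C(lambda)) target groups accept exactly one target; any additional entries are ignored.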
+ - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb). required: false type: str preserve_client_ip_enabled: @@ -891,7 +894,7 @@ def main(): state=dict(required=True, choices=['present', 'absent']), successful_response_codes=dict(), tags=dict(default={}, type='dict'), - target_type=dict(choices=['instance', 'ip', 'lambda']), + target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']), targets=dict(type='list', elements='dict'), unhealthy_threshold_count=dict(type='int'), vpc_id=dict(), @@ -905,6 +908,7 @@ def main(): required_if=[ ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], + ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], ] ) From 82804e5bff97b70ad810554b80333346132c0ee8 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Mon, 14 Mar 2022 15:21:40 -0400 Subject: [PATCH 390/683] elb_application_lb - treat empty security group as VPC default (#971) elb_application_lb - treat empty security group as VPC default SUMMARY Fixes idempotency issue when security_groups = [] by treating [] as using the VPC's default security group (like it does on creation). Fixes #28 Used same logic as amazon.aws.ec2_vpc_route_table does for using default igw Added integration tests ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_application_lb Reviewed-by: Jill R Reviewed-by: Mark Woolley --- elb_application_lb.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index 32c0f28bd95..448eba4c1aa 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -144,7 +144,7 @@ description: - A list of the names or IDs of the security groups to assign to the load balancer. - Required if I(state=present). - default: [] + - If C([]), the VPC's default security group will be used. type: list elements: str scheme: @@ -494,10 +494,16 @@ type: bool sample: false ''' +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( ApplicationLoadBalancer, @@ -509,6 +515,29 @@ from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules +@AWSRetry.jittered_backoff() +def describe_sgs_with_backoff(connection, **params): + paginator = connection.get_paginator('describe_security_groups') + return paginator.paginate(**params).build_full_result()['SecurityGroups'] + + +def find_default_sg(connection, module, vpc_id): + """ + Finds the default security group for the given VPC ID. 
+ """ + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'}) + try: + sg = describe_sgs_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id)) + if len(sg) == 1: + return sg[0]['GroupId'] + elif len(sg) == 0: + module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id)) + else: + module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) + + def create_or_update_alb(alb_obj): """Create ALB or modify main attributes. json_exit here""" if alb_obj.elb: @@ -738,6 +767,11 @@ def main(): alb = ApplicationLoadBalancer(connection, connection_ec2, module) + # Update security group if default is specified + if alb.elb and module.params.get('security_groups') == []: + module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])] + alb = ApplicationLoadBalancer(connection, connection_ec2, module) + if state == 'present': create_or_update_alb(alb) elif state == 'absent': From 6586fa9ad80d0bee318d500256d49bae4d4526b2 Mon Sep 17 00:00:00 2001 From: Wojciech Inglot Date: Mon, 14 Mar 2022 21:08:35 +0100 Subject: [PATCH 391/683] redshift_info - fix invalid import path for botocore exceptions (#970) redshift_info - fix invalid import path for botocore exceptions Depends-On: #979 SUMMARY Fix invalid import path for botocore exceptions Fixes #968 ISSUE TYPE Bugfix Pull Request COMPONENT NAME redshift_info Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- redshift_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/redshift_info.py b/redshift_info.py index b79b28b3074..a6a8a578a37 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -277,7 +277,7 @@ import re try: - from botocore.exception import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # caught by AnsibleAWSModule From a353730e9449d28d68b75784d96cdc137b7eb995 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Tue, 15 Mar 2022 09:06:05 +0100 Subject: [PATCH 392/683] Fix documentation about force_update_password in rds_instance module (#957) Fix documentation about force_update_password in rds_instance module SUMMARY Wrong name used in docs. ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/rds_instance.py Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis --- rds_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rds_instance.py b/rds_instance.py index 742a7266c5e..4a1086f24ec 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -225,7 +225,7 @@ master_user_password: description: - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except "/", """, or "@". To modify the password use I(force_password_update). Use I(apply immediately) to change + except "/", """, or "@". To modify the password use I(force_update_password). Use I(apply immediately) to change the password immediately, otherwise it is updated during the next maintenance window. 
aliases: - password From 7af46c8e548b106adeaa4c15630f193a066bd0fc Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Tue, 15 Mar 2022 11:52:43 +0000 Subject: [PATCH 393/683] Add backoff logic to elb_application_lb_info (#977) Add backoff logic to elb_application_lb_info SUMMARY From time to time rate limiting failures occur on the usage of this module, this PR adds backoff logic to the module to improve its stability. fatal: [127.0.0.1 -> 127.0.0.1]: FAILED! => changed=false boto3_version: 1.20.34 botocore_version: 1.23.34 error: code: Throttling message: Rate exceeded type: Sender msg: 'Failed to list load balancers: An error occurred (Throttling) when calling the DescribeLoadBalancers operation (reached max retries: 4): Rate exceeded' response_metadata: http_headers: content-length: '271' content-type: text/xml date: Thu, 10 Mar 2022 10:34:23 GMT x-amzn-requestid: xxxxx http_status_code: 400 max_attempts_reached: true request_id: xxxxx retry_attempts: 4 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_application_lb_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- elb_application_lb_info.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index d1de312df11..dbd4b7e0ab6 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -220,7 +220,13 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict + + +@AWSRetry.jittered_backoff() +def get_paginator(connection, **kwargs): + paginator = connection.get_paginator('describe_load_balancers') + return paginator.paginate(**kwargs).build_full_result() def get_alb_listeners(connection, module, alb_arn): @@ -274,13 +280,12 @@ def list_load_balancers(connection, module): names = module.params.get("names") try: - load_balancer_paginator = connection.get_paginator('describe_load_balancers') if not load_balancer_arns and not names: - load_balancers = load_balancer_paginator.paginate().build_full_result() + load_balancers = get_paginator(connection) if load_balancer_arns: - load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result() + load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns) if names: - load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result() + load_balancers = get_paginator(connection, Names=names) except is_boto3_error_code('LoadBalancerNotFound'): module.exit_json(load_balancers=[]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except @@ -324,7 +329,7 @@ def main(): ) try: - connection = module.client('elbv2') + connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') From c71ac90505b1ab905ba8451148c21d6e1db20200 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Tue, 15 Mar 2022 13:01:29 +0100 Subject: [PATCH 394/683] =?UTF-8?q?New=20module=20for=20creating=20Cloudfr?= =?UTF-8?q?ont=20header=20policies=E2=80=A6=20(#925)?= MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New module for creating Cloudfront header policies… .. used for response headers SUMMARY New Cloudfront module for CF response headers policies, see https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/adding-response-headers.html This is still a relavily new feature, see https://aws.amazon.com/de/blogs/networking-and-content-delivery/amazon-cloudfront-introduces-response-headers-policies/ ISSUE TYPE New Module Pull Request COMPONENT NAME cloudfront_response_headers_policy.py Reviewed-by: Mark Woolley Reviewed-by: Stefan Horning Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- cloudfront_response_headers_policy.py | 291 ++++++++++++++++++++++++++ 1 file changed, 291 insertions(+) create mode 100644 cloudfront_response_headers_policy.py diff --git a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py new file mode 100644 index 00000000000..813f8c657a9 --- /dev/null +++ b/cloudfront_response_headers_policy.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +version_added: 3.2.0 +module: cloudfront_response_headers_policy + +short_description: Create, update and delete response headers policies to be used in a Cloudfront distribution + +description: + - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers + - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) + +author: Stefan Horning (@stefanhorning) + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + + +options: + state: + description: Decides if the named policy should be absent or present + choices: + - present + - absent + default: present + type: str + name: + description: Name of the policy + required: true + type: str + comment: + description: Description of the policy + required: false + type: str + cors_config: + description: CORS header config block + required: false + default: {} + type: dict + security_headers_config: + description: Security headers config block. For headers suchs as XSS-Protection, Content-Security-Policy or Strict-Transport-Security + required: false + default: {} + type: dict + custom_headers_config: + description: Custom headers config block. 
Define your own list of headers and values as a list + required: false + default: {} + type: dict + +''' + +EXAMPLES = ''' +- name: Creationg a Cloudfront header policy using all predefined header features and a custom header for demonstration + community.aws.cloudfront_response_headers_policy: + name: my-header-policy + comment: My header policy for all the headers + cors_config: + access_control_allow_origins: + items: + - 'https://foo.com/bar' + - 'https://bar.com/foo' + access_control_allow_headers: + items: + - 'X-Session-Id' + access_control_allow_methods: + items: + - GET + - OPTIONS + - HEAD + access_control_allow_credentials: true + access_control_expose_headers: + items: + - 'X-Session-Id' + access_control_max_age_sec: 1800 + origin_override: true + security_headers_config: + xss_protection: + protection: true + report_uri: 'https://my.report-uri.com/foo/bar' + override: true + frame_options: + frame_option: 'SAMEORIGIN' + override: true + referrer_policy: + referrer_policy: 'same-origin' + override: true + content_security_policy: + content_security_policy: "frame-ancestors 'none'; report-uri https://my.report-uri.com/r/d/csp/enforce;" + override: true + content_type_options: + override: true + strict_transport_security: + include_subdomains: true + preload: true + access_control_max_age_sec: 63072000 + override: true + custom_headers_config: + items: + - { header: 'X-Test-Header', value: 'Foo', override: true } + state: present + +- name: Delete header policy + community.aws.cloudfront_response_headers_policy: + name: my-header-policy + state: absent +''' + +RETURN = ''' +response_headers_policy: + description: The policy's information + returned: success + type: complex + contains: + id: + description: ID of the policy + returned: always + type: str + sample: '10a45b52-630e-4b7c-77c6-205f06df0462' + last_modified_time: + description: Timestamp of last modification of policy + returned: always + type: str + sample: '2022-02-04T13:23:27.304000+00:00' + response_headers_policy_config: + description: The response headers config dict containing all the headers configured + returned: always + type: complex + contains: + name: + description: Name of the policy + type: str + returned: always + sample: my-header-policy +''' + +try: + from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError +except ImportError: + pass # caught by imported AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +import datetime + + +class CloudfrontResponseHeadersPolicyService(object): + + def __init__(self, module): + self.module = module + self.client = module.client('cloudfront') + self.check_mode = module.check_mode + + def find_response_headers_policy(self, name): + try: + policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items'] + + for policy in policies: + if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name: + policy_id = policy['ResponseHeadersPolicy']['Id'] + # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here + matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id']) + break + else: + matching_policy = None + + return matching_policy + except (ParamValidationError, ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error fetching 
policy information") + + def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config): + cors_config = snake_dict_to_camel_dict(cors_config, capitalize_first=True) + security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True) + + # Little helper for turning xss_protection into XSSProtection and not into XssProtection + if 'XssProtection' in security_headers_config: + security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection') + + custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True) + + config = { + 'Name': name, + 'Comment': comment, + 'CorsConfig': self.insert_quantities(cors_config), + 'SecurityHeadersConfig': security_headers_config, + 'CustomHeadersConfig': self.insert_quantities(custom_headers_config) + } + + config = {k: v for k, v in config.items() if v} + + matching_policy = self.find_response_headers_policy(name) + + changed = False + + if self.check_mode: + self.module.exit_json(changed=True, response_headers_policy=camel_dict_to_snake_dict(config)) + + if matching_policy is None: + try: + result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config) + changed = True + except (ParamValidationError, ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error creating policy") + else: + policy_id = matching_policy['ResponseHeadersPolicy']['Id'] + etag = matching_policy['ETag'] + try: + result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config) + + changed_time = result['ResponseHeadersPolicy']['LastModifiedTime'] + seconds = 3 # threshhold for returned timestamp age + seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds)) + + # consider change made by this execution of the module if returned timestamp was very recent + if changed_time > seconds_ago: + changed = True + except (ParamValidationError, ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Updating creating policy") + + self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) + + def delete_response_header_policy(self, name): + matching_policy = self.find_response_headers_policy(name) + + if matching_policy is None: + self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting") + else: + policy_id = matching_policy['ResponseHeadersPolicy']['Id'] + etag = matching_policy['ETag'] + if self.check_mode: + result = {} + else: + try: + result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag) + except (ParamValidationError, ClientError, BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error deleting policy") + + self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result)) + + # Inserts a Quantity field into dicts with a list ('Items') + @staticmethod + def insert_quantities(dict_with_items): + # Items on top level case + if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list): + dict_with_items['Quantity'] = len(dict_with_items['Items']) + + # Items on second level case + for k, v in dict_with_items.items(): + if isinstance(v, dict) and 'Items' in v: + v['Quantity'] = len(v['Items']) + + return dict_with_items + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + comment=dict(type='str'), + cors_config=dict(type='dict', default=dict()), + security_headers_config=dict(type='dict', 
default=dict()), + custom_headers_config=dict(type='dict', default=dict()), + state=dict(choices=['present', 'absent'], type='str', default='present'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + name = module.params.get('name') + comment = module.params.get('comment', '') + cors_config = module.params.get('cors_config') + security_headers_config = module.params.get('security_headers_config') + custom_headers_config = module.params.get('custom_headers_config') + state = module.params.get('state') + + service = CloudfrontResponseHeadersPolicyService(module) + + if state == 'absent': + service.delete_response_header_policy(name) + else: + service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config) + + +if __name__ == '__main__': + main() From b3007c4366501a3dbc1c9db47241acbd2e0876af Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 15 Mar 2022 15:59:58 +0100 Subject: [PATCH 395/683] New Modules: AWS Network Firewall - rule groups (#944) New Modules: AWS Network Firewall - rule groups Sorta-Depends-On: #974 SUMMARY Two new modules for AWS Network Firewall rule groups. This first iteration will only support stateful rule groups. networkfirewall_rule_group.py networkfirewall_rule_group_info.py ToDo: Initial modules Return Value documentation Integration Tests CI Permissions ISSUE TYPE New Module Pull Request COMPONENT NAME plugins/module_utils/networkfirewall.py plugins/modules/networkfirewall_rule_group.py plugins/modules/networkfirewall_rule_group_info.py ADDITIONAL INFORMATION Note: It's a deliberate choice not to support creation of stateless rules initially. I want to get some of the initial framework in place so that the Policy and Firewall pieces can be built out while waiting on reviews. Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- networkfirewall_rule_group.py | 818 +++++++++++++++++++++++++++++ networkfirewall_rule_group_info.py | 446 ++++++++++++++++ 2 files changed, 1264 insertions(+) create mode 100644 networkfirewall_rule_group.py create mode 100644 networkfirewall_rule_group_info.py diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py new file mode 100644 index 00000000000..116c5249b3f --- /dev/null +++ b/networkfirewall_rule_group.py @@ -0,0 +1,818 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: networkfirewall_rule_group +short_description: create, delete and modify AWS Network Firewall rule groups +version_added: 4.0.0 +description: + - A module for managing AWS Network Firewall rule groups. + - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/index.html) + - Currently only supports C(stateful) firewall groups. +options: + arn: + description: + - The ARN of the Network Firewall rule group. + - Exactly one of I(arn) and I(name) must be provided. + required: false + type: str + name: + description: + - The name of the Network Firewall rule group. + - When I(name) is set, I(rule_type) must also be set. + required: false + type: str + rule_type: + description: + - Indicates whether the rule group is stateless or stateful. + - Stateless rulesets are currently not supported. + - Required if I(name) is set. 
+ required: false + aliases: ['type' ] + choices: ['stateful'] +# choices: ['stateful', 'stateless'] + type: str + state: + description: + - Create or remove the Network Firewall rule group. + required: false + choices: ['present', 'absent'] + default: 'present' + type: str + capacity: + description: + - The maximum operating resources that this rule group can use. + - Once a rule group is created this parameter is immutable. + - See also the AWS documentation about how capacityis calculated + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/nwfw-rule-group-capacity.html) + - This option is mandatory when creating a new rule group. + type: int + required: false + rule_order: + description: + - Indicates how to manage the order of the rule evaluation for the rule group. + - Once a rule group is created this parameter is immutable. + - Mutually exclusive with I(rule_type=stateless). + - For more information on how rules are evaluated read the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). + - I(rule_order) requires botocore>=1.23.23. + type: str + required: false + choices: ['default', 'strict'] + aliases: ['stateful_rule_order'] + description: + description: + - A description of the AWS Network Firewall rule group. + type: str + ip_variables: + description: + - A dictionary mapping variable names to a list of IP addresses and address ranges, in CIDR notation. + - For example C({EXAMPLE_HOSTS:["192.0.2.0/24", "203.0.113.42"]}). + - Mutually exclusive with I(domain_list). + type: dict + required: false + aliases: ['ip_set_variables'] + purge_ip_variables: + description: + - Whether to purge variable names not mentioned in the I(ip_variables) + dictionary. + - To remove all IP Set Variables it is necessary to explicitly set I(ip_variables={}) + and I(purge_port_variables=true). + type: bool + default: true + required: false + aliases: ['purge_ip_set_variables'] + port_variables: + description: + - A dictionary mapping variable names to a list of ports. + - For example C({SECURE_PORTS:["22", "443"]}). + type: dict + required: false + aliases: ['port_set_variables'] + purge_port_variables: + description: + - Whether to purge variable names not mentioned in the I(port_variables) + dictionary. + - To remove all Port Set Variables it is necessary to explicitly set I(port_variables={}) + and I(purge_port_variables=true). + type: bool + required: false + default: true + aliases: ['purge_port_set_variables'] + rule_strings: + description: + - Rules in Suricata format. + - If I(rule_strings) is specified, it must include at least one entry. + - For more information read the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html) + and the Suricata documentation + U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html). + - Mutually exclusive with I(rule_type=stateless). + - Mutually exclusive with I(domain_list) and I(rule_list). + - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be + specified at creation time. + type: list + elements: str + required: false + domain_list: + description: + - Inspection criteria for a domain list rule group. + - When set overwrites all Domain List settings with the new configuration. 
+ - For more information about domain name based filtering + read the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-domain-names.html). + - Mutually exclusive with I(rule_type=stateless). + - Mutually exclusive with I(ip_variables), I(rule_list) and I(rule_strings). + - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be + specified at creation time. + type: dict + required: false + suboptions: + domain_names: + description: + - A list of domain names to look for in the traffic flow. + type: list + elements: str + required: true + filter_http: + description: + - Whether HTTP traffic should be inspected (uses the host header). + type: bool + required: false + default: false + filter_https: + description: + - Whether HTTPS traffic should be inspected (uses the SNI). + type: bool + required: false + default: false + action: + description: + - Action to perform on traffic that matches the rule match settings. + type: str + required: true + choices: ['allow', 'deny'] + source_ips: + description: + - Used to expand the local network definition beyond the CIDR range + of the VPC where you deploy Network Firewall. + type: list + elements: str + required: false + rule_list: + description: + - Inspection criteria to be used for a 5-tuple based rule group. + - When set overwrites all existing 5-tuple rules with the new configuration. + - Mutually exclusive with I(domain_list) and I(rule_strings). + - Mutually exclusive with I(rule_type=stateless). + - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be + specified at creation time. + - For more information about valid values see the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_StatefulRule.html) + and + U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_Header.html). + - 'Note: Idempotency when comparing AWS Web UI and Ansiible managed rules can not be guaranteed' + type: list + elements: dict + required: false + aliases: ['stateful_rule_list'] + suboptions: + action: + description: + - What Network Firewall should do with the packets in a traffic flow when the flow matches. + type: str + required: true + choices: ['pass', 'drop', 'alert'] + protocol: + description: + - The protocol to inspect for. To specify all, you can use C(IP), because all traffic on AWS is C(IP). + type: str + required: true + source: + description: + - The source IP address or address range to inspect for, in CIDR notation. + - To match with any address, specify C(ANY). + type: str + required: true + source_port: + description: + - The source port to inspect for. + - To match with any port, specify C(ANY). + type: str + required: true + direction: + description: + - The direction of traffic flow to inspect. + - If set to C(any), the inspection matches both traffic going from the + I(source) to the I(destination) and from the I(destination) to the + I(source). + - If set to C(forward), the inspection only matches traffic going from the + I(source) to the I(destination). + type: str + required: false + default: 'forward' + choices: ['forward', 'any'] + destination: + description: + - The destination IP address or address range to inspect for, in CIDR notation. + - To match with any address, specify C(ANY). + type: str + required: true + destination_port: + description: + - The source port to inspect for. + - To match with any port, specify C(ANY). 
+ type: str + required: true + sid: + description: + - The signature ID of the rule. + - A unique I(sid) must be passed for all rules. + type: int + required: true + rule_options: + description: + - Additional options for the rule. + - 5-tuple based rules are converted by AWS into Suricata rules, for more + complex options requirements where order matters consider using I(rule_strings). + - A dictionary mapping Suricata RuleOptions names to a list of values. + - The examples section contains some examples of using rule_options. + - For more information read the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html) + and the Suricata documentation + U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html). + type: dict + required: false + tags: + description: + - A dictionary representing the tags associated with the rule group. + - 'For example C({"Example Tag": "some example value"})' + - Unless I(purge_tags=False) all other tags will be removed from the rule + group. + type: dict + required: false + purge_tags: + description: + - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. + type: bool + required: false + default: True + +author: Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' +# Create a rule group +- name: Create a minimal AWS Network Firewall Rule Group + community.aws.networkfirewall_rule_group: + name: 'MinimalGroup' + type: 'stateful' + capacity: 200 + rule_strings: + - 'pass tcp any any -> any any (sid:1000001;)' + +# Create an example rule group using rule_list +- name: Create 5-tuple Rule List based rule group + community.aws.networkfirewall_rule_group: + name: 'ExampleGroup' + type: 'stateful' + description: 'My description' + rule_order: default + capacity: 100 + rule_list: + - sid: 1 + direction: forward + action: pass + protocol: IP + source: any + source_port: any + destination: any + destination_port: any + +# Create an example rule group using rule_list +- name: Create 5-tuple Rule List based rule group + community.aws.networkfirewall_rule_group: + name: 'ExampleGroup' + type: 'stateful' + description: 'My description' + ip_variables: + SOURCE_IPS: ['203.0.113.0/24', '198.51.100.42'] + DESTINATION_IPS: ['192.0.2.0/24', '198.51.100.48'] + port_variables: + HTTP_PORTS: [80, 8080] + rule_order: default + capacity: 100 + rule_list: + # Allow 'Destination Unreachable' traffic + - sid: 1 + action: pass + protocol: icmp + source: any + source_port: any + destination: any + destination_port: any + rule_options: + itype: 3 + - sid: 2 + action: drop + protocol: tcp + source: "$SOURCE_IPS" + source_port: any + destination: "$DESTINATION_IPS" + destination_port: "$HTTP_PORTS" + rule_options: + urilen: ["20<>40"] + # Where only a keyword is needed, add the keword, but no value + http_uri: + # Settings where Suricata expects raw strings (like the content + # keyword) will need to have the double-quotes explicitly escaped and + # passed because there's no practical way to distinguish between them + # and flags. 
+ content: '"index.php"' + +# Create an example rule group using Suricata rule strings +- name: Create Suricata rule string based rule group + community.aws.networkfirewall_rule_group: + name: 'ExampleSuricata' + type: 'stateful' + description: 'My description' + capacity: 200 + ip_variables: + EXAMPLE_IP: ['203.0.113.0/24', '198.51.100.42'] + ANOTHER_EXAMPLE: ['192.0.2.0/24', '198.51.100.48'] + port_variables: + EXAMPLE_PORT: [443, 22] + rule_strings: + - 'pass tcp any any -> $EXAMPLE_IP $EXAMPLE_PORT (sid:1000001;)' + - 'pass udp any any -> $ANOTHER_EXAMPLE any (sid:1000002;)' + +# Create an example Domain List based rule group +- name: Create Domain List based rule group + community.aws.networkfirewall_rule_group: + name: 'ExampleDomainList' + type: 'stateful' + description: 'My description' + capacity: 100 + domain_list: + domain_names: + - 'example.com' + - '.example.net' + filter_https: True + filter_http: True + action: allow + source_ips: '192.0.2.0/24' + +# Update the description of a rule group +- name: Update the description of a rule group + community.aws.networkfirewall_rule_group: + name: 'MinimalGroup' + type: 'stateful' + description: 'Another description' + +# Update IP Variables for a rule group +- name: Update IP Variables + community.aws.networkfirewall_rule_group: + name: 'ExampleGroup' + type: 'stateful' + ip_variables: + EXAMPLE_IP: ['192.0.2.0/24', '203.0.113.0/24', '198.51.100.42'] + purge_ip_variables: false + +# Delete a rule group +- name: Delete a rule group + community.aws.networkfirewall_rule_group: + name: 'MinimalGroup' + type: 'stateful' + state: absent + +''' + +RETURN = ''' +rule_group: + description: Details of the rules in the rule group + type: dict + returned: success + contains: + rule_variables: + description: Settings that are available for use in the rules in the rule group. + returned: When rule variables are attached to the rule group. + type: complex + contains: + ip_sets: + description: A dictionary mapping variable names to IP addresses in CIDR format. + returned: success + type: dict + example: ['192.0.2.0/24'] + port_sets: + description: A dictionary mapping variable names to ports + returned: success + type: dict + example: ['42'] + stateful_rule_options: + description: Additional options governing how Network Firewall handles stateful rules. + returned: When the rule group is either "rules string" or "rules list" based. + type: dict + contains: + rule_order: + description: The order in which rules will be evaluated. + returned: success + type: str + example: 'DEFAULT_ACTION_ORDER' + rules_source: + description: Inspection criteria used for a 5-tuple based rule group. + returned: success + type: dict + contains: + stateful_rules: + description: A list of dictionaries describing the rules that the rule group is comprised of. + returned: When the rule group is "rules list" based. + type: list + elements: dict + contains: + action: + description: What action to perform when a flow matches the rule criteria. + returned: success + type: str + example: 'PASS' + header: + description: A description of the criteria used for the rule. + returned: success + type: dict + contains: + protocol: + description: The protocol to inspect for. + returned: success + type: str + example: 'IP' + source: + description: The source address or range of addresses to inspect for. + returned: success + type: str + example: '203.0.113.98' + source_port: + description: The source port to inspect for. 
+ returned: success + type: str + example: '42' + destination: + description: The destination address or range of addresses to inspect for. + returned: success + type: str + example: '198.51.100.0/24' + destination_port: + description: The destination port to inspect for. + returned: success + type: str + example: '6666:6667' + direction: + description: The direction of traffic flow to inspect. + returned: success + type: str + example: 'FORWARD' + rule_options: + description: Additional Suricata RuleOptions settings for the rule. + returned: success + type: list + elements: dict + contains: + keyword: + description: The keyword for the setting. + returned: success + type: str + example: 'sid:1' + settings: + description: A list of values passed to the setting. + returned: When values are available + type: list + elements: str + rules_string: + description: A string describing the rules that the rule group is comprised of. + returned: When the rule group is "rules string" based. + type: str + rules_source_list: + description: A description of the criteria for a domain list rule group. + returned: When the rule group is "domain list" based. + type: dict + contains: + targets: + description: A list of domain names to be inspected for. + returned: success + type: list + elements: str + example: ['abc.example.com', '.example.net'] + target_types: + description: The protocols to be inspected by the rule group. + returned: success + type: list + elements: str + example: ['TLS_SNI', 'HTTP_HOST'] + generated_rules_type: + description: Whether the rule group allows or denies access to the domains in the list. + returned: success + type: str + example: 'ALLOWLIST' + stateless_rules_and_custom_actions: + description: A description of the criteria for a stateless rule group. + returned: When the rule group is a stateless rule group. + type: dict + contains: + stateless_rules: + description: A list of stateless rules for use in a stateless rule group. + type: list + elements: dict + contains: + rule_definition: + description: Describes the stateless 5-tuple inspection criteria and actions for the rule. + returned: success + type: dict + contains: + match_attributes: + description: Describes the stateless 5-tuple inspection criteria for the rule. + returned: success + type: dict + contains: + sources: + description: The source IP addresses and address ranges to inspect for. + returned: success + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + destinations: + description: The destination IP addresses and address ranges to inspect for. + returned: success + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + source_ports: + description: The source port ranges to inspect for. + returned: success + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. + returned: success + type: int + destination_ports: + description: The destination port ranges to inspect for. + returned: success + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. 
+ returned: success
+ type: int
+ protocols:
+ description: The IANA protocol numbers of the protocols to inspect for.
+ returned: success
+ type: list
+ elements: int
+ example: [6]
+ tcp_flags:
+ description: The TCP flags and masks to inspect for.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ flags:
+ description: Used with masks to define the TCP flags that flows are inspected for.
+ returned: success
+ type: list
+ elements: str
+ masks:
+ description: The set of flags considered during inspection.
+ returned: success
+ type: list
+ elements: str
+ actions:
+ description: The actions to take when a flow matches the rule.
+ returned: success
+ type: list
+ elements: str
+ example: ['aws:pass', 'CustomActionName']
+ priority:
+ description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group.
+ returned: success
+ type: int
+ custom_actions:
+ description: A list of individual custom action definitions that are available for use in stateless rules.
+ type: list
+ elements: dict
+ contains:
+ action_name:
+ description: The name for the custom action.
+ returned: success
+ type: str
+ action_definition:
+ description: The custom action associated with the action name.
+ returned: success
+ type: dict
+ contains:
+ publish_metric_action:
+ description: The description of an action which publishes to CloudWatch.
+ returned: When the action publishes to CloudWatch.
+ type: dict
+ contains:
+ dimensions:
+ description: The value to use in an Amazon CloudWatch custom metric dimension.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The value to use in the custom metric dimension.
+ returned: success
+ type: str
+rule_group_metadata:
+ description: Metadata about the rule group.
+ type: dict
+ returned: success
+ contains:
+ capacity:
+ description: The maximum operating resources that this rule group can use.
+ type: int
+ returned: success
+ consumed_capacity:
+ description: The number of capacity units currently consumed by the rule group rules.
+ type: int
+ returned: success
+ description:
+ description: A description of the rule group.
+ type: str
+ returned: success
+ number_of_associations:
+ description: The number of firewall policies that use this rule group.
+ type: int
+ returned: success
+ rule_group_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup'
+ rule_group_id:
+ description: A unique identifier for the rule group.
+ type: str
+ returned: success
+ example: '12345678-abcd-1234-abcd-123456789abc'
+ rule_group_name:
+ description: The name of the rule group.
+ type: str
+ returned: success
+ rule_group_status:
+ description: The current status of a rule group.
+ type: str
+ returned: success
+ example: 'DELETING'
+ tags:
+ description: A dictionary representing the tags associated with the rule group.
+ type: dict
+ returned: success
+ type:
+ description: Whether the rule group is stateless or stateful.
+ type: str + returned: success + example: 'STATEFUL' +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager + + +def main(): + + domain_list_spec = dict( + domain_names=dict(type='list', elements='str', required=True), + filter_http=dict(type='bool', required=False, default=False), + filter_https=dict(type='bool', required=False, default=False), + action=dict(type='str', required=True, choices=['allow', 'deny']), + source_ips=dict(type='list', elements='str', required=False), + ) + + rule_list_spec = dict( + action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']), + protocol=dict(type='str', required=True), + source=dict(type='str', required=True), + source_port=dict(type='str', required=True), + direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']), + destination=dict(type='str', required=True), + destination_port=dict(type='str', required=True), + sid=dict(type='int', required=True), + rule_options=dict(type='dict', required=False), + ) + + argument_spec = dict( + arn=dict(type='str', required=False), + name=dict(type='str', required=False), + rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']), + # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']), + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + capacity=dict(type='int', required=False), + rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']), + description=dict(type='str', required=False), + ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']), + purge_ip_variables=dict(type='bool', required=False, aliases=['purge_ip_set_variables'], default=True), + port_variables=dict(type='dict', required=False, aliases=['port_set_variables']), + purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True), + rule_strings=dict(type='list', elements='str', required=False), + domain_list=dict(type='dict', options=domain_list_spec, required=False), + rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), + tags=dict(type='dict', required=False), + purge_tags=dict(type='bool', required=False, default=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ('name', 'arn'), + ('rule_strings', 'domain_list', 'rule_list'), + ('domain_list', 'ip_variables'), + ], + required_together=[ + ('name', 'rule_type'), + ], + required_one_of=[ + ('name', 'arn'), + ], + ) + + module.require_botocore_at_least('1.19.20') + + state = module.params.get('state') + name = module.params.get('name') + arn = module.params.get('arn') + rule_type = module.params.get('rule_type') + + if rule_type == 'stateless': + if module.params.get('rule_order'): + module.fail_json('rule_order can not be set for stateless rule groups') + if module.params.get('rule_strings'): + module.fail_json('rule_strings can only be used for stateful rule groups') + if module.params.get('rule_list'): + module.fail_json('rule_list can only be used for stateful rule groups') + if module.params.get('domain_list'): + module.fail_json('domain_list can only be used for stateful rule groups') + + if module.params.get('rule_order'): + 
module.require_botocore_at_least('1.23.23', reason='to set the rule order')
+
+ manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type)
+
+ if state == 'absent':
+ manager.delete()
+ else:
+ manager.set_description(module.params.get('description'))
+ manager.set_capacity(module.params.get('capacity'))
+ manager.set_rule_order(module.params.get('rule_order'))
+ manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables'))
+ manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables'))
+ manager.set_rule_string(module.params.get('rule_strings'))
+ manager.set_domain_list(module.params.get('domain_list'))
+ manager.set_rule_list(module.params.get('rule_list'))
+ manager.set_tags(module.params.get('tags'), module.params.get('purge_tags'))
+
+ manager.flush_changes()
+
+ results = dict(
+ changed=manager.changed,
+ rule_group=manager.updated_resource,
+ )
+ if manager.changed:
+ diff = dict(
+ before=manager.original_resource,
+ after=manager.updated_resource,
+ )
+ results['diff'] = diff
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py
new file mode 100644
index 00000000000..ae9f43bd28b
--- /dev/null
+++ b/networkfirewall_rule_group_info.py
@@ -0,0 +1,446 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: networkfirewall_rule_group_info
+short_description: describe AWS Network Firewall rule groups
+version_added: 4.0.0
+description:
+ - A module for describing AWS Network Firewall rule groups.
+options:
+ arn:
+ description:
+ - The ARN of the Network Firewall rule group.
+ - At the time of writing, AWS does not support describing Managed Rules.
+ required: false
+ type: str
+ name:
+ description:
+ - The name of the Network Firewall rule group.
+ required: false
+ type: str
+ rule_type:
+ description:
+ - Indicates whether the rule group is stateless or stateful.
+ - Required if I(name) is provided.
+ required: false
+ aliases: ['type']
+ choices: ['stateful', 'stateless']
+ type: str
+ scope:
+ description:
+ - The scope of the request.
+ - When I(scope='account'), returns a description of all rule groups in the account.
+ - When I(scope='managed'), returns a list of available managed rule group ARNs.
+ - By default, searches only at the account scope.
+ - I(scope='managed') requires botocore>=1.23.23.
+ required: false
+ choices: ['managed', 'account']
+ type: str
+
+author: Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+'''
+
+EXAMPLES = '''
+
+# Describe all Rule Groups in an account (excludes managed groups)
+- community.aws.networkfirewall_rule_group_info: {}
+
+# List the available Managed Rule groups (AWS doesn't support describing the
+# groups)
+- community.aws.networkfirewall_rule_group_info:
+ scope: managed
+
+# Describe a Rule Group by ARN
+- community.aws.networkfirewall_rule_group_info:
+ arn: arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleRuleGroup
+
+# Describe a Rule Group by name
+- community.aws.networkfirewall_rule_group_info:
+ name: ExampleRuleGroup
+ type: stateful
+
+'''
+
+RETURN = '''
+rule_list:
+ description: A list of ARNs of the matching rule groups.
+ type: list
+ elements: str
+ returned: When a rule name isn't specified
+
+rule_groups:
+ description: The details of the rule groups
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ rule_group:
+ description: Details of the rules in the rule group
+ type: dict
+ returned: success
+ contains:
+ rule_variables:
+ description: Settings that are available for use in the rules in the rule group.
+ returned: When rule variables are attached to the rule group.
+ type: complex
+ contains:
+ ip_sets:
+ description: A dictionary mapping variable names to IP addresses in CIDR format.
+ returned: success
+ type: dict
+ example: ['192.0.2.0/24']
+ port_sets:
+ description: A dictionary mapping variable names to ports
+ returned: success
+ type: dict
+ example: ['42']
+ stateful_rule_options:
+ description: Additional options governing how Network Firewall handles stateful rules.
+ returned: When the rule group is either "rules string" or "rules list" based.
+ type: dict
+ contains:
+ rule_order:
+ description: The order in which rules will be evaluated.
+ returned: success
+ type: str
+ example: 'DEFAULT_ACTION_ORDER'
+ rules_source:
+ description: Inspection criteria used for a 5-tuple based rule group.
+ returned: success
+ type: dict
+ contains:
+ stateful_rules:
+ description: A list of dictionaries describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules list" based.
+ type: list
+ elements: dict
+ contains:
+ action:
+ description: What action to perform when a flow matches the rule criteria.
+ returned: success
+ type: str
+ example: 'PASS'
+ header:
+ description: A description of the criteria used for the rule.
+ returned: success
+ type: dict
+ contains:
+ protocol:
+ description: The protocol to inspect for.
+ returned: success
+ type: str
+ example: 'IP'
+ source:
+ description: The source address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '203.0.113.98'
+ source_port:
+ description: The source port to inspect for.
+ returned: success
+ type: str
+ example: '42'
+ destination:
+ description: The destination address or range of addresses to inspect for.
+ returned: success
+ type: str
+ example: '198.51.100.0/24'
+ destination_port:
+ description: The destination port to inspect for.
+ returned: success
+ type: str
+ example: '6666:6667'
+ direction:
+ description: The direction of traffic flow to inspect.
+ returned: success
+ type: str
+ example: 'FORWARD'
+ rule_options:
+ description: Additional Suricata RuleOptions settings for the rule.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ keyword:
+ description: The keyword for the setting.
+ returned: success
+ type: str
+ example: 'sid:1'
+ settings:
+ description: A list of values passed to the setting.
+ returned: When values are available
+ type: list
+ elements: str
+ rules_string:
+ description: A string describing the rules that the rule group is comprised of.
+ returned: When the rule group is "rules string" based.
+ type: str
+ rules_source_list:
+ description: A description of the criteria for a domain list rule group.
+ returned: When the rule group is "domain list" based.
+ type: dict
+ contains:
+ targets:
+ description: A list of domain names to be inspected for.
+ returned: success
+ type: list
+ elements: str
+ example: ['abc.example.com', '.example.net']
+ target_types:
+ description: The protocols to be inspected by the rule group.
+ returned: success + type: list + elements: str + example: ['TLS_SNI', 'HTTP_HOST'] + generated_rules_type: + description: Whether the rule group allows or denies access to the domains in the list. + returned: success + type: str + example: 'ALLOWLIST' + stateless_rules_and_custom_actions: + description: A description of the criteria for a stateless rule group. + returned: When the rule group is a stateless rule group. + type: dict + contains: + stateless_rules: + description: A list of stateless rules for use in a stateless rule group. + type: list + elements: dict + contains: + rule_definition: + description: Describes the stateless 5-tuple inspection criteria and actions for the rule. + returned: success + type: dict + contains: + match_attributes: + description: Describes the stateless 5-tuple inspection criteria for the rule. + returned: success + type: dict + contains: + sources: + description: The source IP addresses and address ranges to inspect for. + returned: success + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + destinations: + description: The destination IP addresses and address ranges to inspect for. + returned: success + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + source_ports: + description: The source port ranges to inspect for. + returned: success + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. + returned: success + type: int + destination_ports: + description: The destination port ranges to inspect for. + returned: success + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. + returned: success + type: int + protocols: + description: The IANA protocol numbers of the protocols to inspect for. + returned: success + type: list + elements: int + example: [6] + tcp_flags: + description: The TCP flags and masks to inspect for. + returned: success + type: list + elements: dict + contains: + flags: + description: Used with masks to define the TCP flags that flows are inspected for. + returned: success + type: list + elements: str + masks: + description: The set of flags considered during inspection. + returned: success + type: list + elements: str + actions: + description: The actions to take when a flow matches the rule. + returned: success + type: list + elements: str + example: ['aws:pass', 'CustomActionName'] + priority: + description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. + returned: success + type: int + custom_actions: + description: A list of individual custom action definitions that are available for use in stateless rules. + type: list + elements: dict + contains: + action_name: + description: The name for the custom action. + returned: success + type: str + action_definition: + description: The custom action associated with the action name. + returned: success + type: dict + contains: + publish_metric_action: + description: The description of an action which publishes to CloudWatch. 
+ returned: When the action publishes to CloudWatch.
+ type: dict
+ contains:
+ dimensions:
+ description: The value to use in an Amazon CloudWatch custom metric dimension.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The value to use in the custom metric dimension.
+ returned: success
+ type: str
+ rule_group_metadata:
+ description: Metadata about the rule group.
+ type: dict
+ returned: success
+ contains:
+ capacity:
+ description: The maximum operating resources that this rule group can use.
+ type: int
+ returned: success
+ consumed_capacity:
+ description: The number of capacity units currently consumed by the rule group rules.
+ type: int
+ returned: success
+ description:
+ description: A description of the rule group.
+ type: str
+ returned: success
+ number_of_associations:
+ description: The number of firewall policies that use this rule group.
+ type: int
+ returned: success
+ rule_group_arn:
+ description: The ARN of the rule group.
+ type: str
+ returned: success
+ example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup'
+ rule_group_id:
+ description: A unique identifier for the rule group.
+ type: str
+ returned: success
+ example: '12345678-abcd-1234-abcd-123456789abc'
+ rule_group_name:
+ description: The name of the rule group.
+ type: str
+ returned: success
+ rule_group_status:
+ description: The current status of a rule group.
+ type: str
+ returned: success
+ example: 'DELETING'
+ tags:
+ description: A dictionary representing the tags associated with the rule group.
+ type: dict
+ returned: success
+ type:
+ description: Whether the rule group is stateless or stateful.
+ type: str
+ returned: success
+ example: 'STATEFUL'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=False),
+ rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']),
+ arn=dict(type='str', required=False),
+ scope=dict(type='str', required=False, choices=['managed', 'account']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('arn', 'name'),
+ ('arn', 'rule_type'),
+ ],
+ required_together=[
+ ('name', 'rule_type'),
+ ]
+ )
+
+ module.require_botocore_at_least('1.19.20')
+
+ arn = module.params.get('arn')
+ name = module.params.get('name')
+ rule_type = module.params.get('rule_type')
+ scope = module.params.get('scope')
+
+ if scope == 'managed':
+ module.require_botocore_at_least('1.23.23', reason='to list managed rules')
+
+ manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type)
+
+ results = dict(changed=False)
+
+ if name or arn:
+ rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn)
+ rules = [rule]
+ results['rule_groups'] = rules
+ else:
+ rule_list = manager.list(scope=scope)
+ results['rule_list'] = rule_list
+ if scope != 'managed':
+ rules = [manager.get_rule_group(arn=r) for r in rule_list]
+ results['rule_groups'] = rules
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
From 76ae5ce0c430794469712c0877a722d3acf44a99 Mon Sep 17 00:00:00 2001
From: Geoffrey Hichborn <166528+phene@users.noreply.github.com>
Date: Wed, 16 Mar 2022 01:59:29 -0700
Subject: [PATCH 396/683] IAM Role
Removal Does Not Require Removal of Permission Boundary (#961)

IAM Role Removal Does Not Require Removal of Permission Boundary
SUMMARY
Removes the unnecessary removal of the permission boundary from a role when deleting that role. Unlike inline policies, permission boundaries do not need to be removed from an IAM role before deleting the IAM role. The old behavior caused issues when an inherited permission boundary prevented its own removal.
Fixes #959
ISSUE TYPE
Bugfix Pull Request
COMPONENT NAME
iam_role
Reviewed-by: Markus Bergholz
Reviewed-by: Mark Chappell
---
iam_role.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/iam_role.py b/iam_role.py
index 7ca0d8c4fbb..15683e0e060 100644
--- a/iam_role.py
+++ b/iam_role.py
@@ -571,10 +571,8 @@ def destroy_role():
 # Before we try to delete the role we need to remove any
 # - attached instance profiles
 # - attached managed policies
- # - permissions boundary
 remove_instance_profiles(role_params, role)
 update_managed_policies(role_params, role, [], True)
- update_role_permissions_boundary(boundary_params, role)
 try:
 if not module.check_mode:

From f19831967c28cbe978b174a84ff0f94dfbd57bfc Mon Sep 17 00:00:00 2001
From: Mark Woolley
Date: Thu, 17 Mar 2022 16:20:05 +0000
Subject: [PATCH 397/683] Add backoff logic to elb_target_group_info (#1001)

Add backoff logic to elb_target_group_info
SUMMARY
From time to time, rate-limiting failures occur when using this module; this PR adds backoff logic to the module to improve its stability.
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (Throttling) when calling the DescribeTargetGroups operation (reached max retries: 4): Rate exceeded
fatal: [10_184_0_132 -> 127.0.0.1]: FAILED!
=> changed=false boto3_version: 1.20.34 botocore_version: 1.23.34 error: code: Throttling message: Rate exceeded type: Sender msg: 'Failed to list target groups: An error occurred (Throttling) when calling the DescribeTargetGroups operation (reached max retries: 4): Rate exceeded' response_metadata: http_headers: content-length: '271' content-type: text/xml date: Wed, 16 Mar 2022 09:50:24 GMT x-amzn-requestid: xxxxx http_status_code: 400 max_attempts_reached: true request_id: xxxxx retry_attempts: 4 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_target_group_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- elb_target_group_info.py | 43 +++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 5abe8d34210..1237a594e6b 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -214,13 +214,19 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict -def get_target_group_attributes(connection, module, target_group_arn): +@AWSRetry.jittered_backoff() +def get_paginator(client, **kwargs): + paginator = client.get_paginator('describe_target_groups') + return paginator.paginate(**kwargs).build_full_result() + + +def get_target_group_attributes(target_group_arn): try: - target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) + target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe target group attributes") @@ -229,23 +235,23 @@ def get_target_group_attributes(connection, module, target_group_arn): for (k, v) in target_group_attributes.items()) -def get_target_group_tags(connection, module, target_group_arn): +def get_target_group_tags(target_group_arn): try: - return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) + return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe group tags") -def get_target_group_targets_health(connection, module, target_group_arn): +def get_target_group_targets_health(target_group_arn): try: - return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] + return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get target health") -def list_target_groups(connection, module): +def list_target_groups(): load_balancer_arn = module.params.get("load_balancer_arn") target_group_arns = module.params.get("target_group_arns") @@ -253,15 +259,14 @@ def list_target_groups(connection, module): collect_targets_health = 
module.params.get("collect_targets_health")

 try:
- target_group_paginator = connection.get_paginator('describe_target_groups')
 if not load_balancer_arn and not target_group_arns and not names:
- target_groups = target_group_paginator.paginate().build_full_result()
+ target_groups = get_paginator()
 if load_balancer_arn:
- target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
+ target_groups = get_paginator(LoadBalancerArn=load_balancer_arn)
 if target_group_arns:
- target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
+ target_groups = get_paginator(TargetGroupArns=target_group_arns)
 if names:
- target_groups = target_group_paginator.paginate(Names=names).build_full_result()
+ target_groups = get_paginator(Names=names)
 except is_boto3_error_code('TargetGroupNotFound'):
 module.exit_json(target_groups=[])
 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
@@ -269,22 +274,24 @@
 # Get the attributes and tags for each target group
 for target_group in target_groups['TargetGroups']:
- target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
+ target_group.update(get_target_group_attributes(target_group['TargetGroupArn']))
 # Turn the boto3 result in to ansible_friendly_snaked_names
 snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
 # Get tags for each target group
 for snaked_target_group in snaked_target_groups:
- snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
+ snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn'])
 if collect_targets_health:
 snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
- target) for target in get_target_group_targets_health(connection, module, snaked_target_group['target_group_arn'])]
+ target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])]
 module.exit_json(target_groups=snaked_target_groups)
 def main():
+ global module
+ global client
 argument_spec = dict(
 load_balancer_arn=dict(type='str'),
@@ -300,11 +307,11 @@ def main():
 )
 try:
- connection = module.client('elbv2')
+ client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
 module.fail_json_aws(e, msg='Failed to connect to AWS')
- list_target_groups(connection, module)
+ list_target_groups()
 if __name__ == '__main__':

From 588b00c328d03222bf8a48f8a48654844d5d6284 Mon Sep 17 00:00:00 2001
From: Michael Mayer
Date: Fri, 18 Mar 2022 13:29:18 -0700
Subject: [PATCH 398/683] Support changing of launch type (#840)

Support changing of launch type
SUMMARY
When changing the launch_type parameter for an ecs_taskdefinition, no change was reported by the module. This adds a check to see whether launch_type in the ecs_taskdefinition has changed. If a change is detected, the module reports that there is no matching task definition and creates a new one.
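As an illustration of the comparison this change introduces, here is a minimal sketch of the launch-type check in isolation. The function name launch_type_matches is hypothetical, and td stands in for a registered task definition dict, mirroring the names used in the diff below; it is a simplified excerpt, not the module's full matching logic.

# Hypothetical, simplified sketch of the new check. AWS records the launch
# types a task definition was registered for in its 'compatibilities' list,
# so a changed launch_type means "no match" and the module falls through to
# registering a new task definition revision.
def launch_type_matches(requested_launch_type, td):
    if requested_launch_type is None:
        return True  # launch_type was not specified, so it cannot mismatch
    return requested_launch_type in td.get('compatibilities', [])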
ISSUE TYPE
Bugfix Pull Request
COMPONENT NAME
ecs_taskdefinition
Reviewed-by: Alina Buzachis
Reviewed-by: Michael Mayer
Reviewed-by: Markus Bergholz
---
ecs_taskdefinition.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index f99db8b9659..c74bf44ec9d 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -923,13 +923,16 @@ def _right_has_values_of_left(left, right):
 return True
- def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition):
+ def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition):
 if td['status'] != "ACTIVE":
 return None
 if requested_task_role_arn != td.get('taskRoleArn', ""):
 return None
+ if requested_launch_type is not None and requested_launch_type not in td.get('compatibilities', []):
+ return None
+
 existing_volumes = td.get('volumes', []) or []
 if len(requested_volumes) != len(existing_volumes):
@@ -972,7 +975,8 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_
 requested_volumes = module.params['volumes'] or []
 requested_containers = module.params['containers'] or []
 requested_task_role_arn = module.params['task_role_arn']
- existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td)
+ requested_launch_type = module.params['launch_type']
+ existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td)
 if existing:
 break

From 879a3c95e99493ddcefb2a67343cebaedeb0d758 Mon Sep 17 00:00:00 2001
From: Mandar Kulkarni
Date: Thu, 24 Mar 2022 09:54:00 -0700
Subject: [PATCH 399/683] ec2_asg_instance_refresh and ec2_asg_instance_refresh_info modules (#973)

ec2_asg_instance_refresh and ec2_asg_instance_refresh_info modules
SUMMARY
Reviving the original PR that adds Auto Scaling instance refresh API support, as the original author has not yet updated the PR based on review feedback.
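For context, a hedged sketch of the underlying boto3 calls the two new modules wrap; the region, group name and preference values here are illustrative placeholders, not taken from this PR.

# Hedged sketch of the underlying boto3 Auto Scaling calls; 'some-asg' and
# the preference values are illustrative only.
import boto3

client = boto3.client('autoscaling', region_name='us-east-1')

# Start a rolling refresh, keeping at least 90% of capacity healthy.
refresh = client.start_instance_refresh(
    AutoScalingGroupName='some-asg',
    Strategy='Rolling',
    Preferences={'MinHealthyPercentage': 90, 'InstanceWarmup': 60},
)

# Look up the status of that refresh by ID, which is what
# ec2_asg_instance_refresh_info exposes to playbooks.
described = client.describe_instance_refreshes(
    AutoScalingGroupName='some-asg',
    InstanceRefreshIds=[refresh['InstanceRefreshId']],
)
status = described['InstanceRefreshes'][0]['Status']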
Issue: #135
PR being revived: #795
Fixes #135
ISSUE TYPE
New Module Pull Request
COMPONENT NAME
ec2_asg_instance_refresh
ec2_asg_instance_refreshes_info
ADDITIONAL INFORMATION
More about the feature: https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/
Boto3 documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/autoscaling.html#AutoScaling.Client.start_instance_refresh
Reviewed-by: Alina Buzachis
Reviewed-by: Mandar Kulkarni
Reviewed-by: Mark Woolley
Reviewed-by: Jill R
Reviewed-by: Joseph Torcasso
---
ec2_asg_instance_refresh.py | 267 +++++++++++++++++++++++++++
ec2_asg_instance_refresh_info.py | 219 +++++++++++++++++++
2 files changed, 486 insertions(+)
create mode 100644 ec2_asg_instance_refresh.py
create mode 100644 ec2_asg_instance_refresh_info.py
diff --git a/ec2_asg_instance_refresh.py b/ec2_asg_instance_refresh.py
new file mode 100644
index 00000000000..faa61fa74cb
--- /dev/null
+++ b/ec2_asg_instance_refresh.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_instance_refresh
+version_added: 3.2.0
+short_description: Start or cancel an EC2 Auto Scaling Group (ASG) instance refresh in AWS
+description:
+ - Start or cancel an EC2 Auto Scaling Group instance refresh in AWS.
+ - Can be used with M(community.aws.ec2_asg_instance_refresh_info) to track the subsequent progress.
+author: "Dan Khersonsky (@danquixote)"
+options:
+ state:
+ description:
+ - The desired state of the instance refresh.
+ type: str
+ required: true
+ choices: [ 'started', 'cancelled' ]
+ name:
+ description:
+ - The name of the Auto Scaling group to act on.
+ type: str
+ required: true
+ strategy:
+ description:
+ - The strategy to use for the instance refresh. The only valid value is C(Rolling).
+ - A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated.
+ - A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in.
+ - If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration.
+ type: str
+ default: 'Rolling'
+ preferences:
+ description:
+ - Set of preferences associated with the instance refresh request.
+ - If not provided, the default values are used.
+ - For I(min_healthy_percentage), the default value is C(90).
+ - For I(instance_warmup), the default is to use the value specified for the health check grace period for the Auto Scaling group.
+ - Cannot be specified when I(state) is set to C(cancelled).
+ required: false
+ suboptions:
+ min_healthy_percentage:
+ description:
+ - The total percentage of capacity in the ASG that must remain healthy during the instance refresh for the operation to continue.
+ - It is rounded up to the nearest integer.
+ type: int
+ default: 90
+ instance_warmup:
+ description:
+ - The number of seconds until a newly launched instance is configured and ready to use.
+ - During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement.
+ - The default is to use the value for the health check grace period defined for the group.
+ type: int
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Start a refresh
+ community.aws.ec2_asg_instance_refresh:
+ name: some-asg
+ state: started
+
+- name: Cancel a refresh
+ community.aws.ec2_asg_instance_refresh:
+ name: some-asg
+ state: cancelled
+
+- name: Start a refresh and pass preferences
+ community.aws.ec2_asg_instance_refresh:
+ name: some-asg
+ state: started
+ preferences:
+ min_healthy_percentage: 91
+ instance_warmup: 60
+
+'''
+
+RETURN = '''
+---
+instance_refresh_id:
+ description: The ID of the instance refresh.
+ returned: success
+ type: str
+ sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b"
+auto_scaling_group_name:
+ description: The name of the Auto Scaling group.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+status:
+ description:
+ - The current state of the instance refresh.
+ - The following are the possible statuses
+ - Pending -- The request was created, but the operation has not started.
+ - InProgress -- The operation is in progress.
+ - Successful -- The operation completed successfully.
+ - Failed -- The operation failed to complete. You can troubleshoot using the status reason and the scaling activities.
+ - Cancelling --
+ - An ongoing operation is being cancelled.
+ - Cancellation does not roll back any replacements that have already been completed,
+ - but it prevents new replacements from being started.
+ - Cancelled -- The operation is cancelled.
+ returned: success
+ type: str
+ sample: "Pending"
+start_time:
+ description: The date and time at which the instance refresh started, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+end_time:
+ description: The date and time at which the instance refresh ended, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+percentage_complete:
+ description: The percentage of the instance refresh that is complete.
+ returned: success
+ type: int
+ sample: 100
+instances_to_update:
+ description: The number of instances remaining to update.
+ returned: success
+ type: int
+ sample: 5
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+
+def start_or_cancel_instance_refresh(conn, module):
+ """
+ Args:
+ conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+ module: AnsibleAWSModule object
+
+ Returns:
+ {
+ "instance_refreshes": [
+ {
+ 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg',
+ 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09',
+ 'instances_to_update': 1,
+ 'percentage_complete': 0,
+ "preferences": {
+ "instance_warmup": 60,
+ "min_healthy_percentage": 90,
+ "skip_matching": false
+ },
+ 'start_time': '2021-02-04T03:39:40+00:00',
+ 'status': 'Cancelling',
+ 'status_reason': 'Replacing instances before cancelling.',
+ }
+ ]
+ }
+ """
+
+ asg_state = module.params.get('state')
+ asg_name = module.params.get('name')
+ preferences = module.params.get('preferences')
+
+ args = {}
+ args['AutoScalingGroupName'] = asg_name
+ if asg_state == 'started':
+ args['Strategy'] = module.params.get('strategy')
+ if preferences:
+ if asg_state == 'cancelled':
+ module.fail_json(msg='can not pass preferences dict when cancelling a refresh')
+ _prefs = scrub_none_parameters(preferences)
+ args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True)
+ cmd_invocations = {
+ 'cancelled': conn.cancel_instance_refresh,
+ 'started': conn.start_instance_refresh,
+ }
+ try:
+ if module.check_mode:
+ if asg_state == 'started':
+ ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', [])
+ if ongoing_refresh:
+ module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.')
+ else:
+ module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.')
+ elif asg_state == 'cancelled':
+ ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', [])[0]
+ if ongoing_refresh.get('Status', '') in ['Cancelling', 'Cancelled']:
+ module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.')
+ elif not ongoing_refresh:
+ module.exit_json(changed=False, msg='In check_mode - No active refresh found, nothing to cancel.')
+ else:
+ module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.')
+ result = cmd_invocations[asg_state](aws_retry=True, **args)
+ instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']])
+ result = dict(
+ instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0])
+ )
+ return module.exit_json(**result)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg='Failed to {0} InstanceRefresh'.format(
+ 'start' if asg_state == 'started' else 'cancel'
+ )
+ )
+
+
+def main():
+
+ argument_spec = dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['started', 'cancelled'],
+ ),
+ name=dict(required=True),
+ strategy=dict(
+ type='str',
+ default='Rolling',
+ required=False
+ ),
+ preferences=dict(
+ type='dict',
+ required=False,
+ options=dict(
+ min_healthy_percentage=dict(type='int', default=90),
+ instance_warmup=dict(type='int'),
+ )
+ ),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ autoscaling = module.client(
+ 'autoscaling',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=10,
+ catch_extra_error_codes=['InstanceRefreshInProgress']
+ )
+ )
+
+ start_or_cancel_instance_refresh(autoscaling, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ec2_asg_instance_refresh_info.py b/ec2_asg_instance_refresh_info.py
new file mode 100644
index
00000000000..d4a12380098
--- /dev/null
+++ b/ec2_asg_instance_refresh_info.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_instance_refresh_info
+version_added: 3.2.0
+short_description: Gather information about ec2 Auto Scaling Group (ASG) Instance Refreshes in AWS
+description:
+ - Describes one or more instance refreshes.
+ - You can determine the status of a request by looking at the I(status) parameter.
+author: "Dan Khersonsky (@danquixote)"
+options:
+ name:
+ description:
+ - The name of the Auto Scaling group.
+ type: str
+ required: true
+ ids:
+ description:
+ - One or more instance refresh IDs.
+ type: list
+ elements: str
+ default: []
+ next_token:
+ description:
+ - The token for the next set of items to return. (You received this token from a previous call.)
+ type: str
+ max_records:
+ description:
+ - The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.
+ type: int
+ required: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''

+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find a refresh by ASG name
+ community.aws.ec2_asg_instance_refresh_info:
+ name: somename-asg
+
+- name: Find a refresh by ASG name and one or more refresh-IDs
+ community.aws.ec2_asg_instance_refresh_info:
+ name: somename-asg
+ ids: ['some-id-123']
+ register: asgs
+
+- name: Find a refresh by ASG name and set max_records
+ community.aws.ec2_asg_instance_refresh_info:
+ name: somename-asg
+ max_records: 4
+ register: asgs
+
+- name: Find a refresh by ASG name and NextToken, if received from a previous call
+ community.aws.ec2_asg_instance_refresh_info:
+ name: somename-asg
+ next_token: 'some-token-123'
+ register: asgs
+'''
+
+RETURN = '''
+---
+instance_refresh_id:
+ description: The ID of the instance refresh.
+ returned: success
+ type: str
+ sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b"
+auto_scaling_group_name:
+ description: The name of the Auto Scaling group.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+status:
+ description:
+ - The current state of the instance refresh.
+ - The following are the possible statuses
+ - Pending -- The request was created, but the operation has not started.
+ - InProgress -- The operation is in progress.
+ - Successful -- The operation completed successfully.
+ - Failed -- The operation failed to complete. You can troubleshoot using the status reason and the scaling activities.
+ - Cancelling --
+ - An ongoing operation is being cancelled.
+ - Cancellation does not roll back any replacements that have already been completed,
+ - but it prevents new replacements from being started.
+ - Cancelled -- The operation is cancelled.
+ returned: success
+ type: str
+ sample: "Pending"
+start_time:
+ description: The date and time at which the instance refresh started, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+end_time:
+ description: The date and time at which the instance refresh ended, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+percentage_complete:
+ description: The percentage of the instance refresh that is complete.
+ returned: success
+ type: int
+ sample: 100
+instances_to_update:
+ description: The number of instances remaining to update.
+ returned: success
+ type: int
+ sample: 5
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def find_asg_instance_refreshes(conn, module):
+ """
+ Args:
+ conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+ module: AnsibleAWSModule object
+
+ Returns:
+ {
+ "instance_refreshes": [
+ {
+ 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg',
+ 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09',
+ 'instances_to_update': 1,
+ 'percentage_complete': 0,
+ "preferences": {
+ "instance_warmup": 60,
+ "min_healthy_percentage": 90,
+ "skip_matching": false
+ },
+ 'start_time': '2021-02-04T03:39:40+00:00',
+ 'status': 'Cancelled',
+ 'status_reason': 'Cancelled due to user request.',
+ }
+ ],
+ 'next_token': 'string'
+ }
+ """
+
+ asg_name = module.params.get('name')
+ asg_ids = module.params.get('ids')
+ asg_next_token = module.params.get('next_token')
+ asg_max_records = module.params.get('max_records')
+
+ args = {}
+ args['AutoScalingGroupName'] = asg_name
+ if asg_ids:
+ args['InstanceRefreshIds'] = asg_ids
+ if asg_next_token:
+ args['NextToken'] = asg_next_token
+ if asg_max_records:
+ args['MaxRecords'] = asg_max_records
+
+ try:
+ instance_refreshes_result = {}
+ response = conn.describe_instance_refreshes(**args)
+ if 'InstanceRefreshes' in response:
+ instance_refreshes_dict = dict(
+ instance_refreshes=response['InstanceRefreshes'], next_token=response.get('NextToken', ''))
+ instance_refreshes_result = camel_dict_to_snake_dict(
+ instance_refreshes_dict)
+
+ while 'NextToken' in response:
+ args['NextToken'] = response['NextToken']
+ response = conn.describe_instance_refreshes(**args)
+ if 'InstanceRefreshes' in response:
+ instance_refreshes_dict = camel_dict_to_snake_dict(dict(
+ instance_refreshes=response['InstanceRefreshes'], next_token=response.get('NextToken', '')))
+ instance_refreshes_result.update(instance_refreshes_dict)
+
+ return module.exit_json(**instance_refreshes_result)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes')
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ ids=dict(required=False, default=[], elements='str', type='list'),
+ next_token=dict(required=False, default=None, type='str', no_log=True),
+ max_records=dict(required=False, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ autoscaling = module.client(
+ 'autoscaling',
+ retry_decorator=AWSRetry.jittered_backoff(retries=10)
+ )
+ find_asg_instance_refreshes(autoscaling, module)
+
+
+if __name__ == '__main__':
+ main()
From b6b71a175133ce52455dc9d202ed59f8c06a3f4f Mon Sep 17 00:00:00 2001
From: Mark Woolley
Date: Fri, 25 Mar 2022 22:07:35 +0000
Subject: [PATCH 400/683] Fix introduced bug (#1015)

Fix introduced bug in elb_target_group_info
SUMMARY
Fix bug introduced here: #1001
ISSUE TYPE
Bugfix Pull Request
COMPONENT NAME
elb_target_group_info
ADDITIONAL INFORMATION
Reviewed-by: Alina Buzachis
Reviewed-by: Markus Bergholz
---
elb_target_group_info.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/elb_target_group_info.py b/elb_target_group_info.py
index 1237a594e6b..c17b61ab669 100644
--- a/elb_target_group_info.py
+++ b/elb_target_group_info.py
@@ -218,7 +218,7 @@
 @AWSRetry.jittered_backoff()
-def get_paginator(client, **kwargs):
+def get_paginator(**kwargs):
 paginator = client.get_paginator('describe_target_groups')
 return paginator.paginate(**kwargs).build_full_result()

From 0575168abaf1d48182ca08ae5b890189b69659cc Mon Sep 17 00:00:00 2001
From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com>
Date: Sat, 26 Mar 2022 14:58:32 -0400
Subject: [PATCH 401/683] elb_application_lb - support alb attributes (#963)

elb_application_lb - support alb attributes
SUMMARY
Support modifying different ALB-specific attributes
Fixes #571
Depends-On ansible-collections/amazon.aws#696
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
elb_application_lb
Reviewed-by: Jill R
Reviewed-by: Markus Bergholz
Reviewed-by: Alina Buzachis
Reviewed-by: Joseph Torcasso
---
elb_application_lb.py | 51 ++++++++++++++++++++++++++++++++++++++----
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/elb_application_lb.py b/elb_application_lb.py
index 448eba4c1aa..430647e7995 100644
--- a/elb_application_lb.py
+++ b/elb_application_lb.py
@@ -49,13 +49,38 @@
 deletion_protection:
 description:
 - Indicates whether deletion protection for the ALB is enabled.
- - Defaults to C(false).
+ - Defaults to C(False).
 type: bool
 http2:
 description:
 - Indicates whether to enable HTTP2 routing.
- - Defaults to C(false).
+ - Defaults to C(True).
 type: bool
+ http_desync_mitigation_mode:
+ description:
+ - Determines how the load balancer handles requests that might pose a security risk to an application.
+ - Defaults to C('defensive').
+ type: str
+ choices: ['monitor', 'defensive', 'strictest']
+ version_added: 3.2.0
+ http_drop_invalid_header_fields:
+ description:
+ - Indicates whether HTTP headers with invalid header fields are removed by the load balancer C(True) or routed to targets C(False).
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ http_x_amzn_tls_version_and_cipher_suite:
+ description:
+ - Indicates whether the two headers (C(x-amzn-tls-version) and C(x-amzn-tls-cipher-suite)) are added to the client request before sending it to the target.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ http_xff_client_port:
+ description:
+ - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
 idle_timeout:
 description:
 - The number of seconds to wait before an idle connection is closed.
@@ -183,6 +208,12 @@
 - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer.
 choices: [ 'ipv4', 'dualstack' ]
 type: str
+ waf_fail_open:
+ description:
+ - Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
@@ -554,6 +585,13 @@ def create_or_update_alb(alb_obj):
 alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
 alb_obj.modify_security_groups()
+ # ALB attributes
+ if not alb_obj.compare_elb_attributes():
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+ alb_obj.update_elb_attributes()
+ alb_obj.modify_elb_attributes()
+
 # Tags - only need to play with tags if tags parameter has been set to something
 if alb_obj.tags is not None:
@@ -578,10 +616,6 @@ def create_or_update_alb(alb_obj):
 alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.')
 alb_obj.create_elb()
- # ALB attributes
- alb_obj.update_elb_attributes()
- alb_obj.modify_elb_attributes()
-
 # Listeners
 listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'])
 listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
@@ -712,6 +746,10 @@ def main():
 access_logs_s3_prefix=dict(type='str'),
 deletion_protection=dict(type='bool'),
 http2=dict(type='bool'),
+ http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']),
+ http_drop_invalid_header_fields=dict(type='bool'),
+ http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'),
+ http_xff_client_port=dict(type='bool'),
 idle_timeout=dict(type='int'),
 listeners=dict(type='list',
 elements='dict',
@@ -732,6 +770,7 @@ def main():
 scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
 state=dict(choices=['present', 'absent'], default='present'),
 tags=dict(type='dict'),
+ waf_fail_open=dict(type='bool'),
 wait_timeout=dict(type='int'),
 wait=dict(default=False, type='bool'),
 purge_rules=dict(default=True, type='bool'),
From c2ae58f1626126bb3979775c36c991bf906173ab Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Mon, 28 Mar 2022 12:01:37 +0200
Subject: [PATCH 402/683] sns_topic_info new module (#673)

sns_topic_info new module
SUMMARY
sns_topic_info - new module allowing you to get all AWS SNS topics or the properties of a specific AWS SNS topic.
Fixes #601
Requires #879
ISSUE TYPE
New Module Pull Request
COMPONENT NAME
sns_topic_info
Reviewed-by: Joseph Torcasso
Reviewed-by: Alina Buzachis
Reviewed-by: Mark Woolley
Reviewed-by: Markus Bergholz
---
sns_topic_info.py | 167 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 167 insertions(+)
create mode 100644 sns_topic_info.py
diff --git a/sns_topic_info.py b/sns_topic_info.py
new file mode 100644
index 00000000000..380d712820b
--- /dev/null
+++ b/sns_topic_info.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: sns_topic_info
+short_description: Gather information about AWS SNS topics
+version_added: 3.2.0
+description:
+- The M(community.aws.sns_topic_info) module allows you to get all AWS SNS topics or the properties of a specific AWS SNS topic.
+author:
+- "Alina Buzachis (@alinabuzachis)"
+options:
+ topic_arn:
+ description: The ARN of the AWS SNS topic for which you wish to find subscriptions or list attributes.
+ required: false + type: str +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = r''' +- name: List all the topics + community.aws.sns_topic_info: + register: sns_topic_list + +- name: Get info on a specific topic + community.aws.sns_topic_info: + topic_arn: "{{ sns_arn }}" + register: sns_topic_info +''' + +RETURN = r''' +result: + description: + - The result containing the details of one or all AWS SNS topics. + returned: success + type: list + contains: + sns_arn: + description: The ARN of the topic. + type: str + returned: always + sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name" + sns_topic: + description: Dict of sns topic details. + type: complex + returned: always + contains: + delivery_policy: + description: Delivery policy for the SNS topic. + returned: when topic is owned by this AWS account + type: str + sample: > + {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0, + "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}} + display_name: + description: Display name for SNS topic. + returned: when topic is owned by this AWS account + type: str + sample: My topic name + owner: + description: AWS account that owns the topic. + returned: when topic is owned by this AWS account + type: str + sample: '111111111111' + policy: + description: Policy for the SNS topic. + returned: when topic is owned by this AWS account + type: str + sample: > + {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"}, + "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} + subscriptions: + description: List of subscribers to the topic in this AWS account. + returned: always + type: list + sample: [] + subscriptions_added: + description: List of subscribers added in this run. + returned: always + type: list + sample: [] + subscriptions_confirmed: + description: Count of confirmed subscriptions. + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_deleted: + description: Count of deleted subscriptions. + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_existing: + description: List of existing subscriptions. + returned: always + type: list + sample: [] + subscriptions_new: + description: List of new subscriptions. + returned: always + type: list + sample: [] + subscriptions_pending: + description: Count of pending subscriptions. + returned: when topic is owned by this AWS account + type: str + sample: '0' + subscriptions_purge: + description: Whether or not purge_subscriptions was set. + returned: always + type: bool + sample: true + topic_arn: + description: ARN of the SNS topic (equivalent to sns_arn). + returned: when topic is owned by this AWS account + type: str + sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic + topic_type: + description: The type of topic.
+ type: str + sample: "standard" +''' + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.sns import list_topics +from ansible_collections.community.aws.plugins.module_utils.sns import get_info + + +def main(): + argument_spec = dict( + topic_arn=dict(type='str', required=False), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + + topic_arn = module.params.get('topic_arn') + + try: + connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS.') + + if topic_arn: + results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn)) + else: + results = list_topics(connection, module) + + module.exit_json(result=results) + + +if __name__ == '__main__': + main() From c561c7bfb09935f1aa8183aaacf13a2e30e09633 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 28 Mar 2022 11:07:52 +0100 Subject: [PATCH 403/683] Refactor s3_bucket_notifications to support SNS / SQS (#940) Refactor s3_bucket_notifications to support SNS / SQS SUMMARY Refactor s3_bucket_notifications to extend the module to support the extra targets of SNS and SQS along with the currently supported Lambda functions. Summary of changes: Refactor module to support SNS/SQS targets along with current Lambda function support. Fix check mode coverage Update integration tests to more comprehensively cover functionality. Update documentation in sns_topic and sqs_queue modules to add policy setting example. Fixes: #140 ISSUE TYPE Feature Pull Request COMPONENT NAME s3_bucket_notifications ADDITIONAL INFORMATION https://boto3.amazonaws.com/v1/documentation/api/1.16.0/reference/services/s3.html#S3.Client.put_bucket_notification_configuration Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz --- s3_bucket_notification.py | 324 +++++++++++++++++++++++++++----------- sns_topic.py | 41 ++++- sqs_queue.py | 37 ++++- 3 files changed, 301 insertions(+), 101 deletions(-) diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index cb398f5ac11..ee876405b20 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -1,4 +1,6 @@ #!/usr/bin/python + +# Copyright: (c) 2021, Ansible Project # (c) 2019, XLAB d.o.o # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -11,30 +13,28 @@ --- module: s3_bucket_notification version_added: 1.0.0 -short_description: Creates, updates or deletes S3 Bucket notification for lambda +short_description: Creates, updates or deletes S3 bucket notifications targeting Lambda functions, SNS or SQS description: - - This module allows the management of AWS Lambda function bucket event mappings via the - Ansible framework. Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) - to manage function aliases and M(community.aws.lambda_policy) to modify lambda permissions. + - This module supports the creation, update and deletion of S3 bucket notification profiles targeting + either Lambda functions, SNS topics or SQS queues. + - The target for the notifications must already exist.
For Lambda functions, use module M(community.aws.lambda) + to manage the lambda function itself, M(community.aws.lambda_alias) + to manage function aliases and M(community.aws.lambda_policy) to modify lambda permissions. + For SNS or SQS, use M(community.aws.sns_topic) or M(community.aws.sqs_queue). notes: - - This module heavily depends on M(community.aws.lambda_policy) as you need to allow C(lambda:InvokeFunction) - permission for your lambda function. - + - If using a Lambda function as the target, a Lambda policy is also needed; use + M(community.aws.lambda_policy) to allow C(lambda:InvokeFunction) for the notification. author: - - XLAB d.o.o. (@xlab-si) - - Aljaz Kosir (@aljazkosir) - - Miha Plesko (@miha-plesko) + - XLAB d.o.o. (@xlab-si) + - Aljaz Kosir (@aljazkosir) + - Miha Plesko (@miha-plesko) + - Mark Woolley (@marknet15) options: event_name: description: - Unique name for event notification on bucket. required: true type: str - lambda_function_arn: - description: - - The ARN of the lambda function. - aliases: ['function_arn'] - type: str bucket_name: description: - S3 bucket name. @@ -46,6 +46,24 @@ default: "present" choices: ["present", "absent"] type: str + queue_arn: + description: + - The ARN of the SQS queue. + - Mutually exclusive with I(topic_arn) and I(lambda_function_arn). + type: str + version_added: 3.2.0 + topic_arn: + description: + - The ARN of the SNS topic. + - Mutually exclusive with I(queue_arn) and I(lambda_function_arn). + type: str + version_added: 3.2.0 + lambda_function_arn: + description: + - The ARN of the lambda function. + - Mutually exclusive with I(queue_arn) and I(topic_arn). + aliases: ['function_arn'] + type: str lambda_alias: description: - Name of the Lambda function alias. @@ -58,7 +76,7 @@ type: int events: description: - - Events that you want to be triggering notifications. You can select multiple events to send + - Events that will trigger a notification. You can select multiple events to send to the same destination, you can set up different events to send to different destinations, and you can set up a prefix or suffix for an event. However, for each bucket, individual events cannot have multiple configurations with overlapping prefixes or @@ -82,30 +100,59 @@
type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' --- -# Example that creates a lambda event notification for a bucket -- name: Process jpg image +# Examples adding notification target configs to a S3 bucket +- name: Setup bucket event notification to a Lambda function community.aws.s3_bucket_notification: state: present event_name: on_file_add_or_remove bucket_name: test-bucket - function_name: arn:aws:lambda:us-east-2:526810320200:function:test-lambda + lambda_function_arn: arn:aws:lambda:us-east-2:526810320200:function:test-lambda events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] prefix: images/ suffix: .jpg + +- name: Setup bucket event notification to SQS + community.aws.s3_bucket_notification: + state: present + event_name: on_file_add_or_remove + bucket_name: test-bucket + queue_arn: arn:aws:sqs:us-east-2:526810320200:test-queue + events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] + prefix: images/ + suffix: .jpg + +# Example removing an event notification +- name: Remove event notification + community.aws.s3_bucket_notification: + state: absent + event_name: on_file_add_or_remove + bucket_name: test-bucket ''' RETURN = r''' notification_configuration: - description: list of currently applied notifications - returned: success - type: list + description: dictionary of currently applied notifications + returned: success + type: complex + contains: + lambda_function_configurations: + description: + - List of current Lambda function notification configurations applied to the bucket. + type: list + queue_configurations: + description: + - List of current SQS notification configurations applied to the bucket. + type: list + topic_configurations: + description: + - List of current SNS notification configurations applied to the bucket. 
+ type: list ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -118,47 +165,110 @@ class AmazonBucket: - def __init__(self, client, bucket_name): + def __init__(self, module, client): + self.module = module self.client = client - self.bucket_name = bucket_name + self.bucket_name = module.params['bucket_name'] + self.check_mode = module.check_mode self._full_config_cache = None def full_config(self): if self._full_config_cache is None: - self._full_config_cache = [Config.from_api(cfg) for cfg in - self.client.get_bucket_notification_configuration( - Bucket=self.bucket_name).get( - 'LambdaFunctionConfigurations', list())] + self._full_config_cache = dict( + QueueConfigurations=[], + TopicConfigurations=[], + LambdaFunctionConfigurations=[] + ) + + try: + config_lookup = self.client.get_bucket_notification_configuration( + Bucket=self.bucket_name) + except (ClientError, BotoCoreError) as e: + self.module.fail_json(msg='{0}'.format(e)) + + # Handle different event targets + if config_lookup.get('QueueConfigurations'): + for queue_config in config_lookup.get('QueueConfigurations'): + self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config)) + + if config_lookup.get('TopicConfigurations'): + for topic_config in config_lookup.get('TopicConfigurations'): + self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config)) + + if config_lookup.get('LambdaFunctionConfigurations'): + for function_config in config_lookup.get('LambdaFunctionConfigurations'): + self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config)) + return self._full_config_cache def current_config(self, config_name): - for config in self.full_config(): - if config.raw['Id'] == config_name: - return config + # Iterate through configs and get current event config + for target_configs in self.full_config(): + for config in self.full_config()[target_configs]: + if config.raw['Id'] == config_name: + return config def apply_config(self, desired): - configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']] - configs.append(desired.raw) + configs = dict( + QueueConfigurations=[], + TopicConfigurations=[], + LambdaFunctionConfigurations=[] + ) + + # Iterate through existing configs then add the desired config + for target_configs in self.full_config(): + for config in self.full_config()[target_configs]: + if config.name != desired.raw['Id']: + configs[target_configs].append(config.raw) + + if self.module.params.get('queue_arn'): + configs['QueueConfigurations'].append(desired.raw) + if self.module.params.get('topic_arn'): + configs['TopicConfigurations'].append(desired.raw) + if self.module.params.get('lambda_function_arn'): + configs['LambdaFunctionConfigurations'].append(desired.raw) + self._upload_bucket_config(configs) return configs def delete_config(self, desired): - configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']] + configs = dict( + QueueConfigurations=[], + TopicConfigurations=[], + LambdaFunctionConfigurations=[] + ) + + # Iterate through existing configs omitting specified config + for target_configs in self.full_config(): + for config in self.full_config()[target_configs]: + if config.name != desired.raw['Id']: + configs[target_configs].append(config.raw) + self._upload_bucket_config(configs) return configs - def _upload_bucket_config(self, config): - self.client.put_bucket_notification_configuration( + def _upload_bucket_config(self, configs): + 
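+        # Assemble the put_bucket_notification_configuration payload, keeping only
+        # the target types that still have at least one configuration attached;
+        # in check mode the actual API call below is skipped.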
api_params = dict( Bucket=self.bucket_name, - NotificationConfiguration={ - 'LambdaFunctionConfigurations': config - }) + NotificationConfiguration=dict() + ) + + # Iterate through available configs + for target_configs in configs: + if len(configs[target_configs]) > 0: + api_params['NotificationConfiguration'][target_configs] = configs[target_configs] + + if not self.check_mode: + try: + self.client.put_bucket_notification_configuration(**api_params) + except (ClientError, BotoCoreError) as e: + self.module.fail_json(msg='{0}'.format(e)) class Config: def __init__(self, content): self._content = content - self.name = content['Id'] + self.name = content.get('Id') @property def raw(self): @@ -171,48 +281,65 @@ def __eq__(self, other): @classmethod def from_params(cls, **params): - function_arn = params['lambda_function_arn'] - - qualifier = None - if params['lambda_version'] > 0: - qualifier = str(params['lambda_version']) - elif params['lambda_alias']: - qualifier = str(params['lambda_alias']) - if qualifier: - params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) - - return cls({ - 'Id': params['event_name'], - 'LambdaFunctionArn': params['lambda_function_arn'], - 'Events': sorted(params['events']), - 'Filter': { - 'Key': { - 'FilterRules': [{ - 'Name': 'Prefix', - 'Value': params['prefix'] - }, { - 'Name': 'Suffix', - 'Value': params['suffix'] - }] - } - } - }) + """Generate bucket notification params for target""" + + bucket_event_params = dict( + Id=params['event_name'], + Events=sorted(params['events']), + Filter=dict( + Key=dict( + FilterRules=[ + dict( + Name='Prefix', + Value=params['prefix'] + ), + dict( + Name='Suffix', + Value=params['suffix'] + ) + ] + ) + ) + ) + + # Handle different event targets + if params.get('queue_arn'): + bucket_event_params['QueueArn'] = params['queue_arn'] + if params.get('topic_arn'): + bucket_event_params['TopicArn'] = params['topic_arn'] + if params.get('lambda_function_arn'): + function_arn = params['lambda_function_arn'] + + qualifier = None + if params['lambda_version'] > 0: + qualifier = str(params['lambda_version']) + elif params['lambda_alias']: + qualifier = str(params['lambda_alias']) + if qualifier: + params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + + bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn'] + + return cls(bucket_event_params) @classmethod def from_api(cls, config): return cls(config) -def main(): +def setup_module_object(): event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] + argument_spec = dict( state=dict(default='present', choices=['present', 'absent']), event_name=dict(required=True), lambda_function_arn=dict(aliases=['function_arn']), + queue_arn=dict(type='str'), + topic_arn=dict(type='str'), bucket_name=dict(required=True), events=dict(type='list', default=[], choices=event_types, elements='str'), prefix=dict(default=''), @@ -221,36 +348,57 @@ def main(): lambda_version=dict(type='int', default=0), ) - module = AnsibleAWSModule( + mutually_exclusive = [ + ['queue_arn', 'topic_arn', 'lambda_function_arn'], + ['lambda_alias', 'lambda_version'] + ] + + return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['lambda_alias', 
'lambda_version']], + mutually_exclusive=mutually_exclusive, + required_if=[['state', 'present', ['events']]] ) - bucket = AmazonBucket(module.client('s3'), module.params['bucket_name']) + +def main(): + module = setup_module_object() + + client = module.client('s3') + bucket = AmazonBucket(module, client) current = bucket.current_config(module.params['event_name']) desired = Config.from_params(**module.params) - notification_configuration = [cfg.raw for cfg in bucket.full_config()] + + notification_configs = dict( + QueueConfigurations=[], + TopicConfigurations=[], + LambdaFunctionConfigurations=[] + ) + + for target_configs in bucket.full_config(): + for cfg in bucket.full_config()[target_configs]: + notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw)) state = module.params['state'] - try: - if (state == 'present' and current == desired) or (state == 'absent' and not current): - changed = False - elif module.check_mode: - changed = True - elif state == 'present': + updated_configuration = dict() + changed = False + + if state == 'present': + if current != desired: + updated_configuration = bucket.apply_config(desired) changed = True - notification_configuration = bucket.apply_config(desired) - elif state == 'absent': + elif state == 'absent': + if current: + updated_configuration = bucket.delete_config(desired) changed = True - notification_configuration = bucket.delete_config(desired) - except (ClientError, BotoCoreError) as e: - module.fail_json(msg='{0}'.format(e)) - module.exit_json(**dict(changed=changed, - notification_configuration=[camel_dict_to_snake_dict(cfg) for cfg in - notification_configuration])) + for target_configs in updated_configuration: + notification_configs[target_configs] = [] + for cfg in updated_configuration.get(target_configs, list()): + notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg)) + + module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict( + notification_configs)) if __name__ == '__main__': diff --git a/sns_topic.py b/sns_topic.py index 817729c33e8..9755450c455 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -44,6 +44,8 @@ policy: description: - Policy to apply to the SNS topic. + - Policy body can be YAML or JSON. + - This is required for certain use cases, for example with S3 bucket notifications.
type: dict delivery_policy: description: @@ -155,20 +157,45 @@ delivery_policy: http: defaultHealthyRetryPolicy: - minDelayTarget: 2 - maxDelayTarget: 4 - numRetries: 3 - numMaxDelayRetries: 5 - backoffFunction: "" + minDelayTarget: 2 + maxDelayTarget: 4 + numRetries: 9 + numMaxDelayRetries: 5 + numMinDelayRetries: 2 + numNoDelayRetries: 2 + backoffFunction: "linear" disableSubscriptionOverrides: True defaultThrottlePolicy: - maxReceivesPerSecond: 10 + maxReceivesPerSecond: 10 subscriptions: - endpoint: "my_email_address@example.com" protocol: "email" - endpoint: "my_mobile_number" protocol: "sms" +- name: Create a topic permitting S3 bucket notifications + community.aws.sns_topic: + name: "S3Notifications" + state: present + display_name: "S3 notifications SNS topic" + policy: + Id: s3-topic-policy + Version: 2012-10-17 + Statement: + - Sid: Statement-id + Effect: Allow + Resource: "arn:aws:sns:*:*:S3Notifications" + Principal: + Service: s3.amazonaws.com + Action: sns:Publish + Condition: + ArnLike: + aws:SourceArn: "arn:aws:s3:*:*:SomeBucket" + +- name: Example deleting a topic + community.aws.sns_topic: + name: "ExampleTopic" + state: absent """ RETURN = r''' @@ -177,7 +204,7 @@ type: str returned: always sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name" -community.aws.sns_topic: +sns_topic: description: Dict of sns topic details type: complex returned: always diff --git a/sqs_queue.py b/sqs_queue.py index 79e19cbda9d..1be1936c55c 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: sqs_queue version_added: 1.0.0 @@ -64,7 +64,9 @@ type: int policy: description: - - The JSON dict policy to attach to queue. + - Policy to attach to the queue. + - Policy body can be YAML or JSON. + - This is required for certain use cases, for example with S3 bucket notifications.
type: bool @@ -169,7 +171,7 @@ sample: '{"Env": "prod"}' ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create SQS queue with redrive policy community.aws.sqs_queue: name: my-queue @@ -211,6 +213,29 @@ kms_master_key_id: alias/MyQueueKey kms_data_key_reuse_period_seconds: 3600 +- name: Example queue allowing s3 bucket notifications + sqs_queue: + name: "S3Notifications" + default_visibility_timeout: 120 + message_retention_period: 86400 + maximum_message_size: 1024 + delivery_delay: 30 + receive_message_wait_time: 20 + policy: + Version: 2012-10-17 + Id: s3-queue-policy + Statement: + - Sid: allowNotifications + Effect: Allow + Principal: + Service: s3.amazonaws.com + Action: + - SQS:SendMessage + Resource: "arn:aws:sqs:*:*:S3Notifications" + Condition: + ArnLike: + aws:SourceArn: "arn:aws:s3:*:*:SomeBucket" + - name: Delete SQS queue community.aws.sqs_queue: name: my-queue From a9ac085d6134027fc7ccb8f8f1076ca37abed33d Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 28 Mar 2022 11:10:52 +0100 Subject: [PATCH 404/683] Add support for load_balancing_algorithm_type in elb_target_group (#1016) Add support for load_balancing_algorithm_type in elb_target_group SUMMARY AWS has supported setting the load_balancing.algorithm.type for a little while now, allowing you to choose between either round_robin or least_outstanding_requests; this PR adds support for setting the new parameter. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_target_group ADDITIONAL INFORMATION https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.modify_target_group_attributes https://aws.amazon.com/about-aws/whats-new/2019/11/application-load-balancer-now-supports-least-outstanding-requests-algorithm-for-load-balancing-requests/ Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley --- elb_target_group.py | 216 ++++++++++++++++++++++++++------------ 1 file changed, 127 insertions(+), 89 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 229e2129bfe..917e352c75b 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -25,6 +25,7 @@ deregistration_connection_termination: description: - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. + - Using this option is only supported when attaching to a Network Load Balancer (NLB). type: bool default: false required: false @@ -127,6 +128,14 @@ - Valid values are C(lb_cookie), C(app_cookie) or C(source_ip). - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers. type: str + load_balancing_algorithm_type: + description: + - The type of load balancing algorithm to use. + - Changing the load balancing algorithm is only supported when used with Application Load Balancers (ALB). + - If not set AWS will default to C(round_robin). + choices: ['round_robin', 'least_outstanding_requests'] + type: str + version_added: 3.2.0 successful_response_codes: description: - The HTTP codes to use when checking for a successful response from a target. @@ -387,6 +396,12 @@ returned: when state present type: str sample: lb_cookie +load_balancing_algorithm_type: + description: The type of load balancing algorithm used. + returned: when state present + type: str + version_added: 3.2.0 + sample: least_outstanding_requests tags: description: The tags attached to the target group.
returned: when state present @@ -486,6 +501,71 @@ def wait_for_status(connection, module, target_group_arn, targets, status): return status_achieved, result +def create_or_update_attributes(connection, module, target_group, new_target_group): + changed = False + target_type = module.params.get("target_type") + deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") + deregistration_connection_termination = module.params.get("deregistration_connection_termination") + stickiness_enabled = module.params.get("stickiness_enabled") + stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") + stickiness_type = module.params.get("stickiness_type") + stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration") + stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name") + preserve_client_ip_enabled = module.params.get("preserve_client_ip_enabled") + proxy_protocol_v2_enabled = module.params.get("proxy_protocol_v2_enabled") + load_balancing_algorithm_type = module.params.get("load_balancing_algorithm_type") + + # Now set target group attributes + update_attributes = [] + + # Get current attributes + current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn']) + + if deregistration_delay_timeout is not None: + if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: + update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if deregistration_connection_termination is not None: + if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": + update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) + if stickiness_enabled is not None: + if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": + update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) + if stickiness_lb_cookie_duration is not None: + if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: + update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) + if stickiness_type is not None: + if stickiness_type != current_tg_attributes.get('stickiness_type'): + update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + if stickiness_app_cookie_name is not None: + if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): + update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) + if stickiness_app_cookie_duration is not None: + if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: + update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) + if preserve_client_ip_enabled is not None: + if target_type not in ('udp', 'tcp_udp'): + if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): + update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) + if proxy_protocol_v2_enabled is not None: + if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): + update_attributes.append({'Key': 
'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) + if load_balancing_algorithm_type is not None: + if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']: + update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)}) + + if update_attributes: + try: + connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state + if new_target_group: + connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + module.fail_json_aws(e, msg="Couldn't delete target group") + + return changed + + def create_or_update_target_group(connection, module): changed = False @@ -500,15 +580,6 @@ def create_or_update_target_group(connection, module): params['VpcId'] = module.params.get("vpc_id") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") - deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") - deregistration_connection_termination = module.params.get("deregistration_connection_termination") - stickiness_enabled = module.params.get("stickiness_enabled") - stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") - stickiness_type = module.params.get("stickiness_type") - stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration") - stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name") - preserve_client_ip_enabled = module.params.get("preserve_client_ip_enabled") - proxy_protocol_v2_enabled = module.params.get("proxy_protocol_v2_enabled") health_option_keys = [ "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", @@ -549,11 +620,11 @@ def create_or_update_target_group(connection, module): params['Matcher']['HttpCode'] = module.params.get("successful_response_codes") # Get target group - tg = get_target_group(connection, module) + target_group = get_target_group(connection, module) - if tg: + if target_group: diffs = [param for param in ('Port', 'Protocol', 'VpcId') - if tg.get(param) != params.get(param)] + if target_group.get(param) != params.get(param)] if diffs: module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % ", ".join(diffs)) @@ -564,39 +635,39 @@ def create_or_update_target_group(connection, module): if health_options: # Health check protocol - if 'HealthCheckProtocol' in params and tg['HealthCheckProtocol'] != params['HealthCheckProtocol']: + if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']: health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol'] # Health check port - if 'HealthCheckPort' in params and tg['HealthCheckPort'] != params['HealthCheckPort']: + if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']: health_check_params['HealthCheckPort'] = params['HealthCheckPort'] # Health check interval - if 'HealthCheckIntervalSeconds' in params and tg['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']: + if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != 
params['HealthCheckIntervalSeconds']: health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds'] # Health check timeout - if 'HealthCheckTimeoutSeconds' in params and tg['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']: + if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']: health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds'] # Healthy threshold - if 'HealthyThresholdCount' in params and tg['HealthyThresholdCount'] != params['HealthyThresholdCount']: + if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']: health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount'] # Unhealthy threshold - if 'UnhealthyThresholdCount' in params and tg['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']: + if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']: health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount'] # Only need to check response code and path for http(s) health checks - if tg['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: + if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: # Health check path - if 'HealthCheckPath' in params and tg['HealthCheckPath'] != params['HealthCheckPath']: + if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']: health_check_params['HealthCheckPath'] = params['HealthCheckPath'] # Matcher (successful response codes) # TODO: required and here? if 'Matcher' in params: - current_matcher_list = tg['Matcher']['HttpCode'].split(',') + current_matcher_list = target_group['Matcher']['HttpCode'].split(',') requested_matcher_list = params['Matcher']['HttpCode'].split(',') if set(current_matcher_list) != set(requested_matcher_list): health_check_params['Matcher'] = {} @@ -604,7 +675,7 @@ def create_or_update_target_group(connection, module): try: if health_check_params: - connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True, **health_check_params) + connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update target group") @@ -615,7 +686,7 @@ def create_or_update_target_group(connection, module): # describe_target_health seems to be the only way to get them try: current_targets = connection.describe_target_health( - TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) + TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group health") @@ -647,14 +718,16 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) + connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], 
instances_to_add, 'healthy') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy') if not status_achieved: - module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + module.fail_json( + msg='Error waiting for target registration to be healthy - please check the AWS console') remove_instances = set(current_instance_ids) - set(new_instance_ids) @@ -666,14 +739,16 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') if not status_achieved: - module.fail_json(msg='Error waiting for target deregistration - please check the AWS console') + module.fail_json( + msg='Error waiting for target deregistration - please check the AWS console') # register lambda target else: @@ -691,7 +766,7 @@ def create_or_update_target_group(connection, module): if changed: if target.get("Id"): response = connection.register_targets( - TargetGroupArn=tg['TargetGroupArn'], + TargetGroupArn=target_group['TargetGroupArn'], Targets=[ { "Id": target['Id'] @@ -715,14 +790,16 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') if not status_achieved: - module.fail_json(msg='Error waiting for target deregistration - please check the AWS console') + module.fail_json( + msg='Error waiting for target deregistration - please check the AWS console') # remove lambda targets else: @@ -733,7 +810,7 @@ def create_or_update_target_group(connection, module): target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] if changed: connection.deregister_targets( - TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) + TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) else: try: connection.create_target_group(aws_retry=True, **params) @@ -742,18 +819,18 @@ def create_or_update_target_group(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create target group") - tg = get_target_group(connection, module, retry_missing=True) + 
target_group = get_target_group(connection, module, retry_missing=True) if module.params.get("targets"): if target_type != "lambda": params['Targets'] = module.params.get("targets") try: - connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets'], aws_retry=True) + connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], params['Targets'], 'healthy') + status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy') if not status_achieved: module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') @@ -761,7 +838,7 @@ def create_or_update_target_group(connection, module): try: target = module.params.get("targets")[0] response = connection.register_targets( - TargetGroupArn=tg['TargetGroupArn'], + TargetGroupArn=target_group['TargetGroupArn'], Targets=[ { "Id": target["Id"] @@ -774,61 +851,21 @@ def create_or_update_target_group(connection, module): module.fail_json_aws( e, msg="Couldn't register targets") - # Now set target group attributes - update_attributes = [] - - # Get current attributes - current_tg_attributes = get_tg_attributes(connection, module, tg['TargetGroupArn']) - - if deregistration_delay_timeout is not None: - if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: - update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) - if deregistration_connection_termination is not None: - if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": - update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) - if stickiness_enabled is not None: - if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": - update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) - if stickiness_lb_cookie_duration is not None: - if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) - if stickiness_type is not None: - if stickiness_type != current_tg_attributes.get('stickiness_type'): - update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) - if stickiness_app_cookie_name is not None: - if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): - update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) - if stickiness_app_cookie_duration is not None: - if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) - if preserve_client_ip_enabled is not None: - if target_type not in ('udp', 'tcp_udp'): - if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): - 
update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) - if proxy_protocol_v2_enabled is not None: - if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): - update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) + attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group) - if update_attributes: - try: - connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state - if new_target_group: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) - module.fail_json_aws(e, msg="Couldn't delete target group") + if attributes_update: + changed = True # Tags - only need to play with tags if tags parameter has been set to something if tags: # Get tags - current_tags = get_target_group_tags(connection, module, tg['TargetGroupArn']) + current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn']) # Delete necessary tags tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags) if tags_to_delete: try: - connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) + connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags from target group") changed = True @@ -836,21 +873,21 @@ def create_or_update_target_group(connection, module): # Add/update tags if tags_need_modify: try: - connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) + connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to target group") changed = True # Get the target group again - tg = get_target_group(connection, module) + target_group = get_target_group(connection, module) # Get the target group attributes again - tg.update(get_tg_attributes(connection, module, tg['TargetGroupArn'])) + target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn'])) - # Convert tg to snake_case - snaked_tg = camel_dict_to_snake_dict(tg) + # Convert target_group to snake_case + snaked_tg = camel_dict_to_snake_dict(target_group) - snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, tg['TargetGroupArn'])) + snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn'])) module.exit_json(changed=changed, **snaked_tg) @@ -891,6 +928,7 @@ def main(): stickiness_lb_cookie_duration=dict(type='int'), stickiness_app_cookie_duration=dict(type='int'), stickiness_app_cookie_name=dict(), + load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']), state=dict(required=True, 
choices=['present', 'absent']), successful_response_codes=dict(), tags=dict(default={}, type='dict'), From baff7f634d990034052f35a3e551e871607a8117 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 29 Mar 2022 15:10:42 +0200 Subject: [PATCH 405/683] rds_subnet_group: Add tags feature and enable check_mode (#562) rds_subnet_group: Add tags feature and enable check_mode SUMMARY rds_subnet_group: Add tags feature and enable check_mode Fixes: #552 Depends-On: ansible-collections/amazon.aws#553 ISSUE TYPE Feature Pull Request COMPONENT NAME rds_subnet_group Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz --- rds_subnet_group.py | 249 +++++++++++++++++++++++++++++--------- 1 file changed, 200 insertions(+), 49 deletions(-) diff --git a/rds_subnet_group.py b/rds_subnet_group.py index 7d789481c43..b0a9f8ae806 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -39,7 +39,21 @@ - Required when I(state=present). type: list elements: str -author: "Scott Anderson (@tastychutney)" + tags: + description: + - A hash/dictionary of tags to add to the new RDS subnet group or to add/remove from an existing one. + type: dict + version_added: 3.2.0 + purge_tags: + description: + - Whether or not to remove tags assigned to the RDS subnet group if not specified in the playbook. + - To remove all tags set I(tags) to an empty dictionary in conjunction with this. + default: True + type: bool + version_added: 3.2.0 +author: + - "Scott Anderson (@tastychutney)" + - "Alina Buzachis (@alinabuzachis)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -56,6 +70,18 @@ - subnet-aaaaaaaa - subnet-bbbbbbbb +- name: Add or change a subnet group and associate tags + community.aws.rds_subnet_group: + state: present + name: norwegian-blue + description: My Fancy Ex Parrot Subnet Group + subnets: + - subnet-aaaaaaaa + - subnet-bbbbbbbb + tags: + tag1: Tag1 + tag2: Tag2 + - name: Remove a subnet group community.aws.rds_subnet_group: state: absent @@ -63,6 +89,11 @@ ''' RETURN = r''' +changed: + description: True if the RDS subnet group was created, modified or deleted. + type: bool + returned: always + sample: "false" subnet_group: description: Dictionary of DB subnet group values returned: I(state=present) type: complex @@ -72,46 +103,95 @@ description: The name of the DB subnet group (maintained for backward compatibility) returned: I(state=present) type: str + sample: "ansible-test-mbp-13950442" db_subnet_group_name: description: The name of the DB subnet group returned: I(state=present) type: str + sample: "ansible-test-mbp-13950442" description: description: The description of the DB subnet group (maintained for backward compatibility) returned: I(state=present) type: str + sample: "Simple description." db_subnet_group_description: description: The description of the DB subnet group returned: I(state=present) type: str + sample: "Simple description." vpc_id: description: The VpcId of the DB subnet group returned: I(state=present) type: str + sample: "vpc-0acb0ba033ff2119c" subnet_ids: description: Contains a list of Subnet IDs returned: I(state=present) type: list + sample: + "subnet-08c94870f4480797e" subnets: description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa returned: I(state=present) type: list + contains: + subnet_availability_zone: + description: Contains Availability Zone information.
+ returned: I(state=present) + type: dict + version_added: 3.2.0 + sample: + name: "eu-north-1b" + subnet_identifier: + description: The identifier of the subnet. + returned: I(state=present) + type: str + version_added: 3.2.0 + sample: "subnet-08c94870f4480797e" + subnet_outpost: + description: This value specifies the Outpost. + returned: I(state=present) + type: dict + version_added: 3.2.0 + sample: {} + subnet_status: + description: The status of the subnet. + returned: I(state=present) + type: str + version_added: 3.2.0 + sample: "Active" status: description: The status of the DB subnet group (maintained for backward compatibility) returned: I(state=present) type: str + sample: "Complete" subnet_group_status: description: The status of the DB subnet group returned: I(state=present) type: str + sample: "Complete" db_subnet_group_arn: description: The ARN of the DB subnet group returned: I(state=present) type: str + sample: "arn:aws:rds:eu-north-1:721066863947:subgrp:ansible-test-13950442" + tags: + description: The tags associated with the subnet group + returned: I(state=present) + type: dict + version_added: 3.2.0 + sample: + tag1: Tag1 + tag2: Tag2 ''' from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags try: @@ -125,7 +205,7 @@ def create_result(changed, subnet_group=None): return dict( changed=changed ) - result_subnet_group = dict(camel_dict_to_snake_dict(subnet_group)) + result_subnet_group = dict(subnet_group) result_subnet_group['name'] = result_subnet_group.get( 'db_subnet_group_name') result_subnet_group['description'] = result_subnet_group.get( @@ -133,15 +213,39 @@ def create_result(changed, subnet_group=None): result_subnet_group['status'] = result_subnet_group.get( 'subnet_group_status') result_subnet_group['subnet_ids'] = create_subnet_list( - subnet_group.get('Subnets')) + subnet_group.get('subnets')) return dict( changed=changed, subnet_group=result_subnet_group ) +@AWSRetry.jittered_backoff() +def _describe_db_subnet_groups_with_backoff(client, **kwargs): + paginator = client.get_paginator('describe_db_subnet_groups') + return paginator.paginate(**kwargs).build_full_result() + + +def get_subnet_group(client, module): + params = dict() + params['DBSubnetGroupName'] = module.params.get('name').lower() + + try: + _result = _describe_db_subnet_groups_with_backoff(client, **params) + except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't describe subnet groups.") + + if _result: + result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0]) + result['tags'] = get_tags(client, module, result['db_subnet_group_arn']) + + return result + + def create_subnet_list(subnets): - ''' + r''' Construct a list of subnet ids from a list of subnets dicts returned by 
boto3. Parameters: subnets (list): A list of subnets definitions. @@ -151,7 +255,7 @@ ''' subnets_ids = [] for subnet in subnets: - subnets_ids.append(subnet.get('SubnetIdentifier')) + subnets_ids.append(subnet.get('subnet_identifier')) return subnets_ids @@ -161,64 +265,111 @@ def main(): name=dict(required=True), description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), + tags=dict(required=False, type='dict'), + purge_tags=dict(type='bool', default=True), ) required_if = [('state', 'present', ['description', 'subnets'])] + module = AnsibleAWSModule( - argument_spec=argument_spec, required_if=required_if) + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True + ) + state = module.params.get('state') group_name = module.params.get('name').lower() group_description = module.params.get('description') group_subnets = module.params.get('subnets') or [] try: - conn = module.client('rds') + connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to instantiate AWS connection') + module.fail_json_aws(e, 'Failed to instantiate AWS connection.') + # Default. + changed = None result = create_result(False) + tags_update = False + subnet_update = False - try: - matching_groups = conn.describe_db_subnet_groups( - DBSubnetGroupName=group_name, MaxRecords=100).get('DBSubnetGroups') - except is_boto3_error_code('DBSubnetGroupNotFoundFault'): - # No existing subnet, create it if needed, else we can just exit. - if state == 'present': + if module.params.get("tags") is not None: + _tags = ansible_dict_to_boto3_tag_list(module.params.get("tags")) + else: + _tags = list() + + matching_groups = get_subnet_group(connection, module) + + if state == 'present': + if matching_groups: + # We have one or more subnets at this point. + + # Check if there are any tag updates + tags_update = ensure_tags( + connection, + module, + matching_groups['db_subnet_group_arn'], + matching_groups['tags'], + module.params.get("tags"), + module.params['purge_tags'] + ) + + # Sort the subnet groups before we compare them + existing_subnets = create_subnet_list(matching_groups['subnets']) + existing_subnets.sort() + group_subnets.sort() + + # See if anything changed. + if ( + matching_groups['db_subnet_group_name'] != group_name or + matching_groups['db_subnet_group_description'] != group_description or + existing_subnets != group_subnets + ): + if not module.check_mode: + # Modify existing group.
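+                    # Tags were already reconciled by ensure_tags() above, so this
+                    # call only needs to update the description and subnet list.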
+ try: + connection.modify_db_subnet_group( + aws_retry=True, + DBSubnetGroupName=group_name, + DBSubnetGroupDescription=group_description, + SubnetIds=group_subnets + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to update a subnet group.') + subnet_update = True + else: + if not module.check_mode: + try: + connection.create_db_subnet_group( + aws_retry=True, + DBSubnetGroupName=group_name, + DBSubnetGroupDescription=group_description, + SubnetIds=group_subnets, + Tags=_tags + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to create a new subnet group.') + subnet_update = True + elif state == 'absent': + if not module.check_mode: try: - new_group = conn.create_db_subnet_group( - DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets) - result = create_result(True, new_group.get('DBSubnetGroup')) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to create a new subnet group') - module.exit_json(**result) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 'Failed to get subnet groups description') - # We have one or more subnets at this point. - if state == 'absent': - try: - conn.delete_db_subnet_group(DBSubnetGroupName=group_name) - result = create_result(True) + connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name) + except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + module.exit_json(**result) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, 'Failed to delete a subnet group.') + else: + subnet_group = get_subnet_group(connection, module) + if subnet_group: + subnet_update = True + result = create_result(subnet_update, subnet_group) module.exit_json(**result) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to delete a subnet group') - - # Sort the subnet groups before we compare them - existing_subnets = create_subnet_list(matching_groups[0].get('Subnets')) - existing_subnets.sort() - group_subnets.sort() - # See if anything changed. - if (matching_groups[0].get('DBSubnetGroupName') == group_name and - matching_groups[0].get('DBSubnetGroupDescription') == group_description and - existing_subnets == group_subnets): - result = create_result(False, matching_groups[0]) - module.exit_json(**result) - # Modify existing group. 
- try: - changed_group = conn.modify_db_subnet_group( - DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets) - result = create_result(True, changed_group.get('DBSubnetGroup')) - module.exit_json(**result) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to update a subnet group') + + subnet_update = True + + subnet_group = get_subnet_group(connection, module) + changed = tags_update or subnet_update + result = create_result(changed, subnet_group) + module.exit_json(**result) if __name__ == '__main__': From 793818602028d2fb6924f7a50671b4ad3d3f9f23 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 30 Mar 2022 13:58:09 -0400 Subject: [PATCH 406/683] rds_instance - add valid choices for engine type (#1034) rds_instance - add valid choices for engine type SUMMARY Add valid choices for engine and update integration tests with the new error message ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance Reviewed-by: Jill R --- rds_instance.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index 4a1086f24ec..cdd0d13fa90 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -187,8 +187,8 @@ engine: description: - The name of the database engine to be used for this DB instance. This is required to create an instance. - Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se | - oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web + choices: ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', + 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] type: str engine_version: description: @@ -787,6 +787,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', + 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] + def get_rds_method_attribute_name(instance, state, creation_source, read_replica): method_name = None @@ -1147,7 +1150,7 @@ def main(): enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'), enable_iam_database_authentication=dict(type='bool'), enable_performance_insights=dict(type='bool'), - engine=dict(), + engine=dict(type='str', choices=valid_engines), engine_version=dict(), final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), force_failover=dict(type='bool'), From 179cd64753b76cecbb784d83a407c4bbf46f0db7 Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 31 Mar 2022 03:10:15 -0300 Subject: [PATCH 407/683] New module aws_eks_fargate_profile (#942) New module aws_eks_fargate_profile SUMMARY (this is a copy of #941 because I messed up my original branch) New Module - aws_eks_fargate_profile This creates a new module to manage Fargate profiles in EKS clusters.
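For completeness, removing a profile is symmetric (a minimal sketch mirroring the creation example under ADDITIONAL INFORMATION below; the profile and cluster names are illustrative):

- name: delete Fargate Profile
  aws_eks_fargate_profile:
    name: 'my-profile'
    cluster_name: 'my-eks-cluster'
    state: absent
    wait: true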
Requires: ansible-collections/amazon.aws#651 ISSUE TYPE New Module Pull Request COMPONENT NAME aws_eks_fargate_profile ADDITIONAL INFORMATION community.aws.aws_eks_fargate_profile - name: create Fargate Profile aws_eks_fargate_profile: name: 'my-profile' state: present cluster_name: 'my-eks-cluster' role_arn: 'arn:aws:iam::999999999999:role/eks-FargatePodExecutionRole' subnets: - subnet-aaaa1111 selectors: - namespace: 'test_nm' labels: label1: test wait: true tags: env: test foo: bar Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: None Reviewed-by: Alina Buzachis Reviewed-by: Jill R --- eks_fargate_profile.py | 353 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 353 insertions(+) create mode 100644 eks_fargate_profile.py diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py new file mode 100644 index 00000000000..973b9497c2e --- /dev/null +++ b/eks_fargate_profile.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# Copyright (c) 2022 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: eks_fargate_profile +version_added: 3.2.0 +short_description: Manage EKS Fargate Profile +description: + - Manage EKS Fargate Profile. +author: Tiago Jarra (@tjarra) +options: + name: + description: Name of EKS Fargate Profile. + required: True + type: str + cluster_name: + description: Name of EKS Cluster. + required: True + type: str + role_arn: + description: + - ARN of the IAM role used as the pod execution role by the Fargate profile. + - Required when I(state=present). + type: str + subnets: + description: + - List of subnet IDs for the Kubernetes cluster. + - Required when I(state=present). + type: list + elements: str + selectors: + description: + - A list of selectors to use in the Fargate profile. + - Required when I(state=present). + type: list + elements: dict + suboptions: + namespace: + description: A namespace used in the Fargate profile. + type: str + labels: + description: A dictionary of labels used in the Fargate profile. + type: dict + state: + description: Create or delete the Fargate Profile. + choices: + - absent + - present + default: present + type: str + tags: + description: A dictionary of resource tags. + type: dict + purge_tags: + description: + - Purge existing tags that are not found in I(tags). + type: bool + default: true + wait: + description: >- + Specifies whether the module waits until the profile is created or deleted before moving on. + type: bool + default: false + wait_timeout: + description: >- + The duration in seconds to wait for the profile to become active or be deleted. Defaults + to 1200 seconds (20 minutes). + default: 1200 + type: int +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create an EKS Fargate Profile + community.aws.eks_fargate_profile: + name: test_fargate + cluster_name: test_cluster + role_arn: my_eks_role + subnets: + - subnet-aaaa1111 + selectors: + - namespace: nm-test + labels: + label1: test + state: present + wait: yes + +- name: Remove an EKS Fargate Profile + community.aws.eks_fargate_profile: + name: test_fargate + cluster_name: test_cluster + wait: yes + state: absent +''' + +RETURN = r''' +fargate_profile_name: + description: Name of Fargate Profile.
+ returned: when state is present + type: str + sample: test_profile +fargate_profile_arn: + description: ARN of the Fargate Profile. + returned: when state is present + type: str + sample: arn:aws:eks:us-east-1:1231231123:safd +cluster_name: + description: Name of EKS Cluster. + returned: when state is present + type: str + sample: test-cluster +created_at: + description: Fargate Profile creation date and time. + returned: when state is present + type: str + sample: '2022-01-18T20:00:00.111000+00:00' +pod_execution_role_arn: + description: ARN of the IAM Role used by Fargate Profile. + returned: when state is present + type: str + sample: arn:aws:eks:us-east-1:1231231123:role/asdf +subnets: + description: List of subnets used in Fargate Profile. + returned: when state is present + type: list + sample: + - subnet-qwerty123 + - subnet-asdfg456 +selectors: + description: Selector configuration. + returned: when state is present + type: complex + contains: + namespace: + description: Name of the kubernetes namespace used in profile. + returned: when state is present + type: str + sample: nm-test + labels: + description: List of kubernetes labels used in profile. + returned: when state is present + type: list + sample: + - label1: test1 + - label2: test2 +tags: + description: A dictionary of resource tags. + returned: when state is present + type: dict + sample: + foo: bar + env: test +status: + description: status of the EKS Fargate Profile. + returned: when state is present + type: str + sample: + - CREATING + - ACTIVE +''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +try: + import botocore.exceptions +except ImportError: + pass + + +def validate_tags(client, module, fargate_profile): + changed = False + + try: + existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + except(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) + + if tags_to_remove: + changed = True + if not module.check_mode: + try: + client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + + if tags_to_add: + changed = True + if not module.check_mode: + try: + client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + + return changed + + +def create_or_update_fargate_profile(client, module): + name = module.params.get('name') + subnets = module.params['subnets'] + role_arn = module.params['role_arn'] + cluster_name = module.params['cluster_name'] + selectors = module.params['selectors'] + tags = module.params['tags'] + wait = module.params.get('wait') + 
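+    # A Fargate profile cannot be modified in place (only its tags can be
+    # updated), so look up any existing profile first: on a match the code
+    # below only reconciles tags, otherwise a new profile is created.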
fargate_profile = get_fargate_profile(client, module, name, cluster_name) + + if fargate_profile: + changed = False + if fargate_profile['podExecutionRoleArn'] != role_arn: + module.fail_json(msg="Cannot modify Execution Role") + if set(fargate_profile['subnets']) != set(subnets): + module.fail_json(msg="Cannot modify Subnets") + if fargate_profile['selectors'] != selectors: + module.fail_json(msg="Cannot modify Selectors") + + changed = validate_tags(client, module, fargate_profile) + + if wait: + wait_until(client, module, 'fargate_profile_active', name, cluster_name) + fargate_profile = get_fargate_profile(client, module, name, cluster_name) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile)) + + if module.check_mode: + module.exit_json(changed=True) + + check_profiles_status(client, module, cluster_name) + + try: + params = dict(fargateProfileName=name, + podExecutionRoleArn=role_arn, + subnets=subnets, + clusterName=cluster_name, + selectors=selectors, + tags=tags + ) + fargate_profile = client.create_fargate_profile(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) + + if wait: + wait_until(client, module, 'fargate_profile_active', name, cluster_name) + fargate_profile = get_fargate_profile(client, module, name, cluster_name) + + module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile)) + + +def delete_fargate_profile(client, module): + name = module.params.get('name') + cluster_name = module.params['cluster_name'] + existing = get_fargate_profile(client, module, name, cluster_name) + wait = module.params.get('wait') + if not existing or existing["status"] == "DELETING": + module.exit_json(changed=False) + + if not module.check_mode: + check_profiles_status(client, module, cluster_name) + try: + client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) + + if wait: + wait_until(client, module, 'fargate_profile_deleted', name, cluster_name) + + module.exit_json(changed=True) + + +def get_fargate_profile(client, module, name, cluster_name): + try: + return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile'] + except is_boto3_error_code('ResourceNotFoundException'): + return None + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get fargate profile") + + +# Check whether any Fargate profile is in a changing state and, if so, wait for it to finish +def check_profiles_status(client, module, cluster_name): + try: + list_profiles = client.list_fargate_profiles(clusterName=cluster_name) + + for name in list_profiles["fargateProfileNames"]: + fargate_profile = get_fargate_profile(client, module, name, cluster_name) + if fargate_profile["status"] == 'CREATING': + wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name) + elif fargate_profile["status"] == 'DELETING': + wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't find EKS cluster") + + +def wait_until(client,
module, waiter_name, name, cluster_name): + wait_timeout = module.params.get('wait_timeout') + waiter = get_waiter(client, waiter_name) + attempts = 1 + int(wait_timeout / waiter.config.delay) + try: + waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts}) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg="An error occurred while waiting for the Fargate profile") + + +def main(): + argument_spec = dict( + name=dict(required=True), + cluster_name=dict(required=True), + role_arn=dict(), + subnets=dict(type='list', elements='str'), + selectors=dict(type='list', elements='dict', options=dict( + namespace=dict(type='str'), + labels=dict(type='dict', default={}) + )), + tags=dict(type='dict', default={}), + purge_tags=dict(type='bool', default=True), + state=dict(choices=['absent', 'present'], default='present'), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=1200, type='int') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]], + supports_check_mode=True, + ) + + try: + client = module.client('eks') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't connect to AWS") + + if module.params.get('state') == 'present': + create_or_update_fargate_profile(client, module) + else: + delete_fargate_profile(client, module) + + +if __name__ == '__main__': + main() From d8343ba5e9b98949c1d7489a2642cdbf0d495812 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 31 Mar 2022 11:17:18 +0200 Subject: [PATCH 408/683] incr version added (#1043) eks_fargate_profile: incr version added SUMMARY CI is failing for stable-3 branch NotImplementedError: Waiter fargate_profile_active could not be found for client . Possibly because the boto3 version in the main branch is higher than in the stable-3 branch. It's still possible to backport this for the next 3.3.0 release. The integration test must install a higher boto3 version. ISSUE TYPE Docs Pull Request COMPONENT NAME eks_fargate_profile Reviewed-by: Mark Chappell Reviewed-by: Mark Woolley --- eks_fargate_profile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 973b9497c2e..5ddad654d7f 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -9,7 +9,7 @@ DOCUMENTATION = r''' --- module: eks_fargate_profile -version_added: 3.2.0 +version_added: 4.0.0 short_description: Manage EKS Fargate Profile description: - Manage EKS Fargate Profile. From db1a928e04ceadb24b0e25780ef0de425b56746b Mon Sep 17 00:00:00 2001 From: Sebastien Rosset Date: Thu, 31 Mar 2022 03:47:24 -0700 Subject: [PATCH 409/683] Add support for tagging certificates. Fix deprecated tasks in aws_acm integration tests (#870) Add support for tagging certificates. Fix deprecated tasks in aws_acm integration tests SUMMARY This PR adds support for configuring arbitrary tags when importing a certificate using the aws_acm module. Previously, it was only possible to set the 'Name' tag. Additionally, this PR fixes issues with the aws_acm integration tests. The integration tests were using deprecated tasks or attributes, such as openssl_certificate. ISSUE TYPE Bugfix Pull Request COMPONENT NAME aws_acm ADDITIONAL INFORMATION Changes to the aws_acm.py module: Add new tags and purge_tags attributes. The certificate_arn attribute is now allowed when state='present'.
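For instance, a task along these lines (a minimal sketch; the ARN and tag values are placeholders) can now manage tags on an already-imported certificate:

- community.aws.aws_acm:
    certificate_arn: "arn:aws:acm:us-west-1:123456789012:certificate/example"
    tags:
      Application: search
      Environment: development
    purge_tags: false
    state: present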
A playbook should be allowed to modify an existing certificate entry by providing the ARN. For example, a play may want to add, modify, or remove tags on an existing certificate. The aws_acm module returns the updated tags. See example below. Refactor aws_acm.py to improve code reuse and make it possible to set arbitrary tags. This should also help to 1) improve readability and 2) prepare for #869, which I am planning to work on next. Backwards-compatibility is retained, even though it might make sense to normalize some of the attributes. Example return value: "certificate": { "arn": "arn:aws:acm:us-west-1:account:certificate/f85abf9d-4bda-4dcc-98c3-770664a68243", "domain_name": "acm1.949058644.ansible.com", "tags": { "Application": "search", "Environment": "development", "Name": "ansible-test-78006277-398b5796f999_949058644_1" } } Integration tests: The openssl_certificate task is deprecated. Migrate to x509_certificate. The signature_algorithms attribute is no longer supported by the new x509_certificate task. Using selfsigned_digest instead. The integration tests for the aws_acm module pass locally. I see ansible/ansible#67788 has been closed, but tests/integration/targets/aws_acm/aliases still has unstable. I am not sure what to do about it. I was able to run the tests in my local workspace after making the above changes. Reviewed-by: Markus Bergholz Reviewed-by: Sebastien Rosset Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- aws_acm.py | 343 +++++++++++++++++++++++++++++++++-------- 1 file changed, 251 insertions(+), 92 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index d28301e9160..1125ead5036 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -79,15 +79,17 @@ certificate: description: - The body of the PEM encoded public certificate. - - Required when I(state) is not C(absent). + - Required when I(state) is not C(absent) and the certificate does not exist. - > If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')). type: str certificate_arn: description: - - The ARN of a certificate in ACM to delete - - Ignored when I(state=present). + - The ARN of a certificate in ACM to modify or delete. + - > + If I(state=present), the certificate with the specified ARN can be updated. + For example, this can be used to add or remove tags on an existing certificate. - > If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag). @@ -131,6 +133,9 @@ This is to ensure Ansible can treat certificates idempotently, even though the ACM API allows duplicate certificates. - If I(state=present), this must be specified. + - > + If I(state=absent) and I(name_tag) is specified, + this task will delete all ACM certificates with this Name tag. + - > If I(state=absent), you must provide exactly one of I(certificate_arn), I(domain_name) or I(name_tag). @@ -139,7 +144,7 @@ private_key: description: - The body of the PEM encoded private key. - - Required when I(state=present). + - Required when I(state=present) and the certificate does not exist. - Ignored when I(state=absent). - > If your private key is in a file, @@ -157,6 +162,26 @@ choices: [present, absent] default: present type: str + + tags: + description: + - Tags to apply to certificates imported in ACM. + - > + If both I(name_tag) and the 'Name' tag in I(tags) are set, + the values must be the same. + - > + If the 'Name' tag in I(tags) is not set and I(name_tag) is set, + the I(name_tag) value is copied to I(tags).
+ type: dict + version_added: 3.2.0 + + purge_tags: + description: + - whether to remove tags not present in the C(tags) parameter. + default: false + type: bool + version_added: 3.2.0 + author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: @@ -206,6 +231,14 @@ state: absent region: ap-southeast-2 +- name: add tags to an existing certificate with a particular ARN + community.aws.aws_acm: + certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" + tags: + Name: my_certificate + Application: search + Environment: development + purge_tags: true ''' RETURN = ''' @@ -234,11 +267,65 @@ ''' +import base64 +from copy import deepcopy +import re # regex library + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( + boto3_tag_list_to_ansible_dict, + ansible_dict_to_boto3_tag_list, +) from ansible.module_utils._text import to_text -import base64 -import re # regex library + + +def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): + if tags is None: + return (False, existing_tags) + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags) + changed = bool(tags_to_add or tags_to_remove) + if tags_to_add and not module.check_mode: + try: + client.add_tags_to_certificate( + CertificateArn=resource_arn, + Tags=ansible_dict_to_boto3_tag_list(tags_to_add), + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: + module.fail_json_aws( + e, "Couldn't add tags to certificate {0}".format(resource_arn) + ) + if tags_to_remove and not module.check_mode: + # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys. + tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove] + try: + client.remove_tags_from_certificate( + CertificateArn=resource_arn, + Tags=tags_list, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: + module.fail_json_aws( + e, "Couldn't remove tags from certificate {0}".format(resource_arn) + ) + new_tags = deepcopy(existing_tags) + for key, value in tags_to_add.items(): + new_tags[key] = value + for key in tags_to_remove: + new_tags.pop(key, None) + return (changed, new_tags) # Takes in two text arguments @@ -293,6 +380,122 @@ def pem_chain_split(module, pem): return pem_arr +def update_imported_certificate(client, module, acm, old_cert, desired_tags): + """ + Update the existing certificate that was previously imported in ACM. + """ + module.debug("Existing certificate found in ACM") + if ('tags' not in old_cert) or ('Name' not in old_cert['tags']): + # shouldn't happen + module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) + if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')): + # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name', + # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'. 
+ module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert) + if 'certificate' not in old_cert: + # shouldn't happen + module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) + + cert_arn = None + # Are the existing certificate in ACM and the local certificate the same? + same = True + if module.params.get('certificate') is not None: + same &= chain_compare(module, old_cert['certificate'], module.params['certificate']) + if module.params['certificate_chain']: + # Need to test this + # not sure if Amazon appends the cert itself to the chain when self-signed + same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain']) + else: + # When there is no chain with a cert + # it seems Amazon returns the cert itself as the chain + same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate']) + + if same: + module.debug("Existing certificate in ACM is the same") + cert_arn = old_cert['certificate_arn'] + changed = False + else: + absent_args = ['certificate', 'name_tag', 'private_key'] + if sum([(module.params[a] is not None) for a in absent_args]) < 3: + module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.debug("Existing certificate in ACM is different, overwriting") + changed = True + if module.check_mode: + cert_arn = old_cert['certificate_arn'] + # note: returned domain will be the domain of the previous cert + else: + # update cert in ACM + cert_arn = acm.import_certificate( + client, + module, + certificate=module.params['certificate'], + private_key=module.params['private_key'], + certificate_chain=module.params['certificate_chain'], + arn=old_cert['certificate_arn'], + tags=desired_tags, + ) + return (changed, cert_arn) + + +def import_certificate(client, module, acm, desired_tags): + """ + Import a certificate to ACM. + """ + # Validate argument requirements + absent_args = ['certificate', 'name_tag', 'private_key'] + cert_arn = None + if sum([(module.params[a] is not None) for a in absent_args]) < 3: + module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.debug("No certificate in ACM. Creating new one.") + changed = True + if module.check_mode: + domain = 'example.com' + module.exit_json(certificate=dict(domain_name=domain), changed=True) + else: + cert_arn = acm.import_certificate( + client, + module, + certificate=module.params['certificate'], + private_key=module.params['private_key'], + certificate_chain=module.params['certificate_chain'], + tags=desired_tags, + ) + return (changed, cert_arn) + + +def ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags): + cert_arn = None + changed = False + if len(certificates) > 1: + msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag'] + module.fail_json(msg=msg, certificates=certificates) + elif len(certificates) == 1: + # Update existing certificate that was previously imported to ACM. + (changed, cert_arn) = update_imported_certificate(client, module, acm, certificates[0], desired_tags) + else: # len(certificates) == 0 + # Import new certificate to ACM. 
+ (changed, cert_arn) = import_certificate(client, module, acm, desired_tags) + + # Add/remove tags to/from certificate + try: + existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for certificate") + + purge_tags = module.params.get('purge_tags') + (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags) + changed |= c + domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn) + module.exit_json(certificate=dict(domain_name=domain, arn=cert_arn, tags=new_tags), changed=changed) + + +def ensure_certificates_absent(client, module, acm, certificates): + for cert in certificates: + if not module.check_mode: + acm.delete_certificate(client, module, cert['certificate_arn']) + module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0)) + + def main(): argument_spec = dict( certificate=dict(), @@ -301,112 +504,68 @@ def main(): domain_name=dict(aliases=['domain']), name_tag=dict(aliases=['name']), private_key=dict(no_log=True), - state=dict(default='present', choices=['present', 'absent']) + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, ) - required_if = [ - ['state', 'present', ['certificate', 'name_tag', 'private_key']], - ] - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) acm = ACMServiceManager(module) # Check argument requirements if module.params['state'] == 'present': - if module.params['certificate_arn']: - module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'") + # at least one of these should be specified. + absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + if sum([(module.params[a] is not None) for a in absent_args]) < 1: + for a in absent_args: + module.debug("%s is %s" % (a, module.params[a])) + module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") else: # absent # exactly one of these should be specified absent_args = ['certificate_arn', 'domain_name', 'name_tag'] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', certificate_arn' or 'domain_name' must be specified") - - if module.params['name_tag']: - tags = dict(Name=module.params['name_tag']) - else: - tags = None + module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + + filter_tags = None + desired_tags = None + if module.params.get('tags') is not None: + desired_tags = module.params['tags'] + if module.params.get('name_tag') is not None: + # The module was originally implemented to filter certificates based on the 'Name' tag. + # Other tags are not used to filter certificates. + # It would make sense to replace the existing name_tag, domain, certificate_arn attributes + # with a 'filter' attribute, but that would break backwards-compatibility. 
+ filter_tags = dict(Name=module.params['name_tag']) + if desired_tags is not None: + if 'Name' in desired_tags: + if desired_tags['Name'] != module.params['name_tag']: + module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'") + else: + desired_tags['Name'] = module.params['name_tag'] + else: + desired_tags = deepcopy(filter_tags) client = module.client('acm') # fetch the list of certificates currently in ACM - certificates = acm.get_certificates(client=client, - module=module, - domain_name=module.params['domain_name'], - arn=module.params['certificate_arn'], - only_tags=tags) + certificates = acm.get_certificates( + client=client, + module=module, + domain_name=module.params['domain_name'], + arn=module.params['certificate_arn'], + only_tags=filter_tags, + ) module.debug("Found %d corresponding certificates in ACM" % len(certificates)) - if module.params['state'] == 'present': - if len(certificates) > 1: - msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag'] - module.fail_json(msg=msg, certificates=certificates) - elif len(certificates) == 1: - # update the existing certificate - module.debug("Existing certificate found in ACM") - old_cert = certificates[0] # existing cert in ACM - if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) or (old_cert['tags']['Name'] != module.params['name_tag']): - # shouldn't happen - module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) - - if 'certificate' not in old_cert: - # shouldn't happen - module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) - - # Are the existing certificate in ACM and the local certificate the same? - same = True - same &= chain_compare(module, old_cert['certificate'], module.params['certificate']) - if module.params['certificate_chain']: - # Need to test this - # not sure if Amazon appends the cert itself to the chain when self-signed - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain']) - else: - # When there is no chain with a cert - # it seems Amazon returns the cert itself as the chain - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate']) - - if same: - module.debug("Existing certificate in ACM is the same, doing nothing") - domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn']) - module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']), changed=False) - else: - module.debug("Existing certificate in ACM is different, overwriting") - - if module.check_mode: - arn = old_cert['certificate_arn'] - # note: returned domain will be the domain of the previous cert - else: - # update cert in ACM - arn = acm.import_certificate(client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], - arn=old_cert['certificate_arn'], - tags=tags) - domain = acm.get_domain_of_cert(client=client, module=module, arn=arn) - module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True) - else: # len(certificates) == 0 - module.debug("No certificate in ACM. 
Creating new one.") - if module.check_mode: - domain = 'example.com' - module.exit_json(certificate=dict(domain_name=domain), changed=True) - else: - arn = acm.import_certificate(client=client, - module=module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], - tags=tags) - domain = acm.get_domain_of_cert(client=client, module=module, arn=arn) - - module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True) + ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) else: # state == absent - for cert in certificates: - if not module.check_mode: - acm.delete_certificate(client, module, cert['certificate_arn']) - module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], - changed=(len(certificates) > 0)) + ensure_certificates_absent(client, module, acm, certificates) if __name__ == '__main__': From 90f39aea588963678d355c4081d972113afba498 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Fri, 1 Apr 2022 14:55:47 -0700 Subject: [PATCH 410/683] Add check_mode support to ec2_asg (#1033) Add check_mode support to ec2_asg SUMMARY Added check_mode support to ec2_asg. CI failure could be resolved by #1036 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_asg Reviewed-by: Markus Bergholz Reviewed-by: Jill R --- ec2_asg.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ec2_asg.py b/ec2_asg.py index fa91232cbe6..f95fb329ce5 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -1138,6 +1138,9 @@ def create_autoscaling_group(connection): ResourceType='auto-scaling-group', ResourceId=group_name)) if not as_groups: + if module.check_mode: + module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.") + if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] @@ -1206,6 +1209,9 @@ def create_autoscaling_group(connection): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create Autoscaling Group.") else: + if module.check_mode: + module.exit_json(changed=True, msg="Would have modified AutoScalingGroup, if required, if not in check_mode.") + as_group = as_groups[0] initial_asg_properties = get_properties(as_group) changed = False @@ -1401,6 +1407,8 @@ def delete_autoscaling_group(connection): del_notification_config(connection, group_name, notification_topic) groups = describe_autoscaling_groups(connection, group_name) if groups: + if module.check_mode: + module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup if not in check_mode.") wait_timeout = time.time() + wait_timeout if not wait_for_instances: delete_asg(connection, group_name, force_delete=True) @@ -1456,6 +1464,7 @@ def replace(connection): min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') launch_config_name = module.params.get('launch_config_name') + # Required to maintain the default value being set to 'true' if launch_config_name: lc_check = module.params.get('lc_check') @@ -1891,6 +1900,7 @@ def main(): global module module = AnsibleAWSModule( argument_spec=argument_spec, + supports_check_mode=True, mutually_exclusive=[ ['replace_all_instances', 'replace_instances'], ['replace_all_instances', 'detach_instances'], From b2deb1cfe2547172bfb8473ca612f97494728f68
Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Apr 2022 15:06:38 +0200 Subject: [PATCH 411/683] networkfirewall_rule_group: Add support for "wait" parameter (#1037) networkfirewall_rule_group: Add support for "wait" parameter SUMMARY Add support for 'wait' to networkfirewall_rule_group to speed up the integration tests a little. Note: Module not available in a release yet, so no changelog required. ISSUE TYPE Feature Pull Request COMPONENT NAME networkfirewall_rule_group ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- networkfirewall_rule_group.py | 540 +++++++++++++++-------------- networkfirewall_rule_group_info.py | 2 +- 2 files changed, 283 insertions(+), 259 deletions(-) diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py index 116c5249b3f..a0898b30884 100644 --- a/networkfirewall_rule_group.py +++ b/networkfirewall_rule_group.py @@ -262,6 +262,21 @@ type: bool required: false default: True + wait: + description: + - Whether to wait for the firewall rule group to reach the + C(ACTIVE) or C(DELETED) state before the module returns. + type: bool + required: false + default: true + wait_timeout: + description: + - Maximum time, in seconds, to wait for the firewall rule group + to reach the expected state. + - Defaults to 600 seconds. + type: int + required: false + author: Mark Chappell (@tremble) extends_documentation_fragment: @@ -401,304 +416,309 @@ type: dict returned: success contains: - rule_variables: - description: Settings that are available for use in the rules in the rule group. - returned: When rule variables are attached to the rule group. - type: complex - contains: - ip_sets: - description: A dictionary mapping variable names to IP addresses in CIDR format. - returned: success - type: dict - example: ['192.0.2.0/24'] - port_sets: - description: A dictionary mapping variable names to ports - returned: success - type: dict - example: ['42'] - stateful_rule_options: - description: Additional options governing how Network Firewall handles stateful rules. - returned: When the rule group is either "rules string" or "rules list" based. + rule_group: + description: Details of the rules in the rule group type: dict - contains: - rule_order: - description: The order in which rules will be evaluated. - returned: success - type: str - example: 'DEFAULT_ACTION_ORDER' - rules_source: - description: Inspection criteria used for a 5-tuple based rule group. returned: success - type: dict contains: - stateful_rules: - description: A list of dictionaries describing the rules that the rule group is comprised of. - returned: When the rule group is "rules list" based. - type: list - elements: dict + rule_variables: + description: Settings that are available for use in the rules in the rule group. + returned: When rule variables are attached to the rule group. + type: complex contains: - action: - description: What action to perform when a flow matches the rule criteria. - returned: success - type: str - example: 'PASS' - header: - description: A description of the criteria used for the rule. + ip_sets: + description: A dictionary mapping variable names to IP addresses in CIDR format. returned: success type: dict - contains: - protocol: - description: The protocol to inspect for. - returned: success - type: str - example: 'IP' - source: - description: The source address or range of addresses to inspect for. - returned: success - type: str - example: '203.0.113.98' - source_port: - description: The source port to inspect for. 
- returned: success - type: str - example: '42' - destination: - description: The destination address or range of addresses to inspect for. - returned: success - type: str - example: '198.51.100.0/24' - destination_port: - description: The destination port to inspect for. - returned: success - type: str - example: '6666:6667' - direction: - description: The direction of traffic flow to inspect. - returned: success - type: str - example: 'FORWARD' - rule_options: - description: Additional Suricata RuleOptions settings for the rule. + example: ['192.0.2.0/24'] + port_sets: + description: A dictionary mapping variable names to ports returned: success - type: list - elements: dict - contains: - keyword: - description: The keyword for the setting. - returned: success - type: str - example: 'sid:1' - settings: - description: A list of values passed to the setting. - returned: When values are available - type: list - elements: str - rules_string: - description: A string describing the rules that the rule group is comprised of. - returned: When the rule group is "rules string" based. - type: str - rules_source_list: - description: A description of the criteria for a domain list rule group. - returned: When the rule group is "domain list" based. + type: dict + example: ['42'] + stateful_rule_options: + description: Additional options governing how Network Firewall handles stateful rules. + returned: When the rule group is either "rules string" or "rules list" based. type: dict contains: - targets: - description: A list of domain names to be inspected for. - returned: success - type: list - elements: str - example: ['abc.example.com', '.example.net'] - target_types: - description: The protocols to be inspected by the rule group. - returned: success - type: list - elements: str - example: ['TLS_SNI', 'HTTP_HOST'] - generated_rules_type: - description: Whether the rule group allows or denies access to the domains in the list. + rule_order: + description: The order in which rules will be evaluated. returned: success type: str - example: 'ALLOWLIST' - stateless_rules_and_custom_actions: - description: A description of the criteria for a stateless rule group. - returned: When the rule group is a stateless rule group. + example: 'DEFAULT_ACTION_ORDER' + rules_source: + description: Inspection criteria used for a 5-tuple based rule group. + returned: success type: dict contains: - stateless_rules: - description: A list of stateless rules for use in a stateless rule group. + stateful_rules: + description: A list of dictionaries describing the rules that the rule group is comprised of. + returned: When the rule group is "rules list" based. type: list elements: dict contains: - rule_definition: - description: Describes the stateless 5-tuple inspection criteria and actions for the rule. + action: + description: What action to perform when a flow matches the rule criteria. + returned: success + type: str + example: 'PASS' + header: + description: A description of the criteria used for the rule. returned: success type: dict contains: - match_attributes: - description: Describes the stateless 5-tuple inspection criteria for the rule. + protocol: + description: The protocol to inspect for. + returned: success + type: str + example: 'IP' + source: + description: The source address or range of addresses to inspect for. + returned: success + type: str + example: '203.0.113.98' + source_port: + description: The source port to inspect for. 
+ returned: success + type: str + example: '42' + destination: + description: The destination address or range of addresses to inspect for. + returned: success + type: str + example: '198.51.100.0/24' + destination_port: + description: The destination port to inspect for. + returned: success + type: str + example: '6666:6667' + direction: + description: The direction of traffic flow to inspect. + returned: success + type: str + example: 'FORWARD' + rule_options: + description: Additional Suricata RuleOptions settings for the rule. + returned: success + type: list + elements: dict + contains: + keyword: + description: The keyword for the setting. + returned: success + type: str + example: 'sid:1' + settings: + description: A list of values passed to the setting. + returned: When values are available + type: list + elements: str + rules_string: + description: A string describing the rules that the rule group is comprised of. + returned: When the rule group is "rules string" based. + type: str + rules_source_list: + description: A description of the criteria for a domain list rule group. + returned: When the rule group is "domain list" based. + type: dict + contains: + targets: + description: A list of domain names to be inspected for. + returned: success + type: list + elements: str + example: ['abc.example.com', '.example.net'] + target_types: + description: The protocols to be inspected by the rule group. + returned: success + type: list + elements: str + example: ['TLS_SNI', 'HTTP_HOST'] + generated_rules_type: + description: Whether the rule group allows or denies access to the domains in the list. + returned: success + type: str + example: 'ALLOWLIST' + stateless_rules_and_custom_actions: + description: A description of the criteria for a stateless rule group. + returned: When the rule group is a stateless rule group. + type: dict + contains: + stateless_rules: + description: A list of stateless rules for use in a stateless rule group. + type: list + elements: dict + contains: + rule_definition: + description: Describes the stateless 5-tuple inspection criteria and actions for the rule. returned: success type: dict contains: - sources: - description: The source IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - destinations: - description: The destination IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - source_ports: - description: The source port ranges to inspect for. + match_attributes: + description: Describes the stateless 5-tuple inspection criteria for the rule. returned: success - type: list - elements: dict + type: dict contains: - from_port: - description: The lower limit of the port range. + sources: + description: The source IP addresses and address ranges to inspect for. returned: success - type: int - to_port: - description: The upper limit of the port range. + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + destinations: + description: The destination IP addresses and address ranges to inspect for. 
returned: success - type: int - destination_ports: - description: The destination port ranges to inspect for. - returned: success - type: list - elements: dict - contains: - from_port: - description: The lower limit of the port range. + type: list + elements: dict + contains: + address_definition: + description: An IP address or a block of IP addresses in CIDR notation. + returned: success + type: str + example: '192.0.2.3' + source_ports: + description: The source port ranges to inspect for. returned: success - type: int - to_port: - description: The upper limit of the port range. + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. + returned: success + type: int + destination_ports: + description: The destination port ranges to inspect for. returned: success - type: int - protocols: - description: The IANA protocol numbers of the protocols to inspect for. - returned: success - type: list - elements: int - example: [6] - tcp_flags: - description: The TCP flags and masks to inspect for. - returned: success - type: list - elements: dict - contains: - flags: - description: Used with masks to define the TCP flags that flows are inspected for. + type: list + elements: dict + contains: + from_port: + description: The lower limit of the port range. + returned: success + type: int + to_port: + description: The upper limit of the port range. + returned: success + type: int + protocols: + description: The IANA protocol numbers of the protocols to inspect for. returned: success type: list - elements: str - masks: - description: The set of flags considered during inspection. + elements: int + example: [6] + tcp_flags: + description: The TCP flags and masks to inspect for. returned: success type: list - elements: str - actions: - description: The actions to take when a flow matches the rule. + elements: dict + contains: + flags: + description: Used with masks to define the TCP flags that flows are inspected for. + returned: success + type: list + elements: str + masks: + description: The set of flags considered during inspection. + returned: success + type: list + elements: str + actions: + description: The actions to take when a flow matches the rule. + returned: success + type: list + elements: str + example: ['aws:pass', 'CustomActionName'] + priority: + description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. returned: success - type: list - elements: str - example: ['aws:pass', 'CustomActionName'] - priority: - description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. - returned: success - type: int - custom_actions: - description: A list of individual custom action definitions that are available for use in stateless rules. - type: list - elements: dict - contains: - action_name: - description: The name for the custom action. - returned: success - type: str - action_definition: - description: The custom action associated with the action name. - returned: success - type: dict + type: int + custom_actions: + description: A list of individual custom action definitions that are available for use in stateless rules. + type: list + elements: dict contains: - publish_metric_action: - description: The description of an action which publishes to CloudWatch. - returned: When the action publishes to CloudWatch. 
+ action_name: + description: The name for the custom action. + returned: success + type: str + action_definition: + description: The custom action associated with the action name. + returned: success type: dict contains: - dimensions: - description: The value to use in an Amazon CloudWatch custom metric dimension. - returned: success - type: list - elements: dict + publish_metric_action: + description: The description of an action which publishes to CloudWatch. + returned: When the action publishes to CloudWatch. + type: dict contains: - value: - description: The value to use in the custom metric dimension. + dimensions: + description: The value to use in an Amazon CloudWatch custom metric dimension. returned: success - type: str -rule_group_metadata: - description: Details of the rules in the rule group - type: dict - returned: success - contains: - capacity: - description: The maximum operating resources that this rule group can use. - type: int - returned: success - consumed_capacity: - description: The number of capacity units currently consumed by the rule group rules. - type: int - returned: success - description: - description: A description of the rule group. - type: str - returned: success - number_of_associations: - description: The number of firewall policies that use this rule group. - type: int - returned: success - rule_group_arn: - description: The ARN for the rule group - type: int - returned: success - example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup' - rule_group_id: - description: A unique identifier for the rule group. - type: int - returned: success - example: '12345678-abcd-1234-abcd-123456789abc' - rule_group_name: - description: The name of the rule group. - type: str - returned: success - rule_group_status: - description: The current status of a rule group. - type: str - returned: success - example: 'DELETING' - tags: - description: A dictionary representing the tags associated with the rule group. + type: list + elements: dict + contains: + value: + description: The value to use in the custom metric dimension. + returned: success + type: str + rule_group_metadata: + description: Details of the rules in the rule group type: dict returned: success - type: - description: Whether the rule group is stateless or stateful. - type: str - returned: success - example: 'STATEFUL' + contains: + capacity: + description: The maximum operating resources that this rule group can use. + type: int + returned: success + consumed_capacity: + description: The number of capacity units currently consumed by the rule group rules. + type: int + returned: success + description: + description: A description of the rule group. + type: str + returned: success + number_of_associations: + description: The number of firewall policies that use this rule group. + type: int + returned: success + rule_group_arn: + description: The ARN for the rule group + type: int + returned: success + example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup' + rule_group_id: + description: A unique identifier for the rule group. + type: int + returned: success + example: '12345678-abcd-1234-abcd-123456789abc' + rule_group_name: + description: The name of the rule group. + type: str + returned: success + rule_group_status: + description: The current status of a rule group. + type: str + returned: success + example: 'DELETING' + tags: + description: A dictionary representing the tags associated with the rule group. 
+ type: dict + returned: success + type: + description: Whether the rule group is stateless or stateful. + type: str + returned: success + example: 'STATEFUL' ''' @@ -746,6 +766,8 @@ def main(): rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), tags=dict(type='dict', required=False), purge_tags=dict(type='bool', required=False, default=True), + wait=dict(type='bool', required=False, default=True), + wait_timeout=dict(type='int', required=False), ) module = AnsibleAWSModule( @@ -785,6 +807,8 @@ def main(): module.require_botocore_at_least('1.23.23', reason='to set the rule order') manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type) + manager.set_wait(module.params.get('wait', None)) + manager.set_wait_timeout(module.params.get('wait_timeout', None)) if state == 'absent': manager.delete() diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py index ae9f43bd28b..37cb6d2b0c1 100644 --- a/networkfirewall_rule_group_info.py +++ b/networkfirewall_rule_group_info.py @@ -378,7 +378,7 @@ returned: success example: 'DELETING' tags: - description: A dcitionary representing the tags associated with the rule group. + description: A dictionary representing the tags associated with the rule group. type: dict returned: success type: From e206284f0c3d3c4f377cf97a401c0c7c4142f8bf Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 5 Apr 2022 15:20:00 +0200 Subject: [PATCH 412/683] While migrating ec2_vpc_route_table and ec2_vpc_route_table_info (https://github.com/ansible-collections/community.aws/pull/672) (#987) Delete ec2_vpc_route_table* since already promoted While migrating ec2_vpc_route_table and ec2_vpc_route_table_info (#672) from community.aws to amazon.aws, we forgot to delete the modules from the community.aws collection after the promotion. As of now, the modules within community.aws have not received any new updates, so we can remove them without backporting anything. SUMMARY ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Joseph Torcasso Reviewed-by: Jill R --- ec2_vpc_route_table.py | 722 ------------------------------------ ec2_vpc_route_table_info.py | 279 -------------- 2 files changed, 1001 deletions(-) delete mode 100644 ec2_vpc_route_table.py delete mode 100644 ec2_vpc_route_table_info.py diff --git a/ec2_vpc_route_table.py b/ec2_vpc_route_table.py deleted file mode 100644 index afc3487110a..00000000000 --- a/ec2_vpc_route_table.py +++ /dev/null @@ -1,722 +0,0 @@ -#!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_vpc_route_table -version_added: 1.0.0 -short_description: Manage route tables for AWS virtual private clouds -description: - - Manage route tables for AWS virtual private clouds -author: -- Robert Estelle (@erydo) -- Rob White (@wimnat) -- Will Thames (@willthames) -options: - lookup: - description: Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. If no tags are specified then no lookup for an existing route table is performed and a new route table will be created. To change tags of a route table you must look up by id.
- default: tag - choices: [ 'tag', 'id' ] - type: str - propagating_vgw_ids: - description: Enable route propagation from virtual gateways specified by ID. - type: list - elements: str - purge_routes: - description: Purge existing routes that are not found in routes. - type: bool - default: 'yes' - purge_subnets: - description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied. - default: 'true' - type: bool - purge_tags: - description: Purge existing tags that are not found in route table. - type: bool - default: 'no' - route_table_id: - description: - - The ID of the route table to update or delete. - - Required when I(lookup=id). - type: str - routes: - description: List of routes in the route table. - Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', - 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'. - If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. - Routes are required for present states. - type: list - elements: dict - state: - description: Create or destroy the VPC route table. - default: present - choices: [ 'present', 'absent' ] - type: str - subnets: - description: An array of subnets to add to this route table. Subnets may be specified - by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. - type: list - elements: str - tags: - description: > - A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are - used to uniquely identify route tables within a VPC when the route_table_id is not supplied. - aliases: [ "resource_tags" ] - type: dict - vpc_id: - description: - - VPC ID of the VPC in which to create the route table. - - Required when I(state=present) or I(lookup=tag). - type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
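# Illustrative extra example (not from the original module documentation): the
# routes option above documents an 'igw' shorthand for gateway_id that the
# module resolves to the VPC's internet gateway; values here are placeholders.
- name: Route 0.0.0.0/0 via the VPC's internet gateway using the 'igw' shorthand
  community.aws.ec2_vpc_route_table:
    vpc_id: vpc-1245678
    region: us-west-1
    tags:
      Name: PublicViaShorthand
    routes:
      - dest: 0.0.0.0/0
        gateway_id: igw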
- -# Basic creation example: -- name: Set up public subnet route table - community.aws.ec2_vpc_route_table: - vpc_id: vpc-1245678 - region: us-west-1 - tags: - Name: Public - subnets: - - "{{ jumpbox_subnet.subnet.id }}" - - "{{ frontend_subnet.subnet.id }}" - - "{{ vpn_subnet.subnet_id }}" - routes: - - dest: 0.0.0.0/0 - gateway_id: "{{ igw.gateway_id }}" - register: public_route_table - -- name: Set up NAT-protected route table - community.aws.ec2_vpc_route_table: - vpc_id: vpc-1245678 - region: us-west-1 - tags: - Name: Internal - subnets: - - "{{ application_subnet.subnet.id }}" - - 'Database Subnet' - - '10.0.0.0/8' - routes: - - dest: 0.0.0.0/0 - instance_id: "{{ nat.instance_id }}" - register: nat_route_table - -- name: delete route table - community.aws.ec2_vpc_route_table: - vpc_id: vpc-1245678 - region: us-west-1 - route_table_id: "{{ route_table.id }}" - lookup: id - state: absent -''' - -RETURN = r''' -route_table: - description: Route Table result - returned: always - type: complex - contains: - associations: - description: List of subnets associated with the route table - returned: always - type: complex - contains: - main: - description: Whether this is the main route table - returned: always - type: bool - sample: false - route_table_association_id: - description: ID of association between route table and subnet - returned: always - type: str - sample: rtbassoc-ab47cfc3 - route_table_id: - description: ID of the route table - returned: always - type: str - sample: rtb-bf779ed7 - subnet_id: - description: ID of the subnet - returned: always - type: str - sample: subnet-82055af9 - id: - description: ID of the route table (same as route_table_id for backwards compatibility) - returned: always - type: str - sample: rtb-bf779ed7 - propagating_vgws: - description: List of Virtual Private Gateways propagating routes - returned: always - type: list - sample: [] - route_table_id: - description: ID of the route table - returned: always - type: str - sample: rtb-bf779ed7 - routes: - description: List of routes in the route table - returned: always - type: complex - contains: - destination_cidr_block: - description: CIDR block of destination - returned: always - type: str - sample: 10.228.228.0/22 - gateway_id: - description: ID of the gateway - returned: when gateway is local or internet gateway - type: str - sample: local - instance_id: - description: ID of a NAT instance - returned: when the route is via an EC2 instance - type: str - sample: i-abcd123456789 - instance_owner_id: - description: AWS account owning the NAT instance - returned: when the route is via an EC2 instance - type: str - sample: 123456789012 - nat_gateway_id: - description: ID of the NAT gateway - returned: when the route is via a NAT gateway - type: str - sample: local - origin: - description: mechanism through which the route is in the table - returned: always - type: str - sample: CreateRouteTable - state: - description: state of the route - returned: always - type: str - sample: active - tags: - description: Tags applied to the route table - returned: always - type: dict - sample: - Name: Public route table - Public: 'true' - vpc_id: - description: ID for the VPC in which the route lives - returned: always - type: str - sample: vpc-6e2d2407 -''' - -import re -from time import sleep - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations 
import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - - -@AWSRetry.jittered_backoff() -def describe_subnets_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_subnets') - return paginator.paginate(**params).build_full_result()['Subnets'] - - -@AWSRetry.jittered_backoff() -def describe_igws_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_internet_gateways') - return paginator.paginate(**params).build_full_result()['InternetGateways'] - - -@AWSRetry.jittered_backoff() -def describe_route_tables_with_backoff(connection, **params): - try: - paginator = connection.get_paginator('describe_route_tables') - return paginator.paginate(**params).build_full_result()['RouteTables'] - except is_boto3_error_code('InvalidRouteTableID.NotFound'): - return None - - -def find_subnets(connection, module, vpc_id, identified_subnets): - """ - Finds a list of subnets, each identified either by a raw ID, a unique - 'Name' tag, or a CIDR such as 10.0.0.0/8. - """ - CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') - SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') - - subnet_ids = [] - subnet_names = [] - subnet_cidrs = [] - for subnet in (identified_subnets or []): - if re.match(SUBNET_RE, subnet): - subnet_ids.append(subnet) - elif re.match(CIDR_RE, subnet): - subnet_cidrs.append(subnet) - else: - subnet_names.append(subnet) - - subnets_by_id = [] - if subnet_ids: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) - try: - subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) - - subnets_by_cidr = [] - if subnet_cidrs: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs}) - try: - subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) - - subnets_by_name = [] - if subnet_names: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names}) - try: - subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) - - for name in subnet_names: - matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name]) - if matching_count == 0: - module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) - elif matching_count > 1: - module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) - - return subnets_by_id + subnets_by_cidr + subnets_by_name - 
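# A minimal standalone sketch (hypothetical helper, not part of this module)
# of the dispatch idea find_subnets() uses above: each user-supplied value is
# routed to an ID, CIDR, or Name-tag lookup based on its shape.
import re

CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')


def classify_subnet_identifier(identifier):
    # Map a value from the module's 'subnets' option to the
    # describe_subnets filter it would be looked up with.
    if SUBNET_RE.match(identifier):
        return 'subnet-id'
    if CIDR_RE.match(identifier):
        return 'cidr'
    return 'tag:Name'

# classify_subnet_identifier('subnet-82055af9') -> 'subnet-id'
# classify_subnet_identifier('10.0.0.0/24')     -> 'cidr'
# classify_subnet_identifier('Database Subnet') -> 'tag:Name'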
- -def find_igw(connection, module, vpc_id): - """ - Finds the Internet gateway for the given VPC ID. - """ - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) - try: - igw = describe_igws_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) - if len(igw) == 1: - return igw[0]['InternetGatewayId'] - elif len(igw) == 0: - module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id)) - else: - module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id)) - - -def tags_match(match_tags, candidate_tags): - return all((k in candidate_tags and candidate_tags[k] == v - for k, v in match_tags.items())) - - -def get_route_table_by_id(connection, module, route_table_id): - - route_table = None - try: - route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get route table") - if route_tables: - route_table = route_tables[0] - - return route_table - - -def get_route_table_by_tags(connection, module, vpc_id, tags): - count = 0 - route_table = None - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) - try: - route_tables = describe_route_tables_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get route table") - for table in route_tables: - this_tags = describe_ec2_tags(connection, module, table['RouteTableId']) - if tags_match(tags, this_tags): - route_table = table - count += 1 - - if count > 1: - module.fail_json(msg="Tags provided do not identify a unique route table") - else: - return route_table - - -def route_spec_matches_route(route_spec, route): - if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']: - route_spec['NatGatewayId'] = route_spec.pop('GatewayId') - if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']: - if route_spec.get('DestinationCidrBlock', '').startswith('pl-'): - route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock') - - return set(route_spec.items()).issubset(route.items()) - - -def route_spec_matches_route_cidr(route_spec, route): - return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock') - - -def rename_key(d, old_key, new_key): - d[new_key] = d.pop(old_key) - - -def index_of_matching_route(route_spec, routes_to_match): - for i, route in enumerate(routes_to_match): - if route_spec_matches_route(route_spec, route): - return "exact", i - elif 'Origin' in route_spec and route_spec['Origin'] != 'EnableVgwRoutePropagation': - if route_spec_matches_route_cidr(route_spec, route): - return "replace", i - - -def ensure_routes(connection=None, module=None, route_table=None, route_specs=None, - propagating_vgw_ids=None, check_mode=None, purge_routes=None): - routes_to_match = list(route_table['Routes']) - route_specs_to_create = [] - route_specs_to_recreate = [] - for route_spec in route_specs: - match = index_of_matching_route(route_spec, routes_to_match) - if match is None: - if route_spec.get('DestinationCidrBlock'): - route_specs_to_create.append(route_spec) - else: - module.warn("Skipping creating {0} because it has no destination cidr block. 
" - "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec)) - else: - if match[0] == "replace": - if route_spec.get('DestinationCidrBlock'): - route_specs_to_recreate.append(route_spec) - else: - module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec)) - del routes_to_match[match[1]] - - routes_to_delete = [] - if purge_routes: - for r in routes_to_match: - if not r.get('DestinationCidrBlock'): - module.warn("Skipping purging route {0} because it has no destination cidr block. " - "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r)) - continue - if r['Origin'] == 'CreateRoute': - routes_to_delete.append(r) - - changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate) - if changed and not check_mode: - for route in routes_to_delete: - try: - connection.delete_route( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - DestinationCidrBlock=route['DestinationCidrBlock']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete route") - - for route_spec in route_specs_to_recreate: - try: - connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't recreate route") - - for route_spec in route_specs_to_create: - try: - connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) - except is_boto3_error_code('RouteAlreadyExists'): - changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create route") - - return {'changed': bool(changed)} - - -def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None, - check_mode=None): - filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id}) - try: - route_tables = describe_route_tables_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get route tables") - for route_table in route_tables: - if route_table['RouteTableId'] is None: - continue - for a in route_table['Associations']: - if a['Main']: - continue - if a['SubnetId'] == subnet_id: - if route_table['RouteTableId'] == route_table_id: - return {'changed': False, 'association_id': a['RouteTableAssociationId']} - else: - if check_mode: - return {'changed': True} - try: - connection.disassociate_route_table( - aws_retry=True, AssociationId=a['RouteTableAssociationId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") - - try: - association_id = connection.associate_route_table(aws_retry=True, - RouteTableId=route_table_id, - SubnetId=subnet_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't associate subnet with route table") - return {'changed': True, 'association_id': association_id} - - -def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None, - check_mode=None, purge_subnets=None): - current_association_ids = 
[a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']] - new_association_ids = [] - changed = False - for subnet in subnets: - result = ensure_subnet_association( - connection=connection, module=module, vpc_id=route_table['VpcId'], - route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], - check_mode=check_mode) - changed = changed or result['changed'] - if changed and check_mode: - return {'changed': True} - new_association_ids.append(result['association_id']) - - if purge_subnets: - to_delete = [a_id for a_id in current_association_ids - if a_id not in new_association_ids] - - for a_id in to_delete: - changed = True - if not check_mode: - try: - connection.disassociate_route_table(aws_retry=True, AssociationId=a_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") - - return {'changed': changed} - - -def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None, - check_mode=None): - changed = False - gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']] - to_add = set(propagating_vgw_ids) - set(gateways) - if to_add: - changed = True - if not check_mode: - for vgw_id in to_add: - try: - connection.enable_vgw_route_propagation( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - GatewayId=vgw_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't enable route propagation") - - return {'changed': changed} - - -def ensure_route_table_absent(connection, module): - - lookup = module.params.get('lookup') - route_table_id = module.params.get('route_table_id') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') - purge_subnets = module.params.get('purge_subnets') - - if lookup == 'tag': - if tags is not None: - route_table = get_route_table_by_tags(connection, module, vpc_id, tags) - else: - route_table = None - elif lookup == 'id': - route_table = get_route_table_by_id(connection, module, route_table_id) - - if route_table is None: - return {'changed': False} - - # disassociate subnets before deleting route table - if not module.check_mode: - ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=[], check_mode=False, purge_subnets=purge_subnets) - try: - connection.delete_route_table(aws_retry=True, RouteTableId=route_table['RouteTableId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting route table") - - return {'changed': True} - - -def get_route_table_info(connection, module, route_table): - result = get_route_table_by_id(connection, module, route_table['RouteTableId']) - try: - result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get tags for route table") - result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) - # backwards compatibility - result['id'] = result['route_table_id'] - return result - - -def create_route_spec(connection, module, vpc_id): - routes = module.params.get('routes') - - for route_spec in routes: - rename_key(route_spec, 'dest', 'destination_cidr_block') - - if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': - igw = 
find_igw(connection, module, vpc_id) - route_spec['gateway_id'] = igw - if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'): - rename_key(route_spec, 'gateway_id', 'nat_gateway_id') - - return snake_dict_to_camel_dict(routes, capitalize_first=True) - - -def ensure_route_table_present(connection, module): - - lookup = module.params.get('lookup') - propagating_vgw_ids = module.params.get('propagating_vgw_ids') - purge_routes = module.params.get('purge_routes') - purge_subnets = module.params.get('purge_subnets') - purge_tags = module.params.get('purge_tags') - route_table_id = module.params.get('route_table_id') - subnets = module.params.get('subnets') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') - routes = create_route_spec(connection, module, vpc_id) - - changed = False - tags_valid = False - - if lookup == 'tag': - if tags is not None: - try: - route_table = get_route_table_by_tags(connection, module, vpc_id, tags) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'") - else: - route_table = None - elif lookup == 'id': - try: - route_table = get_route_table_by_id(connection, module, route_table_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error finding route table with lookup 'id'") - - # If no route table returned then create new route table - if route_table is None: - changed = True - if not module.check_mode: - try: - route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable'] - # try to wait for route table to be present before moving on - get_waiter( - connection, 'route_table_exists' - ).wait( - RouteTableIds=[route_table['RouteTableId']], - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout waiting for route table creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error creating route table") - else: - route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id} - module.exit_json(changed=changed, route_table=route_table) - - if routes is not None: - result = ensure_routes(connection=connection, module=module, route_table=route_table, - route_specs=routes, propagating_vgw_ids=propagating_vgw_ids, - check_mode=module.check_mode, purge_routes=purge_routes) - changed = changed or result['changed'] - - if propagating_vgw_ids is not None: - result = ensure_propagation(connection=connection, module=module, route_table=route_table, - propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode) - changed = changed or result['changed'] - - if not tags_valid and tags is not None: - changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'], - tags=tags, purge_tags=purge_tags, - retry_codes=['InvalidRouteTableID.NotFound']) - route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) - - if subnets is not None: - associated_subnets = find_subnets(connection, module, vpc_id, subnets) - - result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=associated_subnets, check_mode=module.check_mode, - purge_subnets=purge_subnets) - changed = changed or result['changed'] - - if changed: - # pause to allow route table routes/subnets/associations to be updated before exiting with final state - 
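# Sketch only (not part of the original module): the unconditional 5s delay
# below could instead be a bounded re-poll of the API until the new state is
# visible, at the cost of extra describe calls. Assumes the
# describe_route_tables_with_backoff() helper defined earlier in this file.
#
#     for _ in range(5):
#         latest = describe_route_tables_with_backoff(
#             connection, RouteTableIds=[route_table['RouteTableId']])
#         if latest:
#             break
#         sleep(1)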
sleep(5) - module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table)) - - -def main(): - argument_spec = dict( - lookup=dict(default='tag', choices=['tag', 'id']), - propagating_vgw_ids=dict(type='list', elements='str'), - purge_routes=dict(default=True, type='bool'), - purge_subnets=dict(default=True, type='bool'), - purge_tags=dict(default=False, type='bool'), - route_table_id=dict(), - routes=dict(default=[], type='list', elements='dict'), - state=dict(default='present', choices=['present', 'absent']), - subnets=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), - vpc_id=dict() - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['lookup', 'id', ['route_table_id']], - ['lookup', 'tag', ['vpc_id']], - ['state', 'present', ['vpc_id']]], - supports_check_mode=True) - - # The tests for route table existence use their own decorator; we can safely - # retry on InvalidRouteTableID.NotFound - retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound']) - connection = module.client('ec2', retry_decorator=retry_decorator) - - state = module.params.get('state') - - if state == 'present': - result = ensure_route_table_present(connection, module) - elif state == 'absent': - result = ensure_route_table_absent(connection, module) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ec2_vpc_route_table_info.py b/ec2_vpc_route_table_info.py deleted file mode 100644 index a84245d47ee..00000000000 --- a/ec2_vpc_route_table_info.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ec2_vpc_route_table_info -version_added: 1.0.0 -short_description: Gather information about ec2 VPC route tables in AWS -description: - - Gather information about ec2 VPC route tables in AWS -author: -- "Rob White (@wimnat)" -- "Mark Chappell (@tremble)" -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. - type: dict -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details.
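# Illustrative extra example (not from the original module documentation):
# register the result and use the normalized 'id' field documented under RETURN.
- name: Find route tables in a VPC and list their IDs
  community.aws.ec2_vpc_route_table_info:
    filters:
      vpc-id: vpc-abcdef00
  register: rtb_info

- name: Show the route table IDs
  ansible.builtin.debug:
    msg: "{{ rtb_info.route_tables | map(attribute='id') | list }}"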
- -- name: Gather information about all VPC route tables - community.aws.ec2_vpc_route_table_info: - -- name: Gather information about a particular VPC route table using route table ID - community.aws.ec2_vpc_route_table_info: - filters: - route-table-id: rtb-00112233 - -- name: Gather information about any VPC route table with a tag key Name and value Example - community.aws.ec2_vpc_route_table_info: - filters: - "tag:Name": Example - -- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00 - community.aws.ec2_vpc_route_table_info: - filters: - vpc-id: vpc-abcdef00 -''' - -RETURN = r''' -route_tables: - description: - - A list of dictionaries describing route tables - - See also U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_route_tables) - returned: always - type: complex - contains: - associations: - description: List of subnets associated with the route table - returned: always - type: complex - contains: - main: - description: Whether this is the main route table - returned: always - type: bool - sample: false - id: - description: ID of association between route table and subnet - returned: always - type: str - sample: rtbassoc-ab47cfc3 - route_table_association_id: - description: ID of association between route table and subnet - returned: always - type: str - sample: rtbassoc-ab47cfc3 - route_table_id: - description: ID of the route table - returned: always - type: str - sample: rtb-bf779ed7 - subnet_id: - description: ID of the subnet - returned: always - type: str - sample: subnet-82055af9 - association_state: - description: The state of the association - returned: always - type: complex - contains: - state: - description: The state of the association - returned: always - type: str - sample: associated - state_message: - description: Additional information about the state of the association - returned: when available - type: str - sample: 'Creating association' - id: - description: ID of the route table (same as route_table_id for backwards compatibility) - returned: always - type: str - sample: rtb-bf779ed7 - owner_id: - description: ID of the account which owns the route table - returned: always - type: str - sample: '012345678912' - propagating_vgws: - description: List of Virtual Private Gateways propagating routes - returned: always - type: list - sample: [] - route_table_id: - description: ID of the route table - returned: always - type: str - sample: rtb-bf779ed7 - routes: - description: List of routes in the route table - returned: always - type: complex - contains: - destination_cidr_block: - description: CIDR block of destination - returned: always - type: str - sample: 10.228.228.0/22 - gateway_id: - description: ID of the gateway - returned: when gateway is local or internet gateway - type: str - sample: local - instance_id: - description: - - ID of a NAT instance.
- - Empty unless the route is via an EC2 instance - returned: always - type: str - sample: i-abcd123456789 - instance_owner_id: - description: - - AWS account owning the NAT instance - - Empty unless the route is via an EC2 instance - returned: always - type: str - sample: 123456789012 - network_interface_id: - description: - - The ID of the network interface - - Empty unless the route is via an EC2 instance - returned: always - type: str - sample: 123456789012 - nat_gateway_id: - description: ID of the NAT gateway - returned: when the route is via a NAT gateway - type: str - sample: local - origin: - description: mechanism through which the route is in the table - returned: always - type: str - sample: CreateRouteTable - state: - description: state of the route - returned: always - type: str - sample: active - tags: - description: Tags applied to the route table - returned: always - type: dict - sample: - Name: Public route table - Public: 'true' - vpc_id: - description: ID for the VPC in which the route lives - returned: always - type: str - sample: vpc-6e2d2407 -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - - -@AWSRetry.jittered_backoff() -def describe_route_tables_with_backoff(connection, **params): - try: - paginator = connection.get_paginator('describe_route_tables') - return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('InvalidRouteTableID.NotFound'): - return None - - -def normalize_route(route): - # Historically these were all there, but set to null when empty - for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId', - 'Origin', 'State', 'NetworkInterfaceId']: - if legacy_key not in route: - route[legacy_key] = None - route['InterfaceId'] = route['NetworkInterfaceId'] - return route - - -def normalize_association(assoc): - # Name change between boto v2 and boto v3, return both - assoc['Id'] = assoc['RouteTableAssociationId'] - return assoc - - -def normalize_route_table(table): - table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags']) - table['Associations'] = [normalize_association(assoc) for assoc in table['Associations']] - table['Routes'] = [normalize_route(route) for route in table['Routes']] - table['Id'] = table['RouteTableId'] - del table['Tags'] - return camel_dict_to_snake_dict(table, ignore_list=['tags']) - - -def normalize_results(results): - """ - We used to be a boto v2 module, make sure that the old return values are - maintained and the shape of the return values are what people expect - """ - - routes = [normalize_route_table(route) for route in results['RouteTables']] - del results['RouteTables'] - results = camel_dict_to_snake_dict(results) - results['route_tables'] = routes - return results - - -def list_ec2_vpc_route_tables(connection, module): - - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - - try: - results = describe_route_tables_with_backoff(connection, Filters=filters) - except
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get route tables") - - results = normalize_results(results) - module.exit_json(changed=False, **results) - - -def main(): - argument_spec = dict( - filters=dict(default=None, type='dict'), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - list_ec2_vpc_route_tables(connection, module) - - -if __name__ == '__main__': - main() From bd234b2d642a8a9f8e9a7e0f9f322317f12e8fc7 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 6 Apr 2022 01:02:43 +0200 Subject: [PATCH 413/683] New modules for RDS clusters management (#687) New modules for RDS clusters management SUMMARY Two new modules for RDS clusters management (rds_cluster and rds_cluster_info) Took over from: #262 Depends-On: ansible-collections/amazon.aws#553 Closes #849 Should also close #191 ISSUE TYPE New Module Pull Request Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- rds_cluster.py | 1026 +++++++++++++++++++++++++++++++++++++++++++ rds_cluster_info.py | 307 +++++++++++++ 2 files changed, 1333 insertions(+) create mode 100644 rds_cluster.py create mode 100644 rds_cluster_info.py diff --git a/rds_cluster.py b/rds_cluster.py new file mode 100644 index 00000000000..16f2ed5a97a --- /dev/null +++ b/rds_cluster.py @@ -0,0 +1,1026 @@ +#!/usr/bin/python +# Copyright (c) 2022 Ansible Project +# Copyright (c) 2022 Alina Buzachis (@alinabuzachis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rds_cluster +version_added: "3.2.0" +short_description: Create, modify, and delete RDS clusters +description: + - Create, modify, and delete RDS clusters. +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +author: + - Sloane Hertel (@s-hertel) + - Alina Buzachis (@alinabuzachis) +options: + # General module options + state: + description: Whether the cluster should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + creation_source: + description: Which source to use if creating from a template (an existing cluster, S3 bucket, or snapshot). + choices: ['snapshot', 's3', 'cluster'] + type: str + force_update_password: + description: + - Set to C(true) to update your cluster password with I(master_user_password). + - Since comparing passwords to determine if it needs to be updated is not possible, this is set to C(false) by default to allow idempotence. + type: bool + default: false + promote: + description: Set to C(true) to promote a read replica cluster. + type: bool + default: false + purge_cloudwatch_logs_exports: + description: + - Whether or not to disable Cloudwatch logs enabled for the DB cluster that are not provided in I(enable_cloudwatch_logs_exports). Set I(enable_cloudwatch_logs_exports) to an empty list to disable all. + type: bool + default: true + purge_tags: + description: + - Whether or not to remove tags assigned to the DB cluster if not specified in the playbook. To remove all tags set I(tags) to an empty dictionary in conjunction with this. + type: bool + default: true + purge_security_groups: + description: + - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the cluster.
+ - Can be applied to I(vpc_security_group_ids) + type: bool + default: true + wait: + description: Whether to wait for the cluster to be available or deleted. + type: bool + default: true + # Options that have a corresponding boto3 parameter + apply_immediately: + description: + - A value that specifies whether modifying a cluster with I(new_db_cluster_identifier) and I(master_user_password) + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes + are applied during the next maintenance window. + type: bool + default: false + availability_zones: + description: + - A list of EC2 Availability Zones that instances in the DB cluster can be created in. + May be used when creating a cluster or when restoring from S3 or a snapshot. + aliases: + - zones + - az + type: list + elements: str + backtrack_to: + description: + - The timestamp of the time to backtrack the DB cluster to in ISO 8601 format, such as "2017-07-08T18:00Z". + type: str + backtrack_window: + description: + - The target backtrack window, in seconds. To disable backtracking, set this value to C(0). + - If specified, this value must be set to a number from C(0) to C(259,200) (72 hours). + default: 0 + type: int + backup_retention_period: + description: + - The number of days for which automated backups are retained (must be within C(1) to C(35)). + May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. + type: int + default: 1 + character_set_name: + description: + - The character set to associate with the DB cluster. + type: str + database_name: + description: + - The name for your database. If a name is not provided Amazon RDS will not create a database. + aliases: + - db_name + type: str + db_cluster_identifier: + description: + - The DB cluster (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or + hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. + aliases: + - cluster_id + - id + - cluster_name + type: str + required: true + db_cluster_parameter_group_name: + description: + - The name of the DB cluster parameter group to associate with this DB cluster. + If this argument is omitted when creating a cluster, the default DB cluster parameter group for the specified DB engine and version is used. + type: str + db_subnet_group_name: + description: + - A DB subnet group to associate with this DB cluster if not using the default. + type: str + enable_cloudwatch_logs_exports: + description: + - A list of log types that need to be enabled for exporting to CloudWatch Logs. + - Engine aurora-mysql supports C(audit), C(error), C(general) and C(slowquery). + - Engine aurora-postgresql supports C(postgresql). + type: list + elements: str + deletion_protection: + description: + - A value that indicates whether the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + By default, deletion protection is disabled. + type: bool + global_cluster_identifier: + description: + - The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster. + type: str + enable_http_endpoint: + description: + - A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. + By default, the HTTP endpoint is disabled. 
+ type: bool + copy_tags_to_snapshot: + description: + - Indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. + type: bool + domain: + description: + - The Active Directory directory ID to create the DB cluster in. + type: str + domain_iam_role_name: + description: + - Specify the name of the IAM role to be used when making API calls to the Directory Service. + type: str + enable_global_write_forwarding: + description: + - A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database. By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. + - This value can only be set on Aurora DB clusters that are members of an Aurora global database. + type: bool + enable_iam_database_authentication: + description: + - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. If this option is omitted when creating the cluster, Amazon RDS sets this to C(false). + type: bool + engine: + description: + - The name of the database engine to be used for this DB cluster. This is required to create a cluster. + choices: + - aurora + - aurora-mysql + - aurora-postgresql + type: str + engine_version: + description: + - The version number of the database engine to use. + - For Aurora MySQL, for example, C(5.6.10a) or C(5.7.12). + - For Aurora PostgreSQL, for example, C(9.6.3). + type: str + final_snapshot_identifier: + description: + - The DB cluster snapshot identifier of the new DB cluster snapshot created when I(skip_final_snapshot=false). + type: str + force_backtrack: + description: + - A boolean to indicate if the DB cluster should be forced to backtrack when binary logging is enabled. Otherwise, an error occurs when binary logging is enabled. + type: bool + kms_key_id: + description: + - The AWS KMS key identifier (the ARN, unless you are creating a cluster in the same account that owns the KMS key, in which case the KMS key alias may be used). + - If I(replication_source_identifier) specifies an encrypted source, Amazon RDS will use the key used to encrypt the source. + - If I(storage_encrypted=true) and I(replication_source_identifier) is not provided, the default encryption key is used. + type: str + master_user_password: + description: + - An 8-41 character password for the master database user. + - The password can contain any printable ASCII character except "/", """, or "@". + - To modify the password use I(force_update_password). Use I(apply_immediately) to change the password immediately, otherwise it is updated during the next maintenance window. + aliases: + - password + type: str + master_username: + description: + - The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter. + aliases: + - username + type: str + new_db_cluster_identifier: + description: + - The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB cluster. + - The identifier must contain from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. + - Use I(apply_immediately) to rename immediately, otherwise it is updated during the next maintenance window. + aliases: + - new_cluster_id + - new_id + - new_cluster_name + type: str + option_group_name: + description: + - The option group to associate with the DB cluster.
+ type: str + port: + description: + - The port number on which the instances in the DB cluster accept connections. If not specified, Amazon RDS defaults this to C(3306) if the I(engine) is C(aurora) and C(5432) if the I(engine) is C(aurora-postgresql). + type: int + preferred_backup_window: + description: + - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with I(preferred_maintenance_window). + aliases: + - backup_window + type: str + preferred_maintenance_window: + description: + - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun. + aliases: + - maintenance_window + type: str + replication_source_identifier: + description: + - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica. + aliases: + - replication_src_id + type: str + restore_to_time: + description: + - The UTC date and time to restore the DB cluster to. Must be in the format "2015-03-07T23:45:00Z". + - If this is not provided while restoring a cluster, I(use_latest_restorable_time) must be. May not be specified if I(restore_type) is copy-on-write. + type: str + restore_type: + description: + - The type of restore to be performed. If not provided, Amazon RDS uses full-copy. + choices: + - full-copy + - copy-on-write + type: str + role_arn: + description: + - The Amazon Resource Name (ARN) of the IAM role to associate with the Aurora DB cluster, for example "arn:aws:iam::123456789012:role/AuroraAccessRole". + type: str + s3_bucket_name: + description: + - The name of the Amazon S3 bucket that contains the data used to create the Amazon Aurora DB cluster. + type: str + s3_ingestion_role_arn: + description: + - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access the Amazon S3 bucket on your behalf. + type: str + s3_prefix: + description: + - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster. + - If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket. + type: str + skip_final_snapshot: + description: + - Whether a final DB cluster snapshot is created before the DB cluster is deleted. + - If this is C(false), I(final_snapshot_identifier) must be provided. + type: bool + default: false + snapshot_identifier: + description: + - The identifier for the DB snapshot or DB cluster snapshot to restore from. + - You can use either the name or the ARN to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot. + type: str + source_db_cluster_identifier: + description: + - The identifier of the source DB cluster from which to restore. + type: str + source_engine: + description: + - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. + choices: + - mysql + type: str + source_engine_version: + description: + - The version of the database that the backup files were created from. + type: str + source_region: + description: + - The ID of the region that contains the source for the DB cluster.
+ type: str + storage_encrypted: + description: + - Whether the DB cluster is encrypted. + type: bool + tags: + description: + - A dictionary of key value pairs to assign the DB cluster. + type: dict + use_earliest_time_on_point_in_time_unavailable: + description: + - If I(backtrack_to) is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to + the earliest possible backtrack time. Otherwise, an error occurs. + type: bool + use_latest_restorable_time: + description: + - Whether to restore the DB cluster to the latest restorable backup time. Only one of I(use_latest_restorable_time) + and I(restore_to_time) may be provided. + type: bool + vpc_security_group_ids: + description: + - A list of EC2 VPC security groups to associate with the DB cluster. + type: list + elements: str +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Create minimal aurora cluster in default VPC and default subnet group + community.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + engine: "aurora" + password: "{{ password }}" + username: "{{ username }}" + +- name: Add a new security group without purge + community.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + vpc_security_group_ids: + - sg-0be17ba10c9286b0b + purge_security_groups: false + +- name: Modify password + community.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + password: "{{ new_password }}" + force_update_password: true + apply_immediately: true + +- name: Rename the cluster + community.aws.rds_cluster: + engine: aurora + password: "{{ password }}" + username: "{{ username }}" + cluster_id: "cluster-{{ resource_prefix }}" + new_cluster_id: "cluster-{{ resource_prefix }}-renamed" + apply_immediately: true + +- name: Delete aurora cluster without creating a final snapshot + community.aws.rds_cluster: + engine: aurora + password: "{{ password }}" + username: "{{ username }}" + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: True + tags: + Name: "cluster-{{ resource_prefix }}" + Created_By: "Ansible_rds_cluster_integration_test" + state: absent + +- name: Restore cluster from source snapshot + community.aws.rds_cluster: + engine: aurora + password: "{{ password }}" + username: "{{ username }}" + cluster_id: "cluster-{{ resource_prefix }}-restored" + snapshot_identifier: "cluster-{{ resource_prefix }}-snapshot" +''' + +RETURN = r''' +activity_stream_status: + description: The status of the database activity stream. + returned: always + type: str + sample: stopped +allocated_storage: + description: + - The allocated storage size in gigabytes. Since aurora storage size is not fixed this is + always 1 for aurora database engines. + returned: always + type: int + sample: 1 +associated_roles: + description: + - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated + with the DB cluster. Each dictionary contains the role_arn and the status of the role. + returned: always + type: list + sample: [] +availability_zones: + description: The list of availability zones that instances in the DB cluster can be created in. + returned: always + type: list + sample: + - us-east-1c + - us-east-1a + - us-east-1e +backup_retention_period: + description: The number of days for which automatic DB snapshots are retained. + returned: always + type: int + sample: 1 +changed: + description: If the RDS cluster has changed. 
+ returned: always + type: bool + sample: true +cluster_create_time: + description: The time in UTC when the DB cluster was created. + returned: always + type: str + sample: '2018-06-29T14:08:58.491000+00:00' +copy_tags_to_snapshot: + description: + - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. + returned: always + type: bool + sample: false +cross_account_clone: + description: + - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. + returned: always + type: bool + sample: false +db_cluster_arn: + description: The Amazon Resource Name (ARN) for the DB cluster. + returned: always + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo +db_cluster_identifier: + description: The lowercase user-supplied DB cluster identifier. + returned: always + type: str + sample: rds-cluster-demo +db_cluster_members: + description: + - A list of dictionaries containing information about the instances in the cluster. + Each dictionary contains the db_instance_identifier, is_cluster_writer (bool), + db_cluster_parameter_group_status, and promotion_tier (int). + returned: always + type: list + sample: [] +db_cluster_parameter_group: + description: The parameter group associated with the DB cluster. + returned: always + type: str + sample: default.aurora5.6 +db_cluster_resource_id: + description: The AWS Region-unique, immutable identifier for the DB cluster. + returned: always + type: str + sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU +db_subnet_group: + description: The name of the subnet group associated with the DB Cluster. + returned: always + type: str + sample: default +deletion_protection: + description: + - Indicates if the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + returned: always + type: bool + sample: false +domain_memberships: + description: + - The Active Directory Domain membership records associated with the DB cluster. + returned: always + type: list + sample: [] +earliest_restorable_time: + description: The earliest time to which a database can be restored with point-in-time restore. + returned: always + type: str + sample: '2018-06-29T14:09:34.797000+00:00' +endpoint: + description: The connection endpoint for the primary instance of the DB cluster. + returned: always + type: str + sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com +engine: + description: The database engine of the DB cluster. + returned: always + type: str + sample: aurora +engine_mode: + description: The DB engine mode of the DB cluster. + returned: always + type: str + sample: provisioned +engine_version: + description: The database engine version. + returned: always + type: str + sample: 5.6.10a +hosted_zone_id: + description: The ID that Amazon Route 53 assigns when you create a hosted zone. + returned: always + type: str + sample: Z2R2ITUGPM61AM +http_endpoint_enabled: + description: + - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. + returned: always + type: bool + sample: false +iam_database_authentication_enabled: + description: Whether IAM accounts may be mapped to database accounts. + returned: always + type: bool + sample: false +latest_restorable_time: + description: The latest time to which a database can be restored with point-in-time restore. 
+ returned: always + type: str + sample: '2018-06-29T14:09:34.797000+00:00' +master_username: + description: The master username for the DB cluster. + returned: always + type: str + sample: username +multi_az: + description: Whether the DB cluster has instances in multiple availability zones. + returned: always + type: bool + sample: false +port: + description: The port that the database engine is listening on. + returned: always + type: int + sample: 3306 +preferred_backup_window: + description: The UTC daily time range during which automated backups are created. + returned: always + type: str + sample: 10:18-10:48 +preferred_maintenance_window: + description: The UTC weekly time range during which system maintenance can occur. + returned: always + type: str + sample: tue:03:23-tue:03:53 +read_replica_identifiers: + description: A list of read replica ID strings associated with the DB cluster. + returned: always + type: list + sample: [] +reader_endpoint: + description: The reader endpoint for the DB cluster. + returned: always + type: str + sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com +status: + description: The status of the DB cluster. + returned: always + type: str + sample: available +storage_encrypted: + description: Whether the DB cluster is storage encrypted. + returned: always + type: bool + sample: false +tag_list: + description: A list of tags consisting of key-value pairs. + returned: always + type: list + elements: dict + sample: [ { "key": "Created_By", "value": "Ansible_rds_cluster_integration_test" } ] +tags: + description: A dictionary of key value pairs. + returned: always + type: dict + sample: { "Name": "rds-cluster-demo" } +vpc_security_groups: + description: A list of the DB cluster's security groups and their status. + returned: always + type: complex + contains: + status: + description: Status of the security group. + returned: always + type: str + sample: active + vpc_security_group_id: + description: Security group of the cluster.
+ returned: always + type: str + sample: sg-12345678 +''' + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_db_clusters(**params): + try: + paginator = client.get_paginator('describe_db_clusters') + return paginator.paginate(**params).build_full_result()['DBClusters'][0] + except is_boto3_error_code('DBClusterNotFoundFault'): + return {} + + +def get_add_role_options(params_dict, cluster): + current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])] + role = params_dict['RoleArn'] + if role is not None and role not in current_role_arns: + return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']} + return {} + + +def get_backtrack_options(params_dict): + options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable'] + if params_dict['BacktrackTo'] is not None: + options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None) + if 'ForceBacktrack' in params_dict: + options['Force'] = params_dict['ForceBacktrack'] + return options + return {} + + +def get_create_options(params_dict): + options = [ + 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', + 'CharacterSetName', 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', + 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'KmsKeyId', + 'Engine', 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'MasterUsername', + 'OptionGroupName', 'Port', 'ReplicationSourceIdentifier', 'SourceRegion', 'StorageEncrypted', + 'Tags', 'VpcSecurityGroupIds', 'EngineMode', 'ScalingConfiguration', 'DeletionProtection', + 'EnableHttpEndpoint', 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', + 'EnableGlobalWriteForwarding', + ] + + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_modify_options(params_dict, force_update_password): + options = [ + 'ApplyImmediately', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', + 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'EnableIAMDatabaseAuthentication', + 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'NewDBClusterIdentifier', + 'OptionGroupName', 'Port', 'VpcSecurityGroupIds', 'EnableIAMDatabaseAuthentication', + 'CloudwatchLogsExportConfiguration', 'DeletionProtection', 'EnableHttpEndpoint', + 'CopyTagsToSnapshot', 'EnableGlobalWriteForwarding', 'Domain', 'DomainIAMRoleName', + ] + modify_options = dict((k, v) for k, v in params_dict.items() if k in options and v 
is not None) + if not force_update_password: + modify_options.pop('MasterUserPassword', None) + return modify_options + + +def get_delete_options(params_dict): + options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot'] + return dict((k, params_dict[k]) for k in options if params_dict[k] is not None) + + +def get_restore_s3_options(params_dict): + options = [ + 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName', + 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName', + 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion', + 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port', + 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn', + 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags', + 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot', + 'Domain', 'DomainIAMRoleName', + ] + + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_restore_snapshot_options(params_dict): + options = [ + 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', + 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', + 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier', + 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection', + 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', + ] + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_restore_cluster_options(params_dict): + options = [ + 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports', + 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime', + 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime', + 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain', + 'DomainIAMRoleName', + ] + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_rds_method_attribute_name(cluster): + state = module.params['state'] + creation_source = module.params['creation_source'] + method_name = None + method_options_name = None + + if state == 'absent': + if cluster and cluster['Status'] not in ['deleting', 'deleted']: + method_name = 'delete_db_cluster' + method_options_name = 'get_delete_options' + else: + if cluster: + method_name = 'modify_db_cluster' + method_options_name = 'get_modify_options' + elif creation_source == 'snapshot': + method_name = 'restore_db_cluster_from_db_snapshot' + method_options_name = 'get_restore_snapshot_options' + elif creation_source == 's3': + method_name = 'restore_db_cluster_from_s3' + method_options_name = 'get_restore_s3_options' + elif creation_source == 'cluster': + method_name = 'restore_db_cluster_to_point_in_time' + method_options_name = 'get_restore_cluster_options' + else: + method_name = 'create_db_cluster' + method_options_name = 'get_create_options' + + return method_name, method_options_name + + +def add_role(params): + if not module.check_mode: + try: + client.add_role_to_db_cluster(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}") + 
wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + + +def backtrack_cluster(params): + if not module.check_mode: + try: + client.backtrack_db_cluster(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Unable to backtrack cluster {params['DBClusterIdentifier']}") + wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + + +def get_cluster(db_cluster_id): + try: + return _describe_db_clusters(DBClusterIdentifier=db_cluster_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to describe DB clusters") + + +def changing_cluster_options(modify_params, current_cluster): + changing_params = {} + apply_immediately = modify_params.pop('ApplyImmediately') + db_cluster_id = modify_params.pop('DBClusterIdentifier') + + enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None) + if enable_cloudwatch_logs_export is not None: + desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []} + provided_cloudwatch_logs = set(enable_cloudwatch_logs_export) + current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports']) + + desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export)) + if module.params['purge_cloudwatch_logs_exports']: + desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs)) + changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration + + password = modify_params.pop('MasterUserPassword', None) + if password: + changing_params['MasterUserPassword'] = password + + new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None) + if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']: + changing_params['NewDBClusterIdentifier'] = new_cluster_id + + option_group = modify_params.pop('OptionGroupName', None) + if ( + option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']] + ): + changing_params['OptionGroupName'] = option_group + + vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None) + if vpc_sgs: + desired_vpc_sgs = [] + provided_vpc_sgs = set(vpc_sgs) + current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']]) + if module.params['purge_security_groups']: + if provided_vpc_sgs != current_vpc_sgs: + desired_vpc_sgs = vpc_sgs + else: + if provided_vpc_sgs - current_vpc_sgs: + desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs) + + if desired_vpc_sgs: + changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs + + for param in modify_params: + if modify_params[param] != current_cluster.get(param): + changing_params[param] = modify_params[param] + + if changing_params: + changing_params['DBClusterIdentifier'] = db_cluster_id + if apply_immediately is not None: + changing_params['ApplyImmediately'] = apply_immediately + + return changing_params + + +def ensure_present(cluster, parameters, method_name, method_options_name): + changed = False + + if not cluster: + if parameters.get('Tags') is not None: + parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + call_method(client, module, method_name, eval(method_options_name)(parameters)) + changed = True + else: + if get_backtrack_options(parameters): + 
backtrack_cluster(get_backtrack_options(parameters)) + changed = True + else: + modifiable_options = eval(method_options_name)(parameters, + force_update_password=module.params['force_update_password']) + modify_options = changing_cluster_options(modifiable_options, cluster) + if modify_options: + call_method(client, module, method_name, modify_options) + changed = True + if module.params['tags'] is not None: + existing_tags = get_tags(client, module, cluster['DBClusterArn']) + changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'], + module.params['purge_tags']) + + add_role_params = get_add_role_options(parameters, cluster) + if add_role_params: + add_role(add_role_params) + changed = True + + if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'): + call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']}) + changed = True + + return changed + + +def main(): + global module + global client + + arg_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']), + force_update_password=dict(type='bool', default=False), + promote=dict(type='bool', default=False), + purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + purge_security_groups=dict(type='bool', default=True), + ) + + parameter_options = dict( + apply_immediately=dict(type='bool', default=False), + availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']), + backtrack_to=dict(), + backtrack_window=dict(type='int'), + backup_retention_period=dict(type='int', default=1), + character_set_name=dict(), + database_name=dict(aliases=['db_name']), + db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']), + db_cluster_parameter_group_name=dict(), + db_subnet_group_name=dict(), + enable_cloudwatch_logs_exports=dict(type='list', elements='str'), + deletion_protection=dict(type='bool'), + global_cluster_identifier=dict(), + enable_http_endpoint=dict(type='bool'), + copy_tags_to_snapshot=dict(type='bool'), + domain=dict(), + domain_iam_role_name=dict(), + enable_global_write_forwarding=dict(type='bool'), + enable_iam_database_authentication=dict(type='bool'), + engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql"]), + engine_version=dict(), + final_snapshot_identifier=dict(), + force_backtrack=dict(type='bool'), + kms_key_id=dict(), + master_user_password=dict(aliases=['password'], no_log=True), + master_username=dict(aliases=['username']), + new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']), + option_group_name=dict(), + port=dict(type='int'), + preferred_backup_window=dict(aliases=['backup_window']), + preferred_maintenance_window=dict(aliases=['maintenance_window']), + replication_source_identifier=dict(aliases=['replication_src_id']), + restore_to_time=dict(), + restore_type=dict(choices=['full-copy', 'copy-on-write']), + role_arn=dict(), + s3_bucket_name=dict(), + s3_ingestion_role_arn=dict(), + s3_prefix=dict(), + skip_final_snapshot=dict(type='bool', default=False), + snapshot_identifier=dict(), + source_db_cluster_identifier=dict(), + source_engine=dict(choices=['mysql']), + source_engine_version=dict(), + source_region=dict(), + storage_encrypted=dict(type='bool'), + 
tags=dict(type='dict'), + use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), + use_latest_restorable_time=dict(type='bool'), + vpc_security_group_ids=dict(type='list', elements='str'), + ) + arg_spec.update(parameter_options) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + required_if=[ + ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), + ('creation_source', 's3', ( + 's3_bucket_name', 'engine', 'master_username', 'master_user_password', + 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ], + mutually_exclusive=[ + ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'), + ('use_latest_restorable_time', 'restore_to_time'), + ], + supports_check_mode=True + ) + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS.') + + module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower() + cluster = get_cluster(module.params['db_cluster_identifier']) + + if module.params['new_db_cluster_identifier']: + module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower() + + if get_cluster(module.params['new_db_cluster_identifier']): + module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists") + if not cluster: + module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist") + + if ( + module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and + module.params['final_snapshot_identifier'] is None + ): + module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier') + + parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + + changed = False + method_name, method_options_name = get_rds_method_attribute_name(cluster) + + if method_name: + if method_name == 'delete_db_cluster': + call_method(client, module, method_name, eval(method_options_name)(parameters)) + changed = True + else: + changed |= ensure_present(cluster, parameters, method_name, method_options_name) + + if not module.check_mode and module.params['new_db_cluster_identifier'] and module.params['apply_immediately']: + cluster_id = module.params['new_db_cluster_identifier'] + else: + cluster_id = module.params['db_cluster_identifier'] + + result = camel_dict_to_snake_dict(get_cluster(cluster_id)) + + if result: + result['tags'] = get_tags(client, module, result['db_cluster_arn']) + + module.exit_json(changed=changed, **result) + + +if __name__ == '__main__': + main() diff --git a/rds_cluster_info.py b/rds_cluster_info.py new file mode 100644 index 00000000000..c53d661bd8b --- /dev/null +++ b/rds_cluster_info.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# Copyright (c) 2022 Ansible Project +# Copyright (c) 2022 Alina Buzachis (@alinabuzachis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: rds_cluster_info +version_added: 3.2.0 +short_description: Obtain information about one or more RDS clusters +description: + - Obtain information about one or more 
RDS clusters. +options: + db_cluster_identifier: + description: + - The user-supplied DB cluster identifier. + - If this parameter is specified, information from only the specific DB cluster is returned. + aliases: + - cluster_id + - id + - cluster_name + type: str + filters: + description: + - A filter that specifies one or more DB clusters to describe. + See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html). + type: dict +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 + +''' + +EXAMPLES = r''' +- name: Get info of all existing DB clusters + community.aws.rds_cluster_info: + register: _result_cluster_info + +- name: Get info on a specific DB cluster + community.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: _result_cluster_info + +- name: Get info on all DB clusters with a specific engine + community.aws.rds_cluster_info: + filters: + engine: "aurora" + register: _result_cluster_info +''' + +RETURN = r''' +clusters: + description: List of RDS clusters. + returned: always + type: list + contains: + activity_stream_status: + description: The status of the database activity stream. + type: str + sample: stopped + allocated_storage: + description: + - The allocated storage size in gigabytes. Since Aurora storage size is not fixed, this is + always 1 for Aurora database engines. + type: int + sample: 1 + associated_roles: + description: + - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated + with the DB cluster. Each dictionary contains the role_arn and the status of the role. + type: list + sample: [] + availability_zones: + description: The list of availability zones that instances in the DB cluster can be created in. + type: list + sample: + - us-east-1c + - us-east-1a + - us-east-1e + backup_retention_period: + description: The number of days for which automatic DB snapshots are retained. + type: int + sample: 1 + cluster_create_time: + description: The time in UTC when the DB cluster was created. + type: str + sample: '2018-06-29T14:08:58.491000+00:00' + copy_tags_to_snapshot: + description: + - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. + type: bool + sample: false + cross_account_clone: + description: + - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. + type: bool + sample: false + db_cluster_arn: + description: The Amazon Resource Name (ARN) for the DB cluster. + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo + db_cluster_identifier: + description: The lowercase user-supplied DB cluster identifier. + type: str + sample: rds-cluster-demo + db_cluster_members: + description: + - A list of dictionaries containing information about the instances in the cluster. + Each dictionary contains the I(db_instance_identifier), I(is_cluster_writer) (bool), + I(db_cluster_parameter_group_status), and I(promotion_tier) (int). + type: list + sample: [] + db_cluster_parameter_group: + description: The parameter group associated with the DB cluster. + type: str + sample: default.aurora5.6 + db_cluster_resource_id: + description: The AWS Region-unique, immutable identifier for the DB cluster. + type: str + sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU + db_subnet_group: + description: The name of the subnet group associated with the DB Cluster.
+ type: str + sample: default + deletion_protection: + description: + - Indicates if the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + type: bool + sample: false + domain_memberships: + description: + - The Active Directory Domain membership records associated with the DB cluster. + type: list + sample: [] + earliest_restorable_time: + description: The earliest time to which a database can be restored with point-in-time restore. + type: str + sample: '2018-06-29T14:09:34.797000+00:00' + endpoint: + description: The connection endpoint for the primary instance of the DB cluster. + type: str + sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com + engine: + description: The database engine of the DB cluster. + type: str + sample: aurora + engine_mode: + description: The DB engine mode of the DB cluster. + type: str + sample: provisioned + engine_version: + description: The database engine version. + type: str + sample: 5.6.10a + hosted_zone_id: + description: The ID that Amazon Route 53 assigns when you create a hosted zone. + type: str + sample: Z2R2ITUGPM61AM + http_endpoint_enabled: + description: + - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. + type: bool + sample: false + iam_database_authentication_enabled: + description: Whether IAM accounts may be mapped to database accounts. + type: bool + sample: false + latest_restorable_time: + description: The latest time to which a database can be restored with point-in-time restore. + type: str + sample: '2018-06-29T14:09:34.797000+00:00' + master_username: + description: The master username for the DB cluster. + type: str + sample: username + multi_az: + description: Whether the DB cluster has instances in multiple availability zones. + type: bool + sample: false + port: + description: The port that the database engine is listening on. + type: int + sample: 3306 + preferred_backup_window: + description: The daily time range (in UTC) during which automated backups are created. + type: str + sample: 10:18-10:48 + preferred_maintenance_window: + description: The UTC weekly time range during which system maintenance can occur. + type: str + sample: tue:03:23-tue:03:53 + read_replica_identifiers: + description: A list of read replica ID strings associated with the DB cluster. + type: list + sample: [] + reader_endpoint: + description: The reader endpoint for the DB cluster. + type: str + sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com + status: + description: The status of the DB cluster. + type: str + sample: available + storage_encrypted: + description: Whether the DB cluster is storage encrypted. + type: bool + sample: false + tag_list: + description: A list of tags consisting of key-value pairs. + type: list + elements: dict + sample: [ + { + "key": "Created_By", + "value": "Ansible_rds_cluster_integration_test" + } + ] + tags: + description: A dictionary of key value pairs. + type: dict + sample: { + "Name": "rds-cluster-demo" + } + vpc_security_groups: + description: A list of the DB cluster's security groups and their status. + type: complex + contains: + status: + description: Status of the security group. + type: str + sample: active + vpc_security_group_id: + description: Security group of the cluster.
+ type: str + sample: sg-12345678 +''' + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_db_clusters(client, **params): + try: + paginator = client.get_paginator('describe_db_clusters') + return paginator.paginate(**params).build_full_result()['DBClusters'] + except is_boto3_error_code('DBClusterNotFoundFault'): + return [] + + +def cluster_info(client, module): + cluster_id = module.params.get('db_cluster_identifier') + filters = module.params.get('filters') + + params = dict() + if cluster_id: + params['DBClusterIdentifier'] = cluster_id + if filters: + params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + + try: + result = _describe_db_clusters(client, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get RDS cluster information.") + + for cluster in result: + cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn']) + + return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result]) + + +def main(): + argument_spec = dict( + db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']), + filters=dict(type='dict'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS.') + + module.exit_json(**cluster_info(client, module)) + + +if __name__ == '__main__': + main() From 6d365b300e60794929f057a7fc778e240cf7352c Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Mon, 11 Apr 2022 17:48:13 -0700 Subject: [PATCH 414/683] ec2_asg_lifecycle_hook: add integration tests (#1048) ec2_asg_lifecycle_hook: add integration tests SUMMARY Adding integration tests to ec2_asg_lifecycle_hook module. Fixing idempotency. 
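To make the intended idempotency concrete: the new tests boil down to running a task like the one below twice and asserting that the second run reports changed=false. This is an illustrative sketch only; the group name, hook name, and timeout values are placeholders, while the option names are the module's existing ones.

    - name: Create lifecycle hook
      community.aws.ec2_asg_lifecycle_hook:
        autoscaling_group_name: my-asg
        lifecycle_hook_name: my-launch-hook
        transition: autoscaling:EC2_INSTANCE_LAUNCHING
        heartbeat_timeout: 3600
        default_result: CONTINUE
        state: present
      register: hook_result

Previously the comparison against the describe_lifecycle_hooks response also counted keys present on only one side (for example optional parameters left unset), which could flag an unchanged hook as modified; the module now calls put_lifecycle_hook only when a provided value actually differs.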
ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_asg_lifecycle_hook Reviewed-by: Joseph Torcasso Reviewed-by: Mandar Kulkarni Reviewed-by: Jill R --- ec2_asg_lifecycle_hook.py | 82 ++++++++++++++++++++++++++++----------- 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index fbdc4a3150d..713a147872f 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -97,19 +97,50 @@ ''' RETURN = ''' - +--- +auto_scaling_group_name: + description: The unique name of the auto scaling group + returned: success + type: str + sample: "myasg" +default_result: + description: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs + returned: success + type: str + sample: CONTINUE +global_timeout: + description: The maximum time, in seconds, that an instance can remain in a Pending:Wait or Terminating:Wait state + returned: success + type: int + sample: 172800 +heartbeat_timeout: + description: The maximum time, in seconds, that can elapse before the lifecycle hook times out + returned: success + type: int + sample: 3600 +lifecycle_hook_name: + description: The name of the lifecycle hook + returned: success + type: str + sample: "mylifecyclehook" +lifecycle_transition: + description: The instance state to which lifecycle hook should be attached + returned: success + type: str + sample: "autoscaling:EC2_INSTANCE_LAUNCHING" ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule try: import botocore except ImportError: pass # handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + def create_lifecycle_hook(connection, module): - changed = False lch_name = module.params.get('lifecycle_hook_name') asg_name = module.params.get('autoscaling_group_name') @@ -120,6 +151,9 @@ def create_lifecycle_hook(connection, module): heartbeat_timeout = module.params.get('heartbeat_timeout') default_result = module.params.get('default_result') + return_object = {} + return_object['changed'] = False + lch_params = { 'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name, @@ -150,23 +184,26 @@ def create_lifecycle_hook(connection, module): module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") if not existing_hook: - changed = True - else: - # GlobalTimeout is not configurable, but exists in response. - # Removing it helps to compare both dicts in order to understand - # what changes were done. 
- del(existing_hook[0]['GlobalTimeout']) - added, removed, modified, same = dict_compare(lch_params, existing_hook[0]) - if added or removed or modified: - changed = True - - if changed: try: + return_object['changed'] = True connection.put_lifecycle_hook(**lch_params) + return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") - return(changed) + else: + added, removed, modified, same = dict_compare(lch_params, existing_hook[0]) + if modified: + try: + return_object['changed'] = True + connection.put_lifecycle_hook(**lch_params) + return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create LifecycleHook") + + module.exit_json(**camel_dict_to_snake_dict(return_object)) def dict_compare(d1, d2): @@ -186,11 +223,13 @@ def dict_compare(d1, d2): def delete_lifecycle_hook(connection, module): - changed = False lch_name = module.params.get('lifecycle_hook_name') asg_name = module.params.get('autoscaling_group_name') + return_object = {} + return_object['changed'] = False + try: all_hooks = connection.describe_lifecycle_hooks( AutoScalingGroupName=asg_name @@ -207,13 +246,14 @@ def delete_lifecycle_hook(connection, module): try: connection.delete_lifecycle_hook(**lch_params) - changed = True + return_object['changed'] = True + return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete LifecycleHook") else: pass - return(changed) + module.exit_json(**camel_dict_to_snake_dict(return_object)) def main(): @@ -238,11 +278,9 @@ def main(): changed = False if state == 'present': - changed = create_lifecycle_hook(connection, module) + create_lifecycle_hook(connection, module) elif state == 'absent': - changed = delete_lifecycle_hook(connection, module) - - module.exit_json(changed=changed) + delete_lifecycle_hook(connection, module) if __name__ == '__main__': From 84358a165e96b327f6179fcf1df6bf014b41ffd5 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Tue, 12 Apr 2022 00:53:27 -0400 Subject: [PATCH 415/683] rds_instance - fix check_mode and idempotence bugs and support adding/removing iam roles (#1002) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit rds_instance - fix check_mode and idempotence bugs and support adding/removing iam roles SUMMARY Support the addition and deletion of iam roles to db instances Fixes #464 Fixes #1013 Integration tests to test both this and the amazon.aws module_util rds changes Depends-On ansible-collections/amazon.aws#714 ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION Wasn't sure the best way to go about deleting IAM roles - ended up using a purge_iam_roles param that defaults to False, which seems consistent with other modules I've looked at. 
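For reference, the role comparison this delegates to amazon.aws behaves roughly like the sketch below (illustrative only, assuming snake_cased role dicts with role_arn and feature_name keys; the authoritative implementation lives in amazon.aws's module_utils/rds.py):

    def compare_iam_roles(existing_roles, target_roles, purge_roles):
        # Reduce both sides to hashable (role_arn, feature_name) pairs.
        existing = {(role['role_arn'], role['feature_name']) for role in existing_roles}
        target = {(role['role_arn'], role['feature_name']) for role in target_roles}
        # Roles requested but not yet associated must be added.
        roles_to_add = [{'role_arn': arn, 'feature_name': feature} for arn, feature in target - existing]
        # Roles associated but not requested are removed only when purging.
        roles_to_remove = [{'role_arn': arn, 'feature_name': feature} for arn, feature in existing - target] if purge_roles else []
        return roles_to_add, roles_to_remove

With purge_iam_roles left at its default of False, roles_to_remove stays empty, so existing associations are never dropped implicitly.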
Reviewed-by: Mark Woolley Reviewed-by: Joseph Torcasso Reviewed-by: Gonéri Le Bouder Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- rds_instance.py | 174 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 153 insertions(+), 21 deletions(-) diff --git a/rds_instance.py b/rds_instance.py index cdd0d13fa90..4ae96546a0c 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -205,6 +205,23 @@ description: - Set to true to conduct the reboot through a MultiAZ failover. type: bool + iam_roles: + description: + - List of Amazon Web Services Identity and Access Management (IAM) roles to associate with DB instance. + type: list + elements: dict + suboptions: + feature_name: + description: + - The name of the feature associated with the IAM role. + type: str + required: yes + role_arn: + description: + - The ARN of the IAM role to associate with the DB instance. + type: str + required: yes + version_added: 3.3.0 iops: description: - The Provisioned IOPS (I/O operations per second) value. Is only set when using I(storage_type) is set to io1. @@ -316,6 +333,12 @@ a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. type: bool + purge_iam_roles: + description: + - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance. + type: bool + default: False + version_added: 3.3.0 restore_time: description: - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. @@ -462,7 +485,49 @@ vpc_security_group_ids: - sg-0be17ba10c9286b0b purge_security_groups: false - register: result + register: result + +# Add IAM role to db instance +- name: Create IAM policy + community.aws.iam_managed_policy: + policy_name: "my-policy" + policy: "{{ lookup('file','files/policy.json') }}" + state: present + register: iam_policy + +- name: Create IAM role + community.aws.iam_role: + assume_role_policy_document: "{{ lookup('file','files/assume_policy.json') }}" + name: "my-role" + state: present + managed_policy: "{{ iam_policy.policy.arn }}" + register: iam_role + +- name: Create DB instance with added IAM role + community.aws.rds_instance: + id: "my-instance-id" + state: present + engine: postgres + engine_version: 14.2 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: db.m6g.large + allocated_storage: "{{ allocated_storage }}" + iam_roles: + - role_arn: "{{ iam_role.arn }}" + feature_name: 's3Export' + +- name: Remove IAM role from DB instance + community.aws.rds_instance: + id: "my-instance-id" + state: present + engine: postgres + engine_version: 14.2 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: db.m6g.large + allocated_storage: "{{ allocated_storage }}" + purge_iam_roles: yes ''' RETURN = r''' @@ -780,16 +845,23 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from 
ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_identifier from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles + valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] +valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb', + 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] + def get_rds_method_attribute_name(instance, state, creation_source, read_replica): method_name = None @@ -945,23 +1017,21 @@ def get_current_attributes_with_inconsistent_keys(instance): options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] - options['AllowMajorVersionUpgrade'] = None options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] # PerformanceInsightsEnabled is not returned on older RDS instances it seems options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False) - options['MasterUserPassword'] = None options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] + # Neither of these are returned via describe_db_instances, so if either is specified during a check_mode run, changed=True + options['AllowMajorVersionUpgrade'] = None + options['MasterUserPassword'] = None + return options def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups): changing_params = {} current_options = get_current_attributes_with_inconsistent_keys(instance) - - if current_options.get("MaxAllocatedStorage") is None: - current_options["MaxAllocatedStorage"] = None - for option in current_options: current_option = current_options[option] desired_option = modify_params.pop(option, None) @@ -982,9 +1052,14 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c if desired_option in current_option: continue - if current_option == desired_option: + # Current option and desired option are the same - continue loop + if option != 'ProcessorFeatures' and current_option == desired_option: + continue + + if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'): continue + # Current option and desired option are different - add to changing_params list if option == 'ProcessorFeatures' and desired_option == []: changing_params['UseDefaultProcessorFeatures'] = True elif option == 'CloudwatchLogsExportConfiguration': @@ -1074,13 +1149,48 @@ def update_instance(client, module, instance, instance_id): def promote_replication_instance(client, module, instance, read_replica): changed = False if read_replica is False: - changed = 
bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos')) - if changed: - try: - call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) - changed = True - except is_boto3_error_message('DB Instance is not a read replica'): - pass + # 'StatusInfos' only exists when the instance is a read replica + # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html + if bool(instance.get('StatusInfos')): + try: + result, changed = call_method(client, module, method_name='promote_read_replica', + parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) + except is_boto3_error_message('DB Instance is not a read replica'): + pass + return changed + + +def ensure_iam_roles(client, module, instance_id): + ''' + Ensure specified IAM roles are associated with DB instance + + Parameters: + client: RDS client + module: AWSModule + instance_id: DB's instance ID + + Returns: + changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not + ''' + instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures']) + + # Ensure engine type supports associating IAM roles + engine = instance.get('engine') + if engine not in valid_engines_iam_roles: + module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. Valid engines are {1}'.format(engine, valid_engines_iam_roles)) + + changed = False + purge_iam_roles = module.params.get('purge_iam_roles') + target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else [] + existing_roles = instance.get('associated_roles', []) + roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles) + if bool(roles_to_add or roles_to_remove): + changed = True + # Don't update on check_mode + if module.check_mode: + module.exit_json(changed=changed, **instance) + else: + update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove) return changed @@ -1121,6 +1231,7 @@ def main(): creation_source=dict(choices=['snapshot', 's3', 'instance']), force_update_password=dict(type='bool', default=False, no_log=False), purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_iam_roles=dict(type='bool', default=False), purge_tags=dict(type='bool', default=True), read_replica=dict(type='bool'), wait=dict(type='bool', default=True), @@ -1154,6 +1265,7 @@ def main(): engine_version=dict(), final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), force_failover=dict(type='bool'), + iam_roles=dict(type='list', elements='dict'), iops=dict(type='int'), kms_key_id=dict(), license_model=dict(), @@ -1230,6 +1342,13 @@ def main(): if module.params['preferred_maintenance_window']: module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower() + # Throw warning regarding case when allow_major_version_upgrade is specified in check_mode + # describe_rds_instance never returns this value, so on check_mode, it will always return changed=True + # In non-check mode runs, changed will return the correct value, so no need to warn there. + # see: amazon.aws.module_utils.rds.handle_errors. 
+ if module.params.get('allow_major_version_upgrade') and module.check_mode: + module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.') + client = module.client('rds') changed = False state = module.params['state'] @@ -1239,17 +1358,30 @@ def main(): method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica']) if method_name: + + # Exit on create/delete if check_mode + if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']: + module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) + raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) - parameters = get_parameters(client, module, raw_parameters, method_name) + parameters_to_modify = get_parameters(client, module, raw_parameters, method_name) - if parameters: - result, changed = call_method(client, module, method_name, parameters) + if parameters_to_modify: + # Exit on check_mode when parameters to modify + if module.check_mode: + module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) + result, changed = call_method(client, module, method_name, parameters_to_modify) instance_id = get_final_identifier(method_name, module) - # Check tagging/promoting/rebooting/starting/stopping instance - if state != 'absent' and (not module.check_mode or instance): - changed |= update_instance(client, module, instance, instance_id) + if state != 'absent': + # Check tagging/promoting/rebooting/starting/stopping instance + if not module.check_mode or instance: + changed |= update_instance(client, module, instance, instance_id) + + # Check IAM roles + if module.params.get('iam_roles') or module.params.get('purge_iam_roles'): + changed |= ensure_iam_roles(client, module, instance_id) if changed: instance = get_instance(client, module, instance_id) From fdda10f189cebac32e3bfc2b8b70e6114fb0c86f Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 13 Apr 2022 15:06:43 -0400 Subject: [PATCH 416/683] iam_role - delete inline policies, stabilize for migration to amazon.aws (#1054) iam_role - delete inline policies, stabilize for migration to amazon.aws SUMMARY Stabilize for migration to amazon.aws delete inline policies before deleting the role removed global vars and refactored function definitions added some extra integration tests for check mode ISSUE TYPE Feature Pull Request COMPONENT NAME iam_role Reviewed-by: Markus Bergholz Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis Reviewed-by: Jill R --- iam_role.py | 297 +++++++++++++++++++++++++++------------------------- 1 file changed, 155 insertions(+), 142 deletions(-) diff --git a/iam_role.py b/iam_role.py index 15683e0e060..14a21fcf1f2 100644 --- a/iam_role.py +++ b/iam_role.py @@ -218,20 +218,13 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc): - if not compare_policies(current_policy_doc, json.loads(new_policy_doc)): - return True - else: - return False - - @AWSRetry.jittered_backoff() -def _list_policies(): +def _list_policies(client): paginator = client.get_paginator('list_policies') return paginator.paginate().build_full_result()['Policies'] -def wait_iam_exists(): +def 
wait_iam_exists(module, client): if module.check_mode: return if not module.params.get('wait'): @@ -255,11 +248,12 @@ def wait_iam_exists(): module.fail_json_aws(e, msg='Failed while waiting on IAM role creation') -def convert_friendly_names_to_arns(policy_names): +def convert_friendly_names_to_arns(module, client, policy_names): if not any(not policy.startswith('arn:') for policy in policy_names): return policy_names + allpolicies = {} - policies = _list_policies() + policies = _list_policies(client) for policy in policies: allpolicies[policy['PolicyName']] = policy['Arn'] @@ -270,31 +264,48 @@ def convert_friendly_names_to_arns(policy_names): module.fail_json_aws(e, msg="Couldn't find policy") -def attach_policies(policies_to_attach, params): +def attach_policies(module, client, policies_to_attach, role_name): + if module.check_mode and policies_to_attach: + return True + changed = False for policy_arn in policies_to_attach: try: - if not module.check_mode: - client.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True) + client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True) + changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName'])) - changed = True + module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name)) return changed -def remove_policies(policies_to_remove, params): +def remove_policies(module, client, policies_to_remove, role_name): + if module.check_mode and policies_to_remove: + return True + changed = False for policy in policies_to_remove: try: - if not module.check_mode: - client.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName'])) - changed = True + client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True) + changed = True + except is_boto3_error_code('NoSuchEntityException'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) return changed -def generate_create_params(): +def remove_inline_policies(module, client, role_name): + current_inline_policies = get_inline_policy_list(module, client, role_name) + for policy in current_inline_policies: + try: + client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True) + except is_boto3_error_code('NoSuchEntityException'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) + + +def generate_create_params(module): params = dict() params['Path'] = module.params.get('path') params['RoleName'] = module.params.get('name') @@ -311,31 +322,30 @@ def generate_create_params(): return params -def create_basic_role(params): +def create_basic_role(module, client): """ Perform the Role creation. Assumes tests for the role existing have already been performed. 
""" + if module.check_mode: + module.exit_json(changed=True) try: - if not module.check_mode: - role = client.create_role(aws_retry=True, **params) - # 'Description' is documented as key of the role returned by create_role - # but appears to be an AWS bug (the value is not returned using the AWS CLI either). - # Get the role after creating it. - role = get_role_with_backoff(params['RoleName']) - else: - role = {'MadeInCheckMode': True} - role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument']) + params = generate_create_params(module) + role = client.create_role(aws_retry=True, **params) + # 'Description' is documented as key of the role returned by create_role + # but appears to be an AWS bug (the value is not returned using the AWS CLI either). + # Get the role after creating it. + role = get_role_with_backoff(module, client, params['RoleName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create role") return role -def update_role_assumed_policy(params, role): +def update_role_assumed_policy(module, client, role_name, target_assumed_policy, current_assumed_policy): # Check Assumed Policy document - if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']): + if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)): return False if module.check_mode: @@ -343,83 +353,72 @@ def update_role_assumed_policy(params, role): try: client.update_assume_role_policy( - RoleName=params['RoleName'], - PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])), + RoleName=role_name, + PolicyDocument=target_assumed_policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) return True -def update_role_description(params, role): +def update_role_description(module, client, role_name, target_description, current_description): # Check Description update - if params.get('Description') is None: - return False - if role.get('Description') == params['Description']: + if target_description is None or current_description == target_description: return False if module.check_mode: return True try: - client.update_role(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True) + client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name)) return True -def update_role_max_session_duration(params, role): +def update_role_max_session_duration(module, client, role_name, target_duration, current_duration): # Check MaxSessionDuration update - if params.get('MaxSessionDuration') is None: - return False - if role.get('MaxSessionDuration') == params['MaxSessionDuration']: + if target_duration is None or current_duration == target_duration: return False if module.check_mode: return True try: - client.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True) + 
client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name)) return True -def update_role_permissions_boundary(params, role): +def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary): # Check PermissionsBoundary - if params.get('PermissionsBoundary') is None: - return False - if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''): + if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary: return False if module.check_mode: return True - if params.get('PermissionsBoundary') == '': + if target_permissions_boundary == '': try: - client.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True) + client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) else: try: - client.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True) + client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) return True -def update_managed_policies(params, role, managed_policies, purge_policies): +def update_managed_policies(module, client, role_name, managed_policies, purge_policies): # Check Managed Policies if managed_policies is None: return False - # If we're manipulating a fake role - if role.get('MadeInCheckMode', False): - role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies)) - return True - # Get list of current attached managed policies - current_attached_policies = get_attached_policy_list(params['RoleName']) + current_attached_policies = get_attached_policy_list(module, client, role_name) current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] if len(managed_policies) == 1 and managed_policies[0] is None: @@ -429,84 +428,97 @@ def update_managed_policies(params, role, managed_policies, purge_policies): policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list) changed = False + if purge_policies and policies_to_remove: + if module.check_mode: + return True + else: + changed |= remove_policies(module, client, policies_to_remove, role_name) - if purge_policies: - changed |= remove_policies(policies_to_remove, params) - - changed |= attach_policies(policies_to_attach, params) + if policies_to_attach: + if module.check_mode: + return True + else: + changed |= attach_policies(module, client, policies_to_attach, role_name) return changed -def 
create_or_update_role(): +def create_or_update_role(module, client): - params = generate_create_params() - role_name = params['RoleName'] + role_name = module.params.get('name') + assumed_policy = module.params.get('assume_role_policy_document') create_instance_profile = module.params.get('create_instance_profile') + description = module.params.get('description') + duration = module.params.get('max_session_duration') + path = module.params.get('path') + permissions_boundary = module.params.get('boundary') + purge_tags = module.params.get('purge_tags') + tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None purge_policies = module.params.get('purge_policies') if purge_policies is None: purge_policies = True managed_policies = module.params.get('managed_policies') if managed_policies: # Attempt to list the policies early so we don't leave things behind if we can't find them. - managed_policies = convert_friendly_names_to_arns(managed_policies) + managed_policies = convert_friendly_names_to_arns(module, client, managed_policies) changed = False # Get role - role = get_role(role_name) + role = get_role(module, client, role_name) # If role is None, create it if role is None: - role = create_basic_role(params) + role = create_basic_role(module, client) if not module.check_mode and module.params.get('wait'): - wait_iam_exists() + wait_iam_exists(module, client) changed = True else: - changed |= update_role_tags(params, role) - changed |= update_role_assumed_policy(params, role) - changed |= update_role_description(params, role) - changed |= update_role_max_session_duration(params, role) - changed |= update_role_permissions_boundary(params, role) + # Role exists - get current attributes + current_assumed_policy = role.get('AssumeRolePolicyDocument') + current_description = role.get('Description') + current_duration = role.get('MaxSessionDuration') + current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '') + + # Update attributes + changed |= update_role_tags(module, client, role_name, tags, purge_tags) + changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy) + changed |= update_role_description(module, client, role_name, description, current_description) + changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration) + changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary) if not module.check_mode and module.params.get('wait'): - wait_iam_exists() + wait_iam_exists(module, client) if create_instance_profile: - changed |= create_instance_profiles(params, role) + changed |= create_instance_profiles(module, client, role_name, path) if not module.check_mode and module.params.get('wait'): - wait_iam_exists() + wait_iam_exists(module, client) - changed |= update_managed_policies(params, role, managed_policies, purge_policies) - wait_iam_exists() + changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies) + wait_iam_exists(module, client) # Get the role again - if not role.get('MadeInCheckMode', False): - role = get_role(params['RoleName']) - role['AttachedPolicies'] = get_attached_policy_list(params['RoleName']) - role['tags'] = get_role_tags() - - module.exit_json( - changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']), - **camel_dict_to_snake_dict(role, ignore_list=['tags'])) + role = 
get_role(module, client, role_name) + role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name) + role['tags'] = get_role_tags(module, client) + module.exit_json(changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags'])) -def create_instance_profiles(params, role): - if role.get('MadeInCheckMode', False): - return False +def create_instance_profiles(module, client, role_name, path): # Fetch existing Profiles try: - instance_profiles = client.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Profile already exists - if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles): + if any(p['InstanceProfileName'] == role_name for p in instance_profiles): return False if module.check_mode: @@ -514,29 +526,28 @@ def create_instance_profiles(params, role): # Make sure an instance profile is created try: - client.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True) + client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True) except is_boto3_error_code('EntityAlreadyExists'): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... return False except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) # And attach the role to the profile try: - client.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True) + client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName'])) + module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name)) return True -def remove_instance_profiles(role_params, role): - role_name = module.params.get('name') +def remove_instance_profiles(module, client, role_name): delete_profiles = module.params.get("delete_instance_profile") try: - instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) @@ -545,52 +556,53 @@ def remove_instance_profiles(role_params, role): profile_name = profile['InstanceProfileName'] try: if not module.check_mode: - client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params) + client.remove_role_from_instance_profile(aws_retry=True, 
InstanceProfileName=profile_name, RoleName=role_name) if profile_name == role_name: if delete_profiles: try: client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except is_boto3_error_code('NoSuchEntityException'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) -def destroy_role(): +def destroy_role(module, client): role_name = module.params.get('name') - role = get_role(role_name) - role_params = dict() - role_params['RoleName'] = role_name - boundary_params = dict(role_params) - boundary_params['PermissionsBoundary'] = '' + role = get_role(module, client, role_name) if role is None: module.exit_json(changed=False) - # Before we try to delete the role we need to remove any - # - attached instance profiles - # - attached managed policies - remove_instance_profiles(role_params, role) - update_managed_policies(role_params, role, [], True) - - try: - if not module.check_mode: - client.delete_role(aws_retry=True, **role_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete role") + if not module.check_mode: + # Before we try to delete the role we need to remove any + # - attached instance profiles + # - attached managed policies + # - embedded inline policies + remove_instance_profiles(module, client, role_name) + update_managed_policies(module, client, role_name, [], True) + remove_inline_policies(module, client, role_name) + try: + client.delete_role(aws_retry=True, RoleName=role_name) + except is_boto3_error_code('NoSuchEntityException'): + module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete role") module.exit_json(changed=True) -def get_role_with_backoff(name): +def get_role_with_backoff(module, client, name): try: return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) -def get_role(name): +def get_role(module, client, name): try: return client.get_role(RoleName=name, aws_retry=True)['Role'] except is_boto3_error_code('NoSuchEntity'): @@ -599,14 +611,21 @@ def get_role(name): module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) -def get_attached_policy_list(name): +def get_attached_policy_list(module, client, name): try: return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) -def get_role_tags(): +def get_inline_policy_list(module, client, name): + try: + return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to list 
attached policies for role {0}".format(name)) + + +def get_role_tags(module, client): role_name = module.params.get('name') try: return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) @@ -614,15 +633,11 @@ def get_role_tags(): module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) -def update_role_tags(params, role): - new_tags = params.get('Tags') +def update_role_tags(module, client, role_name, new_tags, purge_tags): if new_tags is None: return False new_tags = boto3_tag_list_to_ansible_dict(new_tags) - role_name = module.params.get('name') - purge_tags = module.params.get('purge_tags') - try: existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): @@ -645,9 +660,6 @@ def update_role_tags(params, role): def main(): - global module - global client - argument_spec = dict( name=dict(type='str', required=True), path=dict(type='str', default="/"), @@ -665,6 +677,7 @@ def main(): wait=dict(type='bool', default=True), wait_timeout=dict(default=120, type='int'), ) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[('state', 'present', ['assume_role_policy_document'])], supports_check_mode=True) @@ -692,9 +705,9 @@ def main(): state = module.params.get("state") if state == 'present': - create_or_update_role() - else: - destroy_role() + create_or_update_role(module, client) + elif state == 'absent': + destroy_role(module, client) if __name__ == '__main__': From 047516c7671a3a1b394ffff7f4c0363f7f43e5b4 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Wed, 13 Apr 2022 15:23:07 -0700 Subject: [PATCH 417/683] ec2_asg_lifecycle_hook: Add check_mode support (#1060) ec2_asg_lifecycle_hook: Add check_mode support SUMMARY Add check_mode support to ec2_asg_lifecycle_hook. 
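For example, a task like the following (illustrative names and values) can now be run with --check to preview the result without modifying the Auto Scaling group:
- name: Preview a lifecycle hook change
  community.aws.ec2_asg_lifecycle_hook:
    autoscaling_group_name: example-asg
    lifecycle_hook_name: example-hook
    transition: autoscaling:EC2_INSTANCE_LAUNCHING
    heartbeat_timeout: 7200
    state: present
  check_mode: true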
ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_asg_lifecycle_hook Reviewed-by: Markus Bergholz Reviewed-by: Joseph Torcasso --- ec2_asg_lifecycle_hook.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/ec2_asg_lifecycle_hook.py b/ec2_asg_lifecycle_hook.py index 713a147872f..351bba5b84d 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/ec2_asg_lifecycle_hook.py @@ -185,6 +185,8 @@ def create_lifecycle_hook(connection, module): if not existing_hook: try: + if module.check_mode: + module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.") return_object['changed'] = True connection.put_lifecycle_hook(**lch_params) return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( @@ -196,6 +198,8 @@ def create_lifecycle_hook(connection, module): added, removed, modified, same = dict_compare(lch_params, existing_hook[0]) if modified: try: + if module.check_mode: + module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.") return_object['changed'] = True connection.put_lifecycle_hook(**lch_params) return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( @@ -245,6 +249,8 @@ def delete_lifecycle_hook(connection, module): } try: + if module.check_mode: + module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.") connection.delete_lifecycle_hook(**lch_params) return_object['changed'] = True return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name} @@ -269,8 +275,12 @@ def main(): state=dict(default='present', choices=['present', 'absent']) ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['transition']]]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['transition']]], + ) + state = module.params.get('state') connection = module.client('autoscaling') From af675610e8c2ab53286c1c3194b88d0aa62006af Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Thu, 14 Apr 2022 10:28:44 +0200 Subject: [PATCH 418/683] New aws_api_gateway_domain module for adding custom domains (#44) New aws_api_gateway_domain module for adding custom domains SUMMARY New module to setup a custom domain for AWS API Gateway services. ISSUE TYPE New Module Pull Request COMPONENT NAME aws_api_gateway_domain ADDITIONAL INFORMATION Complements already existing aws_api_gateway module to also allow custom domain setup. 
Opened here as suggested in ansible PR ansible/ansible#68709 Reviewed-by: Stefan Horning Reviewed-by: Jill R Reviewed-by: Sandra McCann Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- aws_api_gateway_domain.py | 333 ++++++++++++++++++++++++++++++ 1 file changed, 333 insertions(+) create mode 100644 aws_api_gateway_domain.py diff --git a/aws_api_gateway_domain.py b/aws_api_gateway_domain.py new file mode 100644 index 00000000000..a8a04295dae --- /dev/null +++ b/aws_api_gateway_domain.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: aws_api_gateway_domain +short_description: Manage AWS API Gateway custom domains +description: + - Manages API Gateway custom domains for API GW Rest APIs. + - AWS API Gateway custom domain setups use CloudFront behind the scenes. So you will get a CloudFront distribution as a result, configured to be aliased with your domain. +version_added: '3.3.0' +author: + - 'Stefan Horning (@stefanhorning)' +options: + domain_name: + description: + - Domain name you want to use for your API GW deployment. + required: true + type: str + certificate_arn: + description: + - AWS Certificate Manager (ACM) TLS certificate ARN. + type: str + required: true + security_policy: + description: + - Set allowed TLS versions through AWS defined policies. Currently only C(TLS_1_0) and C(TLS_1_2) are available. + default: TLS_1_2 + choices: ['TLS_1_0', 'TLS_1_2'] + type: str + endpoint_type: + description: + - API endpoint configuration for the domain. Use C(EDGE) for an edge-optimized endpoint, or use C(REGIONAL) or C(PRIVATE). + default: EDGE + choices: ['EDGE', 'REGIONAL', 'PRIVATE'] + type: str + domain_mappings: + description: + - Map your domain base paths to your API GW REST APIs that you previously created. Use the provided ID of the API setup and the release stage. + - "domain_mappings should be a list of dictionaries containing three keys: base_path, rest_api_id and stage." + - "Example: I([{ base_path: v1, rest_api_id: abc123, stage: production }])" + - If you want the base path to be just I(/), omit the parameter completely or set it to an empty string. + required: true + type: list + elements: dict + state: + description: + - Create or delete the custom domain setup. + default: present + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +notes: + - Does not create a DNS entry on Route53; for that use the route53 module. + - Only supports TLS certificates from AWS ACM that can simply be referenced by ARN, while the AWS API still offers (deprecated) options to add your own certificates. 
+''' + +EXAMPLES = ''' +- name: Setup endpoint for a custom domain for your API Gateway HTTP API + community.aws.aws_api_gateway_domain: + domain_name: myapi.foobar.com + certificate_arn: 'arn:aws:acm:us-east-1:1231123123:certificate/8bd89412-abc123-xxxxx' + security_policy: TLS_1_2 + endpoint_type: EDGE + domain_mappings: + - { rest_api_id: abc123, stage: production } + state: present + register: api_gw_domain_result + +- name: Create a DNS record for your custom domain on route 53 (using route53 module) + community.aws.route53: + record: myapi.foobar.com + value: "{{ api_gw_domain_result.response.domain.distribution_domain_name }}" + type: A + alias: true + zone: foobar.com + alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}" + command: create +''' + +RETURN = ''' +response: + description: The data returned by create_domain_name (or update and delete) and create_base_path_mapping methods by boto3. + returned: success + type: dict + sample: + domain: + { + domain_name: mydomain.com, + certificate_arn: 'arn:aws:acm:xxxxxx', + distribution_domain_name: xxxx.cloudfront.net, + distribution_hosted_zone_id: ABC123123, + endpoint_configuration: { types: ['EDGE'] }, + domain_name_status: 'AVAILABLE', + security_policy: TLS_1_2, + tags: {} + } + path_mappings: [ + { base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' } + ] +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError, EndpointConnectionError +except ImportError: + pass # caught by imported AnsibleAWSModule + +import copy + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict + + +def get_domain(module, client): + domain_name = module.params.get('domain_name') + result = {} + try: + result['domain'] = get_domain_name(client, domain_name) + result['path_mappings'] = get_domain_mappings(client, domain_name) + except is_boto3_error_code('NotFoundException'): + return None + except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="getting API GW domain") + return camel_dict_to_snake_dict(result) + + +def create_domain(module, client): + path_mappings = module.params.get('domain_mappings', []) + domain_name = module.params.get('domain_name') + result = {'domain': {}, 'path_mappings': []} + + try: + result['domain'] = create_domain_name( + module, + client, + domain_name, + module.params.get('certificate_arn'), + module.params.get('endpoint_type'), + module.params.get('security_policy') + ) + + for mapping in path_mappings: + base_path = mapping.get('base_path', '') + rest_api_id = mapping.get('rest_api_id') + stage = mapping.get('stage') + if rest_api_id is None or stage is None: + module.fail_json('Every domain mapping needs a rest_api_id and stage name') + + result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) + + except (ClientError, BotoCoreError, EndpointConnectionError) as e: + module.fail_json_aws(e, msg="creating API GW domain") + return camel_dict_to_snake_dict(result) + + +def update_domain(module, client, existing_domain): + domain_name = module.params.get('domain_name') + result = existing_domain + result['updated'] = False + + domain = existing_domain.get('domain') + # Compare only relevant 
set of domain arguments, + # as get_domain_name gathers all kinds of state information that can't be set anyway. + # Also this module doesn't support custom TLS cert setup params as they are kind of deprecated already and would increase complexity. + existing_domain_settings = { + 'certificate_arn': domain.get('certificate_arn'), + 'security_policy': domain.get('security_policy'), + 'endpoint_type': domain.get('endpoint_configuration').get('types')[0] + } + specified_domain_settings = { + 'certificate_arn': module.params.get('certificate_arn'), + 'security_policy': module.params.get('security_policy'), + 'endpoint_type': module.params.get('endpoint_type') + } + + if specified_domain_settings != existing_domain_settings: + try: + result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings)) + result['updated'] = True + except (ClientError, BotoCoreError, EndpointConnectionError) as e: + module.fail_json_aws(e, msg="updating API GW domain") + + existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', [])) + # Clean out `base_path: "(none)"` elements from dicts as those won't match the specified mappings + for mapping in existing_mappings: + if mapping.get('base_path', 'missing') == '(none)': + mapping.pop('base_path') + + specified_mappings = copy.deepcopy(module.params.get('domain_mappings', [])) + # Clean out `base_path: ""` elements from dicts as those won't match the existing mappings + for mapping in specified_mappings: + if mapping.get('base_path', 'missing') == '': + mapping.pop('base_path') + + if specified_mappings != existing_mappings: + try: + # When the lists mismatch, delete all existing mappings before adding new ones as specified + for mapping in existing_domain.get('path_mappings', []): + delete_domain_mapping(client, domain_name, mapping['base_path']) + for mapping in module.params.get('domain_mappings', []): + result['path_mappings'] = add_domain_mapping( + client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage') + ) + result['updated'] = True + except (ClientError, BotoCoreError, EndpointConnectionError) as e: + module.fail_json_aws(e, msg="updating API GW domain mapping") + + return camel_dict_to_snake_dict(result) + + +def delete_domain(module, client): + domain_name = module.params.get('domain_name') + try: + result = delete_domain_name(client, domain_name) + except (ClientError, BotoCoreError, EndpointConnectionError) as e: + module.fail_json_aws(e, msg="deleting API GW domain") + return camel_dict_to_snake_dict(result) + + +retry_params = {"tries": 10, "delay": 5, "backoff": 1.2} + + +@AWSRetry.backoff(**retry_params) +def get_domain_name(client, domain_name): + return client.get_domain_name(domainName=domain_name) + + +@AWSRetry.backoff(**retry_params) +def get_domain_mappings(client, domain_name): + return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', []) + + +@AWSRetry.backoff(**retry_params) +def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): + endpoint_configuration = {'types': [endpoint_type]} + + if endpoint_type == 'EDGE': + return client.create_domain_name( + domainName=domain_name, + certificateArn=certificate_arn, + endpointConfiguration=endpoint_configuration, + securityPolicy=security_policy + ) + else: + # Use regionalCertificateArn for regional domain deploys + return client.create_domain_name( + domainName=domain_name, + regionalCertificateArn=certificate_arn, + 
endpointConfiguration=endpoint_configuration, + securityPolicy=security_policy + ) + + +@AWSRetry.backoff(**retry_params) +def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): + return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage) + + +@AWSRetry.backoff(**retry_params) +def update_domain_name(client, domain_name, **kwargs): + patch_operations = [] + + for key, value in kwargs.items(): + path = "/" + key + if key == "endpointType": + continue + patch_operations.append({"op": "replace", "path": path, "value": value}) + + return client.update_domain_name(domainName=domain_name, patchOperations=patch_operations) + + +@AWSRetry.backoff(**retry_params) +def delete_domain_name(client, domain_name): + return client.delete_domain_name(domainName=domain_name) + + +@AWSRetry.backoff(**retry_params) +def delete_domain_mapping(client, domain_name, base_path): + return client.delete_base_path_mapping(domainName=domain_name, basePath=base_path) + + +def main(): + argument_spec = dict( + domain_name=dict(type='str', required=True), + certificate_arn=dict(type='str', required=True), + security_policy=dict(type='str', default='TLS_1_2', choices=['TLS_1_0', 'TLS_1_2']), + endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']), + domain_mappings=dict(type='list', required=True, elements='dict'), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + client = module.client('apigateway') + + state = module.params.get('state') + changed = False + + if state == "present": + existing_domain = get_domain(module, client) + if existing_domain is not None: + result = update_domain(module, client, existing_domain) + changed = result['updated'] + else: + result = create_domain(module, client) + changed = True + if state == "absent": + result = delete_domain(module, client) + changed = True + + exit_args = {"changed": changed} + + if result is not None: + exit_args['response'] = result + + module.exit_json(**exit_args) + + +if __name__ == '__main__': + main() From 0e7dde6179c88f142ff0d4291b2f6c113e7889a0 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 14 Apr 2022 07:26:51 -0700 Subject: [PATCH 419/683] ec2_asg: Change purge_tags default value to False (#1064) ec2_asg: Change purge_tags default value to False SUMMARY Changed default value of purge_tags to False. With the addition of purge_tags to ec2_asg module #960, the default value was kept to True similar to many other modules in this collection and also as specified in ansible dev guide. With this addition, the issue was discovered that there is a possibility of this change breaking existing playbooks for users if they don't update their playbooks and specify purge_tags: false where required. This PR's change will prevent accidental breakage. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_asg Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- ec2_asg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ec2_asg.py b/ec2_asg.py index f95fb329ce5..0aa16aeaa0b 100644 --- a/ec2_asg.py +++ b/ec2_asg.py @@ -224,7 +224,7 @@ description: - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - If the I(tags) parameter is not set then tags will not be modified. 
- default: true + default: false type: bool version_added: 3.2.0 health_check_period: @@ -1861,7 +1861,7 @@ def main(): wait_timeout=dict(type='int', default=300), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list', default=[], elements='dict'), - purge_tags=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=False), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), From 565ef7e36d28c8336d09502aeb9bf72be6d8cd2c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 14 Apr 2022 16:53:58 +0200 Subject: [PATCH 420/683] Revert breaking change - iam_role return values (#1068) Revert breaking change - iam_role return values SUMMARY This hasn't been released yet, so a changelog isn't needed. While I'm generally good with cleaning up the output values here, this needs to be done as a separate breaking change, and must not be backported to stable-3. ISSUE TYPE Bugfix Pull Request COMPONENT NAME iam_role ADDITIONAL INFORMATION Breaking change silently introduced by #1054 Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso --- iam_role.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/iam_role.py b/iam_role.py index 14a21fcf1f2..814dbbb8b99 100644 --- a/iam_role.py +++ b/iam_role.py @@ -506,7 +506,8 @@ def create_or_update_role(module, client): role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name) role['tags'] = get_role_tags(module, client) - module.exit_json(changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags'])) + camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + module.exit_json(changed=changed, iam_role=camel_role, **camel_role) def create_instance_profiles(module, client, role_name, path): From 0598fe19c57af3b2ab97437974017c6d92e651f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20M=C3=A9nab=C3=A9?= Date: Wed, 20 Apr 2022 11:03:33 +0200 Subject: [PATCH 421/683] s3_lifecycle: support value '0' for transition_days (#1077) s3_lifecycle: support value '0' for transition_days SUMMARY The s3_lifecycle module does not support the value 0 for the transition_days parameter. ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_lifecycle ADDITIONAL INFORMATION A lifecycle rule with 0 as transition_days makes it possible to create a rule that moves objects with a delta of just a few hours (if set to 1, objects will be moved with a delta of one day plus a few hours). When the value 0 is set, the value is stripped from the query and we get an error that is hard to correlate with an "invalid" value on this parameter (which is valid, as 0 is an integer): - name: Create s3 buckets lifecycle rules s3_lifecycle: ec2_url: "FIXME" region: "FIXME" aws_access_key: "FIXME" aws_secret_key: "FIXME" name: "FIXME" rule_id: "onezone" transitions: - transition_days: 0 storage_class: "ONEZONE_IA" state: present status: enabled fatal: [localhost]: FAILED! => { "boto3_version": "1.21.3", "botocore_version": "1.24.19", "changed": false, "error": { "code": "MalformedXML", "message": "Extra element Transition in interleave" }, "lifecycle_configuration": { "Rules": [ { "Filter": { "Prefix": "" }, "ID": "onezone", "Status": "Enabled", "Transitions": [ { "StorageClass": "ONEZONE_IA" } ] } ] }, ... 
MSG: An error occurred (MalformedXML) when calling the PutBucketLifecycleConfiguration operation: Extra element Transition in interleave This is because transition.get('transition_days') returns 0, which is treated as False in a boolean condition (this patch just checks whether the value is defined, i.e. is not None). Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- s3_lifecycle.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index c12ce6b0897..3f0bd784cef 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -314,7 +314,7 @@ def build_rule(client, module): t_out = dict() if transition.get('transition_date'): t_out['Date'] = transition['transition_date'] - elif transition.get('transition_days'): + elif transition.get('transition_days') is not None: t_out['Days'] = transition['transition_days'] if transition.get('storage_class'): t_out['StorageClass'] = transition['storage_class'].upper() @@ -596,7 +596,7 @@ def main(): 'noncurrent_version_transition_days', 'noncurrent_version_transitions') for param in required_when_present: - if module.params.get(param): + if module.params.get(param) is not None: break else: msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present) From a22b3114ef8e275d20ee76b810fa4dfdef7be799 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 22 Apr 2022 12:20:07 +0200 Subject: [PATCH 422/683] New modules: networkfirewall_policy (#1047) New modules: networkfirewall_policy SUMMARY New modules for managing network firewall policy resources. ISSUE TYPE New Module Pull Request COMPONENT NAME networkfirewall_policy networkfirewall_policy_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- networkfirewall_policy.py | 450 +++++++++++++++++++++++++++++ networkfirewall_policy_info.py | 259 +++++++++++++++++ networkfirewall_rule_group_info.py | 6 +- 3 files changed, 713 insertions(+), 2 deletions(-) create mode 100644 networkfirewall_policy.py create mode 100644 networkfirewall_policy_info.py diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py new file mode 100644 index 00000000000..5672a83501c --- /dev/null +++ b/networkfirewall_policy.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: networkfirewall_policy +short_description: manage AWS Network Firewall policies +version_added: 4.0.0 +description: + - A module for creating, updating and deleting AWS Network Firewall policies. +options: + arn: + description: + - The ARN of the Network Firewall policy. + - Exactly one of I(arn) or I(name) must be provided. + required: false + type: str + name: + description: + - The name of the Network Firewall policy. + - Cannot be updated after creation. + - Exactly one of I(arn) or I(name) must be provided. + required: false + type: str + state: + description: + - Create or remove the Network Firewall policy. + required: false + choices: ['present', 'absent'] + default: 'present' + type: str + description: + description: + - A description for the Network Firewall policy. + required: false + type: str + stateful_rule_groups: + description: + - A list of names or ARNs of stateful firewall rule groups. 
+ required: false + type: list + elements: str + aliases: ['stateful_groups'] + stateless_rule_groups: + description: + - A list of names or ARNs of stateless firewall rule groups. + required: false + type: list + elements: str + aliases: ['stateless_groups'] + stateless_default_actions: + description: + - Actions to take on a packet if it doesn't match any of the stateless + rules in the policy. + - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe). + - When creating a new policy defaults to C(aws:forward_to_sfe). + required: false + type: list + elements: str + stateless_fragment_default_actions: + description: + - Actions to take on a fragmented UDP packet if it doesn't match any + of the stateless rules in the policy. + - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe). + - When creating a new policy defaults to C(aws:forward_to_sfe). + required: false + type: list + elements: str + stateful_default_actions: + description: + - Actions to take on a packet if it doesn't match any of the stateful + rules in the policy. + - Common actions are C(aws:drop_strict), C(aws:drop_established), + C(aws:alert_strict) and C(aws:alert_established). + - Only valid for policies where I(stateful_rule_order='strict'). + - When creating a new policy defaults to C(aws:drop_strict). + - I(stateful_default_actions) requires botocore>=1.21.52. + required: false + type: list + elements: str + stateful_rule_order: + description: + - Indicates how to manage the order of stateful rule evaluation for the policy. + - When I(stateful_rule_order='strict') rules and rule groups are evaluated in + the order that they're defined. + - Cannot be updated after creation. + - I(stateful_rule_order) requires botocore>=1.21.52. + required: false + type: str + choices: ['default', 'strict'] + aliases: ['rule_order'] + stateless_custom_actions: + description: + - A list of dictionaries defining custom actions which can be used in + I(stateless_default_actions) and I(stateless_fragment_default_actions). + required: false + type: list + elements: dict + aliases: ['custom_stateless_actions'] + suboptions: + name: + description: + - The name of the custom action. + required: true + type: str + publish_metric_dimension_value: + description: + - When the custom action is used, metrics will have a dimension of + C(CustomAction), the value of which is set to + I(publish_metric_dimension_value). + required: false + type: str + aliases: ['publish_metric_dimension_values'] + purge_stateless_custom_actions: + description: + - If I(purge_stateless_custom_actions=true), existing custom actions will + be purged from the resource to match exactly what is defined by + the I(stateless_custom_actions) parameter. + type: bool + required: false + default: True + aliases: ['purge_custom_stateless_actions'] + tags: + description: + - A dictionary representing the tags associated with the policy. + - 'For example C({"Example Tag": "some example value"})' + - Unless I(purge_tags=False), all other tags will be removed from the + policy. + type: dict + required: false + purge_tags: + description: + - If I(purge_tags=true) and I(tags) is defined, existing tags will be + purged from the resource to match exactly what is defined by the + I(tags) parameter. + type: bool + required: false + default: True + wait: + description: + - Whether to wait for the firewall policy to reach the + C(ACTIVE) or C(DELETED) state before the module returns. 
+ type: bool + required: false + default: true + wait_timeout: + description: + - Maximum time, in seconds, to wait for the firewall policy + to reach the expected state. + - Defaults to 600 seconds. + type: int + required: false + + +author: Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' +# Create an AWS Network Firewall Policy with default rule order +- community.aws.networkfirewall_policy: + stateful_rule_order: 'default' + state: present + name: 'ExamplePolicy' + +# Create an AWS Network Firewall Policy with strict rule order +- community.aws.networkfirewall_policy: + stateful_rule_order: 'strict' + state: present + name: 'ExampleStrictPolicy' + + +# Create an AWS Network Firewall Policy that defaults to dropping all packets +- community.aws.networkfirewall_policy: + stateful_rule_order: 'strict' + state: present + name: 'ExampleDropPolicy' + stateful_default_actions: + - 'aws:drop_strict' + stateful_rule_groups: + - 'ExampleStrictRuleGroup' + - 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/BotNetCommandAndControlDomainsStrictOrder' + +# Delete an AWS Network Firewall Policy +- community.aws.networkfirewall_policy: + state: absent + name: 'ExampleDropPolicy' +''' + +RETURN = ''' +policy: + description: The details of the policy + type: dict + returned: success + contains: + policy: + description: The details of the policy + type: dict + returned: success + contains: + stateful_engine_options: + description: + - Extra options describing how the stateful rules should be handled. + type: dict + returned: success + contains: + rule_order: + description: + - How rule group evaluation will be ordered. + - For more information on rule evaluation ordering see the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). + type: str + returned: success + example: 'DEFAULT_ACTION_ORDER' + stateful_rule_group_references: + description: Information about the stateful rule groups attached to the policy. + type: list + elements: dict + returned: success + contains: + resource_arn: + description: The ARN of the rule group. + type: str + returned: success + example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder' + priority: + description: + - An integer that indicates the order in which to run the stateful rule groups in a single policy. + - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings. + type: int + returned: success + example: 1234 + stateless_custom_actions: + description: + - A description of additional custom actions available for use as + default rules to apply to stateless packets. + type: list + elements: dict + returned: success + contains: + action_name: + description: A name for the action. + type: str + returned: success + example: 'ExampleAction' + action_definition: + description: The action to perform. + type: dict + returned: success + contains: + publish_metric_action: + description: + - Definition of a custom metric to be published to CloudWatch. + - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html) + type: dict + returned: success + contains: + dimensions: + description: + - The values of the CustomAction dimension to set on the metrics. + - The dimensions of a metric are used to identify unique + streams of data. 
+ type: list + elements: dict + returned: success + contains: + value: + description: A value of the CustomAction dimension to set on the metrics. + type: str + returned: success + example: 'ExampleRule' + stateless_default_actions: + description: The default actions to take on a packet that doesn't match any stateful rules. + type: list + elements: str + returned: success + example: ['aws:alert_strict'] + stateless_fragment_default_actions: + description: The actions to take on a packet if it doesn't match any of the stateless rules in the policy. + type: list + elements: str + returned: success + example: ['aws:pass'] + stateless_rule_group_references: + description: Information about the stateful rule groups attached to the policy. + type: list + elements: dict + returned: success + contains: + resource_arn: + description: The ARN of the rule group. + type: str + returned: success + example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup' + priority: + description: + - An integer that indicates the order in which to run the stateless rule groups in a single policy. + type: str + returned: success + example: 12345 + policy_metadata: + description: Metadata about the policy + type: dict + returned: success + contains: + consumed_stateful_rule_capacity: + description: The total number of capacity units used by the stateful rule groups. + type: int + returned: success + example: 165 + consumed_stateless_rule_capacity: + description: The total number of capacity units used by the stateless rule groups. + type: int + returned: success + example: 2010 + firewall_policy_arn: + description: The ARN of the policy. + type: str + returned: success + example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy + firewall_policy_id: + description: The unique ID of the policy. + type: str + returned: success + example: 12345678-abcd-1234-5678-123456789abc + firewall_policy_name: + description: The name of the policy. + type: str + returned: success + example: ExamplePolicy + firewall_policy_status: + description: The current status of the policy. + type: str + returned: success + example: ACTIVE + number_of_associations: + description: The number of firewalls the policy is associated to. + type: int + returned: success + example: 1 + tags: + description: A dictionary representing the tags associated with the policy. + type: dict + returned: success + example: {'tagName': 'Some Value'} +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager + + +def main(): + + custom_action_options = dict( + name=dict(type='str', required=True), + # Poorly documented, but "publishMetricAction.dimensions ... 
must have length less than or equal to 1" + publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']), + # NetworkFirewallPolicyManager can cope with a list for future-proofing + # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']), + ) + + argument_spec = dict( + name=dict(type='str', required=False), + arn=dict(type='str', required=False), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + description=dict(type='str', required=False), + tags=dict(type='dict', required=False), + purge_tags=dict(type='bool', required=False, default=True), + stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']), + stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']), + stateful_default_actions=dict(type='list', elements='str', required=False), + stateless_default_actions=dict(type='list', elements='str', required=False), + stateless_fragment_default_actions=dict(type='list', elements='str', required=False), + stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']), + stateless_custom_actions=dict(type='list', elements='dict', required=False, + options=custom_action_options, aliases=['custom_stateless_actions']), + purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']), + wait=dict(type='bool', required=False, default=True), + wait_timeout=dict(type='int', required=False), + ) + + mutually_exclusive = [ + ('arn', 'name',) + ] + required_one_of = [ + ('arn', 'name',) + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + ) + + arn = module.params.get('arn') + name = module.params.get('name') + state = module.params.get('state') + + manager = NetworkFirewallPolicyManager(module, name=name, arn=arn) + manager.set_wait(module.params.get('wait', None)) + manager.set_wait_timeout(module.params.get('wait_timeout', None)) + + rule_order = module.params.get('stateful_rule_order') + if rule_order and rule_order != "default": + module.require_botocore_at_least('1.21.52', reason='to set the rule order') + if module.params.get('stateful_default_actions'): + module.require_botocore_at_least( + '1.21.52', reason='to set the default actions for stateful flows') + + if state == 'absent': + manager.delete() + else: + manager.set_description(module.params.get('description', None)) + manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) + # Actions need to be defined before potentially consuming them + manager.set_custom_stateless_actions( + module.params.get('stateless_custom_actions', None), + module.params.get('purge_stateless_custom_actions', True)), + manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None)) + manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None)) + manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None)) + manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None)) + manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None)) + manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None)) + + 
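# The set_*() calls above only stage the desired configuration on the manager object; flush_changes() below performs the actual create/update calls against the AWS API and records the outcome on manager.changed and manager.updated_resource. + 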
manager.flush_changes() + + results = dict( + changed=manager.changed, + policy=manager.updated_resource, + ) + if manager.changed: + diff = dict( + before=manager.original_resource, + after=manager.updated_resource, + ) + results['diff'] = diff + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/networkfirewall_policy_info.py b/networkfirewall_policy_info.py new file mode 100644 index 00000000000..a91536b6e0c --- /dev/null +++ b/networkfirewall_policy_info.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: networkfirewall_policy_info +short_description: describe AWS Network Firewall policies +version_added: 4.0.0 +description: + - A module for describing AWS Network Firewall policies. +options: + arn: + description: + - The ARN of the Network Firewall policy. + - Mutually exclusive with I(name). + required: false + type: str + name: + description: + - The name of the Network Firewall policy. + - Mutually exclusive with I(arn). + required: false + type: str + +author: Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' + +# Describe all Firewall policies in an account +- community.aws.networkfirewall_policy_info: {} + +# Describe a Firewall policy by ARN +- community.aws.networkfirewall_policy_info: + arn: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy + +# Describe a Firewall policy by name +- community.aws.networkfirewall_policy_info: + name: ExamplePolicy +''' + +RETURN = ''' +policy_list: + description: A list of ARNs of the matching policies. + type: list + elements: str + returned: When a policy name isn't specified + example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example1', + 'arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example2'] + +policies: + description: The details of the policies + returned: success + type: list + elements: dict + contains: + policy: + description: The details of the policy + type: dict + returned: success + contains: + stateful_engine_options: + description: + - Extra options describing how the stateful rules should be handled. + type: dict + returned: success + contains: + rule_order: + description: + - How rule group evaluation will be ordered. + - For more information on rule evaluation ordering see the AWS documentation + U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). + type: str + returned: success + example: 'DEFAULT_ACTION_ORDER' + stateful_rule_group_references: + description: Information about the stateful rule groups attached to the policy. + type: list + elements: dict + returned: success + contains: + resource_arn: + description: The ARN of the rule group. + type: str + returned: success + example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder' + priority: + description: + - An integer that indicates the order in which to run the stateful rule groups in a single policy. + - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings. 
+ type: int + returned: success + example: 1234 + stateless_custom_actions: + description: + - A description of additional custom actions available for use as + default rules to apply to stateless packets. + type: list + elements: dict + returned: success + contains: + action_name: + description: A name for the action. + type: str + returned: success + example: 'ExampleAction' + action_definition: + description: The action to perform. + type: dict + returned: success + contains: + publish_metric_action: + description: + - Definition of a custom metric to be published to CloudWatch. + - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html) + type: dict + returned: success + contains: + dimensions: + description: + - The values of the CustomAction dimension to set on the metrics. + - The dimensions of a metric are used to identify unique + streams of data. + type: list + elements: dict + returned: success + contains: + value: + description: A value of the CustomAction dimension to set on the metrics. + type: str + returned: success + example: 'ExampleRule' + stateless_default_actions: + description: The default actions to take on a packet that doesn't match any stateful rules. + type: list + elements: str + returned: success + example: ['aws:alert_strict'] + stateless_fragment_default_actions: + description: The actions to take on a packet if it doesn't match any of the stateless rules in the policy. + type: list + elements: str + returned: success + example: ['aws:pass'] + stateless_rule_group_references: + description: Information about the stateful rule groups attached to the policy. + type: list + elements: dict + returned: success + contains: + resource_arn: + description: The ARN of the rule group. + type: str + returned: success + example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup' + priority: + description: + - An integer that indicates the order in which to run the stateless rule groups in a single policy. + type: str + returned: success + example: 12345 + policy_metadata: + description: Metadata about the policy + type: dict + returned: success + contains: + consumed_stateful_rule_capacity: + description: The total number of capacity units used by the stateful rule groups. + type: int + returned: success + example: 165 + consumed_stateless_rule_capacity: + description: The total number of capacity units used by the stateless rule groups. + type: int + returned: success + example: 2010 + firewall_policy_arn: + description: The ARN of the policy. + type: str + returned: success + example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy + firewall_policy_id: + description: The unique ID of the policy. + type: str + returned: success + example: 12345678-abcd-1234-5678-123456789abc + firewall_policy_name: + description: The name of the policy. + type: str + returned: success + example: ExamplePolicy + firewall_policy_status: + description: The current status of the policy. + type: str + returned: success + example: ACTIVE + number_of_associations: + description: The number of firewalls the policy is associated to. + type: int + returned: success + example: 1 + tags: + description: A dictionary representing the tags associated with the policy. 
+ type: dict + returned: success + example: {'tagName': 'Some Value'} +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager + + +def main(): + + argument_spec = dict( + name=dict(type='str', required=False), + arn=dict(type='str', required=False), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ('arn', 'name',), + ], + ) + + arn = module.params.get('arn') + name = module.params.get('name') + + manager = NetworkFirewallPolicyManager(module) + + results = dict(changed=False) + + if name or arn: + policy = manager.get_policy(name=name, arn=arn) + if policy: + results['policies'] = [policy] + else: + results['policies'] = [] + else: + policy_list = manager.list() + results['policy_list'] = policy_list + policies = [manager.get_policy(arn=p) for p in policy_list] + results['policies'] = policies + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py index 37cb6d2b0c1..b21e060795b 100644 --- a/networkfirewall_rule_group_info.py +++ b/networkfirewall_rule_group_info.py @@ -430,8 +430,10 @@ def main(): if name or arn: rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn) - rules = [rule] - results['rule_groups'] = rules + if rule: + results['rule_groups'] = [rule] + else: + results['rule_groups'] = [] else: rule_list = manager.list(scope=scope) results['rule_list'] = rule_list From ae39dfafa87afc6eee3d1e4f6ebe9276f089dc1a Mon Sep 17 00:00:00 2001 From: psharkey Date: Tue, 26 Apr 2022 03:42:55 -0500 Subject: [PATCH 423/683] Fixing parameter name in example. (#1092) Fixing parameter name in example. SUMMARY Fixing the parameter name used in the example. ISSUE TYPE Docs Pull Request COMPONENT NAME aws_acm ADDITIONAL INFORMATION Using the example as-is results in an error: fatal: [localhost]: FAILED!
=> changed=false msg: 'state is present but all of the following are missing: private_key' Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell --- aws_acm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_acm.py b/aws_acm.py index 1125ead5036..d6ba255d575 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -202,7 +202,7 @@ - name: create/update a certificate with a chain community.aws.aws_acm: certificate: "{{ lookup('file', 'cert.pem' ) }}" - privateKey: "{{ lookup('file', 'key.pem' ) }}" + private_key: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert certificate_chain: "{{ lookup('file', 'chain.pem' ) }}" state: present From 3bba3ef986debbcd1fcb333f0825b9d402e1ae35 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Tue, 26 Apr 2022 05:37:51 -0400 Subject: [PATCH 424/683] iam_policy - update examples, add RETURN, add required_if case (#1093) iam_policy - update examples, add RETURN, add required_if case SUMMARY fix broken example add RETURN documentation require one of policy_document or policy_json when state is present add extra integration tests for check mode idempotency cases ISSUE TYPE Bugfix Pull Request COMPONENT NAME iam_policy Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- iam_policy.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/iam_policy.py b/iam_policy.py index 570c37efa1b..8989255d3c1 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -82,8 +82,7 @@ # Advanced example, create two new groups and add a READ-ONLY policy to both # groups. - name: Create Two Groups, Mario and Luigi - community.aws.iam: - iam_type: group + community.aws.iam_group: name: "{{ item }}" state: present loop: @@ -94,9 +93,9 @@ - name: Apply READ-ONLY policy to new groups that have been recently created community.aws.iam_policy: iam_type: group - iam_name: "{{ item.created_group.group_name }}" + iam_name: "{{ item.iam_group.group.group_name }}" policy_name: "READ-ONLY" - policy_document: readonlypolicy.json + policy_json: "{{ lookup('template', 'readonly.json.j2') }}" state: present loop: "{{ new_groups.results }}" @@ -107,12 +106,20 @@ iam_name: "{{ item.user }}" policy_name: "s3_limited_access_{{ item.prefix }}" state: present - policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " + policy_json: "{{ lookup('template', 's3_policy.json.j2') }}" loop: - user: s3_user prefix: s3_user_prefix ''' +RETURN = ''' +policies: + description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). 
+ returned: always + type: list + elements: str +''' + import json try: @@ -120,9 +127,10 @@ except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies class PolicyError(Exception): @@ -174,9 +182,9 @@ def delete(self): self.changed = False return - self.changed = True if not self.check_mode: self._delete(self.name, self.policy_name) + self.changed = True def get_policy_text(self): try: @@ -298,8 +306,16 @@ def main(): skip_duplicates=dict(type='bool', default=None, required=False) ) mutually_exclusive = [['policy_document', 'policy_json']] - - module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) + required_if = [ + ('state', 'present', ('policy_document', 'policy_json'), True), + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True + ) skip_duplicates = module.params.get('skip_duplicates') From 89fb521c3f83b3d5752167d3e707e3784bdb1a61 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Tue, 26 Apr 2022 18:02:27 -0400 Subject: [PATCH 425/683] aws_kms - stabilize and add integration tests (#1052) aws_kms - stabilize and add integration tests SUMMARY update/add integration tests for various actions return list of policies as a list of jsons for clarity sleep on updates (no kms waiter, attempted manual waiters but still had test failures) ISSUE TYPE Feature Pull Request COMPONENT NAME aws_kms ADDITIONAL INFORMATION I tried adding manual waiters for different actions like waiting for tags to be correct, policy to be updated, etc, but would still fail ~half of the time on idempotency tests. seems like after updating the key's status is a bit buggy. Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Mandar Kulkarni Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- aws_kms.py | 77 +++++++++++++++++++++++++++++++++++++++++++------ aws_kms_info.py | 44 +++++++++++++++++++++++++++- 2 files changed, 111 insertions(+), 10 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index 41a5ee63c69..cf9c4b5eb96 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -199,6 +199,12 @@ - amazon.aws.aws - amazon.aws.ec2 + +notes: + - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS. + This can cause issues when running duplicate tasks in succession or using the aws_kms_info module to fetch key metadata + shortly after modifying keys. + For this reason, it is recommended to use the return data from this module (aws_kms) to fetch a key's metadata. ''' EXAMPLES = r''' @@ -310,6 +316,11 @@ type: str returned: always sample: false +enable_key_rotation: + description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. 
+ type: bool + returned: always + sample: false aliases: description: list of aliases associated with the key type: list @@ -318,9 +329,45 @@ - aws/acm - aws/ebs policies: - description: list of policy documents for the keys. Empty when access is denied even if there are policies. + description: list of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always + elements: str + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "111111111111" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::111111111111:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" +key_policies: + description: list of policy documents for the key. Empty when access is denied even if there are policies. + type: list + returned: always + elements: dict sample: Version: "2012-10-17" Id: "auto-ebs-2" @@ -351,6 +398,7 @@ - "kms:List*" - "kms:RevokeGrant" Resource: "*" + version_added: 3.3.0 tags: description: dictionary of tags applied to the key type: dict @@ -584,6 +632,7 @@ def get_key_details(connection, module, key_id): tags = get_kms_tags(connection, module, key_id) result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') result['policies'] = get_kms_policies(connection, module, key_id) + result['key_policies'] = [json.loads(policy) for policy in result['policies']] return result @@ -817,13 +866,15 @@ def update_key_rotation(connection, module, key, enable_key_rotation): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get current key rotation status") - try: - if enable_key_rotation: - connection.enable_key_rotation(KeyId=key_id) - else: - connection.disable_key_rotation(KeyId=key_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to enable/disable key rotation") + if not module.check_mode: + try: + if enable_key_rotation: + connection.enable_key_rotation(KeyId=key_id) + else: + connection.disable_key_rotation(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to enable/disable key rotation") + return True @@ -1030,6 +1081,11 @@ def canonicalize_alias_name(alias): def fetch_key_metadata(connection, module, key_id, alias): + # Note - fetching a key's metadata is very inconsistent shortly after any sort of update to a key has occurred. + # Combinations of manual waiters, checking expecting key values to actual key value, and static sleeps + # have all been exhausted, but none of those available options have solved the problem. + # Integration tests will wait for 10 seconds to combat this issue. + # See https://github.com/ansible-collections/community.aws/pull/1052. 
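+    # For example (illustrative only; assumes a key with the alias 'my-key'),
+    # prefer the data this module already returns over an immediate
+    # aws_kms_info lookup:
+    #
+    #   - community.aws.aws_kms:
+    #       alias: my-key
+    #       enable_key_rotation: true
+    #     register: key
+    #
+    #   - ansible.builtin.debug:
+    #       msg: "Key rotation enabled: {{ key.enable_key_rotation }}"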
alias = canonicalize_alias_name(module.params.get('alias')) @@ -1104,10 +1160,13 @@ def main(): kms = module.client('kms') + module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", + date='2024-05-01', collection_name='community.aws') + key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias')) # We can't create keys with a specific ID, if we can't access the key we'll have to fail if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata: - module.fail_json(msg="Could not find key with id %s to update") + module.fail_json(msg="Could not find key with id {0} to update".format(module.params.get('key_id'))) if module.params.get('policy_grant_types') or mode == 'deny': module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile' diff --git a/aws_kms_info.py b/aws_kms_info.py index 671bf6f7447..c67e58d27ec 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -150,9 +150,10 @@ Name: myKey Purpose: protecting_stuff policies: - description: list of policy documents for the keys. Empty when access is denied even if there are policies. + description: list of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always + elements: str sample: Version: "2012-10-17" Id: "auto-ebs-2" @@ -183,6 +184,42 @@ - "kms:List*" - "kms:RevokeGrant" Resource: "*" + key_policies: + description: list of policy documents for the key. Empty when access is denied even if there are policies. + type: list + returned: always + elements: dict + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "111111111111" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::111111111111:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" + version_added: 3.3.0 grants: description: list of grants associated with a key type: complex @@ -240,6 +277,7 @@ sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz ''' +import json try: import botocore @@ -418,6 +456,7 @@ def get_key_details(connection, module, key_id, tokens=None): result = camel_dict_to_snake_dict(result) result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') result['policies'] = get_kms_policies(connection, module, key_id) + result['key_policies'] = [json.loads(policy) for policy in result['policies']] return result @@ -460,6 +499,9 @@ def main(): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') + module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. 
Both values are returned for now.", + date='2024-05-01', collection_name='community.aws') + all_keys = get_kms_info(connection, module) filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] ret_params = dict(kms_keys=filtered_keys) From aaa185d60e66bd039178572592087bfab6f963f2 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 28 Apr 2022 12:37:58 +0200 Subject: [PATCH 426/683] Docs linting fixups (#1100) Docs linting fixups SUMMARY While testing out linting/validation of the generated docs, a number of issues were highlighted. ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/aws_batch_job_definition.py plugins/modules/aws_config_rule.py plugins/modules/aws_secret.py plugins/modules/aws_waf_rule.py plugins/modules/cloudfront_invalidation.py plugins/modules/ec2_placement_group_info.py plugins/modules/iam_user.py plugins/modules/route53_health_check.py plugins/modules/s3_sync.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- aws_batch_job_definition.py | 5 +++-- aws_config_rule.py | 6 +++--- aws_secret.py | 2 +- aws_waf_rule.py | 2 +- cloudfront_invalidation.py | 2 +- ec2_placement_group_info.py | 3 ++- iam_user.py | 4 ++-- route53_health_check.py | 2 +- s3_sync.py | 4 ++-- 9 files changed, 16 insertions(+), 14 deletions(-) diff --git a/aws_batch_job_definition.py b/aws_batch_job_definition.py index 4beb1ab2c26..afaaf0c54e8 100644 --- a/aws_batch_job_definition.py +++ b/aws_batch_job_definition.py @@ -46,8 +46,9 @@ type: dict image: description: - - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker - Hub registry are available by default. Other repositories are specified with `` repository-url /image tag ``. + - > + The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker + Hub registry are available by default. Other repositories are specified with C(repository-url/image-name:tag). Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run. diff --git a/aws_config_rule.py b/aws_config_rule.py index ed4de6ab7e2..a27236d82b0 100644 --- a/aws_config_rule.py +++ b/aws_config_rule.py @@ -69,9 +69,9 @@ description: - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs. - - Key `EventSource` The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources. - - Key `MessageType` The type of notification that triggers AWS Config to run an evaluation for a rule. - - Key `MaximumExecutionFrequency` The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger. + - Key C(EventSource) The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources. + - Key C(MessageType) The type of notification that triggers AWS Config to run an evaluation for a rule. + - Key C(MaximumExecutionFrequency) The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger. 
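+      - For example, a custom rule with a periodic trigger might use
+        C([{'EventSource': 'aws.config', 'MessageType': 'ScheduledNotification', 'MaximumExecutionFrequency': 'TwentyFour_Hours'}]).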
type: dict required: true input_parameters: diff --git a/aws_secret.py b/aws_secret.py index 050b00f5ae8..03f4a8d3592 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -40,7 +40,7 @@ kms_key_id: description: - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be - used to encrypt the `secret_string` or `secret_binary` values in the versions stored in this secret. + used to encrypt the I(secret) values in the versions stored in this secret. type: str secret_type: description: diff --git a/aws_waf_rule.py b/aws_waf_rule.py index ce28559b35f..f5701b2ff00 100644 --- a/aws_waf_rule.py +++ b/aws_waf_rule.py @@ -63,7 +63,7 @@ description: The name of the condition. The condition must already exist. purge_conditions: description: - - Whether or not to remove conditions that are not passed when updating `conditions`. + - Whether or not to remove conditions that are not passed when updating I(conditions). default: false type: bool waf_regional: diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 4fb602f7a77..0599a71de3e 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -43,7 +43,7 @@ type: str target_paths: description: - - A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed. eg. '/foo/bar/*' + - A list of paths on the distribution to invalidate. Each path should begin with C(/). Wildcards are allowed. eg. C(/foo/bar/*) required: true type: list elements: str diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 6a344f1d8d9..f4ee9b753b0 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -45,7 +45,8 @@ register: specific_ec2_placement_groups - ansible.builtin.debug: - msg: "{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}" + msg: > + {{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }} ''' diff --git a/iam_user.py b/iam_user.py index 7ec3901aa45..6266d992003 100644 --- a/iam_user.py +++ b/iam_user.py @@ -13,7 +13,7 @@ short_description: Manage AWS IAM users description: - A module to manage AWS IAM users. - - The module does not manage groups that users belong to, groups memberships can be managed using `iam_group`. + - The module does not manage groups that users belong to, groups memberships can be managed using M(community.aws.iam_group). author: Josh Souza (@joshsouza) options: name: @@ -104,7 +104,7 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Note: This module does not allow management of groups that users belong to. -# Groups should manage their membership directly using `iam_group`, +# Groups should manage their membership directly using community.aws.iam_group, # as users belong to them. - name: Create a user diff --git a/route53_health_check.py b/route53_health_check.py index 382be93ab6d..22ce36beba8 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -60,7 +60,7 @@ fqdn: description: - Domain name of the endpoint to check. Either this or I(ip_address) has - to be provided. When both are given the `fqdn` is used in the `Host:` + to be provided. When both are given the I(fqdn) is used in the C(Host:) header of the HTTP request. type: str string_match: diff --git a/s3_sync.py b/s3_sync.py index c9021c3dbf9..75c653f5712 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -83,7 +83,7 @@ include: description: - Shell pattern-style file matching. 
- - Used before exclude to determine eligible files (for instance, only "*.gif") + - Used before exclude to determine eligible files (for instance, only C("*.gif")) - For multiple patterns, comma-separate them. required: false default: "*" @@ -91,7 +91,7 @@ exclude: description: - Shell pattern-style file matching. - - Used after include to remove files (for instance, skip "*.txt") + - Used after include to remove files (for instance, skip C("*.txt")) - For multiple patterns, comma-separate them. required: false default: ".*" From 81b1106777fec9e9ee74e4758822bf54029adf67 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 2 May 2022 12:25:38 +0200 Subject: [PATCH 427/683] New Module: TGW VPC Attachments (#1004) New modules: Transit Gateway VPC attachments SUMMARY Adds support for EC2 Transit Gateway VPC attachments Does not support accepting / rejecting attachments at this time. ISSUE TYPE New Module Pull Request COMPONENT NAME ec2_transit_gateway_vpc_attachment ec2_transit_gateway_vpc_attachment_info ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ec2_transit_gateway_vpc_attachment.py | 349 +++++++++++++++++++++ ec2_transit_gateway_vpc_attachment_info.py | 200 ++++++++++++ 2 files changed, 549 insertions(+) create mode 100644 ec2_transit_gateway_vpc_attachment.py create mode 100644 ec2_transit_gateway_vpc_attachment_info.py diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py new file mode 100644 index 00000000000..13518fdbe2a --- /dev/null +++ b/ec2_transit_gateway_vpc_attachment.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: ec2_transit_gateway_vpc_attachment +short_description: Create and delete AWS Transit Gateway VPC attachments +version_added: 4.0.0 +description: + - Creates, Deletes and Updates AWS Transit Gateway VPC Attachments. +options: + transit_gateway: + description: + - The ID of the Transit Gateway that the attachment belongs to. + - When creating a new attachment, I(transit_gateway) must be provided. + - At least one of I(name), I(transit_gateway) and I(id) must be provided. + - I(transit_gateway) is an immutable setting and can not be updated on an + existing attachment. + type: str + required: false + aliases: ['transit_gateway_id'] + id: + description: + - The ID of the Transit Gateway Attachment. + - When I(id) is not set, a search using I(transit_gateway) and I(name) will be + performed. If multiple results are returned, the module will fail. + - At least one of I(name), I(transit_gateway) and I(id) must be provided. + type: str + required: false + aliases: ['attachment_id'] + name: + description: + - The C(Name) tag of the Transit Gateway attachment. + - Providing both I(id) and I(name) will set the C(Name) tag on an existing + attachment the matching I(id). + - Setting the C(Name) tag in I(tags) will also result in the C(Name) tag being + updated. + - At least one of I(name), I(transit_gateway) and I(id) must be provided. + type: str + required: false + state: + description: + - Create or remove the Transit Gateway attachment. + type: str + required: false + choices: ['present', 'absent'] + default: 'present' + subnets: + description: + - The ID of the subnets in which to create the transit gateway VPC attachment. + - Required when creating a new attachment. 
+ type: list + elements: str + required: false + purge_subnets: + description: + - If I(purge_subnets=true), existing subnets will be removed from the + attachment as necessary to match exactly what is defined by I(subnets). + type: bool + required: false + default: true + dns_support: + description: + - Whether DNS support is enabled. + type: bool + required: false + ipv6_support: + description: + - Whether IPv6 support is enabled. + type: bool + required: false + appliance_mode_support: + description: + - Whether the attachment is configured for appliance mode. + - When appliance mode is enabled, Transit Gateway, using 4-tuples of an + IP packet, selects a single Transit Gateway ENI in the Appliance VPC + for the life of a flow to send traffic to. + type: bool + required: false + tags: + description: + - A dictionary representing the tags associated with the Transit Gateway + attachment. + - 'For example C({"Example Tag": "some example value"})' + - Unless I(purge_tags=False) all other tags will be removed from the + attachment. + type: dict + required: false + purge_tags: + description: + - If I(purge_tags=true), existing tags will be purged from the resource + to match exactly what is defined by I(tags) parameter. + type: bool + required: false + default: true + wait: + description: + - Whether to wait for the Transit Gateway attachment to reach the + C(Available) or C(Deleted) state before the module returns. + type: bool + required: false + default: true + wait_timeout: + description: + - Maximum time, in seconds, to wait for the Transit Gateway attachment + to reach the expected state. + - Defaults to 600 seconds. + type: int + required: false +author: "Mark Chappell (@tremble)" +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' +# Create a Transit Gateway attachment +- community.aws.ec2_transit_gateway_vpc_attachment: + state: present + transit_gateway: 'tgw-123456789abcdef01' + name: AnsibleTest-1 + subnets: + - subnet-00000000000000000 + - subnet-11111111111111111 + - subnet-22222222222222222 + ipv6_support: True + purge_subnets: True + dns_support: True + appliance_mode_support: True + tags: + TestTag: changed data in Test Tag + +# Set sub options on a Transit Gateway attachment +- community.aws.ec2_transit_gateway_vpc_attachment: + state: present + id: 'tgw-attach-0c0c5fd0b0f01d1c9' + name: AnsibleTest-1 + ipv6_support: True + purge_subnets: False + dns_support: False + appliance_mode_support: True + +# Delete the transit gateway +- community.aws.ec2_transit_gateway_vpc_attachment: + state: absent + id: 'tgw-attach-0c0c5fd0b0f01d1c9' +''' + +RETURN = ''' +transit_gateway_attachments: + description: The attributes of the Transit Gateway attachments. + type: list + elements: dict + returned: success + contains: + creation_time: + description: + - An ISO 8601 date time stamp of when the attachment was created. + type: str + returned: success + example: '2022-03-10T16:40:26+00:00' + options: + description: + - Additional VPC attachment options. + type: dict + returned: success + contains: + appliance_mode_support: + description: + - Indicates whether appliance mode support is enabled. + type: str + returned: success + example: 'enable' + dns_support: + description: + - Indicates whether DNS support is enabled. + type: str + returned: success + example: 'disable' + ipv6_support: + description: + - Indicates whether IPv6 support is disabled. 
+ type: str + returned: success + example: 'disable' + state: + description: + - The state of the attachment. + type: str + returned: success + example: 'deleting' + subnet_ids: + description: + - The IDs of the subnets in use by the attachment. + type: list + elements: str + returned: success + example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111'] + tags: + description: + - A dictionary representing the resource tags. + type: dict + returned: success + transit_gateway_attachment_id: + description: + - The ID of the attachment. + type: str + returned: success + example: 'tgw-attach-0c0c5fd0b0f01d1c9' + transit_gateway_id: + description: + - The ID of the transit gateway that the attachment is connected to. + type: str + returned: success + example: 'tgw-0123456789abcdef0' + vpc_id: + description: + - The ID of the VPC that the attachment is connected to. + type: str + returned: success + example: 'vpc-0123456789abcdef0' + vpc_owner_id: + description: + - The ID of the account that the VPC belongs to. + type: str + returned: success + example: '012345678901' +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + +from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager + + +def main(): + + argument_spec = dict( + state=dict(type='str', required=False, default='present', choices=['absent', 'present']), + transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']), + id=dict(type='str', required=False, aliases=['attachment_id']), + name=dict(type='str', required=False), + subnets=dict(type='list', elements='str', required=False), + purge_subnets=dict(type='bool', required=False, default=True), + tags=dict(type='dict', required=False), + purge_tags=dict(type='bool', required=False, default=True), + appliance_mode_support=dict(type='bool', required=False), + dns_support=dict(type='bool', required=False), + ipv6_support=dict(type='bool', required=False), + wait=dict(type='bool', required=False, default=True), + wait_timeout=dict(type='int', required=False), + ) + + one_of = [ + ['id', 'transit_gateway', 'name'], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=one_of, + ) + + attach_id = module.params.get('id', None) + tgw = module.params.get('transit_gateway', None) + name = module.params.get('name', None) + tags = module.params.get('tags', None) + purge_tags = module.params.get('purge_tags') + state = module.params.get('state') + subnets = module.params.get('subnets', None) + purge_subnets = module.params.get('purge_subnets') + + # When not provided with an ID see if one exists. 
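+    # The search below matches on the transit gateway ID, the Name tag and, when
+    # subnets are supplied, the VPC those subnets belong to; if more than one
+    # attachment matches, we fail rather than guess which one to manage.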
+    if not attach_id:
+        search_manager = TransitGatewayVpcAttachmentManager(module=module)
+        filters = dict()
+        if tgw:
+            filters['transit-gateway-id'] = tgw
+        if name:
+            filters['tag:Name'] = name
+        if subnets:
+            vpc_id = search_manager.subnets_to_vpc(subnets)
+            filters['vpc-id'] = vpc_id
+
+        # Attachments lurk in a 'deleted' state for a while; ignore them so we
+        # can reuse the names
+        filters['state'] = [
+            'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
+            'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+        ]
+        attachments = search_manager.list(filters=filters)
+        if len(attachments) > 1:
+            module.fail_json('Multiple matching attachments found, provide an ID', attachments=attachments)
+        # If we find a match then we'll modify it by ID, otherwise we'll be
+        # creating a new attachment.
+        if attachments:
+            attach_id = attachments[0]['transit_gateway_attachment_id']
+
+    manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id)
+    manager.set_wait(module.params.get('wait', None))
+    manager.set_wait_timeout(module.params.get('wait_timeout', None))
+
+    if state == 'absent':
+        manager.delete()
+    else:
+        if not attach_id:
+            if not tgw:
+                module.fail_json('No existing attachment found. To create a new attachment'
+                                 ' the `transit_gateway` parameter must be provided.')
+            if not subnets:
+                module.fail_json('No existing attachment found. To create a new attachment'
+                                 ' the `subnets` parameter must be provided.')
+
+        # name is just a special case of tags.
+        if name:
+            new_tags = dict(Name=name)
+            if tags is None:
+                purge_tags = False
+            else:
+                new_tags.update(tags)
+            tags = new_tags
+
+        manager.set_transit_gateway(tgw)
+        manager.set_subnets(subnets, purge_subnets)
+        manager.set_tags(tags, purge_tags)
+        manager.set_dns_support(module.params.get('dns_support', None))
+        manager.set_ipv6_support(module.params.get('ipv6_support', None))
+        manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None))
+        manager.flush_changes()
+
+    results = dict(
+        changed=manager.changed,
+        attachments=[manager.updated_resource],
+    )
+    if manager.changed:
+        results['diff'] = dict(
+            before=manager.original_resource,
+            after=manager.updated_resource,
+        )
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()

diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py
new file mode 100644
index 00000000000..a0a07ce87d7
--- /dev/null
+++ b/ec2_transit_gateway_vpc_attachment_info.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: ec2_transit_gateway_vpc_attachment_info
+short_description: Describes AWS Transit Gateway VPC attachments
+version_added: 4.0.0
+description:
+  - Describes AWS Transit Gateway VPC Attachments.
+options:
+  id:
+    description:
+      - The ID of the Transit Gateway Attachment.
+      - Mutually exclusive with I(name) and I(filters).
+    type: str
+    required: false
+    aliases: ['attachment_id']
+  name:
+    description:
+      - The C(Name) tag of the Transit Gateway attachment.
+    type: str
+    required: false
+  filters:
+    description:
+      - A dictionary of filters to apply. Each dict item consists of a filter key and a filter value.
+      - Setting a C(tag:Name) filter will override the I(name) parameter.
+    type: dict
+    required: false
+  include_deleted:
+    description:
+      - If I(include_deleted=True), then attachments in a deleted state will
+        also be returned.
+      - Setting a C(state) filter will override the I(include_deleted) parameter.
+    type: bool
+    required: false
+    default: false
+author: "Mark Chappell (@tremble)"
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+'''

+EXAMPLES = '''
+# Describe a specific Transit Gateway attachment.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+    id: 'tgw-attach-0123456789abcdef0'
+
+# Describe all attachments attached to a transit gateway.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+    filters:
+      transit-gateway-id: 'tgw-0fedcba9876543210'
+
+# Describe all attachments in an account.
+- community.aws.ec2_transit_gateway_vpc_attachment_info:
+'''
+
+RETURN = '''
+transit_gateway_attachments:
+  description: The attributes of the Transit Gateway attachments.
+  type: list
+  elements: dict
+  returned: success
+  contains:
+    creation_time:
+      description:
+        - An ISO 8601 date time stamp of when the attachment was created.
+      type: str
+      returned: success
+      example: '2022-03-10T16:40:26+00:00'
+    options:
+      description:
+        - Additional VPC attachment options.
+      type: dict
+      returned: success
+      contains:
+        appliance_mode_support:
+          description:
+            - Indicates whether appliance mode support is enabled.
+          type: str
+          returned: success
+          example: 'enable'
+        dns_support:
+          description:
+            - Indicates whether DNS support is enabled.
+          type: str
+          returned: success
+          example: 'disable'
+        ipv6_support:
+          description:
+            - Indicates whether IPv6 support is disabled.
+          type: str
+          returned: success
+          example: 'disable'
+    state:
+      description:
+        - The state of the attachment.
+      type: str
+      returned: success
+      example: 'deleting'
+    subnet_ids:
+      description:
+        - The IDs of the subnets in use by the attachment.
+      type: list
+      elements: str
+      returned: success
+      example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111']
+    tags:
+      description:
+        - A dictionary representing the resource tags.
+      type: dict
+      returned: success
+    transit_gateway_attachment_id:
+      description:
+        - The ID of the attachment.
+      type: str
+      returned: success
+      example: 'tgw-attach-0c0c5fd0b0f01d1c9'
+    transit_gateway_id:
+      description:
+        - The ID of the transit gateway that the attachment is connected to.
+      type: str
+      returned: success
+      example: 'tgw-0123456789abcdef0'
+    vpc_id:
+      description:
+        - The ID of the VPC that the attachment is connected to.
+      type: str
+      returned: success
+      example: 'vpc-0123456789abcdef0'
+    vpc_owner_id:
+      description:
+        - The ID of the account that the VPC belongs to.
+      type: str
+      returned: success
+      example: '012345678901'
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
+
+
+def main():
+
+    argument_spec = dict(
+        id=dict(type='str', required=False, aliases=['attachment_id']),
+        name=dict(type='str', required=False),
+        filters=dict(type='dict', required=False),
+        include_deleted=dict(type='bool', required=False, default=False)
+    )
+
+    mutually_exclusive = [
+        ['id', 'name'],
+        ['id', 'filters'],
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True,
+    )
+
+    name = module.params.get('name', None)
+    id = module.params.get('id', None)
+    opt_filters = module.params.get('filters', None)
+
+    search_manager = TransitGatewayVpcAttachmentManager(module=module)
+    filters = dict()
+
+    if name:
+        filters['tag:Name'] = name
+
+    if not module.params.get('include_deleted'):
+        # Attachments lurk in a 'deleted' state for a while; ignore them so we
+        # can reuse the names
+        filters['state'] = [
+            'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
+            'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+        ]
+
+    if opt_filters:
+        filters.update(opt_filters)
+
+    attachments = search_manager.list(filters=filters, id=id)
+
+    module.exit_json(changed=False, attachments=attachments, filters=filters)
+
+
+if __name__ == '__main__':
+    main()

From ad21e1978337328ecf9e794b2c874cb271bab3f8 Mon Sep 17 00:00:00 2001
From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com>
Date: Thu, 5 May 2022 17:54:19 -0400
Subject: [PATCH 428/683] Add missing `elements` option to type: list that did not specify (#1116)

Add missing `elements` option to type: list that did not specify

SUMMARY
Fixes #1114
ISSUE TYPE
Docs Pull Request
COMPONENT NAME
cloudfront_distribution
ecs_taskdefinition
elb_application_lb
rds_option_group
Reviewed-by: Alina Buzachis
Reviewed-by: Jill R
---
 cloudfront_distribution.py |  1 +
 ecs_taskdefinition.py      | 30 ++++++++++++++++++++++++++++--
 elb_application_lb.py      |  2 ++
 rds_option_group.py        |  3 +++
 4 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py
index 332298a8fcd..4c021d6f007 100644
--- a/cloudfront_distribution.py
+++ b/cloudfront_distribution.py
@@ -558,6 +558,7 @@
       restriction should apply to.
     - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/).'
     type: list
+    elements: str
   web_acl_id:
     description:
diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index c74bf44ec9d..34574aae4ed 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -88,6 +88,7 @@
   - This parameter is only supported if I(network_mode=bridge).
   required: False
   type: list
+  elements: str
 portMappings:
   description: The list of port mappings for the container.
   required: False
@@ -118,9 +119,10 @@
   required: False
   type: str
 command:
-  description: The command that is passed to the container.
+  description: The command that is passed to the container. If there are multiple arguments, each argument is a separate string in the array.
   required: False
   type: list
+  elements: str
 environment:
   description: The environment variables to pass to a container.
required: False @@ -210,6 +212,7 @@ "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] + elements: str drop: description: - The Linux capabilities for the container that have been removed from the default configuration provided by Docker. @@ -220,6 +223,7 @@ "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] + elements: str devices: description: - Any host devices to expose to the container. @@ -240,6 +244,7 @@ description: The explicit permissions to provide to the container for the device. required: False type: list + elements: str initProcessEnabled: description: Run an init process inside the container that forwards signals and reaps processes. required: False @@ -274,6 +279,7 @@ "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime", "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"] + elements: str maxSwap: description: - The total amount of swap memory (in MiB) a container can use. @@ -359,12 +365,14 @@ - This parameter is not supported for Windows containers. required: False type: list + elements: str dnsSearchDomains: description: - A list of DNS search domains that are presented to the container. - This parameter is not supported for Windows containers. required: False type: list + elements: str extraHosts: description: - A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. @@ -387,6 +395,7 @@ - This parameter is not supported for Windows containers. required: False type: list + elements: str interactive: description: - When I(interactive=True), it allows to deploy containerized applications that require stdin or a tty to be allocated. @@ -461,12 +470,29 @@ description: A list of namespaced kernel parameters to set in the container. required: False type: list + elements: dict + suboptions: + namespace: + description: The namespaced kernel parameter to set a C(value) for. + type: str + value: + description: The value for the namespaced kernel parameter that's specified in C(namespace). + type: str resourceRequirements: description: - The type and amount of a resource to assign to a container. - - The only supported resource is a C(GPU). + - The only supported resources are C(GPU) and C(InferenceAccelerator). required: False type: list + elements: dict + suboptions: + value: + description: The value for the specified resource type. + type: str + type: + description: The type of resource to assign to a container. + type: str + choices: ['GPU', 'InferenceAccelerator'] network_mode: description: - The Docker networking mode to use for the containers in the task. diff --git a/elb_application_lb.py b/elb_application_lb.py index 430647e7995..a7c75c00cd3 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -134,12 +134,14 @@ Conditions: type: list description: Conditions which must be met for the actions to be applied. + elements: dict Priority: type: int description: The rule priority. 
Actions: type: list description: Actions to apply if all of the rule's conditions are met. + elements: dict name: description: - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric diff --git a/rds_option_group.py b/rds_option_group.py index 3b01eaeda82..d4ed9e6ac65 100644 --- a/rds_option_group.py +++ b/rds_option_group.py @@ -70,6 +70,7 @@ description: The option settings to include in an option group. required: false type: list + elements: dict suboptions: name: description: The name of the option that has settings that you can set. @@ -111,10 +112,12 @@ description: A list of C(DBSecurityGroupMembership) name strings used for this option. required: false type: list + elements: str vpc_security_group_memberships: description: A list of C(VpcSecurityGroupMembership) name strings used for this option. required: false type: list + elements: str tags: description: - A dictionary of key value pairs to assign the option group. From cb3138305f5111408ea6af50e3e1fe5bbc468c6e Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Thu, 5 May 2022 18:26:54 -0400 Subject: [PATCH 429/683] iam_user - stabilize for migration to amazon.aws (#1059) iam_user - stabilize for migration to amazon.aws SUMMARY Stabilize for migration to amazon.aws dont delete user login profile on check mode add extra return value user to deprecate iam_user gracefully handle iam_user_info when no users returned ISSUE TYPE Feature Pull Request COMPONENT NAME iam_user Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Jill R Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Mike Graves --- iam_user.py | 64 ++++++++++++++++++++++++++++++++++++------------ iam_user_info.py | 13 +++++++--- 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/iam_user.py b/iam_user.py index 6266d992003..b820c60dedb 100644 --- a/iam_user.py +++ b/iam_user.py @@ -161,20 +161,20 @@ user_id: description: the stable and unique string identifying the user type: str - sample: AGPAIDBWE12NSFINE55TM + sample: "AGPAIDBWE12NSFINE55TM" user_name: description: the friendly name that identifies the user type: str - sample: testuser1 + sample: "testuser1" path: description: the path to the user type: str - sample: / + sample: "/" tags: description: user tags type: dict returned: always - sample: '{"Env": "Prod"}' + sample: {"Env": "Prod"} ''' try: @@ -228,10 +228,6 @@ def convert_friendly_names_to_arns(connection, module, policy_names): def wait_iam_exists(connection, module): - if module.check_mode: - return - if not module.params.get('wait'): - return user_name = module.params.get('name') wait_timeout = module.params.get('wait_timeout') @@ -263,6 +259,7 @@ def create_or_update_login_profile(connection, module): try: retval = connection.update_login_profile(**user_params) except is_boto3_error_code('NoSuchEntity'): + # Login profile does not yet exist - create it try: retval = connection.create_login_profile(**user_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -274,14 +271,26 @@ def create_or_update_login_profile(connection, module): def delete_login_profile(connection, module): - + ''' + Deletes a users login profile. 
+ Parameters: + connection: IAM client + module: AWSModule + Returns: + (bool): True if login profile deleted, False if no login profile found to delete + ''' user_params = dict() user_params['UserName'] = module.params.get('name') - try: - connection.delete_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user login profile") + # User does not have login profile - nothing to delete + if not user_has_login_profile(connection, module, user_params['UserName']): + return False + + if not module.check_mode: + try: + connection.delete_login_profile(**user_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete user login profile") return True @@ -331,6 +340,9 @@ def create_or_update_user(connection, module): update_result = update_user_tags(connection, module, params, user) if module.params['update_password'] == "always" and module.params.get('password') is not None: + # Can't compare passwords, so just return changed on check mode runs + if module.check_mode: + module.exit_json(changed=True) login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): @@ -382,7 +394,7 @@ def create_or_update_user(connection, module): # `LoginProfile` is only returned on `create_login_profile` method user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) - module.exit_json(changed=changed, iam_user=user) + module.exit_json(changed=changed, iam_user=user, user=user['user']) def destroy_user(connection, module): @@ -412,7 +424,7 @@ def destroy_user(connection, module): connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) # Remove user's login profile (console password) - delete_user_login_profile(connection, module, user_name) + delete_login_profile(connection, module) # Remove user's ssh public keys ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] @@ -495,6 +507,25 @@ def delete_user_login_profile(connection, module, user_name): module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) +def user_has_login_profile(connection, module, name): + ''' + Returns whether or not given user has a login profile. + Parameters: + connection: IAM client + module: AWSModule + name (str): Username of user + Returns: + (bool): True if user had login profile, False if not + ''' + try: + connection.get_login_profile(UserName=name) + except is_boto3_error_code('NoSuchEntity'): + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name)) + return True + + def update_user_tags(connection, module, params, user): user_name = params['UserName'] existing_tags = user['user']['tags'] @@ -543,6 +574,9 @@ def main(): mutually_exclusive=[['password', 'remove_password']], ) + module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. 
Both values are returned for now.", + date='2024-05-01', collection_name='community.aws') + connection = module.client('iam') state = module.params.get("state") diff --git a/iam_user_info.py b/iam_user_info.py index e8fa1ac028a..ee6224880cd 100644 --- a/iam_user_info.py +++ b/iam_user_info.py @@ -111,6 +111,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -142,14 +143,18 @@ def list_iam_users(connection, module): params['UserName'] = name try: iam_users.append(connection.get_user(**params)['User']) - except (ClientError, BotoCoreError) as e: + except is_boto3_error_code('NoSuchEntity'): + pass + except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name) if group: params['GroupName'] = group try: iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users'] - except (ClientError, BotoCoreError) as e: + except is_boto3_error_code('NoSuchEntity'): + pass + except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group) if name: iam_users = [user for user in iam_users if user['UserName'] == name] @@ -158,7 +163,9 @@ def list_iam_users(connection, module): params['PathPrefix'] = path try: iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users'] - except (ClientError, BotoCoreError) as e: + except is_boto3_error_code('NoSuchEntity'): + pass + except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path) if name: iam_users = [user for user in iam_users if user['UserName'] == name] From e9dc653f2f1ab6649684a00ca146c416da718f77 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Thu, 5 May 2022 18:26:57 -0400 Subject: [PATCH 430/683] update RETURN docs with more info (#1109) lambda - update RETURN docs with more information on return data SUMMARY Give more information on what the return dictionaries contain ISSUE TYPE Docs Pull Request COMPONENT NAME lambda Reviewed-by: Mark Woolley Reviewed-by: Mandar Kulkarni Reviewed-by: Markus Bergholz --- lambda.py | 169 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 135 insertions(+), 34 deletions(-) diff --git a/lambda.py b/lambda.py index 923b1646c3d..8ad45785f21 100644 --- a/lambda.py +++ b/lambda.py @@ -169,45 +169,146 @@ RETURN = r''' code: - description: the lambda function location returned by get_function in boto3 + description: The lambda function's code returned by get_function in boto3. returned: success type: dict - sample: - { - 'location': 'a presigned S3 URL', - 'repository_type': 'S3', - } + contains: + location: + description: + - The presigned URL you can use to download the function's .zip file that you previously uploaded. + - The URL is valid for up to 10 minutes. 
+ returned: success + type: str + sample: 'https://prod-04-2014-tasks.s3.us-east-1.amazonaws.com/snapshots/sample' + repository_type: + description: The repository from which you can download the function. + returned: success + type: str + sample: 'S3' configuration: - description: the lambda function metadata returned by get_function in boto3 + description: the lambda function's configuration metadata returned by get_function in boto3 returned: success type: dict - sample: - { - 'code_sha256': 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=', - 'code_size': 123, - 'description': 'My function', - 'environment': { - 'variables': { - 'key': 'value' - } - }, - 'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1', - 'function_name': 'myFunction', - 'handler': 'index.handler', - 'last_modified': '2017-08-01T00:00:00.000+0000', - 'memory_size': 128, - 'revision_id': 'a2x9886d-d48a-4a0c-ab64-82abc005x80c', - 'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution', - 'runtime': 'nodejs6.10', - 'tracing_config': { 'mode': 'Active' }, - 'timeout': 3, - 'version': '1', - 'vpc_config': { - 'security_group_ids': [], - 'subnet_ids': [], - 'vpc_id': '123' - } - } + contains: + code_sha256: + description: The SHA256 hash of the function's deployment package. + returned: success + type: str + sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' + code_size: + description: The size of the function's deployment package in bytes. + returned: success + type: int + sample: 123 + dead_letter_config: + description: The function's dead letter queue. + returned: when the function has a dead letter queue configured + type: dict + sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } + contains: + target_arn: + description: The ARN of an SQS queue or SNS topic. + returned: when the function has a dead letter queue configured + type: str + sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 + description: + description: The function's description. + returned: success + type: str + sample: 'My function' + environment: + description: The function's environment variables. + returned: when environment variables exist + type: dict + contains: + variables: + description: Environment variable key-value pairs. + returned: when environment variables exist + type: dict + sample: {'key': 'value'} + error: + description: Error message for environment variables that could not be applied. + returned: when there is an error applying environment variables + type: dict + contains: + error_code: + description: The error code. + returned: when there is an error applying environment variables + type: str + message: + description: The error message. + returned: when there is an error applying environment variables + type: str + function_arn: + description: The function's Amazon Resource Name (ARN). + returned: on success + type: str + sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' + function_name: + description: The function's name. + returned: on success + type: str + sample: 'myFunction' + handler: + description: The function Lambda calls to begin executing your function. + returned: on success + type: str + sample: 'index.handler' + last_modified: + description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). + returned: on success + type: str + sample: '2017-08-01T00:00:00.000+0000' + memory_size: + description: The memory allocated to the function. 
+      returned: on success
+      type: int
+      sample: 128
+    revision_id:
+      description: The latest updated revision of the function or alias.
+      returned: on success
+      type: str
+      sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c'
+    role:
+      description: The function's execution role.
+      returned: on success
+      type: str
+      sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+    runtime:
+      description: The runtime environment for the Lambda function.
+      returned: on success
+      type: str
+      sample: 'nodejs6.10'
+    tracing_config:
+      description: The function's AWS X-Ray tracing configuration.
+      returned: on success
+      type: dict
+      sample: { 'mode': 'Active' }
+      contains:
+        mode:
+          description: The tracing mode.
+          returned: on success
+          type: str
+          sample: 'Active'
+    timeout:
+      description: The amount of time that Lambda allows a function to run before terminating it.
+      returned: on success
+      type: int
+      sample: 3
+    version:
+      description: The version of the Lambda function.
+      returned: on success
+      type: str
+      sample: '1'
+    vpc_config:
+      description: The function's networking configuration.
+      returned: on success
+      type: dict
+      sample: {
+        'security_group_ids': [],
+        'subnet_ids': [],
+        'vpc_id': '123'
+      }
 '''

 import base64

From b94d80416ce6d0eaa461d27cf81222332771d9c5 Mon Sep 17 00:00:00 2001
From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com>
Date: Fri, 6 May 2022 08:52:28 -0400
Subject: [PATCH 431/683] lambda - fix check mode on creation (#1108)

lambda - fix check mode on creation

Depends-On: #1116
SUMMARY
When adding integration tests for check mode runs, creating a lambda function failed in check mode with the message "Unable to get function information after creating".
Added parameter kms_key_arn - testing this in integration tests appears difficult, as I think we'd need to create an IAM policy to allow adding a kms_key, which would render these tests unsupported.
Added an extra waiter for function_update in execute_lambda to resolve an occasional integration test failure.
Fixes #1111
ISSUE TYPE
Feature Pull Request
Bugfix Pull Request
COMPONENT NAME
lambda
Reviewed-by: Alina Buzachis
Reviewed-by: Markus Bergholz
---
 execute_lambda.py |  6 ++++--
 lambda.py         | 21 ++++++++++++++++++---
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/execute_lambda.py b/execute_lambda.py
index b4cbb4a53de..b599b6636ca 100644
--- a/execute_lambda.py
+++ b/execute_lambda.py
@@ -260,8 +260,10 @@ def main():

 def wait_for_lambda(client, module, name):
     try:
-        waiter = client.get_waiter('function_active')
-        waiter.wait(FunctionName=name)
+        client_active_waiter = client.get_waiter('function_active')
+        client_updated_waiter = client.get_waiter('function_updated')
+        client_active_waiter.wait(FunctionName=name)
+        client_updated_waiter.wait(FunctionName=name)
     except botocore.exceptions.WaiterError as e:
         module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active')
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
diff --git a/lambda.py b/lambda.py
index 8ad45785f21..ff469c5bc71 100644
--- a/lambda.py
+++ b/lambda.py
@@ -107,6 +107,11 @@
     description:
       - Tag dict to apply to the function.
     type: dict
+  kms_key_arn:
+    description:
+      - The KMS key ARN used to encrypt the function's environment variables.
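+      - For example, C(arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab).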
+ type: str + version_added: 3.3.0 author: - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: @@ -451,6 +456,7 @@ def main(): vpc_security_group_ids=dict(type='list', elements='str'), environment_variables=dict(type='dict'), dead_letter_arn=dict(), + kms_key_arn=dict(type='str', no_log=False), tracing_mode=dict(choices=['Active', 'PassThrough']), tags=dict(type='dict'), ) @@ -488,6 +494,7 @@ def main(): dead_letter_arn = module.params.get('dead_letter_arn') tracing_mode = module.params.get('tracing_mode') tags = module.params.get('tags') + kms_key_arn = module.params.get('kms_key_arn') check_mode = module.check_mode changed = False @@ -543,6 +550,8 @@ def main(): func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode): func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + if kms_key_arn: + func_kwargs.update({'KMSKeyArn': kms_key_arn}) # If VPC configuration is desired if vpc_subnet_ids: @@ -674,17 +683,23 @@ def main(): if tracing_mode: func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + if kms_key_arn: + func_kwargs.update({'KMSKeyArn': kms_key_arn}) + # If VPC configuration is given if vpc_subnet_ids: func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}}) + # Function would have been created if not check mode + if check_mode: + module.exit_json(changed=True) + # Finally try to create function current_version = None try: - if not check_mode: - response = client.create_function(aws_retry=True, **func_kwargs) - current_version = response['Version'] + response = client.create_function(aws_retry=True, **func_kwargs) + current_version = response['Version'] changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to create function") From 273ed92ad2b92c28cf0944bafb076b7c3ae085eb Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Fri, 6 May 2022 10:29:24 -0400 Subject: [PATCH 432/683] remove duplicate method (#1098) iam_user - remove duplicate method SUMMARY Remove duplicate method delete_user_login_profile, as there is already a delete_login_profile method ISSUE TYPE Feature Pull Request COMPONENT NAME iam_user Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- iam_user.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/iam_user.py b/iam_user.py index b820c60dedb..c5e7160f98b 100644 --- a/iam_user.py +++ b/iam_user.py @@ -497,16 +497,6 @@ def get_attached_policy_list(connection, module, name): module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) -def delete_user_login_profile(connection, module, user_name): - - try: - return connection.delete_login_profile(UserName=user_name) - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name)) - - def user_has_login_profile(connection, module, name): ''' Returns whether or not given user has a login profile. From cbbe150aee8381afb63586004c5782b9e0252681 Mon Sep 17 00:00:00 2001 From: Lukasz Rubaszewski Date: Fri, 6 May 2022 23:36:27 +0200 Subject: [PATCH 433/683] s3_lifecycle: check that configuration is complete before returning. 
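For context, a minimal task exercising the wait behaviour this patch hardens might look like the following sketch (the bucket name and rule values are illustrative, not taken from the patch):

- name: Apply a lifecycle rule and wait until it is stably visible
  community.aws.s3_lifecycle:
    name: example-bucket
    rule_id: expire-old-logs
    prefix: logs/
    expiration_days: 30
    status: enabled
    state: present
    wait: true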
(#1085) s3_lifecycle: ensure that configuration is complete before returning. Get the bucket lifecycle configuration a few times to make sure it is stable. It was observed that shortly (~30s) after setting the rules, the get-bucket-lifecycle-configuration method alternately returns the new and old rules in a random manner. A similar issue was reported for the boto3 library: boto/boto3#2491 SUMMARY Fixes #1084 ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_lifecycle Reviewed-by: Mark Chappell Reviewed-by: Lukasz Rubaszewski Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso --- s3_lifecycle.py | 24 ++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 3f0bd784cef..a9aed2b29fd 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -485,15 +485,23 @@ def create_lifecycle_rule(client, module): _changed = changed _retries = 10 - while wait and _changed and _retries: + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again, + # the updated rules, then the old rules, then the updated rules again and + # again a couple of times. + # Thus try to read the rule a few times in a row to check if it has changed. time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 new_rules = fetch_rules(client, module, name) @@ -531,13 +539,21 @@ def destroy_lifecycle_rule(client, module): _changed = changed _retries = 10 - while wait and _changed and _retries: + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again, + # the updated rules, then the old rules, then the updated rules again and + # again a couple of times. + # Thus try to read the rule a few times in a row to check if it has changed. time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 new_rules = fetch_rules(client, module, name) From 4778a8956025609092f193fcb290eba6bdf63590 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Sat, 7 May 2022 19:23:37 -0400 Subject: [PATCH 434/683] execute_lambda - fix check mode and update RETURN docs (#1115) execute_lambda - fix check mode and update RETURN docs Depends-On: #1116 SUMMARY check_mode fix update RETURN docs to match what is actually being returned require one of name, function_arn ISSUE TYPE Bugfix Pull Request COMPONENT NAME execute_lambda ADDITIONAL INFORMATION I noticed some modules in community.aws will return data directly, and others will return data nested in a dict. Example: let iam_group be the module, retrieving a key called group_arn, and registering the result as result. In some modules you would need to query result.iam_group.group_arn, meanwhile in others you can use result.group_arn (where iam_group is assumed, since it's the name of the module). Do we have a preference for either method?
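To make the two access patterns concrete, a short sketch (the iam_group keys mirror the author's hypothetical example and are not verified module output):

- community.aws.iam_group:
    name: example-group
    state: present
  register: result

# nested convention, namespaced under the module name
- ansible.builtin.debug:
    msg: "{{ result.iam_group.group_arn }}"

# flat convention, key returned directly
- ansible.builtin.debug:
    msg: "{{ result.group_arn }}"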
Should we come to some sort of collection-wide consensus on which to use moving forward? Reviewed-by: Sloane Hertel Reviewed-by: Markus Bergholz --- execute_lambda.py | 47 +++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/execute_lambda.py b/execute_lambda.py index b599b6636ca..f9131e2bbea 100644 --- a/execute_lambda.py +++ b/execute_lambda.py @@ -108,20 +108,25 @@ ''' RETURN = ''' -output: - description: Function output if wait=true and the function returns a value +result: + description: Resulting data structure from a successful task execution. returned: success type: dict - sample: "{ 'output': 'something' }" -logs: - description: The last 4KB of the function logs. Only provided if I(tail_log) is true - type: str - returned: if I(tail_log) == true -status: - description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async) - type: int - sample: 200 - returned: always + contains: + output: + description: Function output if wait=true and the function returns a value + returned: success + type: dict + sample: "{ 'output': 'something' }" + logs: + description: The last 4KB of the function logs. Only provided if I(tail_log) is C(true) + type: str + returned: if I(tail_log) == true + status: + description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async) + type: int + sample: 200 + returned: always ''' import base64 @@ -134,6 +139,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def main(): @@ -151,7 +157,10 @@ def main(): supports_check_mode=True, mutually_exclusive=[ ['name', 'function_arn'], - ] + ], + required_one_of=[ + ('name', 'function_arn') + ], ) name = module.params.get('name') @@ -162,11 +171,8 @@ def main(): version_qualifier = module.params.get('version_qualifier') payload = module.params.get('payload') - if not (name or function_arn): - module.fail_json(msg="Must provide either a function_arn or a name to invoke.") - try: - client = module.client('lambda') + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') @@ -202,11 +208,12 @@ def main(): elif name: invoke_params['FunctionName'] = name - if not module.check_mode: - wait_for_lambda(client, module, name) + if module.check_mode: + module.exit_json(changed=True) try: - response = client.invoke(**invoke_params) + wait_for_lambda(client, module, name) + response = client.invoke(**invoke_params, aws_retry=True) except is_boto3_error_code('ResourceNotFoundException') as nfe: module.fail_json_aws(nfe, msg="Could not find Lambda to execute. 
Make sure " "the ARN is correct and your profile has " From 49465100fb581d1cd5325991391d24ca0f541fb6 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Mon, 9 May 2022 10:41:16 -0400 Subject: [PATCH 435/683] rds_instance - add deletion_protection param (#1105) rds_instance - add deletion_protection param Depends-On: #1116 SUMMARY Fixes #922 ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- rds_instance.py | 14 ++++++++++++++ rds_instance_info.py | 6 ++++++ 2 files changed, 20 insertions(+) diff --git a/rds_instance.py b/rds_instance.py index 4ae96546a0c..09cb6c06979 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -160,6 +160,13 @@ aliases: - subnet_group type: str + deletion_protection: + description: + - A value that indicates whether the DB instance has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + By default, deletion protection is disabled. + type: bool + version_added: 3.3.0 domain: description: - The Active Directory Domain to restore the instance in. @@ -666,6 +673,12 @@ returned: always type: str sample: db-UHV3QRNWX4KB6GALCIGRML6QFA +deletion_protection: + description: C(True) if the DB instance has deletion protection enabled, C(False) if not. + returned: always + type: bool + sample: False + version_added: 3.3.0 domain_memberships: description: The Active Directory Domain membership records associated with the DB instance. returned: always @@ -1256,6 +1269,7 @@ def main(): db_security_groups=dict(type='list', elements='str'), db_snapshot_identifier=dict(), db_subnet_group_name=dict(aliases=['subnet_group']), + deletion_protection=dict(type='bool'), domain=dict(), domain_iam_role_name=dict(), enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'), diff --git a/rds_instance_info.py b/rds_instance_info.py index 22a10a081ed..6e41ea62940 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -188,6 +188,12 @@ returned: always type: str sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA + deletion_protection: + description: C(True) if the DB instance has deletion protection enabled, C(False) if not. 
+ returned: always + type: bool + sample: False + version_added: 3.3.0 domain_memberships: description: List of domain memberships returned: always From 602fe73cbe1356b6daa80433f6c9879162a2c5a3 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 9 May 2022 16:41:22 +0200 Subject: [PATCH 436/683] Update rds_instance_snapshot to use handlers defined in rds.py (#789) Update rds_instance_snapshot to use handlers defined in rds.py SUMMARY Update module to use handlers defined in rds.py Enable check_mode Add integration tests Depends-On: ansible-collections/amazon.aws#553 Also requires: mattclay/aws-terminator#185 ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance_snapshot Reviewed-by: Jill R Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz Reviewed-by: Joseph Torcasso --- rds_instance_snapshot.py | 180 ++++++++++++++++----------------- 1 file changed, 72 insertions(+), 108 deletions(-) diff --git a/rds_instance_snapshot.py b/rds_instance_snapshot.py index 45915082739..2fa30f92d09 100644 --- a/rds_instance_snapshot.py +++ b/rds_instance_snapshot.py @@ -8,11 +8,11 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: rds_instance_snapshot version_added: 1.0.0 -short_description: manage Amazon RDS snapshots. +short_description: Manage Amazon RDS instance snapshots description: - Creates or deletes RDS snapshots. options: @@ -58,13 +58,14 @@ author: - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" + - "Alina Buzachis (@alinabuzachis)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create snapshot community.aws.rds_instance_snapshot: db_instance_identifier: new-database db_snapshot_identifier: new-database-snapshot - name: Delete snapshot community.aws.rds_instance_snapshot: db_snapshot_identifier: new-database-snapshot state: absent ''' -RETURN = ''' +RETURN = r''' allocated_storage: description: How much storage is allocated in GB.
returned: always @@ -201,143 +202,106 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -def get_snapshot(client, module, snapshot_id): +def get_snapshot(snapshot_id): try: response = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id) - except client.exceptions.DBSnapshotNotFoundFault: + except is_boto3_error_code("DBSnapshotNotFoundFault"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + except is_boto3_error_code("DBSnapshotNotFound"): # pylint: disable=duplicate-except + return None + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) return response['DBSnapshots'][0] -def snapshot_to_facts(client, module, snapshot): - try: - snapshot['Tags'] = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'], - aws_retry=True)['TagList']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['DBSnapshotIdentifier']) - except KeyError: - module.fail_json(msg=str(snapshot)) +def fetch_tags(snapshot): + snapshot["Tags"] = get_tags(client, module, snapshot["DBSnapshotArn"]) - return camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) + return camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"]) -def wait_for_snapshot_status(client, module, db_snapshot_id, waiter_name): - if not module.params['wait']: - return - timeout = module.params['wait_timeout'] - try: - client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id, - WaiterConfig=dict( - Delay=5, - MaxAttempts=int((timeout + 2.5) / 5) - )) - except botocore.exceptions.WaiterError as e: - if waiter_name == 'db_snapshot_deleted': - msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) - else: - msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) - module.fail_json_aws(e, msg=msg) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_snapshot_id)) - - -def ensure_snapshot_absent(client, module): - snapshot_name = module.params.get('db_snapshot_identifier') +def ensure_snapshot_absent(): + snapshot_name = module.params.get("db_snapshot_identifier") + params = {"DBSnapshotIdentifier": snapshot_name} changed = False - snapshot = get_snapshot(client, module, snapshot_name) - if snapshot and 
snapshot['Status'] != 'deleting': - try: - client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="trying to delete snapshot") - - # If we're not waiting for a delete to complete then we're all done - # so just return - if not snapshot or not module.params.get('wait'): - return dict(changed=changed) - try: - wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_deleted') - return dict(changed=changed) - except client.exceptions.DBSnapshotNotFoundFault: + snapshot = get_snapshot(snapshot_name) + if not snapshot: return dict(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "awaiting snapshot deletion") - - -def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): - if tags is None: - return False - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags) - changed = bool(tags_to_add or tags_to_remove) - if tags_to_add: - try: - client.add_tags_to_resource(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't add tags to snapshot {0}".format(resource_arn)) - if tags_to_remove: - try: - client.remove_tags_from_resource(ResourceName=resource_arn, TagKeys=tags_to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove tags from snapshot {0}".format(resource_arn)) - return changed - - -def ensure_snapshot_present(client, module): + elif snapshot and snapshot["Status"] != "deleting": + snapshot, changed = call_method(client, module, "delete_db_snapshot", params) + + return dict(changed=changed) + + +def ensure_snapshot_present(): db_instance_identifier = module.params.get('db_instance_identifier') snapshot_name = module.params.get('db_snapshot_identifier') changed = False - snapshot = get_snapshot(client, module, snapshot_name) + snapshot = get_snapshot(snapshot_name) if not snapshot: - try: - snapshot = client.create_db_snapshot(DBSnapshotIdentifier=snapshot_name, - DBInstanceIdentifier=db_instance_identifier)['DBSnapshot'] - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="trying to create db snapshot") + params = { + "DBSnapshotIdentifier": snapshot_name, + "DBInstanceIdentifier": db_instance_identifier + } + if module.params.get("tags"): + params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) + _result, changed = call_method(client, module, "create_db_snapshot", params) - if module.params.get('wait'): - wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_available') + if module.check_mode: + return dict(changed=changed) - existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'], - aws_retry=True)['TagList']) - desired_tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], existing_tags, desired_tags, purge_tags) + return dict(changed=changed, **fetch_tags(get_snapshot(snapshot_name))) - snapshot = get_snapshot(client, module, snapshot_name) + existing_tags = get_tags(client, module, snapshot["DBSnapshotArn"]) + changed |= ensure_tags(client, 
module, snapshot["DBSnapshotArn"], existing_tags, + module.params["tags"], module.params["purge_tags"]) - return dict(changed=changed, **snapshot_to_facts(client, module, snapshot)) + if module.check_mode: + return dict(changed=changed) + + return dict(changed=changed, **fetch_tags(get_snapshot(snapshot_name))) def main(): + global client + global module + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), + db_instance_identifier=dict(aliases=['instance_id']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), + ) module = AnsibleAWSModule( - argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), - db_instance_identifier=dict(aliases=['instance_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=True), - ), - required_if=[['state', 'present', ['db_instance_identifier']]] + argument_spec=argument_spec, + required_if=[['state', 'present', ['db_instance_identifier']]], + supports_check_mode=True, ) - client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['DBSnapshotNotFound'])) + retry_decorator = AWSRetry.jittered_backoff(retries=10) + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") if module.params['state'] == 'absent': - ret_dict = ensure_snapshot_absent(client, module) + ret_dict = ensure_snapshot_absent() else: - ret_dict = ensure_snapshot_present(client, module) + ret_dict = ensure_snapshot_present() module.exit_json(**ret_dict) From bbfe0e446a3bc5325eaf5bcfa87f32571d44edc1 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Tue, 10 May 2022 13:22:25 -0700 Subject: [PATCH 437/683] route53: add support for GeoLocation parameter (#1117) route53: add support for GeoLocation parameter SUMMARY Added support for the GeoLocation parameter to community.aws.route53 Fixes #89. ISSUE TYPE Feature Pull Request COMPONENT NAME route53 ADDITIONAL INFORMATION Uses https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets Reviewed-by: Joseph Torcasso Reviewed-by: Mandar Kulkarni Reviewed-by: Sloane Hertel Reviewed-by: Alina Buzachis --- route53.py | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/route53.py b/route53.py index 4ddacdca09e..bebdacdbf9a 100644 --- a/route53.py +++ b/route53.py @@ -108,6 +108,30 @@ latency-based routing - Mutually exclusive with I(weight) and I(failover). type: str + geo_location: + description: + - Allows you to control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. + - Two geolocation resource record sets that specify the same geographic location cannot be created. + - Non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation + resource record sets cannot be created. + suboptions: + continent_code: + description: + - The two-letter code for the continent.
+ - Specifying I(continent_code) with either I(country_code) or I(subdivision_code) returns an InvalidInput error. + type: str + country_code: + description: + - The two-letter code for a country. + - Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2. + type: str + subdivision_code: + description: + - The two-letter code for a state of the United States. + - To specify I(subdivision_code), I(country_code) must be set to C(US). + type: str + type: dict + version_added: 3.3.0 health_check: description: - Health check to associate with this record @@ -166,6 +190,12 @@ returned: always type: str sample: PRIMARY + geo_location: + description: geographic location based on which Route 53 responds to DNS queries. + returned: when configured + type: dict + sample: { continent_code: "NA", country_code: "US", subdivision_code: "CA" } + version_added: 3.3.0 health_check: description: health_check associated with this record. returned: always @@ -350,6 +380,29 @@ - 0 issue "ca.example.net" - 0 issuewild ";" - 0 iodef "mailto:security@example.com" +- name: Create a record with geo_location - country_code + community.aws.route53: + state: present + zone: '{{ zone_one }}' + record: 'geo-test.{{ zone_one }}' + identifier: "geohost@www" + type: A + value: 1.1.1.1 + ttl: 30 + geo_location: + country_code: US +- name: Create a record with geo_location - subdivision code + community.aws.route53: + state: present + zone: '{{ zone_one }}' + record: 'geo-test.{{ zone_one }}' + identifier: "geohost@www" + type: A + value: 1.1.1.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX ''' from operator import itemgetter @@ -495,6 +548,12 @@ def main(): identifier=dict(type='str'), weight=dict(type='int'), region=dict(type='str'), + geo_location=dict(type='dict', + options=dict( + continent_code=dict(type="str"), + country_code=dict(type="str"), + subdivision_code=dict(type="str")), + required=False), health_check=dict(type='str'), failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']), vpc_id=dict(type='str'), @@ -518,11 +577,12 @@ def main(): ('failover', 'region', 'weight'), ('alias', 'ttl'), ], - # failover, region and weight require identifier + # failover, region, weight and geo_location require identifier required_by=dict( failover=('identifier',), region=('identifier',), weight=('identifier',), + geo_location=('identifier',), ), ) @@ -557,6 +617,7 @@ def main(): vpc_id_in = module.params.get('vpc_id') wait_in = module.params.get('wait') wait_timeout_in = module.params.get('wait_timeout') + geo_location = module.params.get('geo_location') if zone_in[-1:] != '.': zone_in += "."
@@ -567,8 +628,8 @@ def main(): if command_in == 'create' or command_in == 'delete': if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None: - module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.") + if not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None: + module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") retry_decorator = AWSRetry.jittered_backoff( retries=MAX_AWS_RETRIES, @@ -604,6 +665,30 @@ def main(): 'HealthCheckId': health_check_in, 'SetIdentifier': identifier_in, }) + + if geo_location: + continent_code = geo_location.get('continent_code') + country_code = geo_location.get('country_code') + subdivision_code = geo_location.get('subdivision_code') + + if continent_code and (country_code or subdivision_code): + module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.') + + if not any([continent_code, country_code, subdivision_code]): + module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.') + + if subdivision_code and (country_code or '').lower() != 'us': + module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.') + + # Build geo_location suboptions specification + resource_record_set['GeoLocation'] = {} + if continent_code: + resource_record_set['GeoLocation']['ContinentCode'] = continent_code + if country_code: + resource_record_set['GeoLocation']['CountryCode'] = country_code + if subdivision_code: + resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code + if command_in == 'delete' and aws_record is not None: resource_record_set['TTL'] = aws_record.get('TTL') if not resource_record_set['ResourceRecords']: From ff3080f1899c869c55c306ae31e5a971f12f493f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Sun, 15 May 2022 08:02:59 -0400 Subject: [PATCH 438/683] ec2_transit_gateway_IncorrectState: retry on IncorrectState (#1110) * ec2_transit_gateway_IncorrectState: retry on IncorrectState Do not immediately bail out in case of an IncorrectState error. We now instead wait and retry.
E.g: https://ddf1d621d7b816539203-e56a3e9170eb32404c2ff3e77b13356f.ssl.cf2.rackcdn.com/799/690df9c2d3f69323837542b96fa7318650fd5308/check/integration-community.aws-1/f30fb4f/job-output.txt --- ec2_transit_gateway.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index c013ea67379..4237376203b 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -242,7 +242,11 @@ class AnsibleEc2Tgw(object): def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client('ec2') + retry_decorator = AWSRetry.jittered_backoff( + catch_extra_error_codes=['IncorrectState'], + ) + connection = module.client('ec2', retry_decorator=retry_decorator) + self._connection = connection self._check_mode = self._module.check_mode def process(self): From 1aa92664de9c4daea886f9e2346c2fbba81cf6c5 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Mon, 16 May 2022 08:55:39 -0400 Subject: [PATCH 439/683] aws_kms - minor doc fixes (#1101) aws_kms - minor doc fixes SUMMARY Minor doc fixes ISSUE TYPE Docs Pull Request COMPONENT NAME aws_kms aws_kms_info Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- aws_kms.py | 68 ++++++++++++++++++++++++++-------------------- aws_kms_info.py | 71 ++++++++++++++++++++++++++++--------------------- 2 files changed, 79 insertions(+), 60 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index cf9c4b5eb96..95bc51834cc 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -10,7 +10,7 @@ --- module: aws_kms version_added: 1.0.0 -short_description: Perform various KMS management tasks. +short_description: Perform various KMS management tasks description: - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. options: @@ -106,7 +106,7 @@ default: present type: str enabled: - description: Whether or not a key is enabled + description: Whether or not a key is enabled. default: True type: bool description: @@ -128,12 +128,12 @@ version_added: 1.4.0 purge_tags: description: Whether the I(tags) argument should cause tags not in the list to - be removed + be removed. default: False type: bool purge_grants: description: Whether the I(grants) argument should cause grants not in the list to - be removed + be removed. default: False type: bool grants: @@ -270,17 +270,20 @@ RETURN = r''' key_id: - description: ID of key + description: ID of key. type: str returned: always sample: abcd1234-abcd-1234-5678-ef1234567890 key_arn: - description: ARN of key + description: ARN of key. type: str returned: always sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 key_state: - description: The state of the key + description: + - The state of the key. + - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), + C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). type: str returned: always sample: PendingDeletion @@ -297,23 +300,29 @@ returned: always sample: AWS_KMS aws_account_id: - description: The AWS Account ID that the key belongs to + description: The AWS Account ID that the key belongs to. type: str returned: always sample: 1234567890123 creation_date: - description: Date of creation of the key + description: Date and time of creation of the key. 
type: str returned: always sample: "2017-04-18T15:12:08.551000+10:00" +deletion_date: + description: Date and time after which KMS deletes this KMS key. + type: str + returned: when key_state is PendingDeletion + sample: "2017-04-18T15:12:08.551000+10:00" + version_added: 3.3.0 description: - description: Description of the key + description: Description of the key. type: str returned: always sample: "My Key for Protecting important stuff" enabled: - description: Whether the key is enabled. True if C(KeyState) is true. - type: str + description: Whether the key is enabled. True if I(key_state) is C(Enabled). + type: bool returned: always sample: false enable_key_rotation: @@ -322,14 +331,14 @@ returned: always sample: false aliases: - description: list of aliases associated with the key + description: List of aliases associated with the key. type: list returned: always sample: - aws/acm - aws/ebs policies: - description: list of policy documents for the key. Empty when access is denied even if there are policies. + description: List of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always elements: str @@ -340,7 +349,7 @@ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" Effect: "Allow" Principal: - AWS: "*" + AWS: "*" Action: - "kms:Encrypt" - "kms:Decrypt" @@ -356,7 +365,7 @@ - Sid: "Allow direct access to key metadata to the account" Effect: "Allow" Principal: - AWS: "arn:aws:iam::111111111111:root" + AWS: "arn:aws:iam::111111111111:root" Action: - "kms:Describe*" - "kms:Get*" @@ -364,7 +373,7 @@ - "kms:RevokeGrant" Resource: "*" key_policies: - description: list of policy documents for the key. Empty when access is denied even if there are policies. + description: List of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always elements: dict @@ -400,15 +409,16 @@ Resource: "*" version_added: 3.3.0 tags: - description: dictionary of tags applied to the key + description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags. type: dict returned: always sample: Name: myKey Purpose: protecting_stuff grants: - description: list of grants associated with a key - type: complex + description: List of grants associated with a key. + type: list + elements: dict returned: always contains: constraints: @@ -420,22 +430,22 @@ encryption_context_equals: "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" creation_date: - description: Date of creation of the grant + description: Date of creation of the grant. type: str returned: always sample: "2017-04-18T15:12:08+10:00" grant_id: - description: The unique ID for the grant + description: The unique ID for the grant. type: str returned: always sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 grantee_principal: - description: The principal that receives the grant's permissions + description: The principal that receives the grant's permissions. type: str returned: always sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz issuing_account: - description: The AWS account under which the grant was issued + description: The AWS account under which the grant was issued. 
type: str returned: always sample: arn:aws:iam::01234567890:root @@ -445,29 +455,29 @@ returned: always sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 name: - description: The friendly name that identifies the grant + description: The friendly name that identifies the grant. type: str returned: always sample: xyz operations: - description: The list of operations permitted by the grant + description: The list of operations permitted by the grant. type: list returned: always sample: - Decrypt - RetireGrant retiring_principal: - description: The principal that can retire the grant + description: The principal that can retire the grant. type: str returned: always sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz changes_needed: - description: grant types that would be changed/were changed. + description: Grant types that would be changed/were changed. type: dict returned: always sample: { "role": "add", "role grant": "add" } had_invalid_entries: - description: there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made. + description: Whether there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made. type: bool returned: always ''' diff --git a/aws_kms_info.py b/aws_kms_info.py index c67e58d27ec..fabff61fcc1 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -6,13 +6,13 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: aws_kms_info version_added: 1.0.0 short_description: Gather information about AWS KMS keys description: - - Gather information about AWS KMS keys including tags and grants + - Gather information about AWS KMS keys including tags and grants. author: "Will Thames (@willthames)" options: alias: @@ -42,7 +42,7 @@ - Mutually exclusive with I(alias) and I(key_id). type: dict pending_deletion: - description: Whether to get full details (tags, grants etc.) of keys pending deletion + description: Whether to get full details (tags, grants etc.) of keys pending deletion. default: False type: bool keys_attr: @@ -59,7 +59,7 @@ ''' -EXAMPLES = ''' +EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all KMS keys @@ -76,24 +76,27 @@ "tag:Name": Example ''' -RETURN = ''' +RETURN = r''' kms_keys: - description: list of keys + description: List of keys. type: complex returned: always contains: key_id: - description: ID of key + description: ID of key. type: str returned: always sample: abcd1234-abcd-1234-5678-ef1234567890 key_arn: - description: ARN of key + description: ARN of key. type: str returned: always sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 key_state: - description: The state of the key + description: + - The state of the key. + - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), + C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). type: str returned: always sample: PendingDeletion @@ -103,54 +106,59 @@ returned: always sample: ENCRYPT_DECRYPT origin: - description: - The source of the key's key material. When this value is C(AWS_KMS), + description: The source of the key's key material. When this value is C(AWS_KMS), AWS KMS created the key material. When this value is C(EXTERNAL), the key material was imported or the CMK lacks key material. 
type: str returned: always sample: AWS_KMS aws_account_id: - description: The AWS Account ID that the key belongs to + description: The AWS Account ID that the key belongs to. type: str returned: always sample: 1234567890123 creation_date: - description: Date of creation of the key + description: Date and time of creation of the key. type: str returned: always sample: "2017-04-18T15:12:08.551000+10:00" + deletion_date: + description: Date and time after which KMS deletes this KMS key. + type: str + returned: when key_state is PendingDeletion + sample: "2017-04-18T15:12:08.551000+10:00" + version_added: 3.3.0 description: - description: Description of the key + description: Description of the key. type: str returned: always sample: "My Key for Protecting important stuff" enabled: - description: Whether the key is enabled. True if C(KeyState) is true. - type: str + description: Whether the key is enabled. True if I(key_state) is C(Enabled). + type: bool returned: always sample: false enable_key_rotation: - description: Whether the automatically key rotation every year is enabled. Returns None if key rotation status can't be determined. + description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. type: bool returned: always sample: false aliases: - description: list of aliases associated with the key + description: list of aliases associated with the key. type: list returned: always sample: - aws/acm - aws/ebs tags: - description: dictionary of tags applied to the key. Empty when access is denied even if there are tags. + description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags. type: dict returned: always sample: Name: myKey Purpose: protecting_stuff policies: - description: list of policy documents for the key. Empty when access is denied even if there are policies. + description: List of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always elements: str @@ -185,7 +193,7 @@ - "kms:RevokeGrant" Resource: "*" key_policies: - description: list of policy documents for the key. Empty when access is denied even if there are policies. + description: List of policy documents for the key. Empty when access is denied even if there are policies. type: list returned: always elements: dict @@ -221,8 +229,9 @@ Resource: "*" version_added: 3.3.0 grants: - description: list of grants associated with a key - type: complex + description: List of grants associated with a key. + type: list + elements: dict returned: always contains: constraints: @@ -232,24 +241,24 @@ returned: always sample: encryption_context_equals: - "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" + "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" creation_date: - description: Date of creation of the grant + description: Date of creation of the grant. type: str returned: always sample: "2017-04-18T15:12:08+10:00" grant_id: - description: The unique ID for the grant + description: The unique ID for the grant. type: str returned: always sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 grantee_principal: - description: The principal that receives the grant's permissions + description: The principal that receives the grant's permissions. 
type: str returned: always sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz From 72a864eb303b3e6662927fb68c7a4856ada4c468 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Tue, 17 May 2022 10:31:57 +0100 Subject: [PATCH 440/683] Add retries to elb / target group info modules (#1113) SUMMARY Currently backoff retries are applied with 10 attempts overall, but due to the pagination it's defaulting back to 4. ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_application_lb_info elb_target_group_info ADDITIONAL INFORMATION --- elb_application_lb_info.py | 2 +- elb_target_group_info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py index dbd4b7e0ab6..9a6e817469f 100644 --- a/elb_application_lb_info.py +++ b/elb_application_lb_info.py @@ -223,7 +223,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict -@AWSRetry.jittered_backoff() +@AWSRetry.jittered_backoff(retries=10) def get_paginator(connection, **kwargs): paginator = connection.get_paginator('describe_load_balancers') return paginator.paginate(**kwargs).build_full_result() diff --git a/elb_target_group_info.py b/elb_target_group_info.py index c17b61ab669..88d3491077b 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -217,7 +217,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict -@AWSRetry.jittered_backoff() +@AWSRetry.jittered_backoff(retries=10) def get_paginator(**kwargs): paginator = client.get_paginator('describe_target_groups') return paginator.paginate(**kwargs).build_full_result() From fc622a7ea66e4122488664a4c6b1bc2f17f974bb Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 20 May 2022 13:45:40 +0200 Subject: [PATCH 441/683] aws_secret - Support purge_tags (#1150) aws_secret - Support purge_tags SUMMARY aws_secret currently defaults to purging all tags (even if tags isn't specified), which is a little aggressive.
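As a sketch of the intended semantics (the secret name, value, and tags are illustrative):

- name: Update a secret without clobbering tags managed elsewhere
  community.aws.aws_secret:
    name: example/secret
    secret: "{{ secret_value }}"
    tags:
      Owner: team-a
    purge_tags: false

- name: Explicitly remove all tags from the secret
  community.aws.aws_secret:
    name: example/secret
    secret: "{{ secret_value }}"
    tags: {}
    purge_tags: true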
Add a purge_tags parameter. Only purge tags if tags: {} is set (rather than when tags is None). ISSUE TYPE Feature Pull Request COMPONENT NAME aws_secret ADDITIONAL INFORMATION Related to #1146 Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- aws_secret.py | 70 ++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 58 insertions(+), 12 deletions(-) diff --git a/aws_secret.py b/aws_secret.py index 03f4a8d3592..7ebce8da603 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -62,8 +62,17 @@ version_added: 3.1.0 tags: description: - - Specifies a list of user-defined tags that are attached to the secret. + - Specifies a dictionary of user-defined tags that are attached to the secret. + - To remove all tags set I(tags={}) and I(purge_tags=true). type: dict + purge_tags: + description: + - If I(purge_tags=true) and I(tags) is set, existing tags will be purged from the resource + to match exactly what is defined by the I(tags) parameter. + type: bool + required: false + default: true + version_added: 4.0.0 rotation_lambda: description: - Specifies the ARN of the Lambda function that can rotate the secret. @@ -110,12 +119,17 @@ type: complex contains: arn: - description: The ARN of the secret + description: The ARN of the secret. returned: always type: str sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx + description: + description: A description of the secret.
+ type: dict + returned: when the secret has tags + example: {'MyTagName': 'Some Value'} + version_added: 4.0.0 ''' from ansible.module_utils._text import to_bytes @@ -328,12 +365,16 @@ def update_rotation(self, secret): return response def tag_secret(self, secret_name, tags): + if self.module.check_mode: + self.module.exit_json(changed=True) try: self.client.tag_resource(SecretId=secret_name, Tags=tags) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret") def untag_secret(self, secret_name, tag_keys): + if self.module.check_mode: + self.module.exit_json(changed=True) try: self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys) except (BotoCoreError, ClientError) as e: @@ -391,7 +432,8 @@ def main(): 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), 'resource_policy': dict(type='json', default=None), - 'tags': dict(type='dict', default={}), + 'tags': dict(type='dict', default=None), + 'purge_tags': dict(type='bool', default=True), 'rotation_lambda': dict(), 'rotation_interval': dict(type='int', default=30), 'recovery_window': dict(type='int', default=30), @@ -414,6 +456,7 @@ def main(): lambda_arn=module.params.get('rotation_lambda'), rotation_interval=module.params.get('rotation_interval') ) + purge_tags = module.params.get('purge_tags') current_secret = secrets_mgr.get_secret(secret.name) @@ -453,15 +496,18 @@ def main(): else: result = secrets_mgr.put_resource_policy(secret) changed = True - current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags) - if tags_to_add: - secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) - changed = True - if tags_to_remove: - secrets_mgr.untag_secret(secret.name, tags_to_remove) - changed = True + if module.params.get('tags') is not None: + current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) + if tags_to_add: + secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) + changed = True + if tags_to_remove: + secrets_mgr.untag_secret(secret.name, tags_to_remove) + changed = True result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) + if result.get('tags', None) is not None: + result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', [])) result.pop("response_metadata") module.exit_json(changed=changed, secret=result) From f02517ba5c22e72bb5a32b18afdf2131b0e38382 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 23 May 2022 13:08:50 +0200 Subject: [PATCH 442/683] New Modules : NetworkFirewall firewall (#1107) New Modules : NetworkFirewall firewall SUMMARY New modules for managing the NetworkFirewall firewall resources. 
ISSUE TYPE New Module Pull Request COMPONENT NAME networkfirewall networkfirewall_info ADDITIONAL INFORMATION TODO: Finish up documentation (usage examples) Integration tests Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell --- networkfirewall.py | 365 ++++++++++++++++++++++++++++++++++++++++ networkfirewall_info.py | 236 ++++++++++++++++++++++++++ 2 files changed, 601 insertions(+) create mode 100644 networkfirewall.py create mode 100644 networkfirewall_info.py diff --git a/networkfirewall.py b/networkfirewall.py new file mode 100644 index 00000000000..fefb565fef5 --- /dev/null +++ b/networkfirewall.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: networkfirewall +short_description: manage AWS Network Firewall firewalls +version_added: 4.0.0 +description: + - A module for creating, updating and deleting AWS Network Firewall firewalls. +options: + arn: + description: + - The ARN of the firewall. + - Exactly one of I(arn) or I(name) must be provided. + required: false + type: str + aliases: ['firewall_arn'] + name: + description: + - The name of the firewall. + - Cannot be updated after creation. + - Exactly one of I(arn) or I(name) must be provided. + required: false + type: str + aliases: ['firewall_name'] + state: + description: + - Create or remove the firewall. + required: false + choices: ['present', 'absent'] + default: 'present' + type: str + description: + description: + - A description for the firewall. + required: false + type: str + tags: + description: + - A dictionary representing the tags associated with the firewall. + - 'For example C({"Example Tag": "some example value"})' + - Unless I(purge_tags=False) all other tags will be removed from the + firewall. + type: dict + required: false + delete_protection: + description: + - When I(delete_protection=True), the firewall is protected from deletion. + - Defaults to C(false) when not provided on creation. + type: bool + required: false + policy_change_protection: + description: + - When I(policy_change_protection=True), the firewall is protected from + changes to which policy is attached to the firewall. + - Defaults to C(false) when not provided on creation. + type: bool + required: false + aliases: ['firewall_policy_change_protection'] + subnet_change_protection: + description: + - When I(subnet_change_protection=True), the firewall is protected from + changes to which subnets is attached to the firewall. + - Defaults to C(false) when not provided on creation. + type: bool + required: false + purge_tags: + description: + - If I(purge_tags=true) and I(tags) is defined existing tags will be + purged from the resource to match exactly what is defined by the + I(tags) parameter. + type: bool + required: false + default: True + wait: + description: + - On creation, whether to wait for the firewall to reach the C(READY) + state. + - On deletion, whether to wait for the firewall to reach the C(DELETED) + state. + - On update, whether to wait for the firewall to reach the C(IN_SYNC) + configuration synchronization state. + type: bool + required: false + default: true + wait_timeout: + description: + - Maximum time, in seconds, to wait for the firewall to reach the + expected state. + - Defaults to 600 seconds. 
+ type: int + required: false + subnets: + description: + - The ID of the subnets to which the firewall will be associated. + - Required when creating a new firewall. + type: list + elements: str + required: false + purge_subnets: + description: + - If I(purge_subnets=true), existing subnets will be removed from the + firewall as necessary to match exactly what is defined by I(subnets). + type: bool + required: false + default: true + policy: + description: + - The ARN of the Network Firewall policy to use for the firewall. + - Required when creating a new firewall. + type: str + required: false + aliases: ['firewall_policy_arn'] + +author: Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' +# Create an AWS Network Firewall +- community.aws.networkfirewall: + name: 'ExampleFirewall' + state: present + policy: 'ExamplePolicy' + subnets: + - 'subnet-123456789abcdef01' + +# Create an AWS Network Firewall with various options, don't wait for creation +# to finish. +- community.aws.networkfirewall: + name: 'ExampleFirewall' + state: present + delete_protection: True + description: "An example Description" + policy: 'ExamplePolicy' + policy_change_protection: True + subnets: + - 'subnet-123456789abcdef01' + - 'subnet-abcdef0123456789a' + subnet_change_protection: True + tags: + ExampleTag: Example Value + another_tag: another_example + wait: false + + +# Delete an AWS Network Firewall +- community.aws.networkfirewall: + state: absent + name: 'ExampleFirewall' +''' + +RETURN = ''' +firewall: + description: The full details of the firewall + returned: success + type: dict + contains: + firewall: + description: The details of the firewall + type: dict + returned: success + contains: + delete_protection: + description: A flag indicating whether it is possible to delete the firewall. + type: str + returned: success + example: true + description: + description: A description of the firewall. + type: str + returned: success + example: "Description" + firewall_arn: + description: The ARN of the firewall. + type: str + returned: success + example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall" + firewall_id: + description: A unique ID for the firewall. + type: str + returned: success + example: "12345678-abcd-1234-abcd-123456789abc" + firewall_name: + description: The name of the firewall. + type: str + returned: success + example: "ExampleFirewall" + firewall_policy_arn: + description: The ARN of the firewall policy used by the firewall. + type: str + returned: success + example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy" + firewall_policy_change_protection: + description: + - A flag indicating whether it is possible to change which firewall + policy is used by the firewall. + type: bool + returned: success + example: false + subnet_change_protection: + description: + - A flag indicating whether it is possible to change which subnets + the firewall endpoints are in. + type: bool + returned: success + example: true + subnets: + description: A list of the subnets the firewall endpoints are in. + type: list + elements: str + example: ["subnet-12345678", "subnet-87654321"] + subnet_mappings: + description: A list representing the subnets the firewall endpoints are in. + type: list + elements: dict + contains: + subnet_id: + description: The ID of the subnet. 
+ type: str + returned: success + example: "subnet-12345678" + tags: + description: The tags associated with the firewall. + type: dict + returned: success + example: '{"SomeTag": "SomeValue"}' + vpc_id: + description: The ID of the VPC that the firewall is used by. + type: str + returned: success + example: "vpc-0123456789abcdef0" + firewall_metadata: + description: Metadata about the firewall + type: dict + returned: success + contains: + configuration_sync_state_summary: + description: + - A short summary of the synchronization status of the + policy and rule groups. + type: str + returned: success + example: "IN_SYNC" + status: + description: + - A short summary of the status of the firewall endpoints. + type: str + returned: success + example: "READY" + sync_states: + description: + - A description, broken down by availability zone, of the status + of the firewall endpoints as well as the synchronization status + of the policies and rule groups. + type: dict + returned: success + example: + { + "us-east-1a": { + "attachment": { + "endpoint_id": "vpce-123456789abcdef01", + "status": "READY", + "subnet_id": "subnet-12345678" + }, + "config": { + "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": { + "sync_status": "IN_SYNC", + "update_token": "abcdef01-0000-0000-0000-123456789abc" + }, + "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": { + "sync_status": "IN_SYNC", + "update_token": "12345678-0000-0000-0000-abcdef012345" + } + } + } + } +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager + + +def main(): + + argument_spec = dict( + name=dict(type='str', required=False, aliases=['firewall_name']), + arn=dict(type='str', required=False, aliases=['firewall_arn']), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + description=dict(type='str', required=False), + tags=dict(type='dict', required=False), + purge_tags=dict(type='bool', required=False, default=True), + wait=dict(type='bool', required=False, default=True), + wait_timeout=dict(type='int', required=False), + subnet_change_protection=dict(type='bool', required=False), + policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']), + delete_protection=dict(type='bool', required=False), + subnets=dict(type='list', elements='str', required=False), + purge_subnets=dict(type='bool', required=False, default=True), + policy=dict(type='str', required=False, aliases=['firewall_policy_arn']), + ) + + mutually_exclusive = [ + ('arn', 'name',) + ] + required_one_of = [ + ('arn', 'name',) + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + ) + + arn = module.params.get('arn') + name = module.params.get('name') + state = module.params.get('state') + + manager = NetworkFirewallManager(module, name=name, arn=arn) + manager.set_wait(module.params.get('wait', None)) + manager.set_wait_timeout(module.params.get('wait_timeout', None)) + + if state == 'absent': + manager.set_delete_protection(module.params.get('delete_protection', None)) + manager.delete() + else: + if not manager.original_resource: + if not module.params.get('subnets', None): + module.fail_json('The subnets parameter must be provided on creation.') + if not 
module.params.get('policy', None): + module.fail_json('The policy parameter must be provided on creation.') + manager.set_description(module.params.get('description', None)) + manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) + manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None)) + manager.set_policy_change_protection(module.params.get('policy_change_protection', None)) + manager.set_delete_protection(module.params.get('delete_protection', None)) + manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None)) + manager.set_policy(module.params.get('policy', None)) + manager.flush_changes() + + results = dict( + changed=manager.changed, + firewall=manager.updated_resource, + ) + if manager.changed: + diff = dict( + before=manager.original_resource, + after=manager.updated_resource, + ) + results['diff'] = diff + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/networkfirewall_info.py b/networkfirewall_info.py new file mode 100644 index 00000000000..48db97ea67a --- /dev/null +++ b/networkfirewall_info.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: networkfirewall_info +short_description: describe AWS Network Firewall firewalls +version_added: 4.0.0 +description: + - A module for describing AWS Network Firewall firewalls. +options: + arn: + description: + - The ARN of the Network Firewall. + - Mutually exclusive with I(name) and I(vpc_ids). + required: false + type: str + name: + description: + - The name of the Network Firewall. + - Mutually exclusive with I(arn) and I(vpc_ids). + required: false + type: str + vpc_ids: + description: + - A List of VPCs to retrieve the firewalls for. + - Mutually exclusive with I(name) and I(arn). + required: false + type: list + elements: str + aliases: ['vpcs', 'vpc_id'] + +author: Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' + +# Describe all firewalls in an account +- community.aws.networkfirewall_info: {} + +# Describe a firewall by ARN +- community.aws.networkfirewall_info: + arn: arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall + +# Describe a firewall by name +- community.aws.networkfirewall_info: + name: ExampleFirewall +''' + +RETURN = ''' +firewall_list: + description: A list of ARNs of the matching firewalls. + type: list + elements: str + returned: When a firewall name isn't specified + example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall/Example1', + 'arn:aws:network-firewall:us-east-1:123456789012:firewall/Example2'] + +firewalls: + description: The details of the firewalls + returned: success + type: list + elements: dict + contains: + firewall: + description: The details of the firewall + type: dict + returned: success + contains: + delete_protection: + description: A flag indicating whether it is possible to delete the firewall. + type: str + returned: success + example: true + description: + description: A description of the firewall. + type: str + returned: success + example: "Description" + firewall_arn: + description: The ARN of the firewall. 
+ type: str + returned: success + example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall" + firewall_id: + description: A unique ID for the firewall. + type: str + returned: success + example: "12345678-abcd-1234-abcd-123456789abc" + firewall_name: + description: The name of the firewall. + type: str + returned: success + example: "ExampleFirewall" + firewall_policy_arn: + description: The ARN of the firewall policy used by the firewall. + type: str + returned: success + example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy" + firewall_policy_change_protection: + description: + - A flag indicating whether it is possible to change which firewall + policy is used by the firewall. + type: bool + returned: success + example: false + subnet_change_protection: + description: + - A flag indicating whether it is possible to change which subnets + the firewall endpoints are in. + type: bool + returned: success + example: true + subnet_mappings: + description: A list of the subnets the firewall endpoints are in. + type: list + elements: dict + contains: + subnet_id: + description: The ID of the subnet. + type: str + returned: success + example: "subnet-12345678" + tags: + description: The tags associated with the firewall. + type: dict + returned: success + example: '{"SomeTag": "SomeValue"}' + vpc_id: + description: The ID of the VPC that the firewall is used by. + type: str + returned: success + example: "vpc-0123456789abcdef0" + firewall_metadata: + description: Metadata about the firewall + type: dict + returned: success + contains: + configuration_sync_state_summary: + description: + - A short summary of the synchronization status of the + policy and rule groups. + type: str + returned: success + example: "IN_SYNC" + status: + description: + - A short summary of the status of the firewall endpoints. + type: str + returned: success + example: "READY" + sync_states: + description: + - A description, broken down by availability zone, of the status + of the firewall endpoints as well as the synchronization status + of the policies and rule groups. 
+ type: dict + returned: success + example: + { + "us-east-1a": { + "attachment": { + "endpoint_id": "vpce-123456789abcdef01", + "status": "READY", + "subnet_id": "subnet-12345678" + }, + "config": { + "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": { + "sync_status": "IN_SYNC", + "update_token": "abcdef01-0000-0000-0000-123456789abc" + }, + "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": { + "sync_status": "IN_SYNC", + "update_token": "12345678-0000-0000-0000-abcdef012345" + } + } + } + } +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager + + +def main(): + + argument_spec = dict( + name=dict(type='str', required=False), + arn=dict(type='str', required=False), + vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ('arn', 'name', 'vpc_ids',), + ], + ) + + arn = module.params.get('arn') + name = module.params.get('name') + vpcs = module.params.get('vpc_ids') + + manager = NetworkFirewallManager(module) + + results = dict(changed=False) + + if name or arn: + firewall = manager.get_firewall(name=name, arn=arn) + if firewall: + results['firewalls'] = [firewall] + else: + results['firewalls'] = [] + else: + if vpcs: + firewall_list = manager.list(vpc_ids=vpcs) + else: + firewall_list = manager.list() + results['firewall_list'] = firewall_list + firewalls = [manager.get_firewall(arn=f) for f in firewall_list] + results['firewalls'] = firewalls + + module.exit_json(**results) + + +if __name__ == '__main__': + main() From bf9e5de18ac4d7c81efa925c7a7607042bc8f94b Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Mon, 23 May 2022 20:02:01 +0200 Subject: [PATCH 443/683] fix ecs_cluster integration test (#1145) fix ecs_cluster integration test SUMMARY ecs_cluster: make the ecs_cluster integration test work again - these are bugs I hit that had to be fixed to complete this work ecs_taskdefinition: fix change detection when the launch_type parameter is changed ecs_service: the task_definition comparison never matched, so every run resulted in a changed task; change detection for health_check_grace_period_seconds was never implemented, but was tested and failing once the task_definition comparison worked correctly ref: #1142 ISSUE TYPE Bugfix Pull Request COMPONENT NAME ecs_taskdefinition ecs_service ADDITIONAL INFORMATION Basically, the existing test tasks are not changed - they were just sorted, and the tasks marked as fixme were removed because what they cover is simply not possible (changing the network settings of a created service). Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- ecs_service.py | 9 ++++++++- ecs_taskdefinition.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 8e7adbcacc2..66f20b63d81 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -549,7 +549,14 @@ def describe_service(self, cluster_name, service_name): raise Exception("Unknown problem describing service %s."
% service_name) def is_matching_service(self, expected, existing): - if expected['task_definition'] != existing['taskDefinition']: + # aws returns the arn of the task definition + # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3 + # but the user is just entering + # ansible-fargate-nginx:3 + if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: + return False + + if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): return False if (expected['load_balancers'] or []) != existing['loadBalancers']: diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 34574aae4ed..1c0c863750d 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -956,7 +956,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ if requested_task_role_arn != td.get('taskRoleArn', ""): return None - if requested_launch_type is not None and requested_launch_type not in td.get('compatibilities', []): + if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []): return None existing_volumes = td.get('volumes', []) or [] From 758f573c8a6658a8718b63d953b008ce4b5583ef Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Mon, 23 May 2022 21:29:45 -0400 Subject: [PATCH 444/683] rds_instance - add snapshot tests, update docs, refactor tests (#1081) rds_instance - add snapshot tests, update docs, refactor tests Depends-On: ansible-collections/amazon.aws#776 Depends-On: #1105 SUMMARY add snapshot tests to test restoring db from snapshot and fix bugs associated fix some typos in documentation and remove duplicate parameter (added as alias so no breaking change) remove unused IAM role in tests and add some missing cleanups ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION this module had both db_snapshot_identifier and snapshot_identifier as separate params, with the latter being required to restore from snapshot, resulting in some parameter missing errors. moving snapshot_identifier as an alias of db_snapshot_identifier fixes this issue. Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell Reviewed-by: Sloane Hertel --- rds_cluster.py | 2 +- rds_instance.py | 68 ++++++++++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 30 deletions(-) diff --git a/rds_cluster.py b/rds_cluster.py index 16f2ed5a97a..1d2ed3fdd9b 100644 --- a/rds_cluster.py +++ b/rds_cluster.py @@ -758,7 +758,7 @@ def get_rds_method_attribute_name(cluster): method_name = 'modify_db_cluster' method_options_name = 'get_modify_options' elif creation_source == 'snapshot': - method_name = 'restore_db_cluster_from_db_snapshot' + method_name = 'restore_db_cluster_from_snapshot' method_options_name = 'get_restore_snapshot_options' elif creation_source == 's3': method_name = 'restore_db_cluster_from_s3' diff --git a/rds_instance.py b/rds_instance.py index 09cb6c06979..083042d7d91 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -38,7 +38,7 @@ type: str force_update_password: description: - - Set to True to update your cluster password with I(master_user_password). Since comparing passwords to determine + - Set to C(True) to update your instance password with I(master_user_password). Since comparing passwords to determine if it needs to be updated is not possible this is set to False by default to allow idempotence. 
type: bool default: False @@ -52,12 +52,12 @@ default: True read_replica: description: - - Set to False to promote a read replica cluster or true to create one. When creating a read replica C(creation_source) should + - Set to C(False) to promote a read replica instance or true to create one. When creating a read replica C(creation_source) should be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. type: bool wait: description: - - Whether to wait for the cluster to be available, stopped, or deleted. At a later time a wait_timeout option may be added. + - Whether to wait for the instance to be available, stopped, or deleted. At a later time a I(wait_timeout) option may be added. Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). @@ -76,7 +76,7 @@ type: bool apply_immediately: description: - - A value that specifies whether modifying a cluster with I(new_db_instance_identifier) and I(master_user_password) + - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password) should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes are applied during the next maintenance window. type: bool @@ -87,8 +87,8 @@ type: bool availability_zone: description: - - A list of EC2 Availability Zones that instances in the DB cluster can be created in. - May be used when creating a cluster or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az). + - A list of EC2 Availability Zones that the DB instance can be created in. + May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az). aliases: - az - zone @@ -97,7 +97,7 @@ description: - The number of days for which automated backups are retained. - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source to read replicas) - - May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. + - May be used when creating a new instance, when restoring from S3, or when modifying an instance. type: int ca_certificate_identifier: description: @@ -105,7 +105,7 @@ type: str character_set_name: description: - - The character set to associate with the DB cluster. + - The character set to associate with the DB instance. type: str copy_tags_to_snapshot: description: @@ -152,8 +152,11 @@ elements: str db_snapshot_identifier: description: - - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot). + - The identifier or ARN of the DB snapshot to restore from when using I(creation_source=snapshot). type: str + aliases: + - snapshot_identifier + - snapshot_id db_subnet_group_name: description: - The DB subnet group name to use for the DB instance. @@ -185,7 +188,7 @@ enable_iam_database_authentication: description: - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. - If this option is omitted when creating the cluster, Amazon RDS sets this to False. + If this option is omitted when creating the instance, Amazon RDS sets this to False. 
type: bool enable_performance_insights: description: @@ -256,7 +259,7 @@ type: str master_username: description: - - The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter. + - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter. aliases: - username type: str @@ -279,7 +282,7 @@ type: bool new_db_instance_identifier: description: - - The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB instance. The identifier must contain + - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the next maintenance window. @@ -369,14 +372,10 @@ type: str skip_final_snapshot: description: - - Whether a final DB cluster snapshot is created before the DB cluster is deleted. If this is false I(final_db_snapshot_identifier) + - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier) must be provided. type: bool default: false - snapshot_identifier: - description: - - The ARN of the DB snapshot to restore from when using I(creation_source=snapshot). - type: str source_db_instance_identifier: description: - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time @@ -410,7 +409,7 @@ type: str tags: description: - - A dictionary of key value pairs to assign the DB cluster. + - A dictionary of key value pairs to assign the DB instance. type: dict tde_credential_arn: description: @@ -439,7 +438,7 @@ - restore_from_latest vpc_security_group_ids: description: - - A list of EC2 VPC security groups to associate with the DB cluster. + - A list of EC2 VPC security groups to associate with the DB instance. 
type: list elements: str purge_security_groups: @@ -528,13 +527,25 @@ community.aws.rds_instance: id: "my-instance-id" state: present - engine: postgres - engine_version: 14.2 - username: "{{ username }}" - password: "{{ password }}" - db_instance_class: db.m6g.large - allocated_storage: "{{ allocated_storage }}" purge_iam_roles: yes + +# Restore DB instance from snapshot +- name: Create a snapshot and wait until completion + community.aws.rds_instance_snapshot: + instance_id: 'my-instance-id' + snapshot_id: 'my-new-snapshot' + state: present + wait: yes + register: snapshot + +- name: Restore DB from snapshot + community.aws.rds_instance: + id: 'my-restored-db' + creation_source: snapshot + snapshot_identifier: 'my-new-snapshot' + engine: mariadb + state: present + register: restored_db ''' RETURN = r''' @@ -1267,7 +1278,7 @@ def main(): db_name=dict(), db_parameter_group_name=dict(), db_security_groups=dict(type='list', elements='str'), - db_snapshot_identifier=dict(), + db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']), db_subnet_group_name=dict(aliases=['subnet_group']), deletion_protection=dict(type='bool'), domain=dict(), @@ -1304,7 +1315,6 @@ def main(): s3_ingestion_role_arn=dict(), s3_prefix=dict(), skip_final_snapshot=dict(type='bool', default=False), - snapshot_identifier=dict(), source_db_instance_identifier=dict(), source_engine=dict(choices=['mysql']), source_engine_version=dict(), @@ -1325,13 +1335,13 @@ def main(): ('engine', 'aurora-mysql', ('db_cluster_identifier',)), ('engine', 'aurora-postresql', ('db_cluster_identifier',)), ('storage_type', 'io1', ('iops', 'allocated_storage')), - ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), + ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')), ('creation_source', 's3', ( 's3_bucket_name', 'engine', 'master_username', 'master_user_password', 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), ] mutually_exclusive = [ - ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'), + ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'), ('use_latest_restorable_time', 'restore_time'), ('availability_zone', 'multi_az'), ] From 7a544fedd2ff998873d5398c6a05920ffd7d380a Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 25 May 2022 00:41:29 -0400 Subject: [PATCH 445/683] rds_instance_snapshot - add copy snapshot functionality (#1078) rds_instance_snapshot - add copy snapshot functionality Depends-On: ansible-collections/amazon.aws#776 Depends-On: #1116 SUMMARY Add support for copying a snapshot Fixes #210 Don't require db_instance_identifier on state = present (only required for creation) ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance_snapshot Reviewed-by: Markus Bergholz Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis --- rds_instance_snapshot.py | 163 +++++++++++++++++++++++++++++---------- 1 file changed, 121 insertions(+), 42 deletions(-) diff --git a/rds_instance_snapshot.py b/rds_instance_snapshot.py index 2fa30f92d09..0d7a50a06e7 100644 --- a/rds_instance_snapshot.py +++ b/rds_instance_snapshot.py @@ -32,15 +32,37 @@ type: str db_instance_identifier: description: - - Database instance identifier. Required when state is present. + - Database instance identifier. Required when creating a snapshot. aliases: - instance_id type: str + source_db_snapshot_identifier: + description: + - The identifier of the source DB snapshot. 
+ - Required when copying a snapshot. + - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier. + - If the source snapshot is in a different AWS region as the copy, specify the snapshot's ARN. + aliases: + - source_id + - source_snapshot_id + type: str + version_added: 3.3.0 + source_region: + description: + - The region that contains the snapshot to be copied. + type: str + version_added: 3.3.0 + copy_tags: + description: + - Whether to copy all tags from I(source_db_snapshot_identifier) to I(db_instance_identifier). + type: bool + default: False + version_added: 3.3.0 wait: description: - Whether or not to wait for snapshot creation or deletion. type: bool - default: 'no' + default: False wait_timeout: description: - how long before wait gives up, in seconds. @@ -52,13 +74,14 @@ type: dict purge_tags: description: - - whether to remove tags not present in the C(tags) parameter. + - whether to remove tags not present in the I(tags) parameter. default: True type: bool author: - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" - "Alina Buzachis (@alinabuzachis)" + - "Joseph Torcasso (@jatorcasso)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -70,6 +93,15 @@ community.aws.rds_instance_snapshot: db_instance_identifier: new-database db_snapshot_identifier: new-database-snapshot + register: snapshot + +- name: Copy snapshot from a different region and copy its tags + community.aws.rds_instance_snapshot: + id: new-database-snapshot-copy + region: us-east-1 + source_id: "{{ snapshot.db_snapshot_arn }}" + source_region: us-east-2 + copy_tags: yes - name: Delete snapshot community.aws.rds_instance_snapshot: @@ -163,6 +195,12 @@ returned: always type: list sample: [] +source_db_snapshot_identifier: + description: The DB snapshot ARN that the DB snapshot was copied from. + returned: when snapshot is a copy + type: str + sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot-source + version_added: 3.3.0 snapshot_create_time: description: Creation time of the snapshot. 
returned: always @@ -202,31 +240,41 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags def get_snapshot(snapshot_id): try: - response = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id) - except is_boto3_error_code("DBSnapshotNotFoundFault"): - return None - except is_boto3_error_code("DBSnapshotNotFound"): # pylint: disable=duplicate-except - return None + snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0] + snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn']) + except is_boto3_error_code("DBSnapshotNotFound"): + return {} except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) - return response['DBSnapshots'][0] + return snapshot -def fetch_tags(snapshot): - snapshot["Tags"] = get_tags(client, module, snapshot["DBSnapshotArn"]) +def get_parameters(parameters, method_name): + if method_name == 'copy_db_snapshot': + parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] - return camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"]) + required_options = get_boto3_client_method_parameters(client, method_name, required=True) + if any(parameters.get(k) is None for k in required_options): + module.fail_json(msg='To {0} requires the parameters: {1}'.format( + get_rds_method_attribute(method_name, module).operation_description, required_options)) + options = get_boto3_client_method_parameters(client, method_name) + parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) + + return parameters def ensure_snapshot_absent(): @@ -236,40 +284,68 @@ def ensure_snapshot_absent(): snapshot = get_snapshot(snapshot_name) if not snapshot: - return dict(changed=changed) + module.exit_json(changed=changed) elif snapshot and snapshot["Status"] != "deleting": snapshot, changed = call_method(client, module, "delete_db_snapshot", params) - return dict(changed=changed) + module.exit_json(changed=changed) -def ensure_snapshot_present(): - db_instance_identifier = module.params.get('db_instance_identifier') +def ensure_snapshot_present(params): + source_id = module.params.get('source_db_snapshot_identifier') snapshot_name = 
module.params.get('db_snapshot_identifier') changed = False snapshot = get_snapshot(snapshot_name) + + # Copy snapshot + if source_id: + changed |= copy_snapshot(params) + + # Create snapshot + elif not snapshot: + changed |= create_snapshot(params) + + # Snapshot exists and we're not creating a copy - modify exising snapshot + else: + changed |= modify_snapshot() + + snapshot = get_snapshot(snapshot_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + + +def create_snapshot(params): + method_params = get_parameters(params, 'create_db_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params) + + return changed + + +def copy_snapshot(params): + changed = False + snapshot_id = module.params.get('db_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + if not snapshot: - params = { - "DBSnapshotIdentifier": snapshot_name, - "DBInstanceIdentifier": db_instance_identifier - } - if module.params.get("tags"): - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) - _result, changed = call_method(client, module, "create_db_snapshot", params) + method_params = get_parameters(params, 'copy_db_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + result, changed = call_method(client, module, 'copy_db_snapshot', method_params) - if module.check_mode: - return dict(changed=changed) + return changed - return dict(changed=changed, **fetch_tags(get_snapshot(snapshot_name))) - existing_tags = get_tags(client, module, snapshot["DBSnapshotArn"]) - changed |= ensure_tags(client, module, snapshot["DBSnapshotArn"], existing_tags, - module.params["tags"], module.params["purge_tags"]) +def modify_snapshot(): + # TODO - add other modifications aside from purely tags + changed = False + snapshot_id = module.params.get('db_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) - if module.check_mode: - return dict(changed=changed) + if module.params.get('tags'): + changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) - return dict(changed=changed, **fetch_tags(get_snapshot(snapshot_name))) + return changed def main(): @@ -280,16 +356,18 @@ def main(): state=dict(choices=['present', 'absent'], default='present'), db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), db_instance_identifier=dict(aliases=['instance_id']), + source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), + copy_tags=dict(type='bool', default=False), + source_region=dict(type='str'), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['db_instance_identifier']]], - supports_check_mode=True, + supports_check_mode=True ) retry_decorator = AWSRetry.jittered_backoff(retries=10) @@ -298,12 +376,13 @@ def main(): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to connect to AWS.") - if module.params['state'] == 'absent': - ret_dict = ensure_snapshot_absent() - else: - ret_dict = ensure_snapshot_present() + state = module.params.get("state") + if state == 
'absent': + ensure_snapshot_absent() - module.exit_json(**ret_dict) + elif state == 'present': + params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) + ensure_snapshot_present(params) if __name__ == '__main__': From 63ac832071753bc2a8ad86520b7f401ffc75ceb4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 25 May 2022 17:28:05 +0200 Subject: [PATCH 446/683] ec2_lc - remove unused associate_public_ip_address option (#1158) ec2_lc - remove unused associate_public_ip_address option SUMMARY The associate_public_ip_address option has always been ignored by ec2_lc, remove it. ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_lc ADDITIONAL INFORMATION See also: ansible/ansible#64230 Reviewed-by: Alina Buzachis --- ec2_lc.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ec2_lc.py b/ec2_lc.py index de3a7a5443f..4b383f7279c 100644 --- a/ec2_lc.py +++ b/ec2_lc.py @@ -180,10 +180,6 @@ - When not set AWS will default to C(default). type: str choices: ['default', 'dedicated'] - associate_public_ip_address: - description: - - The I(associate_public_ip_address) option does nothing and will be removed after 2022-06-01 - type: bool extends_documentation_fragment: - amazon.aws.aws @@ -668,7 +664,6 @@ def main(): ramdisk_id=dict(), instance_profile_name=dict(), ebs_optimized=dict(default=False, type='bool'), - associate_public_ip_address=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), instance_monitoring=dict(default=False, type='bool'), assign_public_ip=dict(type='bool'), classic_link_vpc_security_groups=dict(type='list', elements='str'), From 56b6235d4c8f89a9d8b22f5c4b8113ed05838a54 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 25 May 2022 20:21:42 +0200 Subject: [PATCH 447/683] ec2_eip - remove wait_timeout (#1159) ec2_eip - remove wait_timeout SUMMARY Remove the wait_timeout option, it has always been ignored by the module ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_eip ADDITIONAL INFORMATION See also: ansible/ansible#64230 Reviewed-by: Markus Bergholz --- ec2_eip.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ec2_eip.py b/ec2_eip.py index e0031eaf10a..37ef0fa7540 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -88,10 +88,6 @@ - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP) only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). type: str - wait_timeout: - description: - - The I(wait_timeout) option does nothing and will be removed after 2022-06-01 - type: int extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -546,7 +542,6 @@ def main(): default=False), release_on_disassociation=dict(required=False, type='bool', default=False), allow_reassociation=dict(type='bool', default=False), - wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='community.aws'), private_ip_address=dict(), tags=dict(required=False, type='dict'), purge_tags=dict(required=False, type='bool', default=True), From 02214674e4ea45cd83aa611205abef368a1b46ec Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 09:44:32 +0200 Subject: [PATCH 448/683] s3_lifecycle - drop deprecated requester_pays parameter (#1165) s3_lifecycle - drop deprecated requester_pays parameter SUMMARY The requester_pays did nothing, drop it. 
ISSUE TYPE Feature Pull Request COMPONENT NAME s3_lifecycle ADDITIONAL INFORMATION See also: ansible/ansible#63989 Reviewed-by: Markus Bergholz --- s3_lifecycle.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index a9aed2b29fd..a9a5f5dbf65 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -133,10 +133,6 @@ I(storage_class) type: list elements: dict - requester_pays: - description: - - The I(requester_pays) option does nothing and will be removed after 2022-06-01 - type: bool wait: description: - Wait for the configuration to complete before returning. @@ -574,7 +570,6 @@ def main(): noncurrent_version_transition_days=dict(type='int'), noncurrent_version_transitions=dict(type='list', elements='dict'), prefix=dict(), - requester_pays=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), rule_id=dict(), state=dict(default='present', choices=['present', 'absent']), status=dict(default='enabled', choices=['enabled', 'disabled']), From aa2b32a9535bbf1658be7d00cafda7568700386d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 09:44:36 +0200 Subject: [PATCH 449/683] s3_sync - drop unused retries parameter (#1166) s3_sync - drop unused retries parameter SUMMARY The retries parameter was never used, remove it. ISSUE TYPE Feature Pull Request COMPONENT NAME s3_sync ADDITIONAL INFORMATION See also: ansible/ansible#63989 Reviewed-by: Markus Bergholz --- s3_sync.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/s3_sync.py b/s3_sync.py index 75c653f5712..f0135f6c13a 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -124,10 +124,6 @@ required: false default: no type: bool - retries: - description: - - The I(retries) option does nothing and will be removed after 2022-06-01 - type: str author: Ted Timmons (@tedder) extends_documentation_fragment: @@ -542,7 +538,6 @@ def main(): file_root=dict(required=True, type='path'), permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), - retries=dict(required=False, removed_at_date='2022-06-01', removed_from_collection='community.aws'), mime_map=dict(required=False, type='dict'), exclude=dict(required=False, default=".*"), include=dict(required=False, default="*"), From 73a51e945684c83ecf11ac641b6183de46686802 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 09:44:42 +0200 Subject: [PATCH 450/683] ecs_ecr - remove deprecated delete_policy option (#1161) ecs_ecr - remove deprecated delete_policy option SUMMARY Remove the deprecated delete_policy option. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_ecr ADDITIONAL INFORMATION See also: ansible/ansible#48997 Reviewed-by: Markus Bergholz --- ecs_ecr.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 2b22147212b..487bf452f7f 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -43,11 +43,9 @@ purge_policy: description: - If yes, remove the policy from the repository. - - Alias C(delete_policy) has been deprecated and will be removed after 2022-06-01. - Defaults to C(false). required: false type: bool - aliases: [ delete_policy ] image_tag_mutability: description: - Configure whether repository should be mutable (ie. an already existing tag can be overwritten) or not. 
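As a minimal migration sketch (the repository name is hypothetical), playbooks that used the removed delete_policy alias only need the key renamed to purge_policy; the behaviour is unchanged:

- name: Ensure the repository exists and strip any attached policy
  community.aws.ecs_ecr:
    name: example/repo
    purge_policy: true
    state: present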
@@ -536,8 +534,7 @@ def main(): policy=dict(required=False, type='json'), image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], default='mutable'), - purge_policy=dict(required=False, type='bool', aliases=['delete_policy'], - deprecated_aliases=[dict(name='delete_policy', date='2022-06-01', collection_name='community.aws')]), + purge_policy=dict(required=False, type='bool'), lifecycle_policy=dict(required=False, type='json'), purge_lifecycle_policy=dict(required=False, type='bool'), scan_on_push=(dict(required=False, type='bool', default=False)) From a51e68cd27a17812673657e50308d839182d31a5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 09:44:45 +0200 Subject: [PATCH 451/683] elb_network_lb - change default state from 'absent' to 'present' (#1167) elb_network_lb - change default state from 'absent' to 'present' SUMMARY change default state from 'absent' to 'present' to match our usual behaviour. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_network_lb ADDITIONAL INFORMATION See also: ansible/ansible#65468 Reviewed-by: Markus Bergholz --- elb_network_lb.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/elb_network_lb.py b/elb_network_lb.py index 2f664c721ee..768900832c5 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -108,11 +108,10 @@ state: description: - Create or destroy the load balancer. - - The current default is C(absent). However, this behavior is inconsistent with other modules - and as such the default will change to C(present) in a release after 2022-06-01. - To maintain the existing behavior explicitly set I(state=absent). + - The default changed from C('absent') to C('present') in release 4.0.0. choices: [ 'present', 'absent' ] type: str + default: 'present' tags: description: - A dictionary of one or more tags to assign to the load balancer. @@ -448,7 +447,7 @@ def main(): subnets=dict(type='list', elements='str'), subnet_mappings=dict(type='list', elements='dict'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], type='str'), + state=dict(choices=['present', 'absent'], type='str', default='present'), tags=dict(type='dict'), wait_timeout=dict(type='int'), wait=dict(type='bool'), @@ -456,20 +455,16 @@ def main(): ) ) + required_if = [ + ('state', 'present', ('subnets', 'subnet_mappings',), True) + ] + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=required_if, mutually_exclusive=[['subnets', 'subnet_mappings']]) # Check for subnets or subnet_mappings if state is present state = module.params.get("state") - if state == 'present': - if module.params.get("subnets") is None and module.params.get("subnet_mappings") is None: - module.fail_json(msg="'subnets' or 'subnet_mappings' is required when state=present") - - if state is None: - # See below, unless state==present we delete. Ouch. - module.deprecate('State currently defaults to absent. 
This is inconsistent with other modules' - ' and the default will be changed to `present` in a release after 2022-06-01', - date='2022-06-01', collection_name='community.aws') # Quick check of listeners parameters listeners = module.params.get("listeners") From 6cdc207a69b74ada56973cb6b98ed894c1f3d4f5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 09:44:49 +0200 Subject: [PATCH 452/683] iam_managed_policy - remove unused fail_on_delete parameter (#1168) iam_managed_policy - remove unused fail_on_delete parameter SUMMARY remove unused fail_on_delete parameter ISSUE TYPE Feature Pull Request COMPONENT NAME iam_managed_policy ADDITIONAL INFORMATION ansible/ansible#63961 Reviewed-by: Markus Bergholz --- iam_managed_policy.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 4c02054db21..3e30c4a667c 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -44,10 +44,6 @@ default: present choices: [ "present", "absent" ] type: str - fail_on_delete: - description: - - The I(fail_on_delete) option does nothing and will be removed after 2022-06-01 - type: bool author: "Dan Kozlowski (@dkhenry)" extends_documentation_fragment: @@ -345,7 +341,6 @@ def main(): policy=dict(type='json'), make_default=dict(type='bool', default=True), only_version=dict(type='bool', default=False), - fail_on_delete=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='community.aws'), state=dict(default='present', choices=['present', 'absent']), ) From bd10f480ae444273517d7809721376377d577c80 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 10:02:42 +0200 Subject: [PATCH 453/683] Fix issue when creating GSI with global_keys_only (#1162) Fix issue when creating GSI with global_keys_only SUMMARY fixes: #967 Undocumented requirement on NonKeyAttributes that it should be omitted rather than an empty list. ISSUE TYPE Bugfix Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION An exception occurred during task execution. To see the full traceback, use -vvv. The error was: Invalid length for parameter GlobalSecondaryIndexUpdates[0].Create.Projection.NonKeyAttributes, value: 0, valid min length: 1 fatal: [localhost]: FAILED! => {"boto3_version": "1.18.47", "botocore_version": "1.21.47", "changed": false, "msg": "Failed to update table: Parameter validation failed:\nInvalid length for parameter GlobalSecondaryIndexUpdates[0].Create.Projection.NonKeyAttributes, value: 0, valid min length: 1"} Reviewed-by: Markus Bergholz --- dynamodb_table.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 839178256aa..c1d9b65686e 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -724,7 +724,8 @@ def _generate_index(index, include_throughput=True): ProjectionType=index['type'], ) if index['type'] != 'ALL': - projection['NonKeyAttributes'] = non_key_attributes + if non_key_attributes: + projection['NonKeyAttributes'] = non_key_attributes else: if non_key_attributes: module.fail_json( From 66b30d6fc3b7f5314d1395b60ce7840b80ad052f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 10:02:46 +0200 Subject: [PATCH 454/683] data_pipeline - remove unused version option (#1160) data_pipeline - remove unused version option SUMMARY The version option has always been ignored and has now been removed. 
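As a minimal sketch (the pipeline name and description are hypothetical), existing tasks only need any version key deleted; the remaining options are unaffected:

- name: Ensure a pipeline exists
  community.aws.data_pipeline:
    name: example-pipeline
    description: An example pipeline
    state: present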
ISSUE TYPE Feature Pull Request COMPONENT NAME data_pipeline ADDITIONAL INFORMATION See also: ansible/ansible#64368 Reviewed-by: Markus Bergholz --- data_pipeline.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/data_pipeline.py b/data_pipeline.py index d10e7989f32..4fee6423165 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -121,10 +121,6 @@ description: - A dict of key:value pair(s) to add to the pipeline. type: dict - version: - description: - - The version option has never had any effect and will be removed after 2022-06-01. - type: str ''' EXAMPLES = r''' @@ -600,7 +596,6 @@ def create_pipeline(client, module): def main(): argument_spec = dict( name=dict(required=True), - version=dict(removed_at_date='2022-06-01', removed_from_collection='community.aws'), description=dict(required=False, default=''), objects=dict(required=False, type='list', default=[], elements='dict'), parameters=dict(required=False, type='list', default=[], elements='dict'), From b336565d32056cecccd417c97cac2cdfd5f65927 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 26 May 2022 10:43:25 +0200 Subject: [PATCH 455/683] ec2_metric_alarm - drop deprecated support for symbolic operators (#1164) ec2_metric_alarm - drop deprecated support for symbolic operators SUMMARY With the migration to boto3 symbolic operators were deprecated. Remove it. ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_metric_alarm ADDITIONAL INFORMATION See also: ansible/ansible#62669 Reviewed-by: Markus Bergholz --- ec2_metric_alarm.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/ec2_metric_alarm.py b/ec2_metric_alarm.py index effa9bd5c4e..8c30909907e 100644 --- a/ec2_metric_alarm.py +++ b/ec2_metric_alarm.py @@ -59,7 +59,6 @@ comparison: description: - Determines how the threshold value is compared - - Symbolic comparison operators have been deprecated, and will be removed after 2022-06-22. required: false type: str choices: @@ -67,10 +66,6 @@ - 'GreaterThanThreshold' - 'LessThanThreshold' - 'LessThanOrEqualToThreshold' - - '<=' - - '<' - - '>=' - - '>' threshold: description: - Sets the min/max bound for triggering the alarm. @@ -192,7 +187,7 @@ metric: "StatusCheckFailed_System" namespace: "AWS/EC2" statistic: "Minimum" - comparison: ">=" + comparison: "GreaterThanOrEqualToThreshold" threshold: 1.0 period: 60 evaluation_periods: 2 @@ -214,16 +209,6 @@ def create_metric_alarm(connection, module, params): alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - comparisons = {'<=': 'LessThanOrEqualToThreshold', - '<': 'LessThanThreshold', - '>=': 'GreaterThanOrEqualToThreshold', - '>': 'GreaterThanThreshold'} - if params['ComparisonOperator'] in ('<=', '<', '>', '>='): - module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated. 
Please use LessThanOrEqualToThreshold, ' - 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', - date='2022-06-01', collection_name='community.aws') - params['ComparisonOperator'] = comparisons[params['ComparisonOperator']] - if not isinstance(params['Dimensions'], list): fixed_dimensions = [] for key, value in params['Dimensions'].items(): @@ -314,7 +299,7 @@ def main(): namespace=dict(type='str'), statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', - 'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']), + 'GreaterThanOrEqualToThreshold']), threshold=dict(type='float'), period=dict(type='int'), unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', From 69fd7018ad4db9fc309a370193fec4af4fbc7c99 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 30 May 2022 11:50:47 +0200 Subject: [PATCH 456/683] aws_kms_info - remove deprecated keys_attr parameter (#1172) aws_kms_info - remove deprecated keys_attr parameter SUMMARY remove deprecated keys_attr parameter ISSUE TYPE Feature Pull Request COMPONENT NAME aws_kms_info ADDITIONAL INFORMATION See also: #838 Reviewed-by: Alina Buzachis --- aws_kms_info.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/aws_kms_info.py b/aws_kms_info.py index fabff61fcc1..2863fd0538e 100644 --- a/aws_kms_info.py +++ b/aws_kms_info.py @@ -45,14 +45,6 @@ description: Whether to get full details (tags, grants etc.) of keys pending deletion. default: False type: bool - keys_attr: - description: - - Returning the C(keys) attribute conflicted with the builtin keys() - method on dictionaries and as such was deprecated. - - This parameter now does nothing, and after version C(4.0.0) this - parameter will be removed. - type: bool - version_added: 2.0.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -496,7 +488,6 @@ def main(): key_id=dict(aliases=['key_arn']), filters=dict(type='dict'), pending_deletion=dict(type='bool', default=False), - keys_attr=dict(type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -515,12 +506,6 @@ def main(): filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] ret_params = dict(kms_keys=filtered_keys) - # We originally returned "keys" - if module.params.get('keys_attr') is not None: - module.deprecate("Returning results in the 'keys' attribute conflicts with the builtin keys() method on " - "dicts and as such was removed in version 3.0.0. Please use the kms_keys attribute. " - "This parameter is now ignored and will be removed in version 4.0.0.", - version='4.0.0', collection_name='community.aws') module.exit_json(**ret_params) From ce17c056185bc6c14b97ce77c5ba43a33989735b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 30 May 2022 11:50:50 +0200 Subject: [PATCH 457/683] elb_instance - remove ec2_elbs fact (#1173) elb_instance - remove ec2_elbs fact SUMMARY The ec2_elbs fact was deprecated when we migrated to boto3, remove it. 
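As a rough migration sketch (the instance ID is hypothetical), a playbook that previously consumed the ec2_elbs fact to re-register an instance can register the module result and use the updated_elbs return value instead:

- name: De-register the instance and remember which ELBs it was in
  community.aws.elb_instance:
    instance_id: i-0123456789abcdef0
    state: absent
  register: deregister_result

- name: Re-register the instance with the same load balancers
  community.aws.elb_instance:
    instance_id: i-0123456789abcdef0
    ec2_elbs: "{{ deregister_result.updated_elbs }}"
    state: present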
ISSUE TYPE Feature Pull Request COMPONENT NAME elb_instance ADDITIONAL INFORMATION See also: #773 Reviewed-by: Alina Buzachis --- elb_instance.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/elb_instance.py b/elb_instance.py index 51ec03d5702..b0dafecb9ee 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -55,8 +55,8 @@ default: 0 type: int notes: -- The ec2_elb fact currently set by this module has been deprecated and will no - longer be set after release 4.0.0 of the collection. +- The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release + 4.0.0 is no longer set. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -388,13 +388,8 @@ def main(): elif module.params['state'] == 'absent': elb_man.deregister(wait, timeout) - # XXX We're not an _fact module we shouldn't be returning a fact and poluting - # the namespace - ansible_facts = {'ec2_elbs': [lb['LoadBalancerName'] for lb in elb_man.lbs]} - module.exit_json( changed=elb_man.changed, - ansible_facts=ansible_facts, updated_elbs=list(elb_man.updated_elbs), ) From f4e447c4ad15dbc2d110db0ea50b6a7c5c593c24 Mon Sep 17 00:00:00 2001 From: Oleksandr Novak Date: Mon, 30 May 2022 11:51:33 +0200 Subject: [PATCH 458/683] ecs_service - fix validation for `placementConstraints` (#1170) ecs_service - fix validation for `placementConstraints` SUMMARY Fixes #1058 ISSUE TYPE Bugfix Pull Request COMPONENT NAME ecs_service Reviewed-by: Markus Bergholz Reviewed-by: Oleksandr Novak Reviewed-by: Alina Buzachis --- ecs_service.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 66f20b63d81..4f94a452ce9 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -113,6 +113,7 @@ type: str expression: description: A cluster query language expression to apply to the constraint. 
+ required: false type: str placement_strategy: description: @@ -584,7 +585,6 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan clientToken=client_token, role=role, deploymentConfiguration=deployment_configuration, - placementConstraints=placement_constraints, placementStrategy=placement_strategy ) if network_configuration: @@ -597,6 +597,13 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds if service_registries: params['serviceRegistries'] = service_registries + + # filter placement_constraint and left only those where value is not None + # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation + if placement_constraints: + params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints] + # desired count is not required if scheduling strategy is daemon if desired_count is not None: params['desiredCount'] = desired_count @@ -674,7 +681,7 @@ def main(): elements='dict', options=dict( type=dict(type='str'), - expression=dict(type='str') + expression=dict(required=False, type='str') ) ), placement_strategy=dict( From 42343b800fe0b6867926169a5ac482c8fc844e75 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 31 May 2022 21:16:43 +0200 Subject: [PATCH 459/683] Tagging - remove default empty dict where purge_tags default is False (#1186) Tagging - remove default empty dict where purge_tags default is False Depends-On: ansible-collections/amazon.aws#844 SUMMARY Deprecate purge_tags=False Remove default of empty dict for tags ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_kms.py plugins/modules/cloudfront_distribution.py plugins/modules/ec2_vpc_vpn.py plugins/modules/rds_param_group.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- aws_kms.py | 37 +++++++++++++++++++++------------- cloudfront_distribution.py | 41 +++++++++++++++++--------------------- ec2_vpc_vpn.py | 39 ++++++++++++++++++++---------------- rds_param_group.py | 35 +++++++++++++++++--------------- 4 files changed, 82 insertions(+), 70 deletions(-) diff --git a/aws_kms.py b/aws_kms.py index 95bc51834cc..046af605da1 100644 --- a/aws_kms.py +++ b/aws_kms.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: Perform various KMS management tasks description: - - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. + - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. options: alias: description: An alias for a key. For safety, even though KMS does not require keys @@ -114,9 +114,6 @@ A description of the CMK. Use a description that helps you decide whether the CMK is appropriate for a task. type: str - tags: - description: A dictionary of tags to apply to a key. - type: dict pending_window: description: - The number of days between requesting deletion of the CMK and when it will actually be deleted. @@ -126,11 +123,6 @@ type: int aliases: ['deletion_delay'] version_added: 1.4.0 - purge_tags: - description: Whether the I(tags) argument should cause tags not in the list to - be removed. - default: False - type: bool purge_grants: description: Whether the I(grants) argument should cause grants not in the list to be removed. 
@@ -196,8 +188,9 @@ - Will Thames (@willthames) - Mark Chappell (@tremble) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge notes: @@ -809,6 +802,9 @@ def update_description(connection, module, key, description): def update_tags(connection, module, key, desired_tags, purge_tags): + if desired_tags is None: + return False + # purge_tags needs to be explicitly set, so an empty tags list means remove # all tags @@ -933,8 +929,13 @@ def update_key(connection, module, key): def create_key(connection, module): key_usage = module.params.get('key_usage') key_spec = module.params.get('key_spec') + tags_list = ansible_dict_to_boto3_tag_list( + module.params['tags'] or {}, + # KMS doesn't use "Key" and "Value" as other APIs do. + tag_name_key_name='TagKey', tag_value_key_name='TagValue' + ) params = dict(BypassPolicyLockoutSafetyCheck=False, - Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'), + Tags=tags_list, KeyUsage=key_usage, CustomerMasterKeySpec=key_spec, Origin='AWS_KMS') @@ -1148,8 +1149,8 @@ def main(): key_id=dict(aliases=['key_arn']), description=dict(), enabled=dict(type='bool', default=True), - tags=dict(type='dict', default={}), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), grants=dict(type='list', default=[], elements='dict'), policy=dict(type='json'), purge_grants=dict(type='bool', default=False), @@ -1170,6 +1171,14 @@ def main(): kms = module.client('kms') + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", date='2024-05-01', collection_name='community.aws') diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 4c021d6f007..c07435345ea 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -12,19 +12,19 @@ version_added: 1.0.0 module: cloudfront_distribution -short_description: Create, update and delete AWS CloudFront distributions. +short_description: Create, update and delete AWS CloudFront distributions description: - - Allows for easy creation, updating and deletion of CloudFront distributions. + - Allows for easy creation, updating and deletion of CloudFront distributions. author: - Willem van Ketwich (@wilvk) - Will Thames (@willthames) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge options: @@ -58,21 +58,6 @@ C(YYYY-MM-DDTHH:MM:SS.ffffff). type: str - tags: - description: - - Should be input as a dict of key-value pairs. - - "Note that numeric keys or values must be wrapped in quotes. e.g. C(Priority: '1')" - type: dict - - purge_tags: - description: - - Specifies whether existing tags will be removed before adding new tags. - - When I(purge_tags=yes), existing tags are removed and I(tags) are added, if specified. - If no tags are specified, it removes all existing tags for the distribution. - - When I(purge_tags=no), existing tags are kept and I(tags) are added, if specified. 
- default: false - type: bool - alias: description: - The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only @@ -1492,6 +1477,8 @@ def list_tags_for_resource(client, module, arn): def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn): + if valid_tags is None: + return False changed = False to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags) if to_remove: @@ -2121,8 +2108,8 @@ def main(): comment=dict(), distribution_id=dict(), e_tag=dict(), - tags=dict(type='dict', default={}), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), alias=dict(), aliases=dict(type='list', default=[], elements='str'), purge_aliases=dict(type='bool', default=False), @@ -2161,6 +2148,14 @@ def main(): ] ) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) validation_mgr = CloudFrontValidationManager(module) @@ -2239,7 +2234,7 @@ def main(): if create: config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference) - result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags)) + result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {})) result = camel_dict_to_snake_dict(result) result['tags'] = list_tags_for_resource(client, module, result['arn']) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index df060eaa4c8..a1877326d33 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -10,14 +10,16 @@ --- module: ec2_vpc_vpn version_added: 1.0.0 -short_description: Create, modify, and delete EC2 VPN connections. +short_description: Create, modify, and delete EC2 VPN connections description: - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters option or specifying the VPN connection identifier. extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws -author: "Sloane Hertel (@s-hertel)" + - amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.tags.deprecated_purge +author: + - "Sloane Hertel (@s-hertel)" options: state: description: @@ -44,15 +46,6 @@ description: - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match. type: str - tags: - description: - - Tags to attach to the VPN connection. - type: dict - purge_tags: - description: - - Whether or not to delete VPN connections tags that are associated with the connection but not specified in the task. - type: bool - default: false static_only: description: - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP. 
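The purge_tags deprecation shim that this patch adds to each module's main() (it appears below for ec2_vpc_vpn) can be exercised in isolation. A rough sketch using stand-ins for module.params and module.deprecate, not Ansible's implementation:

class StubModule:
    # Minimal stand-in that records deprecation warnings instead of emitting them.
    def __init__(self, params):
        self.params = params
        self.warnings = []

    def deprecate(self, msg, version=None, collection_name=None):
        self.warnings.append((msg, version, collection_name))

module = StubModule({'purge_tags': None})  # user left purge_tags unset
if module.params.get('purge_tags') is None:
    module.deprecate(
        'The purge_tags parameter currently defaults to False.'
        ' For consistency across the collection, this default value'
        ' will change to True in release 5.0.0.',
        version='5.0.0', collection_name='community.aws')
    module.params['purge_tags'] = False  # preserve the old behaviour until 5.0.0

assert module.params['purge_tags'] is False
assert len(module.warnings) == 1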
@@ -580,8 +573,12 @@ def check_for_update(connection, module_params, vpn_connection_id): # Get changes to tags current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value') - tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) - changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) + if tags is None: + changes['tags_to_remove'] = [] + changes['tags_to_add'] = [] + else: + tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) + changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) # Get changes to routes if 'Routes' in vpn_connection: current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']] @@ -766,13 +763,13 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent']), filters=dict(type='dict', default={}), vpn_gateway_id=dict(type='str'), - tags=dict(default={}, type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), connection_type=dict(default='ipsec.1', type='str'), tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'), static_only=dict(default=False, type='bool'), customer_gateway_id=dict(type='str'), vpn_connection_id=dict(type='str'), - purge_tags=dict(type='bool', default=False), + purge_tags=dict(type='bool'), routes=dict(type='list', default=[], elements='str'), purge_routes=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), @@ -782,6 +779,14 @@ def main(): supports_check_mode=True) connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + state = module.params.get('state') parameters = dict(module.params) diff --git a/rds_param_group.py b/rds_param_group.py index 7d5d216d092..1d52ea51817 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: manage RDS parameter groups description: - - Creates, modifies, and deletes RDS parameter groups. + - Creates, modifies, and deletes RDS parameter groups. options: state: description: @@ -48,21 +48,13 @@ or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. aliases: [parameters] type: dict - tags: - description: - - Dictionary of tags to attach to the parameter group. - type: dict - purge_tags: - description: - - Whether or not to remove tags that do not appear in the C(tags) list. 
- type: bool - default: False author: - - "Scott Anderson (@tastychutney)" - - "Will Thames (@willthames)" + - "Scott Anderson (@tastychutney)" + - "Will Thames (@willthames)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge ''' @@ -216,7 +208,10 @@ def update_parameters(module, connection): def update_tags(module, connection, group, tags): + if tags is None: + return False changed = False + existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList'] to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, module.params['purge_tags']) @@ -319,8 +314,8 @@ def main(): description=dict(), params=dict(aliases=['parameters'], type='dict'), immediate=dict(type='bool', aliases=['apply_immediately']), - tags=dict(type='dict', default={}), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -328,6 +323,14 @@ def main(): supports_check_mode=True ) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + try: conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: From 753e8b27c5012397e525d9d3ff0baacc55d3930d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 31 May 2022 21:51:06 +0200 Subject: [PATCH 460/683] Tagging - Add simple deprecations for purge_tags=False (#1185) Tagging - Add simple deprecations for purge_tags=False Depends-On: ansible-collections/amazon.aws#844 SUMMARY Deprecate the use of purge_tags=False as a default ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_acm.py plugins/modules/route53_health_check.py plugins/modules/route53_zone.py plugins/modules/sqs_queue.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- aws_acm.py | 45 +++++++++++++++++++---------------------- route53_health_check.py | 33 +++++++++++++++--------------- route53_zone.py | 35 ++++++++++++++++---------------- sqs_queue.py | 24 ++++++++++------------ 4 files changed, 66 insertions(+), 71 deletions(-) diff --git a/aws_acm.py b/aws_acm.py index d6ba255d575..33c8d5fe903 100644 --- a/aws_acm.py +++ b/aws_acm.py @@ -28,8 +28,7 @@ DOCUMENTATION = r''' --- module: aws_acm -short_description: > - Upload and delete certificates in the AWS Certificate Manager service +short_description: Upload and delete certificates in the AWS Certificate Manager service version_added: 1.0.0 description: - > @@ -45,7 +44,7 @@ - > When I(state=present), if there is one certificate in ACM - with a C(Name) tag equal to the C(name_tag) parameter, + with a C(Name) tag equal to the I(name_tag) parameter, and an identical body and chain, this task will succeed without effect. - > @@ -139,6 +138,12 @@ - > If I(state=absent), you must provide exactly one of I(certificate_arn), I(domain_name) or I(name_tag). + - > + If both I(name_tag) and the 'Name' tag in I(tags) are set, + the values must be the same. 
+ - > + If the 'Name' tag in I(tags) is not set and I(name_tag) is set, + the I(name_tag) value is copied to I(tags). type: str aliases: [name] private_key: @@ -163,30 +168,14 @@ default: present type: str - tags: - description: - - Tags to apply to certificates imported in ACM. - - > - If both I(name_tag) and the 'Name' tag in I(tags) are set, - the values must be the same. - - > - If the 'Name' tag in I(tags) is not set and I(name_tag) is set, - the I(name_tag) value is copied to I(tags). - type: dict - version_added: 3.2.0 - - purge_tags: - description: - - whether to remove tags not present in the C(tags) parameter. - default: false - type: bool - version_added: 3.2.0 - +notes: + - Support for I(tags) and I(purge_tags) was added in release 3.2.0 author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge ''' @@ -504,8 +493,8 @@ def main(): domain_name=dict(aliases=['domain']), name_tag=dict(aliases=['name']), private_key=dict(no_log=True), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), state=dict(default='present', choices=['present', 'absent']), ) module = AnsibleAWSModule( @@ -514,6 +503,14 @@ def main(): ) acm = ACMServiceManager(module) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + # Check argument requirements if module.params['state'] == 'present': # at least one of these should be specified. diff --git a/route53_health_check.py b/route53_health_check.py index 22ce36beba8..5b7cce3c147 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -86,21 +86,14 @@ - Will default to C(3) if not specified on creation. choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] type: int - tags: - description: - - A hash/dictionary of tags to set on the health check. - type: dict - version_added: 2.1.0 - purge_tags: - description: - - Delete any tags not specified in I(tags). - default: false - type: bool - version_added: 2.1.0 -author: "zimbatm (@zimbatm)" +author: + - "zimbatm (@zimbatm)" +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge ''' EXAMPLES = ''' @@ -432,8 +425,8 @@ def main(): string_match=dict(), request_interval=dict(type='int', choices=[10, 30], default=30), failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), ) args_one_of = [ @@ -454,6 +447,14 @@ def main(): supports_check_mode=True, ) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' 
+ ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + state_in = module.params.get('state') ip_addr_in = module.params.get('ip_address') port_in = module.params.get('port') diff --git a/route53_zone.py b/route53_zone.py index ba51fcbb9e2..334233b4e44 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -46,23 +46,14 @@ - The reusable delegation set ID to be associated with the zone. - Note that you can't associate a reusable delegation set with a private hosted zone. type: str - tags: - description: - - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one. - type: dict - version_added: 2.1.0 - purge_tags: - description: - - Delete any tags not specified in the task that are on the zone. - This means you have to specify all the desired tags on each task affecting a zone. - default: false - type: bool - version_added: 2.1.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -author: "Christopher Troup (@minichate)" + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags.deprecated_purge +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. +author: + - "Christopher Troup (@minichate)" ''' EXAMPLES = r''' @@ -445,8 +436,8 @@ def main(): comment=dict(default=''), hosted_zone_id=dict(), delegation_set_id=dict(), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), ) mutually_exclusive = [ @@ -460,6 +451,14 @@ def main(): supports_check_mode=True, ) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' + ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + zone_in = module.params.get('zone').lower() state = module.params.get('state').lower() vpc_id = module.params.get('vpc_id') diff --git a/sqs_queue.py b/sqs_queue.py index 1be1936c55c..ba6432e93ac 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -87,20 +87,10 @@ description: - Enables content-based deduplication. Used for FIFOs only. - Defaults to C(false). - tags: - description: - - Tag dict to apply to the queue. - - To remove all tags set I(tags={}) and I(purge_tags=true). - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags). - type: bool - default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - + - amazon.aws.tags.deprecated_purge ''' RETURN = r''' @@ -483,11 +473,19 @@ def main(): kms_master_key_id=dict(type='str'), kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), content_based_deduplication=dict(type='bool'), - tags=dict(type='dict'), - purge_tags=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module.params.get('purge_tags') is None: + module.deprecate( + 'The purge_tags parameter currently defaults to False.' 
+ ' For consistency across the collection, this default value' + ' will change to True in release 5.0.0.', + version='5.0.0', collection_name='community.aws') + module.params['purge_tags'] = False + state = module.params.get('state') retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue']) try: From 1f1760fbb8e8c138c814a8cc874271040e38aa94 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 1 Jun 2022 01:20:57 -0400 Subject: [PATCH 461/683] lambda_info - refactor to fix bug when querying all lambdas (#1152) lambda_info - refactor to fix bug when querying all lambdas Depends-On: ansible/ansible-zuul-jobs#1558 SUMMARY Fix bug that forces query: config when getting info for all lambdas. Refactored to return the expected info Add extra cleanup at end of tests Fixes #1151 ISSUE TYPE Bugfix Pull Request COMPONENT NAME lambda_info ADDITIONAL INFORMATION This module also currently returns a dict of dicts (as opposed to a list of dicts), but I wanted to keep the scope of this PR to fixing the bug. Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Jill R --- lambda_info.py | 226 ++++++++++++++++++++++++------------------------- 1 file changed, 113 insertions(+), 113 deletions(-) diff --git a/lambda_info.py b/lambda_info.py index c76ecba3d1e..1ad2749c5f8 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -13,16 +13,17 @@ short_description: Gathers AWS Lambda function details description: - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. - - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases and - M(community.aws.lambda_event) to manage lambda event source mappings. + - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases, + M(community.aws.lambda_event) to manage lambda event source mappings, and M(community.aws.lambda_policy) to manage policy statements. options: query: description: - - Specifies the resource type for which to gather information. Leave blank to retrieve all information. + - Specifies the resource type for which to gather information. + - Defaults to C(all) when I(function_name) is specified. + - Defaults to C(config) when I(function_name) is NOT specified. choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ] - default: "all" type: str function_name: description: @@ -48,17 +49,20 @@ query: all function_name: myFunction register: my_function_details + # List all versions of a function - name: List function versions community.aws.lambda_info: query: versions function_name: myFunction register: my_function_versions -# List all lambda function versions -- name: List all function + +# List all info for all functions +- name: List all functions community.aws.lambda_info: query: all register: output + - name: show Lambda information ansible.builtin.debug: msg: "{{ output['function'] }}" @@ -120,108 +124,118 @@ def fix_return(node): return node_value -def alias_details(client, module): +def alias_details(client, module, function_name): """ Returns list of aliases for a specified function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get aliases") - else: - module.fail_json(msg='Parameter function_name required for query=aliases.') + try: + lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get aliases") - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def all_details(client, module): +def list_lambdas(client, module): """ - Returns all lambda related facts. + Returns queried facts for a specified function (or all functions). :param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ - lambda_info = dict() - function_name = module.params.get('function_name') if function_name: - lambda_info[function_name] = {} - lambda_info[function_name].update(config_details(client, module)[function_name]) - lambda_info[function_name].update(alias_details(client, module)[function_name]) - lambda_info[function_name].update(policy_details(client, module)[function_name]) - lambda_info[function_name].update(version_details(client, module)[function_name]) - lambda_info[function_name].update(mapping_details(client, module)[function_name]) - lambda_info[function_name].update(tags_details(client, module)[function_name]) + # Function name is specified - retrieve info on that function + function_names = [function_name] + else: - lambda_info.update(config_details(client, module)) + # Function name is not specified - retrieve all function names + all_function_info = _paginate(client, 'list_functions')['Functions'] + function_names = [function_info['FunctionName'] for function_info in all_function_info] + + query = module.params['query'] + lambdas = dict() + + for function_name in function_names: + lambdas[function_name] = {} - return lambda_info + if query == 'all': + lambdas[function_name].update(config_details(client, module, function_name)) + lambdas[function_name].update(alias_details(client, module, function_name)) + lambdas[function_name].update(policy_details(client, module, function_name)) + lambdas[function_name].update(version_details(client, module, function_name)) + lambdas[function_name].update(mapping_details(client, module, function_name)) + lambdas[function_name].update(tags_details(client, module, function_name)) + elif query == 'config': + lambdas[function_name].update(config_details(client, module, function_name)) -def config_details(client, module): + elif query == 'aliases': + lambdas[function_name].update(alias_details(client, module, function_name)) + + elif query == 'policy': + lambdas[function_name].update(policy_details(client, module, function_name)) + + elif query == 'versions': + 
lambdas[function_name].update(version_details(client, module, function_name)) + + elif query == 'mappings': + lambdas[function_name].update(mapping_details(client, module, function_name)) + + elif query == 'tags': + lambdas[function_name].update(tags_details(client, module, function_name)) + + return lambdas + + +def config_details(client, module, function_name): """ - Returns configuration details for one or all lambda functions. + Returns configuration details for a lambda function. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - else: - try: - lambda_info.update(function_list=_paginate(client, 'list_functions')['Functions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function_list=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get function list") - - functions = dict() - for func in lambda_info.pop('function_list', []): - func['tags'] = client.get_function(FunctionName=func['FunctionName']).get('Tags', {}) - functions[func['FunctionName']] = camel_dict_to_snake_dict(func) - return functions + try: + lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def mapping_details(client, module): +def mapping_details(client, module, function_name): """ Returns all lambda event source mappings. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() params = dict() - function_name = module.params.get('function_name') - if function_name: - params['FunctionName'] = module.params.get('function_name') + params['FunctionName'] = function_name if module.params.get('event_source_arn'): params['EventSourceArn'] = module.params.get('event_source_arn') @@ -233,86 +247,74 @@ def mapping_details(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get source event mappings") - if function_name: - return {function_name: camel_dict_to_snake_dict(lambda_info)} - return camel_dict_to_snake_dict(lambda_info) -def policy_details(client, module): +def policy_details(client, module, function_name): """ Returns policy attached to a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=policy.') + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def version_details(client, module): +def version_details(client, module, function_name): """ Returns all lambda function versions. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=versions.') + try: + lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(versions=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def tags_details(client, module): +def tags_details(client, module, function_name): """ - Returns tag details for one or all lambda functions. + Returns tag details for a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=tags.') + try: + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) def main(): @@ -323,7 +325,7 @@ def main(): """ argument_spec = dict( function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default='all'), + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), event_source_arn=dict(required=False, default=None), ) @@ -344,20 +346,18 @@ def main(): if len(function_name) > 64: module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + # create default values for query if not specified. + # if function name exists, query should default to 'all'. + # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. 
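    # Illustrative sketch (hypothetical inputs), not code from this patch;
    # the selection below boils down to:
    #     def default_query(function_name, query):
    #         return query or ('all' if function_name else 'config')
    #     default_query('myFunction', None)  ->  'all'
    #     default_query(None, None)          ->  'config'
    # so listing every lambda no longer triggers the per-function alias,
    # policy, version, mapping and tag lookups unless the user asks for them.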
+ if not module.params.get('query'): + if function_name: + module.params['query'] = 'all' + else: + module.params['query'] = 'config' - invocations = dict( - aliases='alias_details', - all='all_details', - config='config_details', - mappings='mapping_details', - policy='policy_details', - versions='version_details', - tags='tags_details', - ) + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - this_module_function = globals()[invocations[module.params['query']]] - all_facts = fix_return(this_module_function(client, module)) + all_facts = fix_return(list_lambdas(client, module)) results = dict(function=all_facts, changed=False) From d81af94a163a36eab664cff6c6cd8c258b320cf4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 1 Jun 2022 12:34:44 +0200 Subject: [PATCH 462/683] ecs_tag/efs_tag - add resource_tags as alias for tags (#1184) ecs_tag/efs_tag - add resource_tags as alias for tags SUMMARY Cleanup related to the tagging fragment ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_tag efs_tag ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- ecs_tag.py | 3 ++- efs_tag.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ecs_tag.py b/ecs_tag.py index 32915d6e0c5..87c000cd342 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -45,6 +45,7 @@ - A dictionary of tags to add or remove from the resource. - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. type: dict + aliases: ['resource_tags'] purge_tags: description: - Whether unspecified tags should be removed from the resource. @@ -155,7 +156,7 @@ def main(): argument_spec = dict( cluster_name=dict(required=True), resource=dict(required=False), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container']) diff --git a/efs_tag.py b/efs_tag.py index f44b28833dd..209c2a276d6 100644 --- a/efs_tag.py +++ b/efs_tag.py @@ -36,6 +36,7 @@ - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. type: dict required: True + aliases: ['resource_tags'] purge_tags: description: - Whether unspecified tags should be removed from the resource. 
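The resource_tags aliases being added to ecs_tag and efs_tag behave identically in both modules. A simplified sketch of alias resolution with a hypothetical helper (Ansible's real argument-spec handling is more involved):

# Hypothetical resolver: input supplied under an alias is folded into the
# canonical option name before the module body runs.
argument_spec = {
    'tags': {'type': 'dict', 'aliases': ['resource_tags']},
    'purge_tags': {'type': 'bool', 'default': False},
}

def resolve_aliases(spec, task_args):
    params = dict(task_args)
    for canonical, options in spec.items():
        for alias in options.get('aliases', []):
            if alias in params:
                params[canonical] = params.pop(alias)
    return params

assert resolve_aliases(argument_spec, {'resource_tags': {'env': 'dev'}}) == {'tags': {'env': 'dev'}}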
@@ -124,7 +125,7 @@ def main(): ''' argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True), + tags=dict(type='dict', required=True, aliases=['resource_tags']), purge_tags=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']) ) From 0d66fa5bb3ba2095dbd61c95b3413d47a6ab7317 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 1 Jun 2022 13:11:59 +0200 Subject: [PATCH 463/683] Tagging fragment - use fragment and remove default empty dict where purge_tags default is True (#1183) Tagging - remove default empty dict where purge_tags default is True Depends-On: ansible-collections/amazon.aws#844 SUMMARY Move modules over to the new tagging fragment Update modules to remove default tags of {} and use None instead, so that purging tags only happens if someone explicitly passes the tags parameter ISSUE TYPE Docs Pull Request Feature Pull Request COMPONENT NAME plugins/modules/ec2_transit_gateway.py plugins/modules/efs.py plugins/modules/eks_fargate_profile.py plugins/modules/elb_target_group.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ec2_transit_gateway.py | 94 +++++++----------------------------------- efs.py | 13 ++---- eks_fargate_profile.py | 28 ++++++------- elb_target_group.py | 30 +++++--------- 4 files changed, 43 insertions(+), 122 deletions(-) diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 4237376203b..be1082768fa 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -44,11 +44,6 @@ - Whether to enable AWS DNS support. default: true type: bool - purge_tags: - description: - - Whether to purge existing tags not included with tags argument. - default: true - type: bool state: description: - C(present) to ensure resource is created. @@ -56,10 +51,6 @@ default: present choices: [ "present", "absent"] type: str - tags: - description: - - A dictionary of resource tags - type: dict transit_gateway_id: description: - The ID of the transit gateway. @@ -80,11 +71,12 @@ default: 300 type: int -author: "Bob Boldin (@BobBoldin)" +author: + - "Bob Boldin (@BobBoldin)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -226,15 +218,11 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from time import sleep, time -from ansible.module_utils._text import to_text -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - ansible_dict_to_boto3_tag_list, - ansible_dict_to_boto3_filter_list, - AWSRetry, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - compare_aws_tags -) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags class AnsibleEc2Tgw(object): @@ -412,57 +400,6 @@ def delete_tgw(self, tgw_id): return result - def ensure_tags(self, tgw_id, tags, purge_tags): - """ - Ensures tags are applied to the transit gateway. 
Optionally will remove any - existing tags not in the tags argument if purge_tags is set to true - - :param tgw_id: The AWS id of the transit gateway - :param tags: list of tags to apply to the transit gateway. - :param purge_tags: when true existing tags not in tags parms are removed - :return: true if tags were updated - """ - tags_changed = False - filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id}) - try: - cur_tags = self._connection.describe_tags(Filters=filters) - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't describe tags") - - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) - - if to_update: - try: - if not self._check_mode: - AWSRetry.exponential_backoff()(self._connection.create_tags)( - Resources=[tgw_id], - Tags=ansible_dict_to_boto3_tag_list(to_update) - ) - self._results['changed'] = True - tags_changed = True - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't create tags {0} for resource {1}".format( - ansible_dict_to_boto3_tag_list(to_update), tgw_id)) - - if to_delete: - try: - if not self._check_mode: - tags_list = [] - for key in to_delete: - tags_list.append({'Key': key}) - - AWSRetry.exponential_backoff()(self._connection.delete_tags)( - Resources=[tgw_id], - Tags=tags_list - ) - self._results['changed'] = True - tags_changed = True - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Couldn't delete tags {0} for resource {1}".format( - ansible_dict_to_boto3_tag_list(to_delete), tgw_id)) - - return tags_changed - def ensure_tgw_present(self, tgw_id=None, description=None): """ Will create a tgw if no match to the tgw_id or description are found @@ -488,10 +425,11 @@ def ensure_tgw_present(self, tgw_id=None, description=None): except (BotoCoreError, ClientError) as e: self._module.fail_json_aws(e, msg='Unable to create Transit Gateway') - if self._module.params.get('tags') != tgw.get('tags'): - stringed_tags_dict = dict((to_text(k), to_text(v)) for k, v in self._module.params.get('tags').items()) - if self.ensure_tags(tgw['transit_gateway_id'], stringed_tags_dict, self._module.params.get('purge_tags')): - self._results['changed'] = True + self._results['changed'] |= ensure_ec2_tags( + self._connection, self._module, tgw['transit_gateway_id'], + tags=self._module.params.get('tags'), + purge_tags=self._module.params.get('purge_tags'), + ) self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id']) @@ -539,7 +477,7 @@ def setup_module_object(): dns_support=dict(type='bool', default='yes'), purge_tags=dict(type='bool', default='yes'), state=dict(default='present', choices=['present', 'absent']), - tags=dict(default=dict(), type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), transit_gateway_id=dict(type='str'), vpn_ecmp_support=dict(type='bool', default='yes'), wait=dict(type='bool', default='yes'), diff --git a/efs.py b/efs.py index a67c83be3c7..a78f832d971 100644 --- a/efs.py +++ b/efs.py @@ -28,12 +28,6 @@ required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN. type: str - purge_tags: - description: - - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter - is not set then tags will not be modified. 
- type: bool - default: true state: description: - Allows to create, search and destroy Amazon EFS file system. @@ -107,8 +101,9 @@ version_added: 2.1.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -725,7 +720,7 @@ def main(): purge_tags=dict(default=True, type='bool'), id=dict(required=False, type='str', default=None), name=dict(required=False, type='str', default=None), - tags=dict(required=False, type="dict", default={}), + tags=dict(required=False, type="dict", aliases=['resource_tags']), targets=dict(required=False, type="list", default=[], elements='dict'), performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"), transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None), diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 5ddad654d7f..72164a36fea 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -12,8 +12,9 @@ version_added: 4.0.0 short_description: Manage EKS Fargate Profile description: - - Manage EKS Fargate Profile. -author: Tiago Jarra (@tjarra) + - Manage EKS Fargate Profile. +author: + - Tiago Jarra (@tjarra) options: name: description: Name of EKS Fargate Profile. @@ -54,14 +55,6 @@ - present default: present type: str - tags: - description: A dictionary of resource tags. - type: dict - purge_tags: - description: - - Purge existing tags that are not found in the cluster. - type: bool - default: true wait: description: >- Specifies whether the module waits until the profile is created or deleted before moving on. @@ -74,8 +67,9 @@ default: 1200 type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -184,9 +178,13 @@ def validate_tags(client, module, fargate_profile): changed = False + desired_tags = module.params.get('tags') + if desired_tags is None: + return False + try: existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) except(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) @@ -215,7 +213,7 @@ def create_or_update_fargate_profile(client, module): role_arn = module.params['role_arn'] cluster_name = module.params['cluster_name'] selectors = module.params['selectors'] - tags = module.params['tags'] + tags = module.params['tags'] or {} wait = module.params.get('wait') fargate_profile = get_fargate_profile(client, module, name, cluster_name) @@ -325,7 +323,7 @@ def main(): namespace=dict(type='str'), labels=dict(type='dict', default={}) )), - tags=dict(type='dict', default={}), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), state=dict(choices=['absent', 'present'], default='present'), wait=dict(default=False, type='bool'), diff --git a/elb_target_group.py b/elb_target_group.py index 917e352c75b..c0a71c3c0e5 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -12,10 +12,11 @@ version_added: 1.0.0 short_description: Manage a target group for an Application or Network load balancer 
description: - - Manage an AWS Elastic Load Balancer target group. See - U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or - U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details. -author: "Rob White (@wimnat)" + - Manage an AWS Elastic Load Balancer target group. See + U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or + U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details. +author: + - "Rob White (@wimnat)" options: deregistration_delay_timeout: description: @@ -88,13 +89,6 @@ required: false choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] type: str - purge_tags: - description: - - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the tag parameter is not set then - tags will not be modified. - required: false - default: yes - type: bool state: description: - Create or destroy the target group. @@ -143,11 +137,6 @@ - Requires the I(health_check_protocol) parameter to be set. required: false type: str - tags: - description: - - A dictionary of one or more tags to assign to the target group. - required: false - type: dict target_type: description: - The type of target that you must specify when registering targets with this target group. The possible values are @@ -208,8 +197,9 @@ default: 200 type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags notes: - Once a target group has been created, only its health check can then be modified using subsequent calls @@ -857,7 +847,7 @@ def create_or_update_target_group(connection, module): changed = True # Tags - only need to play with tags if tags parameter has been set to something - if tags: + if tags is not None: # Get tags current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn']) @@ -931,7 +921,7 @@ def main(): load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']), state=dict(required=True, choices=['present', 'absent']), successful_response_codes=dict(), - tags=dict(default={}, type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']), targets=dict(type='list', elements='dict'), unhealthy_threshold_count=dict(type='int'), From 48833b490c72b35ef019391e0bae25f173b354b4 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 1 Jun 2022 13:12:03 +0200 Subject: [PATCH 464/683] ecs_service and ecs_service_info: add name and service aliases (#1187) ecs_service and ecs_service_info: add name and service aliases SUMMARY while ecs_service is using name for the service name parameter, ecs_service_info is using service for the same purpose. this PR adds just aliases to both modules, to use the same parameter to address the ecs service name. 
ref #1142 ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request COMPONENT NAME ecs_service ecs_service_info Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- ecs_service.py | 3 ++- ecs_service_info.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 4f94a452ce9..9f35c26b9fa 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -34,6 +34,7 @@ - The name of the service. required: true type: str + aliases: ['service'] cluster: description: - The name of the cluster in which the service exists. @@ -662,7 +663,7 @@ def health_check_setable(self, params): def main(): argument_spec = dict( state=dict(required=True, choices=['present', 'absent', 'deleting']), - name=dict(required=True, type='str'), + name=dict(required=True, type='str', aliases=['service']), cluster=dict(required=False, type='str'), task_definition=dict(required=False, type='str'), load_balancers=dict(required=False, default=[], type='list', elements='dict'), diff --git a/ecs_service_info.py b/ecs_service_info.py index e6167afd09c..b04f94241f5 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -40,6 +40,7 @@ required: false type: list elements: str + aliases: ['name'] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -214,7 +215,7 @@ def main(): details=dict(type='bool', default=False), events=dict(type='bool', default=True), cluster=dict(), - service=dict(type='list', elements='str') + service=dict(type='list', elements='str', aliases=['name']) ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) From d1fdc80adb02cb564924c1b10b789245a700b81b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 1 Jun 2022 15:03:38 +0200 Subject: [PATCH 465/683] Tagging fragment - Move simplest cases over to the docs fragment. (#1182) Tagging fragment - Move simplest cases over to the docs fragment. Depends-On: ansible-collections/amazon.aws#844 SUMMARY Migrate simplest cases over to the new docs fragment and add resource_tags as an alias to tags. 
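The behavioural effect of dropping the empty-dict default can be shown standalone. These modules default purge_tags=True, so an implicit tags={} would purge every unmanaged tag on each run, while tags=None skips tag handling entirely; a simplified reconciliation sketch with hypothetical tag data (not the modules' code):

def plan_tag_changes(existing, desired, purge_tags=True):
    if desired is None:  # new default: tags parameter omitted
        return {}, []    # leave existing tags untouched
    to_set = {key: value for key, value in desired.items() if existing.get(key) != value}
    to_unset = [key for key in existing if key not in desired] if purge_tags else []
    return to_set, to_unset

assert plan_tag_changes({'owner': 'data'}, None) == ({}, [])       # parameter omitted: no-op
assert plan_tag_changes({'owner': 'data'}, {}) == ({}, ['owner'])  # old default: purge all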
ISSUE TYPE Docs Pull Request Feature Pull Request COMPONENT NAME changelogs/fragments/1182-tagging.yml plugins/modules/aws_glue_job.py plugins/modules/aws_msk_cluster.py plugins/modules/aws_secret.py plugins/modules/aws_step_functions_state_machine.py plugins/modules/dynamodb_table.py plugins/modules/ec2_eip.py plugins/modules/ec2_transit_gateway_vpc_attachment.py plugins/modules/ec2_vpc_peer.py plugins/modules/elb_application_lb.py plugins/modules/elb_network_lb.py plugins/modules/iam_role.py plugins/modules/iam_user.py plugins/modules/networkfirewall.py plugins/modules/networkfirewall_policy.py plugins/modules/networkfirewall_rule_group.py plugins/modules/rds_cluster.py plugins/modules/rds_instance.py plugins/modules/rds_instance_snapshot.py plugins/modules/rds_option_group.py plugins/modules/rds_subnet_group.py plugins/modules/redshift.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_glue_job.py | 25 +++++------------ aws_msk_cluster.py | 12 +++------ aws_secret.py | 30 +++++++-------------- aws_step_functions_state_machine.py | 30 ++++++--------------- ec2_eip.py | 39 +++++++++++---------------- ec2_transit_gateway_vpc_attachment.py | 22 +++------------ ec2_vpc_peer.py | 23 ++++++---------- elb_application_lb.py | 22 +++++---------- elb_network_lb.py | 25 ++++++----------- iam_role.py | 20 +++++--------- iam_user.py | 22 +++++---------- networkfirewall_policy.py | 22 +++------------ networkfirewall_rule_group.py | 20 +++----------- rds_cluster.py | 17 +++--------- rds_instance.py | 16 +++-------- rds_option_group.py | 19 ++++--------- rds_subnet_group.py | 27 +++++++------------ redshift.py | 15 +++-------- 18 files changed, 116 insertions(+), 290 deletions(-) diff --git a/aws_glue_job.py b/aws_glue_job.py index 4e278c81734..e95e9d69163 100644 --- a/aws_glue_job.py +++ b/aws_glue_job.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: Manage an AWS Glue job description: - - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. + - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. author: - "Rob White (@wimnat)" - "Vijayanand Sharma (@vijayanandsharma)" @@ -77,13 +77,6 @@ - The number of workers of a defined workerType that are allocated when a job runs. type: int version_added: 1.5.0 - purge_tags: - description: - - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - - If the I(tags) parameter is not set then tags will not be modified. - default: true - type: bool - version_added: 2.2.0 role: description: - The name or ARN of the IAM role associated with this job. @@ -95,12 +88,6 @@ required: true choices: [ 'present', 'absent' ] type: str - tags: - description: - - A hash/dictionary of tags to be applied to the job. - - Remove completely or specify an empty dictionary to remove all tags. - type: dict - version_added: 2.2.0 timeout: description: - The job timeout in minutes. @@ -111,10 +98,12 @@ choices: [ 'Standard', 'G.1X', 'G.2X' ] type: str version_added: 1.5.0 +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.2.0. 
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -463,7 +452,7 @@ def main(): purge_tags=dict(type='bool', default=True), role=dict(type='str'), state=dict(required=True, choices=['present', 'absent'], type='str'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), timeout=dict(type='int'), worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), ) diff --git a/aws_msk_cluster.py b/aws_msk_cluster.py index 320b867680b..559660d786f 100644 --- a/aws_msk_cluster.py +++ b/aws_msk_cluster.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: aws_msk_cluster -short_description: Manage Amazon MSK clusters. +short_description: Manage Amazon MSK clusters version_added: "2.0.0" description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters. @@ -198,16 +198,10 @@ description: How many seconds to wait. Cluster creation can take up to 20-30 minutes. type: int default: 3600 - tags: - description: Tag dictionary to apply to the cluster. - type: dict - purge_tags: - description: Remove tags not listed in I(tags) when tags is specified. - default: true - type: bool extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags notes: - All operations are time consuming, for example create takes 20-30 minutes, update kafka version -- more than one hour, update configuration -- 10-15 minutes; @@ -769,7 +763,7 @@ def main(): ), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", default=3600), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), ) diff --git a/aws_secret.py b/aws_secret.py index 7ebce8da603..044ba1061ae 100644 --- a/aws_secret.py +++ b/aws_secret.py @@ -10,10 +10,11 @@ --- module: aws_secret version_added: 1.0.0 -short_description: Manage secrets stored in AWS Secrets Manager. +short_description: Manage secrets stored in AWS Secrets Manager description: - - Create, update, and delete secrets stored in AWS Secrets Manager. -author: "REY Remi (@rrey)" + - Create, update, and delete secrets stored in AWS Secrets Manager. +author: + - "REY Remi (@rrey)" options: name: description: @@ -60,19 +61,6 @@ required: false type: json version_added: 3.1.0 - tags: - description: - - Specifies a dictionary of user-defined tags that are attached to the secret. - - To remove all tags set I(tags={}) and I(purge_tags=true). - type: dict - purge_tags: - description: - - If I(purge_tags=true) and I(tags) is set, existing tags will be purged from the resource - to match exactly what is defined by I(tags) parameter. - type: bool - required: false - default: true - version_added: 4.0.0 rotation_lambda: description: - Specifies the ARN of the Lambda function that can rotate the secret. @@ -83,9 +71,11 @@ default: 30 type: int extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws - + - amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.tags +notes: + - Support for I(purge_tags) was added in release 4.0.0. 
''' EXAMPLES = r''' @@ -432,7 +422,7 @@ def main(): 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), 'resource_policy': dict(type='json', default=None), - 'tags': dict(type='dict', default=None), + 'tags': dict(type='dict', default=None, aliases=['resource_tags']), 'purge_tags': dict(type='bool', default=True), 'rotation_lambda': dict(), 'rotation_interval': dict(type='int', default=30), diff --git a/aws_step_functions_state_machine.py b/aws_step_functions_state_machine.py index be9d594d7c2..452ebc4237a 100644 --- a/aws_step_functions_state_machine.py +++ b/aws_step_functions_state_machine.py @@ -11,14 +11,11 @@ --- module: aws_step_functions_state_machine version_added: 1.0.0 - short_description: Manage AWS Step Functions state machines - - description: - - Create, update and delete state machines in AWS Step Functions. - - Calling the module in C(state=present) for an existing AWS Step Functions state machine - will attempt to update the state machine definition, IAM Role, or tags with the provided data. + - Create, update and delete state machines in AWS Step Functions. + - Calling the module in C(state=present) for an existing AWS Step Functions state machine + will attempt to update the state machine definition, IAM Role, or tags with the provided data. options: name: @@ -44,24 +41,13 @@ default: present choices: [ present, absent ] type: str - tags: - description: - - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one. - type: dict - purge_tags: - description: - - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - If the I(tags) parameter is not set then tags will not be modified. - default: yes - type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags author: - - Tom De Keyser (@tdekeyser) + - Tom De Keyser (@tdekeyser) ''' EXAMPLES = ''' @@ -210,7 +196,7 @@ def main(): definition=dict(type='json'), role_arn=dict(type='str'), state=dict(choices=['present', 'absent'], default='present'), - tags=dict(default=None, type='dict'), + tags=dict(default=None, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, type='bool'), ) module = AnsibleAWSModule( diff --git a/ec2_eip.py b/ec2_eip.py index 37ef0fa7540..531af689792 100644 --- a/ec2_eip.py +++ b/ec2_eip.py @@ -14,8 +14,8 @@ version_added: 1.0.0 short_description: manages EC2 elastic IP (EIP) addresses. description: - - This module can allocate or release an EIP. - - This module can associate/disassociate an EIP with instances or network interfaces. + - This module can allocate or release an EIP. + - This module can associate/disassociate an EIP with instances or network interfaces. options: device_id: description: @@ -64,16 +64,6 @@ network interface or instance to be re-associated with the specified instance or interface. default: false type: bool - tags: - description: A dictionary of tags to apply to the EIP. - type: dict - version_added: 2.1.0 - purge_tags: - description: Whether the I(tags) argument should cause tags not in the - dictionary to be removed. - default: True - type: bool - version_added: 2.1.0 tag_name: description: - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse @@ -89,18 +79,21 @@ only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). 
type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags -author: "Rick Mendes (@rickmendes) " +author: + - "Rick Mendes (@rickmendes) " notes: - - There may be a delay between the time the EIP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and - pause to delay further playbook execution until the instance is reachable, - if necessary. - - This module returns multiple changed statuses on disassociation or release. - It returns an overall status based on any changes occurring. It also returns - individual changed statuses for disassociation and release. + - There may be a delay between the time the EIP is assigned and when + the cloud instance is reachable via the new address. Use wait_for and + pause to delay further playbook execution until the instance is reachable, + if necessary. + - This module returns multiple changed statuses on disassociation or release. + It returns an overall status based on any changes occurring. It also returns + individual changed statuses for disassociation and release. + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. ''' EXAMPLES = ''' @@ -543,7 +536,7 @@ def main(): release_on_disassociation=dict(required=False, type='bool', default=False), allow_reassociation=dict(type='bool', default=False), private_ip_address=dict(), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(required=False, type='bool', default=True), tag_name=dict(), tag_value=dict(), diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py index 13518fdbe2a..7f2fc2988ba 100644 --- a/ec2_transit_gateway_vpc_attachment.py +++ b/ec2_transit_gateway_vpc_attachment.py @@ -81,22 +81,6 @@ for the life of a flow to send traffic to. type: bool required: false - tags: - description: - - A dictionary representing the tags associated with the Transit Gateway - attachment. - - 'For example C({"Example Tag": "some example value"})' - - Unless I(purge_tags=False) all other tags will be removed from the - attachment. - type: dict - required: false - purge_tags: - description: - - If I(purge_tags=true), existing tags will be purged from the resource - to match exactly what is defined by I(tags) parameter. - type: bool - required: false - default: true wait: description: - Whether to wait for the Transit Gateway attachment to reach the @@ -111,10 +95,12 @@ - Defaults to 600 seconds. type: int required: false -author: "Mark Chappell (@tremble)" +author: + - "Mark Chappell (@tremble)" extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -246,7 +232,7 @@ def main(): name=dict(type='str', required=False), subnets=dict(type='list', elements='str', required=False), purge_subnets=dict(type='bool', required=False, default=True), - tags=dict(type='dict', required=False), + tags=dict(type='dict', required=False, aliases=['resource_tags']), purge_tags=dict(type='bool', required=False, default=True), appliance_mode_support=dict(type='bool', required=False), dns_support=dict(type='bool', required=False), diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index b651b173ce4..79bcbf58b59 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -39,17 +39,6 @@ - The AWS account number for cross account peering. 
required: false type: str - tags: - description: - - Dictionary of tags to look for and apply when creating a Peering Connection. - required: false - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags). - type: bool - default: true - version_added: 2.0.0 state: description: - Create, delete, accept, reject a peering connection. @@ -63,10 +52,14 @@ required: false default: false type: bool -author: Mike Mochan (@mmochan) +notes: + - Support for I(purge_tags) was added in release 2.0.0. +author: + - Mike Mochan (@mmochan) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -547,7 +540,7 @@ def main(): peer_region=dict(), peering_id=dict(), peer_owner_id=dict(), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, type='bool'), state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), wait=dict(default=False, type='bool'), diff --git a/elb_application_lb.py b/elb_application_lb.py index a7c75c00cd3..2e84242d382 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -24,8 +24,9 @@ version_added: 1.0.0 short_description: Manage an Application Load Balancer description: - - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. -author: "Rob White (@wimnat)" + - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. +author: + - "Rob White (@wimnat)" options: access_logs_enabled: description: @@ -154,12 +155,6 @@ - If the I(listeners) parameter is not set then listeners will not be modified. default: yes type: bool - purge_tags: - description: - - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - - If the I(tags) parameter is not set then tags will not be modified. - default: yes - type: bool subnets: description: - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from @@ -186,10 +181,6 @@ default: present choices: [ 'present', 'absent' ] type: str - tags: - description: - - A dictionary of one or more tags to assign to the load balancer. - type: dict wait: description: - Wait for the load balancer to have a state of 'active' before completing. A status check is @@ -217,8 +208,9 @@ type: bool version_added: 3.2.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. @@ -771,7 +763,7 @@ def main(): security_groups=dict(type='list', elements='str'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), state=dict(choices=['present', 'absent'], default='present'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), waf_fail_open=dict(type='bool'), wait_timeout=dict(type='int'), wait=dict(default=False, type='bool'), diff --git a/elb_network_lb.py b/elb_network_lb.py index 768900832c5..00b8f466f8a 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -13,9 +13,10 @@ version_added: 1.0.0 short_description: Manage a Network Load Balancer description: - - Manage an AWS Network Elastic Load Balancer. 
See - U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details. -author: "Rob White (@wimnat)" + - Manage an AWS Network Elastic Load Balancer. See + U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details. +author: + - "Rob White (@wimnat)" options: cross_zone_load_balancing: description: @@ -77,12 +78,6 @@ - If the I(listeners) parameter is not set then listeners will not be modified. default: true type: bool - purge_tags: - description: - - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - - If the I(tags) parameter is not set then tags will not be modified. - default: true - type: bool subnet_mappings: description: - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP @@ -112,10 +107,6 @@ choices: [ 'present', 'absent' ] type: str default: 'present' - tags: - description: - - A dictionary of one or more tags to assign to the load balancer. - type: dict wait: description: - Whether or not to wait for the network load balancer to reach the desired state. @@ -130,9 +121,9 @@ choices: [ 'ipv4', 'dualstack' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. @@ -448,7 +439,7 @@ def main(): subnet_mappings=dict(type='list', elements='dict'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), state=dict(choices=['present', 'absent'], type='str', default='present'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), wait_timeout=dict(type='int'), wait=dict(type='bool'), ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) diff --git a/iam_role.py b/iam_role.py index 814dbbb8b99..76cd04950d3 100644 --- a/iam_role.py +++ b/iam_role.py @@ -12,7 +12,8 @@ short_description: Manage AWS IAM roles description: - Manage AWS IAM roles. -author: "Rob White (@wimnat)" +author: + - "Rob White (@wimnat)" options: path: description: @@ -78,15 +79,6 @@ - Only applies when I(state=absent). default: false type: bool - tags: - description: - - Tag dict to apply to the queue. - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags) when tags is specified. - default: true - type: bool wait_timeout: description: - How long (in seconds) to wait for creation / update to complete. 
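For iam_role the change is again documentation-only plus the resource_tags alias; a sketch of a task using the alias, with the role name and policy file hypothetical:

- community.aws.iam_role:
    name: my-app-role                                                   # hypothetical role name
    assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"  # hypothetical policy file
    state: present
    resource_tags:
      Owner: platform-team
    purge_tags: false    # keep unrelated tags already present on the role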
@@ -99,9 +91,9 @@ default: True type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -673,7 +665,7 @@ def main(): create_instance_profile=dict(type='bool', default=True), delete_instance_profile=dict(type='bool', default=False), purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), wait_timeout=dict(default=120, type='int'), diff --git a/iam_user.py b/iam_user.py index c5e7160f98b..b6b3ce34873 100644 --- a/iam_user.py +++ b/iam_user.py @@ -70,18 +70,6 @@ default: false type: bool aliases: ['purge_policy', 'purge_managed_policies'] - tags: - description: - - Tag dict to apply to the user. - required: false - type: dict - version_added: 2.1.0 - purge_tags: - description: - - Remove tags not listed in I(tags) when tags is specified. - default: true - type: bool - version_added: 2.1.0 wait: description: - When I(wait=True) the module will wait for up to I(wait_timeout) seconds @@ -95,10 +83,12 @@ default: 120 type: int version_added: 2.2.0 +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -552,7 +542,7 @@ def main(): managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), state=dict(choices=['present', 'absent'], required=True), purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), wait_timeout=dict(default=120, type='int'), diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py index 5672a83501c..18a5565129b 100644 --- a/networkfirewall_policy.py +++ b/networkfirewall_policy.py @@ -124,22 +124,6 @@ required: false default: True aliases: ['purge_custom_stateless_actions'] - tags: - description: - - A dictionary representing the tags associated with the policy. - - 'For example C({"Example Tag": "some example value"})' - - Unless I(purge_tags=False) all other tags will be removed from the - policy. - type: dict - required: false - purge_tags: - description: - - If I(purge_tags=true) and I(tags) is defined existing tags will be - purged from the resource to match exactly what is defined by the - I(tags) parameter. 
- type: bool - required: false - default: True wait: description: - Whether to wait for the firewall policy to reach the @@ -156,10 +140,12 @@ required: false -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -371,7 +357,7 @@ def main(): arn=dict(type='str', required=False), state=dict(type='str', required=False, default='present', choices=['present', 'absent']), description=dict(type='str', required=False), - tags=dict(type='dict', required=False), + tags=dict(type='dict', required=False, aliases=['resource_tags']), purge_tags=dict(type='bool', required=False, default=True), stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']), stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']), diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py index a0898b30884..fef080bcd3c 100644 --- a/networkfirewall_rule_group.py +++ b/networkfirewall_rule_group.py @@ -248,20 +248,6 @@ U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html). type: dict required: false - tags: - description: - - A dictionary representing the tags associated with the rule group. - - 'For example C({"Example Tag": "some example value"})' - - Unless I(purge_tags=False) all other tags will be removed from the rule - group. - type: dict - required: false - purge_tags: - description: - - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. - type: bool - required: false - default: True wait: description: - Whether to wait for the firewall rule group to reach the @@ -278,10 +264,12 @@ required: false -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -764,7 +752,7 @@ def main(): rule_strings=dict(type='list', elements='str', required=False), domain_list=dict(type='dict', options=domain_list_spec, required=False), rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), - tags=dict(type='dict', required=False), + tags=dict(type='dict', required=False, aliases=['resource_tags']), purge_tags=dict(type='bool', required=False, default=True), wait=dict(type='bool', required=False, default=True), wait_timeout=dict(type='int', required=False), diff --git a/rds_cluster.py b/rds_cluster.py index 1d2ed3fdd9b..68e0ef17fc9 100644 --- a/rds_cluster.py +++ b/rds_cluster.py @@ -15,8 +15,9 @@ description: - Create, modify, and delete RDS clusters. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags author: - Sloane Hertel (@s-hertel) - Alina Buzachis (@alinabuzachis) @@ -47,12 +48,6 @@ Set I(enable_cloudwatch_logs_exports) to an empty list to disable all. type: bool default: true - purge_tags: - description: - - Whether or not to remove tags assigned to the DB cluster if not specified in the playbook. To remove all tags - set I(tags) to an empty dictionary in conjunction with this. - type: bool - default: true purge_security_groups: description: - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the cluster. @@ -322,10 +317,6 @@ description: - Whether the DB cluster is encrypted. 
type: bool - tags: - description: - - A dictionary of key value pairs to assign the DB cluster. - type: dict use_earliest_time_on_point_in_time_unavailable: description: - If I(backtrack_to) is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to @@ -951,7 +942,7 @@ def main(): source_engine_version=dict(), source_region=dict(), storage_encrypted=dict(type='bool'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), use_latest_restorable_time=dict(type='bool'), vpc_security_group_ids=dict(type='list', elements='str'), diff --git a/rds_instance.py b/rds_instance.py index 083042d7d91..f5e3aca4bbc 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -14,9 +14,9 @@ description: - Create, modify, and delete RDS instances. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags author: - Sloane Hertel (@s-hertel) @@ -46,10 +46,6 @@ description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. type: bool default: True - purge_tags: - description: Set to False to retain any tags that aren't specified in task and are associated with the instance. - type: bool - default: True read_replica: description: - Set to C(False) to promote a read replica instance or true to create one. When creating a read replica C(creation_source) should @@ -407,10 +403,6 @@ - gp2 - io1 type: str - tags: - description: - - A dictionary of key value pairs to assign the DB instance. - type: dict tde_credential_arn: description: - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is @@ -1321,7 +1313,7 @@ def main(): source_region=dict(), storage_encrypted=dict(type='bool'), storage_type=dict(choices=['standard', 'gp2', 'io1']), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']), tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), timezone=dict(), diff --git a/rds_option_group.py b/rds_option_group.py index d4ed9e6ac65..1efc80cf55f 100644 --- a/rds_option_group.py +++ b/rds_option_group.py @@ -8,7 +8,7 @@ DOCUMENTATION = r''' module: rds_option_group -short_description: rds_option_group module +short_description: Manages the creation, modification, deletion of RDS option groups version_added: 2.1.0 description: - Manages the creation, modification, deletion of RDS option groups. @@ -118,23 +118,14 @@ required: false type: list elements: str - tags: - description: - - A dictionary of key value pairs to assign the option group. - - To remove all tags set I(tags={}) and I(purge_tags=true). - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags). - type: bool - default: true wait: description: Whether to wait for the cluster to be available or deleted. 
type: bool default: True extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -644,7 +635,7 @@ def main(): options=dict(required=False, type='list', elements='dict'), apply_immediately=dict(type='bool', default=False), state=dict(required=True, choices=['present', 'absent']), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), ) diff --git a/rds_subnet_group.py b/rds_subnet_group.py index b0a9f8ae806..3ce90a5d863 100644 --- a/rds_subnet_group.py +++ b/rds_subnet_group.py @@ -15,7 +15,7 @@ version_added: 1.0.0 short_description: manage RDS database subnet groups description: - - Creates, modifies, and deletes RDS database subnet groups. + - Creates, modifies, and deletes RDS database subnet groups. options: state: description: @@ -39,24 +39,15 @@ - Required when I(state=present). type: list elements: str - tags: - description: - - A hash/dictionary of tags to add to the new RDS subnet group or to add/remove from an existing one. - type: dict - version_added: 3.2.0 - purge_tags: - description: - - Whether or not to remove tags assigned to the RDS subnet group if not specified in the playbook. - - To remove all tags set I(tags) to an empty dictionary in conjunction with this. - default: True - type: bool - version_added: 3.2.0 +notes: + - Support for I(tags) and I(purge_tags) was added in release 3.2.0. author: - - "Scott Anderson (@tastychutney)" - - "Alina Buzachis (@alinabuzachis)" + - "Scott Anderson (@tastychutney)" + - "Alina Buzachis (@alinabuzachis)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -265,7 +256,7 @@ def main(): name=dict(required=True), description=dict(required=False), subnets=dict(required=False, type='list', elements='str'), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), ) required_if = [('state', 'present', ['description', 'subnets'])] diff --git a/redshift.py b/redshift.py index 41482c682b7..ca3e1a45052 100644 --- a/redshift.py +++ b/redshift.py @@ -167,21 +167,12 @@ - Whether the cluster should have enhanced VPC routing enabled. default: false type: bool - tags: - description: - - A dictionary of resource tags. - type: dict - aliases: ['resource_tags'] - version_added: "1.3.0" - purge_tags: - description: - - Purge existing tags that are not found in the cluster - type: bool - default: 'yes' - version_added: "1.3.0" +notes: + - Support for I(tags) and I(purge_tags) was added in release 1.3.0. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' From c76346cda53f8fe197750713d9c1c3b16efddfe8 Mon Sep 17 00:00:00 2001 From: Justin McCormick Date: Thu, 2 Jun 2022 00:29:24 -0500 Subject: [PATCH 466/683] ecs_service -- Capacity provider strategy (#1181) ecs_service -- Capacity provider strategy SUMMARY Fixes #1137 Per request, allow for the user to provide a capacity_provider_strategy when creating or updating an ECS service. This capacity_provider_strategy is a list of 1-6 dictionaries. The new capacity_provider_strategy is mutually exclusive with launch_type and an existing service cannot be changed from one to the other. 
ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION The new parameter is optional and non-default. If neither launch_type or capacity_provider_strategy are provided, the new service will default to EC2 launch_type. The module handles the mutually exclusivity and also catches and fails cleanly when trying to change an existing service from launch_type to capacity_provider_strategy or vice versa. Tested pretty thoroughly against ansible 2.9.27. Updated parameters, examples, and return objects provided. Before merge the module will just ignore the capacity_provider_strategy and default to EC2 launch_type. After merge the module will handle either launch_type or capacity_provider_strategy and create/update the service as necessary. - community.aws.ecs_service: state: present name: test-service cluster: test-cluster task_definition: test-task-definition desired_count: 1 capacity_provider_strategy: - capacity_provider: test-capacity-provider-1 weight: 1 base: 0 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ecs_service.py | 93 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 85 insertions(+), 8 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 9f35c26b9fa..f7bd5779e18 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -160,6 +160,26 @@ required: false choices: ["EC2", "FARGATE"] type: str + capacity_provider_strategy: + version_added: 4.0.0 + description: + - The capacity provider strategy to use with your service. You can specify a maximum of 6 providers per strategy. + required: false + type: list + elements: dict + suboptions: + capacity_provider: + description: + - Name of capacity provider. + type: str + weight: + description: + - The relative percentage of the total number of launched tasks that should use the specified provider. + type: int + base: + description: + - How many tasks, at a minimum, should use the specified provider. + type: int platform_version: type: str description: @@ -251,6 +271,18 @@ placement_strategy: - type: binpack field: memory + +# With capacity_provider_strategy (added in version 4.0) +- community.aws.ecs_service: + state: present + name: test-service + cluster: test-cluster + task_definition: test-task-definition + desired_count: 1 + capacity_provider_strategy: + - capacity_provider: test-capacity-provider-1 + weight: 1 + base: 0 ''' RETURN = r''' @@ -259,6 +291,24 @@ returned: when creating a service type: complex contains: + capacityProviderStrategy: + version_added: 4.0.0 + description: The capacity provider strategy to use with your service. + returned: always + type: complex + contains: + base: + description: How many tasks, at a minimum, should use the specified provider. + returned: always + type: int + capacityProvider: + description: Name of capacity provider. + returned: always + type: str + weight: + description: The relative percentage of the total number of launched tasks that should use the specified provider. + returned: always + type: int clusterArn: description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. 
returned: always @@ -576,7 +626,7 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan desired_count, client_token, role, deployment_configuration, placement_constraints, placement_strategy, health_check_grace_period_seconds, network_configuration, service_registries, launch_type, platform_version, - scheduling_strategy): + scheduling_strategy, capacity_provider_strategy): params = dict( cluster=cluster_name, @@ -608,7 +658,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan # desired count is not required if scheduling strategy is daemon if desired_count is not None: params['desiredCount'] = desired_count - + if capacity_provider_strategy: + params['capacityProviderStrategy'] = capacity_provider_strategy if scheduling_strategy: params['schedulingStrategy'] = scheduling_strategy response = self.ecs.create_service(**params) @@ -616,7 +667,7 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_configuration, network_configuration, - health_check_grace_period_seconds, force_new_deployment): + health_check_grace_period_seconds, force_new_deployment, capacity_provider_strategy): params = dict( cluster=cluster_name, service=service_name, @@ -626,6 +677,8 @@ def update_service(self, service_name, cluster_name, task_definition, params['networkConfiguration'] = network_configuration if force_new_deployment: params['forceNewDeployment'] = force_new_deployment + if capacity_provider_strategy: + params['capacityProviderStrategy'] = capacity_provider_strategy if health_check_grace_period_seconds is not None: params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds # desired count is not required if scheduling strategy is daemon @@ -704,19 +757,34 @@ def main(): launch_type=dict(required=False, choices=['EC2', 'FARGATE']), platform_version=dict(required=False, type='str'), service_registries=dict(required=False, type='list', default=[], elements='dict'), - scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']) + scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']), + capacity_provider_strategy=dict( + required=False, + type='list', + default=[], + elements='dict', + options=dict( + capacity_provider=dict(type='str'), + weight=dict(type='int'), + base=dict(type='int') + ) + ) ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=[('state', 'present', ['task_definition']), ('launch_type', 'FARGATE', ['network_configuration'])], - required_together=[['load_balancers', 'role']]) + required_together=[['load_balancers', 'role']], + mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': if module.params['desired_count'] is None: module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + if len(module.params['capacity_provider_strategy']) > 6: + module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') + service_mgr = EcsServiceManager(module) if module.params['network_configuration']: network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) @@ -728,6 +796,7 @@ def main(): deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) serviceRegistries = list(map(snake_dict_to_camel_dict, 
module.params['service_registries'])) + capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy'])) try: existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) @@ -776,7 +845,12 @@ def main(): if module.params['service_registries']: if (existing['serviceRegistries'] or []) != serviceRegistries: module.fail_json(msg="It is not possible to update the service registries of an existing service") - + if module.params['capacity_provider_strategy']: + if 'launchType' in existing.keys(): + module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.") + if module.params['launch_type']: + if 'capacityProviderStrategy' in existing.keys(): + module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") if (existing['loadBalancers'] or []) != loadBalancers: module.fail_json(msg="It is not possible to update the load balancers of an existing service") @@ -788,7 +862,9 @@ def main(): deploymentConfiguration, network_configuration, module.params['health_check_grace_period_seconds'], - module.params['force_new_deployment']) + module.params['force_new_deployment'], + capacityProviders + ) else: try: @@ -807,7 +883,8 @@ def main(): serviceRegistries, module.params['launch_type'], module.params['platform_version'], - module.params['scheduling_strategy'] + module.params['scheduling_strategy'], + capacityProviders ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Couldn't create service") From c26eda75a932363c859c16cb7767b9fa9f2ab843 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Jun 2022 09:58:31 +0200 Subject: [PATCH 467/683] ec2_vpc_nacl - Add support for purge_tags (#1189) ec2_vpc_nacl - Add support for purge_tags SUMMARY Add support to ec2_vpc_nacl for purge_tags Use ec2 helper for tagging Use TaggingSpecifications on creation ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_vpc_nacl ADDITIONAL INFORMATION Changelog Integration tests Reviewed-by: Joseph Torcasso --- ec2_vpc_nacl.py | 109 +++++++++++++++++------------------------------- 1 file changed, 38 insertions(+), 71 deletions(-) diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 04da531a2f8..9968e2929ff 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -8,7 +8,7 @@ DOCUMENTATION = r''' module: ec2_vpc_nacl -short_description: create and delete Network ACLs. +short_description: create and delete Network ACLs version_added: 1.0.0 description: - Read the AWS documentation for Network ACLS @@ -64,11 +64,6 @@ required: false type: list elements: list - tags: - description: - - Dictionary of tags to look for and apply when creating a network ACL. - required: false - type: dict state: description: - Creates or modifies an existing NACL @@ -79,8 +74,11 @@ default: present author: Mike Mochan (@mmochan) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags +notes: + - Support for I(purge_tags) was added in release 4.0.0. 
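Since the reworked tags_changed() merges the name parameter into the Name tag before calling ensure_ec2_tags(), a task can manage both together; a sketch with illustrative identifiers:

- community.aws.ec2_vpc_nacl:
    vpc_id: vpc-0123456789abcdef0   # hypothetical VPC ID
    name: prod-nacl                 # becomes the Name tag on the NACL
    state: present
    tags:
      Environment: prod
    purge_tags: true                # tags not listed here (other than Name) are removed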
''' EXAMPLES = r''' @@ -161,6 +159,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml @@ -173,17 +173,6 @@ def icmp_present(entry): return True -def load_tags(module): - tags = [] - if module.params.get('tags'): - for name, value in module.params.get('tags').items(): - tags.append({'Key': name, 'Value': str(value)}) - tags.append({'Key': "Name", 'Value': module.params.get('name')}) - else: - tags.append({'Key': "Name", 'Value': module.params.get('name')}) - return tags - - def subnets_removed(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) associations = results['NetworkAcls'][0]['Associations'] @@ -243,27 +232,25 @@ def nacls_changed(nacl, client, module): def tags_changed(nacl_id, client, module): + tags = module.params.get('tags') + name = module.params.get('name') + purge_tags = module.params.get('purge_tags') changed = False - tags = dict() - if module.params.get('tags'): - tags = module.params.get('tags') - if module.params.get('name') and not tags.get('Name'): - tags['Name'] = module.params['name'] - nacl = find_acl_by_id(nacl_id, client, module) - if nacl['NetworkAcls']: - nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']] - nacl_tags = [item for sublist in nacl_values for item in sublist] - tag_values = [[key, str(value)] for key, value in tags.items()] - tags = [item for sublist in tag_values for item in sublist] - if sorted(nacl_tags) == sorted(tags): - changed = False - return changed - else: - delete_tags(nacl_id, client, module) - create_tags(nacl_id, client, module) - changed = True - return changed - return changed + + if name is None and tags is None: + return False + + if module.params.get('tags') is None: + # Only purge tags if tags is explicitly set to {} and purge_tags is True + purge_tags = False + + new_tags = dict() + if module.params.get('name') is not None: + new_tags['Name'] = module.params.get('name') + new_tags.update(module.params.get('tags') or {}) + + return ensure_ec2_tags(client, module, nacl_id, tags=new_tags, + purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound']) def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): @@ -340,9 +327,12 @@ def setup_network_acl(client, module): changed = False nacl = describe_network_acl(client, module) if not nacl['NetworkAcls']: - nacl = create_network_acl(module.params.get('vpc_id'), client, module) + tags = {} + if module.params.get('name'): + tags['Name'] = module.params.get('name') + tags.update(module.params.get('tags') or {}) + nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags) nacl_id = nacl['NetworkAcl']['NetworkAclId'] - create_tags(nacl_id, client, module) subnets = subnets_to_associate(nacl, client, module) replace_network_acl_association(nacl_id, subnets, client, module) construct_acl_entries(nacl, client, module) @@ -389,12 +379,15 @@ def _create_network_acl(client, *args, **kwargs): return client.create_network_acl(*args, **kwargs) -def create_network_acl(vpc_id, client, module): +def create_network_acl(vpc_id, client, module, tags): + params = dict(VpcId=vpc_id) + if tags: + 
params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl']) try: if module.check_mode: nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) else: - nacl = _create_network_acl(client, VpcId=vpc_id) + nacl = _create_network_acl(client, **params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) return nacl @@ -413,20 +406,6 @@ def create_network_acl_entry(params, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) -def _create_tags(client, *args, **kwargs): - return client.create_tags(*args, **kwargs) - - -def create_tags(nacl_id, client, module): - try: - delete_tags(nacl_id, client, module) - if not module.check_mode: - _create_tags(client, Resources=[nacl_id], Tags=load_tags(module)) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - @AWSRetry.jittered_backoff() def _delete_network_acl(client, *args, **kwargs): return client.delete_network_acl(*args, **kwargs) @@ -453,19 +432,6 @@ def delete_network_acl_entry(params, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) -def _delete_tags(client, *args, **kwargs): - return client.delete_tags(*args, **kwargs) - - -def delete_tags(nacl_id, client, module): - try: - if not module.check_mode: - _delete_tags(client, Resources=[nacl_id]) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - @AWSRetry.jittered_backoff() def _describe_network_acls(client, **kwargs): return client.describe_network_acls(**kwargs) @@ -614,7 +580,8 @@ def main(): name=dict(), nacl_id=dict(), subnets=dict(required=False, type='list', default=list(), elements='str'), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(required=False, type='bool', default=True), ingress=dict(required=False, type='list', default=list(), elements='list'), egress=dict(required=False, type='list', default=list(), elements='list'), state=dict(default='present', choices=['present', 'absent']), From c28c9ac95e04da408df909ed96b4ffdd258f6292 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Jun 2022 11:11:11 +0200 Subject: [PATCH 468/683] Tagging fragment - Move simplest cases over to the docs fragment. (#1199) Tagging fragment - dynamodb_table - Move over to the docs fragment. SUMMARY The dynamodb tests are slow enough to cause test timeouts when combined with too many other changed, split this off. ISSUE TYPE Feature Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION The change itself was approved on #1182 --- dynamodb_table.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index c1d9b65686e..943cdea02b1 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -15,7 +15,8 @@ - Create or delete AWS Dynamo DB tables. - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. -author: Alan Loi (@loia) +author: + - Alan Loi (@loia) options: state: description: @@ -128,16 +129,6 @@ choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS'] type: str version_added: 3.1.0 - tags: - description: - - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag. - - 'For example: C({"key":"value"}) or C({"key":"value","key2":"value2"})' - type: dict - purge_tags: - description: - - Remove tags not listed in I(tags). 
- default: True - type: bool wait_timeout: description: - How long (in seconds) to wait for creation / update / deletion to complete. @@ -151,9 +142,9 @@ default: True type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -1042,7 +1033,7 @@ def main(): write_capacity=dict(type='int'), indexes=dict(default=[], type='list', elements='dict', options=index_options), table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']), From 6244246651552092191bff094be57432f818adbe Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Jun 2022 11:11:15 +0200 Subject: [PATCH 469/683] Tagging fragment - Move simplest cases over to the docs fragment. (#1200) Tagging fragment - rds_instance_snapshot - Move over to the docs fragment. SUMMARY The rds_instance_snapshot tests are slow enough to cause test timeouts when combined with too many other changed, split this off. ISSUE TYPE Feature Pull Request COMPONENT NAME rds_instance_snapshot ADDITIONAL INFORMATION The change itself was approved on #1182 --- rds_instance_snapshot.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/rds_instance_snapshot.py b/rds_instance_snapshot.py index 0d7a50a06e7..fc32ef75e4c 100644 --- a/rds_instance_snapshot.py +++ b/rds_instance_snapshot.py @@ -14,7 +14,7 @@ version_added: 1.0.0 short_description: Manage Amazon RDS instance snapshots description: - - Creates or deletes RDS snapshots. + - Creates or deletes RDS snapshots. options: state: description: @@ -68,24 +68,15 @@ - how long before wait gives up, in seconds. default: 300 type: int - tags: - description: - - tags dict to apply to a snapshot. - type: dict - purge_tags: - description: - - whether to remove tags not present in the I(tags) parameter. - default: True - type: bool author: - - "Will Thames (@willthames)" - - "Michael De La Rue (@mikedlr)" - - "Alina Buzachis (@alinabuzachis)" - - "Joseph Torcasso (@jatorcasso)" + - "Will Thames (@willthames)" + - "Michael De La Rue (@mikedlr)" + - "Alina Buzachis (@alinabuzachis)" + - "Joseph Torcasso (@jatorcasso)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -359,7 +350,7 @@ def main(): source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), copy_tags=dict(type='bool', default=False), source_region=dict(type='str'), From 87d006f00ea90e37a46ea2d2e94a435815d0bb46 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Jun 2022 13:50:42 +0200 Subject: [PATCH 470/683] Tagging - cleanup docs for ec2_snapshot_copy (#1201) Tagging - ec2_snapshot_copy SUMMARY Add the "resource_tags" alias, for consistency with other modules minor docs clean-up Use TagSpecification on creation rather than making a separate API call to tag the resource after creation. Does not add purge_tags, since the module performs a one-shot action rather than managing the resources. 
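Because tags are now passed as a TagSpecification on the CopySnapshot call rather than applied afterwards, the new snapshot is tagged atomically at creation. A sketch of a task using the alias, with regions and the snapshot ID illustrative:

- community.aws.ec2_snapshot_copy:
    source_region: us-east-1
    region: us-west-2
    source_snapshot_id: snap-0123456789abcdef0   # hypothetical source snapshot
    wait: true
    resource_tags:                               # applied at copy time via TagSpecifications
      CopiedBy: ansible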
ISSUE TYPE Docs Pull Request Feature Pull Request COMPONENT NAME ec2_snapshot_copy ADDITIONAL INFORMATION Since we don't have a tags-only fragment, I've not switched this over to using a fragment. If I find more modules with a similar use-case I'll try to find a standard fragment we can use. Reviewed-by: Alina Buzachis --- ec2_snapshot_copy.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 5ad307dd693..2d0d40546e7 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -11,9 +11,9 @@ --- module: ec2_snapshot_copy version_added: 1.0.0 -short_description: Copies an EC2 snapshot and returns the new Snapshot ID. +short_description: Copies an EC2 snapshot and returns the new Snapshot ID description: - - Copies an EC2 Snapshot from a source region to a destination region. + - Copies an EC2 Snapshot from a source region to a destination region. options: source_region: description: @@ -40,7 +40,7 @@ type: str wait: description: - - Wait for the copied Snapshot to be in 'Available' state before returning. + - Wait for the copied Snapshot to be in the C(Available) state before returning. type: bool default: 'no' wait_timeout: @@ -50,12 +50,14 @@ type: int tags: description: - - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}' + - A dictionary representing the tags to be applied to the newly created resource. type: dict -author: Deepak Kothandan (@Deepakkothandan) + aliases: ['resource_tags'] +author: + - Deepak Kothandan (@Deepakkothandan) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -112,6 +114,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications def copy_snapshot(module, ec2): @@ -134,6 +137,9 @@ def copy_snapshot(module, ec2): if module.params.get('kms_key_id'): params['KmsKeyId'] = module.params.get('kms_key_id') + if module.params.get('tags'): + params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags')) + try: snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] if module.params.get('wait'): @@ -145,11 +151,6 @@ def copy_snapshot(module, ec2): SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) ) - if module.params.get('tags'): - ec2.create_tags( - Resources=[snapshot_id], - Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()] - ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.') @@ -166,7 +167,7 @@ def main(): kms_key_id=dict(type='str', required=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), ) module = AnsibleAWSModule(argument_spec=argument_spec) From 11531a2294e8fb4ba4e7cf4d702ed8d448d2a9e9 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 3 Jun 2022 09:53:38 +0200 Subject: [PATCH 471/683] lambda - Add support for purge_tags (#1202) lambda - Add support for purge_tags SUMMARY Use tagging fragment Add purge_tags Add resource_tags alias to tags fix bug with returned tag names getting snake cased fix bug where the lambda module was modifying tags in 
check mode Tweak tagging to require an explicit tags: {} to remove tags ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request COMPONENT NAME lambda ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- lambda.py | 48 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/lambda.py b/lambda.py index ff469c5bc71..22629754e65 100644 --- a/lambda.py +++ b/lambda.py @@ -103,10 +103,6 @@ - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default. choices: ['Active', 'PassThrough'] type: str - tags: - description: - - Tag dict to apply to the function. - type: dict kms_key_arn: description: - The KMS key ARN used to encrypt the function's environment variables. @@ -117,6 +113,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.tags ''' @@ -391,7 +388,10 @@ def sha256sum(filename): return hex_digest -def set_tag(client, module, tags, function): +def set_tag(client, module, tags, function, purge_tags): + + if tags is None: + return False changed = False arn = function['Configuration']['FunctionArn'] @@ -401,7 +401,13 @@ def set_tag(client, module, tags, function): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to list tags") - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + if not tags_to_remove and not tags_to_add: + return False + + if module.check_mode: + return True try: if tags_to_remove: @@ -438,6 +444,14 @@ def wait_for_lambda(client, module, name): module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') +def format_response(response): + tags = response.get("Tags", {}) + result = camel_dict_to_snake_dict(response) + # Lambda returns a dict rather than the usual boto3 list of dicts + result["tags"] = tags + return result + + def main(): argument_spec = dict( name=dict(required=True), @@ -458,7 +472,8 @@ def main(): dead_letter_arn=dict(), kms_key_arn=dict(type='str', no_log=False), tracing_mode=dict(choices=['Active', 'PassThrough']), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), ) mutually_exclusive = [['zip_file', 's3_key'], @@ -494,6 +509,7 @@ def main(): dead_letter_arn = module.params.get('dead_letter_arn') tracing_mode = module.params.get('tracing_mode') tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') kms_key_arn = module.params.get('kms_key_arn') check_mode = module.check_mode @@ -614,7 +630,7 @@ def main(): # Tag Function if tags is not None: - if set_tag(client, module, tags, current_function): + if set_tag(client, module, tags, current_function, purge_tags): changed = True # Upload new code if needed (e.g. 
code checksum has changed) @@ -634,9 +650,9 @@ def main(): response = get_current_function(client, name, qualifier=current_version) if not response: module.fail_json(msg='Unable to get function information after updating') - + response = format_response(response) # We're done - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + module.exit_json(changed=changed, **response) # Function doesn't exists, create new Lambda function elif state == 'present': @@ -691,6 +707,10 @@ def main(): func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}}) + # Tag Function + if tags: + func_kwargs.update({'Tags': tags}) + # Function would have been created if not check mode if check_mode: module.exit_json(changed=True) @@ -704,15 +724,11 @@ def main(): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to create function") - # Tag Function - if tags is not None: - if set_tag(client, module, tags, get_current_function(client, name)): - changed = True - response = get_current_function(client, name, qualifier=current_version) if not response: module.fail_json(msg='Unable to get function information after creating') - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + response = format_response(response) + module.exit_json(changed=changed, **response) # Delete existing Lambda function if state == 'absent' and current_function: From 96f100f22ade04ead14bff669e097b42c25e4d87 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Fri, 3 Jun 2022 05:56:26 -0700 Subject: [PATCH 472/683] ec2_customer_gateway: fix bgp_asn documentation inconsistency (#1197) ec2_customer_gateway: fix bgp_asn documentation inconsistency SUMMARY Fixes #1075 ISSUE TYPE Docs Pull Request COMPONENT NAME ec2_customer_gateway ADDITIONAL INFORMATION While current documentation for the parameter states that Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present)., according to boto3 documentation, bgp_asn defaults to 65000. Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ec2_customer_gateway.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index 9c00783a58a..f07e92f4f7c 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description: From 0cf4423baeb6b45c7b5f026cf2749130535b2a1d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 3 Jun 2022 15:02:37 +0200 Subject: [PATCH 473/683] Tagging - Add resource_tags as an alias for tags (#1204) Tagging - Add resource_tags as an alias for tags SUMMARY There are a number of modules where we always create a fresh resource. For now let's just add resource_tags, long term it would be good to add purge_tags and use the docs fragment. 
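As a sketch of what the alias buys in practice, ec2_launch_template (one of the modules listed below) can now use the same option name as the modules that already moved to the tags fragment; the template name, AMI ID, and tag values are illustrative:

- community.aws.ec2_launch_template:
    name: my-template                   # hypothetical template name
    image_id: ami-0123456789abcdef0     # hypothetical AMI
    instance_type: t3.micro
    resource_tags:                      # alias keeps playbooks consistent across modules
      Service: web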
ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/data_pipeline.py plugins/modules/ec2_ami_copy.py plugins/modules/ec2_launch_template.py plugins/modules/ecs_task.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- data_pipeline.py | 18 +++++++++--------- ec2_ami_copy.py | 13 +++++++------ ec2_launch_template.py | 16 ++++++++-------- ecs_task.py | 12 +++++++----- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/data_pipeline.py b/data_pipeline.py index 4fee6423165..e0ddaa936de 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -16,15 +16,14 @@ - Sloane Hertel (@s-hertel) short_description: Create and manage AWS Datapipelines extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 description: - - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) - given to the datapipeline. - - The pipeline definition must be in the format given here - U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax). - - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state. + - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) + given to the datapipeline. + - The pipeline definition must be in the format given here + U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax). + - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state. options: name: description: @@ -121,6 +120,7 @@ description: - A dict of key:value pair(s) to add to the pipeline. type: dict + aliases: ['resource_tags'] ''' EXAMPLES = r''' @@ -602,7 +602,7 @@ def main(): timeout=dict(required=False, type='int', default=300), state=dict(default='present', choices=['present', 'absent', 'active', 'inactive']), - tags=dict(required=False, type='dict', default={}), + tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']), values=dict(required=False, type='list', default=[], elements='dict'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index e5628b00034..ecb723dfea6 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -13,7 +13,7 @@ version_added: 1.0.0 short_description: copies AMI between AWS regions, return new image id description: - - Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.) + - Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.) options: source_region: description: @@ -60,6 +60,7 @@ description: - 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})' type: dict + aliases: ['resource_tags'] tag_equality: description: - Whether to use tags if the source AMI already exists in the target region. 
If this is set, and all tags match @@ -67,11 +68,11 @@ default: false type: bool author: -- Amir Moulavi (@amir343) -- Tim C (@defunctio) + - Amir Moulavi (@amir343) + - Tim C (@defunctio) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -208,7 +209,7 @@ def main(): kms_key_id=dict(type='str', required=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), tag_equality=dict(type='bool', default=False)) module = AnsibleAWSModule(argument_spec=argument_spec) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index bccc79eddb7..4f2d05e1630 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -11,17 +11,16 @@ version_added: 1.0.0 short_description: Manage EC2 launch templates description: - - Create, modify, and delete EC2 Launch Templates, which can be used to - create individual instances or with Autoscaling Groups. - - The M(amazon.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all - parameters on those tasks, be passed a Launch Template which contains - settings like instance size, disk type, subnet, and more. +- Create, modify, and delete EC2 Launch Templates, which can be used to + create individual instances or with Autoscaling Groups. +- The M(amazon.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all + parameters on those tasks, be passed a Launch Template which contains + settings like instance size, disk type, subnet, and more. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - author: - - Ryan Scott Brown (@ryansb) +- Ryan Scott Brown (@ryansb) options: template_id: description: @@ -320,6 +319,7 @@ - A set of key-value pairs to be applied to resources when this Launch Template is used. - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)" - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters." + aliases: ['resource_tags'] user_data: description: > The Base64-encoded user data to make available to the instance. For more information, see the Linux @@ -739,7 +739,7 @@ def main(): ram_disk_id=dict(), security_group_ids=dict(type='list', elements='str'), security_groups=dict(type='list', elements='str'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), user_data=dict(), ) diff --git a/ecs_task.py b/ecs_task.py index b2ca36e21de..b4c625df712 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -10,10 +10,11 @@ --- module: ecs_task version_added: 1.0.0 -short_description: Run, start or stop a task in ecs +short_description: Run, start or stop a task in ECS description: - Creates or deletes instances of task definitions. 
-author: Mark Chance (@Java1Guy) +author: + - Mark Chance (@Java1Guy) options: operation: description: @@ -88,9 +89,10 @@ description: - Tags that will be added to ecs tasks on start and run required: false + aliases: ['resource_tags'] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' @@ -349,7 +351,7 @@ def main(): started_by=dict(required=False, type='str'), # R S network_configuration=dict(required=False, type='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - tags=dict(required=False, type='dict') + tags=dict(required=False, type='dict', aliases=['resource_tags']) ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, From 35cf525911a22410dfcb17a2dbcc3bf2f86f8d82 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 3 Jun 2022 16:30:43 +0200 Subject: [PATCH 474/683] wafv2_ip_set - add support for updating tags (#1205) wafv2_ip_set - add support for updating tags SUMMARY Added support for purge_tags Added tags to return values Added support for updating tags Moved to common docs_fragment for tagging ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/module_utils/wafv2.py plugins/modules/wafv2_ip_set.py plugins/modules/wafv2_ip_set_info.py ADDITIONAL INFORMATION Depends on : mattclay/aws-terminator#213 Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- wafv2_ip_set.py | 49 +++++++++++++++++++++++++++----------------- wafv2_ip_set_info.py | 8 +++++--- 2 files changed, 35 insertions(+), 22 deletions(-) diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index 1efaf31f77a..add677eba1f 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -53,21 +53,19 @@ from the IP set. The entire IP set itself will stay present. type: list elements: str - tags: - description: - - Key value pairs to associate with the resource. - - Currently tags are not visible. Nor in the web ui, nor via cli and nor in boto3. - required: false - type: dict purge_addresses: description: - When set to C(no), keep the existing addresses in place. Will modify and add, but will not delete. default: yes type: bool +notes: + - Support for I(purge_tags) was added in release 4.0.0. 
+ extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -125,6 +123,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags class IpSet: @@ -133,15 +133,18 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.name = name self.scope = scope self.fail_json_aws = fail_json_aws - self.existing_set, self.id, self.locktoken = self.get_set() + self.existing_set, self.id, self.locktoken, self.arn = self.get_set() def description(self): return self.existing_set.get('Description') + def _format_set(self, ip_set): + if ip_set is None: + return None + return camel_dict_to_snake_dict(self.existing_set, ignore_list=['tags']) + def get(self): - if self.existing_set: - return camel_dict_to_snake_dict(self.existing_set) - return None + return self._format_set(self.existing_set) def remove(self): try: @@ -174,8 +177,8 @@ def create(self, description, ip_address_version, addresses, tags): except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to create wafv2 ip set.") - self.existing_set, self.id, self.locktoken = self.get_set() - return camel_dict_to_snake_dict(self.existing_set) + self.existing_set, self.id, self.locktoken, self.arn = self.get_set() + return self._format_set(self.existing_set) def update(self, description, addresses): req_obj = { @@ -194,13 +197,14 @@ def update(self, description, addresses): except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 ip set.") - self.existing_set, self.id, self.locktoken = self.get_set() - return camel_dict_to_snake_dict(self.existing_set) + self.existing_set, self.id, self.locktoken, self.arn = self.get_set() + return self._format_set(self.existing_set) def get_set(self): response = self.list() existing_set = None id = None + arn = None locktoken = None for item in response.get('IPSets'): if item.get('Name') == self.name: @@ -216,8 +220,10 @@ def get_set(self): ).get('IPSet') except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") + tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) + existing_set['tags'] = tags - return existing_set, id, locktoken + return existing_set, id, locktoken, arn def list(self, Nextmarker=None): # there is currently no paginator for wafv2 @@ -275,8 +281,9 @@ def main(): description=dict(type='str'), ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']), addresses=dict(type='list', elements='str'), - tags=dict(type='dict'), - purge_addresses=dict(type='bool', default=True) + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + purge_addresses=dict(type='bool', default=True), ) module = AnsibleAWSModule( @@ -292,6 +299,7 @@ def main(): ip_address_version = module.params.get("ip_address_version") addresses = module.params.get("addresses") tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") purge_addresses = module.params.get("purge_addresses") check_mode = module.check_mode @@ -303,7 +311,9 @@ def main(): ip_set = IpSet(wafv2, name, scope, 
module.fail_json_aws) if state == 'present': + if ip_set.get(): + tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode) change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) if (change or ip_set.description() != description) and not check_mode: retval = ip_set.update( @@ -312,6 +322,7 @@ def main(): ) else: retval = ip_set.get() + change |= tags_updated else: if not check_mode: retval = ip_set.create( diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index 0c2bf3f0e4e..6e3c1075257 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -77,6 +77,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): @@ -93,7 +94,7 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): if response.get('NextMarker'): response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets') except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to list wafv2 ip set.") + fail_json_aws(e, msg="Failed to list wafv2 ip set") return response @@ -105,7 +106,7 @@ def get_ip_set(wafv2, name, scope, id, fail_json_aws): Id=id ) except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 ip set.") + fail_json_aws(e, msg="Failed to get wafv2 ip set") return response @@ -134,13 +135,14 @@ def main(): for item in response.get('IPSets'): if item.get('Name') == name: id = item.get('Id') + arn = item.get('ARN') retval = {} existing_set = None if id: existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_set.get('IPSet')) - + retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} module.exit_json(**retval) From ba3e8ef59f8e1bca10622c3b66a39dc7af3e753a Mon Sep 17 00:00:00 2001 From: Justin McCormick Date: Sat, 4 Jun 2022 01:43:36 -0500 Subject: [PATCH 475/683] ecs_ecr -- Return repository policy if it exists, even if we did not … (#1171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ecs_ecr -- Return repository policy if it exists, even if we did not … Return repository policy if it exists, even if we did not create or modify it. SUMMARY Existing behavior will only print the ECR repo (permissions) policy if we just created/edited that policy. There is no way to just pull the existing policy and use it in a subsequent task. New behavior will print the existing ECR repo policy information if it exists, even if we did not just create/edit that policy. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_ecr -- run function ADDITIONAL INFORMATION Given the scenario that my ECR repo already exists and already has a permissions policy on it, I would like to use Ansible to retrieve that policy. Today this module will only return the policy as part of the return object if we are telling the module to create or edit the policy. After merging, the module will create the policy if it was defined, or update the policy if needed, and if neither of those were defined (else) we will check for an existing permissions policy and add it to the return object.
Given the example tasks: - name: Pull the existing permissions policy for the ECR repo. community.aws.ecs_ecr: region: "{{ AWS_REGION }}" aws_access_key: "{{ survey_access_key_id }}" aws_secret_key: "{{ survey_secret_access_key }}" aws_security_token: "{{ survey_session_token }}" validate_certs: False name: 'example/nginx' register: ecr_repo_info - name: debug debug: msg: "{{ ecr_repo_info }}" Here is the current behavior: TASK [debug] ********************************************************************************************************************************************************************************************************************************* ok: [localhost] => { "msg": { "changed": false, "created": false, "failed": false, "repository": { "createdAt": "2022-05-19T11:15:49-05:00", "encryptionConfiguration": { "encryptionType": "AES256" }, "imageScanningConfiguration": { "scanOnPush": false }, "imageTagMutability": "MUTABLE", "registryId": "12345", "repositoryArn": "arn:aws:ecr:us-east-1:123456789:repository/example/nginx", "repositoryName": "example/nginx", "repositoryUri": "123456789.dkr.ecr.us-east-1.amazonaws.com/example/nginx" }, "state": "present" } } And here is the post-merge output: TASK [debug] ********************************************************************************************************************************************************************************************************************************* ok: [localhost] => { "msg": { "changed": false, "created": false, "failed": false, "policy": { "Statement": [ { "Action": [ "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:ListImages", "ecr:DescribeImages", "ecr:DescribeRepositories" ], "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::12345678:root" }, "Sid": "Allow managed accounts to access this repo." } ], "Version": "2008-10-17" }, "repository": { "createdAt": "2022-05-19T11:15:49-05:00", "encryptionConfiguration": { "encryptionType": "AES256" }, "imageScanningConfiguration": { "scanOnPush": false }, "imageTagMutability": "MUTABLE", "registryId": "12345", "repositoryArn": "arn:aws:ecr:us-east-1:123456789:repository/example/nginx", "repositoryName": "example/nginx", "repositoryUri": "123456789.dkr.ecr.us-east-1.amazonaws.com/example/nginx" }, "state": "present" } } Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: Justin McCormick Reviewed-by: Alina Buzachis --- ecs_ecr.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 487bf452f7f..294459135fe 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -167,11 +167,16 @@ name: type: str description: The name of the repository - returned: "when state == 'absent'" + returned: I(state=absent) +policy: + type: dict + description: The existing, created or updated repository policy.
+ returned: I(state=present) + version_added: 4.0.0 repository: type: dict description: The created or updated repository - returned: "when state == 'present'" + returned: I(state=present) sample: createdAt: '2017-01-17T08:41:32-06:00' registryId: '999999999999' @@ -494,6 +499,11 @@ def run(ecr, params): result['policy'] = policy_text raise + else: + original_policy = ecr.get_repository_policy(registry_id, name) + if original_policy: + result['policy'] = original_policy + original_scan_on_push = ecr.get_repository(registry_id, name) if original_scan_on_push is not None: if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']: From d768880d8fd64539fb1d40c3e3a8b9e23d0801df Mon Sep 17 00:00:00 2001 From: "Kevin Teague (at Tasktop)" Date: Sat, 4 Jun 2022 01:29:52 -0700 Subject: [PATCH 476/683] Add AWSRetry.jittered_backoff to rds_instance_info (#1026) Add AWSRetry.jittered_backoff to rds_instance_info SUMMARY Add AWSRetry.jittered_backoff to the rds_instance_info module. When calling rds_instance_info we have been seeing API rate limit errors from AWS. When calling this module, it usually runs to about 90-150 times in a minute before we get rate limited. Using jittered_backoff should significantly decrease the number of times we see API rate limits here. 02:20:36 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (Throttling) when calling the DescribeDBInstances operation (reached max retries: 4): Rate exceeded 02:20:36 fatal: [polaris -> localhost]: FAILED! => {"boto3_version": "1.20.22", "botocore_version": "1.23.22", "changed": false, "error": {"code": "Throttling", "message": "Rate exceeded", "type": "Sender"}, "msg": "Couldn't get instance information: An error occurred (Throttling) when calling the DescribeDBInstances operation (reached max retries: 4): Rate exceeded", "response_metadata": {"http_headers": {"connection": "close", "content-length": "254", "content-type": "text/xml", "date": "Tue, 15 Mar 2022 09:20:34 GMT", "x-amzn-requestid": "5de8131e-3f59-4b04-af25-5f7083ee09b9"}, "http_status_code": 400, "max_attempts_reached": true, "request_id": "5de8131e-3f59-4b04-af25-5f7083ee09b9", "retry_attempts": 4}} ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance_info ADDITIONAL INFORMATION Decorated rds_instance_info with AWSRetry.jittered_backoff Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- rds_instance_info.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/rds_instance_info.py b/rds_instance_info.py index 6e41ea62940..e26e0f680a6 100644 --- a/rds_instance_info.py +++ b/rds_instance_info.py @@ -365,6 +365,17 @@ pass # handled by AnsibleAWSModule +@AWSRetry.jittered_backoff() +def _describe_db_instances(conn, **params): + paginator = conn.get_paginator('describe_db_instances') + try: + results = paginator.paginate(**params).build_full_result()['DBInstances'] + except is_boto3_error_code('DBInstanceNotFound'): + results = [] + + return results + + def instance_info(module, conn): instance_name = module.params.get('db_instance_identifier') filters = module.params.get('filters') @@ -375,12 +386,9 @@ def instance_info(module, conn): if filters: params['Filters'] = ansible_dict_to_boto3_filter_list(filters) - paginator = conn.get_paginator('describe_db_instances') try: - results = paginator.paginate(**params).build_full_result()['DBInstances'] - except is_boto3_error_code('DBInstanceNotFound'): - results = [] - 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + results = _describe_db_instances(conn, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get instance information") for instance in results: From 7b43b7d9c1bec1ec177e9a2559d0c332bb8f0489 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 4 Jun 2022 13:23:26 +0200 Subject: [PATCH 477/683] wafv2_ip_set - fix bugs with changing description (#1211) wafv2_ip_set - fix bugs with changing description SUMMARY updating just the description didn't update the changed state ISSUE TYPE Bugfix Pull Request COMPONENT NAME wafv2_ip_set ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- wafv2_ip_set.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index add677eba1f..b4b3e4f8609 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -314,15 +314,19 @@ def main(): if ip_set.get(): tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode) - change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) - if (change or ip_set.description() != description) and not check_mode: + ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state) + description_updated = bool(description) and ip_set.description() != description + change = ips_updated or description_updated or tags_updated + retval = ip_set.get() + if module.check_mode: + pass + elif ips_updated or description_updated: retval = ip_set.update( description=description, addresses=addresses ) - else: - retval = ip_set.get() - change |= tags_updated + elif tags_updated: + retval, id, locktoken, arn = ip_set.get_set() else: if not check_mode: retval = ip_set.create( From d201e38b2981b634f15a94bb7968a2d2e1306a13 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 4 Jun 2022 17:39:27 +0200 Subject: [PATCH 478/683] Tagging fragment - Move simplest cases over to the docs fragment. (#1198) Tagging fragment - networkfirewall - Move over to the docs fragment SUMMARY The networkfirewall tests are slow enough to be flakey, split this off. ISSUE TYPE Feature Pull Request COMPONENT NAME networkfirewall ADDITIONAL INFORMATION The change itself was approved on #1182 --- networkfirewall.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/networkfirewall.py b/networkfirewall.py index fefb565fef5..9e9b02d0edc 100644 --- a/networkfirewall.py +++ b/networkfirewall.py @@ -40,14 +40,6 @@ - A description for the firewall. required: false type: str - tags: - description: - - A dictionary representing the tags associated with the firewall. - - 'For example C({"Example Tag": "some example value"})' - - Unless I(purge_tags=False) all other tags will be removed from the - firewall. - type: dict - required: false delete_protection: description: - When I(delete_protection=True), the firewall is protected from deletion. @@ -69,14 +61,6 @@ - Defaults to C(false) when not provided on creation. type: bool required: false - purge_tags: - description: - - If I(purge_tags=true) and I(tags) is defined existing tags will be - purged from the resource to match exactly what is defined by the - I(tags) parameter. 
- type: bool - required: false - default: True wait: description: - On creation, whether to wait for the firewall to reach the C(READY) @@ -117,10 +101,12 @@ required: false aliases: ['firewall_policy_arn'] -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -296,7 +282,7 @@ def main(): arn=dict(type='str', required=False, aliases=['firewall_arn']), state=dict(type='str', required=False, default='present', choices=['present', 'absent']), description=dict(type='str', required=False), - tags=dict(type='dict', required=False), + tags=dict(type='dict', required=False, aliases=['resource_tags']), purge_tags=dict(type='bool', required=False, default=True), wait=dict(type='bool', required=False, default=True), wait_timeout=dict(type='int', required=False), From d1164cbc3031950d44ac385f8e1856e177d8100f Mon Sep 17 00:00:00 2001 From: Justin McCormick Date: Sat, 4 Jun 2022 15:24:25 -0500 Subject: [PATCH 479/683] ecs_service - document circuit breaker feature (#1215) ecs_service - document circuit breaker feature SUMMARY Fixes #921 This feature works with the existing code, so this was mainly adding documentation, examples, and an integration test. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION The deployment circuit breaker is part of the deployment configuration dictionary, which is already snake<=>camel cased. Thus the existing code was handling 99% of the feature, we just added some type validation, documentation, examples, and an integration test. - community.aws.ecs_service: state: present name: test-service cluster: test-cluster task_definition: test-task-definition desired_count: 3 deployment_configuration: deployment_circuit_breaker: enable: True rollback: True Reviewed-by: Mark Chappell --- ecs_service.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/ecs_service.py b/ecs_service.py index f7bd5779e18..b2f681af2ba 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -101,6 +101,16 @@ minimum_healthy_percent: type: int description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. + deployment_circuit_breaker: + type: dict + description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. + suboptions: + enable: + type: bool + description: If enabled, a service deployment will transition to a failed state and stop launching new tasks. + rollback: + type: bool + description: If enabled, ECS will roll back your service to the last completed deployment after a failure. placement_constraints: description: - The placement constraints for the tasks in the service. 
@@ -272,6 +282,18 @@ - type: binpack field: memory +# With deployment circuit breaker (added in version 4.0) +- community.aws.ecs_service: + state: present + name: test-service + cluster: test-cluster + task_definition: test-task-definition + desired_count: 3 + deployment_configuration: + deployment_circuit_breaker: + enable: True + rollback: True + # With capacity_provider_strategy (added in version 4.0) - community.aws.ecs_service: state: present @@ -378,6 +400,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -494,6 +529,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -535,7 +583,8 @@ DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' + 'minimum_healthy_percent': 'int', + 'deployment_circuit_breaker': 'dict', } from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule From 35c775a55087d63fee6145bfeabfed9a24bb227e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 7 Jun 2022 09:34:41 +0200 Subject: [PATCH 480/683] cloudtrail - add support for purge_tags (#1219) cloudtrail - add support for purge_tags SUMMARY Move to tagging docs fragment Update tagging code so that "tags" must be explicitly passed to remove tags add purge_tags parameter add resource_tags as an alias for tags Update tagging code so that tags are set as part of the create call rather than tagging after creation ISSUE TYPE Feature Pull Request COMPONENT NAME cloudtrail ADDITIONAL INFORMATION Note: tests are currently not run in CI. Reviewed-by: Joseph Torcasso --- cloudtrail.py | 115 ++++++++++++++++++++++++-------------------------- 1 file changed, 54 insertions(+), 61 deletions(-) diff --git a/cloudtrail.py b/cloudtrail.py index d30466710eb..df95d5bfb7b 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -14,9 +14,9 @@ description: - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled. author: - - Ansible Core Team - - Ted Timmons (@tedder) - - Daniel Shepherd (@shepdelacreme) + - Ansible Core Team + - Ted Timmons (@tedder) + - Daniel Shepherd (@shepdelacreme) options: state: description: @@ -88,16 +88,13 @@ - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html). type: str - tags: - description: - - A hash/dictionary of tags to be applied to the CloudTrail resource. - - Remove completely or specify an empty dictionary to remove all tags. 
- default: {} - type: dict +notes: + - The I(purge_tags) option was added in release 4.0.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -251,11 +248,12 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (camel_dict_to_snake_dict, - ansible_dict_to_boto3_tag_list, - boto3_tag_list_to_ansible_dict, - ) +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def get_kms_key_aliases(module, client, keyId): @@ -293,7 +291,7 @@ def create_trail(module, client, ct_params): return resp -def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False): +def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): """ Creates, updates, removes tags on a CloudTrail resource @@ -304,45 +302,35 @@ def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False): curr_tags : Dict of the current tags on resource, if any dry_run : true/false to determine if changes will be made if needed """ - adds = [] - removes = [] - updates = [] - changed = False - - if curr_tags is None: - # No current tags so just convert all to a tag list - adds = ansible_dict_to_boto3_tag_list(tags) - else: - curr_keys = set(curr_tags.keys()) - new_keys = set(tags.keys()) - add_keys = new_keys - curr_keys - remove_keys = curr_keys - new_keys - update_keys = dict() - for k in curr_keys.intersection(new_keys): - if curr_tags[k] != tags[k]: - update_keys.update({k: tags[k]}) - - adds = get_tag_list(add_keys, tags) - removes = get_tag_list(remove_keys, curr_tags) - updates = get_tag_list(update_keys, tags) - - if removes or updates: - changed = True - if not dry_run: - try: - client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to remove tags from Trail") - if updates or adds: - changed = True - if not dry_run: - try: - client.add_tags(ResourceId=trail_arn, TagsList=updates + adds) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to add tags to Trail") + if tags is None: + return False + + curr_tags = curr_tags or {} - return changed + tags_to_add, tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags) + if not tags_to_add and not tags_to_remove: + return False + + if module.check_mode: + return True + + if tags_to_remove: + remove = {k: curr_tags[k] for k in tags_to_remove} + tags_to_remove = ansible_dict_to_boto3_tag_list(remove) + try: + client.remove_tags(ResourceId=trail_arn, TagsList=tags_to_remove) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to remove tags from Trail") + + if tags_to_add: + tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add) + try: + client.add_tags(ResourceId=trail_arn, TagsList=tags_to_add) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to add tags to Trail") + + return True def get_tag_list(keys, tags): @@ -461,7 +449,8 @@ def main(): 
cloudwatch_logs_role_arn=dict(), cloudwatch_logs_log_group_arn=dict(), kms_key_id=dict(), - tags=dict(default={}, type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool') ) required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])] @@ -475,6 +464,7 @@ def main(): elif module.params['state'] in ('absent', 'disabled'): state = 'absent' tags = module.params['tags'] + purge_tags = module.params['purge_tags'] enable_logging = module.params['enable_logging'] ct_params = dict( Name=module.params['name'], @@ -586,13 +576,16 @@ def main(): set_logging(module, client, name=ct_params['Name'], action='stop') # Check if we need to update tags on resource - tag_dry_run = False - if module.check_mode: - tag_dry_run = True - tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run) + tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], + purge_tags=purge_tags) if tags_changed: + updated_tags = dict() + if not purge_tags: + updated_tags = trail['tags'] + updated_tags.update(tags) results['changed'] = True - trail['tags'] = tags + trail['tags'] = updated_tags + # Populate trail facts in output results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) @@ -601,10 +594,10 @@ def main(): results['changed'] = True results['exists'] = True if not module.check_mode: + if tags: + ct_params['TagList'] = ansible_dict_to_boto3_tag_list(tags) # If we aren't in check_mode then actually create it created_trail = create_trail(module, client, ct_params) - # Apply tags - tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN']) # Get the trail status try: status_resp = client.get_trail_status(Name=created_trail['Name']) From a1c66a59dc8f5b91ed1651116498b4026437fab2 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 7 Jun 2022 09:38:08 +0200 Subject: [PATCH 481/683] aws_codebuild - Add resource_tags parameter and deprecate tags (#1221) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit aws_codebuild - Add resource_tags parameter and deprecate tags SUMMARY aws_codebuild currently uses the boto3 style 'list of dictionaries' format rather than the usual dictionary format. Add a resource_tags parameter that accepts the usual dictionary format Add the purge_tags parameter deprecate the tags parameter in preparation for switching it to the usual dict format expand integration tests for tags and description make source and artifacts optional unless creating a new project fix bug with inconsistent "changed" state due to tag order not being guaranteed ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME aws_codebuild ADDITIONAL INFORMATION The (boto3) tags format in the return value when describing a project makes no guarantees about the order it'll return the key/value pairs. As such, when multiple tags were set the naïve original == new comparison would sporadically return "changed" when no change had occurred. Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- aws_codebuild.py | 104 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 92 insertions(+), 12 deletions(-) diff --git a/aws_codebuild.py b/aws_codebuild.py index 9462d180e78..92e65ec1fe0 100644 --- a/aws_codebuild.py +++ b/aws_codebuild.py @@ -31,7 +31,7 @@ source: description: - Configure service and location for the build input source. 
- required: true + - I(source) is required when creating a new project. suboptions: type: description: @@ -58,7 +58,7 @@ artifacts: description: - Information about the build output artifacts for the build project. - required: true + - I(artifacts) is required when creating a new project. suboptions: type: description: @@ -137,6 +137,11 @@ tags: description: - A set of tags for the build project. + - Mutually exclusive with the I(resource_tags) parameter. + - In release 6.0.0 this parameter will accept a simple dictionary + instead of the list of dictionaries format. To use the simple + dictionary format prior to release 6.0.0 the I(resource_tags) can + be used instead of I(tags). type: list elements: dict suboptions: @@ -156,9 +161,30 @@ default: 'present' choices: ['present', 'absent'] type: str + resource_tags: + description: + - A dictionary representing the tags to be applied to the build project. + - If the I(resource_tags) parameter is not set then tags will not be modified. + - Mutually exclusive with the I(tags) parameter. + type: dict + required: false + purge_tags: + description: + - If I(purge_tags=true) and I(tags) is set, existing tags will be purged + from the resource to match exactly what is defined by I(tags) parameter. + - If the I(resource_tags) parameter is not set then tags will not be modified, even + if I(purge_tags=True). + - Tag keys beginning with C(aws:) are reserved by Amazon and can not be + modified. As such they will be ignored for the purposes of the + I(purge_tags) parameter. See the Amazon documentation for more information + U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions). + type: bool + default: true + required: false + extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' @@ -275,9 +301,20 @@ type: int sample: 60 tags: - description: Tags added to the project + description: + - Tags added to the project in the boto3 list of dictionaries format. + - I(tags) and I(reource_tags) represent the same information in + different formats. returned: when configured type: list + reource_tags: + description: + - A simple dictionary representing the tags added to the project. + - I(tags) and I(reource_tags) represent the same information in + different formats. 
+ returned: when configured + type: dict + version_added: 4.0.0 created: description: Timestamp of the create time of the project returned: always @@ -285,8 +322,13 @@ sample: "2018-04-17T16:56:03.245000+02:00" ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict try: @@ -312,20 +354,36 @@ def create_or_update_project(client, params, module): if 'name' in found: found_project = found + found_tags = found_project.pop('tags', []) + # Support tagging using a dict instead of the list of dicts + if params['resource_tags'] is not None: + if params['purge_tags']: + tags = dict() + else: + tags = boto3_tag_list_to_ansible_dict(found_tags) + tags.update(params['resource_tags']) + formatted_update_params['tags'] = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') + resp = update_project(client=client, params=formatted_update_params, module=module) updated_project = resp['project'] # Prep both dicts for sensible change comparison: found_project.pop('lastModified') updated_project.pop('lastModified') - if 'tags' not in updated_project: - updated_project['tags'] = [] + updated_tags = updated_project.pop('tags', []) + found_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(found_tags) + updated_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(updated_tags) if updated_project != found_project: changed = True + updated_project['tags'] = updated_tags return resp, changed # Or create new project: try: + if params['source'] is None or params['artifacts'] is None: + module.fail_json( + "The source and artifacts parameters must be provided when " + "creating a new project. 
No existing project was found.") resp = client.create_project(**formatted_create_params) changed = True return resp, changed @@ -367,18 +425,30 @@ def describe_project(client, name, module): module.fail_json_aws(e, msg="Unable to describe CodeBuild projects") +def format_project_result(project_result): + formated_result = camel_dict_to_snake_dict(project_result) + project = project_result.get('project', {}) + if project: + tags = project.get('tags', []) + formated_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags) + formated_result['ORIGINAL'] = project_result + return formated_result + + def main(): argument_spec = dict( name=dict(required=True), description=dict(), - source=dict(required=True, type='dict'), - artifacts=dict(required=True, type='dict'), + source=dict(type='dict'), + artifacts=dict(type='dict'), cache=dict(type='dict'), environment=dict(type='dict'), service_role=dict(), timeout_in_minutes=dict(type='int', default=60), encryption_key=dict(no_log=False), tags=dict(type='list', elements='dict'), + resource_tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), vpc_config=dict(type='dict'), state=dict(choices=['present', 'absent'], default='present') ) @@ -389,6 +459,15 @@ def main(): state = module.params.get('state') changed = False + if module.params['tags']: + module.deprecate( + 'The tags parameter currently uses a non-standard format and has ' + 'been deprecated. In release 6.0.0 this paramater will accept ' + 'a simple key/value pair dictionary instead of the current list ' + 'of dictionaries. It is recommended to migrate to using the ' + 'resource_tags parameter which already accepts the simple dictionary ' + 'format.', version='6.0.0', collection_name='community.aws') + if state == 'present': project_result, changed = create_or_update_project( client=client_conn, @@ -397,7 +476,8 @@ def main(): elif state == 'absent': project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result)) + formatted_result = format_project_result(project_result) + module.exit_json(changed=changed, **formatted_result) if __name__ == '__main__': From eaedf010154d7054216927cb734afd779e40de5f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 7 Jun 2022 15:31:37 +0200 Subject: [PATCH 482/683] wafv2_web_acl - fix return values (#1216) wafv2_web_acl - fix return values SUMMARY split integration tests from full wafv2 tests relax botocore requirement to bare minimum required return web acl info on update consistently return web acl info as described in documentation (create would nest it under "web_acl") fix "changed" value when description not specified ISSUE TYPE Bugfix Pull Request COMPONENT NAME wafv2_web_acl ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- wafv2_web_acl.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index b11b0872b0e..d225a0ae890 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -93,7 +93,7 @@ - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules by providing - the key of the body dictionary element. - Each element must have a unique dict key and in the dict two keys for I(content_type) and I(content). 
- - Requires botocore >= 1.21.0 + - Requires botocore >= 1.20.40 type: dict version_added: 3.1.0 purge_rules: @@ -341,7 +341,6 @@ def update(self, default_action, description, rules, sampled_requests, cloudwatc 'Scope': self.scope, 'Id': self.id, 'DefaultAction': default_action, - 'Description': description, 'Rules': rules, 'VisibilityConfig': { 'SampledRequestsEnabled': sampled_requests, @@ -351,6 +350,9 @@ def update(self, default_action, description, rules, sampled_requests, cloudwatc 'LockToken': self.locktoken } + if description: + req_obj['Description'] = description + if custom_response_bodies: req_obj['CustomResponseBodies'] = custom_response_bodies @@ -358,7 +360,9 @@ def update(self, default_action, description, rules, sampled_requests, cloudwatc response = self.wafv2.update_web_acl(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") - return response + + self.existing_acl, self.id, self.locktoken = self.get_web_acl() + return self.existing_acl def remove(self): try: @@ -433,6 +437,18 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me return self.existing_acl +def format_result(result): + + # We were returning details of the Web ACL inside a "web_acl" parameter on + # creation, keep returning it to avoid breaking existing playbooks, but also + # return what the docs said we return (and returned when no change happened) + retval = dict(result) + if "WebACL" in retval: + retval.update(retval["WebACL"]) + + return camel_dict_to_snake_dict(retval, ignore_list=['tags']) + + def main(): arg_spec = dict( @@ -471,7 +487,7 @@ def main(): custom_response_bodies = module.params.get("custom_response_bodies") if custom_response_bodies: - module.require_botocore_at_least('1.21.0', reason='to set custom response bodies') + module.require_botocore_at_least('1.20.40', reason='to set custom response bodies') custom_response_bodies = {} for custom_name, body in module.params.get("custom_response_bodies").items(): @@ -497,8 +513,8 @@ def main(): if state == 'present': if web_acl.get(): change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) - change = change or web_acl.get().get('WebACL').get('Description') != description - change = change or web_acl.get().get('WebACL').get('DefaultAction') != default_action + change = change or (description and web_acl.get().get('WebACL').get('Description') != description) + change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action) if change and not check_mode: retval = web_acl.update( @@ -548,7 +564,7 @@ def main(): if not check_mode: retval = web_acl.remove() - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) + module.exit_json(changed=change, **format_result(retval)) if __name__ == '__main__': From a0519b970294e808f391ebe9dc1e45c2155d2b5a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 7 Jun 2022 22:25:13 +0200 Subject: [PATCH 483/683] wafv2_rule_group - tagging (#1210) wafv2_rule_group - support for managing tags SUMMARY Add support for returning tags Add support for updating tags Add support for purge_tags Add fix for updating description when rules don't change. 
Ensure description of rule group is returned when updates happen Split integration tests from full wafv2 tests (full tests are broken) ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME wafv2_rule_group wafv2_rule_group_info ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- wafv2_rule_group.py | 77 ++++++++++++++++++++++++---------------- wafv2_rule_group_info.py | 20 +++++++---- 2 files changed, 60 insertions(+), 37 deletions(-) diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 179ac2e85f2..5a6cafdf1dd 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -60,10 +60,6 @@ description: - capacity of wafv2 rule group. type: int - tags: - description: - - tags for wafv2 rule group. - type: dict purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. @@ -73,6 +69,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.tags ''' @@ -213,15 +210,18 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags class RuleGroup: def __init__(self, wafv2, name, scope, fail_json_aws): self.wafv2 = wafv2 + self.id = None self.name = name self.scope = scope self.fail_json_aws = fail_json_aws - self.existing_group, self.id, self.locktoken = self.get_group() + self.existing_group = self.get_group() def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): req_obj = { @@ -244,32 +244,38 @@ def update(self, description, rules, sampled_requests, cloudwatch_metrics, metri response = self.wafv2.update_rule_group(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 rule group.") - return response + return self.refresh_group() def get_group(self): - response = self.list() - id = None - locktoken = None - arn = None + if self.id is None: + response = self.list() + + for item in response.get('RuleGroups'): + if item.get('Name') == self.name: + self.id = item.get('Id') + self.locktoken = item.get('LockToken') + self.arn = item.get('ARN') - for item in response.get('RuleGroups'): - if item.get('Name') == self.name: - id = item.get('Id') - locktoken = item.get('LockToken') - arn = item.get('ARN') + return self.refresh_group() + def refresh_group(self): existing_group = None - if id: + if self.id: try: - existing_group = self.wafv2.get_rule_group( + response = self.wafv2.get_rule_group( Name=self.name, Scope=self.scope, - Id=id + Id=self.id ) + existing_group = response.get('RuleGroup') + self.locktoken = response.get('LockToken') except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 rule group.") - return existing_group, id, locktoken + tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws) + existing_group['tags'] = tags or {} + + return existing_group def list(self): return wafv2_list_rule_groups(self.wafv2, self.scope, self.fail_json_aws) @@ -315,7 +321,7 @@ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metr except (BotoCoreError, ClientError) as e: 
self.fail_json_aws(e, msg="Failed to create wafv2 rule group.") - self.existing_group, self.id, self.locktoken = self.get_group() + self.existing_group = self.get_group() return self.existing_group @@ -332,8 +338,9 @@ def main(): sampled_requests=dict(type='bool', default=False), cloudwatch_metrics=dict(type='bool', default=True), metric_name=dict(type='str'), - tags=dict(type='dict'), - purge_rules=dict(default=True, type='bool') + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), + purge_rules=dict(default=True, type='bool'), ) module = AnsibleAWSModule( @@ -352,6 +359,7 @@ def main(): cloudwatch_metrics = module.params.get("cloudwatch_metrics") metric_name = module.params.get("metric_name") tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") purge_rules = module.params.get("purge_rules") check_mode = module.check_mode @@ -363,17 +371,24 @@ def main(): if not metric_name: metric_name = name - rule_group = RuleGroup(module.client('wafv2'), name, scope, module.fail_json_aws) + wafv2 = module.client('wafv2') + rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws) change = False retval = {} if state == 'present': if rule_group.get(): - change, rules = compare_priority_rules(rule_group.get().get('RuleGroup').get('Rules'), rules, purge_rules, state) - change = change or rule_group.get().get('RuleGroup').get('Description') != description - - if change and not check_mode: + tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags, + module.fail_json_aws, module.check_mode) + rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) + description_change = bool(description) and (rule_group.get().get('Description') != description) + change = tagging_change or rules_change or description_change + retval = rule_group.get() + if module.check_mode: + # In check mode nothing changes... + pass + elif rules_change or description_change: retval = rule_group.update( description, rules, @@ -381,8 +396,8 @@ def main(): cloudwatch_metrics, metric_name ) - else: - retval = rule_group.get().get('RuleGroup') + elif tagging_change: + retval = rule_group.refresh_group() else: change = True @@ -401,7 +416,7 @@ def main(): if rule_group.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(rule_group.get().get('RuleGroup').get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) if change and not check_mode: retval = rule_group.update( description, @@ -415,7 +430,7 @@ def main(): if not check_mode: retval = rule_group.remove() - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags'])) if __name__ == '__main__': diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index 47d1e68cc55..1daa2dd1cf7 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -17,9 +17,8 @@ options: state: description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - required: true + - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01. 
+ required: false type: str name: description: @@ -34,8 +33,8 @@ type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' @@ -102,6 +101,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags def get_rule_group(wafv2, name, scope, id, fail_json_aws): @@ -118,7 +118,7 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), + state=dict(type='str', required=False), name=dict(type='str', required=True), scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) ) @@ -134,6 +134,11 @@ def main(): wafv2 = module.client('wafv2') + if state: + module.deprecate( + 'The state parameter does nothing, has been deprecated, and will be removed in a future release.', + version='6.0.0', collection_name='community.aws') + # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None @@ -142,11 +147,14 @@ def main(): for item in response.get('RuleGroups'): if item.get('Name') == name: id = item.get('Id') + arn = item.get('ARN') existing_group = None if id: existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) + tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) + retval['tags'] = tags or {} module.exit_json(**retval) From cc6f6f4ce392c28ac962bb4a8ef8aa2120d0d176 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 8 Jun 2022 13:01:47 +0200 Subject: [PATCH 484/683] Tagging - wafv2_web_acl add support for managing and purging tags (#1218) Tagging - wafv2_web_acl add support for managing and purging tags SUMMARY Add support for returning tags Add support for updating tags Add support for purge_tags ISSUE TYPE Feature Pull Request COMPONENT NAME wafv2_web_acl wafv2_web_acl_info ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- wafv2_web_acl.py | 33 ++++++++++++++++++++++----------- wafv2_web_acl_info.py | 5 +++++ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index d225a0ae890..c51a04e49e7 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -57,10 +57,6 @@ - Name of cloudwatch metrics. - If not given and cloudwatch_metrics is enabled, the name of the web acl itself will be taken. type: str - tags: - description: - - tags for wafv2 web acl. - type: dict rules: description: - The Rule statements used to identify the web requests that you want to allow, block, or count. @@ -102,9 +98,13 @@ default: yes type: bool +notes: + - Support for the I(purge_tags) parameter was added in release 4.0.0. 
+ extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -323,6 +323,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict @@ -403,6 +405,8 @@ def get_web_acl(self): ) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") + tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) + existing_acl['tags'] = tags return existing_acl, id, locktoken def list(self): @@ -461,9 +465,10 @@ def main(): sampled_requests=dict(type='bool', default=False), cloudwatch_metrics=dict(type='bool', default=True), metric_name=dict(type='str'), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), custom_response_bodies=dict(type='dict'), - purge_rules=dict(default=True, type='bool') + purge_rules=dict(default=True, type='bool'), ) module = AnsibleAWSModule( @@ -482,6 +487,7 @@ def main(): cloudwatch_metrics = module.params.get("cloudwatch_metrics") metric_name = module.params.get("metric_name") tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") purge_rules = module.params.get("purge_rules") check_mode = module.check_mode @@ -506,12 +512,14 @@ def main(): if not metric_name: metric_name = name - web_acl = WebACL(module.client('wafv2'), name, scope, module.fail_json_aws) + wafv2 = module.client('wafv2') + web_acl = WebACL(wafv2, name, scope, module.fail_json_aws) change = False retval = {} if state == 'present': if web_acl.get(): + tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode) change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state) change = change or (description and web_acl.get().get('WebACL').get('Description') != description) change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action) @@ -526,9 +534,12 @@ def main(): metric_name, custom_response_bodies ) - + elif tags_changed: + retval, id, locktoken = web_acl.get_web_acl() else: - retval = web_acl.get().get('WebACL') + retval = web_acl.get() + + change |= tags_changed else: change = True diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 3fb91fbd802..86fd603e7cc 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -96,6 +96,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls @@ -132,15 +133,19 @@ def main(): response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) id = None + arn = None 
retval = {} for item in response.get('WebACLs'): if item.get('Name') == name: id = item.get('Id') + arn = item.get('ARN') if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_acl.get('WebACL')) + tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) + retval['tags'] = tags module.exit_json(**retval) From e1e61c237b2ea94c65d10386429946223a8f2196 Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 8 Jun 2022 07:50:35 -0400 Subject: [PATCH 485/683] ecs_* - fix idempotence bug in ecs_service and dont require ``cluster`` (#1212) ecs_* - fix idempotence bug in ecs_service and dont require ``cluster`` SUMMARY Don't require cluster param and use cluster name 'default' when not specified (see docs). Fix bug when comparing health_check_grace_period_seconds when not input by user. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ecs_service ecs_task ADDITIONAL INFORMATION Split up from #1209 to backport to stable-2 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ecs_service.py | 9 ++++++--- ecs_task.py | 6 ++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index b2f681af2ba..9327191e80c 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -38,8 +38,10 @@ cluster: description: - The name of the cluster in which the service exists. + - If not specified, the cluster name will be C(default). required: false type: str + default: 'default' task_definition: description: - The task definition the service will run. @@ -657,8 +659,9 @@ def is_matching_service(self, expected, existing): if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: return False - if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): - return False + if expected.get('health_check_grace_period_seconds'): + if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): + return False if (expected['load_balancers'] or []) != existing['loadBalancers']: return False @@ -766,7 +769,7 @@ def main(): argument_spec = dict( state=dict(required=True, choices=['present', 'absent', 'deleting']), name=dict(required=True, type='str', aliases=['service']), - cluster=dict(required=False, type='str'), + cluster=dict(required=False, type='str', default='default'), task_definition=dict(required=False, type='str'), load_balancers=dict(required=False, default=[], type='list', elements='dict'), desired_count=dict(required=False, type='int'), diff --git a/ecs_task.py b/ecs_task.py index b4c625df712..5e8eda99dd3 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -28,8 +28,10 @@ cluster: description: - The name of the cluster to run the task on. - required: True + - If not specified, the cluster name will be C(default). + required: False type: str + default: 'default' task_definition: description: - The task definition to start, run or stop. 
@@ -342,7 +344,7 @@ def ecs_task_long_format_enabled(self): def main(): argument_spec = dict( operation=dict(required=True, choices=['run', 'start', 'stop']), - cluster=dict(required=True, type='str'), # R S P + cluster=dict(required=False, type='str', default='default'), # R S P task_definition=dict(required=False, type='str'), # R* S* overrides=dict(required=False, type='dict'), # R S count=dict(required=False, type='int'), # R From 561c0072965fbf124fb38312104d386b1938dec8 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 9 Jun 2022 11:55:39 +0200 Subject: [PATCH 486/683] ec2_vpc_vgw - Add support for purge_tags (#1232) ec2_vpc_vgw - Add support for purge_tags SUMMARY Adds support for purge_tags to ec2_vpc_vgw Updates behaviour so that tags must be explicitly set to {} to remove tags Updates ec2_vpc_vgw to pass tags as part of the VGW creation rather than tagging the VGW after creation. ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_vpc_vgw ec2_vpc_vgw_info ADDITIONAL INFORMATION return docs changelog Reviewed-by: Alina Buzachis --- ec2_vpc_vgw.py | 182 ++++++++++++++++---------------------------- ec2_vpc_vgw_info.py | 111 ++++++++++++++++++--------- 2 files changed, 141 insertions(+), 152 deletions(-) diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index b46d0f9ac47..126f5ff920d 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -8,7 +8,7 @@ DOCUMENTATION = ''' module: ec2_vpc_vgw -short_description: Create and delete AWS VPN Virtual Gateways. +short_description: Create and delete AWS VPN Virtual Gateways version_added: 1.0.0 description: - Creates AWS VPN Virtual Gateways @@ -18,52 +18,50 @@ options: state: description: - - present to ensure resource is created. - - absent to remove resource + - C(present) to ensure resource is created. + - C(absent) to remove resource. default: present choices: [ "present", "absent"] type: str name: description: - - name of the vgw to be created or deleted + - Name of the VGW to be created or deleted. type: str type: description: - - type of the virtual gateway to be created + - Type of the virtual gateway to be created. choices: [ "ipsec.1" ] default: "ipsec.1" type: str vpn_gateway_id: description: - - vpn gateway id of an existing virtual gateway + - VPN gateway ID of an existing virtual gateway. type: str vpc_id: description: - - the vpc-id of a vpc to attach or detach + - The ID of a VPC to attach or detach to the VGW. type: str asn: description: - - the BGP ASN of the amazon side + - The BGP ASN on the Amazon side. type: int wait_timeout: description: - - number of seconds to wait for status during vpc attach and detach + - Number of seconds to wait for status during VPC attach and detach. default: 320 type: int - tags: - description: - - dictionary of resource tags - aliases: [ "resource_tags" ] - type: dict -author: Nick Aslanidis (@naslanidis) +notes: + - Support for I(purge_tags) was added in release 4.0.0. 
+author: + - Nick Aslanidis (@naslanidis) extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws - + - amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.tags ''' EXAMPLES = ''' -- name: Create a new vgw attached to a specific VPC +- name: Create a new VGW attached to a specific VPC community.aws.ec2_vpc_vgw: state: present region: ap-southeast-2 @@ -73,7 +71,7 @@ type: ipsec.1 register: created_vgw -- name: Create a new unattached vgw +- name: Create a new unattached VGW community.aws.ec2_vpc_vgw: state: present region: ap-southeast-2 @@ -85,7 +83,7 @@ owner: ABC register: created_vgw -- name: Remove a new vgw using the name +- name: Remove a new VGW using the name community.aws.ec2_vpc_vgw: state: absent region: ap-southeast-2 @@ -94,7 +92,7 @@ type: ipsec.1 register: deleted_vgw -- name: Remove a new vgw using the vpn_gateway_id +- name: Remove a new VGW using the vpn_gateway_id community.aws.ec2_vpc_vgw: state: absent region: ap-southeast-2 @@ -104,10 +102,36 @@ ''' RETURN = ''' -result: - description: The result of the create, or delete action. +vgw: + description: A description of the VGW returned: success type: dict + contains: + id: + description: The ID of the VGW. + type: str + returned: success + example: "vgw-0123456789abcdef0" + state: + description: The state of the VGW. + type: str + returned: success + example: "available" + tags: + description: A dictionary representing the tags attached to the VGW + type: dict + returned: success + example: { "Name": "ansible-test-ec2-vpc-vgw" } + type: + description: The type of VPN connection the virtual private gateway supports. + type: str + returned: success + example: "ipsec.1" + vpc_id: + description: The ID of the VPC to which the VGW is attached. + type: str + returned: success + example: vpc-123456789abcdef01 ''' import time @@ -120,7 +144,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict # AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes' @@ -159,8 +186,8 @@ def get_vgw_info(vgws): 'tags': dict() } - for tag in vgw['Tags']: - vgw_info['tags'][tag['Key']] = tag['Value'] + if vgw['Tags']: + vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags']) if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached': vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId'] @@ -234,6 +261,9 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): def create_vgw(client, module): params = dict() params['Type'] = module.params.get('type') + tags = module.params.get('tags') or {} + tags['Name'] = module.params.get('name') + params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway']) if module.params.get('asn'): params['AmazonSideAsn'] = module.params.get('asn') @@ -267,92 +297,6 @@ def delete_vgw(client, module, vpn_gateway_id): return result -def create_tags(client, module, vpn_gateway_id): - params = dict() - - try: - response = 
client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module), aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to add tags") - - result = response - return result - - -def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None): - params = dict() - - try: - if tags_to_delete: - response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete, aws_retry=True) - else: - response = client.delete_tags(Resources=[vpn_gateway_id], aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to remove tags from gateway') - - result = response - return result - - -def load_tags(module): - tags = [] - - if module.params.get('tags'): - for name, value in module.params.get('tags').items(): - tags.append({'Key': name, 'Value': str(value)}) - tags.append({'Key': "Name", 'Value': module.params.get('name')}) - else: - tags.append({'Key': "Name", 'Value': module.params.get('name')}) - return tags - - -def find_tags(client, module, resource_id=None): - - if resource_id: - try: - response = client.describe_tags(aws_retry=True, Filters=[ - {'Name': 'resource-id', 'Values': [resource_id]}, - ]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe tags searching by resource') - - result = response - return result - - -def check_tags(client, module, existing_vgw, vpn_gateway_id): - params = dict() - params['Tags'] = module.params.get('tags') - vgw = existing_vgw - changed = False - tags_list = {} - - # format tags for comparison - for tags in existing_vgw[0]['Tags']: - if tags['Key'] != 'Name': - tags_list[tags['Key']] = tags['Value'] - - # if existing tags don't match the tags arg, delete existing and recreate with new list - if params['Tags'] is not None and tags_list != params['Tags']: - delete_tags(client, module, vpn_gateway_id) - create_tags(client, module, vpn_gateway_id) - vgw = find_vgw(client, module) - changed = True - - # if no tag args are supplied, delete any existing tags with the exception of the name tag - if params['Tags'] is None and tags_list != {}: - tags_to_delete = [] - for tags in existing_vgw[0]['Tags']: - if tags['Key'] != 'Name': - tags_to_delete.append(tags) - - delete_tags(client, module, vpn_gateway_id, tags_to_delete) - vgw = find_vgw(client, module) - changed = True - - return vgw, changed - - def find_vpc(client, module): params = dict() params['vpc_id'] = module.params.get('vpc_id') @@ -409,7 +353,15 @@ def ensure_vgw_present(client, module): if existing_vgw != []: vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id) + desired_tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + if desired_tags is None: + desired_tags = dict() + purge_tags = False + tags = dict(Name=module.params.get('name')) + tags.update(desired_tags) + changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway', + tags=tags, purge_tags=purge_tags) # if a vpc_id was provided, check if it exists and if it's attached if params['VpcId']: @@ -446,9 +398,6 @@ def ensure_vgw_present(client, module): changed = True vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId'] - # tag the new virtual gateway - create_tags(client, module, vpn_gateway_id) - # if a vpc-id was supplied, attempt to attach it to the vgw if 
params['VpcId']: attached_vgw = attach_vgw(client, module, vpn_gateway_id) @@ -559,6 +508,7 @@ def main(): wait_timeout=dict(type='int', default=320), type=dict(default='ipsec.1', choices=['ipsec.1']), tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[['state', 'present', ['name']]]) diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index aa4a4719ffe..a84b07bf589 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: Gather information about virtual gateways in AWS description: - - Gather information about virtual gateways in AWS. + - Gather information about virtual gateways in AWS. options: filters: description: @@ -24,11 +24,11 @@ - Get details of a specific Virtual Gateway ID. This value should be provided as a list. type: list elements: str -author: "Nick Aslanidis (@naslanidis)" +author: + - "Nick Aslanidis (@naslanidis)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -61,31 +61,64 @@ description: The virtual gateways for the account. returned: always type: list - sample: [ - { - "state": "available", - "tags": [ - { - "key": "Name", - "value": "TEST-VGW" - } - ], - "type": "ipsec.1", - "vpc_attachments": [ - { - "state": "attached", - "vpc_id": "vpc-22a93c74" - } - ], - "vpn_gateway_id": "vgw-23e3d64e" - } - ] - -changed: - description: True if listing the virtual gateways succeeds. - returned: always - type: bool - sample: "false" + elements: dict + contains: + vpn_gateway_id: + description: The ID of the VGW. + type: str + returned: success + example: "vgw-0123456789abcdef0" + state: + description: The current state of the VGW. + type: str + returned: success + example: "available" + type: + description: The type of VPN connection the VGW supports. + type: str + returned: success + example: "ipsec.1" + vpc_attachments: + description: A description of the attachment of VPCs to the VGW. + type: list + elements: dict + returned: success + contains: + state: + description: The current state of the attachment. + type: str + returned: success + example: available + vpc_id: + description: The ID of the VPC. + type: str + returned: success + example: vpc-12345678901234567 + tags: + description: + - A list of dictionaries representing the tags attached to the VGW. + - Represents the same details as I(resource_tags). + type: list + elements: dict + returned: success + contains: + key: + description: The key of the tag. + type: str + returned: success + example: MyKey + value: + description: The value of the tag. + type: str + returned: success + example: MyValue + resource_tags: + description: + - A dictionary representing the tags attached to the VGW. + - Represents the same details as I(tags). 
+ type: dict + returned: success + example: {"MyKey": "MyValue"} ''' try: @@ -97,14 +130,20 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def get_virtual_gateway_info(virtual_gateway): - virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'], - 'State': virtual_gateway['State'], - 'Type': virtual_gateway['Type'], - 'VpcAttachments': virtual_gateway['VpcAttachments'], - 'Tags': virtual_gateway.get('Tags', [])} + tags = virtual_gateway.get('Tags', []) + resource_tags = boto3_tag_list_to_ansible_dict(tags) + virtual_gateway_info = dict( + VpnGatewayId=virtual_gateway['VpnGatewayId'], + State=virtual_gateway['State'], + Type=virtual_gateway['Type'], + VpcAttachments=virtual_gateway['VpcAttachments'], + Tags=tags, + ResourceTags=resource_tags, + ) return virtual_gateway_info @@ -122,7 +161,7 @@ def list_virtual_gateways(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to list gateways") - return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw)) + return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags']) for vgw in all_virtual_gateways['VpnGateways']] From 7d62367f913418956924f95af521afcb4a403617 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 11 Jun 2022 10:18:55 +0200 Subject: [PATCH 487/683] cloudwatchlogs_log_group - Tagging support (#1233) cloudwatchlogs_log_group - Tagging support SUMMARY Ensure cloudwatchlogs_log_group returns values defined in RETURN docs Add support for updating tags (including purge_tags) split cloudwatchlogs_log_group tests Add some basic integration tests for cloudwatchlogs_log_group_info ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME cloudwatchlogs_log_group cloudwatchlogs_log_group_info ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- cloudwatchlogs_log_group.py | 226 +++++++++++++++++++------------ cloudwatchlogs_log_group_info.py | 39 ++++-- 2 files changed, 165 insertions(+), 100 deletions(-) diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index 295ff48e669..f237223901f 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -13,58 +13,55 @@ version_added: 1.0.0 short_description: create or delete log_group in CloudWatchLogs notes: - - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html). + - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html). + - Support for I(purge_tags) was added in release 4.0.0. description: - - Create or delete log_group in CloudWatchLogs. + - Create or delete log_group in CloudWatchLogs. author: - - Willian Ricardo (@willricardo) + - Willian Ricardo (@willricardo) options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - default: present - required: false - type: str - log_group_name: - description: - - The name of the log group. - required: true - type: str - kms_key_id: - description: - - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. - required: false - type: str - tags: - description: - - The key-value pairs to use for the tags. 
- required: false - type: dict - retention: - description: - - The number of days to retain the log events in the specified log group. - - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]" - - Mutually exclusive with I(purge_retention_policy). - required: false - type: int - purge_retention_policy: - description: - - "Whether to purge the retention policy or not." - - "Mutually exclusive with I(retention) and I(overwrite)." - default: false - required: false - type: bool - overwrite: - description: - - Whether an existing log group should be overwritten on create. - - Mutually exclusive with I(purge_retention_policy). - default: false - required: false - type: bool + state: + description: + - Whether the rule is present or absent. + choices: ["present", "absent"] + default: present + required: false + type: str + log_group_name: + description: + - The name of the log group. + required: true + type: str + kms_key_id: + description: + - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + required: false + type: str + retention: + description: + - The number of days to retain the log events in the specified log group. + - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]" + - Mutually exclusive with I(purge_retention_policy). + required: false + type: int + purge_retention_policy: + description: + - "Whether to purge the retention policy or not." + - "Mutually exclusive with I(retention) and I(overwrite)." + default: false + required: false + type: bool + overwrite: + description: + - Whether an existing log group should be overwritten on create. + - Mutually exclusive with I(purge_retention_policy). + default: false + required: false + type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' @@ -96,6 +93,7 @@ description: Return the list of complex objects representing log groups returned: success type: complex + version_added: 4.0.0 contains: log_group_name: description: The name of the log group. @@ -125,6 +123,10 @@ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. returned: always type: str + tags: + description: A dictionary representing the tags on the log group. + returned: always + type: dict ''' try: @@ -135,6 +137,8 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): @@ -154,15 +158,11 @@ def create_log_group(client, log_group_name, kms_key_id, tags, retention, module log_group_name=log_group_name, retention=retention, module=module) - desc_log_group = describe_log_group(client=client, - log_group_name=log_group_name, - module=module) + found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module) - if 'logGroups' in desc_log_group: - for i in desc_log_group['logGroups']: - if log_group_name == i['logGroupName']: - return i - module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!") + if not found_log_group: + module.fail_json(msg="The aws CloudWatchLogs log group was not created. 
\n please try again!") + return found_log_group def input_retention_policy(client, log_group_name, retention, module): @@ -187,27 +187,69 @@ def delete_retention_policy(client, log_group_name, module): def delete_log_group(client, log_group_name, module): - desc_log_group = describe_log_group(client=client, - log_group_name=log_group_name, - module=module) - try: - if 'logGroups' in desc_log_group: - for i in desc_log_group['logGroups']: - if log_group_name == i['logGroupName']: - client.delete_log_group(logGroupName=log_group_name) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + client.delete_log_group(logGroupName=log_group_name) + except is_boto3_error_code('ResourceNotFoundException'): + return {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) def describe_log_group(client, log_group_name, module): try: desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) - return desc_log_group except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name] + + if not matching_logs: + return {} + + found_log_group = matching_logs[0] + + try: + tags = client.list_tags_log_group(logGroupName=log_group_name) + except is_boto3_error_code('AccessDeniedException'): + tags = {} + module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) + + found_log_group['tags'] = tags.get('tags', {}) + return found_log_group + + +def format_result(found_log_group): + # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group + # Return both to avoid a breaking change. 
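+    # 'tags' is excluded from the snake_case conversion so user-defined tag keys are returned unmodified.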
+ log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags']) + return dict(log_groups=[log_group], **log_group) + + +def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): + if desired_tags is None: + return False + + group_name = module.params.get('log_group_name') + current_tags = found_log_group.get('tags', {}) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags) + + if not tags_to_add and not tags_to_remove: + return False + if module.check_mode: + return True + + try: + if tags_to_remove: + client.untag_log_group(logGroupName=group_name, tags=tags_to_remove) + if tags_to_add: + client.tag_log_group(logGroupName=group_name, tags=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to update tags') + + return True + def main(): argument_spec = dict( @@ -215,7 +257,8 @@ def main(): state=dict(choices=['present', 'absent'], default='present'), kms_key_id=dict(required=False, type='str'), - tags=dict(required=False, type='dict'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(required=False, type='bool', default=True), retention=dict(required=False, type='int'), purge_retention_policy=dict(required=False, type='bool', default=False), overwrite=dict(required=False, type='bool', default=False), @@ -233,12 +276,7 @@ def main(): changed = False # Determine if the log group exists - desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) - found_log_group = {} - for i in desc_log_group.get('logGroups', []): - if module.params['log_group_name'] == i['logGroupName']: - found_log_group = i - break + found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) if state == 'present': if found_log_group: @@ -251,20 +289,29 @@ def main(): tags=module.params['tags'], retention=module.params['retention'], module=module) - elif module.params['purge_retention_policy']: - if found_log_group.get('retentionInDays'): - changed = True - delete_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - elif module.params['retention'] != found_log_group.get('retentionInDays'): - if module.params['retention'] is not None: - changed = True - input_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - retention=module.params['retention'], - module=module) - found_log_group['retentionInDays'] = module.params['retention'] + else: + changed |= ensure_tags(client=logs, + found_log_group=found_log_group, + desired_tags=module.params['tags'], + purge_tags=module.params['purge_tags'], + module=module) + if module.params['purge_retention_policy']: + if found_log_group.get('retentionInDays'): + changed = True + delete_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + elif module.params['retention'] != found_log_group.get('retentionInDays'): + if module.params['retention'] is not None: + changed = True + input_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + retention=module.params['retention'], + module=module) + if changed: + found_log_group = describe_log_group(client=logs, + log_group_name=module.params['log_group_name'], + module=module) elif not found_log_group: changed = True @@ -275,7 +322,8 @@ def main(): retention=module.params['retention'], module=module) - 
module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group)) + result = format_result(found_log_group) + module.exit_json(changed=changed, **result) elif state == 'absent': if found_log_group: diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py index ff80191790d..b3d0ca223b2 100644 --- a/cloudwatchlogs_log_group_info.py +++ b/cloudwatchlogs_log_group_info.py @@ -13,18 +13,17 @@ version_added: 1.0.0 short_description: Get information about log_group in CloudWatchLogs description: - - Lists the specified log groups. You can list all your log groups or filter the results by prefix. + - Lists the specified log groups. You can list all your log groups or filter the results by prefix. author: - - Willian Ricardo (@willricardo) + - Willian Ricardo (@willricardo) options: - log_group_name: - description: - - The name or prefix of the log group to filter by. - type: str + log_group_name: + description: + - The name or prefix of the log group to filter by. + type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -67,6 +66,11 @@ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. returned: always type: str + tags: + description: A dictionary representing the tags on the log group. + returned: always + type: dict + version_added: 4.0.0 ''' try: @@ -77,6 +81,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def describe_log_group(client, log_group_name, module): @@ -86,10 +91,22 @@ def describe_log_group(client, log_group_name, module): try: paginator = client.get_paginator('describe_log_groups') desc_log_group = paginator.paginate(**params).build_full_result() - return desc_log_group except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + for log_group in desc_log_group['logGroups']: + log_group_name = log_group['logGroupName'] + try: + tags = client.list_tags_log_group(logGroupName=log_group_name) + except is_boto3_error_code('AccessDeniedException'): + tags = {} + module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) + log_group['tags'] = tags.get('tags', {}) + + return desc_log_group + def main(): argument_spec = dict( @@ -109,7 +126,7 @@ def main(): final_log_group_snake = [] for log_group in desc_log_group['logGroups']: - final_log_group_snake.append(camel_dict_to_snake_dict(log_group)) + final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags'])) desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) module.exit_json(**desc_log_group_result) From 2f4acfc99108df139b76be2167188f625cf19f90 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 13 Jun 2022 15:34:16 +0200 Subject: [PATCH 488/683] dms_endpoint - Support modifying tags (#1234) dms_endpoint - Support modifying tags SUMMARY Return details of the endpoint, even when not making changes add purge_tags / resource_tags with the usual behaviour Split tests 
Add some very basic idempotency tests support modifying tags ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME dms_endpoint ADDITIONAL INFORMATION changelog Reviewed-by: Alina Buzachis --- dms_endpoint.py | 322 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 283 insertions(+), 39 deletions(-) diff --git a/dms_endpoint.py b/dms_endpoint.py index 6cc3bc3f896..6dcbcc8868c 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -12,8 +12,8 @@ version_added: 1.0.0 short_description: Creates or destroys a data migration services endpoint description: - - Creates or destroys a data migration services endpoint, - that can be used to replicate data. + - Creates or destroys a data migration services endpoint, + that can be used to replicate data. options: state: description: @@ -29,19 +29,19 @@ endpointtype: description: - Type of endpoint we want to manage. + - Required when I(state=present). choices: ['source', 'target'] type: str - required: true enginename: description: - Database engine that we want to use, please refer to the AWS DMS for more information on the supported engines and their limitations. + - Required when I(state=present). choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora', 'redshift', 's3', 'db2', 'azuredb', 'sybase', 'dynamodb', 'mongodb', 'sqlserver'] type: str - required: true username: description: - Username our endpoint will use to connect to the database. @@ -141,11 +141,11 @@ - Required when I(wait=true). type: int author: - - "Rui Moreira (@ruimoreira)" + - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' @@ -165,28 +165,219 @@ wait: false ''' -RETURN = ''' # ''' +RETURN = ''' +endpoint: + description: + - A description of the DMS endpoint. + returned: success + type: dict + contains: + database_name: + description: + - The name of the database at the endpoint. + type: str + returned: success + example: "exampledb" + endpoint_arn: + description: + - The ARN that uniquely identifies the endpoint. + type: str + returned: success + example: "arn:aws:dms:us-east-1:012345678901:endpoint:1234556789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + endpoint_identifier: + description: + - The database endpoint identifier. + type: str + returned: success + example: "ansible-test-12345678-dms" + endpoint_type: + description: + - The type of endpoint. Valid values are C(SOURCE) and C(TARGET). + type: str + returned: success + example: "SOURCE" + engine_display_name: + description: + - The expanded name for the engine name. + type: str + returned: success + example: "Amazon Aurora MySQL" + engine_name: + description: + - The database engine name. + type: str + returned: success + example: "aurora" + kms_key_id: + description: + - An KMS key ID that is used to encrypt the connection parameters for the endpoint. + type: str + returned: success + example: "arn:aws:kms:us-east-1:012345678901:key/01234567-abcd-12ab-98fe-123456789abc" + port: + description: + - The port used to access the endpoint. + type: str + returned: success + example: 3306 + server_name: + description: + - The name of the server at the endpoint. + type: str + returned: success + example: "ansible-test-123456789.example.com" + ssl_mode: + description: + - The SSL mode used to connect to the endpoint. + type: str + returned: success + example: "none" + tags: + description: + - A dictionary representing the tags attached to the endpoint. 
+      type: dict
+      returned: success
+      example: {"MyTagKey": "MyTagValue"}
+    username:
+      description:
+        - The user name used to connect to the endpoint.
+      type: str
+      returned: success
+      example: "example-username"
+    dms_transfer_settings:
+      description:
+        - Additional transfer related settings.
+      type: dict
+      returned: when additional DMS Transfer settings have been configured.
+    s3_settings:
+      description:
+        - Additional settings for S3 endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(s3)
+    mongo_db_settings:
+      description:
+        - Additional settings for MongoDB endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(mongodb)
+    kinesis_settings:
+      description:
+        - Additional settings for Kinesis endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(kinesis)
+    kafka_settings:
+      description:
+        - Additional settings for Kafka endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(kafka)
+    elasticsearch_settings:
+      description:
+        - Additional settings for Elasticsearch endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(elasticsearch)
+    neptune_settings:
+      description:
+        - Additional settings for Amazon Neptune endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(neptune)
+    redshift_settings:
+      description:
+        - Additional settings for Redshift endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(redshift)
+    postgre_sql_settings:
+      description:
+        - Additional settings for PostgreSQL endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(postgres)
+    my_sql_settings:
+      description:
+        - Additional settings for MySQL endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(mysql)
+    oracle_settings:
+      description:
+        - Additional settings for Oracle endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(oracle)
+    sybase_settings:
+      description:
+        - Additional settings for Sybase endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(sybase)
+    microsoft_sql_server_settings:
+      description:
+        - Additional settings for Microsoft SQL Server endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(sqlserver)
+    i_b_m_db_settings:
+      description:
+        - Additional settings for IBM DB2 endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(db2)
+    doc_db_settings:
+      description:
+        - Additional settings for DocumentDB endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(documentdb)
+    redis_settings:
+      description:
+        - Additional settings for Redis endpoints.
+      type: dict
+      returned: when the I(endpoint_type) is C(redis)
+'''

 try:
     import botocore
 except ImportError:
     pass  # caught by AnsibleAWSModule

+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

 backoff_params = dict(retries=5, delay=1, backoff=1.5)


 @AWSRetry.jittered_backoff(**backoff_params)
-def describe_endpoints(connection, endpoint_identifier):
+def dms_describe_tags(connection, **params):
+    """ Returns the tags on a DMS resource, as an Ansible dict """
+    tags = connection.list_tags_for_resource(**params).get('TagList', [])
+    return boto3_tag_list_to_ansible_dict(tags)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_describe_endpoints(connection, **params):
+    try:
+        endpoints = connection.describe_endpoints(**params)
+    except is_boto3_error_code('ResourceNotFoundFault'):
+        return None
+    return endpoints.get('Endpoints', None)
+
+
+def describe_endpoint(connection, endpoint_identifier):
     """ checks if the endpoint exists """
+    endpoint_filter = dict(Name='endpoint-id',
+                           Values=[endpoint_identifier])
     try:
-        endpoint_filter = dict(Name='endpoint-id',
-                               Values=[endpoint_identifier])
-        return connection.describe_endpoints(Filters=[endpoint_filter])
-    except botocore.exceptions.ClientError:
-        return {'Endpoints': []}
+        endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe the DMS endpoint.")
+
+    if not endpoints:
+        return None
+
+    endpoint = endpoints[0]
+    try:
+        tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn'])
+        endpoint['tags'] = tags
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags")
+    return endpoint


 @AWSRetry.jittered_backoff(**backoff_params)
@@ -215,6 +406,16 @@ def get_endpoint_deleted_waiter(client):
     return client.get_waiter('endpoint_deleted')


+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_remove_tags(client, **params):
+    return client.remove_tags_from_resource(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def dms_add_tags(client, **params):
+    return client.add_tags_to_resource(**params)
+
+
 def endpoint_exists(endpoint):
     """ Returns boolean based on the existence of the endpoint
     :param endpoint: dict containing the described endpoint
     :return: bool
     """
     return bool(len(endpoint['Endpoints']))


-def delete_dms_endpoint(connection):
+def delete_dms_endpoint(connection, endpoint_arn):
     try:
-        endpoint = describe_endpoints(connection,
-                                      module.params.get('endpointidentifier'))
-        endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
         delete_arn = dict(
             EndpointArn=endpoint_arn
         )
@@ -336,7 +534,11 @@ def compare_params(param_described):
     a DMS endpoint does not return the value for
     the password for security reasons ( I assume )
     """
+    param_described = dict(param_described)
     modparams
= create_module_params() + # modify can't update tags + param_described.pop('Tags', None) + modparams.pop('Tags', None) changed = False for paramname in modparams: if paramname == 'Password' or paramname in param_described \ @@ -349,13 +551,45 @@ def compare_params(param_described): return changed -def modify_dms_endpoint(connection): - +def modify_dms_endpoint(connection, endpoint): + arn = endpoint['EndpointArn'] try: params = create_module_params() - return dms_modify_endpoint(connection, **params) + # modify can't update tags + params.pop('Tags', None) + return dms_modify_endpoint(connection, EndpointArn=arn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update DMS endpoint.") + module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params) + + +def ensure_tags(connection, endpoint): + desired_tags = module.params.get('tags', None) + if desired_tags is None: + return False + + current_tags = endpoint.get('tags', {}) + + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, + module.params.get('purge_tags')) + + if not tags_to_remove and not tags_to_add: + return False + + if module.check_mode: + return True + + arn = endpoint.get('EndpointArn') + + try: + if tags_to_remove: + dms_remove_tags(connection, ResourceArn=arn, TagKeys=tags_to_remove) + if tags_to_add: + tag_list = ansible_dict_to_boto3_tag_list(tags_to_add) + dms_add_tags(connection, ResourceArn=arn, Tags=tag_list) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update DMS endpoint tags.") + + return True def create_dms_endpoint(connection): @@ -376,11 +610,11 @@ def main(): argument_spec = dict( state=dict(choices=['present', 'absent'], default='present'), endpointidentifier=dict(required=True), - endpointtype=dict(choices=['source', 'target'], required=True), + endpointtype=dict(choices=['source', 'target']), enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb', 'aurora', 'redshift', 's3', 'db2', 'azuredb', 'sybase', 'dynamodb', 'mongodb', 'sqlserver'], - required=True), + required=False), username=dict(), password=dict(no_log=True), servername=dict(), @@ -388,7 +622,8 @@ def main(): databasename=dict(), extraconnectionattributes=dict(), kmskeyid=dict(no_log=False), - tags=dict(type='dict'), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), certificatearn=dict(), sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'], default='none'), @@ -408,6 +643,8 @@ def main(): module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[ + ["state", "present", ["endpointtype"]], + ["state", "present", ["enginename"]], ["state", "absent", ["wait"]], ["wait", "True", ["timeout"]], ["wait", "True", ["retries"]], @@ -420,33 +657,40 @@ def main(): state = module.params.get('state') dmsclient = module.client('dms') - endpoint = describe_endpoints(dmsclient, - module.params.get('endpointidentifier')) + endpoint = describe_endpoint(dmsclient, + module.params.get('endpointidentifier')) if state == 'present': - if endpoint_exists(endpoint): - module.params['EndpointArn'] = \ - endpoint['Endpoints'][0].get('EndpointArn') - params_changed = compare_params(endpoint["Endpoints"][0]) + if endpoint: + changed |= ensure_tags(dmsclient, endpoint) + params_changed = compare_params(endpoint) if params_changed: - updated_dms = modify_dms_endpoint(dmsclient) + updated_dms = 
modify_dms_endpoint(dmsclient, endpoint)
                 exit_message = updated_dms
+                endpoint = exit_message.get('Endpoint')
                 changed = True
             else:
-                module.exit_json(changed=False, msg="Endpoint Already Exists")
+                exit_message = "Endpoint Already Exists"
         else:
-            dms_properties = create_dms_endpoint(dmsclient)
-            exit_message = dms_properties
+            exit_message = create_dms_endpoint(dmsclient)
+            endpoint = exit_message.get('Endpoint')
             changed = True
+
+        if changed:
+            # modify and create don't return tags
+            tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn'])
+            endpoint['tags'] = tags
     elif state == 'absent':
-        if endpoint_exists(endpoint):
-            delete_results = delete_dms_endpoint(dmsclient)
+        if endpoint:
+            delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn'])
             exit_message = delete_results
+            endpoint = None
             changed = True
         else:
             changed = False
             exit_message = 'DMS Endpoint does not exist'

-    module.exit_json(changed=changed, msg=exit_message)
+    endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags'])
+    module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message)


 if __name__ == '__main__':

From 9d5a6cab304444c6b810fa90513e8f125cf4f5c9 Mon Sep 17 00:00:00 2001
From: Sebastien Rosset
Date: Mon, 13 Jun 2022 12:28:23 -0700
Subject: [PATCH 489/683] Add 'opensearch' and 'opensearch_info' modules (#859)

Add 'opensearch' and 'opensearch_info' modules

SUMMARY
Add opensearch module to create/update AWS OpenSearch/Elasticsearch domains.
Add opensearch_info module to query AWS OpenSearch/Elasticsearch domains.
Fixes #858
Requires mattclay/aws-terminator#187
ISSUE TYPE
New Module Pull Request
COMPONENT NAME
Creates OpenSearch or ElasticSearch domain.
ADDITIONAL INFORMATION
The minimum version of botocore for these modules is 1.21.38.
The integration tests take more than 4 hours to execute. Tests time out in the CI.
I was able to run the integration tests locally.

Reviewed-by: Alina Buzachis
Reviewed-by: Sebastien Rosset
Reviewed-by: Mark Chappell
Reviewed-by: Markus Bergholz
---
 opensearch.py      | 1507 ++++++++++++++++++++++++++++++++++++++++++++
 opensearch_info.py |  530 ++++++++++++++++
 2 files changed, 2037 insertions(+)
 create mode 100644 opensearch.py
 create mode 100644 opensearch_info.py

diff --git a/opensearch.py b/opensearch.py
new file mode 100644
index 00000000000..422feb7d31a
--- /dev/null
+++ b/opensearch.py
@@ -0,0 +1,1507 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: opensearch
+short_description: Creates OpenSearch or ElasticSearch domain
+description:
+  - Creates or modifies an Amazon OpenSearch Service domain.
+version_added: 3.1.0
+author: "Sebastien Rosset (@sebastien-rosset)"
+options:
+  state:
+    description:
+      - Creates or modifies an existing OpenSearch domain.
+      - Deletes an OpenSearch domain.
+    required: false
+    type: str
+    choices: ['present', 'absent']
+    default: present
+  domain_name:
+    description:
+      - The name of the Amazon OpenSearch/ElasticSearch Service domain.
+      - Domain names are unique across the domains owned by an account within an AWS region.
+    required: true
+    type: str
+  engine_version:
+    description:
+      - >
+        The engine version to use. For example, 'ElasticSearch_7.10' or 'OpenSearch_1.1'.
+      - >
+        If the currently running version is not equal to I(engine_version),
+        a cluster upgrade is triggered.
+      - >
+        It may not be possible to upgrade directly from the currently running version
+        to I(engine_version). In that case, the upgrade is performed incrementally by
+        upgrading to the highest compatible version, then repeat the operation until
+        the cluster is running at the target version.
+      - >
+        The upgrade operation fails if there is no path from current version to I(engine_version).
+      - >
+        See OpenSearch documentation for upgrade compatibility.
+    required: false
+    type: str
+  allow_intermediate_upgrades:
+    description:
+      - >
+        If true, allow OpenSearch domain to be upgraded through one or more intermediate versions.
+      - >
+        If false, do not allow OpenSearch domain to be upgraded through intermediate versions.
+        The upgrade operation fails if it's not possible to upgrade to I(engine_version) directly.
+    required: false
+    type: bool
+    default: true
+  cluster_config:
+    description:
+      - Parameters for the cluster configuration of an OpenSearch Service domain.
+    type: dict
+    suboptions:
+      instance_type:
+        description:
+          - Type of the instances to use for the domain.
+        required: false
+        type: str
+      instance_count:
+        description:
+          - Number of instances for the domain.
+        required: false
+        type: int
+      zone_awareness:
+        description:
+          - A boolean value to indicate whether zone awareness is enabled.
+        required: false
+        type: bool
+      availability_zone_count:
+        description:
+          - >
+            An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
+            This should be equal to the number of subnets if VPC endpoints are enabled.
+        required: false
+        type: int
+      dedicated_master:
+        description:
+          - A boolean value to indicate whether a dedicated master node is enabled.
+        required: false
+        type: bool
+      dedicated_master_instance_type:
+        description:
+          - The instance type for a dedicated master node.
+        required: false
+        type: str
+      dedicated_master_instance_count:
+        description:
+          - Total number of dedicated master nodes, active and on standby, for the domain.
+        required: false
+        type: int
+      warm_enabled:
+        description:
+          - True to enable UltraWarm storage.
+        required: false
+        type: bool
+      warm_type:
+        description:
+          - The instance type for the OpenSearch domain's warm nodes.
+        required: false
+        type: str
+      warm_count:
+        description:
+          - The number of UltraWarm nodes in the domain.
+        required: false
+        type: int
+      cold_storage_options:
+        description:
+          - Specifies the ColdStorageOptions config for a Domain.
+        type: dict
+        suboptions:
+          enabled:
+            description:
+              - True to enable cold storage. Supported on Elasticsearch 7.9 or above.
+            required: false
+            type: bool
+  ebs_options:
+    description:
+      - Parameters to configure EBS-based storage for an OpenSearch Service domain.
+    type: dict
+    suboptions:
+      ebs_enabled:
+        description:
+          - Specifies whether EBS-based storage is enabled.
+        required: false
+        type: bool
+      volume_type:
+        description:
+          - Specifies the volume type for EBS-based storage. "standard"|"gp2"|"io1"
+        required: false
+        type: str
+      volume_size:
+        description:
+          - Integer to specify the size of an EBS volume.
+        required: false
+        type: int
+      iops:
+        description:
+          - The IOPS for a Provisioned IOPS EBS volume (SSD).
+        required: false
+        type: int
+  vpc_options:
+    description:
+      - Options to specify the subnets and security groups for a VPC endpoint.
+    type: dict
+    suboptions:
+      subnets:
+        description:
+          - Specifies the subnet ids for VPC endpoint.
+ required: false + type: list + elements: str + security_groups: + description: + - Specifies the security group ids for VPC endpoint. + required: false + type: list + elements: str + snapshot_options: + description: + - Option to set time, in UTC format, of the daily automated snapshot. + type: dict + suboptions: + automated_snapshot_start_hour: + description: + - > + Integer value from 0 to 23 specifying when the service takes a daily automated snapshot + of the specified Elasticsearch domain. + required: false + type: int + access_policies: + description: + - IAM access policy as a JSON-formatted string. + required: false + type: dict + encryption_at_rest_options: + description: + - Parameters to enable encryption at rest. + type: dict + suboptions: + enabled: + description: + - Should data be encrypted while at rest. + required: false + type: bool + kms_key_id: + description: + - If encryption at rest enabled, this identifies the encryption key to use. + - The value should be a KMS key ARN. It can also be the KMS key id. + required: false + type: str + node_to_node_encryption_options: + description: + - Node-to-node encryption options. + type: dict + suboptions: + enabled: + description: + - True to enable node-to-node encryption. + required: false + type: bool + cognito_options: + description: + - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards. + type: dict + suboptions: + enabled: + description: + - The option to enable Cognito for OpenSearch Dashboards authentication. + required: false + type: bool + user_pool_id: + description: + - The Cognito user pool ID for OpenSearch Dashboards authentication. + required: false + type: str + identity_pool_id: + description: + - The Cognito identity pool ID for OpenSearch Dashboards authentication. + required: false + type: str + role_arn: + description: + - The role ARN that provides OpenSearch permissions for accessing Cognito resources. + required: false + type: str + domain_endpoint_options: + description: + - Options to specify configuration that will be applied to the domain endpoint. + type: dict + suboptions: + enforce_https: + description: + - Whether only HTTPS endpoint should be enabled for the domain. + type: bool + tls_security_policy: + description: + - Specify the TLS security policy to apply to the HTTPS endpoint of the domain. + type: str + custom_endpoint_enabled: + description: + - Whether to enable a custom endpoint for the domain. + type: bool + custom_endpoint: + description: + - The fully qualified domain for your custom endpoint. + type: str + custom_endpoint_certificate_arn: + description: + - The ACM certificate ARN for your custom endpoint. + type: str + advanced_security_options: + description: + - Specifies advanced security options. + type: dict + suboptions: + enabled: + description: + - True if advanced security is enabled. + - You must enable node-to-node encryption to use advanced security options. + type: bool + internal_user_database_enabled: + description: + - True if the internal user database is enabled. + type: bool + master_user_options: + description: + - Credentials for the master user, username and password, ARN, or both. + type: dict + suboptions: + master_user_arn: + description: + - ARN for the master user (if IAM is enabled). + type: str + master_user_name: + description: + - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database. 
+ type: str + master_user_password: + description: + - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database. + type: str + saml_options: + description: + - The SAML application configuration for the domain. + type: dict + suboptions: + enabled: + description: + - True if SAML is enabled. + - To use SAML authentication, you must enable fine-grained access control. + - You can only enable SAML authentication for OpenSearch Dashboards on existing domains, + not during the creation of new ones. + - Domains only support one Dashboards authentication method at a time. + If you have Amazon Cognito authentication for OpenSearch Dashboards enabled, + you must disable it before you can enable SAML. + type: bool + idp: + description: + - The SAML Identity Provider's information. + type: dict + suboptions: + metadata_content: + description: + - The metadata of the SAML application in XML format. + type: str + entity_id: + description: + - The unique entity ID of the application in SAML identity provider. + type: str + master_user_name: + description: + - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database. + type: str + master_backend_role: + description: + - The backend role that the SAML master user is mapped to. + type: str + subject_key: + description: + - Element of the SAML assertion to use for username. Default is NameID. + type: str + roles_key: + description: + - Element of the SAML assertion to use for backend roles. Default is roles. + type: str + session_timeout_minutes: + description: + - The duration, in minutes, after which a user session becomes inactive. Acceptable values are between 1 and 1440, and the default value is 60. + type: int + auto_tune_options: + description: + - Specifies Auto-Tune options. + type: dict + suboptions: + desired_state: + description: + - The Auto-Tune desired state. Valid values are ENABLED and DISABLED. + type: str + choices: ['ENABLED', 'DISABLED'] + maintenance_schedules: + description: + - A list of maintenance schedules. + type: list + elements: dict + suboptions: + start_at: + description: + - The timestamp at which the Auto-Tune maintenance schedule starts. + type: str + duration: + description: + - Specifies maintenance schedule duration, duration value and duration unit. + type: dict + suboptions: + value: + description: + - Integer to specify the value of a maintenance schedule duration. + type: int + unit: + description: + - The unit of a maintenance schedule duration. Valid value is HOURS. + choices: ['HOURS'] + type: str + cron_expression_for_recurrence: + description: + - A cron expression for a recurring maintenance schedule. + type: str + wait: + description: + - Whether or not to wait for completion of OpenSearch creation, modification or deletion. + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds. + default: 300 + type: int + tags: + description: + - tags dict to apply to an OpenSearch cluster. + type: dict + purge_tags: + description: + - whether to remove tags not present in the C(tags) parameter. 
+ default: True + type: bool +requirements: +- botocore >= 1.21.38 +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +""" + +EXAMPLES = """ + +- name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters + community.aws.opensearch: + domain_name: "dev-cluster" + engine_version: Elasticsearch_1.1 + cluster_config: + instance_type: "t2.small.search" + instance_count: 2 + zone_awareness: false + dedicated_master: false + ebs_options: + ebs_enabled: true + volume_type: "gp2" + volume_size: 10 + access_policies: "{{ lookup('file', 'policy.json') | from_json }}" + +- name: Create OpenSearch domain with dedicated masters + community.aws.opensearch: + domain_name: "my-domain" + engine_version: OpenSearch_1.1 + cluster_config: + instance_type: "t2.small.search" + instance_count: 12 + dedicated_master: true + zone_awareness: true + availability_zone_count: 2 + dedicated_master_instance_type: "t2.small.search" + dedicated_master_instance_count: 3 + warm_enabled: true + warm_type: "ultrawarm1.medium.search" + warm_count: 1 + cold_storage_options: + enabled: false + ebs_options: + ebs_enabled: true + volume_type: "io1" + volume_size: 10 + iops: 1000 + vpc_options: + subnets: + - "subnet-e537d64a" + - "subnet-e537d64b" + security_groups: + - "sg-dd2f13cb" + - "sg-dd2f13cc" + snapshot_options: + automated_snapshot_start_hour: 13 + access_policies: "{{ lookup('file', 'policy.json') | from_json }}" + encryption_at_rest_options: + enabled: false + node_to_node_encryption_options: + enabled: false + auto_tune_options: + enabled: true + maintenance_schedules: + - start_at: "2025-01-12" + duration: + value: 1 + unit: "HOURS" + cron_expression_for_recurrence: "cron(0 12 * * ? *)" + - start_at: "2032-01-12" + duration: + value: 2 + unit: "HOURS" + cron_expression_for_recurrence: "cron(0 12 * * ? 
*)" + tags: + Environment: Development + Application: Search + wait: true + +- name: Increase size of EBS volumes for existing cluster + community.aws.opensearch: + domain_name: "my-domain" + ebs_options: + volume_size: 5 + wait: true + +- name: Increase instance count for existing cluster + community.aws.opensearch: + domain_name: "my-domain" + cluster_config: + instance_count: 40 + wait: true + +""" + +from copy import deepcopy +import datetime +import json + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.six import string_types + +# import module snippets +from ansible_collections.amazon.aws.plugins.module_utils.core import ( + AnsibleAWSModule, + is_boto3_error_code, +) +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( + AWSRetry, + boto3_tag_list_to_ansible_dict, + compare_policies, +) +from ansible_collections.community.aws.plugins.module_utils.opensearch import ( + compare_domain_versions, + ensure_tags, + get_domain_status, + get_domain_config, + get_target_increment_version, + normalize_opensearch, + parse_version, + wait_for_domain_status, +) + + +def ensure_domain_absent(client, module): + domain_name = module.params.get("domain_name") + changed = False + + domain = get_domain_status(client, module, domain_name) + if module.check_mode: + module.exit_json( + changed=True, msg="Would have deleted domain if not in check mode" + ) + try: + client.delete_domain(DomainName=domain_name) + changed = True + except is_boto3_error_code("ResourceNotFoundException"): + # The resource does not exist, or it has already been deleted + return dict(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="trying to delete domain") + + # If we're not waiting for a delete to complete then we're all done + # so just return + if not domain or not module.params.get("wait"): + return dict(changed=changed) + try: + wait_for_domain_status(client, module, domain_name, "domain_deleted") + return dict(changed=changed) + except is_boto3_error_code("ResourceNotFoundException"): + return dict(changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "awaiting domain deletion") + + +def upgrade_domain(client, module, source_version, target_engine_version): + domain_name = module.params.get("domain_name") + # Determine if it's possible to upgrade directly from source version + # to target version, or if it's necessary to upgrade through intermediate major versions. + next_version = target_engine_version + # When perform_check_only is true, indicates that an upgrade eligibility check needs + # to be performed. Does not actually perform the upgrade. + perform_check_only = False + if module.check_mode: + perform_check_only = True + current_version = source_version + while current_version != target_engine_version: + v = get_target_increment_version(client, module, domain_name, target_engine_version) + if v is None: + # There is no compatible version, according to the get_compatible_versions() API. + # The upgrade should fail, but try anyway. + next_version = target_engine_version + if next_version != target_engine_version: + # It's not possible to upgrade directly to the target version. + # Check the module parameters to determine if this is allowed or not. 
+ if not module.params.get("allow_intermediate_upgrades"): + module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( + source_version, target_engine_version, next_version)) + + parameters = { + "DomainName": domain_name, + "TargetVersion": next_version, + "PerformCheckOnly": perform_check_only, + } + + if not module.check_mode: + # If background tasks are in progress, wait until they complete. + # This can take several hours depending on the cluster size and the type of background tasks + # (maybe an upgrade is already in progress). + # It's not possible to upgrade a domain that has background tasks are in progress, + # the call to client.upgrade_domain would fail. + wait_for_domain_status(client, module, domain_name, "domain_available") + + try: + client.upgrade_domain(**parameters) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + # In check mode (=> PerformCheckOnly==True), a ValidationException may be + # raised if it's not possible to upgrade to the target version. + module.fail_json_aws( + e, + msg="Couldn't upgrade domain {0} from {1} to {2}".format( + domain_name, current_version, next_version + ), + ) + + if module.check_mode: + module.exit_json( + changed=True, + msg="Would have upgraded domain from {0} to {1} if not in check mode".format( + current_version, next_version + ), + ) + current_version = next_version + + if module.params.get("wait"): + wait_for_domain_status(client, module, domain_name, "domain_available") + + +def set_cluster_config( + module, current_domain_config, desired_domain_config, change_set +): + changed = False + + cluster_config = desired_domain_config["ClusterConfig"] + cluster_opts = module.params.get("cluster_config") + if cluster_opts is not None: + if cluster_opts.get("instance_type") is not None: + cluster_config["InstanceType"] = cluster_opts.get("instance_type") + if cluster_opts.get("instance_count") is not None: + cluster_config["InstanceCount"] = cluster_opts.get("instance_count") + if cluster_opts.get("zone_awareness") is not None: + cluster_config["ZoneAwarenessEnabled"] = cluster_opts.get("zone_awareness") + if cluster_config["ZoneAwarenessEnabled"]: + if cluster_opts.get("availability_zone_count") is not None: + cluster_config["ZoneAwarenessConfig"] = { + "AvailabilityZoneCount": cluster_opts.get( + "availability_zone_count" + ), + } + + if cluster_opts.get("dedicated_master") is not None: + cluster_config["DedicatedMasterEnabled"] = cluster_opts.get( + "dedicated_master" + ) + if cluster_config["DedicatedMasterEnabled"]: + if cluster_opts.get("dedicated_master_instance_type") is not None: + cluster_config["DedicatedMasterType"] = cluster_opts.get( + "dedicated_master_instance_type" + ) + if cluster_opts.get("dedicated_master_instance_count") is not None: + cluster_config["DedicatedMasterCount"] = cluster_opts.get( + "dedicated_master_instance_count" + ) + + if cluster_opts.get("warm_enabled") is not None: + cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled") + if cluster_config["WarmEnabled"]: + if cluster_opts.get("warm_type") is not None: + cluster_config["WarmType"] = cluster_opts.get("warm_type") + if cluster_opts.get("warm_count") is not None: + cluster_config["WarmCount"] = cluster_opts.get("warm_count") + + cold_storage_opts = None + if cluster_opts is not None: + cold_storage_opts = cluster_opts.get("cold_storage_options") + if compare_domain_versions(desired_domain_config["EngineVersion"], "Elasticsearch_7.9") < 0: + # If the 
engine version is ElasticSearch < 7.9, cold storage is not supported. + # When querying a domain < 7.9, the AWS API indicates cold storage is disabled (Enabled: False), + # which makes sense. However, trying to do HTTP POST with Enable: False causes an API error. + # The 'ColdStorageOptions' attribute should not be present in HTTP POST. + if cold_storage_opts is not None and cold_storage_opts.get("enabled"): + module.fail_json(msg="Cold Storage is not supported") + cluster_config.pop("ColdStorageOptions", None) + if ( + current_domain_config is not None + and "ClusterConfig" in current_domain_config + ): + # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff + # will indicate a change must be done. + current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None) + else: + # Elasticsearch 7.9 and above support ColdStorageOptions. + if ( + cold_storage_opts is not None + and cold_storage_opts.get("enabled") is not None + ): + cluster_config["ColdStorageOptions"] = { + "Enabled": cold_storage_opts.get("enabled"), + } + + if ( + current_domain_config is not None + and current_domain_config["ClusterConfig"] != cluster_config + ): + change_set.append( + "ClusterConfig changed from {0} to {1}".format( + current_domain_config["ClusterConfig"], cluster_config + ) + ) + changed = True + return changed + + +def set_ebs_options(module, current_domain_config, desired_domain_config, change_set): + changed = False + ebs_config = desired_domain_config["EBSOptions"] + ebs_opts = module.params.get("ebs_options") + if ebs_opts is None: + return changed + if ebs_opts.get("ebs_enabled") is not None: + ebs_config["EBSEnabled"] = ebs_opts.get("ebs_enabled") + + if not ebs_config["EBSEnabled"]: + desired_domain_config["EBSOptions"] = { + "EBSEnabled": False, + } + else: + if ebs_opts.get("volume_type") is not None: + ebs_config["VolumeType"] = ebs_opts.get("volume_type") + if ebs_opts.get("volume_size") is not None: + ebs_config["VolumeSize"] = ebs_opts.get("volume_size") + if ebs_opts.get("iops") is not None: + ebs_config["Iops"] = ebs_opts.get("iops") + + if ( + current_domain_config is not None + and current_domain_config["EBSOptions"] != ebs_config + ): + change_set.append( + "EBSOptions changed from {0} to {1}".format( + current_domain_config["EBSOptions"], ebs_config + ) + ) + changed = True + return changed + + +def set_encryption_at_rest_options( + module, current_domain_config, desired_domain_config, change_set +): + changed = False + encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"] + encryption_at_rest_opts = module.params.get("encryption_at_rest_options") + if encryption_at_rest_opts is None: + return False + if encryption_at_rest_opts.get("enabled") is not None: + encryption_at_rest_config["Enabled"] = encryption_at_rest_opts.get("enabled") + if not encryption_at_rest_config["Enabled"]: + desired_domain_config["EncryptionAtRestOptions"] = { + "Enabled": False, + } + else: + if encryption_at_rest_opts.get("kms_key_id") is not None: + encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get( + "kms_key_id" + ) + + if ( + current_domain_config is not None + and current_domain_config["EncryptionAtRestOptions"] + != encryption_at_rest_config + ): + change_set.append( + "EncryptionAtRestOptions changed from {0} to {1}".format( + current_domain_config["EncryptionAtRestOptions"], + encryption_at_rest_config, + ) + ) + changed = True + return changed + + +def set_node_to_node_encryption_options( + module, 
current_domain_config, desired_domain_config, change_set +): + changed = False + node_to_node_encryption_config = desired_domain_config[ + "NodeToNodeEncryptionOptions" + ] + node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options") + if node_to_node_encryption_opts is None: + return changed + if node_to_node_encryption_opts.get("enabled") is not None: + node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get( + "enabled" + ) + + if ( + current_domain_config is not None + and current_domain_config["NodeToNodeEncryptionOptions"] + != node_to_node_encryption_config + ): + change_set.append( + "NodeToNodeEncryptionOptions changed from {0} to {1}".format( + current_domain_config["NodeToNodeEncryptionOptions"], + node_to_node_encryption_config, + ) + ) + changed = True + return changed + + +def set_vpc_options(module, current_domain_config, desired_domain_config, change_set): + changed = False + vpc_config = None + if "VPCOptions" in desired_domain_config: + vpc_config = desired_domain_config["VPCOptions"] + vpc_opts = module.params.get("vpc_options") + if vpc_opts is None: + return changed + vpc_subnets = vpc_opts.get("subnets") + if vpc_subnets is not None: + if vpc_config is None: + vpc_config = {} + desired_domain_config["VPCOptions"] = vpc_config + # OpenSearch cluster is attached to VPC + if isinstance(vpc_subnets, string_types): + vpc_subnets = [x.strip() for x in vpc_subnets.split(",")] + vpc_config["SubnetIds"] = vpc_subnets + + vpc_security_groups = vpc_opts.get("security_groups") + if vpc_security_groups is not None: + if vpc_config is None: + vpc_config = {} + desired_domain_config["VPCOptions"] = vpc_config + if isinstance(vpc_security_groups, string_types): + vpc_security_groups = [x.strip() for x in vpc_security_groups.split(",")] + vpc_config["SecurityGroupIds"] = vpc_security_groups + + if current_domain_config is not None: + # Modify existing cluster. + current_cluster_is_vpc = False + desired_cluster_is_vpc = False + if ( + "VPCOptions" in current_domain_config + and "SubnetIds" in current_domain_config["VPCOptions"] + and len(current_domain_config["VPCOptions"]["SubnetIds"]) > 0 + ): + current_cluster_is_vpc = True + if ( + "VPCOptions" in desired_domain_config + and "SubnetIds" in desired_domain_config["VPCOptions"] + and len(desired_domain_config["VPCOptions"]["SubnetIds"]) > 0 + ): + desired_cluster_is_vpc = True + if current_cluster_is_vpc != desired_cluster_is_vpc: + # AWS does not allow changing the type. Don't fail here so we return the AWS API error. + change_set.append("VPCOptions changed between Internet and VPC") + changed = True + elif desired_cluster_is_vpc is False: + # There are no VPCOptions to configure. + pass + else: + # Note the subnets may be the same but be listed in a different order. 
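+            # Hypothetical example: SubnetIds ["subnet-aaa", "subnet-bbb"] and
+            # ["subnet-bbb", "subnet-aaa"] compare equal once both sides are
+            # converted to sets, so a pure reordering in the playbook does not
+            # register as a change.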
+            if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(
+                vpc_config["SubnetIds"]
+            ):
+                change_set.append(
+                    "SubnetIds changed from {0} to {1}".format(
+                        current_domain_config["VPCOptions"]["SubnetIds"],
+                        vpc_config["SubnetIds"],
+                    )
+                )
+                changed = True
+            if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(
+                vpc_config["SecurityGroupIds"]
+            ):
+                change_set.append(
+                    "SecurityGroup changed from {0} to {1}".format(
+                        current_domain_config["VPCOptions"]["SecurityGroupIds"],
+                        vpc_config["SecurityGroupIds"],
+                    )
+                )
+                changed = True
+    return changed
+
+
+def set_snapshot_options(
+    module, current_domain_config, desired_domain_config, change_set
+):
+    changed = False
+    snapshot_config = desired_domain_config["SnapshotOptions"]
+    snapshot_opts = module.params.get("snapshot_options")
+    if snapshot_opts is None:
+        return changed
+    if snapshot_opts.get("automated_snapshot_start_hour") is not None:
+        snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get(
+            "automated_snapshot_start_hour"
+        )
+    if (
+        current_domain_config is not None
+        and current_domain_config["SnapshotOptions"] != snapshot_config
+    ):
+        change_set.append("SnapshotOptions changed")
+        changed = True
+    return changed
+
+
+def set_cognito_options(
+    module, current_domain_config, desired_domain_config, change_set
+):
+    changed = False
+    cognito_config = desired_domain_config["CognitoOptions"]
+    cognito_opts = module.params.get("cognito_options")
+    if cognito_opts is None:
+        return changed
+    if cognito_opts.get("enabled") is not None:
+        cognito_config["Enabled"] = cognito_opts.get("enabled")
+    if not cognito_config["Enabled"]:
+        desired_domain_config["CognitoOptions"] = {
+            "Enabled": False,
+        }
+    else:
+        # The suboptions are declared as user_pool_id, identity_pool_id and
+        # role_arn in the argument spec, so they must be read under those keys.
+        if cognito_opts.get("user_pool_id") is not None:
+            cognito_config["UserPoolId"] = cognito_opts.get("user_pool_id")
+        if cognito_opts.get("identity_pool_id") is not None:
+            cognito_config["IdentityPoolId"] = cognito_opts.get("identity_pool_id")
+        if cognito_opts.get("role_arn") is not None:
+            cognito_config["RoleArn"] = cognito_opts.get("role_arn")
+
+    if (
+        current_domain_config is not None
+        and current_domain_config["CognitoOptions"] != cognito_config
+    ):
+        change_set.append(
+            "CognitoOptions changed from {0} to {1}".format(
+                current_domain_config["CognitoOptions"], cognito_config
+            )
+        )
+        changed = True
+    return changed
+
+
+def set_advanced_security_options(
+    module, current_domain_config, desired_domain_config, change_set
+):
+    changed = False
+    advanced_security_config = desired_domain_config["AdvancedSecurityOptions"]
+    advanced_security_opts = module.params.get("advanced_security_options")
+    if advanced_security_opts is None:
+        return changed
+    if advanced_security_opts.get("enabled") is not None:
+        advanced_security_config["Enabled"] = advanced_security_opts.get("enabled")
+    if not advanced_security_config["Enabled"]:
+        desired_domain_config["AdvancedSecurityOptions"] = {
+            "Enabled": False,
+        }
+    else:
+        if advanced_security_opts.get("internal_user_database_enabled") is not None:
+            advanced_security_config[
+                "InternalUserDatabaseEnabled"
+            ] = advanced_security_opts.get("internal_user_database_enabled")
+        master_user_opts = advanced_security_opts.get("master_user_options")
+        if master_user_opts is not None:
+            if master_user_opts.get("master_user_arn") is not None:
+                advanced_security_config["MasterUserOptions"][
+                    "MasterUserARN"
+                ] = master_user_opts.get("master_user_arn")
+            if 
master_user_opts.get("master_user_name") is not None: + advanced_security_config["MasterUserOptions"][ + "MasterUserName" + ] = master_user_opts.get("master_user_name") + if master_user_opts.get("master_user_password") is not None: + advanced_security_config["MasterUserOptions"][ + "MasterUserPassword" + ] = master_user_opts.get("master_user_password") + saml_opts = advanced_security_opts.get("saml_options") + if saml_opts is not None: + if saml_opts.get("enabled") is not None: + advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get( + "enabled" + ) + idp_opts = saml_opts.get("idp") + if idp_opts is not None: + if idp_opts.get("metadata_content") is not None: + advanced_security_config["SamlOptions"]["Idp"][ + "MetadataContent" + ] = idp_opts.get("metadata_content") + if idp_opts.get("entity_id") is not None: + advanced_security_config["SamlOptions"]["Idp"][ + "EntityId" + ] = idp_opts.get("entity_id") + if saml_opts.get("master_user_name") is not None: + advanced_security_config["SamlOptions"][ + "MasterUserName" + ] = saml_opts.get("master_user_name") + if saml_opts.get("master_backend_role") is not None: + advanced_security_config["SamlOptions"][ + "MasterBackendRole" + ] = saml_opts.get("master_backend_role") + if saml_opts.get("subject_key") is not None: + advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get( + "subject_key" + ) + if saml_opts.get("roles_key") is not None: + advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get( + "roles_key" + ) + if saml_opts.get("session_timeout_minutes") is not None: + advanced_security_config["SamlOptions"][ + "SessionTimeoutMinutes" + ] = saml_opts.get("session_timeout_minutes") + + if ( + current_domain_config is not None + and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config + ): + change_set.append( + "AdvancedSecurityOptions changed from {0} to {1}".format( + current_domain_config["AdvancedSecurityOptions"], + advanced_security_config, + ) + ) + changed = True + return changed + + +def set_domain_endpoint_options( + module, current_domain_config, desired_domain_config, change_set +): + changed = False + domain_endpoint_config = desired_domain_config["DomainEndpointOptions"] + domain_endpoint_opts = module.params.get("domain_endpoint_options") + if domain_endpoint_opts is None: + return changed + if domain_endpoint_opts.get("enforce_https") is not None: + domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get( + "enforce_https" + ) + if domain_endpoint_opts.get("tls_security_policy") is not None: + domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get( + "tls_security_policy" + ) + if domain_endpoint_opts.get("custom_endpoint_enabled") is not None: + domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get( + "custom_endpoint_enabled" + ) + if domain_endpoint_config["CustomEndpointEnabled"]: + if domain_endpoint_opts.get("custom_endpoint") is not None: + domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get( + "custom_endpoint" + ) + if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None: + domain_endpoint_config[ + "CustomEndpointCertificateArn" + ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn") + + if ( + current_domain_config is not None + and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config + ): + change_set.append( + "DomainEndpointOptions changed from {0} to {1}".format( + current_domain_config["DomainEndpointOptions"], domain_endpoint_config + ) + 
) + changed = True + return changed + + +def set_auto_tune_options( + module, current_domain_config, desired_domain_config, change_set +): + changed = False + auto_tune_config = desired_domain_config["AutoTuneOptions"] + auto_tune_opts = module.params.get("auto_tune_options") + if auto_tune_opts is None: + return changed + schedules = auto_tune_opts.get("maintenance_schedules") + if auto_tune_opts.get("desired_state") is not None: + auto_tune_config["DesiredState"] = auto_tune_opts.get("desired_state") + if auto_tune_config["DesiredState"] != "ENABLED": + desired_domain_config["AutoTuneOptions"] = { + "DesiredState": "DISABLED", + } + elif schedules is not None: + auto_tune_config["MaintenanceSchedules"] = [] + for s in schedules: + schedule_entry = {} + start_at = s.get("start_at") + if start_at is not None: + if isinstance(start_at, datetime.datetime): + # The property was parsed from yaml to datetime, but the AWS API wants a string + start_at = start_at.strftime("%Y-%m-%d") + schedule_entry["StartAt"] = start_at + duration_opt = s.get("duration") + if duration_opt is not None: + schedule_entry["Duration"] = {} + if duration_opt.get("value") is not None: + schedule_entry["Duration"]["Value"] = duration_opt.get("value") + if duration_opt.get("unit") is not None: + schedule_entry["Duration"]["Unit"] = duration_opt.get("unit") + if s.get("cron_expression_for_recurrence") is not None: + schedule_entry["CronExpressionForRecurrence"] = s.get( + "cron_expression_for_recurrence" + ) + auto_tune_config["MaintenanceSchedules"].append(schedule_entry) + if current_domain_config is not None: + if ( + current_domain_config["AutoTuneOptions"]["DesiredState"] + != auto_tune_config["DesiredState"] + ): + change_set.append( + "AutoTuneOptions.DesiredState changed from {0} to {1}".format( + current_domain_config["AutoTuneOptions"]["DesiredState"], + auto_tune_config["DesiredState"], + ) + ) + changed = True + if ( + auto_tune_config["MaintenanceSchedules"] + != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"] + ): + change_set.append( + "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( + current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], + auto_tune_config["MaintenanceSchedules"], + ) + ) + changed = True + return changed + + +def set_access_policy(module, current_domain_config, desired_domain_config, change_set): + access_policy_config = None + changed = False + access_policy_opt = module.params.get("access_policies") + if access_policy_opt is None: + return changed + try: + access_policy_config = json.dumps(access_policy_opt) + except Exception as e: + module.fail_json( + msg="Failed to convert the policy into valid JSON: %s" % str(e) + ) + if current_domain_config is not None: + # Updating existing domain + current_access_policy = json.loads(current_domain_config["AccessPolicies"]) + if not compare_policies(current_access_policy, access_policy_opt): + change_set.append( + "AccessPolicy changed from {0} to {1}".format( + current_access_policy, access_policy_opt + ) + ) + changed = True + desired_domain_config["AccessPolicies"] = access_policy_config + else: + # Creating new domain + desired_domain_config["AccessPolicies"] = access_policy_config + return changed + + +def ensure_domain_present(client, module): + domain_name = module.params.get("domain_name") + + # Create default if OpenSearch does not exist. If domain already exists, + # the data is populated by retrieving the current configuration from the API. 
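+    # The defaults below describe a minimal Internet-facing domain: OpenSearch 1.1,
+    # two t2.small.search data nodes, no dedicated masters, and EBS, encryption,
+    # Cognito, advanced security and Auto-Tune all disabled. They only take
+    # effect at creation time; for an existing domain this dict is replaced
+    # wholesale by a copy of the current configuration just below.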
+ desired_domain_config = { + "DomainName": module.params.get("domain_name"), + "EngineVersion": "OpenSearch_1.1", + "ClusterConfig": { + "InstanceType": "t2.small.search", + "InstanceCount": 2, + "ZoneAwarenessEnabled": False, + "DedicatedMasterEnabled": False, + "WarmEnabled": False, + }, + # By default create ES attached to the Internet. + # If the "VPCOptions" property is specified, even if empty, the API server interprets + # as incomplete VPC configuration. + # "VPCOptions": {}, + "EBSOptions": { + "EBSEnabled": False, + }, + "EncryptionAtRestOptions": { + "Enabled": False, + }, + "NodeToNodeEncryptionOptions": { + "Enabled": False, + }, + "SnapshotOptions": { + "AutomatedSnapshotStartHour": 0, + }, + "CognitoOptions": { + "Enabled": False, + }, + "AdvancedSecurityOptions": { + "Enabled": False, + }, + "DomainEndpointOptions": { + "CustomEndpointEnabled": False, + }, + "AutoTuneOptions": { + "DesiredState": "DISABLED", + }, + } + # Determine if OpenSearch domain already exists. + # current_domain_config may be None if the domain does not exist. + (current_domain_config, domain_arn) = get_domain_config(client, module, domain_name) + if current_domain_config is not None: + desired_domain_config = deepcopy(current_domain_config) + + if module.params.get("engine_version") is not None: + # Validate the engine_version + v = parse_version(module.params.get("engine_version")) + if v is None: + module.fail_json( + "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y" + ) + desired_domain_config["EngineVersion"] = module.params.get("engine_version") + + changed = False + change_set = [] # For check mode purpose + + changed |= set_cluster_config( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_ebs_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_encryption_at_rest_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_node_to_node_encryption_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_vpc_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_snapshot_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_cognito_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_advanced_security_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_domain_endpoint_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_auto_tune_options( + module, current_domain_config, desired_domain_config, change_set + ) + changed |= set_access_policy( + module, current_domain_config, desired_domain_config, change_set + ) + + if current_domain_config is not None: + if ( + desired_domain_config["EngineVersion"] + != current_domain_config["EngineVersion"] + ): + changed = True + change_set.append("EngineVersion changed") + upgrade_domain( + client, + module, + current_domain_config["EngineVersion"], + desired_domain_config["EngineVersion"], + ) + + if changed: + if module.check_mode: + module.exit_json( + changed=True, + msg=f"Would have updated domain if not in check mode: {change_set}", + ) + # Remove the "EngineVersion" attribute, the AWS API does not accept this attribute. 
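+            # Version changes were already handled above by upgrade_domain();
+            # update_domain_config() has no EngineVersion parameter, so the
+            # attribute must be stripped before issuing the update call.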
+ desired_domain_config.pop("EngineVersion", None) + try: + client.update_domain_config(**desired_domain_config) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws( + e, msg="Couldn't update domain {0}".format(domain_name) + ) + + else: + # Create new OpenSearch cluster + if module.params.get("access_policies") is None: + module.fail_json( + "state is present but the following is missing: access_policies" + ) + + changed = True + if module.check_mode: + module.exit_json( + changed=True, msg="Would have created a domain if not in check mode" + ) + try: + response = client.create_domain(**desired_domain_config) + domain = response["DomainStatus"] + domain_arn = domain["ARN"] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: + module.fail_json_aws( + e, msg="Couldn't update domain {0}".format(domain_name) + ) + + try: + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) + + desired_tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + changed |= ensure_tags( + client, module, domain_arn, existing_tags, desired_tags, purge_tags + ) + + if module.params.get("wait") and not module.check_mode: + wait_for_domain_status(client, module, domain_name, "domain_available") + + domain = get_domain_status(client, module, domain_name) + + return dict(changed=changed, **normalize_opensearch(client, module, domain)) + + +def main(): + + module = AnsibleAWSModule( + argument_spec=dict( + state=dict(choices=["present", "absent"], default="present"), + domain_name=dict(required=True), + engine_version=dict(), + allow_intermediate_upgrades=dict(required=False, type="bool", default=True), + access_policies=dict(required=False, type="dict"), + cluster_config=dict( + type="dict", + default=None, + options=dict( + instance_type=dict(), + instance_count=dict(required=False, type="int"), + zone_awareness=dict(required=False, type="bool"), + availability_zone_count=dict(required=False, type="int"), + dedicated_master=dict(required=False, type="bool"), + dedicated_master_instance_type=dict(), + dedicated_master_instance_count=dict(type="int"), + warm_enabled=dict(required=False, type="bool"), + warm_type=dict(required=False), + warm_count=dict(required=False, type="int"), + cold_storage_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(required=False, type="bool"), + ), + ), + ), + ), + snapshot_options=dict( + type="dict", + default=None, + options=dict( + automated_snapshot_start_hour=dict(required=False, type="int"), + ), + ), + ebs_options=dict( + type="dict", + default=None, + options=dict( + ebs_enabled=dict(required=False, type="bool"), + volume_type=dict(required=False), + volume_size=dict(required=False, type="int"), + iops=dict(required=False, type="int"), + ), + ), + vpc_options=dict( + type="dict", + default=None, + options=dict( + subnets=dict(type="list", elements="str", required=False), + security_groups=dict(type="list", elements="str", required=False), + ), + ), + cognito_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(required=False, type="bool"), + user_pool_id=dict(required=False), + identity_pool_id=dict(required=False), + role_arn=dict(required=False, no_log=False), + ), + ), + 
encryption_at_rest_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(type="bool"), + kms_key_id=dict(required=False), + ), + ), + node_to_node_encryption_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(type="bool"), + ), + ), + domain_endpoint_options=dict( + type="dict", + default=None, + options=dict( + enforce_https=dict(type="bool"), + tls_security_policy=dict(), + custom_endpoint_enabled=dict(type="bool"), + custom_endpoint=dict(), + custom_endpoint_certificate_arn=dict(), + ), + ), + advanced_security_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(type="bool"), + internal_user_database_enabled=dict(type="bool"), + master_user_options=dict( + type="dict", + default=None, + options=dict( + master_user_arn=dict(), + master_user_name=dict(), + master_user_password=dict(no_log=True), + ), + ), + saml_options=dict( + type="dict", + default=None, + options=dict( + enabled=dict(type="bool"), + idp=dict( + type="dict", + default=None, + options=dict( + metadata_content=dict(), + entity_id=dict(), + ), + ), + master_user_name=dict(), + master_backend_role=dict(), + subject_key=dict(no_log=False), + roles_key=dict(no_log=False), + session_timeout_minutes=dict(type="int"), + ), + ), + ), + ), + auto_tune_options=dict( + type="dict", + default=None, + options=dict( + desired_state=dict(choices=["ENABLED", "DISABLED"]), + maintenance_schedules=dict( + type="list", + elements="dict", + default=None, + options=dict( + start_at=dict(), + duration=dict( + type="dict", + default=None, + options=dict( + value=dict(type="int"), + unit=dict(choices=["HOURS"]), + ), + ), + cron_expression_for_recurrence=dict(), + ), + ), + ), + ), + tags=dict(type="dict"), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + ), + supports_check_mode=True, + ) + + module.require_botocore_at_least("1.21.38") + + try: + client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service") + + if module.params["state"] == "absent": + ret_dict = ensure_domain_absent(client, module) + else: + ret_dict = ensure_domain_present(client, module) + + module.exit_json(**ret_dict) + + +if __name__ == "__main__": + main() diff --git a/opensearch_info.py b/opensearch_info.py new file mode 100644 index 00000000000..6a884fdb076 --- /dev/null +++ b/opensearch_info.py @@ -0,0 +1,530 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: opensearch_info +short_description: obtain information about one or more OpenSearch or ElasticSearch domain. +description: + - obtain information about one Amazon OpenSearch Service domain. +version_added: 3.1.0 +author: "Sebastien Rosset (@sebastien-rosset)" +options: + domain_name: + description: + - The name of the Amazon OpenSearch/ElasticSearch Service domain. + required: false + type: str + tags: + description: + - > + A dict of tags that are used to filter OpenSearch domains that match + all tag key, value pairs. 
+ required: false + type: dict +requirements: +- botocore >= 1.21.38 +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +""" + +EXAMPLES = ''' +- name: Get information about an OpenSearch domain instance + community.aws.opensearch_info: + domain-name: my-search-cluster + register: new_cluster_info + +- name: Get all OpenSearch instances + community.aws.opensearch_info: + +- name: Get all OpenSearch instances that have the specified Key, Value tags + community.aws.opensearch_info: + tags: + Applications: search + Environment: Development +''' + +RETURN = ''' +instances: + description: List of OpenSearch domain instances + returned: always + type: complex + contains: + domain_status: + description: The current status of the OpenSearch domain. + returned: always + type: complex + contains: + arn: + description: The ARN of the OpenSearch domain. + returned: always + type: str + domain_id: + description: The unique identifier for the OpenSearch domain. + returned: always + type: str + domain_name: + description: The name of the OpenSearch domain. + returned: always + type: str + created: + description: + - > + The domain creation status. True if the creation of a domain is complete. + False if domain creation is still in progress. + returned: always + type: bool + deleted: + description: + - > + The domain deletion status. + True if a delete request has been received for the domain but resource cleanup is still in progress. + False if the domain has not been deleted. + Once domain deletion is complete, the status of the domain is no longer returned. + returned: always + type: bool + endpoint: + description: The domain endpoint that you use to submit index and search requests. + returned: always + type: str + endpoints: + description: + - > + Map containing the domain endpoints used to submit index and search requests. + - > + When you create a domain attached to a VPC domain, this propery contains + the DNS endpoint to which service requests are submitted. + - > + If you query the opensearch_info immediately after creating the OpenSearch cluster, + the VPC endpoint may not be returned. It may take several minutes until the + endpoints is available. + type: dict + processing: + description: + - > + The status of the domain configuration. + True if Amazon OpenSearch Service is processing configuration changes. + False if the configuration is active. + returned: always + type: bool + upgrade_processing: + description: true if a domain upgrade operation is in progress. + returned: always + type: bool + engine_version: + description: The version of the OpenSearch domain. + returned: always + type: str + sample: OpenSearch_1.1 + cluster_config: + description: + - Parameters for the cluster configuration of an OpenSearch Service domain. + type: complex + contains: + instance_type: + description: + - Type of the instances to use for the domain. + type: str + instance_count: + description: + - Number of instances for the domain. + type: int + zone_awareness: + description: + - A boolean value to indicate whether zone awareness is enabled. + type: bool + availability_zone_count: + description: + - > + An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. + This should be equal to number of subnets if VPC endpoints is enabled. + type: int + dedicated_master_enabled: + description: + - A boolean value to indicate whether a dedicated master node is enabled. 
+ type: bool + zone_awareness_enabled: + description: + - A boolean value to indicate whether zone awareness is enabled. + type: bool + zone_awareness_config: + description: + - The zone awareness configuration for a domain when zone awareness is enabled. + type: complex + contains: + availability_zone_count: + description: + - An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. + type: int + dedicated_master_type: + description: + - The instance type for a dedicated master node. + type: str + dedicated_master_count: + description: + - Total number of dedicated master nodes, active and on standby, for the domain. + type: int + warm_enabled: + description: + - True to enable UltraWarm storage. + type: bool + warm_type: + description: + - The instance type for the OpenSearch domain's warm nodes. + type: str + warm_count: + description: + - The number of UltraWarm nodes in the domain. + type: int + cold_storage_options: + description: + - Specifies the ColdStorageOptions config for a Domain. + type: complex + contains: + enabled: + description: + - True to enable cold storage. Supported on Elasticsearch 7.9 or above. + type: bool + ebs_options: + description: + - Parameters to configure EBS-based storage for an OpenSearch Service domain. + type: complex + contains: + ebs_enabled: + description: + - Specifies whether EBS-based storage is enabled. + type: bool + volume_type: + description: + - Specifies the volume type for EBS-based storage. "standard"|"gp2"|"io1" + type: str + volume_size: + description: + - Integer to specify the size of an EBS volume. + type: int + iops: + description: + - The IOPD for a Provisioned IOPS EBS volume (SSD). + type: int + vpc_options: + description: + - Options to specify the subnets and security groups for a VPC endpoint. + type: complex + contains: + vpc_id: + description: The VPC ID for the domain. + type: str + subnet_ids: + description: + - Specifies the subnet ids for VPC endpoint. + type: list + elements: str + security_group_ids: + description: + - Specifies the security group ids for VPC endpoint. + type: list + elements: str + availability_zones: + description: + - The Availability Zones for the domain.. + type: list + elements: str + snapshot_options: + description: + - Option to set time, in UTC format, of the daily automated snapshot. + type: complex + contains: + automated_snapshot_start_hour: + description: + - > + Integer value from 0 to 23 specifying when the service takes a daily automated snapshot + of the specified Elasticsearch domain. + type: int + access_policies: + description: + - IAM access policy as a JSON-formatted string. + type: complex + encryption_at_rest_options: + description: + - Parameters to enable encryption at rest. + type: complex + contains: + enabled: + description: + - Should data be encrypted while at rest. + type: bool + kms_key_id: + description: + - If encryption at rest enabled, this identifies the encryption key to use. + - The value should be a KMS key ARN. It can also be the KMS key id. + type: str + node_to_node_encryption_options: + description: + - Node-to-node encryption options. + type: complex + contains: + enabled: + description: + - True to enable node-to-node encryption. + type: bool + cognito_options: + description: + - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards. 
+ type: complex + contains: + enabled: + description: + - The option to enable Cognito for OpenSearch Dashboards authentication. + type: bool + user_pool_id: + description: + - The Cognito user pool ID for OpenSearch Dashboards authentication. + type: str + identity_pool_id: + description: + - The Cognito identity pool ID for OpenSearch Dashboards authentication. + type: str + role_arn: + description: + - The role ARN that provides OpenSearch permissions for accessing Cognito resources. + type: str + domain_endpoint_options: + description: + - Options to specify configuration that will be applied to the domain endpoint. + type: complex + contains: + enforce_https: + description: + - Whether only HTTPS endpoint should be enabled for the domain. + type: bool + tls_security_policy: + description: + - Specify the TLS security policy to apply to the HTTPS endpoint of the domain. + type: str + custom_endpoint_enabled: + description: + - Whether to enable a custom endpoint for the domain. + type: bool + custom_endpoint: + description: + - The fully qualified domain for your custom endpoint. + type: str + custom_endpoint_certificate_arn: + description: + - The ACM certificate ARN for your custom endpoint. + type: str + advanced_security_options: + description: + - Specifies advanced security options. + type: complex + contains: + enabled: + description: + - True if advanced security is enabled. + - You must enable node-to-node encryption to use advanced security options. + type: bool + internal_user_database_enabled: + description: + - True if the internal user database is enabled. + type: bool + master_user_options: + description: + - Credentials for the master user, username and password, ARN, or both. + type: complex + contains: + master_user_arn: + description: + - ARN for the master user (if IAM is enabled). + type: str + master_user_name: + description: + - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database. + type: str + master_user_password: + description: + - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database. + type: str + saml_options: + description: + - The SAML application configuration for the domain. + type: complex + contains: + enabled: + description: + - True if SAML is enabled. + type: bool + idp: + description: + - The SAML Identity Provider's information. + type: complex + contains: + metadata_content: + description: + - The metadata of the SAML application in XML format. + type: str + entity_id: + description: + - The unique entity ID of the application in SAML identity provider. + type: str + master_user_name: + description: + - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database. + type: str + master_backend_role: + description: + - The backend role that the SAML master user is mapped to. + type: str + subject_key: + description: + - Element of the SAML assertion to use for username. Default is NameID. + type: str + roles_key: + description: + - Element of the SAML assertion to use for backend roles. Default is roles. + type: str + session_timeout_minutes: + description: + - > + The duration, in minutes, after which a user session becomes inactive. + Acceptable values are between 1 and 1440, and the default value is 60. + type: int + auto_tune_options: + description: + - Specifies Auto-Tune options. + type: complex + contains: + desired_state: + description: + - The Auto-Tune desired state. 
Valid values are ENABLED and DISABLED. + type: str + maintenance_schedules: + description: + - A list of maintenance schedules. + type: list + elements: dict + contains: + start_at: + description: + - The timestamp at which the Auto-Tune maintenance schedule starts. + type: str + duration: + description: + - Specifies maintenance schedule duration, duration value and duration unit. + type: complex + contains: + value: + description: + - Integer to specify the value of a maintenance schedule duration. + type: int + unit: + description: + - The unit of a maintenance schedule duration. Valid value is HOURS. + type: str + cron_expression_for_recurrence: + description: + - A cron expression for a recurring maintenance schedule. + type: str + domain_config: + description: The OpenSearch domain configuration + returned: always + type: complex + contains: + domain_name: + description: The name of the OpenSearch domain. + returned: always + type: str +''' + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( + AWSRetry, + boto3_tag_list_to_ansible_dict, + camel_dict_to_snake_dict, +) +from ansible_collections.community.aws.plugins.module_utils.opensearch import ( + get_domain_config, + get_domain_status, +) + + +def domain_info(client, module): + domain_name = module.params.get('domain_name') + filter_tags = module.params.get('tags') + + domain_list = [] + if domain_name: + domain_status = get_domain_status(client, module, domain_name) + if domain_status: + domain_list.append({'DomainStatus': domain_status}) + else: + domain_summary_list = client.list_domain_names()['DomainNames'] + for d in domain_summary_list: + domain_status = get_domain_status(client, module, d['DomainName']) + if domain_status: + domain_list.append({'DomainStatus': domain_status}) + + # Get the domain tags + for domain in domain_list: + current_domain_tags = None + domain_arn = domain['DomainStatus']['ARN'] + try: + current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] + domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # This could potentially happen if a domain is deleted between the time + # its domain status was queried and the tags were queried. 
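+            # Falling back to an empty tag dict keeps the module from failing
+            # mid-listing; a domain deleted in that window simply cannot match
+            # any entry in the tag filter applied below.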
+ domain['Tags'] = {} + + # Filter by tags + if filter_tags: + for tag_key in filter_tags: + try: + domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])] + except (TypeError, AttributeError) as e: + module.fail_json(msg="OpenSearch tag filtering error", exception=e) + + # Get the domain config + for idx, domain in enumerate(domain_list): + domain_name = domain['DomainStatus']['DomainName'] + (domain_config, arn) = get_domain_config(client, module, domain_name) + if domain_config: + domain['DomainConfig'] = domain_config + domain_list[idx] = camel_dict_to_snake_dict(domain, + ignore_list=['AdvancedOptions', 'Endpoints', 'Tags']) + + return dict(changed=False, domains=domain_list) + + +def main(): + module = AnsibleAWSModule( + argument_spec=dict( + domain_name=dict(required=False), + tags=dict(type='dict', required=False), + ), + supports_check_mode=True, + ) + module.require_botocore_at_least("1.21.38") + + try: + client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service") + + module.exit_json(**domain_info(client, module)) + + +if __name__ == '__main__': + main() From 98038185dab9cbe675f3a867d12e9ae7b80826c3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 14 Jun 2022 10:17:51 +0200 Subject: [PATCH 490/683] opensearch - Minor fixups (#1235) opensearch - Minor fixups SUMMARY Minor fixups from #859 Module hasn't been release yet, no need for changelog ISSUE TYPE Docs Pull Request Feature Pull Request COMPONENT NAME opensearch opensearch_info ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- opensearch.py | 22 +++++++--------------- opensearch_info.py | 12 ++++++------ 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/opensearch.py b/opensearch.py index 422feb7d31a..0035352b2a5 100644 --- a/opensearch.py +++ b/opensearch.py @@ -10,10 +10,10 @@ DOCUMENTATION = """ --- module: opensearch -short_description: Creates OpenSearch or ElasticSearch domain. +short_description: Creates OpenSearch or ElasticSearch domain description: - Creates or modify a Amazon OpenSearch Service domain. -version_added: 3.1.0 +version_added: 4.0.0 author: "Sebastien Rosset (@sebastien-rosset)" options: state: @@ -387,20 +387,12 @@ - how long before wait gives up, in seconds. default: 300 type: int - tags: - description: - - tags dict to apply to an OpenSearch cluster. - type: dict - purge_tags: - description: - - whether to remove tags not present in the C(tags) parameter. - default: True - type: bool requirements: -- botocore >= 1.21.38 + - botocore >= 1.21.38 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags """ EXAMPLES = """ @@ -1480,7 +1472,7 @@ def main(): ), ), ), - tags=dict(type="dict"), + tags=dict(type="dict", aliases=["resource_tags"]), purge_tags=dict(type="bool", default=True), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", default=300), diff --git a/opensearch_info.py b/opensearch_info.py index 6a884fdb076..50b6ddc08d8 100644 --- a/opensearch_info.py +++ b/opensearch_info.py @@ -10,10 +10,10 @@ DOCUMENTATION = """ --- module: opensearch_info -short_description: obtain information about one or more OpenSearch or ElasticSearch domain. 
+short_description: obtain information about one or more OpenSearch or ElasticSearch domain description: - - obtain information about one Amazon OpenSearch Service domain. -version_added: 3.1.0 + - Obtain information about one Amazon OpenSearch Service domain. +version_added: 4.0.0 author: "Sebastien Rosset (@sebastien-rosset)" options: domain_name: @@ -29,10 +29,10 @@ required: false type: dict requirements: -- botocore >= 1.21.38 + - botocore >= 1.21.38 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 """ EXAMPLES = ''' From 9abafc12bfbd91726e3b3ba5fcd34cc091ac6780 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Tue, 14 Jun 2022 10:26:28 -0700 Subject: [PATCH 491/683] route53_info: Add snake_cased return key,values and a deprecation message (#1236) route53_info: Add snake_cased return key,values and a deprecation message Depends-On: ansible/ansible-zuul-jobs#1564 SUMMARY Add snake_case return values and a deprecation message for existing CamelCase return values. Route53_info currently returns CamelCase values, to have uniformity along all *_info modules in terms of return values, it should return snake_case values instead. Proposed change should make addition of snake_case return values and the deprecation message provides time for users to upgrade their playbooks to avoid breaking existing playbooks due to the proposed change. ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_info ADDITIONAL INFORMATION This PR is relation to the initiative for having updated developer guidelines for *_info modules specifically related to guidelines for deprecating return values. Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis --- route53_info.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/route53_info.py b/route53_info.py index 7622113c25e..5e40efa4aad 100644 --- a/route53_info.py +++ b/route53_info.py @@ -213,6 +213,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict # Split out paginator to allow for the backoff decorator to function @@ -270,10 +271,17 @@ def list_hosted_zones(): params['DelegationSetId'] = module.params.get('delegation_set_id') zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] + + module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'hosted_zones'. \ + Both case values are returned for now.", + date='2025-01-01', collection_name='community.aws') return { "HostedZones": zones, "list": zones, + "hosted_zones": snaked_zones, } @@ -367,10 +375,17 @@ def list_health_checks(): ) health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] + + module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'health_checks'. 
\ + Both case values are returned for now.", + date='2025-01-01', collection_name='community.aws') return { "HealthChecks": health_checks, "list": health_checks, + "health_checks": snaked_health_checks, } @@ -399,10 +414,17 @@ def record_sets_details(): ) record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] + snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] + + module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'resource_record_sets'. \ + Both case values are returned for now.", + date='2025-01-01', collection_name='community.aws') return { "ResourceRecordSets": record_sets, "list": record_sets, + "resource_record_sets": snaked_record_sets, } From e16d048cbe4da3fd4665c3551fc95cd2f2b1df83 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 14 Jun 2022 21:16:16 +0200 Subject: [PATCH 492/683] rds_cluster_snapshot - new module to handle RDS cluster snapshotting (#788) rds_cluster_snapshot - new module to handle RDS cluster snapshotting SUMMARY rds_cluster_snapshot - new module to handle RDS cluster snapshotting Requires rds_cluster to be merged first #687 Depends-On: ansible-collections/amazon.aws#840 Requires: mattclay/aws-terminator#212 Requires also mattclay/aws-terminator#184 ISSUE TYPE New Module Pull Request COMPONENT NAME rds_cluster_snapshot Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso --- rds_cluster_snapshot.py | 372 ++++++++++++++++++++++++++++++++++ 1 file changed, 372 insertions(+) create mode 100644 rds_cluster_snapshot.py diff --git a/rds_cluster_snapshot.py b/rds_cluster_snapshot.py new file mode 100644 index 00000000000..09077c9638b --- /dev/null +++ b/rds_cluster_snapshot.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# Copyright (c) 2014 Ansible Project +# Copyright (c) 2021 Alina Buzachis (@alinabuzachis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rds_cluster_snapshot +version_added: 4.0.0 +short_description: Manage Amazon RDS snapshots of DB clusters +description: + - Create, modify and delete RDS snapshots of DB clusters. +options: + state: + description: + - Specify the desired state of the snapshot. + default: present + choices: [ 'present', 'absent'] + type: str + db_cluster_snapshot_identifier: + description: + - The identifier of the DB cluster snapshot. + required: true + aliases: + - snapshot_id + - id + - snapshot_name + type: str + db_cluster_identifier: + description: + - The identifier of the DB cluster to create a snapshot for. + - Required when I(state=present). + aliases: + - cluster_id + - cluster_name + type: str + source_db_cluster_snapshot_identifier: + description: + - The identifier of the DB cluster snapshot to copy. + - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier. + - If the source snapshot is in a different AWS region than the copy, specify the snapshot's ARN. + aliases: + - source_id + - source_snapshot_id + type: str + source_region: + description: + - The region that contains the snapshot to be copied. + type: str + copy_tags: + description: + - Whether to copy all tags from I(source_db_cluster_snapshot_identifier) to I(db_cluster_snapshot_identifier). + type: bool + default: False + wait: + description: + - Whether or not to wait for snapshot creation or deletion. + type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int +notes: + - Retrieving the information about a specific DB cluster or listing the DB cluster snapshots for a specific DB cluster + can be done using M(community.aws.rds_snapshot_info). +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags +''' + +EXAMPLES = r''' +- name: Create a DB cluster snapshot + community.aws.rds_cluster_snapshot: + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: new-cluster-snapshot + +- name: Delete a DB cluster snapshot + community.aws.rds_cluster_snapshot: + db_cluster_snapshot_identifier: new-cluster-snapshot + state: absent + +- name: Copy snapshot from a different region and copy its tags + community.aws.rds_cluster_snapshot: + id: new-database-snapshot-copy + region: us-east-1 + source_id: "{{ snapshot.db_snapshot_arn }}" + source_region: us-east-2 + copy_tags: yes +''' + +RETURN = r''' +availability_zone: + description: Availability zone of the database from which the snapshot was created. + returned: always + type: str + sample: us-west-2a +db_cluster_snapshot_identifier: + description: Specifies the identifier for the DB cluster snapshot. + returned: always + type: str + sample: ansible-test-16638696-test-snapshot +db_cluster_identifier: + description: Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from. + returned: always + type: str + sample: ansible-test-16638696 +snapshot_create_time: + description: Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). + returned: always + type: str + sample: '2019-06-15T10:46:23.776000+00:00' +engine: + description: Specifies the name of the database engine for this DB cluster snapshot. + returned: always + type: str + sample: "aurora" +engine_mode: + description: Provides the engine mode of the database engine for this DB cluster snapshot. + returned: always + type: str + sample: "provisioned" +allocated_storage: + description: Specifies the allocated storage size in gibibytes (GiB). + returned: always + type: int + sample: 20 +status: + description: Specifies the status of this DB cluster snapshot. + returned: always + type: str + sample: available +port: + description: Port on which the database is listening. + returned: always + type: int + sample: 3306 +vpc_id: + description: ID of the VPC in which the DB lives. + returned: always + type: str + sample: vpc-09ff232e222710ae0 +cluster_create_time: + description: Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC). + returned: always + type: str + sample: '2019-06-15T10:15:56.221000+00:00' +master_username: + description: Provides the master username for this DB cluster snapshot. + returned: always + type: str + sample: test +engine_version: + description: Version of the cluster from which the snapshot was created. + returned: always + type: str + sample: "5.6.mysql_aurora.1.22.5" +license_model: + description: Provides the license model information for this DB cluster snapshot. + returned: always + type: str + sample: general-public-license +snapshot_type: + description: How the snapshot was created (always manual for this module!). 
+ returned: always + type: str + sample: manual +percent_progress: + description: Specifies the percentage of the estimated data that has been transferred. + returned: always + type: int + sample: 100 +storage_encrypted: + description: Specifies whether the DB cluster snapshot is encrypted. + returned: always + type: bool + sample: false +kms_key_id: + description: The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. + returned: always + type: str +db_cluster_snapshot_arn: + description: Amazon Resource Name for the snapshot. + returned: always + type: str + sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot +source_db_cluster_snapshot_arn: + description: If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot, otherwise, null. + returned: always + type: str + sample: null +iam_database_authentication_enabled: + description: Whether IAM database authentication is enabled. + returned: always + type: bool + sample: false +tag_list: + description: A list of tags. + returned: always + type: list + sample: [] +tags: + description: Tags applied to the snapshot. + returned: always + type: complex + contains: {} +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params + + +def get_snapshot(snapshot_id): + try: + snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0] + snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"]) + except is_boto3_error_code("DBClusterSnapshotNotFound"): + return {} + except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + return snapshot + + +def get_parameters(parameters, method_name): + if method_name == 'copy_db_cluster_snapshot': + parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier'] + + required_options = get_boto3_client_method_parameters(client, method_name, required=True) + if any(parameters.get(k) is None for k in required_options): + module.fail_json(msg='To {0} requires the parameters: {1}'.format( + get_rds_method_attribute(method_name, module).operation_description, required_options)) + options = 
get_boto3_client_method_parameters(client, method_name) + parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) + + return parameters + + +def ensure_snapshot_absent(): + snapshot_name = module.params.get("db_cluster_snapshot_identifier") + params = {"DBClusterSnapshotIdentifier": snapshot_name} + changed = False + + snapshot = get_snapshot(snapshot_name) + if not snapshot: + module.exit_json(changed=changed) + elif snapshot and snapshot["Status"] != "deleting": + snapshot, changed = call_method(client, module, "delete_db_cluster_snapshot", params) + + module.exit_json(changed=changed) + + +def copy_snapshot(params): + changed = False + snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if not snapshot: + method_params = get_parameters(params, 'copy_db_cluster_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params) + + return changed + + +def ensure_snapshot_present(params): + source_id = module.params.get('source_db_cluster_snapshot_identifier') + snapshot_name = module.params.get("db_cluster_snapshot_identifier") + changed = False + + snapshot = get_snapshot(snapshot_name) + + # Copy snapshot + if source_id: + changed |= copy_snapshot(params) + + # Create snapshot + elif not snapshot: + changed |= create_snapshot(params) + + # Snapshot exists and we're not creating a copy - modify existing snapshot + else: + changed |= modify_snapshot() + + snapshot = get_snapshot(snapshot_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + + +def create_snapshot(params): + method_params = get_parameters(params, 'create_db_cluster_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params) + + return changed + + +def modify_snapshot(): + # TODO - add other modifications aside from purely tags + changed = False + snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if module.params.get('tags'): + changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + + return changed + + +def main(): + global client + global module + + argument_spec = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True), + db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']), + source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + copy_tags=dict(type='bool', default=False), + source_region=dict(type='str'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, 
msg="Failed to connect to AWS.") + + state = module.params.get("state") + + if state == "absent": + ensure_snapshot_absent() + elif state == "present": + params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) + ensure_snapshot_present(params) + + +if __name__ == '__main__': + main() From 114a34fbb2e53b1ffe8146ecd898eccd70abdfb8 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Fri, 17 Jun 2022 14:11:50 -0700 Subject: [PATCH 493/683] route53_info: Add RETURN block (#1240) route53_info: Add RETURN block SUMMARY Currently route53_info is mising a return block. This is a follow up on #1236 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_info Reviewed-by: Joseph Torcasso Reviewed-by: Mike Graves Reviewed-by: Mandar Kulkarni --- route53_info.py | 179 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/route53_info.py b/route53_info.py index 5e40efa4aad..4e90556a1ec 100644 --- a/route53_info.py +++ b/route53_info.py @@ -204,6 +204,185 @@ register: RECORDS ''' +RETURN = r''' +resource_record_sets: + description: A list of resource record sets returned by list_resource_record_sets in boto3. + returned: when I(query=record_sets) + type: list + elements: dict + contains: + name: + description: The name of a record in the specified hosted zone. + type: str + sample: 'www.example.com' + type: + description: The DNS record type. + type: str + sample: 'A' + ttl: + description: The resource record cache time to live (TTL), in seconds. + type: int + sample: 60 + set_identifier: + description: An identifier that differentiates among multiple resource record sets that have the same combination of name and type. + type: str + sample: 'abcd' + resource_records: + description: Information about the resource records. + type: list + elements: dict + contains: + value: + description: The current or new DNS record value. + type: str + sample: 'ns-12.awsdns-34.com.' + geo_location: + description: The specified geographic location for which the Route53 responds to based on location. + type: dict + elements: str + contains: + continent_code: + description: The two-letter code for the continent. + type: str + sample: 'NA' + country_code: + description: The two-letter code for a country. + type: str + sample: 'US' + subdivision_code: + description: The two-letter code for a state of the United States + type: str + sample: 'NY' + version_added: 4.0.0 +hosted_zones: + description: A list of hosted zones returned by list_hosted_zones in boto3. + returned: when I(query=hosted_zone) + type: list + elements: dict + contains: + id: + description: The ID of the hosted zone assigned by Amazon Route53 to the hosted zone at the creation time. + type: str + sample: '/hostedzone/Z01234567AB1234567890' + name: + description: The name of the domain. + type: str + sample: 'example.io' + resource_record_set_count: + description: The number of resource record sets in the hosted zone. + type: int + sample: 3 + caller_reference: + description: The value specified for CallerReference at the time of hosted zone creation. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + config: + description: A dict that contains Comment and PrivateZone elements. + type: dict + contains: + comment: + description: Any comments that included about in the hosted zone. + type: str + sample: 'HostedZone created by Route53 Registrar' + private_zone: + description: A value that indicates whether this is a private hosted zone or not. 
+ type: bool + sample: false + version_added: 4.0.0 +health_checks: + description: A list of Route53 health checks returned by list_health_checks in boto3. + type: list + elements: dict + returned: when I(query=health_check) + contains: + id: + description: The identifier that Amazon Route53 assigned to the health check at the time of creation. + type: str + sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' + health_check_version: + description: The version of the health check. + type: str + sample: 1 + caller_reference: + description: A unique string that you specified when you created the health check. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + health_check_config: + description: A dict that contains detailed information about one health check. + type: dict + contains: + disabled: + description: Whether Route53 should stop performing health checks on a endpoint. + type: bool + sample: false + enable_sni: + description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation. + type: bool + sample: true + failure_threshold: + description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change current status of endpoint. + type: int + sample: 3 + fully_qualified_domain_name: + description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. + type: str + sample: 'hello' + inverted: + description: Whether Route53 should invert the status of a health check. + type: bool + sample: false + ip_address: + description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. + type: str + sample: 192.0.2.44 + measure_latency: + description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. + type: bool + sample: false + port: + description: The port of the endpoint that Route53 should perform health checks on. + type: int + sample: 80 + request_interval: + description: The number of seconds between the time that Route53 gets a response from endpoint and the next health check request. + type: int + sample: 30 + resource_path: + description: The path that Route53 requests when performing health checks. + type: str + sample: '/welcome.html' + search_string: + description: The string that Route53 uses to search for in the response body from specified resource. + type: str + sample: 'test-string-to-match' + type: + description: The type of the health check. + type: str + sample: HTTPS + version_added: 4.0.0 +ResourceRecordSets: + description: A deprecated CamelCased list of resource record sets returned by list_resource_record_sets in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + returned: when I(query=record_sets) + type: list + elements: dict +HostedZones: + description: A deprecated CamelCased list of hosted zones returned by list_hosted_zones in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + returned: when I(query=hosted_zone) + type: list + elements: dict +HealthChecks: + description: A deprecated CamelCased list of Route53 health checks returned by list_health_checks in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. 
\ + This field is deprecated and will be removed in 6.0.0 version release. + type: list + elements: dict + returned: when I(query=health_check) +''' + try: import botocore except ImportError: From 9ce7972a22e55dc185bf78422f4c202cd347f66f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 20 Jun 2022 11:18:41 +0200 Subject: [PATCH 494/683] ec2_vpc_vpn - fix exception when running in check mode without tags set (#1242) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ec2_vpc_vpn - fix exception when running in check mode without tags set SUMMARY Rename ec2_vpc_vpn_info tests to ec2_vpc_vpn so we have baseline testing for VPNs (keep ec2_vpc_vpn_info as an alias) fix exception when run in check mode without tags set. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_vpc_vpn ADDITIONAL INFORMATION TASK [ec2_vpc_vpn : (check) No change to tags without setting tags] ************ task path: /root/ansible_collections/community/aws/tests/output/.tmp/integration/ec2_vpc_vpn-oywt620e-ÅÑŚÌβŁÈ/tests/integration/targets/ec2_vpc_vpn/tasks/tags.yml:248 Using module file /root/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py Pipelining is enabled. ESTABLISH LOCAL CONNECTION FOR USER: root EXEC /bin/sh -c 'ANSIBLE_DEBUG_BOTOCORE_LOGS=True /usr/bin/python3.10 && sleep 0' The full traceback is: Traceback (most recent call last): File "", line 121, in File "", line 113, in _ansiballz_main File "", line 61, in invoke_module File "/usr/lib/python3.10/runpy.py", line 209, in run_module return _run_module_code(code, init_globals, run_name, mod_spec) File "/usr/lib/python3.10/runpy.py", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File "/usr/lib/python3.10/runpy.py", line 86, in _run_code exec(code, run_globals) File "/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py", line 808, in File "/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py", line 795, in main File "/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py", line 714, in ensure_present File "/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py", line 661, in get_check_mode_results AttributeError: 'NoneType' object has no attribute 'keys' fatal: [testhost]: FAILED! 
=> { "changed": false, "module_stderr": "Traceback (most recent call last):\n File \"\", line 121, in \n File \"\", line 113, in _ansiballz_main\n File \"\", line 61, in invoke_module\n File \"/usr/lib/python3.10/runpy.py\", line 209, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/lib/python3.10/runpy.py\", line 96, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/usr/lib/python3.10/runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py\", line 808, in \n File \"/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py\", line 795, in main\n File \"/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py\", line 714, in ensure_present\n File \"/tmp/ansible_ec2_vpc_vpn_payload_0zo_xg79/ansible_ec2_vpc_vpn_payload.zip/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py\", line 661, in get_check_mode_results\nAttributeError: 'NoneType' object has no attribute 'keys'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } Reviewed-by: Alina Buzachis --- ec2_vpc_vpn.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index a1877326d33..8e22b973c2e 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -652,18 +652,16 @@ def get_check_mode_results(connection, module_params, vpn_connection_id=None, cu # get combined current tags and tags to set present_tags = module_params.get('tags') - if current_state and 'Tags' in current_state: + if present_tags is None: + pass + elif current_state and 'Tags' in current_state: current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags']) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags')) + changed |= bool(tags_to_remove) or bool(tags_to_add) if module_params.get('purge_tags'): - if current_tags != present_tags: - changed = True - elif current_tags != present_tags: - if not set(present_tags.keys()) < set(current_tags.keys()): - changed = True - # add preexisting tags that new tags didn't overwrite - present_tags.update((tag, current_tags[tag]) for tag in current_tags if tag not in present_tags) - elif current_tags.keys() == present_tags.keys() and set(present_tags.values()) != set(current_tags.values()): - changed = True + current_tags = {} + current_tags.update(present_tags) + results['tags'] = current_tags elif module_params.get('tags'): changed = True if present_tags: From 7b6c8e5166acb423a0e03263f82223573137c638 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 21 Jun 2022 15:23:55 +0200 Subject: [PATCH 495/683] Re-enable and update integration tests for aws_ssm_parameter_store (#1241) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Re-enable and update integration tests for aws_ssm_parameter_store SUMMARY Fixes exception when description was updated within passing value. Rewrite and enable integration tests for module Add some return values While the integration tests don't include coverage for the more complex options for aws_ssm_parameter_store these have never existed. 
Having something enabled gives future module developers something to build upon. ISSUE TYPE Bugfix Pull Request COMPONENT NAME aws_ssm_parameter_store ADDITIONAL INFORMATION Exception: TASK [aws_ssm_parameter_store : Update description] **************************** task path: /root/ansible_collections/community/aws/tests/output/.tmp/integration/aws_ssm_parameter_store-071mpr89-ÅÑŚÌβŁÈ/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml:64 Using module file /root/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py Pipelining is enabled. ESTABLISH LOCAL CONNECTION FOR USER: root EXEC /bin/sh -c 'ANSIBLE_DEBUG_BOTOCORE_LOGS=True /usr/bin/python3.10 && sleep 0' The full traceback is: Traceback (most recent call last): File "/tmp/ansible_aws_ssm_parameter_store_payload_13s3_ao3/ansible_aws_ssm_parameter_store_payload.zip/ansible_collections/community/aws/plugins/modules/aws_ssm_parameter_store.py", line 160, in update_parameter File "/usr/local/lib/python3.10/dist-packages/botocore/client.py", line 357, in _api_call return self._make_api_call(operation_name, kwargs) File "/usr/local/lib/python3.10/dist-packages/botocore/client.py", line 648, in _make_api_call request_dict = self._convert_to_request_dict( File "/usr/local/lib/python3.10/dist-packages/botocore/client.py", line 696, in _convert_to_request_dict request_dict = self._serializer.serialize_to_request( File "/usr/local/lib/python3.10/dist-packages/botocore/validate.py", line 293, in serialize_to_request raise ParamValidationError(report=report.generate_report()) botocore.exceptions.ParamValidationError: Parameter validation failed: Invalid type for parameter Value, value: None, type: , valid types: fatal: [testhost]: FAILED! => { "boto3_version": "1.17.0", "botocore_version": "1.20.0", "changed": false, "invocation": { "module_args": { "aws_access_key": "ASIA6CCDWXDOM4BEFFL4", "aws_ca_bundle": null, "aws_config": null, "aws_secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "debug_botocore_endpoint_logs": true, "decryption": true, "description": "This is an updated example", "ec2_url": null, "key_id": "alias/aws/ssm", "name": "/ansible-test-12184966-mchappel/Simple", "overwrite_value": "changed", "profile": null, "region": "us-east-1", "security_token": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "state": "present", "string_type": "String", "tier": "Standard", "validate_certs": true, "value": null } }, "msg": "setting parameter: Parameter validation failed:\nInvalid type for parameter Value, value: None, type: , valid types: ", "resource_actions": [ "ssm:GetParameter" ] } Reviewed-by: Alina Buzachis --- aws_ssm_parameter_store.py | 148 +++++++++++++++++++++++++++++-------- 1 file changed, 117 insertions(+), 31 deletions(-) diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 856f7eec8a7..37ec605e5a3 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -10,9 +10,11 @@ --- module: aws_ssm_parameter_store version_added: 1.0.0 -short_description: Manage key-value pairs in aws parameter store. +short_description: Manage key-value pairs in AWS SSM parameter store description: - - Manage key-value pairs in aws parameter store. + - Manage key-value pairs in AWS SSM parameter store. + - To retreive SSM parameters use the M(amazon.aws.aws_ssm_parameter) lookup + plugin. 
options: name: description: @@ -44,6 +46,7 @@ choices: ['String', 'StringList', 'SecureString'] default: String type: str + aliases: ['type'] decryption: description: - Work with SecureString type to get plain text secrets @@ -86,7 +89,7 @@ ''' EXAMPLES = ''' -- name: Create or update key/value pair in aws parameter store +- name: Create or update key/value pair in AWS SSM parameter store community.aws.aws_ssm_parameter_store: name: "Hello" description: "This is your first key" @@ -120,7 +123,7 @@ value: "Test1234" overwrite_value: "always" -- name: Create or update key/value pair in aws parameter store with tier +- name: Create or update key/value pair in AWS SSM parameter store with tier community.aws.aws_ssm_parameter_store: name: "Hello" description: "This is your first key" @@ -129,18 +132,78 @@ - name: recommend to use with aws_ssm lookup plugin ansible.builtin.debug: - msg: "{{ lookup('amazon.aws.aws_ssm', 'hello') }}" + msg: "{{ lookup('amazon.aws.aws_ssm', 'Hello') }}" ''' RETURN = ''' -put_parameter: - description: Add one or more parameters to the system. - returned: success - type: dict -delete_parameter: - description: Delete a parameter from the system. - returned: success - type: dict +parameter_metadata: + type: dict + description: + - Information about a parameter. + - Does not include the value of the parameter as this can be sensitive + information. + returned: success + contains: + data_type: + type: str + description: Parameter Data type. + example: text + returned: success + description: + type: str + description: Parameter key description. + example: This is your first key + returned: success + last_modified_date: + type: str + description: Time and date that the parameter was last modified. + example: '2022-06-20T09:56:58.573000+00:00' + returned: success + last_modified_user: + type: str + description: ARN of the last user to modify the parameter. + example: 'arn:aws:sts::123456789012:assumed-role/example-role/session=example' + returned: success + name: + type: str + description: Parameter key name. + example: Hello + returned: success + policies: + type: list + description: A list of policies associated with a parameter. + elements: dict + returned: success + contains: + policy_text: + type: str + description: The JSON text of the policy. + returned: success + policy_type: + type: str + description: The type of policy. + example: Expiration + returned: success + policy_status: + type: str + description: The status of the policy. + example: Pending + returned: success + tier: + type: str + description: Parameter tier. 
+ example: Standard + returned: success + type: + type: str + description: Parameter type + example: String + returned: success + version: + type: int + description: Parameter version number + example: 3 + returned: success ''' try: @@ -148,11 +211,13 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -def update_parameter(client, module, args): +def update_parameter(client, module, **args): changed = False response = {} @@ -165,6 +230,16 @@ def update_parameter(client, module, args): return changed, response +def describe_parameter(client, module, **args): + paginator = client.get_paginator('describe_parameters') + existing_parameter = paginator.paginate(**args).build_full_result() + + if not existing_parameter['Parameters']: + return None + + return existing_parameter['Parameters'][0] + + def create_update_parameter(client, module): changed = False existing_parameter = None @@ -172,7 +247,6 @@ def create_update_parameter(client, module): args = dict( Name=module.params.get('name'), - Value=module.params.get('value'), Type=module.params.get('string_type'), Tier=module.params.get('tier') ) @@ -182,6 +256,9 @@ def create_update_parameter(client, module): else: args.update(Overwrite=False) + if module.params.get('value') is not None: + args.update(Value=module.params.get('value')) + if module.params.get('description'): args.update(Description=module.params.get('description')) @@ -194,32 +271,32 @@ def create_update_parameter(client, module): pass if existing_parameter: - if (module.params.get('overwrite_value') == 'always'): + if 'Value' not in args: + args['Value'] = existing_parameter['Parameter']['Value'] - (changed, response) = update_parameter(client, module, args) + if (module.params.get('overwrite_value') == 'always'): + (changed, response) = update_parameter(client, module, **args) elif (module.params.get('overwrite_value') == 'changed'): if existing_parameter['Parameter']['Type'] != args['Type']: - (changed, response) = update_parameter(client, module, args) + (changed, response) = update_parameter(client, module, **args) - if existing_parameter['Parameter']['Value'] != args['Value']: - (changed, response) = update_parameter(client, module, args) + elif existing_parameter['Parameter']['Value'] != args['Value']: + (changed, response) = update_parameter(client, module, **args) - if args.get('Description'): + elif args.get('Description'): # Description field not available from get_parameter function so get it from describe_parameters - describe_existing_parameter = None try: - describe_existing_parameter_paginator = client.get_paginator('describe_parameters') - describe_existing_parameter = describe_existing_parameter_paginator.paginate( - Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result() - + describe_existing_parameter = describe_parameter( + client, module, + Filters=[{"Key": "Name", "Values": [args['Name']]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']: - (changed, response) = update_parameter(client, module, args) + if describe_existing_parameter['Description'] != args['Description']: + (changed, response) = 
update_parameter(client, module, **args) else: - (changed, response) = update_parameter(client, module, args) + (changed, response) = update_parameter(client, module, **args) return changed, response @@ -250,7 +327,7 @@ def setup_module_object(): description=dict(), value=dict(required=False, no_log=True), state=dict(default='present', choices=['present', 'absent']), - string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']), + string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']), decryption=dict(default=True, type='bool'), key_id=dict(default="alias/aws/ssm"), overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), @@ -272,7 +349,16 @@ def main(): "absent": delete_parameter, } (changed, response) = invocations[state](client, module) - module.exit_json(changed=changed, response=response) + + result = {"response": response} + + parameter_metadata = describe_parameter( + client, module, + Filters=[{"Key": "Name", "Values": [module.params.get('name')]}]) + if parameter_metadata: + result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata) + + module.exit_json(changed=changed, **result) if __name__ == '__main__': From 9db4f2524a7378182e58025e2623b0cd9c5c88ba Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 24 Jun 2022 21:07:50 +0200 Subject: [PATCH 496/683] Bump various deprecations from 4.0.0 to 5.0.0 (#1258) Bump various deprecations from 4.0.0 to 5.0.0 SUMMARY Bump various deprecations from 4.0.0 to 5.0.0; we missed them prior to releasing 4.0.0. ISSUE TYPE Feature Pull Request COMPONENT NAME iam_server_certificate rds_instance_snapshot ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- iam_server_certificate.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 142d391ac06..f027ad3f75b 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -58,28 +58,28 @@ - The path to, or content of, the CA certificate chain in PEM encoded format. - If the parameter is not a file, it is assumed to be content. - Passing a file name is deprecated, and support will be dropped in - version 4.0.0 of this collection. + version 5.0.0 of this collection. type: str cert: description: - The path to, or content of the certificate body in PEM encoded format. - If the parameter is not a file, it is assumed to be content. - Passing a file name is deprecated, and support will be dropped in - version 4.0.0 of this collection. + version 5.0.0 of this collection. type: str key: description: - The path to, or content of the private key in PEM encoded format. If the parameter is not a file, it is assumed to be content. - Passing a file name is deprecated, and support will be dropped in - version 4.0.0 of this collection. + version 5.0.0 of this collection. type: str dup_ok: description: - By default the module will not upload a certificate that is already uploaded into AWS. - If I(dup_ok=True), it will upload the certificate as long as the name is unique. - Currently defaults to C(false), this will default to C(true) in release - 4.0.0. + 5.0.0. type: bool author: Jonathan I. Davila (@defionscode) @@ -321,21 +321,21 @@ def load_data(): module.deprecate( 'Passing a file name as the cert argument has been deprecated. 
' 'Please use a lookup instead, see the documentation for examples.', - version='4.0.0', collection_name='community.aws') + version='5.0.0', collection_name='community.aws') if key and os.path.isfile(key): with open(key, 'r') as key_fh: key = key_fh.read().rstrip() module.deprecate( 'Passing a file name as the key argument has been deprecated. ' 'Please use a lookup instead, see the documentation for examples.', - version='4.0.0', collection_name='community.aws') + version='5.0.0', collection_name='community.aws') if cert_chain and os.path.isfile(cert_chain): with open(cert_chain, 'r') as cert_chain_fh: cert_chain = cert_chain_fh.read() module.deprecate( 'Passing a file name as the cert_chain argument has been deprecated. ' 'Please use a lookup instead, see the documentation for examples.', - version='4.0.0', collection_name='community.aws') + version='5.0.0', collection_name='community.aws') return cert, key, cert_chain @@ -407,7 +407,7 @@ def main(): if dup_ok is None: module.deprecate( 'The dup_ok module currently defaults to false, this will change in ' - 'release 4.0.0 to true.', version='4.0.0', collection_name='community.aws') + 'release 5.0.0 to true.', version='5.0.0', collection_name='community.aws') current_cert = get_server_certificate(name) From d44c54db5c2feadeed7a6ebb36bd72dc5e877422 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 25 Jun 2022 13:01:18 +0200 Subject: [PATCH 497/683] Rename ACM modules (#1263) Rename ACM modules SUMMARY In line with what I understood to be the consensus on ansible-collections/amazon.aws#881 and ansible-collections/amazon.aws#610 Rename aws_acm to acm_certificate Rename aws_acm_info to acm_certificate_info ISSUE TYPE Feature Pull Request COMPONENT NAME aws_acm aws_acm_info acm_certificate acm_certificate_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- aws_acm.py => acm_certificate.py | 5 +++-- aws_acm_info.py => acm_certificate_info.py | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) rename aws_acm.py => acm_certificate.py (99%) rename aws_acm_info.py => acm_certificate_info.py (97%) diff --git a/aws_acm.py b/acm_certificate.py similarity index 99% rename from aws_acm.py rename to acm_certificate.py index 33c8d5fe903..6b48579d5bc 100644 --- a/aws_acm.py +++ b/acm_certificate.py @@ -27,7 +27,7 @@ DOCUMENTATION = r''' --- -module: aws_acm +module: acm_certificate short_description: Upload and delete certificates in the AWS Certificate Manager service version_added: 1.0.0 description: @@ -74,6 +74,8 @@ - > Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API. + - Prior to release 5.0.0 this module was called C(community.aws.aws_acm). + The usage did not change. options: certificate: description: @@ -176,7 +178,6 @@ - amazon.aws.aws - amazon.aws.ec2 - amazon.aws.tags.deprecated_purge - ''' EXAMPLES = ''' diff --git a/aws_acm_info.py b/acm_certificate_info.py similarity index 97% rename from aws_acm_info.py rename to acm_certificate_info.py index 8d61dde4d3c..8e16162cedb 100644 --- a/aws_acm_info.py +++ b/acm_certificate_info.py @@ -7,12 +7,14 @@ DOCUMENTATION = r''' -module: aws_acm_info +module: acm_certificate_info short_description: Retrieve certificate information from AWS Certificate Manager service version_added: 1.0.0 description: - - Retrieve information for ACM certificates + - Retrieve information for ACM certificates. - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API. 
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_acm_info). + The usage did not change. options: certificate_arn: description: @@ -24,13 +26,13 @@ type: str domain_name: description: - - The domain name of an ACM certificate to limit the search to + - The domain name of an ACM certificate to limit the search to. aliases: - name type: str statuses: description: - - Status to filter the certificate results + - Status to filter the certificate results. choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] type: list elements: str From 8daccc5fab430227b6cb910736ae7500e8ae3673 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 10:29:20 +0200 Subject: [PATCH 498/683] [5.0.0] iam_server_cerificate - complete deprecation cycle and remove support (#1265) [5.0.0] iam_server_cerificate - complete deprecation cycles SUMMARY fixes: #1257 In #735 we deprecated passing filenames and prepared to switch the default value of dup_ok to True. This PR completes the deprecation cycle (1 release late) ISSUE TYPE Feature Pull Request COMPONENT NAME iam_server_certificate ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- iam_server_certificate.py | 79 +++++++-------------------------------- 1 file changed, 14 insertions(+), 65 deletions(-) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index f027ad3f75b..824c8bd96c7 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -55,31 +55,22 @@ type: str cert_chain: description: - - The path to, or content of, the CA certificate chain in PEM encoded format. - - If the parameter is not a file, it is assumed to be content. - - Passing a file name is deprecated, and support will be dropped in - version 5.0.0 of this collection. + - The content of the CA certificate chain in PEM encoded format. type: str cert: description: - - The path to, or content of the certificate body in PEM encoded format. - - If the parameter is not a file, it is assumed to be content. - - Passing a file name is deprecated, and support will be dropped in - version 5.0.0 of this collection. + - The content of the certificate body in PEM encoded format. type: str key: description: - - The path to, or content of the private key in PEM encoded format. - If the parameter is not a file, it is assumed to be content. - - Passing a file name is deprecated, and support will be dropped in - version 5.0.0 of this collection. + - The content of the private key in PEM encoded format. type: str dup_ok: description: - By default the module will not upload a certificate that is already uploaded into AWS. - If I(dup_ok=True), it will upload the certificate as long as the name is unique. - - Currently defaults to C(false), this will default to C(true) in release - 5.0.0. + - The default value for this value changed in release 5.0.0 to C(true). + default: true type: bool author: Jonathan I. 
Davila (@defionscode) @@ -97,29 +88,20 @@ key: "{{ lookup('file', 'path/to/key') }}" cert_chain: "{{ lookup('file', 'path/to/certchain') }}" -- name: Basic server certificate upload - community.aws.iam_server_certificate: - name: very_ssl - state: present - cert: path/to/cert - key: path/to/key - cert_chain: path/to/certchain - - name: Server certificate upload using key string community.aws.iam_server_certificate: name: very_ssl state: present path: "/a/cert/path/" - cert: body_of_somecert - key: vault_body_of_privcertkey - cert_chain: body_of_myverytrustedchain + cert: "{{ lookup('file', 'path/to/cert') }}" + key: "{{ lookup('file', 'path/to/key') }}" + cert_chain: "{{ lookup('file', 'path/to/certchain') }}" - name: Basic rename of existing certificate community.aws.iam_server_certificate: name: very_ssl new_name: new_very_ssl state: present - ''' import os @@ -177,8 +159,8 @@ def _compare_cert(cert_a, cert_b): def update_server_certificate(current_cert): changed = False - - cert, key, cert_chain = load_data() + cert = module.params.get('cert') + cert_chain = module.params.get('cert_chain') if not _compare_cert(cert, current_cert.get('certificate_body', None)): module.fail_json(msg='Modifying the certificate body is not supported by AWS') @@ -196,7 +178,9 @@ def update_server_certificate(current_cert): def create_server_certificate(): - cert, key, cert_chain = load_data() + cert = module.params.get('cert') + key = module.params.get('key') + cert_chain = module.params.get('cert_chain') if not module.params.get('dup_ok'): check_duplicate_cert(cert) @@ -309,36 +293,6 @@ def get_server_certificate(name): return cert -def load_data(): - cert = module.params.get('cert') - key = module.params.get('key') - cert_chain = module.params.get('cert_chain') - - # if paths are provided rather than lookups read the files and return the contents - if cert and os.path.isfile(cert): - with open(cert, 'r') as cert_fh: - cert = cert_fh.read().rstrip() - module.deprecate( - 'Passing a file name as the cert argument has been deprecated. ' - 'Please use a lookup instead, see the documentation for examples.', - version='5.0.0', collection_name='community.aws') - if key and os.path.isfile(key): - with open(key, 'r') as key_fh: - key = key_fh.read().rstrip() - module.deprecate( - 'Passing a file name as the key argument has been deprecated. ' - 'Please use a lookup instead, see the documentation for examples.', - version='5.0.0', collection_name='community.aws') - if cert_chain and os.path.isfile(cert_chain): - with open(cert_chain, 'r') as cert_chain_fh: - cert_chain = cert_chain_fh.read() - module.deprecate( - 'Passing a file name as the cert_chain argument has been deprecated. 
' - 'Please use a lookup instead, see the documentation for examples.', - version='5.0.0', collection_name='community.aws') - return cert, key, cert_chain - - def compatability_results(current_cert): compat_results = dict() @@ -379,7 +333,7 @@ def main(): new_name=dict(), path=dict(default='/'), new_path=dict(), - dup_ok=dict(type='bool'), + dup_ok=dict(type='bool', default=True), ) module = AnsibleAWSModule( @@ -404,11 +358,6 @@ def main(): new_path = module.params.get('new_path') dup_ok = module.params.get('dup_ok') - if dup_ok is None: - module.deprecate( - 'The dup_ok module currently defaults to false, this will change in ' - 'release 5.0.0 to true.', version='5.0.0', collection_name='community.aws') - current_cert = get_server_certificate(name) results = dict() From 9a4f1a68b5ffdce4778e36d8eae505881cc803f9 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 10:30:33 +0200 Subject: [PATCH 499/683] Rename ses modules (#1264) Rename SES modules SUMMARY In line with what I understood to be the consensus on ansible-collections/amazon.aws#881 and ansible-collections/amazon.aws#610 Rename ses modules to remove the aws_ prefix. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_ses_identity.py plugins/modules/aws_ses_identity_policy.py plugins/modules/aws_ses_rule_set.py plugins/modules/ses_identity.py plugins/modules/ses_identity_policy.py plugins/modules/ses_rule_set.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_ses_identity.py => ses_identity.py | 23 ++++++++++--------- ...entity_policy.py => ses_identity_policy.py | 23 +++++++++++-------- aws_ses_rule_set.py => ses_rule_set.py | 21 +++++++++-------- 3 files changed, 36 insertions(+), 31 deletions(-) rename aws_ses_identity.py => ses_identity.py (98%) rename aws_ses_identity_policy.py => ses_identity_policy.py (94%) rename aws_ses_rule_set.py => ses_rule_set.py (95%) diff --git a/aws_ses_identity.py b/ses_identity.py similarity index 98% rename from aws_ses_identity.py rename to ses_identity.py index caa250c220c..4f64b2be89b 100644 --- a/aws_ses_identity.py +++ b/ses_identity.py @@ -8,15 +8,17 @@ DOCUMENTATION = ''' --- -module: aws_ses_identity +module: ses_identity version_added: 1.0.0 short_description: Manages SES email and domain identity description: - This module allows the user to manage verified email and domain identity for SES. - This covers verifying and removing identities as well as setting up complaint, bounce and delivery notification settings. -author: Ed Costello (@orthanc) - + - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity). + The usage did not change. +author: + - Ed Costello (@orthanc) options: identity: description: @@ -86,26 +88,25 @@ type: 'bool' default: True extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Ensure example@example.com email identity exists - community.aws.aws_ses_identity: + community.aws.ses_identity: identity: example@example.com state: present - name: Delete example@example.com email identity - community.aws.aws_ses_identity: + community.aws.ses_identity: email: example@example.com state: absent - name: Ensure example.com domain identity exists - community.aws.aws_ses_identity: + community.aws.ses_identity: identity: example.com state: present @@ -119,7 +120,7 @@ register: topic_info - name: Deliver feedback to topic instead of owner email - community.aws.aws_ses_identity: + community.aws.ses_identity: identity: example@example.com state: present complaint_notifications: @@ -140,7 +141,7 @@ register: topic_info - name: Delivery notifications to topic - community.aws.aws_ses_identity: + community.aws.ses_identity: identity: example@example.com state: present delivery_notifications: diff --git a/aws_ses_identity_policy.py b/ses_identity_policy.py similarity index 94% rename from aws_ses_identity_policy.py rename to ses_identity_policy.py index e7f9e9417e9..0b93921ec5a 100644 --- a/aws_ses_identity_policy.py +++ b/ses_identity_policy.py @@ -8,14 +8,18 @@ DOCUMENTATION = ''' --- -module: aws_ses_identity_policy +module: ses_identity_policy version_added: 1.0.0 short_description: Manages SES sending authorization policies description: - - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain). + - This module allows the user to manage sending authorization policies associated with an SES + identity (email or domain). - SES authorization sending policies can be used to control what actors are able to send email on behalf of the validated identity and what conditions must be met by the sent emails. -author: Ed Costello (@orthanc) + - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity_policy). + The usage did not change. +author: + - Ed Costello (@orthanc) options: identity: @@ -37,37 +41,36 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: add sending authorization policy to domain identity - community.aws.aws_ses_identity_policy: + community.aws.ses_identity_policy: identity: example.com policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: add sending authorization policy to email identity - community.aws.aws_ses_identity_policy: + community.aws.ses_identity_policy: identity: example@example.com policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: add sending authorization policy to identity using ARN - community.aws.aws_ses_identity_policy: + community.aws.ses_identity_policy: identity: "arn:aws:ses:us-east-1:12345678:identity/example.com" policy_name: ExamplePolicy policy: "{{ lookup('template', 'policy.json.j2') }}" state: present - name: remove sending authorization policy - community.aws.aws_ses_identity_policy: + community.aws.ses_identity_policy: identity: example.com policy_name: ExamplePolicy state: absent diff --git a/aws_ses_rule_set.py b/ses_rule_set.py similarity index 95% rename from aws_ses_rule_set.py rename to ses_rule_set.py index c87145eab5e..a16a0b2b047 100644 --- a/aws_ses_rule_set.py +++ b/ses_rule_set.py @@ -8,11 +8,13 @@ DOCUMENTATION = ''' --- -module: aws_ses_rule_set +module: ses_rule_set version_added: 1.0.0 short_description: Manages SES inbound receipt rule sets description: - - The M(community.aws.aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets + - This module allows you to create, delete, and manage SES receipt rule sets + - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_rule_set). + The usage did not change. author: - "Ben Tomasik (@tomislacker)" - "Ed Costello (@orthanc)" @@ -44,9 +46,8 @@ required: False default: False extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = """ @@ -54,29 +55,29 @@ # It is assumed that their matching environment variables are set. 
--- - name: Create default rule set and activate it if not already - community.aws.aws_ses_rule_set: + community.aws.ses_rule_set: name: default-rule-set state: present active: yes - name: Create some arbitrary rule set but do not activate it - community.aws.aws_ses_rule_set: + community.aws.ses_rule_set: name: arbitrary-rule-set state: present - name: Explicitly deactivate the default rule set leaving no active rule set - community.aws.aws_ses_rule_set: + community.aws.ses_rule_set: name: default-rule-set state: present active: no - name: Remove an arbitrary inactive rule set - community.aws.aws_ses_rule_set: + community.aws.ses_rule_set: name: arbitrary-rule-set state: absent - name: Remove an ruleset even if we have to first deactivate it to remove it - community.aws.aws_ses_rule_set: + community.aws.ses_rule_set: name: default-rule-set state: absent force: yes From 89292527a714592b6cdda338b632746b00a3176c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 10:32:08 +0200 Subject: [PATCH 500/683] simplify github workflows (#1261) * simplify github workflows * Fix broken link in aws_ssm_parameter_store docs --- aws_ssm_parameter_store.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index 37ec605e5a3..b46214cd263 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -13,8 +13,6 @@ short_description: Manage key-value pairs in AWS SSM parameter store description: - Manage key-value pairs in AWS SSM parameter store. - - To retreive SSM parameters use the M(amazon.aws.aws_ssm_parameter) lookup - plugin. options: name: description: @@ -76,6 +74,9 @@ default: Standard type: str version_added: 1.5.0 +seealso: + - ref: amazon.aws.aws_ssm lookup + description: The documentation for the C(amazon.aws.aws_ssm) lookup plugin. author: - "Davinder Pal (@116davinder) " From 1e4dcb4348adbc8a923d755c0e0d988d72898ab5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 12:46:37 +0200 Subject: [PATCH 501/683] rename aws_eks_cluster -> eks_cluster (#1269) rename aws_eks_cluster -> eks_cluster SUMMARY Renames aws_eks_cluster to eks_cluster in line with new naming guidelines ISSUE TYPE Feature Pull Request COMPONENT NAME aws_eks_cluster ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_eks_cluster.py => eks_cluster.py | 38 +++++++++++++++------------- 1 file changed, 20 insertions(+), 18 deletions(-) rename aws_eks_cluster.py => eks_cluster.py (91%) diff --git a/aws_eks_cluster.py b/eks_cluster.py similarity index 91% rename from aws_eks_cluster.py rename to eks_cluster.py index 895758f0cf5..0794efef16d 100644 --- a/aws_eks_cluster.py +++ b/eks_cluster.py @@ -8,35 +8,38 @@ DOCUMENTATION = r''' --- -module: aws_eks_cluster +module: eks_cluster version_added: 1.0.0 -short_description: Manage Elastic Kubernetes Service Clusters +short_description: Manage Elastic Kubernetes Service (EKS) Clusters description: - - Manage Elastic Kubernetes Service Clusters - -author: Will Thames (@willthames) - + - Manage Elastic Kubernetes Service (EKS) Clusters. + - Prior to release 5.0.0 this module was called C(community.aws.aws_eks_cluster). + The usage did not change. +author: + - Will Thames (@willthames) options: name: - description: Name of EKS cluster + description: Name of the EKS cluster. required: True type: str version: - description: Kubernetes version - defaults to latest + description: + - Kubernetes version. + - Defaults to C(latest). 
type: str role_arn: - description: ARN of IAM role used by the EKS cluster + description: ARN of IAM role used by the EKS cluster. type: str subnets: - description: list of subnet IDs for the Kubernetes cluster + description: List of subnet IDs for the Kubernetes cluster. type: list elements: str security_groups: - description: list of security group names or IDs + description: List of security group names or IDs. type: list elements: str state: - description: desired state of the EKS cluster + description: Desired state of the EKS cluster. choices: - absent - present @@ -55,16 +58,15 @@ default: 1200 type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create an EKS cluster - community.aws.aws_eks_cluster: + community.aws.eks_cluster: name: my_cluster version: 1.14 role_arn: my_eks_role @@ -76,7 +78,7 @@ register: caller_facts - name: Remove an EKS cluster - community.aws.aws_eks_cluster: + community.aws.eks_cluster: name: my_cluster wait: yes state: absent @@ -141,7 +143,7 @@ description: ARN of the IAM role used by the cluster returned: when state is present type: str - sample: arn:aws:iam::111111111111:role/aws_eks_cluster_role + sample: arn:aws:iam::111111111111:role/eks_cluster_role status: description: status of the EKS cluster returned: when state is present From b9410241b4e136f8f39a9da1fc2664f7e5d75689 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 21:49:26 +0200 Subject: [PATCH 502/683] Rename s3 modules (#1271) Rename s3 modules SUMMARY In line with the naming guidelines, removes the aws_ prefix from aws_s3_bucket_info and aws_s3_cors ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_s3_bucket_info.py plugins/modules/aws_s3_cors.py plugins/modules/s3_bucket_info.py plugins/modules/s3_cors.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_s3_bucket_info.py => s3_bucket_info.py | 23 ++++++++++++--------- aws_s3_cors.py => s3_cors.py | 24 ++++++++++++---------- 2 files changed, 26 insertions(+), 21 deletions(-) rename aws_s3_bucket_info.py => s3_bucket_info.py (97%) rename aws_s3_cors.py => s3_cors.py (90%) diff --git a/aws_s3_bucket_info.py b/s3_bucket_info.py similarity index 97% rename from aws_s3_bucket_info.py rename to s3_bucket_info.py index 03da910549a..d164fde5d16 100644 --- a/aws_s3_bucket_info.py +++ b/s3_bucket_info.py @@ -10,12 +10,15 @@ DOCUMENTATION = ''' --- -module: aws_s3_bucket_info +module: s3_bucket_info version_added: 1.0.0 -author: "Gerben Geijteman (@hyperized)" -short_description: lists S3 buckets in AWS +author: + - "Gerben Geijteman (@hyperized)" +short_description: Lists S3 buckets in AWS description: - - Lists S3 buckets and details about those buckets. + - Lists S3 buckets and details about those buckets. + - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info). + The usage did not change. options: name: description: @@ -31,7 +34,7 @@ version_added: 1.4.0 bucket_facts: description: - - Retrieve requested S3 bucket detailed information + - Retrieve requested S3 bucket detailed information. - Each bucket_X option executes one API call, hence many options being set to C(true) will cause slower module execution. - You can limit buckets by using the I(name) or I(name_filter) option. 
suboptions: @@ -111,8 +114,8 @@ default: False version_added: 1.4.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -120,12 +123,12 @@ # Note: Only AWS S3 is currently supported -# Lists all s3 buckets -- community.aws.aws_s3_bucket_info: +# Lists all S3 buckets +- community.aws.s3_bucket_info: register: result # Retrieve detailed bucket information -- community.aws.aws_s3_bucket_info: +- community.aws.s3_bucket_info: # Show only buckets with name matching name_filter: your.testing # Choose facts to retrieve diff --git a/aws_s3_cors.py b/s3_cors.py similarity index 90% rename from aws_s3_cors.py rename to s3_cors.py index 58e33cf1104..5500fecab64 100644 --- a/aws_s3_cors.py +++ b/s3_cors.py @@ -9,40 +9,42 @@ DOCUMENTATION = r''' --- -module: aws_s3_cors +module: s3_cors version_added: 1.0.0 short_description: Manage CORS for S3 buckets in AWS description: - - Manage CORS for S3 buckets in AWS -author: "Oyvind Saltvik (@fivethreeo)" + - Manage CORS for S3 buckets in AWS. + - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_cors). + The usage did not change. +author: + - "Oyvind Saltvik (@fivethreeo)" options: name: description: - - Name of the s3 bucket + - Name of the S3 bucket. required: true type: str rules: description: - - Cors rules to put on the s3 bucket + - Cors rules to put on the S3 bucket. type: list elements: dict state: description: - - Create or remove cors on the s3 bucket + - Create or remove cors on the S3 bucket. required: true choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create a simple cors for s3 bucket -- community.aws.aws_s3_cors: +- community.aws.s3_cors: name: mys3bucket state: present rules: @@ -59,7 +61,7 @@ max_age_seconds: 30000 # Remove cors for s3 bucket -- community.aws.aws_s3_cors: +- community.aws.s3_cors: name: mys3bucket state: absent ''' From b5372166d9d91d01a8cd8bd57e35d2163fa2475e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jun 2022 22:08:35 +0200 Subject: [PATCH 503/683] Rename batch modules (#1272) Rename batch modules SUMMARY In line with the naming guidelines, removes the aws_ prefix from aws_batch_compute_environment, aws_batch_job_definition and aws_batch_job_queue. 
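Since the modules document that usage did not change, migrating a playbook is a pure rename of the module reference. An illustrative before/after task (a minimal sketch; the queue name and region are placeholder values borrowed from the module EXAMPLES):

# before
- community.aws.aws_batch_job_queue:
    job_queue_name: jobQueueName
    state: present
    region: us-east-1

# after
- community.aws.batch_job_queue:
    job_queue_name: jobQueueName
    state: present
    region: us-east-1

The renamed modules themselves note that prior to release 5.0.0 they were called by the aws_-prefixed names and that the usage did not change.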
ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_batch_compute_environment.py plugins/modules/aws_batch_job_definition.py plugins/modules/aws_batch_job_queue.py plugins/modules/batch_compute_environment.py plugins/modules/batch_job_definition.py plugins/modules/batch_job_queue.py ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso --- ...ronment.py => batch_compute_environment.py | 24 ++++++------- ...b_definition.py => batch_job_definition.py | 27 +++++++------- aws_batch_job_queue.py => batch_job_queue.py | 36 ++++++++++--------- 3 files changed, 43 insertions(+), 44 deletions(-) rename aws_batch_compute_environment.py => batch_compute_environment.py (96%) rename aws_batch_job_definition.py => batch_job_definition.py (96%) rename aws_batch_job_queue.py => batch_job_queue.py (91%) diff --git a/aws_batch_compute_environment.py b/batch_compute_environment.py similarity index 96% rename from aws_batch_compute_environment.py rename to batch_compute_environment.py index 86a971ea0e6..fbe69139457 100644 --- a/aws_batch_compute_environment.py +++ b/batch_compute_environment.py @@ -8,17 +8,18 @@ DOCUMENTATION = r''' --- -module: aws_batch_compute_environment +module: batch_compute_environment version_added: 1.0.0 short_description: Manage AWS Batch Compute Environments description: - - This module allows the management of AWS Batch Compute Environments. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.aws_batch_compute_environment) to manage the compute - environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. - - -author: Jon Meran (@jonmer85) + - This module allows the management of AWS Batch Compute Environments. + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.batch_compute_environment) to manage the compute + environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions. + - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_compute_environment). + The usage did not change. +author: + - Jon Meran (@jonmer85) options: compute_environment_name: description: @@ -119,14 +120,13 @@ - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' - name: My Batch Compute Environment - community.aws.aws_batch_compute_environment: + community.aws.batch_compute_environment: compute_environment_name: computeEnvironmentName state: present region: us-east-1 diff --git a/aws_batch_job_definition.py b/batch_job_definition.py similarity index 96% rename from aws_batch_job_definition.py rename to batch_job_definition.py index afaaf0c54e8..c62f37fd8ba 100644 --- a/aws_batch_job_definition.py +++ b/batch_job_definition.py @@ -8,15 +8,18 @@ DOCUMENTATION = r''' --- -module: aws_batch_job_definition +module: batch_job_definition version_added: 1.0.0 short_description: Manage AWS Batch Job Definitions description: - - This module allows the management of AWS Batch Job Definitions. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.aws_batch_compute_environment) to manage the compute - environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. 
-author: Jon Meran (@jonmer85) + - This module allows the management of AWS Batch Job Definitions. + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.batch_compute_environment) to manage the compute + environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions. + - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_definition). + The usage did not change. +author: + - Jon Meran (@jonmer85) options: job_definition_arn: description: @@ -171,20 +174,14 @@ many times. type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' --- -- hosts: localhost - gather_facts: no - vars: - state: present - tasks: - name: My Batch Job Definition - community.aws.aws_batch_job_definition: + community.aws.batch_job_definition: job_definition_name: My Batch Job Definition state: present type: container diff --git a/aws_batch_job_queue.py b/batch_job_queue.py similarity index 91% rename from aws_batch_job_queue.py rename to batch_job_queue.py index 7091c0756b3..8a6224dfb68 100644 --- a/aws_batch_job_queue.py +++ b/batch_job_queue.py @@ -8,19 +8,22 @@ DOCUMENTATION = r''' --- -module: aws_batch_job_queue +module: batch_job_queue version_added: 1.0.0 short_description: Manage AWS Batch Job Queues description: - - This module allows the management of AWS Batch Job Queues. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.aws_batch_compute_environment) to manage the compute - environment, M(community.aws.aws_batch_job_queue) to manage job queues, M(community.aws.aws_batch_job_definition) to manage job definitions. -author: Jon Meran (@jonmer85) + - This module allows the management of AWS Batch Job Queues. + - It is idempotent and supports "Check" mode. + - Use module M(community.aws.batch_compute_environment) to manage the compute + environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions. + - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_queue). + The usage did not change. +author: + - Jon Meran (@jonmer85) options: job_queue_name: description: - - The name for the job queue + - The name for the job queue. required: true type: str state: @@ -53,21 +56,20 @@ type: list elements: dict suboptions: - order: - type: int - description: The relative priority of the environment. - compute_environment: - type: str - description: The name of the compute environment. + order: + type: int + description: The relative priority of the environment. + compute_environment: + type: str + description: The name of the compute environment. 
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: My Batch Job Queue - community.aws.aws_batch_job_queue: + community.aws.batch_job_queue: job_queue_name: jobQueueName state: present region: us-east-1 From b0b1161f86418f7a2bfadaa2c4d9e783fdc0d526 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 29 Jun 2022 07:12:04 +0200 Subject: [PATCH 504/683] rename execute_lambda (#1273) rename execute_lambda SUMMARY In line with the naming guidelines, rename execute_lambda to lambda_execute ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/execute_lambda.py plugins/modules/lambda_execute.py ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso --- execute_lambda.py => lambda_execute.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) rename execute_lambda.py => lambda_execute.py (96%) diff --git a/execute_lambda.py b/lambda_execute.py similarity index 96% rename from execute_lambda.py rename to lambda_execute.py index f9131e2bbea..880ad4cb036 100644 --- a/execute_lambda.py +++ b/lambda_execute.py @@ -8,17 +8,19 @@ DOCUMENTATION = ''' --- -module: execute_lambda +module: lambda_execute version_added: 1.0.0 short_description: Execute an AWS Lambda function description: - This module executes AWS Lambda functions, allowing synchronous and asynchronous invocation. + - Prior to release 5.0.0 this module was called C(community.aws.execute_lambda). + The usage did not change. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -author: "Ryan Scott Brown (@ryansb) " + - amazon.aws.aws + - amazon.aws.ec2 +author: + - "Ryan Scott Brown (@ryansb) " notes: - Async invocation will always return an empty C(output) key. - Synchronous invocation may result in a function timeout, resulting in an @@ -72,7 +74,7 @@ ''' EXAMPLES = ''' -- community.aws.execute_lambda: +- community.aws.lambda_execute: name: test-function # the payload is automatically serialized and sent to the function payload: @@ -82,11 +84,11 @@ # Test that you have sufficient permissions to execute a Lambda function in # another account -- community.aws.execute_lambda: +- community.aws.lambda_execute: function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function dry_run: true -- community.aws.execute_lambda: +- community.aws.lambda_execute: name: test-function payload: foo: bar @@ -97,12 +99,12 @@ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda # Pass the Lambda event payload as a json file. 
-- community.aws.execute_lambda: +- community.aws.lambda_execute: name: test-function payload: "{{ lookup('file','lambda_event.json') }}" register: response -- community.aws.execute_lambda: +- community.aws.lambda_execute: name: test-function version_qualifier: PRODUCTION ''' From b23afd18671fa4ad09261df34568a0e5d78b015a Mon Sep 17 00:00:00 2001 From: Raz M Date: Wed, 29 Jun 2022 00:36:36 -0700 Subject: [PATCH 505/683] sns_topic - Fix bug when used in GovCloud - issue 836 (#1195) SUMMARY Add region detection to skip usage of FIFO topics when using GovCloud regions Fixes #836 ISSUE TYPE Bugfix Pull Request COMPONENT NAME community.aws.sns_topic * * Add endpoint detection to skip usage of FIFO topics for GovCloud regions * * Remove GovCloud hack for FIFO topics * Update plugins/modules/sns_topic.py * Update plugins/modules/sns_topic.py * minor docs tweak --- sns_topic.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/sns_topic.py b/sns_topic.py index 9755450c455..561c9d615c4 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -26,8 +26,11 @@ type: str topic_type: description: - - The type of topic that should be created. Either Standard for FIFO (first-in, first-out) - choices: ['standard', 'fifo'] + - The type of topic that should be created. Either Standard for FIFO (first-in, first-out). + - Some regions, including GovCloud regions do not support FIFO topics. + Use a default value of 'standard' or omit the option if the region + does not support FIFO topics. + choices: ["standard", "fifo"] default: 'standard' type: str version_added: 2.0.0 @@ -363,9 +366,11 @@ def __init__(self, self.attributes_set = [] def _create_topic(self): - attributes = {'FifoTopic': 'false'} + attributes = {} tags = [] + # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) + # don't support the attribute being set, even to False. if self.topic_type == 'fifo': attributes['FifoTopic'] = 'true' if not self.name.endswith('.fifo'): @@ -373,7 +378,9 @@ def _create_topic(self): if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) + response = self.connection.create_topic(Name=self.name, + Attributes=attributes, + Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) self.topic_arn = response['TopicArn'] @@ -506,7 +513,6 @@ def ensure_gone(self): def main(): - # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict( From 726f6a2f975311ef4f653e3619fc057060fec40c Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 29 Jun 2022 03:38:19 -0400 Subject: [PATCH 506/683] ecs_* - add waiters (#1209) SUMMARY Add wait parameter to utilize boto3 waiters in ecs_service and ecs_task (ServicesInactive, TasksStopped, TasksRunning). There's an additional waiter for ServicesStable but idempotence checked never failed locally so it seems redundant when creating a service. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ecs_task ADDITIONAL INFORMATION When testing the waiter for TasksRunning, tests failed on waiter error due to the container instance not being able to be created, not because of the waiter, so I commented out those tests for now. 
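The option itself is opt-in and defaults to false; a minimal sketch of a task using it (cluster and service names are placeholders):

- name: Delete a service and wait for it to become inactive
  community.aws.ecs_service:
    state: absent
    cluster: example-cluster
    name: example-service
    wait: true

For service deletion the services_inactive waiter reuses the module's existing delay and repeat parameters as its WaiterConfig, so no new tuning options are introduced.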
In the ECS console: Stopped reason CannotPullContainerError: inspect image has been retried 5 time(s): failed to resolve ref "docker.io/library/nginx:latest": failed to do request: Head https://registry-1.docker.io/v2/library/nginx/manifests/latest: dial tcp 34.237.244.67:443: i/o timeout * add waiters and fix some bugs * add changelog * move bugfixes to different PR for backporting purposes * update wait description * catch WaiterError * Bump version_added --- ecs_service.py | 24 ++++++++++++++++++++++++ ecs_task.py | 42 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 9327191e80c..aa455d72345 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -230,6 +230,13 @@ required: false choices: ["DAEMON", "REPLICA"] type: str + wait: + description: + - Whether or not to wait for the service to be inactive. + - Waits only when I(state) is C(absent). + type: bool + default: false + version_added: 4.1.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -780,6 +787,7 @@ def main(): force_new_deployment=dict(required=False, default=False, type='bool'), force_deletion=dict(required=False, default=False, type='bool'), deployment_configuration=dict(required=False, default={}, type='dict'), + wait=dict(required=False, default=False, type='bool'), placement_constraints=dict( required=False, default=[], @@ -964,8 +972,24 @@ def main(): module.params['cluster'], module.params['force_deletion'], ) + + # Wait for service to be INACTIVE prior to exiting + if module.params['wait']: + waiter = service_mgr.ecs.get_waiter('services_inactive') + try: + waiter.wait( + services=[module.params['name']], + cluster=module.params['cluster'], + WaiterConfig={ + 'Delay': module.params['delay'], + 'MaxAttempts': module.params['repeat'] + } + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, 'Timeout waiting for service removal') except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Couldn't delete service") + results['changed'] = True elif module.params['state'] == 'deleting': diff --git a/ecs_task.py b/ecs_task.py index 5e8eda99dd3..3db08a5b2af 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -92,6 +92,12 @@ - Tags that will be added to ecs tasks on start and run required: false aliases: ['resource_tags'] + wait: + description: + - Whether or not to wait for the desired state. 
+ type: bool + default: false + version_added: 4.1.0 extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -353,7 +359,8 @@ def main(): started_by=dict(required=False, type='str'), # R S network_configuration=dict(required=False, type='dict'), launch_type=dict(required=False, choices=['EC2', 'FARGATE']), - tags=dict(required=False, type='dict', aliases=['resource_tags']) + tags=dict(required=False, type='dict', aliases=['resource_tags']), + wait=dict(required=False, default=False, type='bool'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, @@ -395,7 +402,9 @@ def main(): results['task'] = existing else: if not module.check_mode: - results['task'] = service_mgr.run_task( + + # run_task returns a list of tasks created + tasks = service_mgr.run_task( module.params['cluster'], module.params['task_definition'], module.params['overrides'], @@ -404,6 +413,21 @@ def main(): module.params['launch_type'], module.params['tags'], ) + + # Wait for task(s) to be running prior to exiting + if module.params['wait']: + + waiter = service_mgr.ecs.get_waiter('tasks_running') + try: + waiter.wait( + tasks=[task['taskArn'] for task in tasks], + cluster=module.params['cluster'], + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, 'Timeout waiting for tasks to run') + + results['task'] = tasks + results['changed'] = True elif module.params['operation'] == 'start': @@ -420,6 +444,7 @@ def main(): module.params['started_by'], module.params['tags'], ) + results['changed'] = True elif module.params['operation'] == 'stop': @@ -433,6 +458,19 @@ def main(): module.params['cluster'], module.params['task'] ) + + # Wait for task to be stopped prior to exiting + if module.params['wait']: + + waiter = service_mgr.ecs.get_waiter('tasks_stopped') + try: + waiter.wait( + tasks=[module.params['task']], + cluster=module.params['cluster'], + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, 'Timeout waiting for task to stop') + results['changed'] = True module.exit_json(**results) From f247112c0290e67d7aaf6f1312f67df3ed947fa5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 29 Jun 2022 11:22:32 +0200 Subject: [PATCH 507/683] Complete iam_cert and rds_snapshot deprecations (#1276) Complete iam_cert and rds_snapshot deprecations SUMMARY Delayed from 4.0.0 to 5.0.0 remove the deprecated aliases ISSUE TYPE Feature Pull Request COMPONENT NAME iam_server_certificate rds_instance_snapshot ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- iam_server_certificate.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 824c8bd96c7..8520c5bff0e 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -22,9 +22,9 @@ --- module: iam_server_certificate version_added: 1.0.0 -short_description: Manage server certificates for use on ELBs and CloudFront +short_description: Manage IAM server certificates for use on ELBs and CloudFront description: - - Allows for the management of server certificates. + - Allows for the management of IAM server certificates. options: name: description: @@ -73,10 +73,11 @@ default: true type: bool -author: Jonathan I. Davila (@defionscode) +author: + - Jonathan I. 
Davila (@defionscode) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -103,7 +104,6 @@ new_name: new_very_ssl state: present ''' -import os try: import botocore From e1a6d6ae1d7a49a6577f44ad2716c747b37a89d7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 29 Jun 2022 17:30:40 +0200 Subject: [PATCH 508/683] Rename KMS modules (#1284) Rename KMS modules SUMMARY In line with the naming guidelines, rename aws_kms and aws_kms_info ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_kms.py plugins/modules/aws_kms_info.py plugins/modules/kms_key.py plugins/modules/kms_key_info.py ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis --- aws_kms.py => kms_key.py | 128 +++++++++++++++-------------- aws_kms_info.py => kms_key_info.py | 20 +++-- 2 files changed, 77 insertions(+), 71 deletions(-) rename aws_kms.py => kms_key.py (91%) rename aws_kms_info.py => kms_key_info.py (97%) diff --git a/aws_kms.py b/kms_key.py similarity index 91% rename from aws_kms.py rename to kms_key.py index 046af605da1..53d691888b6 100644 --- a/aws_kms.py +++ b/kms_key.py @@ -8,97 +8,102 @@ DOCUMENTATION = r''' --- -module: aws_kms +module: kms_key version_added: 1.0.0 -short_description: Perform various KMS management tasks +short_description: Perform various KMS key management tasks description: - - Manage role/user access to a KMS key. Not designed for encrypting/decrypting. + - Manage role/user access to a KMS key. + - Not designed for encrypting/decrypting. + - Prior to release 5.0.0 this module was called C(community.aws.aws_kms). + The usage did not change. options: alias: - description: An alias for a key. For safety, even though KMS does not require keys - to have an alias, this module expects all new keys to be given an alias - to make them easier to manage. Existing keys without an alias may be - referred to by I(key_id). Use M(community.aws.aws_kms_info) to find key ids. Required - if I(key_id) is not given. Note that passing a I(key_id) and I(alias) - will only cause a new alias to be added, an alias will never be renamed. - The 'alias/' prefix is optional. + description: + - An alias for a key. + - For safety, even though KMS does not require keys to have an alias, this module expects all + new keys to be given an alias to make them easier to manage. Existing keys without an alias + may be referred to by I(key_id). Use M(community.aws.kms_key_info) to find key ids. + - Note that passing a I(key_id) and I(alias) will only cause a new alias to be added, an alias will never be renamed. + - The C(alias/) prefix is optional. + - Required if I(key_id) is not given. required: false aliases: - key_alias type: str key_id: description: - - Key ID or ARN of the key. - - One of I(alias) or I(key_id) are required. + - Key ID or ARN of the key. + - One of I(alias) or I(key_id) are required. required: false aliases: - key_arn type: str enable_key_rotation: description: - - Whether the key should be automatically rotated every year. + - Whether the key should be automatically rotated every year. required: false type: bool policy_mode: description: - - (deprecated) Grant or deny access. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. + - (deprecated) Grant or deny access. 
+ - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. default: grant choices: [ grant, deny ] aliases: - - mode + - mode type: str policy_role_name: description: - - (deprecated) Role to allow/deny access. - - One of I(policy_role_name) or I(policy_role_arn) are required. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. + - (deprecated) Role to allow/deny access. + - One of I(policy_role_name) or I(policy_role_arn) are required. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. required: false aliases: - - role_name + - role_name type: str policy_role_arn: description: - - (deprecated) ARN of role to allow/deny access. - - One of I(policy_role_name) or I(policy_role_arn) are required. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. + - (deprecated) ARN of role to allow/deny access. + - One of I(policy_role_name) or I(policy_role_arn) are required. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. type: str required: false aliases: - - role_arn + - role_arn policy_grant_types: description: - - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". - - Required when I(policy_mode=grant). - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. + - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". + - Required when I(policy_mode=grant). + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. required: false aliases: - - grant_types + - grant_types type: list elements: str policy_clean_invalid_entries: description: - - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases. - - Only cleans if changes are being made. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. + - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases. 
+ - Only cleans if changes are being made. + - Used for modifying the Key Policy rather than modifying a grant and only + works on the default policy created through the AWS Console. + - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. type: bool default: true aliases: - clean_invalid_entries state: - description: Whether a key should be present or absent. Note that making an - existing key absent only schedules a key for deletion. Passing a key that - is scheduled for deletion with state present will cancel key deletion. + description: + - Whether a key should be present or absent. + - Note that making an existing key C(absent) only schedules a key for deletion. + - Passing a key that is scheduled for deletion with I(state=present) will cancel key deletion. required: False choices: - present @@ -111,21 +116,21 @@ type: bool description: description: - A description of the CMK. Use a description that helps you decide - whether the CMK is appropriate for a task. + - A description of the CMK. + - Use a description that helps you decide whether the CMK is appropriate for a task. type: str pending_window: description: - - The number of days between requesting deletion of the CMK and when it will actually be deleted. - - Only used when I(state=absent) and the CMK has not yet been deleted. - - Valid values are between 7 and 30 (inclusive). - - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)' + - The number of days between requesting deletion of the CMK and when it will actually be deleted. + - Only used when I(state=absent) and the CMK has not yet been deleted. + - Valid values are between 7 and 30 (inclusive). + - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)' type: int aliases: ['deletion_delay'] version_added: 1.4.0 purge_grants: - description: Whether the I(grants) argument should cause grants not in the list to - be removed. + description: + - Whether the I(grants) argument should cause grants not in the list to be removed. default: False type: bool grants: @@ -192,40 +197,39 @@ - amazon.aws.ec2 - amazon.aws.tags.deprecated_purge - notes: - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS. - This can cause issues when running duplicate tasks in succession or using the aws_kms_info module to fetch key metadata + This can cause issues when running duplicate tasks in succession or using the M(community.aws.kms_key_info) module to fetch key metadata shortly after modifying keys. - For this reason, it is recommended to use the return data from this module (aws_kms) to fetch a key's metadata. + For this reason, it is recommended to use the return data from this module (M(community.aws.kms_key)) to fetch a key's metadata. ''' EXAMPLES = r''' # Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile # and has been deprecated in favour of the policy option. 
- name: grant user-style access to production secrets - community.aws.aws_kms: + community.aws.kms_key: args: alias: "alias/my_production_secrets" policy_mode: grant policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" policy_grant_types: "role,role grant" - name: remove access to production secrets from role - community.aws.aws_kms: + community.aws.kms_key: args: alias: "alias/my_production_secrets" policy_mode: deny policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" # Create a new KMS key -- community.aws.aws_kms: +- community.aws.kms_key: alias: mykey tags: Name: myKey Purpose: protect_stuff # Update previous key with more tags -- community.aws.aws_kms: +- community.aws.kms_key: alias: mykey tags: Name: myKey @@ -235,7 +239,7 @@ # Update a known key with grants allowing an instance with the billing-prod IAM profile # to decrypt data encrypted with the environment: production, application: billing # encryption context -- community.aws.aws_kms: +- community.aws.kms_key: key_id: abcd1234-abcd-1234-5678-ef1234567890 grants: - name: billing_prod @@ -249,13 +253,13 @@ - RetireGrant - name: Update IAM policy on an existing KMS key - community.aws.aws_kms: + community.aws.kms_key: alias: my-kms-key policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { } ]}' state: present - name: Example using lookup for policy json - community.aws.aws_kms: + community.aws.kms_key: alias: my-kms-key policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" state: present diff --git a/aws_kms_info.py b/kms_key_info.py similarity index 97% rename from aws_kms_info.py rename to kms_key_info.py index 2863fd0538e..b9ecf80fcc3 100644 --- a/aws_kms_info.py +++ b/kms_key_info.py @@ -8,12 +8,15 @@ DOCUMENTATION = r''' --- -module: aws_kms_info +module: kms_key_info version_added: 1.0.0 short_description: Gather information about AWS KMS keys description: - - Gather information about AWS KMS keys including tags and grants. -author: "Will Thames (@willthames)" + - Gather information about AWS KMS keys including tags and grants. + - Prior to release 5.0.0 this module was called C(community.aws.aws_kms_info). + The usage did not change. +author: + - "Will Thames (@willthames)" options: alias: description: @@ -46,24 +49,23 @@ default: False type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all KMS keys -- community.aws.aws_kms_info: +- community.aws.kms_key_info: # Gather information about all keys with a Name tag -- community.aws.aws_kms_info: +- community.aws.kms_key_info: filters: tag-key: Name # Gather information about all keys with a specific name -- community.aws.aws_kms_info: +- community.aws.kms_key_info: filters: "tag:Name": Example ''' From 5a4c427447ad6905cc9f12a8a1684707b4d76a8e Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 29 Jun 2022 16:31:13 -0400 Subject: [PATCH 509/683] lambda_info - Return list of dicts as opposed to dict of dicts (#1239) lambda_info - Return list of dicts as opposed to dict of dicts SUMMARY Previously returned function, which was a dict of dicts. Deprecated function in replace of lambda_info which is a list of dicts. 
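A minimal sketch of consuming the new list-of-dicts key (illustrative only; the debug expression simply lists each returned function's name):

- community.aws.lambda_info:
    query: config
  register: output

- ansible.builtin.debug:
    msg: "{{ output.functions | map(attribute='function_name') | list }}"

Because both keys are returned during the deprecation window, existing consumers of the deprecated function dictionary keep working unchanged.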
Return config details regardless of query (function_arn, function_name, etc) ISSUE TYPE Feature Pull Request COMPONENT NAME lambda_info Reviewed-by: Mark Chappell Reviewed-by: Joseph Torcasso Reviewed-by: Alina Buzachis --- lambda_info.py | 274 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 217 insertions(+), 57 deletions(-) diff --git a/lambda_info.py b/lambda_info.py index 1ad2749c5f8..e3d00ab08cc 100644 --- a/lambda_info.py +++ b/lambda_info.py @@ -71,16 +71,197 @@ RETURN = ''' --- function: - description: lambda function list + description: + - lambda function list. + - C(function) has been deprecated in will be removed in the next major release after 2025-01-01. returned: success type: dict function.TheName: - description: lambda function information, including event, mapping, and version information + description: + - lambda function information, including event, mapping, and version information. + - C(function) has been deprecated in will be removed in the next major release after 2025-01-01. returned: success type: dict +functions: + description: List of information for each lambda function matching the query. + returned: always + type: list + elements: dict + version_added: 4.1.0 + contains: + aliases: + description: The aliases associated with the function. + returned: when C(query) is I(aliases) or I(all) + type: list + elements: str + code_sha256: + description: The SHA256 hash of the function's deployment package. + returned: success + type: str + sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' + code_size: + description: The size of the function's deployment package in bytes. + returned: success + type: int + sample: 123 + dead_letter_config: + description: The function's dead letter queue. + returned: when the function has a dead letter queue configured + type: dict + sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } + contains: + target_arn: + description: The ARN of an SQS queue or SNS topic. + returned: when the function has a dead letter queue configured + type: str + sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 + description: + description: The function's description. + returned: success + type: str + sample: 'My function' + environment: + description: The function's environment variables. + returned: when environment variables exist + type: dict + contains: + variables: + description: Environment variable key-value pairs. + returned: when environment variables exist + type: dict + sample: {'key': 'value'} + error: + description: Error message for environment variables that could not be applied. + returned: when there is an error applying environment variables + type: dict + contains: + error_code: + description: The error code. + returned: when there is an error applying environment variables + type: str + message: + description: The error message. + returned: when there is an error applying environment variables + type: str + function_arn: + description: The function's Amazon Resource Name (ARN). + returned: on success + type: str + sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' + function_name: + description: The function's name. + returned: on success + type: str + sample: 'myFunction' + handler: + description: The function Lambda calls to begin executing your function. + returned: on success + type: str + sample: 'index.handler' + last_modified: + description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). 
+ returned: on success + type: str + sample: '2017-08-01T00:00:00.000+0000' + mappings: + description: List of configuration information for each event source mapping. + returned: when C(query) is I(all) or I(mappings) + type: list + elements: dict + contains: + uuid: + description: The AWS Lambda assigned opaque identifier for the mapping. + returned: on success + type: str + batch_size: + description: The largest number of records that AWS Lambda will retrieve from the event source at the time of invoking the function. + returned: on success + type: int + event_source_arn: + description: The ARN of the Amazon Kinesis or DyanmoDB stream that is the source of events. + returned: on success + type: str + function_arn: + description: The Lambda function to invoke when AWS Lambda detects an event on the poll-based source. + returned: on success + type: str + last_modified: + description: The UTC time string indicating the last time the event mapping was updated. + returned: on success + type: str + last_processing_result: + description: The result of the last AWS Lambda invocation of your Lambda function. + returned: on success + type: str + state: + description: The state of the event source mapping. + returned: on success + type: str + state_transition_reason: + description: The reason the event source mapping is in its current state. + returned: on success + type: str + memory_size: + description: The memory allocated to the function. + returned: on success + type: int + sample: 128 + policy: + description: The policy associated with the function. + returned: when C(query) is I(all) or I(policy) + type: dict + revision_id: + description: The latest updated revision of the function or alias. + returned: on success + type: str + sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c' + role: + description: The function's execution role. + returned: on success + type: str + sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + runtime: + description: The funtime environment for the Lambda function. + returned: on success + type: str + sample: 'nodejs6.10' + tracing_config: + description: The function's AWS X-Ray tracing configuration. + returned: on success + type: dict + sample: { 'mode': 'Active' } + contains: + mode: + description: The tracing mode. + returned: on success + type: str + sample: 'Active' + timeout: + description: The amount of time that Lambda allows a function to run before terminating it. + returned: on success + type: int + sample: 3 + version: + description: The version of the Lambda function. + returned: on success + type: str + sample: '1' + versions: + description: List of Lambda function versions. + returned: when C(query) is I(all) or I(versions) + type: list + elements: dict + vpc_config: + description: The function's networking configuration. 
+ returned: on success + type: dict + sample: { + 'security_group_ids': [], + 'subnet_ids': [], + 'vpc_id': '123' + } ''' import json -import datetime import re try: @@ -101,29 +282,6 @@ def _paginate(client, function, **params): return paginator.paginate(**params).build_full_result() -def fix_return(node): - """ - fixup returned dictionary - - :param node: - :return: - """ - - if isinstance(node, datetime.datetime): - node_value = str(node) - - elif isinstance(node, list): - node_value = [fix_return(item) for item in node] - - elif isinstance(node, dict): - node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) - - else: - node_value = node - - return node_value - - def alias_details(client, module, function_name): """ Returns list of aliases for a specified function. @@ -146,13 +304,12 @@ def alias_details(client, module, function_name): return camel_dict_to_snake_dict(lambda_info) -def list_lambdas(client, module): +def list_functions(client, module): """ Returns queried facts for a specified function (or all functions). :param client: AWS API client reference (boto3) :param module: Ansible module reference - :return dict: """ function_name = module.params.get('function_name') @@ -166,38 +323,40 @@ def list_lambdas(client, module): function_names = [function_info['FunctionName'] for function_info in all_function_info] query = module.params['query'] - lambdas = dict() + functions = [] + + # keep returning deprecated response (dict of dicts) until removed + all_facts = {} for function_name in function_names: - lambdas[function_name] = {} + function = {} - if query == 'all': - lambdas[function_name].update(config_details(client, module, function_name)) - lambdas[function_name].update(alias_details(client, module, function_name)) - lambdas[function_name].update(policy_details(client, module, function_name)) - lambdas[function_name].update(version_details(client, module, function_name)) - lambdas[function_name].update(mapping_details(client, module, function_name)) - lambdas[function_name].update(tags_details(client, module, function_name)) + # query = 'config' returns info such as FunctionName, FunctionArn, Description, etc + # these details should be returned regardless of the query + function.update(config_details(client, module, function_name)) - elif query == 'config': - lambdas[function_name].update(config_details(client, module, function_name)) + if query in ['all', 'aliases']: + function.update(alias_details(client, module, function_name)) - elif query == 'aliases': - lambdas[function_name].update(alias_details(client, module, function_name)) + if query in ['all', 'policy']: + function.update(policy_details(client, module, function_name)) - elif query == 'policy': - lambdas[function_name].update(policy_details(client, module, function_name)) + if query in ['all', 'versions']: + function.update(version_details(client, module, function_name)) - elif query == 'versions': - lambdas[function_name].update(version_details(client, module, function_name)) + if query in ['all', 'mappings']: + function.update(mapping_details(client, module, function_name)) - elif query == 'mappings': - lambdas[function_name].update(mapping_details(client, module, function_name)) + if query in ['all', 'tags']: + function.update(tags_details(client, module, function_name)) - elif query == 'tags': - lambdas[function_name].update(tags_details(client, module, function_name)) + all_facts[function['function_name']] = function - return lambdas + # add current lambda to list of lambdas + 
functions.append(function) + + # return info + module.exit_json(function=all_facts, functions=functions, changed=False) def config_details(client, module, function_name): @@ -357,14 +516,15 @@ def main(): client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - all_facts = fix_return(list_lambdas(client, module)) - - results = dict(function=all_facts, changed=False) - - if module.check_mode: - results['msg'] = 'Check mode set but ignored for fact gathering only.' + # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts + module.deprecate( + "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," + " which returns a list of dictionaries. Both keys are returned for now.", + date='2025-01-01', + collection_name='community.aws' + ) - module.exit_json(**results) + list_functions(client, module) if __name__ == '__main__': From 108b20d31733d559ca2ae6b484f4e0cbf22c9708 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 30 Jun 2022 13:29:24 +0200 Subject: [PATCH 510/683] Rename apigateway modules (#1288) Rename api_gateway modules SUMMARY In line with the naming guidelines, removes the aws_ prefix from aws_api_gateway and aws_api_gateway_domain. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_api_gateway.py plugins/modules/aws_api_gateway_domain.py plugins/modules/api_gateway.py plugins/modules/api_gateway_domain.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- aws_api_gateway.py => api_gateway.py | 40 +++++++++---------- ...gateway_domain.py => api_gateway_domain.py | 22 +++++----- 2 files changed, 31 insertions(+), 31 deletions(-) rename aws_api_gateway.py => api_gateway.py (92%) rename aws_api_gateway_domain.py => api_gateway_domain.py (94%) diff --git a/aws_api_gateway.py b/api_gateway.py similarity index 92% rename from aws_api_gateway.py rename to api_gateway.py index 5ce411195e9..4e467bf677b 100644 --- a/aws_api_gateway.py +++ b/api_gateway.py @@ -10,18 +10,18 @@ DOCUMENTATION = ''' --- -module: aws_api_gateway +module: api_gateway version_added: 1.0.0 short_description: Manage AWS API Gateway APIs description: - - Allows for the management of API Gateway APIs. - - Normally you should give the api_id since there is no other - stable guaranteed unique identifier for the API. If you do - not give api_id then a new API will be created each time - this is run. - - swagger_file and swagger_text are passed directly on to AWS - transparently whilst swagger_dict is an ansible dict which is - converted to JSON before the API definitions are uploaded. + - Allows for the management of API Gateway APIs. + - Normally you should give the api_id since there is no other + stable guaranteed unique identifier for the API. If you do + not give api_id then a new API will be created each time + this is run. + - swagger_file and swagger_text are passed directly on to AWS + transparently whilst swagger_dict is an ansible dict which is + converted to JSON before the API definitions are uploaded. options: api_id: description: @@ -99,21 +99,21 @@ type: str default: EDGE author: - - 'Michael De La Rue (@mikedlr)' + - 'Michael De La Rue (@mikedlr)' extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 notes: - - A future version of this module will probably use tags or another - ID so that an API can be created only once. 
- - As an early work around an intermediate version will probably do - the same using a tag embedded in the API name. + - A future version of this module will probably use tags or another + ID so that an API can be created only once. + - As an early work around an intermediate version will probably do + the same using a tag embedded in the API name. ''' EXAMPLES = ''' - name: Setup AWS API Gateway setup on AWS and deploy API definition - community.aws.aws_api_gateway: + community.aws.api_gateway: swagger_file: my_api.yml stage: production cache_enabled: true @@ -123,7 +123,7 @@ state: present - name: Update API definition to deploy new version - community.aws.aws_api_gateway: + community.aws.api_gateway: api_id: 'abc123321cba' swagger_file: my_api.yml deploy_desc: Make auth fix available. @@ -133,7 +133,7 @@ state: present - name: Update API definitions and settings and deploy as canary - community.aws.aws_api_gateway: + community.aws.api_gateway: api_id: 'abc123321cba' swagger_file: my_api.yml cache_enabled: true @@ -264,7 +264,7 @@ def create_empty_api(module, client, endpoint_type): temporarily set to show the API as incomplete but should be updated when the API is configured. """ - desc = "Incomplete API creation by ansible aws_api_gateway module" + desc = "Incomplete API creation by ansible api_gateway module" try: awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: diff --git a/aws_api_gateway_domain.py b/api_gateway_domain.py similarity index 94% rename from aws_api_gateway_domain.py rename to api_gateway_domain.py index a8a04295dae..7352dc69477 100644 --- a/aws_api_gateway_domain.py +++ b/api_gateway_domain.py @@ -8,15 +8,15 @@ DOCUMENTATION = ''' --- -module: aws_api_gateway_domain +module: api_gateway_domain short_description: Manage AWS API Gateway custom domains description: - - Manages API Gateway custom domains for API GW Rest APIs. - - AWS API Gateway custom domain setups use CloudFront behind the scenes. - So you will get a CloudFront distribution as a result, configured to be aliased with your domain. + - Manages API Gateway custom domains for API GW Rest APIs. + - AWS API Gateway custom domain setups use CloudFront behind the scenes. + So you will get a CloudFront distribution as a result, configured to be aliased with your domain. version_added: '3.3.0' author: - - 'Stefan Horning (@stefanhorning)' + - 'Stefan Horning (@stefanhorning)' options: domain_name: description: @@ -56,17 +56,17 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 notes: - - Does not create a DNS entry on Route53, for that use the route53 module. - - Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) - options to add own Certificates. + - Does not create a DNS entry on Route53, for that use the M(community.aws.route53) module. + - Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) + options to add own Certificates. 
''' EXAMPLES = ''' - name: Setup endpoint for a custom domain for your API Gateway HTTP API - community.aws.aws_api_gateway_domain: + community.aws.api_gateway_domain: domain_name: myapi.foobar.com certificate_arn: 'arn:aws:acm:us-east-1:1231123123:certificate/8bd89412-abc123-xxxxx' security_policy: TLS_1_2 From df9e7af3d54b10591dd12c21f39490580e91f627 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 1 Jul 2022 09:36:25 +0200 Subject: [PATCH 511/683] Rename directconnect modules (#1286) Rename directconnect modules SUMMARY Rename directconnect modules in line with new naming guidelines dropping the aws_ prefix. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_direct_connect_confirm_connection.py plugins/modules/aws_direct_connect_connection.py plugins/modules/aws_direct_connect_gateway.py plugins/modules/aws_direct_connect_link_aggregation_group.py plugins/modules/aws_direct_connect_virtual_interface.py plugins/modules/directconnect_confirm_connection.py plugins/modules/directconnect_connection.py plugins/modules/directconnect_gateway.py plugins/modules/directconnect_link_aggregation_group.py plugins/modules/directconnect_virtual_interface.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- ....py => directconnect_confirm_connection.py | 19 +++++++++-------- ...nnection.py => directconnect_connection.py | 21 ++++++++++--------- ...ect_gateway.py => directconnect_gateway.py | 14 ++++++------- ...=> directconnect_link_aggregation_group.py | 12 +++++------ ...e.py => directconnect_virtual_interface.py | 14 ++++++------- 5 files changed, 41 insertions(+), 39 deletions(-) rename aws_direct_connect_confirm_connection.py => directconnect_confirm_connection.py (93%) rename aws_direct_connect_connection.py => directconnect_connection.py (96%) rename aws_direct_connect_gateway.py => directconnect_gateway.py (98%) rename aws_direct_connect_link_aggregation_group.py => directconnect_link_aggregation_group.py (99%) rename aws_direct_connect_virtual_interface.py => directconnect_virtual_interface.py (98%) diff --git a/aws_direct_connect_confirm_connection.py b/directconnect_confirm_connection.py similarity index 93% rename from aws_direct_connect_confirm_connection.py rename to directconnect_confirm_connection.py index b583def09d9..57934f3a332 100644 --- a/aws_direct_connect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -11,16 +11,17 @@ DOCUMENTATION = ''' --- -module: aws_direct_connect_confirm_connection -short_description: Confirms the creation of a hosted DirectConnect connection. +module: directconnect_confirm_connection +short_description: Confirms the creation of a hosted DirectConnect connection description: - Confirms the creation of a hosted DirectConnect, which requires approval before it can be used. - - DirectConnect connections that require approval would be in the 'ordering'. - - After confirmation, they will move to the 'pending' state and finally the 'available' state. -author: "Matt Traynham (@mtraynham)" + - DirectConnect connections that require approval would be in the C(ordering). + - After confirmation, they will move to the C(pending) state and finally the C(available) state. 
+author: + - "Matt Traynham (@mtraynham)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: name: description: @@ -38,12 +39,12 @@ # confirm a Direct Connect by name - name: confirm the connection id - aws_direct_connect_confirm_connection: + community.aws.directconnect_confirm_connection: name: my_host_direct_connect # confirm a Direct Connect by connection_id - name: confirm the connection id - aws_direct_connect_confirm_connection: + community.aws.directconnect_confirm_connection: connection_id: dxcon-xxxxxxxx ''' diff --git a/aws_direct_connect_connection.py b/directconnect_connection.py similarity index 96% rename from aws_direct_connect_connection.py rename to directconnect_connection.py index 3764b1c7802..20ab393fd75 100644 --- a/aws_direct_connect_connection.py +++ b/directconnect_connection.py @@ -8,17 +8,18 @@ DOCUMENTATION = ''' --- -module: aws_direct_connect_connection +module: directconnect_connection version_added: 1.0.0 short_description: Creates, deletes, modifies a DirectConnect connection description: - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location. - Upon creation the connection may be added to a link aggregation group or established as a standalone connection. - The connection may later be associated or disassociated with a link aggregation group. -author: "Sloane Hertel (@s-hertel)" + - Upon creation the connection may be added to a link aggregation group or established as a standalone connection. + - The connection may later be associated or disassociated with a link aggregation group. +author: + - "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: state: description: @@ -69,7 +70,7 @@ EXAMPLES = """ # create a Direct Connect connection -- community.aws.aws_direct_connect_connection: +- community.aws.directconnect_connection: name: ansible-test-connection state: present location: EqDC2 @@ -78,14 +79,14 @@ register: dc # disassociate the LAG from the connection -- community.aws.aws_direct_connect_connection: +- community.aws.directconnect_connection: state: present connection_id: dc.connection.connection_id location: EqDC2 bandwidth: 1Gbps # replace the connection with one with more bandwidth -- community.aws.aws_direct_connect_connection: +- community.aws.directconnect_connection: state: present name: ansible-test-connection location: EqDC2 @@ -93,7 +94,7 @@ forced_update: true # delete the connection -- community.aws.aws_direct_connect_connection: +- community.aws.directconnect_connection: state: absent name: ansible-test-connection """ diff --git a/aws_direct_connect_gateway.py b/directconnect_gateway.py similarity index 98% rename from aws_direct_connect_gateway.py rename to directconnect_gateway.py index 54c336fffbe..bdf388675f6 100644 --- a/aws_direct_connect_gateway.py +++ b/directconnect_gateway.py @@ -7,8 +7,9 @@ DOCUMENTATION = ''' -module: aws_direct_connect_gateway -author: Gobin Sougrakpam (@gobins) +module: directconnect_gateway +author: + - Gobin Sougrakpam (@gobins) version_added: 1.0.0 short_description: Manage AWS Direct Connect gateway description: @@ -17,8 +18,8 @@ - Attaches Virtual Gateways to Direct Connect Gateway. - Detaches Virtual Gateways to Direct Connect Gateway. 
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: state: description: @@ -54,7 +55,7 @@ EXAMPLES = ''' - name: Create a new direct connect gateway attached to virtual private gateway - community.aws.aws_direct_connect_gateway: + community.aws.directconnect_gateway: state: present name: my-dx-gateway amazon_asn: 7224 @@ -62,12 +63,11 @@ register: created_dxgw - name: Create a new unattached dxgw - community.aws.aws_direct_connect_gateway: + community.aws.directconnect_gateway: state: present name: my-dx-gateway amazon_asn: 7224 register: created_dxgw - ''' RETURN = ''' diff --git a/aws_direct_connect_link_aggregation_group.py b/directconnect_link_aggregation_group.py similarity index 99% rename from aws_direct_connect_link_aggregation_group.py rename to directconnect_link_aggregation_group.py index 0567ba90288..77ff74de6d5 100644 --- a/aws_direct_connect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -8,15 +8,16 @@ DOCUMENTATION = ''' --- -module: aws_direct_connect_link_aggregation_group +module: directconnect_link_aggregation_group version_added: 1.0.0 short_description: Manage Direct Connect LAG bundles description: - Create, delete, or modify a Direct Connect link aggregation group. -author: "Sloane Hertel (@s-hertel)" +author: + - "Sloane Hertel (@s-hertel)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: state: description: @@ -82,12 +83,11 @@ EXAMPLES = """ # create a Direct Connect connection -- community.aws.aws_direct_connect_link_aggregation_group: +- community.aws.directconnect_link_aggregation_group: state: present location: EqDC2 lag_id: dxlag-xxxxxxxx bandwidth: 1Gbps - """ RETURN = """ diff --git a/aws_direct_connect_virtual_interface.py b/directconnect_virtual_interface.py similarity index 98% rename from aws_direct_connect_virtual_interface.py rename to directconnect_virtual_interface.py index f0c1b7f7800..8cd5fd70f55 100644 --- a/aws_direct_connect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -8,12 +8,13 @@ DOCUMENTATION = r''' --- -module: aws_direct_connect_virtual_interface +module: directconnect_virtual_interface version_added: 1.0.0 short_description: Manage Direct Connect virtual interfaces description: - Create, delete, or modify a Direct Connect public or private virtual interface. -author: "Sloane Hertel (@s-hertel)" +author: + - "Sloane Hertel (@s-hertel)" options: state: description: @@ -83,9 +84,8 @@ - The virtual interface ID. 
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-
+ - amazon.aws.aws
+ - amazon.aws.ec2
'''

RETURN = r'''
@@ -230,14 +230,14 @@
EXAMPLES = r'''
---
- name: create an association between a LAG and connection
-  community.aws.aws_direct_connect_virtual_interface:
+  community.aws.directconnect_virtual_interface:
    state: present
    name: "{{ name }}"
    link_aggregation_group_id: LAG-XXXXXXXX
    connection_id: dxcon-XXXXXXXX

- name: remove an association between a connection and virtual interface
-  community.aws.aws_direct_connect_virtual_interface:
+  community.aws.directconnect_virtual_interface:
    state: absent
    connection_id: dxcon-XXXXXXXX
    virtual_interface_id: dxv-XXXXXXXX

From a57873dcc50720d100a825415d060818880abac7 Mon Sep 17 00:00:00 2001
From: Kevin Masseix
Date: Fri, 1 Jul 2022 12:38:07 +0200
Subject: [PATCH 512/683] [Plugin elasticache_info] includes the description of each replication group (#646)

[Plugin elasticache_info] includes the description of each replication group

SUMMARY
This pull request adds the description of the corresponding ElastiCache replication group to each described cache cluster.
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
elasticache_info
ADDITIONAL INFORMATION
Currently the only way to describe ElastiCache replication groups with Ansible is to use the dynamic inventory. However, exposing the information about each replication group could be useful in other contexts. In my case, I am generating a few .env files and I need to include the primary endpoint and reader endpoint of some ElastiCache replication groups. Since the elasticache_info plugin already exists and already retrieves the appropriate replication_group_id, I believe that this plugin should also retrieve the replication group description when it exists.
OTHER REMARKS CONCERNING THIS PULL REQUEST
Each provided description is copy/pasted from the official boto3 documentation, which is distributed under an Apache 2.0 licence. I did this for consistency, and also because I am not a native speaker. The provided description is also incomplete: I only documented the fields that I was able to retrieve from my current AWS clusters.
Boto3 documentation of the "describe_replication_groups" function: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elasticache.html#ElastiCache.Client.describe_replication_groups
Moreover, this pull request might violate the "integration tests" rule: I didn't find any integration tests for the elasticache_info plugin, and I don't know why none have been written.

Reviewed-by: Mark Chappell
---
 elasticache_info.py | 300 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 251 insertions(+), 49 deletions(-)

diff --git a/elasticache_info.py b/elasticache_info.py
index c9fa9cc4502..154567ac581 100644
--- a/elasticache_info.py
+++ b/elasticache_info.py
@@ -11,19 +11,17 @@
 short_description: Retrieve information for AWS ElastiCache clusters
 version_added: 1.0.0
 description:
-  - Retrieve information from AWS ElastiCache clusters
+  - Retrieve information from AWS ElastiCache clusters.
 options:
  name:
    description:
      - The name of an ElastiCache cluster.
    type: str
-
author:
  - Will Thames (@willthames)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-
+  - amazon.aws.aws
+  - amazon.aws.ec2
'''

EXAMPLES = '''
@@ -37,170 +35,354 @@
RETURN = '''
elasticache_clusters:
-  description: List of ElastiCache clusters
+  description: List of ElastiCache clusters.
returned: always - type: complex + type: list + elements: dict contains: + arn: + description: ARN of the cache cluster. + returned: always + type: str + sample: 'arn:aws:elasticache:us-east-1:123456789012:cluster:ansible-test' auto_minor_version_upgrade: - description: Whether to automatically upgrade to minor versions + description: Whether to automatically upgrade to minor versions. returned: always type: bool sample: true cache_cluster_create_time: - description: Date and time cluster was created + description: Date and time cluster was created. returned: always type: str sample: '2017-09-15T05:43:46.038000+00:00' cache_cluster_id: - description: ID of the cache cluster + description: ID of the cache cluster. returned: always type: str sample: abcd-1234-001 cache_cluster_status: - description: Status of ElastiCache cluster + description: Status of ElastiCache cluster. returned: always type: str sample: available cache_node_type: - description: Instance type of ElastiCache nodes + description: Instance type of ElastiCache nodes. returned: always type: str sample: cache.t2.micro cache_nodes: - description: List of ElastiCache nodes in the cluster + description: List of ElastiCache nodes in the cluster. returned: always - type: complex + type: list + elements: dict contains: cache_node_create_time: - description: Date and time node was created + description: Date and time node was created. returned: always type: str sample: '2017-09-15T05:43:46.038000+00:00' cache_node_id: - description: ID of the cache node + description: ID of the cache node. returned: always type: str sample: '0001' cache_node_status: - description: Status of the cache node + description: Status of the cache node. returned: always type: str sample: available customer_availability_zone: - description: Availability Zone in which the cache node was created + description: Availability Zone in which the cache node was created. returned: always type: str sample: ap-southeast-2b endpoint: - description: Connection details for the cache node + description: Connection details for the cache node. returned: always - type: complex + type: dict contains: address: - description: URL of the cache node endpoint + description: URL of the cache node endpoint. returned: always type: str sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com port: - description: Port of the cache node endpoint + description: Port of the cache node endpoint. returned: always type: int sample: 6379 parameter_group_status: - description: Status of the Cache Parameter Group + description: Status of the Cache Parameter Group. returned: always type: str sample: in-sync cache_parameter_group: - description: Contents of the Cache Parameter Group + description: Contents of the Cache Parameter Group. returned: always - type: complex + type: dict contains: cache_node_ids_to_reboot: - description: Cache nodes which need to be rebooted for parameter changes to be applied + description: Cache nodes which need to be rebooted for parameter changes to be applied. returned: always type: list + elements: str sample: [] cache_parameter_group_name: - description: Name of the cache parameter group + description: Name of the cache parameter group. returned: always type: str sample: default.redis3.2 parameter_apply_status: - description: Status of parameter updates + description: Status of parameter updates. returned: always type: str sample: in-sync cache_security_groups: - description: Security Groups used by the cache + description: Security Groups used by the cache. 
returned: always
 type: list
+  elements: str
 sample:
  - 'sg-abcd1234'
cache_subnet_group_name:
-  description: ElastiCache Subnet Group used by the cache
+  description: ElastiCache Subnet Group used by the cache.
  returned: always
  type: str
  sample: abcd-subnet-group
client_download_landing_page:
-  description: URL of client download web page
+  description: URL of client download web page.
  returned: always
  type: str
  sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
engine:
-  description: Engine used by ElastiCache
+  description: Engine used by ElastiCache.
  returned: always
  type: str
  sample: redis
engine_version:
-  description: Version of ElastiCache engine
+  description: Version of ElastiCache engine.
  returned: always
  type: str
  sample: 3.2.4
notification_configuration:
-  description: Configuration of notifications
+  description: Configuration of notifications.
  returned: if notifications are enabled
-  type: complex
+  type: dict
  contains:
    topic_arn:
-      description: ARN of notification destination topic
+      description: ARN of notification destination topic.
      returned: if notifications are enabled
      type: str
      sample: arn:aws:sns:*:123456789012:my_topic
    topic_name:
-      description: Name of notification destination topic
+      description: Name of notification destination topic.
      returned: if notifications are enabled
      type: str
      sample: MyTopic
num_cache_nodes:
-  description: Number of Cache Nodes
+  description: Number of Cache Nodes.
  returned: always
  type: int
  sample: 1
pending_modified_values:
-  description: Values that are pending modification
+  description: Values that are pending modification.
  returned: always
-  type: complex
-  contains: {}
+  type: dict
preferred_availability_zone:
-  description: Preferred Availability Zone
+  description: Preferred Availability Zone.
  returned: always
  type: str
  sample: ap-southeast-2b
preferred_maintenance_window:
-  description: Time slot for preferred maintenance window
+  description: Time slot for preferred maintenance window.
  returned: always
  type: str
  sample: sat:12:00-sat:13:00
+ replication_group:
+   description: Information about the associated replication group.
+   version_added: 4.1.0
+   returned: if replication is enabled
+   type: dict
+   contains:
+     arn:
+       description: The ARN (Amazon Resource Name) of the replication group.
+       returned: always
+       type: str
+     at_rest_encryption_enabled:
+       description: A flag that enables encryption at-rest when set to true.
+       returned: always
+       type: bool
+     auth_token_enabled:
+       description: A flag that enables using an AuthToken (password) when issuing Redis commands.
+       returned: always
+       type: bool
+     automatic_failover:
+       description: Indicates the status of automatic failover for this Redis replication group.
+       returned: always
+       type: str
+       sample: enabled
+     cache_node_type:
+       description: The name of the compute and memory capacity node type for each node in the replication group.
+       returned: always
+       type: str
+       sample: cache.t3.medium
+     cluster_enabled:
+       description: A flag indicating whether or not this replication group is cluster enabled.
+       returned: always
+       type: bool
+     description:
+       description: The user supplied description of the replication group.
+       returned: always
+       type: str
+     global_replication_group_info:
+       description: The name of the Global datastore and role of this replication group in the Global datastore.
+       returned: always
+       type: dict
+       contains:
+         global_replication_group_id:
+           description: The name of the Global datastore.
+ returned: always + type: str + global_replication_group_member_role: + description: The role of the replication group in a Global datastore. Can be primary or secondary. + returned: always + type: str + kms_key_id: + description: The ID of the KMS key used to encrypt the disk in the cluster. + returned: always + type: str + member_clusters: + description: The names of all the cache clusters that are part of this replication group. + returned: always + type: list + elements: str + multi_az: + description: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + returned: always + type: str + sample: enabled + node_groups: + description: A list of node groups in this replication group. + returned: always + type: list + elements: dict + contains: + node_group_id: + description: The identifier for the node group (shard). + returned: always + type: str + node_group_members: + description: A list containing information about individual nodes within the node group (shard). + returned: always + type: list + elements: dict + contains: + cache_cluster_id: + description: The ID of the cluster to which the node belongs. + returned: always + type: str + cache_node_id: + description: The ID of the node within its cluster. + returned: always + type: str + current_role: + description: The role that is currently assigned to the node - primary or replica. + returned: always + type: str + sample: primary + preferred_availability_zone: + description: The name of the Availability Zone in which the node is located. + returned: always + type: str + read_endpoint: + description: The information required for client programs to connect to a node for read operations. + returned: always + type: list + elements: dict + contains: + address: + description: The DNS hostname of the cache node. + returned: always + type: str + port: + description: The port number that the cache engine is listening on. + returned: always + type: int + sample: 6379 + primary_endpoint: + description: The endpoint of the primary node in this node group (shard). + returned: always + type: list + elements: dict + contains: + address: + description: The DNS hostname of the cache node. + returned: always + type: str + port: + description: The port number that the cache engine is listening on. + returned: always + type: int + sample: 6379 + reader_endpoint: + description: The endpoint of the cache node. + returned: always + type: dict + contains: + address: + description: The DNS hostname of the cache node. + returned: always + type: str + port: + description: The port number that the cache engine is listening on. + returned: always + type: int + sample: 6379 + status: + description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting). + returned: always + type: str + sample: available + pending_modified_values: + description: A group of settings to be applied to the replication group, either immediately or during the next maintenance window. + returned: always + type: dict + replication_group_id: + description: Replication Group Id. + returned: always + type: str + sample: replication-001 + snapshot_retention_limit: + description: The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. + returned: always + type: int + snapshot_window: + description: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). 
+           returned: always
+           type: str
+       sample: 07:00-09:00
+     snapshotting_cluster_id:
+       description: The cluster ID that is used as the daily snapshot source for the replication group.
+       returned: always
+       type: str
+     status:
+       description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting), C(create-failed), C(snapshotting)
+       returned: always
+       type: str
+     transit_encryption_enabled:
+       description: A flag that enables in-transit encryption when set to C(true).
+       returned: always
+       type: bool
replication_group_id:
-  description: Replication Group Id
-  returned: always
+  description: Replication Group Id.
+  returned: if replication is enabled
  type: str
  sample: replication-001
security_groups:
-  description: List of Security Groups associated with ElastiCache
+  description: List of Security Groups associated with ElastiCache.
  returned: always
-  type: complex
+  type: list
+  elements: dict
  contains:
    security_group_id:
      description: Security Group ID
@@ -215,8 +397,7 @@
tags:
  description: Tags applied to the ElastiCache cluster
  returned: always
-  type: complex
-  contains: {}
+  type: dict
  sample:
    Application: web
    Environment: test
@@ -248,6 +429,16 @@ def describe_cache_clusters_with_backoff(client, cluster_id=None):
     return response['CacheClusters']


+@AWSRetry.exponential_backoff()
+def describe_replication_group_with_backoff(client, replication_group_id):
+    try:
+        response = client.describe_replication_groups(ReplicationGroupId=replication_group_id)
+    except is_boto3_error_code('ReplicationGroupNotFoundFault'):
+        return None
+
+    return response['ReplicationGroups'][0]
+
+
@AWSRetry.exponential_backoff()
def get_elasticache_tags_with_backoff(client, cluster_id):
    return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
@@ -284,6 +475,17 @@ def get_elasticache_clusters(client, module):
            module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")

        cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+
+        if cluster.get('replication_group_id', None):
+            try:
+                replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id'])
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Couldn't obtain replication group info")
+
+            if replication_group is not None:
+                replication_group = camel_dict_to_snake_dict(replication_group)
+                cluster['replication_group'] = replication_group
+
        results.append(cluster)

    return results

From 3c58ff95dee5cbece6e74559147e5fef5dd3c6b5 Mon Sep 17 00:00:00 2001
From: Daniel Cotton <31445918+danielcotton@users.noreply.github.com>
Date: Fri, 1 Jul 2022 21:49:57 +0930
Subject: [PATCH 513/683] Add lightsail_static_ip module (#259)

Add lightsail_static_ip module

SUMMARY
Adds a module to manage AWS Lightsail Static IP addresses
Fixes #250
ISSUE TYPE
New Module Pull Request
COMPONENT NAME
community.aws.lightsail_static_ip
ADDITIONAL INFORMATION
As per my earlier pull request, this is part of a larger body of work to add AWS Lightsail management functionality to this collection. Apologies if there are any issues/I have missed anything. I am new to Ansible development, but have tried to follow the contribution docs as best as I can.
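For reference, a minimal boto3 sketch of the three Lightsail calls the module wraps (get_static_ip, allocate_static_ip and release_static_ip); the client setup, region and IP name here are illustrative assumptions, not part of the module itself:

import boto3
import botocore

client = boto3.client('lightsail', region_name='ap-southeast-2')

# Allocate a static IP, then look it up; a NotFoundException error code
# from get_static_ip is how absence is detected.
client.allocate_static_ip(staticIpName='test_static')
try:
    ip = client.get_static_ip(staticIpName='test_static')['staticIp']
    print(ip['ipAddress'], ip['isAttached'])
except botocore.exceptions.ClientError as e:
    print(e.response['Error']['Code'])

# Release the address once it is no longer needed.
client.release_static_ip(staticIpName='test_static')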
Example playbook:
- name: create static IP
  community.aws.lightsail_static_ip:
    name: "test_static"
    state: present
    region: ap-southeast-2

Reviewed-by: Jill R
Reviewed-by: Mark Chappell
---
 lightsail_static_ip.py | 148 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 lightsail_static_ip.py

diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py
new file mode 100644
index 00000000000..7f5d5377c0a
--- /dev/null
+++ b/lightsail_static_ip.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lightsail_static_ip
+version_added: 4.1.0
+short_description: Manage static IP addresses in AWS Lightsail
+description:
+  - Manage static IP addresses in AWS Lightsail.
+author:
+  - "Daniel Cotton (@danielcotton)"
+options:
+  state:
+    description:
+      - Describes the desired state.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  name:
+    description: Name of the static IP.
+    required: true
+    type: str
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+'''
+
+
+EXAMPLES = '''
+- name: Provision a Lightsail static IP
+  community.aws.lightsail_static_ip:
+    state: present
+    name: my_static_ip
+  register: my_ip
+
+- name: Remove a static IP
+  community.aws.lightsail_static_ip:
+    state: absent
+    name: my_static_ip
+'''
+
+RETURN = '''
+static_ip:
+  description: static_ip instance data
+  returned: always
+  type: dict
+  sample:
+    arn: "arn:aws:lightsail:ap-southeast-2:184297340509:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a"
+    created_at: "2021-02-28T00:04:05.202000+10:30"
+    ip_address: "192.0.2.5"
+    is_attached: false
+    location:
+      availability_zone: all
+      region_name: ap-southeast-2
+    name: "static_ip"
+    resource_type: StaticIp
+    support_code: "677585553206/192.0.2.5"
+'''
+
+try:
+    import botocore
+except ImportError:
+    # will be caught by AnsibleAWSModule
+    pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False):
+
+    try:
+        res = client.get_static_ip(staticIpName=static_ip_name)
+    except is_boto3_error_code('NotFoundException') as e:
+        if fail_if_not_found:
+            module.fail_json_aws(e)
+        return None
+    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+    return res['staticIp']
+
+
+def create_static_ip(module, client, static_ip_name):
+
+    inst = find_static_ip_info(module, client, static_ip_name)
+    if inst:
+        module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst))
+    else:
+        create_params = {'staticIpName': static_ip_name}
+
+        try:
+            client.allocate_static_ip(**create_params)
+        except botocore.exceptions.ClientError as e:
+            module.fail_json_aws(e)
+
+        inst = find_static_ip_info(module, client, static_ip_name, fail_if_not_found=True)
+
+        module.exit_json(changed=True, static_ip=camel_dict_to_snake_dict(inst))
+
+
+def delete_static_ip(module, client, static_ip_name):
+
+    inst = find_static_ip_info(module, client, static_ip_name)
+    if inst is None:
+        module.exit_json(changed=False, static_ip={})
+
+
+
changed = False
+    try:
+        client.release_static_ip(staticIpName=static_ip_name)
+        changed = True
+    except botocore.exceptions.ClientError as e:
+        module.fail_json_aws(e)
+
+    module.exit_json(changed=changed, static_ip=camel_dict_to_snake_dict(inst))
+
+
+def main():
+
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec)
+
+    client = module.client('lightsail')
+
+    name = module.params.get('name')
+    state = module.params.get('state')
+
+    if state == 'present':
+        create_static_ip(module, client, name)
+    elif state == 'absent':
+        delete_static_ip(module, client, name)
+
+
+if __name__ == '__main__':
+    main()

From adc7febdf1a72bb147686187ddd1c4dd9f1c44af Mon Sep 17 00:00:00 2001
From: luiseterc
Date: Sat, 2 Jul 2022 00:04:38 +0200
Subject: [PATCH 514/683] Add source_version param to ec2_launch_template module (#239)

Add source_version param to ec2_launch_template module

SUMMARY
Add support for Boto3.create_launch_template_version's source_version parameter. Accepted values:
- int (a specific version number): creates the new launch template version using that version as its base, hence keeping all of its parameters
- string ('latest'): uses the latest version found in the template_versions list
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
ec2_launch_template.py
ADDITIONAL INFORMATION
Sanity tests passed for this module.

Reviewed-by: Mark Chappell
Reviewed-by: Markus Bergholz
Reviewed-by: Joseph Torcasso
---
 ec2_launch_template.py | 47 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 41 insertions(+), 6 deletions(-)

diff --git a/ec2_launch_template.py b/ec2_launch_template.py
index 4f2d05e1630..9e7bcd7cda2 100644
--- a/ec2_launch_template.py
+++ b/ec2_launch_template.py
@@ -313,6 +313,14 @@
     For any VPC other than Default, you must use I(security_group_ids).
    type: list
    elements: str
+  source_version:
+    description: >
+      The version number of the launch template version on which to base the new version.
+      The new version inherits the same launch parameters as the source version, except for parameters that you explicitly specify.
+      Snapshots applied to the block device mapping are ignored when creating a new version unless they are explicitly included.
+ type: str + default: latest + version_added: 4.1.0 tags: type: dict description: @@ -569,12 +577,38 @@ def create_or_update(module, template_options): out['changed'] = False return out try: - resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], - LaunchTemplateData=lt_data, - ClientToken=uuid4().hex, - aws_retry=True, - ) + if module.params.get('source_version') in (None, ''): + resp = ec2.create_launch_template_version( + LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateData=lt_data, + ClientToken=uuid4().hex, + aws_retry=True, + ) + elif module.params.get('source_version') == 'latest': + resp = ec2.create_launch_template_version( + LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateData=lt_data, + ClientToken=uuid4().hex, + SourceVersion=str(most_recent['VersionNumber']), + aws_retry=True, + ) + else: + try: + int(module.params.get('source_version')) + except ValueError: + module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version'))) + # get source template version + source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None) + if source_version is None: + module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version'))) + resp = ec2.create_launch_template_version( + LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateData=lt_data, + ClientToken=uuid4().hex, + SourceVersion=str(source_version['VersionNumber']), + aws_retry=True, + ) + if module.params.get('default_version') in (None, ''): # no need to do anything, leave the existing version as default pass @@ -748,6 +782,7 @@ def main(): template_name=dict(aliases=['name']), template_id=dict(aliases=['id']), default_version=dict(default='latest'), + source_version=dict(default='latest') ) arg_spec.update(template_options) From 4a2f86a4aa1b15d0e19f03f677cb46d6b4e65c89 Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Sat, 2 Jul 2022 04:00:07 -0400 Subject: [PATCH 515/683] Fix idempotency and logic to update existing aggregator (#645) Fix idempotency and logic to update existing aggregator SUMMARY describe_configuration_aggregators method returns output similar to the following: { "ConfigurationAggregators": [ { "ConfigurationAggregatorName": "test-name", "ConfigurationAggregatorArn": "arn:aws:config:us-east-1:123412341234:config-aggregator/config-aggregator-xbxbvyq5", "OrganizationAggregationSource": { "RoleArn": "arn:aws:iam::123412341234:role/my-role", "AllAwsRegions": true }, "CreationTime": 1619030767.047, "LastUpdatedTime": 1626463216.998 } ] } As a result, lines 134-136 fail: del current_params['ConfigurationAggregatorArn'] del current_params['CreationTime'] del current_params['LastUpdatedTime'] as they try to delete attributes from the current_params as opposed to current_params['ConfigurationAggregators'][0]. The error message is: KeyError: 'ConfigurationAggregators' Additionally, if no account_sources attribute is specified, the module fails idempotency check, because in that case AccountAggregationSources attribute is present in params, but not in current_params. 
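To make the failure concrete, a small Python sketch using the response shape quoted above (all values are taken from that example):

response = {
    'ConfigurationAggregators': [
        {
            'ConfigurationAggregatorName': 'test-name',
            'ConfigurationAggregatorArn': 'arn:aws:config:us-east-1:123412341234:config-aggregator/config-aggregator-xbxbvyq5',
            'OrganizationAggregationSource': {
                'RoleArn': 'arn:aws:iam::123412341234:role/my-role',
                'AllAwsRegions': True,
            },
            'CreationTime': 1619030767.047,
            'LastUpdatedTime': 1626463216.998,
        }
    ]
}

# The old code deleted keys on the top-level dict, whose only key is
# 'ConfigurationAggregators', hence the KeyError:
#     del response['ConfigurationAggregatorArn']   # raises KeyError
# The fix indexes into the list first:
current_params = response['ConfigurationAggregators'][0]
assert 'ConfigurationAggregatorArn' in current_params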
ISSUE TYPE Bugfix Pull Request COMPONENT NAME aws_config_aggregator ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- aws_config_aggregator.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/aws_config_aggregator.py b/aws_config_aggregator.py index 393413c07b9..f46f11fcafa 100644 --- a/aws_config_aggregator.py +++ b/aws_config_aggregator.py @@ -126,22 +126,25 @@ def create_resource(client, module, params, result): def update_resource(client, module, params, result): + result['changed'] = False + current_params = client.describe_configuration_aggregators( ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] - ) + )['ConfigurationAggregators'][0] - del current_params['ConfigurationAggregatorArn'] - del current_params['CreationTime'] - del current_params['LastUpdatedTime'] + if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []): + result['changed'] = True + + if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}): + result['changed'] = True - if params != current_params['ConfigurationAggregators'][0]: + if result['changed']: try: client.put_configuration_aggregator( ConfigurationAggregatorName=params['ConfigurationAggregatorName'], AccountAggregationSources=params['AccountAggregationSources'], OrganizationAggregationSource=params['OrganizationAggregationSource'] ) - result['changed'] = True result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: From 3cb79c7b08a42ef20827a74138f0d07797e15f38 Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Sat, 2 Jul 2022 10:17:49 -0400 Subject: [PATCH 516/683] New module - aws_glue_crawler (#546) New module - aws_glue_crawler SUMMARY New module - aws_glue_crawler. ISSUE TYPE New Module Pull Request COMPONENT NAME aws_glue_crawler ADDITIONAL INFORMATION Example: community.aws.aws_glue_crawler: - name: my-crawler database_name: my_database role: my-role recrawl_policy: RecrawlBehavior: CRAWL_EVERYTHING schema_change_policy: DeleteBehavior: DELETE_FROM_DATABASE UpdateBehavior: UPDATE_IN_DATABASE targets: S3Targets: - Path: "s3://my-bucket/prefix/folder/" tags: Environment: test Reviewed-by: Mark Chappell Reviewed-by: Ivan Chekaldin Reviewed-by: Jill R --- aws_glue_crawler.py | 428 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 428 insertions(+) create mode 100644 aws_glue_crawler.py diff --git a/aws_glue_crawler.py b/aws_glue_crawler.py new file mode 100644 index 00000000000..11ff1b63304 --- /dev/null +++ b/aws_glue_crawler.py @@ -0,0 +1,428 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Rob White (@wimnat) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: aws_glue_crawler +version_added: 4.1.0 +short_description: Manage an AWS Glue crawler +description: + - Manage an AWS Glue crawler. See U(https://aws.amazon.com/glue/) for details. +author: + - 'Ivan Chekaldin (@ichekaldin)' +options: + database_name: + description: + - The name of the database where results are written. + type: str + description: + description: + - Description of the crawler being defined. + type: str + name: + description: + - The name you assign to this crawler definition. 
It must be unique in your account.
+    required: true
+    type: str
+  recrawl_policy:
+    description:
+      - A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.
+    suboptions:
+      recrawl_behavior:
+        description:
+          - Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
+          - Supported options are C(CRAWL_EVERYTHING) and C(CRAWL_NEW_FOLDERS_ONLY).
+        type: str
+    type: dict
+  role:
+    description:
+      - The name or ARN of the IAM role associated with this crawler.
+      - Required when I(state=present).
+    type: str
+  schema_change_policy:
+    description:
+      - The policy for the crawler's update and deletion behavior.
+    suboptions:
+      delete_behavior:
+        description:
+          - Defines the deletion behavior when the crawler finds a deleted object.
+          - Supported options are C(LOG), C(DELETE_FROM_DATABASE), and C(DEPRECATE_IN_DATABASE).
+        type: str
+      update_behavior:
+        description:
+          - Defines the update behavior when the crawler finds a changed schema.
+          - Supported options are C(LOG) and C(UPDATE_IN_DATABASE).
+        type: str
+    type: dict
+  state:
+    description:
+      - Create or delete the AWS Glue crawler.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  table_prefix:
+    description:
+      - The table prefix used for catalog tables that are created.
+    type: str
+  targets:
+    description:
+      - A list of targets to crawl. See example below.
+      - Required when I(state=present).
+    type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.tags
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an AWS Glue crawler
+- community.aws.aws_glue_crawler:
+    name: my-glue-crawler
+    database_name: my_database
+    role: my-iam-role
+    schema_change_policy:
+      delete_behavior: DELETE_FROM_DATABASE
+      update_behavior: UPDATE_IN_DATABASE
+    recrawl_policy:
+      recrawl_behavior: CRAWL_EVERYTHING
+    targets:
+      S3Targets:
+        - Path: "s3://my-bucket/prefix/folder/"
+          ConnectionName: my-connection
+          Exclusions:
+            - "**.json"
+            - "**.yml"
+    state: present
+
+# Delete an AWS Glue crawler
+- community.aws.aws_glue_crawler:
+    name: my-glue-crawler
+    state: absent
+
+'''
+
+RETURN = r'''
+creation_time:
+  description: The time and date that this crawler definition was created.
+  returned: when state is present
+  type: str
+  sample: '2021-04-01T05:19:58.326000+00:00'
+database_name:
+  description: The name of the database where results are written.
+  returned: when state is present
+  type: str
+  sample: my_table
+description:
+  description: Description of the crawler.
+  returned: when state is present
+  type: str
+  sample: My crawler
+last_updated:
+  description: The time and date that this crawler definition was last updated.
+  returned: when state is present
+  type: str
+  sample: '2021-04-01T05:19:58.326000+00:00'
+name:
+  description: The name of the AWS Glue crawler.
+  returned: always
+  type: str
+  sample: my-glue-crawler
+recrawl_policy:
+  description: A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.
+  returned: when state is present
+  type: complex
+  contains:
+    RecrawlBehavior:
+      description: Whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
+ returned: when state is present + type: str + sample: CRAWL_EVERYTHING +role: + description: The name or ARN of the IAM role associated with this crawler. + returned: when state is present + type: str + sample: my-iam-role +schema_change_policy: + description: The policy for the crawler's update and deletion behavior. + returned: when state is present + type: complex + contains: + DeleteBehavior: + description: The deletion behavior when the crawler finds a deleted object. + returned: when state is present + type: str + sample: DELETE_FROM_DATABASE + UpdateBehavior: + description: The update behavior when the crawler finds a changed schema. + returned: when state is present + type: str + sample: UPDATE_IN_DATABASE + +table_prefix: + description: The table prefix used for catalog tables that are created. + returned: when state is present + type: str + sample: my_prefix +targets: + description: A list of targets to crawl. + returned: when state is present + type: complex + contains: + S3Targets: + description: List of S3 targets. + returned: when state is present + type: list + JdbcTargets: + description: List of JDBC targets. + returned: when state is present + type: list + MongoDBTargets: + description: List of Mongo DB targets. + returned: when state is present + type: list + DynamoDBTargets: + description: List of DynamoDB targets. + returned: when state is present + type: list + CatalogTargets: + description: List of catalog targets. + returned: when state is present + type: list +''' + +# Non-ansible imports +import copy +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info + + +def _get_glue_crawler(connection, module, glue_crawler_name): + ''' + Get an AWS Glue crawler based on name. If not found, return None. + ''' + try: + return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler'] + except is_boto3_error_code('EntityNotFoundException'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + + +def _trim_targets(targets): + return [_trim_target(t) for t in targets] + + +def _trim_target(target): + """ + Some target types have optional parameters which AWS will fill in and return + To compare the desired targets and the current targets we need to ignore the defaults + """ + if not target: + return None + retval = target.copy() + if not retval.get('Exclusions', None): + retval.pop('Exclusions', None) + return retval + + +def _compare_glue_crawler_params(user_params, current_params): + ''' + Compare Glue crawler params. 
If there is a difference, return True immediately else return False + ''' + if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']: + return True + if 'Description' in user_params and user_params['Description'] != current_params['Description']: + return True + if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']: + return True + if 'Role' in user_params and user_params['Role'] != current_params['Role']: + return True + if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']: + return True + if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']: + return True + if 'Targets' in user_params: + if 'S3Targets' in user_params['Targets']: + if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']): + return True + if 'JdbcTargets' in user_params['Targets'] and user_params['Targets']['JdbcTargets'] != current_params['Targets']['JdbcTargets']: + if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']): + return True + if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']: + return True + if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']: + return True + if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']: + return True + + return False + + +def ensure_tags(connection, module, glue_crawler): + changed = False + + if module.params.get('tags') is None: + return False + + account_id, partition = get_aws_account_info(module) + arn = 'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name')) + + try: + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + existing_tags = {} + else: + module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name')) + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + + if tags_to_remove: + changed = True + if not module.check_mode: + try: + connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + + if tags_to_add: + changed = True + if not module.check_mode: + try: + connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + + return changed + + +def create_or_update_glue_crawler(connection, module, glue_crawler): + ''' + Create or update an AWS Glue crawler + ''' + + changed = False + params = dict() + params['Name'] = module.params.get('name') + params['Role'] = module.params.get('role') + params['Targets'] = module.params.get('targets') + if module.params.get('database_name') is not None: + 
params['DatabaseName'] = module.params.get('database_name')
+    if module.params.get('description') is not None:
+        params['Description'] = module.params.get('description')
+    if module.params.get('recrawl_policy') is not None:
+        params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True)
+    if module.params.get('role') is not None:
+        params['Role'] = module.params.get('role')
+    if module.params.get('schema_change_policy') is not None:
+        params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True)
+    if module.params.get('table_prefix') is not None:
+        params['TablePrefix'] = module.params.get('table_prefix')
+    if module.params.get('targets') is not None:
+        params['Targets'] = module.params.get('targets')
+
+    if glue_crawler:
+        if _compare_glue_crawler_params(params, glue_crawler):
+            try:
+                if not module.check_mode:
+                    connection.update_crawler(aws_retry=True, **params)
+                changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e)
+    else:
+        try:
+            if not module.check_mode:
+                connection.create_crawler(aws_retry=True, **params)
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e)
+
+    glue_crawler = _get_glue_crawler(connection, module, params['Name'])
+
+    changed |= ensure_tags(connection, module, glue_crawler)
+
+    module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets']))
+
+
+def delete_glue_crawler(connection, module, glue_crawler):
+    '''
+    Delete an AWS Glue crawler
+    '''
+    changed = False
+
+    if glue_crawler:
+        try:
+            if not module.check_mode:
+                connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name'])
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e)
+
+    module.exit_json(changed=changed)
+
+
+def main():
+
+    argument_spec = (
+        dict(
+            database_name=dict(type='str'),
+            description=dict(type='str'),
+            name=dict(required=True, type='str'),
+            purge_tags=dict(type='bool', default=True),
+            recrawl_policy=dict(type='dict', options=dict(
+                recrawl_behavior=dict(type='str')
+            )),
+            role=dict(type='str'),
+            schema_change_policy=dict(type='dict', options=dict(
+                delete_behavior=dict(type='str'),
+                update_behavior=dict(type='str')
+            )),
+            state=dict(required=True, choices=['present', 'absent'], type='str'),
+            table_prefix=dict(type='str'),
+            tags=dict(type='dict', aliases=['resource_tags']),
+            targets=dict(type='dict')
+        )
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              required_if=[
+                                  ('state', 'present', ['role', 'targets'])
+                              ],
+                              supports_check_mode=True
+                              )
+
+    connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+    state = module.params.get('state')
+
+    glue_crawler = _get_glue_crawler(connection, module, module.params.get('name'))
+
+    if state == 'present':
+        create_or_update_glue_crawler(connection, module, glue_crawler)
+    else:
+        delete_glue_crawler(connection, module, glue_crawler)
+
+
+if __name__ == '__main__':
+    main()

From e19e4c0f8727e5060436016521bffeb9923d256b Mon Sep 17 00:00:00 2001
From: canidam
Date: Sun, 3 Jul 2022 18:02:05 +0300
Subject: [PATCH 517/683] Add support for subscriptions attributes parameter - currently only for RawMessageDelivery (SQS) (#640)

Add support for SQS RawMessageDelivery attribute in
subscriptions - issue #193

SUMMARY
Added support to configure the RawMessageDelivery option when configuring SQS endpoints. It uses boto3 set_subscription_attributes() to configure changes. It currently supports only this option, but should be easily extended in the future. Attributes are expected in the same form as in the boto3 docs: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes
Fixes #193
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
updates sns_topic.py to support new functionality
example:
- sns_topic:
    name: '{{ sns_topic_topic_name }}'
    display_name: My new topic name
    subscriptions:
      - endpoint: "{{ sqs_arn }}"
        protocol: sqs
        attributes:
          RawMessageDelivery: true

Reviewed-by: Jill R
Reviewed-by: None
Reviewed-by: Alina Buzachis
Reviewed-by: Mark Chappell
---
 sns_topic.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/sns_topic.py b/sns_topic.py
index 561c9d615c4..59ace8b051d 100644
--- a/sns_topic.py
+++ b/sns_topic.py
@@ -133,6 +133,10 @@
      protocol:
        description: Protocol of subscription.
        required: true
+      attributes:
+        description: Attributes of subscription. Only supports RawMessageDelivery for SQS endpoints.
+        default: {}
+        version_added: "4.1.0"
    type: list
    elements: dict
    default: []
@@ -358,6 +362,8 @@ def __init__(self,
        self.subscriptions_existing = []
        self.subscriptions_deleted = []
        self.subscriptions_added = []
+        self.subscriptions_attributes_set = []
+        self.desired_subscription_attributes = dict()
        self.purge_subscriptions = purge_subscriptions
        self.check_mode = check_mode
        self.topic_created = False
@@ -455,6 +461,45 @@ def _set_topic_subs(self):
                self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
        return changed

+    def _init_desired_subscription_attributes(self):
+        for sub in self.subscriptions:
+            sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint']))
+            tmp_dict = sub.get('attributes', {})
+            # aws sdk expects values to be strings
+            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes
+            for k, v in tmp_dict.items():
+                tmp_dict[k] = str(v)
+
+            self.desired_subscription_attributes[sub_key] = tmp_dict
+
+    def _set_topic_subs_attributes(self):
+        changed = False
+        for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn):
+            sub_key = (sub['Protocol'], sub['Endpoint'])
+            sub_arn = sub['SubscriptionArn']
+            if sub_key not in self.desired_subscription_attributes:
+                # subscription isn't defined in desired, skipping
+                continue
+
+            try:
+                sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes']
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn)
+
+            raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery')
+            if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes:
+                if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower():
+                    changed = True
+                    if not self.check_mode:
+                        try:
+                            self.connection.set_subscription_attributes(SubscriptionArn=sub_arn,
+                                                                        AttributeName='RawMessageDelivery',
+                                                                        AttributeValue=raw_message)
+                        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                            self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription
attribute") + + return changed + def _delete_subscriptions(self): # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days # https://forums.aws.amazon.com/thread.jspa?threadID=85993 @@ -496,6 +541,13 @@ def ensure_ok(self): elif self.display_name or self.policy or self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") changed |= self._set_topic_subs() + + self._init_desired_subscription_attributes() + if self.topic_arn in list_topics(self.connection, self.module): + changed |= self._set_topic_subs_attributes() + elif any(self.desired_subscription_attributes.values()): + self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account") + return changed def ensure_gone(self): From a3e953c630a0be8eb6af293c1c19a05ffaa7b2eb Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 09:54:19 +0200 Subject: [PATCH 518/683] Rename aws_sgw_info module (#1301) Rename aws_sgw_info module SUMMARY In line with the new naming guidelines rename aws_sgw_info to storagegateway_info ISSUE TYPE Feature Pull Request COMPONENT NAME aws_sgw_info storagegateway_info ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_sgw_info.py => storagegateway_info.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) rename aws_sgw_info.py => storagegateway_info.py (98%) diff --git a/aws_sgw_info.py b/storagegateway_info.py similarity index 98% rename from aws_sgw_info.py rename to storagegateway_info.py index f49299947be..a765300e354 100644 --- a/aws_sgw_info.py +++ b/storagegateway_info.py @@ -10,12 +10,13 @@ DOCUMENTATION = ''' --- -module: aws_sgw_info +module: storagegateway_info version_added: 1.0.0 short_description: Fetch AWS Storage Gateway information description: - - Fetch AWS Storage Gateway information -author: Loic Blot (@nerzhul) + - Fetch AWS Storage Gateway information +author: + - Loic Blot (@nerzhul) options: gather_local_disks: description: @@ -42,9 +43,8 @@ required: false default: true extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' RETURN = ''' From 1018ceab6a934540923c5c7ff8983545eca3d3c5 Mon Sep 17 00:00:00 2001 From: Berend de Boer Date: Mon, 4 Jul 2022 19:54:24 +1200 Subject: [PATCH 519/683] ecs_service - support setting deployment controller on a service (#340) ecs_service - support setting deployment controller on a service SUMMARY Support setting platform version to 1.4.0 (LATEST is 1.3.0) and deployment controller. The first allows access to new 1.4.0 features. The second change allows you to create a service that can be controlled with Code Deploy. Example: - name: create a Fargate service community.aws.ecs_service: state: present name: "my-service" cluster: "my-cluster" platform_version: 1.4.0 task_definition: "my-task" desired_count: "1" launch_type: FARGATE scheduling_strategy: REPLICA deployment_controller: type: CODE_DEPLOY load_balancers: - targetGroupArn: "arn:..." containerName: example containerPort: 80 network_configuration: subnets: - "{{vpc_zone_a.subnet.id}}" - "{{vpc_zone_b.subnet.id}}" security_groups: - "sg-example" assign_public_ip: true This fixes #338. 
ISSUE TYPE
Feature Pull Request

Reviewed-by: Jill R
Reviewed-by: Mark Chappell
Reviewed-by: Markus Bergholz
---
 ecs_service.py | 59 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 47 insertions(+), 12 deletions(-)

diff --git a/ecs_service.py b/ecs_service.py
index aa455d72345..78e352447ee 100644
--- a/ecs_service.py
+++ b/ecs_service.py
@@ -12,16 +12,16 @@
 version_added: 1.0.0
 short_description: Create, terminate, start or stop a service in ECS
 description:
-  - Creates or terminates ECS. services.
+  - Creates or terminates ECS services.
 notes:
  - The service role specified must be assumable. (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
  - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html).
  - An IAM role must have been previously created.
author:
-  - "Mark Chance (@Java1Guy)"
-  - "Darek Kaczynski (@kaczynskid)"
-  - "Stephane Maarek (@simplesteph)"
-  - "Zac Blazic (@zacblazic)"
+  - "Mark Chance (@Java1Guy)"
+  - "Darek Kaczynski (@kaczynskid)"
+  - "Stephane Maarek (@simplesteph)"
+  - "Zac Blazic (@zacblazic)"
options:
  state:
    description:
@@ -46,11 +46,15 @@
    description:
      - The task definition the service will run.
      - This parameter is required when I(state=present).
+      - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller, in which case
+        the task definition is managed by Code Pipeline and cannot be updated.
    required: false
    type: str
  load_balancers:
    description:
      - The list of ELBs defined for this service.
+      - Load balancers for an existing service cannot be updated, and it is an error to do so.
+      - When the deployment controller is CODE_DEPLOY, changes to this value are simply ignored, and do not cause an error.
    required: false
    type: list
    elements: dict
@@ -90,6 +94,17 @@
    required: false
    type: bool
    default: false
+  deployment_controller:
+    description:
+      - The deployment controller to use for the service. If no deployment controller is specified, the ECS controller is used.
+    required: false
+    version_added: 4.1.0
+    type: dict
+    suboptions:
+      type:
+        type: str
+        choices: ["ECS", "CODE_DEPLOY", "EXTERNAL"]
+        description: The deployment controller type to use.
  deployment_configuration:
    description:
      - Optional parameters that control the deployment_configuration.
@@ -238,9 +253,8 @@ default: false version_added: 4.1.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -590,6 +604,10 @@ ''' import time +DEPLOYMENT_CONTROLLER_TYPE_MAP = { + 'type': 'str', +} + DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', 'minimum_healthy_percent': 'int', @@ -664,7 +682,8 @@ def is_matching_service(self, expected, existing): # but the user is just entering # ansible-fargate-nginx:3 if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: - return False + if existing['deploymentController']['type'] != 'CODE_DEPLOY': + return False if expected.get('health_check_grace_period_seconds'): if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): @@ -682,7 +701,7 @@ def is_matching_service(self, expected, existing): return True def create_service(self, service_name, cluster_name, task_definition, load_balancers, - desired_count, client_token, role, deployment_configuration, + desired_count, client_token, role, deployment_controller, deployment_configuration, placement_constraints, placement_strategy, health_check_grace_period_seconds, network_configuration, service_registries, launch_type, platform_version, scheduling_strategy, capacity_provider_strategy): @@ -699,6 +718,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan ) if network_configuration: params['networkConfiguration'] = network_configuration + if deployment_controller: + params['deploymentController'] = deployment_controller if launch_type: params['launchType'] = launch_type if platform_version: @@ -786,6 +807,7 @@ def main(): repeat=dict(required=False, type='int', default=10), force_new_deployment=dict(required=False, default=False, type='bool'), force_deletion=dict(required=False, default=False, type='bool'), + deployment_controller=dict(required=False, default={}, type='dict'), deployment_configuration=dict(required=False, default={}, type='dict'), wait=dict(required=False, default=False, type='bool'), placement_constraints=dict( @@ -851,6 +873,11 @@ def main(): else: network_configuration = None + deployment_controller = map_complex_type(module.params['deployment_controller'], + DEPLOYMENT_CONTROLLER_TYPE_MAP) + + deploymentController = snake_dict_to_camel_dict(deployment_controller) + deployment_configuration = map_complex_type(module.params['deployment_configuration'], DEPLOYMENT_CONFIGURATION_TYPE_MAP) @@ -912,12 +939,19 @@ def main(): if 'capacityProviderStrategy' in existing.keys(): module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") if (existing['loadBalancers'] or []) != loadBalancers: - module.fail_json(msg="It is not possible to update the load balancers of an existing service") + if existing['deploymentController']['type'] != 'CODE_DEPLOY': + module.fail_json(msg="It is not possible to update the load balancers of an existing service") + + if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY': + task_definition = '' + network_configuration = [] + else: + task_definition = module.params['task_definition'] # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'], - module.params['task_definition'], + task_definition, module.params['desired_count'], deploymentConfiguration, network_configuration, @@ -935,6 +969,7 @@ def main(): module.params['desired_count'], 
clientToken, role, + deploymentController, deploymentConfiguration, module.params['placement_constraints'], module.params['placement_strategy'], From 5699bb48691d8bd38aada9d794fd49e5a9fb4935 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 10:48:19 +0200 Subject: [PATCH 520/683] Rename WAF (v1) modules (#1299) Rename WAF (v1) modules SUMMARY In line with the new naming guidelines drop "aws_" from the start of the WAFv1 modules ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_waf_condition.py plugins/modules/aws_waf_info.py plugins/modules/aws_waf_rule.py plugins/modules/aws_waf_web_acl.py plugins/modules/waf_condition.py plugins/modules/waf_info.py plugins/modules/waf_rule.py plugins/modules/waf_web_acl.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_waf_condition.py => waf_condition.py | 247 ++++++++++++----------- aws_waf_info.py => waf_info.py | 21 +- aws_waf_rule.py => waf_rule.py | 101 +++++---- aws_waf_web_acl.py => waf_web_acl.py | 124 ++++++------ 4 files changed, 248 insertions(+), 245 deletions(-) rename aws_waf_condition.py => waf_condition.py (84%) rename aws_waf_info.py => waf_info.py (89%) rename aws_waf_rule.py => waf_rule.py (86%) rename aws_waf_web_acl.py => waf_web_acl.py (83%) diff --git a/aws_waf_condition.py b/waf_condition.py similarity index 84% rename from aws_waf_condition.py rename to waf_condition.py index 77f66f9f767..b948ec9a81f 100644 --- a/aws_waf_condition.py +++ b/waf_condition.py @@ -8,138 +8,139 @@ DOCUMENTATION = r''' -module: aws_waf_condition +module: waf_condition short_description: Create and delete WAF Conditions version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/) + - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_condition). + The usage did not change. author: - Will Thames (@willthames) - Mike Mochan (@mmochan) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: - name: - description: Name of the Web Application Firewall condition to manage. - required: true - type: str - type: - description: The type of matching to perform. - choices: - - byte - - geo - - ip - - regex - - size - - sql - - xss - type: str - required: true - filters: - description: - - A list of the filters against which to match. - - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string). - - For I(type=geo), the only valid key is I(country). - - For I(type=ip), the only valid key is I(ip_address). - - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern). - - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size). - - For I(type=sql), valid keys are I(field_to_match) and I(transformation). - - For I(type=xss), valid keys are I(field_to_match) and I(transformation). - - Required when I(state=present). - type: list - elements: dict - suboptions: - field_to_match: - description: - - The field upon which to perform the match. - - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). - type: str - choices: ['uri', 'query_string', 'header', 'method', 'body'] - position: - description: - - Where in the field the match needs to occur. - - Only valid when I(type=byte). - type: str - choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word'] - header: - description: - - Which specific header should be matched. 
- - Required when I(field_to_match=header). - - Valid when I(type=byte). - type: str - transformation: - description: - - A transform to apply on the field prior to performing the match. - - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). - type: str - choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode'] - country: - description: - - Value of geo constraint (typically a two letter country code). - - The only valid key when I(type=geo). - type: str - ip_address: - description: - - An IP Address or CIDR to match. - - The only valid key when I(type=ip). - type: str - regex_pattern: - description: - - A dict describing the regular expressions used to perform the match. - - Only valid when I(type=regex). - type: dict - suboptions: - name: - description: A name to describe the set of patterns. - type: str - regex_strings: - description: A list of regular expressions to match. - type: list - elements: str - comparison: - description: - - What type of comparison to perform. - - Only valid key when I(type=size). - type: str - choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT'] - size: - description: - - The size of the field (in bytes). - - Only valid key when I(type=size). - type: int - target_string: - description: - - The string to search for. - - May be up to 50 bytes. - - Valid when I(type=byte). - type: str - purge_filters: - description: - - Whether to remove existing filters from a condition if not passed in I(filters). - default: false - type: bool - waf_regional: - description: Whether to use waf-regional module. - default: false - required: no - type: bool - state: - description: Whether the condition should be C(present) or C(absent). - choices: - - present - - absent - default: present - type: str - + name: + description: Name of the Web Application Firewall condition to manage. + required: true + type: str + type: + description: The type of matching to perform. + choices: + - byte + - geo + - ip + - regex + - size + - sql + - xss + type: str + required: true + filters: + description: + - A list of the filters against which to match. + - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string). + - For I(type=geo), the only valid key is I(country). + - For I(type=ip), the only valid key is I(ip_address). + - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern). + - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size). + - For I(type=sql), valid keys are I(field_to_match) and I(transformation). + - For I(type=xss), valid keys are I(field_to_match) and I(transformation). + - Required when I(state=present). + type: list + elements: dict + suboptions: + field_to_match: + description: + - The field upon which to perform the match. + - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). + type: str + choices: ['uri', 'query_string', 'header', 'method', 'body'] + position: + description: + - Where in the field the match needs to occur. + - Only valid when I(type=byte). + type: str + choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word'] + header: + description: + - Which specific header should be matched. + - Required when I(field_to_match=header). + - Valid when I(type=byte). + type: str + transformation: + description: + - A transform to apply on the field prior to performing the match. 
+ - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). + type: str + choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode'] + country: + description: + - Value of geo constraint (typically a two letter country code). + - The only valid key when I(type=geo). + type: str + ip_address: + description: + - An IP Address or CIDR to match. + - The only valid key when I(type=ip). + type: str + regex_pattern: + description: + - A dict describing the regular expressions used to perform the match. + - Only valid when I(type=regex). + type: dict + suboptions: + name: + description: A name to describe the set of patterns. + type: str + regex_strings: + description: A list of regular expressions to match. + type: list + elements: str + comparison: + description: + - What type of comparison to perform. + - Only valid key when I(type=size). + type: str + choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT'] + size: + description: + - The size of the field (in bytes). + - Only valid key when I(type=size). + type: int + target_string: + description: + - The string to search for. + - May be up to 50 bytes. + - Valid when I(type=byte). + type: str + purge_filters: + description: + - Whether to remove existing filters from a condition if not passed in I(filters). + default: false + type: bool + waf_regional: + description: Whether to use C(waf-regional) module. + default: false + required: no + type: bool + state: + description: Whether the condition should be C(present) or C(absent). + choices: + - present + - absent + default: present + type: str ''' EXAMPLES = r''' - name: create WAF byte condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_byte_condition filters: - field_to_match: header @@ -149,7 +150,7 @@ type: byte - name: create WAF geo condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_geo_condition filters: - country: US @@ -158,7 +159,7 @@ type: geo - name: create IP address condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: "{{ resource_prefix }}_ip_condition" filters: - ip_address: "10.0.0.0/8" @@ -166,7 +167,7 @@ type: ip - name: create WAF regex condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_regex_condition filters: - field_to_match: query_string @@ -179,7 +180,7 @@ type: regex - name: create WAF size condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_size_condition filters: - field_to_match: query_string @@ -188,7 +189,7 @@ type: size - name: create WAF sql injection condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_sql_condition filters: - field_to_match: query_string @@ -196,7 +197,7 @@ type: sql - name: create WAF xss condition - community.aws.aws_waf_condition: + community.aws.waf_condition: name: my_xss_condition filters: - field_to_match: query_string @@ -728,7 +729,7 @@ def main(): if state == 'present': (changed, results) = condition.ensure_condition_present() - # return a condition agnostic ID for use by aws_waf_rule + # return a condition agnostic ID for use by waf_rule results['ConditionId'] = results[condition.conditionsetid] else: (changed, results) = condition.ensure_condition_absent() diff --git a/aws_waf_info.py b/waf_info.py similarity index 89% rename from aws_waf_info.py rename to waf_info.py index 838f9491dfd..81538e62923 100644 --- a/aws_waf_info.py +++ b/waf_info.py @@ -7,18 +7,20 @@ DOCUMENTATION = ''' -module: 
aws_waf_info -short_description: Retrieve information for WAF ACLs, Rule , Conditions and Filters. +module: waf_info +short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters version_added: 1.0.0 description: - - Retrieve information for WAF ACLs, Rule , Conditions and Filters. + - Retrieve information for WAF ACLs, Rules, Conditions and Filters. + - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_info). + The usage did not change. options: name: description: - The name of a Web Application Firewall. type: str waf_regional: - description: Whether to use the waf-regional module. + description: Whether to use the C(waf-regional) module. default: false required: no type: bool @@ -27,21 +29,20 @@ - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: obtain all WAF information - community.aws.aws_waf_info: + community.aws.waf_info: - name: obtain all information for a single WAF - community.aws.aws_waf_info: + community.aws.waf_info: name: test_waf - name: obtain all information for a single WAF Regional - community.aws.aws_waf_info: + community.aws.waf_info: name: test_waf waf_regional: true ''' diff --git a/aws_waf_rule.py b/waf_rule.py similarity index 86% rename from aws_waf_rule.py rename to waf_rule.py index f5701b2ff00..188c6de9df6 100644 --- a/aws_waf_rule.py +++ b/waf_rule.py @@ -8,74 +8,73 @@ DOCUMENTATION = r''' -module: aws_waf_rule +module: waf_rule short_description: Create and delete WAF Rules version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/). + - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_rule). + The usage did not change. author: - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: - name: - description: Name of the Web Application Firewall rule. - required: yes - type: str - metric_name: - description: - - A friendly name or description for the metrics for the rule. - - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. - - You can't change I(metric_name) after you create the rule. - - Defaults to the same as I(name) with disallowed characters removed. + name: + description: Name of the Web Application Firewall rule. + required: yes + type: str + metric_name: + description: + - A friendly name or description for the metrics for the rule. + - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name may not contain whitespace. + - You can't change I(metric_name) after you create the rule. + - Defaults to the same as I(name) with disallowed characters removed. + type: str + state: + description: Whether the rule should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + conditions: + description: > + List of conditions used in the rule. M(community.aws.waf_condition) can be used to create new conditions. + type: list + elements: dict + suboptions: + type: + required: true type: str - state: - description: Whether the rule should be present or absent. - choices: - - present - - absent - default: present - type: str - conditions: - description: > - List of conditions used in the rule. M(community.aws.aws_waf_condition) can be used to - create new conditions. 
- type: list - elements: dict - suboptions: - type: - required: true - type: str - choices: ['byte','geo','ip','size','sql','xss'] - description: The type of rule to match. - negated: - required: true - type: bool - description: Whether the condition should be negated. - condition: - required: true - type: str - description: The name of the condition. The condition must already exist. - purge_conditions: - description: - - Whether or not to remove conditions that are not passed when updating I(conditions). - default: false - type: bool - waf_regional: - description: Whether to use waf-regional module. - default: false - required: false + choices: ['byte','geo','ip','size','sql','xss'] + description: The type of rule to match. + negated: + required: true type: bool + description: Whether the condition should be negated. + condition: + required: true + type: str + description: The name of the condition. The condition must already exist. + purge_conditions: + description: + - Whether or not to remove conditions that are not passed when updating I(conditions). + default: false + type: bool + waf_regional: + description: Whether to use C(waf-regional) module. + default: false + required: false + type: bool ''' EXAMPLES = r''' - name: create WAF rule - community.aws.aws_waf_rule: + community.aws.waf_rule: name: my_waf_rule conditions: - name: my_regex_condition @@ -89,7 +88,7 @@ negated: yes - name: remove WAF rule - community.aws.aws_waf_rule: + community.aws.waf_rule: name: "my_waf_rule" state: absent ''' diff --git a/aws_waf_web_acl.py b/waf_web_acl.py similarity index 83% rename from aws_waf_web_acl.py rename to waf_web_acl.py index 609df528a0a..c4958a7c41f 100644 --- a/aws_waf_web_acl.py +++ b/waf_web_acl.py @@ -7,86 +7,88 @@ DOCUMENTATION = r''' -module: aws_waf_web_acl +module: waf_web_acl short_description: Create and delete WAF Web ACLs version_added: 1.0.0 description: - Module for WAF classic, for WAF v2 use the I(wafv2_*) modules. - Read the AWS documentation for WAF U(https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html). + - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_web_acl). + The usage did not change. author: - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 options: - name: - description: Name of the Web Application Firewall ACL to manage. - required: yes + name: + description: Name of the Web Application Firewall ACL to manage. + required: yes + type: str + default_action: + description: The action that you want AWS WAF to take when a request doesn't + match the criteria specified in any of the Rule objects that are associated with the WebACL. + choices: + - block + - allow + - count + type: str + state: + description: Whether the Web ACL should be present or absent. + choices: + - present + - absent + default: present + type: str + metric_name: + description: + - A friendly name or description for the metrics for this WebACL. + - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. + - You can't change I(metric_name) after you create the WebACL. + - Metric name will default to I(name) with disallowed characters stripped out. + type: str + rules: + description: + - A list of rules that the Web ACL will enforce. + type: list + elements: dict + suboptions: + name: + description: Name of the rule. 
type: str - default_action: - description: The action that you want AWS WAF to take when a request doesn't - match the criteria specified in any of the Rule objects that are associated with the WebACL. - choices: - - block - - allow - - count + required: true + action: + description: The action to perform. type: str - state: - description: Whether the Web ACL should be present or absent. + required: true + priority: + description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first. + type: int + required: true + type: + description: The type of rule. choices: - - present - - absent - default: present + - rate_based + - regular type: str - metric_name: - description: - - A friendly name or description for the metrics for this WebACL. - - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. - - You can't change I(metric_name) after you create the WebACL. - - Metric name will default to I(name) with disallowed characters stripped out. - type: str - rules: - description: - - A list of rules that the Web ACL will enforce. - type: list - elements: dict - suboptions: - name: - description: Name of the rule. - type: str - required: true - action: - description: The action to perform. - type: str - required: true - priority: - description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first. - type: int - required: true - type: - description: The type of rule. - choices: - - rate_based - - regular - type: str - purge_rules: - description: - - Whether to remove rules that aren't passed with I(rules). - default: False - type: bool - waf_regional: - description: Whether to use waf-regional module. - default: false - required: no - type: bool + purge_rules: + description: + - Whether to remove rules that aren't passed with I(rules). + default: False + type: bool + waf_regional: + description: Whether to use C(waf-regional) module. 
+ default: false + required: no + type: bool ''' EXAMPLES = r''' - name: create web ACL - community.aws.aws_waf_web_acl: + community.aws.waf_web_acl: name: my_web_acl rules: - name: my_rule @@ -97,7 +99,7 @@ state: present - name: delete the web acl - community.aws.aws_waf_web_acl: + community.aws.waf_web_acl: name: my_web_acl state: absent ''' From 1a83126fcbd551761ae27ba894388bbf17e32c32 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 13:37:58 +0200 Subject: [PATCH 521/683] Rename autoscaling modules (#1294) Rename autoscaling modules SUMMARY Rename the autoscaling modules based on the new naming scheme ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/autoscaling_group.py plugins/modules/autoscaling_group_info.py plugins/modules/autoscaling_instance_refresh.py plugins/modules/autoscaling_instance_refresh_info.py plugins/modules/autoscaling_launch_config.py plugins/modules/autoscaling_launch_config_find.py plugins/modules/autoscaling_launch_config_info.py plugins/modules/autoscaling_lifecycle_hook.py plugins/modules/autoscaling_policy.py plugins/modules/autoscaling_scheduled_action.py plugins/modules/ec2_asg.py plugins/modules/ec2_asg_info.py plugins/modules/ec2_asg_instance_refresh.py plugins/modules/ec2_asg_instance_refresh_info.py plugins/modules/ec2_asg_lifecycle_hook.py plugins/modules/ec2_asg_scheduled_action.py plugins/modules/ec2_lc.py plugins/modules/ec2_lc_find.py plugins/modules/ec2_lc_info.py plugins/modules/ec2_scaling_policy.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- ec2_asg.py => autoscaling_group.py | 31 +++++------ ec2_asg_info.py => autoscaling_group_info.py | 32 ++++++------ ...resh.py => autoscaling_instance_refresh.py | 20 ++++---- ...py => autoscaling_instance_refresh_info.py | 41 +++++++-------- ec2_lc.py => autoscaling_launch_config.py | 32 ++++++------ ...nd.py => autoscaling_launch_config_find.py | 17 ++++--- ...fo.py => autoscaling_launch_config_info.py | 51 ++++++++++--------- ...e_hook.py => autoscaling_lifecycle_hook.py | 33 ++++++------ ...scaling_policy.py => autoscaling_policy.py | 47 ++++++++--------- ...tion.py => autoscaling_scheduled_action.py | 13 +++-- ec2_launch_template.py | 2 +- 11 files changed, 165 insertions(+), 154 deletions(-) rename ec2_asg.py => autoscaling_group.py (99%) rename ec2_asg_info.py => autoscaling_group_info.py (95%) rename ec2_asg_instance_refresh.py => autoscaling_instance_refresh.py (95%) rename ec2_asg_instance_refresh_info.py => autoscaling_instance_refresh_info.py (83%) rename ec2_lc.py => autoscaling_launch_config.py (96%) rename ec2_lc_find.py => autoscaling_launch_config_find.py (95%) rename ec2_lc_info.py => autoscaling_launch_config_info.py (85%) rename ec2_asg_lifecycle_hook.py => autoscaling_lifecycle_hook.py (93%) rename ec2_scaling_policy.py => autoscaling_policy.py (92%) rename ec2_asg_scheduled_action.py => autoscaling_scheduled_action.py (96%) diff --git a/ec2_asg.py b/autoscaling_group.py similarity index 99% rename from ec2_asg.py rename to autoscaling_group.py index 0aa16aeaa0b..753f2a08727 100644 --- a/ec2_asg.py +++ b/autoscaling_group.py @@ -8,13 +8,16 @@ DOCUMENTATION = r''' --- -module: ec2_asg +module: autoscaling_group version_added: 1.0.0 short_description: Create or delete AWS AutoScaling Groups (ASGs) description: - Can create or delete AWS AutoScaling Groups. - - Can be used with the M(community.aws.ec2_lc) module to manage Launch Configurations. 
-author: "Gareth Rushgrove (@garethr)" + - Can be used with the M(community.aws.autoscaling_launch_config) module to manage Launch Configurations. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg). + The usage did not change. +author: + - "Gareth Rushgrove (@garethr)" options: state: description: @@ -45,7 +48,7 @@ elements: str launch_config_name: description: - - Name of the Launch configuration to use for the group. See the community.aws.ec2_lc) module for managing these. + - Name of the Launch configuration to use for the group. See the community.aws.autoscaling_launch_config) module for managing these. - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided. type: str launch_template: @@ -318,15 +321,14 @@ type: list elements: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Basic configuration with Launch Configuration -- community.aws.ec2_asg: +- community.aws.autoscaling_group: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] @@ -352,7 +354,7 @@ # will have the current launch configuration. - name: create launch config - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: my_new_lc image_id: ami-lkajsf key_name: mykey @@ -361,7 +363,7 @@ instance_type: m1.small assign_public_ip: yes -- community.aws.ec2_asg: +- community.aws.autoscaling_group: name: myasg launch_config_name: my_new_lc health_check_period: 60 @@ -375,7 +377,7 @@ # To only replace a couple of instances instead of all of them, supply a list # to "replace_instances": -- community.aws.ec2_asg: +- community.aws.autoscaling_group: name: myasg launch_config_name: my_new_lc health_check_period: 60 @@ -390,7 +392,7 @@ # Basic Configuration with Launch Template -- community.aws.ec2_asg: +- community.aws.autoscaling_group: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] @@ -408,7 +410,7 @@ # Basic Configuration with Launch Template using mixed instance policy -- community.aws.ec2_asg: +- community.aws.autoscaling_group: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] @@ -508,7 +510,7 @@ launch_config_name: description: > Name of launch configuration associated with the ASG. Same as launch_configuration_name, - provided for compatibility with ec2_asg module. + provided for compatibility with M(community.aws.autoscaling_group) module. 
returned: success type: str sample: "public-webapp-production-1" @@ -652,7 +654,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', diff --git a/ec2_asg_info.py b/autoscaling_group_info.py similarity index 95% rename from ec2_asg_info.py rename to autoscaling_group_info.py index 55d463e096b..4db9ac26a37 100644 --- a/ec2_asg_info.py +++ b/autoscaling_group_info.py @@ -8,12 +8,15 @@ DOCUMENTATION = ''' --- -module: ec2_asg_info +module: autoscaling_group_info version_added: 1.0.0 -short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS +short_description: Gather information about EC2 Auto Scaling Groups (ASGs) in AWS description: - - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS -author: "Rob White (@wimnat)" + - Gather information about EC2 Auto Scaling Groups (ASGs) in AWS. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_info). + The usage did not change. +author: + - "Rob White (@wimnat)" options: name: description: @@ -29,45 +32,44 @@ required: false type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Find all groups - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: register: asgs - name: Find a group with matching name/prefix - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: name: public-webserver-asg register: asgs - name: Find a group with matching tags - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: tags: project: webapp env: production register: asgs - name: Find a group with matching name/prefix and tags - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: name: myproject tags: env: production register: asgs - name: Fail if no groups are found - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length == 0 }}" - name: Fail if more than 1 group is found - community.aws.ec2_asg_info: + community.aws.autoscaling_group_info: name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length > 1 }}" @@ -132,7 +134,7 @@ launch_config_name: description: > Name of launch configuration associated with the ASG. Same as launch_configuration_name, - provided for compatibility with ec2_asg module. + provided for compatibility with M(community.aws.autoscaling_group) module. 
returned: success type: str sample: "public-webapp-production-1" @@ -401,7 +403,7 @@ def find_asgs(conn, module, name=None, tags=None): if matched_name and matched_tags: asg = camel_dict_to_snake_dict(asg) - # compatibility with ec2_asg module + # compatibility with autoscaling_group module if 'launch_configuration_name' in asg: asg['launch_config_name'] = asg['launch_configuration_name'] # workaround for https://github.com/ansible/ansible/pull/25015 diff --git a/ec2_asg_instance_refresh.py b/autoscaling_instance_refresh.py similarity index 95% rename from ec2_asg_instance_refresh.py rename to autoscaling_instance_refresh.py index faa61fa74cb..43ef665e3f3 100644 --- a/ec2_asg_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -8,13 +8,16 @@ DOCUMENTATION = ''' --- -module: ec2_asg_instance_refresh +module: autoscaling_instance_refresh version_added: 3.2.0 short_description: Start or cancel an EC2 Auto Scaling Group (ASG) instance refresh in AWS description: - Start or cancel an EC2 Auto Scaling Group instance refresh in AWS. - - Can be used with M(community.aws.ec2_asg_instance_refresh_info) to track the subsequent progress. -author: "Dan Khersonsky (@danquixote)" + - Can be used with M(community.aws.autoscaling_instance_refresh_info) to track the subsequent progress. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh). + The usage did not change. +author: + - "Dan Khersonsky (@danquixote)" options: state: description: @@ -58,26 +61,25 @@ type: int type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Start a refresh - community.aws.ec2_asg_instance_refresh: + community.aws.autoscaling_instance_refresh: name: some-asg state: started - name: Cancel a refresh - community.aws.ec2_asg_instance_refresh: + community.aws.autoscaling_instance_refresh: name: some-asg state: cancelled - name: Start a refresh and pass preferences - community.aws.ec2_asg_instance_refresh: + community.aws.autoscaling_instance_refresh: name: some-asg state: started preferences: diff --git a/ec2_asg_instance_refresh_info.py b/autoscaling_instance_refresh_info.py similarity index 83% rename from ec2_asg_instance_refresh_info.py rename to autoscaling_instance_refresh_info.py index d4a12380098..4d9cb7e05b7 100644 --- a/ec2_asg_instance_refresh_info.py +++ b/autoscaling_instance_refresh_info.py @@ -10,12 +10,14 @@ DOCUMENTATION = ''' --- -module: ec2_asg_instance_refresh_info +module: autoscaling_instance_refresh_info version_added: 3.2.0 -short_description: Gather information about ec2 Auto Scaling Group (ASG) Instance Refreshes in AWS +short_description: Gather information about EC2 Auto Scaling Group (ASG) Instance Refreshes in AWS description: - Describes one or more instance refreshes. - You can determine the status of a request by looking at the I(status) parameter. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info). + The usage did not change. author: "Dan Khersonsky (@danquixote)" options: name: @@ -39,32 +41,31 @@ type: int required: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
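# An illustrative pattern (the group name is a placeholder, and the
# instance_refreshes return list is assumed from the module documentation):
# poll until a started refresh reaches a terminal status.
- name: Wait for a refresh to reach a terminal status
  community.aws.autoscaling_instance_refresh_info:
    name: somename-asg
  register: refreshes
  until: refreshes.instance_refreshes[0].status in ['Successful', 'Failed', 'Cancelled']
  retries: 30
  delay: 60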
- name: Find a refresh by ASG name - community.aws.ec2_asg_instance_refresh_info: + community.aws.autoscaling_instance_refresh_info: name: somename-asg - name: Find a refresh by ASG name and one or more refresh-IDs - community.aws.ec2_asg_instance_refresh_info: + community.aws.autoscaling_instance_refresh_info: name: somename-asg ids: ['some-id-123'] register: asgs - name: Find a refresh by ASG name and set max_records - community.aws.ec2_asg_instance_refresh_info: + community.aws.autoscaling_instance_refresh_info: name: somename-asg max_records: 4 register: asgs - name: Find a refresh by ASG name and NextToken, if received from a previous call - community.aws.ec2_asg_instance_refresh_info: + community.aws.autoscaling_instance_refresh_info: name: somename-asg next_token: 'some-token-123' register: asgs @@ -84,17 +85,17 @@ sample: "public-webapp-production-1" status: description: - - The current state of the group when DeleteAutoScalingGroup is in progress. - - The following are the possible statuses - - Pending -- The request was created, but the operation has not started. - - InProgress -- The operation is in progress. - - Successful -- The operation completed successfully. - - Failed -- The operation failed to complete. You can troubleshoot using the status reason and the scaling activities. - - Cancelling -- - - An ongoing operation is being cancelled. - - Cancellation does not roll back any replacements that have already been completed, - - but it prevents new replacements from being started. - - Cancelled -- The operation is cancelled. + - The current state of the instance refresh. + - The following are the possible statuses + - C(Pending) - The request was created, but the operation has not started. + - C(InProgress) - The operation is in progress. + - C(Successful) - The operation completed successfully. + - C(Failed) - The operation failed to complete. + You can troubleshoot using the status reason and the scaling activities. + - C(Cancelling) - An ongoing operation is being cancelled. + Cancellation does not roll back any replacements that have already been + completed, but it prevents new replacements from being started. + - C(Cancelled) - The operation is cancelled. returned: success type: str sample: "Pending" diff --git a/ec2_lc.py b/autoscaling_launch_config.py similarity index 96% rename from ec2_lc.py rename to autoscaling_launch_config.py index 4b383f7279c..f1a014a563e 100644 --- a/ec2_lc.py +++ b/autoscaling_launch_config.py @@ -9,20 +9,21 @@ DOCUMENTATION = r''' --- -module: ec2_lc +module: autoscaling_launch_config version_added: 1.0.0 short_description: Create or delete AWS Autoscaling Launch Configurations description: - Can create or delete AWS Autoscaling Configurations. - - Works with the ec2_asg module to manage Autoscaling Groups. + - Works with the M(community.aws.autoscaling_group) module to manage Autoscaling Groups. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc). The usage did not change. notes: - Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration after it is changed will not modify the launch configuration on AWS. You must create a new config and assign it to the ASG instead. - author: - "Gareth Rushgrove (@garethr)" - "Willem van Ketwich (@wilvk)" options: @@ -54,14 +55,14 @@ type: str security_groups: description: - - A list of security groups to apply to the instances. Since version 2.4 you can specify either security group names or IDs or a mix.
Previous - to 2.4, for VPC instances, specify security group IDs and for EC2-Classic, specify either security group names or IDs. + - A list of security groups to apply to the instances. + - You can specify either security group names or IDs or a mix. type: list elements: str volumes: description: - A list of dictionaries defining the volumes to create. - - For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. + - For any volume, a volume size less than C(1) will be interpreted as a request not to create the volume. type: list elements: dict suboptions: @@ -180,17 +181,15 @@ - When not set AWS will default to C(default). type: str choices: ['default', 'dedicated'] - extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' - name: create a launch configuration with an encrypted volume - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: special image_id: ami-XXX key_name: default security_groups: ['group', 'group2' ] instance_type: t1.micro volumes: - device_name: /dev/sda1 volume_size: 100 volume_type: io1 iops: 3000 delete_on_termination: true encrypted: true - device_name: /dev/sdb ephemeral: ephemeral0 - name: create a launch configuration using a running instance id as a basis - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: special instance_id: i-00a48b207ec59e948 key_name: default security_groups: ['launch-wizard-2' ] volumes: - device_name: /dev/sdf volume_size: 400 volume_type: io1 iops: 3000 delete_on_termination: true - name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: special image_id: ami-XXX key_name: default security_groups: ['group', 'group2' ] instance_type: t1.micro volumes: - device_name: /dev/sdf snapshot: snap-XXXX volume_type: gp2 delete_on_termination: true volume_size: 10 - device_name: /dev/sdfa? No... encrypted: no - name: Create launch configuration - community.aws.ec2_lc: + community.aws.autoscaling_launch_config: name: lc1 image_id: ami-xxxx assign_public_ip: yes instance_type: t2.medium key_name: key-pair-name security_groups: sg-xxxx volumes: - device_name: /dev/sda1 volume_size: 20 volume_type: gp2 delete_on_termination: true @@ -350,7 +349,7 @@ type: bool sample: true ebs_optimized: - description: Indicates whether the instance is optimized for EBS I/O (true) or not (false). + description: Indicates whether the instance is optimized for EBS I/O (C(true)) or not (C(false)). returned: when I(state=present) type: bool sample: false @@ -360,7 +359,7 @@ type: str sample: ami-9be6f38c instance_monitoring: - description: Indicates whether instances in this group are launched with detailed (true) or basic (false) monitoring. + description: Indicates whether instances in this group are launched with detailed (C(true)) or basic (C(false)) monitoring. returned: when I(state=present) type: bool sample: true @@ -439,7 +438,6 @@ type: list sample: - sg-5e27db2f - ''' diff --git a/ec2_lc_find.py b/autoscaling_launch_config_find.py similarity index 95% rename from ec2_lc_find.py rename to autoscaling_launch_config_find.py index 3e525adc6cf..e1f8ec5308b 100644 --- a/ec2_lc_find.py +++ b/autoscaling_launch_config_find.py @@ -10,15 +10,17 @@ DOCUMENTATION = ''' --- -module: ec2_lc_find +module: autoscaling_launch_config_find version_added: 1.0.0 short_description: Find AWS Autoscaling Launch Configurations description: - Returns list of matching Launch Configurations for a given name, along with other useful information. - Results can be sorted and sliced. - - Based on the work by Tom Bamford U(https://github.com/tombamford) - -author: "Jose Armesto (@fiunchinho)" + - Based on the work by Tom Bamford U(https://github.com/tombamford). + - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_find). The usage did not change. +author: + - "Jose Armesto (@fiunchinho)" options: name_regex: description: - A Launch Configuration to match. - It'll be compiled as regex. required: True type: str sort_order: description: - Order in which to sort results. choices: ['ascending', 'descending'] default: ascending type: str limit: description: - How many results to show. - Corresponds to Python slice notation like list[:limit].
type: int extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws - + - amazon.aws.ec2 + - amazon.aws.aws ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Search for the Launch Configurations that start with "app" - community.aws.ec2_lc_find: + community.aws.autoscaling_launch_config_find: name_regex: app.* sort_order: descending limit: 2 diff --git a/ec2_lc_info.py b/autoscaling_launch_config_info.py similarity index 85% rename from ec2_lc_info.py rename to autoscaling_launch_config_info.py index d05bf9876b4..7a9cfae9ad1 100644 --- a/ec2_lc_info.py +++ b/autoscaling_launch_config_info.py @@ -9,12 +9,15 @@ DOCUMENTATION = r''' --- -module: ec2_lc_info +module: autoscaling_launch_config_info version_added: 1.0.0 -short_description: Gather information about AWS Autoscaling Launch Configurations. +short_description: Gather information about AWS Autoscaling Launch Configurations description: - - Gather information about AWS Autoscaling Launch Configurations. -author: "Loïc Latreille (@psykotox)" + - Gather information about AWS Autoscaling Launch Configurations. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_info). + The usage did not change. +author: + - "Loïc Latreille (@psykotox)" options: name: description: @@ -45,30 +48,29 @@ - Corresponds to Python slice notation. type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all launch configurations - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: - name: Gather information about launch configuration with name "example" - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: name: example - name: Gather information sorted by created_time from most recent to least recent - community.aws.ec2_lc_info: + community.aws.autoscaling_launch_config_info: sort: created_time sort_order: descending ''' RETURN = r''' block_device_mapping: - description: Block device mapping for the instances of launch configuration + description: Block device mapping for the instances of launch configuration. type: list returned: always sample: "[{ @@ -79,74 +81,73 @@ 'volume_type': 'gp2' }]" classic_link_vpc_security_groups: - description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id + description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id. type: str returned: always sample: created_time: - description: The creation date and time for the launch configuration + description: The creation date and time for the launch configuration. type: str returned: always sample: "2016-05-27T13:47:44.216000+00:00" ebs_optimized: - description: EBS I/O optimized (true ) or not (false ) + description: EBS I/O optimized C(true) or not C(false). type: bool returned: always sample: true, image_id: - description: ID of the Amazon Machine Image (AMI) + description: ID of the Amazon Machine Image (AMI). type: str returned: always sample: "ami-12345678" instance_monitoring: - description: Launched with detailed monitoring or not + description: Launched with detailed monitoring or not. type: dict returned: always sample: "{ 'enabled': true }" instance_type: - description: Instance type + description: Instance type. 
type: str returned: always sample: "t2.micro" kernel_id: - description: ID of the kernel associated with the AMI + description: ID of the kernel associated with the AMI. type: str returned: always sample: key_name: - description: Name of the key pair + description: Name of the key pair. type: str returned: always sample: "user_app" launch_configuration_arn: - description: Amazon Resource Name (ARN) of the launch configuration + description: Amazon Resource Name (ARN) of the launch configuration. type: str returned: always sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app" launch_configuration_name: - description: Name of the launch configuration + description: Name of the launch configuration. type: str returned: always sample: "lc-app" ramdisk_id: - description: ID of the RAM disk associated with the AMI + description: ID of the RAM disk associated with the AMI. type: str returned: always sample: security_groups: - description: Security groups to associated + description: Security groups associated with the launch configuration. type: list returned: always sample: "[ 'web' ]" user_data: - description: User data available + description: User data available. type: str returned: always - sample: ''' try: diff --git a/ec2_asg_lifecycle_hook.py b/autoscaling_lifecycle_hook.py similarity index 93% rename from ec2_asg_lifecycle_hook.py rename to autoscaling_lifecycle_hook.py index 351bba5b84d..55d28932338 100644 --- a/ec2_asg_lifecycle_hook.py +++ b/autoscaling_lifecycle_hook.py @@ -9,14 +9,17 @@ DOCUMENTATION = ''' --- -module: ec2_asg_lifecycle_hook +module: autoscaling_lifecycle_hook version_added: 1.0.0 -short_description: Create, delete or update AWS ASG Lifecycle Hooks. +short_description: Create, delete or update AWS ASG Lifecycle Hooks description: - Will create a new hook when I(state=present) and no given Hook is found. - Will update an existing hook when I(state=present) and a Hook is found, but current and provided parameters differ. - Will delete the hook when I(state=absent) and a Hook is found. -author: Igor 'Tsigankov' Eyrich (@tsiganenok) + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_lifecycle_hook). The usage did not change. +author: + - Igor 'Tsigankov' Eyrich (@tsiganenok) options: state: description: @@ -61,7 +64,7 @@ - The amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. - - By default Amazon AWS will use 3600 (1 hour) + - By default Amazon AWS will use C(3600) (1 hour). type: int default_result: description: - Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. choices: ['ABANDON', 'CONTINUE'] default: ABANDON type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Create / Update lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: eu-central-1 state: present autoscaling_group_name: example lifecycle_hook_name: example transition: autoscaling:EC2_INSTANCE_LAUNCHING heartbeat_timeout: 7000 default_result: ABANDON - name: Delete lifecycle hook - community.aws.ec2_asg_lifecycle_hook: + community.aws.autoscaling_lifecycle_hook: region: eu-central-1 state: absent autoscaling_group_name: example lifecycle_hook_name: example - ''' RETURN = ''' --- auto_scaling_group_name: - description: The unique name of the auto scaling group + description: The unique name of the auto scaling group.
returned: success type: str sample: "myasg" default_result: - description: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs + description: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. returned: success type: str sample: CONTINUE global_timeout: - description: The maximum time, in seconds, that an instance can remain in a Pending:Wait or Terminating:Wait state + description: The maximum time, in seconds, that an instance can remain in a C(Pending:Wait) or C(Terminating:Wait) state. returned: success type: int sample: 172800 heartbeat_timeout: - description: The maximum time, in seconds, that can elapse before the lifecycle hook times out + description: The maximum time, in seconds, that can elapse before the lifecycle hook times out. returned: success type: int sample: 3600 lifecycle_hook_name: - description: The name of the lifecycle hook + description: The name of the lifecycle hook. returned: success type: str sample: "mylifecyclehook" lifecycle_transition: - description: The instance state to which lifecycle hook should be attached + description: The instance state to which lifecycle hook should be attached. returned: success type: str sample: "autoscaling:EC2_INSTANCE_LAUNCHING" diff --git a/ec2_scaling_policy.py b/autoscaling_policy.py similarity index 92% rename from ec2_scaling_policy.py rename to autoscaling_policy.py index 7aeabd1d7da..f79cbafb43a 100644 --- a/ec2_scaling_policy.py +++ b/autoscaling_policy.py @@ -7,12 +7,14 @@ DOCUMENTATION = r''' -module: ec2_scaling_policy +module: autoscaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups version_added: 1.0.0 description: - Can create or delete scaling policies for autoscaling groups. - Referenced autoscaling groups must already exist. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_scaling_policy). + The usage did not change. author: - Zacharie Eakin (@zeekin) - Will Thames (@willthames) @@ -81,7 +83,7 @@ step_adjustments: type: list description: - - list of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment) + - List of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment). - Intervals must not overlap or have a gap between them. - At most, one item can have an undefined I(lower_bound). If any item has a negative lower_bound, then there must be a step adjustment with an undefined I(lower_bound). @@ -112,13 +114,12 @@ description: - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Simple Scale Down policy - community.aws.ec2_scaling_policy: + community.aws.autoscaling_policy: state: present region: US-XXX name: "scaledown-policy" @@ -132,7 +133,7 @@ # following creates a stepped policy: # From 20-40 (0-20 above threshold), increase by 50% of existing capacity # From 41-infinity, increase by 100% of existing capacity -- community.aws.ec2_scaling_policy: +- community.aws.autoscaling_policy: state: present region: US-XXX name: "step-scale-up-policy" @@ -149,47 +150,47 @@ RETURN = ''' adjustment_type: - description: Scaling policy adjustment type + description: Scaling policy adjustment type. 
returned: always type: str sample: PercentChangeInCapacity alarms: - description: Cloudwatch alarms related to the policy + description: Cloudwatch alarms related to the policy. returned: always type: complex contains: alarm_name: - description: name of the Cloudwatch alarm + description: Name of the Cloudwatch alarm. returned: always type: str sample: cpu-very-high alarm_arn: - description: ARN of the Cloudwatch alarm + description: ARN of the Cloudwatch alarm. returned: always type: str sample: arn:aws:cloudwatch:us-east-2:1234567890:alarm:cpu-very-high arn: - description: ARN of the scaling policy. Provided for backward compatibility, value is the same as I(policy_arn) + description: ARN of the scaling policy. Provided for backward compatibility, value is the same as I(policy_arn). returned: always type: str sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy as_name: - description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name) + description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name). returned: always type: str sample: app-asg auto_scaling_group_name: - description: Name of Auto Scaling Group + description: Name of Auto Scaling Group. returned: always type: str sample: app-asg metric_aggregation_type: - description: Method used to aggregate metrics + description: Method used to aggregate metrics. returned: when I(policy_type) is C(StepScaling) type: str sample: Maximum name: - description: Name of the scaling policy. Provided for backward compatibility, value is the same as I(policy_name) + description: Name of the scaling policy. Provided for backward compatibility, value is the same as I(policy_name). returned: always type: str sample: app-policy @@ -199,37 +200,37 @@ type: str sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy policy_name: - description: Name of scaling policy + description: Name of scaling policy. returned: always type: str sample: app-policy policy_type: - description: Type of auto scaling policy + description: Type of auto scaling policy. returned: always type: str sample: StepScaling scaling_adjustment: - description: Adjustment to make when alarm is triggered + description: Adjustment to make when alarm is triggered. returned: When I(policy_type) is C(SimpleScaling) type: int sample: 1 step_adjustments: - description: List of step adjustments + description: List of step adjustments. returned: always type: complex contains: metric_interval_lower_bound: - description: Lower bound for metric interval + description: Lower bound for metric interval. returned: if step has a lower bound type: float sample: 20.0 metric_interval_upper_bound: - description: Upper bound for metric interval + description: Upper bound for metric interval. returned: if step has an upper bound type: float sample: 40.0 scaling_adjustment: - description: Adjustment to make if this step is reached + description: Adjustment to make if this step is reached. 
returned: always type: int sample: 50 diff --git a/ec2_asg_scheduled_action.py b/autoscaling_scheduled_action.py similarity index 96% rename from ec2_asg_scheduled_action.py rename to autoscaling_scheduled_action.py index 5f41dc31b05..60c91403ff0 100644 --- a/ec2_asg_scheduled_action.py +++ b/autoscaling_scheduled_action.py @@ -11,13 +11,15 @@ DOCUMENTATION = r''' --- -module: ec2_asg_scheduled_action +module: autoscaling_scheduled_action version_added: 2.2.0 -short_description: Create, modify and delete ASG scheduled scaling actions. +short_description: Create, modify and delete ASG scheduled scaling actions description: - The module will create a new scheduled action when I(state=present) and no given action is found. - The module will update a new scheduled action when I(state=present) and the given action is found. - The module will delete a new scheduled action when I(state=absent) and the given action is found. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_scheduled_action). + The usage did not change. options: autoscaling_group_name: description: @@ -74,7 +76,7 @@ EXAMPLES = r''' # Create a scheduled action for a autoscaling group. - name: Create a minimal scheduled action for autoscaling group - community.aws.ec2_asg_scheduled_action: + community.aws.autoscaling_scheduled_action: region: eu-west-1 autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action @@ -85,7 +87,7 @@ register: scheduled_action - name: Create a scheduled action for autoscaling group - community.aws.ec2_asg_scheduled_action: + community.aws.autoscaling_scheduled_action: region: eu-west-1 autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action @@ -100,12 +102,13 @@ register: scheduled_action - name: Delete scheduled action - community.aws.ec2_asg_scheduled_action: + community.aws.autoscaling_scheduled_action: region: eu-west-1 autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action state: absent ''' + RETURN = r''' scheduled_action_name: description: The name of the scheduled action. diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 9e7bcd7cda2..85fd8d6e9c5 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -13,7 +13,7 @@ description: - Create, modify, and delete EC2 Launch Templates, which can be used to create individual instances or with Autoscaling Groups. -- The M(amazon.aws.ec2_instance) and M(community.aws.ec2_asg) modules can, instead of specifying all +- The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all parameters on those tasks, be passed a Launch Template which contains settings like instance size, disk type, subnet, and more. 
extends_documentation_fragment: From 6ad2b1df40486beb04e43825146fccd051601234 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 14:43:39 +0200 Subject: [PATCH 522/683] Rename Glue jobs (#1300) Rename Glue jobs SUMMARY In line with our new naming guidelines, rename the glue jobs ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_glue_connection.py plugins/modules/aws_glue_crawler.py plugins/modules/aws_glue_job.py plugins/modules/glue_connection.py plugins/modules/glue_crawler.py plugins/modules/glue_job.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_glue_connection.py => glue_connection.py | 21 ++++++++++---------- aws_glue_crawler.py => glue_crawler.py | 21 +++++++++----------- aws_glue_job.py => glue_job.py | 9 +++++---- 3 files changed, 25 insertions(+), 26 deletions(-) rename aws_glue_connection.py => glue_connection.py (97%) rename aws_glue_crawler.py => glue_crawler.py (97%) rename aws_glue_job.py => glue_job.py (99%) diff --git a/aws_glue_connection.py b/glue_connection.py similarity index 97% rename from aws_glue_connection.py rename to glue_connection.py index 07bdddd92ab..c8adb83fc66 100644 --- a/aws_glue_connection.py +++ b/glue_connection.py @@ -8,12 +8,15 @@ DOCUMENTATION = r''' --- -module: aws_glue_connection +module: glue_connection version_added: 1.0.0 short_description: Manage an AWS Glue connection description: - - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. -author: "Rob White (@wimnat)" + - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. + - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_connection). The usage did not change. +author: + - "Rob White (@wimnat)" options: availability_zone: description: - Availability Zone used by the connection. - Required when I(connection_type=NETWORK). type: str @@ -69,16 +72,15 @@ - Required when I(connection_type=NETWORK). type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue connection - community.aws.aws_glue_connection: + community.aws.glue_connection: name: my-glue-connection connection_properties: JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename USERNAME: my-username PASSWORD: my-password state: present # Create an AWS Glue network connection - community.aws.aws_glue_connection: + community.aws.glue_connection: name: my-glue-network-connection availability_zone: us-east-1a connection_properties: JDBC_ENFORCE_SSL: "false" connection_type: NETWORK description: Test connection security_groups: - sg-glue subnet_id: subnet-123abc state: present # Delete an AWS Glue connection - community.aws.aws_glue_connection: + community.aws.glue_connection: name: my-glue-connection state: absent - ''' RETURN = r''' diff --git a/aws_glue_crawler.py b/glue_crawler.py similarity index 97% rename from aws_glue_crawler.py rename to glue_crawler.py index 11ff1b63304..ffe6efd1636 100644 --- a/aws_glue_crawler.py +++ b/glue_crawler.py @@ -8,11 +8,13 @@ DOCUMENTATION = r''' --- -module: aws_glue_crawler +module: glue_crawler version_added: 4.1.0 short_description: Manage an AWS Glue crawler description: - - Manage an AWS Glue crawler. See U(https://aws.amazon.com/glue/) for details. + - Manage an AWS Glue crawler. See U(https://aws.amazon.com/glue/) for details. + - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_crawler). The usage did not change. author: - 'Ivan Chekaldin (@ichekaldin)' options: @@ -75,16 +77,16 @@ - Required when I(state=present).
type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue crawler -- community.aws.aws_glue_crawler: +- community.aws.glue_crawler: name: my-glue-crawler database_name: my_database role: my-iam-role @@ -103,10 +105,9 @@ state: present # Delete an AWS Glue crawler -- community.aws.aws_glue_crawler: +- community.aws.glue_crawler: name: my-glue-crawler state: absent - ''' RETURN = r''' @@ -198,8 +199,6 @@ type: list ''' -# Non-ansible imports -import copy try: import botocore except ImportError: @@ -210,9 +209,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info diff --git a/aws_glue_job.py b/glue_job.py similarity index 99% rename from aws_glue_job.py rename to glue_job.py index e95e9d69163..3fc2ba929fb 100644 --- a/aws_glue_job.py +++ b/glue_job.py @@ -8,11 +8,13 @@ DOCUMENTATION = r''' --- -module: aws_glue_job +module: glue_job version_added: 1.0.0 short_description: Manage an AWS Glue job description: - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. + - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_job). + The usage did not change. author: - "Rob White (@wimnat)" - "Vijayanand Sharma (@vijayanandsharma)" @@ -110,7 +112,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
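# As a sketch only (not part of this patch), credentials can instead be passed
# inline through the standard amazon.aws options; the variables below are
# placeholders:
#
# - community.aws.glue_job:
#     name: my-glue-job
#     role: my-iam-role
#     command_script_location: "s3://s3bucket/script.py"
#     aws_access_key: "{{ my_access_key }}"
#     aws_secret_key: "{{ my_secret_key }}"
#     state: present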
# Create an AWS Glue job -- community.aws.aws_glue_job: +- community.aws.glue_job: command_script_location: "s3://s3bucket/script.py" default_arguments: "--extra-py-files": s3://s3bucket/script-package.zip @@ -120,10 +122,9 @@ state: present # Delete an AWS Glue job -- community.aws.aws_glue_job: +- community.aws.glue_job: name: my-glue-job state: absent - ''' RETURN = r''' From 1e7ef3baee8010ca0faef93af5405f1e47cf1e69 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 17:02:00 +0200 Subject: [PATCH 523/683] Rename cloudwatch metrics_alarm (#1304) Rename cloudwatch metrics_alarm SUMMARY In line with the new naming guidelines rename ec2_metric_alarm to cloudwatch_metric_alarm ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/cloudwatch_metric_alarm.py plugins/modules/ec2_metric_alarm.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ...ric_alarm.py => cloudwatch_metric_alarm.py | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) rename ec2_metric_alarm.py => cloudwatch_metric_alarm.py (96%) diff --git a/ec2_metric_alarm.py b/cloudwatch_metric_alarm.py similarity index 96% rename from ec2_metric_alarm.py rename to cloudwatch_metric_alarm.py index 8c30909907e..dbe6bf43f94 100644 --- a/ec2_metric_alarm.py +++ b/cloudwatch_metric_alarm.py @@ -19,13 +19,16 @@ DOCUMENTATION = r''' -module: ec2_metric_alarm -short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" +module: cloudwatch_metric_alarm +short_description: "Create/update or delete AWS CloudWatch 'metric alarms'" version_added: 1.0.0 description: - - Can create or delete AWS metric alarms. + - Can create or delete AWS CloudWatch metric alarms. - Metrics you wish to alarm on must already exist. -author: "Zacharie Eakin (@Zeekin)" + - Prior to release 5.0.0 this module was called C(community.aws.ec2_metric_alarm). + The usage did not change. +author: + - "Zacharie Eakin (@Zeekin)" options: state: description: @@ -46,7 +49,7 @@ type: str namespace: description: - - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in cloudwatch. + - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in CloudWatch. 
required: false type: str statistic: @@ -156,14 +159,13 @@ - 'missing' default: 'missing' extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' - name: create alarm - community.aws.ec2_metric_alarm: + community.aws.cloudwatch_metric_alarm: state: present region: ap-southeast-2 name: "cpu-low" @@ -180,7 +182,7 @@ alarm_actions: ["action1","action2"] - name: Create an alarm to recover a failed instance - community.aws.ec2_metric_alarm: + community.aws.cloudwatch_metric_alarm: state: present region: us-west-1 name: "recover-instance" @@ -195,7 +197,6 @@ description: "This will recover an instance when it fails" dimensions: {"InstanceId":'i-XXX'} alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] - ''' try: From e975c73cfd431b72751daf07cc828bc7f810aba2 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 4 Jul 2022 19:39:39 +0200 Subject: [PATCH 524/683] Rename AWS config modules (#1305) Rename AWS config modules SUMMARY Rename various aws_config modules in line with the naming guidelines (remove the aws_ prefix) ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_config_aggregation_authorization.py plugins/modules/config_aggregation_authorization.py plugins/modules/aws_config_aggregator.py plugins/modules/config_aggregator.py plugins/modules/aws_config_delivery_channel.py plugins/modules/config_delivery_channel.py plugins/modules/aws_config_recorder.py plugins/modules/config_recorder.py plugins/modules/aws_config_rule.py plugins/modules/config_rule.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ....py => config_aggregation_authorization.py | 19 +++--- ...nfig_aggregator.py => config_aggregator.py | 33 +++++----- ...y_channel.py => config_delivery_channel.py | 25 ++++---- aws_config_recorder.py => config_recorder.py | 53 ++++++++-------- aws_config_rule.py => config_rule.py | 63 +++++++++---------- 5 files changed, 94 insertions(+), 99 deletions(-) rename aws_config_aggregation_authorization.py => config_aggregation_authorization.py (91%) rename aws_config_aggregator.py => config_aggregator.py (89%) rename aws_config_delivery_channel.py => config_delivery_channel.py (90%) rename aws_config_recorder.py => config_recorder.py (75%) rename aws_config_rule.py => config_rule.py (74%) diff --git a/aws_config_aggregation_authorization.py b/config_aggregation_authorization.py similarity index 91% rename from aws_config_aggregation_authorization.py rename to config_aggregation_authorization.py index f3a8591c697..d2b97fa9a4b 100644 --- a/aws_config_aggregation_authorization.py +++ b/config_aggregation_authorization.py @@ -9,41 +9,40 @@ DOCUMENTATION = ''' --- -module: aws_config_aggregation_authorization +module: config_aggregation_authorization version_added: 1.0.0 short_description: Manage cross-account AWS Config authorizations description: - - Module manages AWS Config resources. + - Module manages AWS Config aggregation authorizations. author: - - "Aaron Smith (@slapula)" + - "Aaron Smith (@slapula)" options: state: description: - - Whether the Config rule should be present or absent. + - Whether the Config rule should be present or absent. default: present choices: ['present', 'absent'] type: str authorized_account_id: description: - - The 12-digit account ID of the account authorized to aggregate data. + - The 12-digit account ID of the account authorized to aggregate data. type: str required: true authorized_aws_region: description: - - The region authorized to collect aggregated data. 
+ - The region authorized to collect aggregated data. type: str required: true extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Get current account ID community.aws.aws_caller_info: register: whoami -- community.aws.aws_config_aggregation_authorization: +- community.aws.config_aggregation_authorization: state: present authorized_account_id: '{{ whoami.account }}' authorized_aws_region: us-east-1 diff --git a/aws_config_aggregator.py b/config_aggregator.py similarity index 89% rename from aws_config_aggregator.py rename to config_aggregator.py index f46f11fcafa..a78a2cf84d2 100644 --- a/aws_config_aggregator.py +++ b/config_aggregator.py @@ -9,74 +9,73 @@ DOCUMENTATION = r''' --- -module: aws_config_aggregator +module: config_aggregator version_added: 1.0.0 short_description: Manage AWS Config aggregations across multiple accounts description: - - Module manages AWS Config resources + - Module manages AWS Config aggregator resources. author: - - "Aaron Smith (@slapula)" + - "Aaron Smith (@slapula)" options: name: description: - - The name of the AWS Config resource. + - The name of the AWS Config resource. required: true type: str state: description: - - Whether the Config rule should be present or absent. + - Whether the Config rule should be present or absent. default: present choices: ['present', 'absent'] type: str account_sources: description: - - Provides a list of source accounts and regions to be aggregated. + - Provides a list of source accounts and regions to be aggregated. suboptions: account_ids: description: - - A list of 12-digit account IDs of accounts being aggregated. + - A list of 12-digit account IDs of accounts being aggregated. type: list elements: str aws_regions: description: - - A list of source regions being aggregated. + - A list of source regions being aggregated. type: list elements: str all_aws_regions: description: - - If true, aggregate existing AWS Config regions and future regions. + - If true, aggregate existing AWS Config regions and future regions. type: bool type: list elements: dict required: true organization_source: description: - - The region authorized to collect aggregated data. + - The region authorized to collect aggregated data. suboptions: role_arn: description: - - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. + - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. type: str aws_regions: description: - - The source regions being aggregated. + - The source regions being aggregated. type: list elements: str all_aws_regions: description: - - If true, aggregate existing AWS Config regions and future regions. + - If true, aggregate existing AWS Config regions and future regions. 
type: bool type: dict required: true extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' - name: Create cross-account aggregator - community.aws.aws_config_aggregator: + community.aws.config_aggregator: name: test_config_rule state: present account_sources: diff --git a/aws_config_delivery_channel.py b/config_delivery_channel.py similarity index 90% rename from aws_config_delivery_channel.py rename to config_delivery_channel.py index fb3851a4ecc..4a282335e41 100644 --- a/aws_config_delivery_channel.py +++ b/config_delivery_channel.py @@ -9,52 +9,51 @@ DOCUMENTATION = ''' --- -module: aws_config_delivery_channel +module: config_delivery_channel version_added: 1.0.0 short_description: Manage AWS Config delivery channels description: - - This module manages AWS Config delivery locations for rule checks and configuration info. + - This module manages AWS Config delivery locations for rule checks and configuration info. author: - - "Aaron Smith (@slapula)" + - "Aaron Smith (@slapula)" options: name: description: - - The name of the AWS Config resource. + - The name of the AWS Config resource. required: true type: str state: description: - - Whether the Config rule should be present or absent. + - Whether the Config rule should be present or absent. default: present choices: ['present', 'absent'] type: str s3_bucket: description: - - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files. + - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files. type: str required: true s3_prefix: description: - - The prefix for the specified Amazon S3 bucket. + - The prefix for the specified Amazon S3 bucket. type: str sns_topic_arn: description: - - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes. + - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes. type: str delivery_frequency: description: - - The frequency with which AWS Config delivers configuration snapshots. + - The frequency with which AWS Config delivers configuration snapshots. choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Create Delivery Channel for AWS Config - community.aws.aws_config_delivery_channel: + community.aws.config_delivery_channel: name: test_delivery_channel state: present s3_bucket: 'test_aws_config_bucket' diff --git a/aws_config_recorder.py b/config_recorder.py similarity index 75% rename from aws_config_recorder.py rename to config_recorder.py index e9c2cbc17ad..6fda4a6df8a 100644 --- a/aws_config_recorder.py +++ b/config_recorder.py @@ -9,65 +9,64 @@ DOCUMENTATION = ''' --- -module: aws_config_recorder +module: config_recorder version_added: 1.0.0 short_description: Manage AWS Config Recorders description: - - Module manages AWS Config configuration recorder settings. + - Module manages AWS Config configuration recorder settings. author: - - "Aaron Smith (@slapula)" + - "Aaron Smith (@slapula)" options: name: description: - - The name of the AWS Config resource. + - The name of the AWS Config resource. required: true type: str state: description: - - Whether the Config rule should be present or absent. 
+ - Whether the Config rule should be present or absent. default: present choices: ['present', 'absent'] type: str role_arn: description: - - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account. - - Required when I(state=present). + - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account. + - Required when I(state=present). type: str recording_group: description: - - Specifies the types of AWS resources for which AWS Config records configuration changes. - - Required when I(state=present) + - Specifies the types of AWS resources for which AWS Config records configuration changes. + - Required when I(state=present) suboptions: all_supported: description: - - Specifies whether AWS Config records configuration changes for every supported type of regional resource. - - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts - recording resources of that type automatically. - - If I(all_supported=true), you cannot enumerate a list of I(resource_types). + - Specifies whether AWS Config records configuration changes for every supported type of regional resource. + - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts + recording resources of that type automatically. + - If I(all_supported=true), you cannot enumerate a list of I(resource_types). include_global_types: description: - - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) - with the resources that it records. - - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, - you should consider customizing AWS Config in only one region to record global resources. - - If you set I(include_global_types=true), you must also set I(all_supported=true). - - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording - resources of that type automatically. + - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) + with the resources that it records. + - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, + you should consider customizing AWS Config in only one region to record global resources. + - If you set I(include_global_types=true), you must also set I(all_supported=true). + - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording + resources of that type automatically. resource_types: description: - - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, - C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)). - - Before you can set this option, you must set I(all_supported=false). + - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, + C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)). + - Before you can set this option, you must set I(all_supported=false). 
type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Create Configuration Recorder for AWS Config - community.aws.aws_config_recorder: + community.aws.config_recorder: name: test_configuration_recorder state: present role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' diff --git a/aws_config_rule.py b/config_rule.py similarity index 74% rename from aws_config_rule.py rename to config_rule.py index a27236d82b0..ef4e9fab392 100644 --- a/aws_config_rule.py +++ b/config_rule.py @@ -9,89 +9,88 @@ DOCUMENTATION = ''' --- -module: aws_config_rule +module: config_rule version_added: 1.0.0 -short_description: Manage AWS Config resources +short_description: Manage AWS Config rule resources description: - - Module manages AWS Config rules + - Module manages AWS Config rules. author: - - "Aaron Smith (@slapula)" + - "Aaron Smith (@slapula)" options: name: description: - - The name of the AWS Config resource. + - The name of the AWS Config resource. required: true type: str state: description: - - Whether the Config rule should be present or absent. + - Whether the Config rule should be present or absent. default: present choices: ['present', 'absent'] type: str description: description: - - The description that you provide for the AWS Config rule. + - The description that you provide for the AWS Config rule. type: str scope: description: - - Defines which resources can trigger an evaluation for the rule. + - Defines which resources can trigger an evaluation for the rule. suboptions: compliance_types: description: - - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. - You can only specify one type if you also specify a resource ID for I(compliance_id). + - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. + You can only specify one type if you also specify a resource ID for I(compliance_id). compliance_id: description: - - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, - you must specify one resource type for I(compliance_types). + - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, + you must specify one resource type for I(compliance_types). tag_key: description: - - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule. + - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule. tag_value: description: - - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. - If you specify a value for I(tag_value), you must also specify a value for I(tag_key). + - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. + If you specify a value for I(tag_value), you must also specify a value for I(tag_key). type: dict source: description: - - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to - evaluate your AWS resources. + - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to + evaluate your AWS resources. suboptions: owner: description: - - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. 
- You can only specify one type if you also specify a resource ID for I(compliance_id). + - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. + You can only specify one type if you also specify a resource ID for I(compliance_id). identifier: description: - - The ID of the only AWS resource that you want to trigger an evaluation for the rule. - If you specify a resource ID, you must specify one resource type for I(compliance_types). + - The ID of the only AWS resource that you want to trigger an evaluation for the rule. + If you specify a resource ID, you must specify one resource type for I(compliance_types). details: description: - - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. - - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs. - - Key C(EventSource) The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources. - - Key C(MessageType) The type of notification that triggers AWS Config to run an evaluation for a rule. - - Key C(MaximumExecutionFrequency) The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger. + - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. + - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs. + - Key C(EventSource) The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources. + - Key C(MessageType) The type of notification that triggers AWS Config to run an evaluation for a rule. + - Key C(MaximumExecutionFrequency) The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger. type: dict required: true input_parameters: description: - - A string, in JSON format, that is passed to the AWS Config rule Lambda function. + - A string, in JSON format, that is passed to the AWS Config rule Lambda function. type: str execution_frequency: description: - - The maximum frequency with which AWS Config runs evaluations for a rule. + - The maximum frequency with which AWS Config runs evaluations for a rule. choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Create Config Rule for AWS Config - community.aws.aws_config_rule: + community.aws.config_rule: name: test_config_rule state: present description: 'This AWS Config rule checks for public write access on S3 buckets' From a5d006ab046883c4d6996ff10ca18f115963c50b Mon Sep 17 00:00:00 2001 From: James McClune Date: Mon, 4 Jul 2022 15:25:25 -0400 Subject: [PATCH 525/683] add `TargetTrackingScaling` as a scaling policy option (#771) add `TargetTrackingScaling` as a scaling policy option SUMMARY Add TargetTrackingScaling as an EC2 scaling option. 
Fixes: #544 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_scaling_policy.py Reviewed-by: Jill R Reviewed-by: James McClune Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- autoscaling_policy.py | 264 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 241 insertions(+), 23 deletions(-) diff --git a/autoscaling_policy.py b/autoscaling_policy.py index f79cbafb43a..3fd63abc52b 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -69,6 +69,7 @@ choices: - StepScaling - SimpleScaling + - TargetTrackingScaling default: SimpleScaling metric_aggregation: type: str @@ -109,6 +110,80 @@ description: - The amount by which to scale. required: true + target_tracking_config: + type: dict + description: + - Allows you to specify a I(target_tracking_config) for autoscaling policies in AWS. + - I(target_tracking_config) can accept nested dicts for I(customized_metric_spec) or I(predefined_metric_spec). + Each specification aligns with their boto3 equivalent. + - Required when I(TargetTrackingScaling) policy is specified. + version_added: 4.1.0 + suboptions: + customized_metric_spec: + type: dict + description: + - Specify a dict will be passed in as a call for C(TargetTrackingConfiguration). + suboptions: + metric_name: + type: str + description: + - The name of the metric. + required: true + namespace: + type: str + description: + - The namespace of the metric. + required: true + statistic: + type: str + description: + - The statistic of the metric. + required: true + choices: + - Average + - Minimum + - Maximum + - SampleCount + - Sum + dimensions: + type: list + description: + - The dimensions of the metric. The element of the list should be a dict. + elements: dict + unit: + type: str + description: + - The unit of the metric. Reference AmazonCloudWatch API for valid Units. + predefined_metric_spec: + type: dict + description: + - Specify a dict will be passed in as a call for I(TargetTrackingConfiguration). + suboptions: + predefined_metric_type: + type: str + required: true + description: + - Required if C(predefined_metric_spec) is used. + choices: + - ASGAverageCPUUtilization + - ASGAverageNetworkIn + - ASGAverageNetworkOut + - ALBRequestCountPerTarget + resource_label: + type: str + description: + - Uniquely identifies a specific ALB target group from which to determine the average request count served by your Auto Scaling group. + - You can't specify a resource label unless the target group is attached to the Auto Scaling group. + target_value: + type: float + description: + - Specify a float number for target utilization. + - Required when I(target_tracking_config) is specified. + required: true + disable_scalein: + type: bool + description: + - Indicate whether scaling in by the target tracking scaling policy is disabled. 
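# For reference, a sketch (not part of this patch) of how these suboptions are
# mapped onto boto3's TargetTrackingConfiguration by the module:
#
#   target_tracking_config:
#     predefined_metric_spec:
#       predefined_metric_type: ASGAverageCPUUtilization
#     target_value: 70.0
#
# becomes
#
#   {"PredefinedMetricSpecification":
#        {"PredefinedMetricType": "ASGAverageCPUUtilization"},
#    "TargetValue": 70.0,
#    "DisableScaleIn": False}
#
# (DisableScaleIn is always emitted, defaulting to False when disable_scalein
# is unset; see build_target_specification below.)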
estimated_instance_warmup: type: int description: @@ -146,6 +221,45 @@ scaling_adjustment: 100 adjustment_type: "PercentChangeInCapacity" asg_name: "application-asg" + +- name: create TargetTracking predefined policy + ec2_scaling_policy: + name: "predefined-policy-1" + policy_type: TargetTrackingScaling + target_tracking_config: + predefined_metric_spec: + predefined_metric_type: ASGAverageCPUUtilization + target_value: 98.0 + asg_name: "asg-test-1" + register: result + +- name: create TargetTracking predefined policy with resource_label + ec2_scaling_policy: + name: "predefined-policy-1" + policy_type: TargetTrackingScaling + target_tracking_config: + predefined_metric_spec: + predefined_metric_type: ALBRequestCountPerTarget + resource_label: app/my-alb/778d41231d141a0f/targetgroup/my-alb-target-group/942f017f100becff + target_value: 98.0 + asg_name: "asg-test-1" + register: result + +- name: create TargetTrackingScaling custom policy + ec2_scaling_policy: + name: "custom-policy-1" + policy_type: TargetTrackingScaling + target_tracking_config: + customized_metric_spec: + metric_name: metric_1 + namespace: namespace_1 + statistic: Minimum + unit: Gigabits + dimensions: [{'Name': 'dimension1', 'Value': 'value1'}] + disable_scalein: true + target_value: 98.0 + asg_name: asg-test-1 + register: result ''' RETURN = ''' @@ -241,9 +355,61 @@ except ImportError: pass # caught by imported AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + + +def build_target_specification(target_tracking_config): + + # Initialize an empty dict() for building TargetTrackingConfiguration policies, + # which will be returned + targetTrackingConfig = dict() + + if target_tracking_config.get('target_value'): + targetTrackingConfig['TargetValue'] = target_tracking_config['target_value'] + + if target_tracking_config.get('disable_scalein'): + targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein'] + else: + # Accounting for boto3 response + targetTrackingConfig['DisableScaleIn'] = False + + if target_tracking_config['predefined_metric_spec'] is not None: + # Build spec for predefined_metric_spec + targetTrackingConfig['PredefinedMetricSpecification'] = dict() + if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'): + targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \ + target_tracking_config['predefined_metric_spec']['predefined_metric_type'] + + if target_tracking_config['predefined_metric_spec'].get('resource_label'): + targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \ + target_tracking_config['predefined_metric_spec']['resource_label'] + + elif target_tracking_config['customized_metric_spec'] is not None: + # Build spec for customized_metric_spec + targetTrackingConfig['CustomizedMetricSpecification'] = dict() + if target_tracking_config['customized_metric_spec'].get('metric_name'): + targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \ + target_tracking_config['customized_metric_spec']['metric_name'] + + if target_tracking_config['customized_metric_spec'].get('namespace'): + 
targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \ + target_tracking_config['customized_metric_spec']['namespace'] + + if target_tracking_config['customized_metric_spec'].get('dimensions'): + targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \ + target_tracking_config['customized_metric_spec']['dimensions'] + + if target_tracking_config['customized_metric_spec'].get('statistic'): + targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \ + target_tracking_config['customized_metric_spec']['statistic'] + + if target_tracking_config['customized_metric_spec'].get('unit'): + targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \ + target_tracking_config['customized_metric_spec']['unit'] + + return targetTrackingConfig def create_scaling_policy(connection, module): @@ -252,10 +418,15 @@ def create_scaling_policy(connection, module): policy_type = module.params['policy_type'] policy_name = module.params['name'] - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name, - AdjustmentType=module.params['adjustment_type']) + if policy_type == 'TargetTrackingScaling': + params = dict(PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name) + else: + params = dict(PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name, + AdjustmentType=module.params['adjustment_type']) # min_adjustment_step attribute is only relevant if the adjustment_type # is set to percentage change in capacity, so it is a special case @@ -268,18 +439,19 @@ def create_scaling_policy(connection, module): # it's only required if policy is SimpleScaling and state is present if not module.params['scaling_adjustment']: module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling ' - 'and state is present') + 'and state is present') params['ScalingAdjustment'] = module.params['scaling_adjustment'] if module.params['cooldown']: params['Cooldown'] = module.params['cooldown'] - if policy_type == 'StepScaling': + elif policy_type == 'StepScaling': if not module.params['step_adjustments']: - module.fail_json(msg='step_adjustments is required when policy_type is StepScaling ' - 'and state is present') + module.fail_json(msg='step_adjustments is required when policy_type is StepScaling' + 'and state is present') params['StepAdjustments'] = [] for step_adjustment in module.params['step_adjustments']: - step_adjust_params = dict(ScalingAdjustment=step_adjustment['scaling_adjustment']) + step_adjust_params = dict( + ScalingAdjustment=step_adjustment['scaling_adjustment']) if step_adjustment.get('lower_bound'): step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound'] if step_adjustment.get('upper_bound'): @@ -290,12 +462,23 @@ def create_scaling_policy(connection, module): if module.params['estimated_instance_warmup']: params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] + elif policy_type == 'TargetTrackingScaling': + if not module.params['target_tracking_config']: + module.fail_json(msg='target_tracking_config is required when policy_type is ' + 'TargetTrackingScaling and state is present') + else: + params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config')) + if module.params['estimated_instance_warmup']: + params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] + + # Ensure idempotency with policies try: policies = 
connection.describe_policies(aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name])['ScalingPolicies'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws( + e, msg="Failed to obtain autoscaling policy %s" % policy_name) before = after = {} if not policies: @@ -313,12 +496,14 @@ def create_scaling_policy(connection, module): connection.put_scaling_policy(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create autoscaling policy") + try: policies = connection.describe_policies(aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name])['ScalingPolicies'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws( + e, msg="Failed to obtain autoscaling policy %s" % policy_name) policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values @@ -327,7 +512,8 @@ def create_scaling_policy(connection, module): policy['name'] = policy['policy_name'] if before and after: - module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) + module.exit_json(changed=changed, diff=dict( + before=before, after=after), **policy) else: module.exit_json(changed=changed, **policy) @@ -336,9 +522,11 @@ def delete_scaling_policy(connection, module): policy_name = module.params.get('name') try: - policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) + policy = connection.describe_policies( + aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws( + e, msg="Failed to obtain autoscaling policy %s" % policy_name) if policy['ScalingPolicies']: try: @@ -356,7 +544,32 @@ def main(): step_adjustment_spec = dict( lower_bound=dict(type='int'), upper_bound=dict(type='int'), - scaling_adjustment=dict(type='int', required=True)) + scaling_adjustment=dict(type='int', required=True) + ) + + predefined_metric_spec = dict( + predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization', + 'ASGAverageNetworkIn', + 'ASGAverageNetworkOut', + 'ALBRequestCountPerTarget'], required=True), + resource_label=dict(type='str') + ) + customized_metric_spec = dict( + metric_name=dict(type='str', required=True), + namespace=dict(type='str', required=True), + statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']), + dimensions=dict(type='list', elements='dict'), + unit=dict(type='str') + ) + + target_tracking_spec = dict( + disable_scalein=dict(type='bool'), + target_value=dict(type='float', required=True), + predefined_metric_spec=dict(type='dict', + options=predefined_metric_spec), + customized_metric_spec=dict(type='dict', + options=customized_metric_spec) + ) argument_spec = dict( name=dict(required=True), @@ -366,18 +579,23 @@ def main(): min_adjustment_step=dict(type='int'), cooldown=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), - metric_aggregation=dict(default='Average', choices=['Minimum', 'Maximum', 'Average']), - policy_type=dict(default='SimpleScaling', choices=['SimpleScaling', 'StepScaling']), - 
step_adjustments=dict(type='list', options=step_adjustment_spec, elements='dict'), + metric_aggregation=dict(default='Average', choices=[ + 'Minimum', 'Maximum', 'Average']), + policy_type=dict(default='SimpleScaling', choices=[ + 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']), + target_tracking_config=dict(type='dict', options=target_tracking_spec), + step_adjustments=dict( + type='list', options=step_adjustment_spec, elements='dict'), estimated_instance_warmup=dict(type='int') ) module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['asg_name', 'adjustment_type']]]) - - connection = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) + required_if=[['state', 'present', ['asg_name']]]) + connection = module.client( + 'autoscaling', retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get('state') + if state == 'present': create_scaling_policy(connection, module) elif state == 'absent': From d86ef7bf2b15360afe16801a779ad26293245e5a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jul 2022 11:05:36 +0200 Subject: [PATCH 526/683] Rename CodeBuild/CodeCommit/CodePipeline modules (#1308) Rename CodeBuild/CodeCommit/CodePipeline modules SUMMARY In line with the new naming guidelines renames the CodeBuild/CodeCommit/CodePipeline modules ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_codebuild.py plugins/modules/aws_codecommit.py plugins/modules/aws_codepipeline.py plugins/modules/codebuild_project.py plugins/modules/codecommit_repository.py plugins/modules/codepipeline.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_codebuild.py => codebuild_project.py | 35 ++++++++++--------- aws_codecommit.py => codecommit_repository.py | 17 ++++----- aws_codepipeline.py => codepipeline.py | 31 ++++++++-------- 3 files changed, 45 insertions(+), 38 deletions(-) rename aws_codebuild.py => codebuild_project.py (94%) rename aws_codecommit.py => codecommit_repository.py (95%) rename aws_codepipeline.py => codepipeline.py (91%) diff --git a/aws_codebuild.py b/codebuild_project.py similarity index 94% rename from aws_codebuild.py rename to codebuild_project.py index 92e65ec1fe0..bef5d410748 100644 --- a/aws_codebuild.py +++ b/codebuild_project.py @@ -9,13 +9,15 @@ DOCUMENTATION = r''' --- -module: aws_codebuild +module: codebuild_project version_added: 1.0.0 short_description: Create or delete an AWS CodeBuild project notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html). description: - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code. + - Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild). + The usage did not change. author: - Stefan Horning (@stefanhorning) options: @@ -40,7 +42,7 @@ type: str location: description: - - Information about the location of the source code to be built. For type CODEPIPELINE location should not be specified. + - Information about the location of the source code to be built. For I(type) C(CODEPIPELINE) location should not be specified. type: str git_clone_depth: description: @@ -48,7 +50,7 @@ type: int buildspec: description: - - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project. + - The build spec declaration to use for the builds in this build project. Leave empty if part of the CodeBuild project. 
type: str insecure_ssl: description: @@ -66,7 +68,7 @@ required: true location: description: - - Information about the build output artifact location. When choosing type S3, set the bucket name here. + - Information about the build output artifact location. When choosing I(type) C(S3), set the bucket name here. path: description: - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts. @@ -119,7 +121,8 @@ - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }" privileged_mode: description: - - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is be used to build Docker images. + - Enables running the Docker daemon inside a Docker container. + - Set to C(true) only if the build project is be used to build Docker images. type: dict service_role: description: @@ -157,7 +160,7 @@ type: dict state: description: - - Create or remove code build project. + - Create or remove CodeBuild project. default: 'present' choices: ['present', 'absent'] type: str @@ -191,7 +194,7 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. -- community.aws.aws_codebuild: +- community.aws.codebuild_project: name: my_project description: My nice little project service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role" @@ -223,17 +226,17 @@ type: complex contains: name: - description: Name of the CodeBuild project + description: Name of the CodeBuild project. returned: always type: str sample: my_project arn: - description: ARN of the CodeBuild project + description: ARN of the CodeBuild project. returned: always type: str sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder description: - description: A description of the build project + description: A description of the CodeBuild project. returned: always type: str sample: My nice little project @@ -243,7 +246,7 @@ type: complex contains: type: - description: The type of the repository + description: The type of the repository. returned: always type: str sample: CODEPIPELINE @@ -252,7 +255,7 @@ returned: when configured type: str git_clone_depth: - description: The git clone depth + description: The git clone depth. returned: when configured type: int build_spec: @@ -278,7 +281,7 @@ type: str sample: CODEPIPELINE location: - description: Output location for build artifacts + description: Output location for build artifacts. returned: when configured type: str # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project @@ -287,7 +290,7 @@ returned: when configured type: dict environment: - description: Environment settings for the build + description: Environment settings for the build. returned: always type: dict service_role: @@ -296,7 +299,7 @@ type: str sample: arn:aws:iam::123123123:role/codebuild-service-role timeout_in_minutes: - description: The timeout of a build in minutes + description: The timeout of a build in minutes. returned: always type: int sample: 60 @@ -316,7 +319,7 @@ type: dict version_added: 4.0.0 created: - description: Timestamp of the create time of the project + description: Timestamp of the create time of the project. 
returned: always type: str sample: "2018-04-17T16:56:03.245000+02:00" diff --git a/aws_codecommit.py b/codecommit_repository.py similarity index 95% rename from aws_codecommit.py rename to codecommit_repository.py index 090d0fd06f8..4eae0d90fb1 100644 --- a/aws_codecommit.py +++ b/codecommit_repository.py @@ -9,22 +9,24 @@ DOCUMENTATION = ''' --- -module: aws_codecommit +module: codecommit_repository version_added: 1.0.0 short_description: Manage repositories in AWS CodeCommit description: - Supports creation and deletion of CodeCommit repositories. - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. + - Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit). + The usage did not change. author: Shuang Wang (@ptux) options: name: description: - - name of repository. + - Name of repository. required: true type: str description: description: - - description or comment of repository. + - Description or comment of repository. required: false aliases: - comment @@ -36,9 +38,8 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' RETURN = ''' @@ -121,12 +122,12 @@ EXAMPLES = ''' # Create a new repository -- community.aws.aws_codecommit: +- community.aws.codecommit_repository: name: repo state: present # Delete a repository -- community.aws.aws_codecommit: +- community.aws.codecommit_repository: name: repo state: absent ''' diff --git a/aws_codepipeline.py b/codepipeline.py similarity index 91% rename from aws_codepipeline.py rename to codepipeline.py index e7f65705269..f95094bcbdc 100644 --- a/aws_codepipeline.py +++ b/codepipeline.py @@ -9,24 +9,26 @@ DOCUMENTATION = r''' --- -module: aws_codepipeline +module: codepipeline version_added: 1.0.0 short_description: Create or delete AWS CodePipelines notes: - - for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html) + - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html). description: - Create or delete a CodePipeline on AWS. + - Prior to release 5.0.0 this module was called C(community.aws.aws_codepipeline). + The usage did not change. author: - Stefan Horning (@stefanhorning) options: name: description: - - Name of the pipeline + - Name of the CodePipeline. required: true type: str role_arn: description: - - ARN of the IAM role to use when executing the pipeline + - ARN of the IAM role to use when executing the CodePipeline. required: true type: str artifact_store: @@ -50,7 +52,7 @@ suboptions: name: description: - - Name of the stage (step) in the codepipeline + - Name of the stage (step) in the CodePipeline. type: str actions: description: @@ -63,19 +65,18 @@ type: list version: description: - - Version number of the pipeline. This number is automatically incremented when a pipeline is updated. + - Version number of the CodePipeline. This number is automatically incremented when a CodePipeline is updated. required: false type: int state: description: - - Create or remove code pipeline + - Create or remove CodePipeline. default: 'present' choices: ['present', 'absent'] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -149,7 +150,7 @@ RETURN = r''' pipeline: - description: Returns the dictionary describing the code pipeline configuration. 
+ description: Returns the dictionary describing the CodePipeline configuration. returned: success type: complex contains: @@ -159,7 +160,7 @@ type: str sample: my_deploy_pipeline role_arn: - description: ARN of the IAM role attached to the code pipeline + description: ARN of the IAM role attached to the CodePipeline returned: always type: str sample: arn:aws:iam::123123123:role/codepipeline-service-role @@ -183,11 +184,13 @@ returned: when configured type: str stages: - description: List of stages configured for this pipeline + description: List of stages configured for this CodePipeline returned: always type: list version: - description: The version number of the pipeline. This number is auto incremented when pipeline params are changed. + description: + - The version number of the CodePipeline. + - This number is auto incremented when CodePipeline params are changed. returned: always type: int ''' From c14de9d1ff29990d8fdbb307f2a9f65881b36f33 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jul 2022 12:32:55 +0200 Subject: [PATCH 527/683] Rename aws_elasticbeanstalk_app elasticbeanstalk_app (#1307) Rename aws_elasticbeanstalk_app to elasticbeanstalk_app SUMMARY Drop the aws_ prefix from aws_elasticbeanstalk_app in line with the naming guidelines ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_elasticbeanstalk_app.py plugins/modules/elasticbeanstalk_app.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ...eanstalk_app.py => elasticbeanstalk_app.py | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) rename aws_elasticbeanstalk_app.py => elasticbeanstalk_app.py (94%) diff --git a/aws_elasticbeanstalk_app.py b/elasticbeanstalk_app.py similarity index 94% rename from aws_elasticbeanstalk_app.py rename to elasticbeanstalk_app.py index 19110282d0e..6eee7b13606 100644 --- a/aws_elasticbeanstalk_app.py +++ b/elasticbeanstalk_app.py @@ -8,19 +8,18 @@ DOCUMENTATION = ''' --- -module: aws_elasticbeanstalk_app +module: elasticbeanstalk_app version_added: 1.0.0 -short_description: Create, update, and delete an elastic beanstalk application - +short_description: Create, update, and delete an Elastic Beanstalk application description: - - Creates, updates, deletes beanstalk applications if app_name is provided. + - Creates, updates, deletes Elastic Beanstalk applications if I(app_name) is provided. options: app_name: description: - - Name of the beanstalk application you wish to manage. + - Name of the Beanstalk application you wish to manage. 
aliases: [ 'name' ] type: str description: @@ -39,23 +38,22 @@ default: false type: bool author: - - Harpreet Singh (@hsingh) - - Stephen Granger (@viper233) + - Harpreet Singh (@hsingh) + - Stephen Granger (@viper233) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Create or update an application -- community.aws.aws_elasticbeanstalk_app: +- community.aws.elasticbeanstalk_app: app_name: Sample_App description: "Hello World App" state: present # Delete application -- community.aws.aws_elasticbeanstalk_app: +- community.aws.elasticbeanstalk_app: app_name: Sample_App state: absent From dde7f099e5c67bc8971641e915ac0f9de86b0e37 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jul 2022 14:38:13 +0200 Subject: [PATCH 528/683] Add support for check mode to SSM Parameter store (#1309) Add support for check mode to SSM Parameter store SUMMARY Adds support for check_mode Adds basic waiters for create/delete Fixes bug where module wasn't consistently idempotent ISSUE TYPE Bugfix Pull Request Feature Pull Request COMPONENT NAME plugins/modules/aws_ssm_parameter_store.py ADDITIONAL INFORMATION Module was using a deprecated parameter when calling describe_parameters (Filters). This deprecated parameter appears to have some form of caching applied to it and would sometimes return old values. By switching to the ParameterFilters replacement things seem to be more consistent. Reviewed-by: Alina Buzachis --- aws_ssm_parameter_store.py | 130 ++++++++++++++++++++++++++++++++++--- 1 file changed, 122 insertions(+), 8 deletions(-) diff --git a/aws_ssm_parameter_store.py b/aws_ssm_parameter_store.py index b46214cd263..b3c13015c26 100644 --- a/aws_ssm_parameter_store.py +++ b/aws_ssm_parameter_store.py @@ -207,6 +207,8 @@ returned: success ''' +import time + try: import botocore except ImportError: @@ -216,14 +218,94 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory + + +class ParameterWaiterFactory(BaseWaiterFactory): + def __init__(self, module): + client = module.client('ssm') + super(ParameterWaiterFactory, self).__init__(module, client) + + @property + def _waiter_model_data(self): + data = super(ParameterWaiterFactory, self)._waiter_model_data + ssm_data = dict( + parameter_exists=dict( + operation='DescribeParameters', + delay=1, maxAttempts=20, + acceptors=[ + dict(state='retry', matcher='error', expected='ParameterNotFound'), + dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'), + dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), + ] + ), + parameter_deleted=dict( + operation='DescribeParameters', + delay=1, maxAttempts=20, + acceptors=[ + dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), + dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'), + dict(state='success', matcher='error', expected='ParameterNotFound'), + ] + ), + ) + data.update(ssm_data) + return data + + +def _wait_exists(client, module, name): + if module.check_mode: + return + wf = ParameterWaiterFactory(module) + waiter = wf.get_waiter('parameter_exists') + try: + waiter.wait( + 
ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ) + except botocore.exceptions.WaiterError: + module.warn("Timeout waiting for parameter to exist") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe parameter while waiting for creation") + + +def _wait_updated(client, module, name, version): + # Unfortunately we can't filter on the Version, as such we need something custom. + if module.check_mode: + return + for x in range(1, 10): + try: + parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}]) + if parameter.get('Version', 0) > version: + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update") + time.sleep(1) + + +def _wait_deleted(client, module, name): + if module.check_mode: + return + wf = ParameterWaiterFactory(module) + waiter = wf.get_waiter('parameter_deleted') + try: + waiter.wait( + ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ) + except botocore.exceptions.WaiterError: + module.warn("Timeout waiting for parameter to exist") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe parameter while waiting for deletion") def update_parameter(client, module, **args): changed = False response = {} + if module.check_mode: + return True, response try: - response = client.put_parameter(**args) + response = client.put_parameter(aws_retry=True, **args) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="setting parameter") @@ -231,6 +313,7 @@ def update_parameter(client, module, **args): return changed, response +@AWSRetry.jittered_backoff() def describe_parameter(client, module, **args): paginator = client.get_paginator('describe_parameters') existing_parameter = paginator.paginate(**args).build_full_result() @@ -267,11 +350,14 @@ def create_update_parameter(client, module): args.update(KeyId=module.params.get('key_id')) try: - existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True) - except Exception: + existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True) + except botocore.exceptions.ClientError: pass + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="fetching parameter") if existing_parameter: + original_version = existing_parameter['Parameter']['Version'] if 'Value' not in args: args['Value'] = existing_parameter['Parameter']['Value'] @@ -290,14 +376,17 @@ def create_update_parameter(client, module): try: describe_existing_parameter = describe_parameter( client, module, - Filters=[{"Key": "Name", "Values": [args['Name']]}]) + ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") if describe_existing_parameter['Description'] != args['Description']: (changed, response) = update_parameter(client, module, **args) + if changed: + _wait_updated(client, module, module.params.get('name'), original_version) else: (changed, response) = update_parameter(client, module, **args) + _wait_exists(client, module, module.params.get('name')) return changed, response @@ -305,8 +394,24 @@ def create_update_parameter(client, module): def 
     response = {}

+    try:
+        existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True)
+    except is_boto3_error_code('ParameterNotFound'):
+        return False, {}
+    except botocore.exceptions.ClientError:
+        # If we can't describe the parameter we may still be able to delete it
+        existing_parameter = True
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="describing parameter")
+
+    if not existing_parameter:
+        return False, {}
+    if module.check_mode:
+        return True, {}
+
     try:
         response = client.delete_parameter(
+            aws_retry=True,
             Name=module.params.get('name')
         )
     except is_boto3_error_code('ParameterNotFound'):
@@ -314,11 +419,14 @@ def delete_parameter(client, module):
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="deleting parameter")

+    _wait_deleted(client, module, module.params.get('name'))
+
     return True, response


 def setup_client(module):
-    connection = module.client('ssm')
+    retry_decorator = AWSRetry.jittered_backoff()
+    connection = module.client('ssm', retry_decorator=retry_decorator)
     return connection


@@ -337,6 +445,7 @@ def setup_module_object():
     return AnsibleAWSModule(
         argument_spec=argument_spec,
+        supports_check_mode=True,
     )


@@ -353,9 +462,14 @@ def main():

     result = {"response": response}

-    parameter_metadata = describe_parameter(
-        client, module,
-        Filters=[{"Key": "Name", "Values": [module.params.get('name')]}])
+    try:
+        parameter_metadata = describe_parameter(
+            client, module,
+            ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}])
+    except is_boto3_error_code('ParameterNotFound'):
+        return False, {}
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe parameter")
     if parameter_metadata:
         result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata)

From 281da66f5acd326ed219b1157e7e9be778cfdb71 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 5 Jul 2022 15:16:10 +0200
Subject: [PATCH 529/683] Rename stepfunctions modules (#1310)

Rename stepfunctions modules

SUMMARY
Rename stepfunctions modules in line with naming guidelines (drop the aws_ prefix)

ISSUE TYPE
Feature Pull Request

COMPONENT NAME
plugins/modules/aws_step_functions_state_machine.py
plugins/modules/stepfunctions_state_machine.py
plugins/modules/aws_step_functions_state_machine_execution.py
plugins/modules/stepfunctions_state_machine_execution.py

ADDITIONAL INFORMATION

Reviewed-by: Alina Buzachis
Reviewed-by: Mark Chappell
---
 ...chine.py => stepfunctions_state_machine.py | 32 ++++++++++---------
 ...> stepfunctions_state_machine_execution.py | 18 +++++------
 2 files changed, 26 insertions(+), 24 deletions(-)
 rename aws_step_functions_state_machine.py => stepfunctions_state_machine.py (89%)
 rename aws_step_functions_state_machine_execution.py => stepfunctions_state_machine_execution.py (93%)

diff --git a/aws_step_functions_state_machine.py b/stepfunctions_state_machine.py
similarity index 89%
rename from aws_step_functions_state_machine.py
rename to stepfunctions_state_machine.py
index 452ebc4237a..227ec6f86c3 100644
--- a/aws_step_functions_state_machine.py
+++ b/stepfunctions_state_machine.py
@@ -9,18 +9,20 @@

 DOCUMENTATION = '''
 ---
-module: aws_step_functions_state_machine
+module: stepfunctions_state_machine
 version_added: 1.0.0
 short_description: Manage AWS Step Functions state machines

 description:
-
- Create, update and delete state machines in AWS Step Functions. - - Calling the module in C(state=present) for an existing AWS Step Functions state machine - will attempt to update the state machine definition, IAM Role, or tags with the provided data. + - Create, update and delete state machines in AWS Step Functions. + - Calling the module in C(state=present) for an existing AWS Step Functions state machine + will attempt to update the state machine definition, IAM Role, or tags with the provided data. + - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine). + The usage did not change. options: name: description: - - Name of the state machine + - Name of the state machine. required: true type: str definition: @@ -28,32 +30,32 @@ - The Amazon States Language definition of the state machine. See U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more information on the Amazon States Language. - - "This parameter is required when C(state=present)." + - Required when I(state=present). type: json role_arn: description: - The ARN of the IAM Role that will be used by the state machine for its executions. - - "This parameter is required when C(state=present)." + - Required when I(state=present). type: str state: description: - - Desired state for the state machine + - Desired state for the state machine. default: present choices: [ present, absent ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags author: - - Tom De Keyser (@tdekeyser) + - Tom De Keyser (@tdekeyser) ''' EXAMPLES = ''' # Create a new AWS Step Functions state machine - name: Setup HelloWorld state machine - community.aws.aws_step_functions_state_machine: + community.aws.stepfunctions_state_machine: name: "HelloWorldStateMachine" definition: "{{ lookup('file','state_machine.json') }}" role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole @@ -62,7 +64,7 @@ # Update an existing state machine - name: Change IAM Role and tags of HelloWorld state machine - community.aws.aws_step_functions_state_machine: + community.aws.stepfunctions_state_machine: name: HelloWorldStateMachine definition: "{{ lookup('file','state_machine.json') }}" role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole @@ -71,7 +73,7 @@ # Remove the AWS Step Functions state machine - name: Delete HelloWorld state machine - community.aws.aws_step_functions_state_machine: + community.aws.stepfunctions_state_machine: name: HelloWorldStateMachine state: absent ''' diff --git a/aws_step_functions_state_machine_execution.py b/stepfunctions_state_machine_execution.py similarity index 93% rename from aws_step_functions_state_machine_execution.py rename to stepfunctions_state_machine_execution.py index 8ecc2a1272d..23c47af1d27 100644 --- a/aws_step_functions_state_machine_execution.py +++ b/stepfunctions_state_machine_execution.py @@ -9,18 +9,19 @@ DOCUMENTATION = ''' --- -module: aws_step_functions_state_machine_execution +module: stepfunctions_state_machine_execution version_added: 1.0.0 -short_description: Start or stop execution of an AWS Step Functions state machine. - +short_description: Start or stop execution of an AWS Step Functions state machine description: - Start or stop execution of a state machine in AWS Step Functions. 
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine_execution). + The usage did not change. options: action: - description: Desired action (start or stop) for a state machine execution. + description: Desired action (C(start) or C(stop)) for a state machine execution. default: start choices: [ start, stop ] type: str @@ -47,9 +48,8 @@ default: '' extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 author: - Prasad Katti (@prasadkatti) @@ -57,13 +57,13 @@ EXAMPLES = ''' - name: Start an execution of a state machine - community.aws.aws_step_functions_state_machine_execution: + community.aws.stepfunctions_state_machine_execution: name: an_execution_name execution_input: '{ "IsHelloWorldExample": true }' state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine" - name: Stop an execution of a state machine - community.aws.aws_step_functions_state_machine_execution: + community.aws.stepfunctions_state_machine_execution: action: stop execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" cause: "cause of task failure" From a102038977234b23a283e5453440c2395e376606 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jul 2022 16:31:28 +0200 Subject: [PATCH 530/683] Rename aws_ssm_parameter_store (#1313) Rename aws_ssm_parameter_store SUMMARY Rename aws_ssm_parameter_store to ssm_parameter in line with the new naming guidelines. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_ssm_parameter_store.py plugins/modules/ssm_parameter.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ...ssm_parameter_store.py => ssm_parameter.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) rename aws_ssm_parameter_store.py => ssm_parameter.py (95%) diff --git a/aws_ssm_parameter_store.py b/ssm_parameter.py similarity index 95% rename from aws_ssm_parameter_store.py rename to ssm_parameter.py index b3c13015c26..7a6e7eb7c20 100644 --- a/aws_ssm_parameter_store.py +++ b/ssm_parameter.py @@ -8,11 +8,13 @@ DOCUMENTATION = ''' --- -module: aws_ssm_parameter_store +module: ssm_parameter version_added: 1.0.0 -short_description: Manage key-value pairs in AWS SSM parameter store +short_description: Manage key-value pairs in AWS Systems Manager Parameter Store description: - - Manage key-value pairs in AWS SSM parameter store. + - Manage key-value pairs in AWS Systems Manager (SSM) Parameter Store. + - Prior to release 5.0.0 this module was called C(community.aws.aws_ssm_parameter_store). + The usage did not change. 
 options:
   name:
     description:
@@ -85,31 +87,31 @@ options:
   - "Michael De La Rue (@mikedlr)"

 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+  - amazon.aws.aws
+  - amazon.aws.ec2
 '''

 EXAMPLES = '''
 - name: Create or update key/value pair in AWS SSM parameter store
-  community.aws.aws_ssm_parameter_store:
+  community.aws.ssm_parameter:
     name: "Hello"
     description: "This is your first key"
     value: "World"

 - name: Delete the key
-  community.aws.aws_ssm_parameter_store:
+  community.aws.ssm_parameter:
     name: "Hello"
     state: absent

-- name: Create or update secure key/value pair with default kms key (aws/ssm)
-  community.aws.aws_ssm_parameter_store:
+- name: Create or update secure key/value pair with default KMS key (aws/ssm)
+  community.aws.ssm_parameter:
     name: "Hello"
     description: "This is your first key"
     string_type: "SecureString"
     value: "World"

-- name: Create or update secure key/value pair with nominated KMS key
-  community.aws.aws_ssm_parameter_store:
+- name: Create or update secure key/value pair with nominated KMS key
+  community.aws.ssm_parameter:
     name: "Hello"
     description: "This is your first key"
     string_type: "SecureString"
@@ -117,7 +119,7 @@
     value: "World"

 - name: Always update a parameter store value and create a new version
-  community.aws.aws_ssm_parameter_store:
+  community.aws.ssm_parameter:
     name: "overwrite_example"
     description: "This example will always overwrite the value"
     string_type: "String"
@@ -125,7 +127,7 @@
     overwrite_value: "always"

 - name: Create or update key/value pair in AWS SSM parameter store with tier
-  community.aws.aws_ssm_parameter_store:
+  community.aws.ssm_parameter:
     name: "Hello"
     description: "This is your first key"
     value: "World"

From 0a2c5b5c38082bc31d4e88c8788ea4a1a82a1336 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 5 Jul 2022 18:00:21 +0200
Subject: [PATCH 531/683] Rename MSK modules (#1311)

Rename MSK modules

SUMMARY
In line with the naming guidelines, drop the aws_ prefix from the MSK modules

ISSUE TYPE
Feature Pull Request

COMPONENT NAME
plugins/modules/aws_msk_cluster.py
plugins/modules/msk_cluster.py
plugins/modules/aws_msk_config.py
plugins/modules/msk_config.py

ADDITIONAL INFORMATION

Reviewed-by: Alina Buzachis
Reviewed-by: Markus Bergholz
---
 aws_msk_cluster.py => msk_cluster.py |  9 +++++----
 aws_msk_config.py => msk_config.py   | 10 ++++++----
 2 files changed, 11 insertions(+), 8 deletions(-)
 rename aws_msk_cluster.py => msk_cluster.py (99%)
 rename aws_msk_config.py => msk_config.py (97%)

diff --git a/aws_msk_cluster.py b/msk_cluster.py
similarity index 99%
rename from aws_msk_cluster.py
rename to msk_cluster.py
index 559660d786f..8a1774b25ef 100644
--- a/aws_msk_cluster.py
+++ b/msk_cluster.py
@@ -9,11 +9,13 @@

 DOCUMENTATION = r"""
 ---
-module: aws_msk_cluster
+module: msk_cluster
 short_description: Manage Amazon MSK clusters
 version_added: "2.0.0"
 description:
   - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters.
+  - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_cluster).
+    The usage did not change.
 author:
   - Daniil Kupchenko (@oukooveu)
 options:
@@ -31,7 +33,6 @@
       - The version of Apache Kafka.
       - This version should exist in given configuration.
       - This parameter is required when I(state=present).
-      - Update operation requires botocore version >= 1.16.19.
     type: str
   configuration_arn:
     description:
@@ -212,7 +213,7 @@

 EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
-- aws_msk_cluster: +- community.aws.msk_cluster: name: kafka-cluster state: present version: 2.6.1 @@ -227,7 +228,7 @@ configuration_arn: arn:aws:kafka:us-east-1:000000000001:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1 configuration_revision: 1 -- aws_msk_cluster: +- community.aws.msk_cluster: name: kafka-cluster state: absent """ diff --git a/aws_msk_config.py b/msk_config.py similarity index 97% rename from aws_msk_config.py rename to msk_config.py index afaea513937..0547a2a8bbd 100644 --- a/aws_msk_config.py +++ b/msk_config.py @@ -9,11 +9,13 @@ DOCUMENTATION = r""" --- -module: aws_msk_config -short_description: Manage Amazon MSK cluster configurations. +module: msk_config +short_description: Manage Amazon MSK cluster configurations version_added: "2.0.0" description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations. + - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_config). + The usage did not change. author: - Daniil Kupchenko (@oukooveu) options: @@ -47,7 +49,7 @@ EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. -- aws_msk_config: +- community.aws.msk_config: name: kafka-cluster-configuration state: present kafka_versions: @@ -59,7 +61,7 @@ default.replication.factor: 3 zookeeper.session.timeout.ms: 18000 -- aws_msk_config: +- community.aws.msk_config: name: kafka-cluster-configuration state: absent """ From f5181fc3f1ee3fc028c49dceda72289024d9e3f1 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 5 Jul 2022 18:00:26 +0200 Subject: [PATCH 532/683] Rename aws_application_scaling_policy (#1314) Rename aws_application_scaling_policy SUMMARY In line with the naming guidelines drop the aws_ from aws_application_scaling_policy ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_application_scaling_policy.py plugins/modules/application_autoscaling_policy.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ...cy.py => application_autoscaling_policy.py | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) rename aws_application_scaling_policy.py => application_autoscaling_policy.py (97%) diff --git a/aws_application_scaling_policy.py b/application_autoscaling_policy.py similarity index 97% rename from aws_application_scaling_policy.py rename to application_autoscaling_policy.py index 5c8ac9b24ad..5d11fe47c0d 100644 --- a/aws_application_scaling_policy.py +++ b/application_autoscaling_policy.py @@ -8,14 +8,16 @@ DOCUMENTATION = ''' --- -module: aws_application_scaling_policy +module: application_autoscaling_policy version_added: 1.0.0 short_description: Manage Application Auto Scaling Scaling Policies notes: - - for details of the parameters and returns see + - For more details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy) description: - Creates, updates or removes a Scaling Policy. + - Prior to release 5.0.0 this module was called C(community.aws.aws_application_scaling_policy). + The usage did not change. author: - Gustavo Maia (@gurumaia) - Chen Leibovich (@chenl87) @@ -97,13 +99,13 @@ type: int override_task_capacity: description: - - Whether or not to override values of minimum and/or maximum tasks if it's already set. - - Defaults to C(false). + - Whether or not to override values of minimum and/or maximum tasks if it's already set. 
+      - Defaults to C(false).
     required: no
     type: bool
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+  - amazon.aws.aws
+  - amazon.aws.ec2
 '''

 EXAMPLES = '''
@@ -111,7 +113,7 @@

 # Create step scaling policy for ECS Service
 - name: scaling_policy
-  community.aws.aws_application_scaling_policy:
+  community.aws.application_autoscaling_policy:
     state: present
     policy_name: test_policy
     service_namespace: ecs
@@ -132,7 +134,7 @@

 # Create target tracking scaling policy for ECS Service
 - name: scaling_policy
-  community.aws.aws_application_scaling_policy:
+  community.aws.application_autoscaling_policy:
     state: present
     policy_name: test_policy
     service_namespace: ecs
@@ -150,7 +152,7 @@

 # Remove scalable target for ECS Service
 - name: scaling_policy
-  community.aws.aws_application_scaling_policy:
+  community.aws.application_autoscaling_policy:
     state: absent
     policy_name: test_policy
    policy_type: StepScaling

From 10b1967153aeb7f7d7eab4ee2d1370f61bc2b067 Mon Sep 17 00:00:00 2001
From: Avishay Bar
Date: Tue, 5 Jul 2022 19:02:58 +0300
Subject: [PATCH 533/683] Added support for InputTransformer attribute of
 cloudwatchevent_rule (#623)

Added support for InputTransformer attribute of cloudwatchevent_rule

SUMMARY
EventBridge has the InputTransformer attribute on targets to allow providing custom input to a target based on certain event data. This PR adds this functionality and includes an example usage.

ISSUE TYPE
Feature Pull Request

COMPONENT NAME
aws.cloudwatchevent_rule

ADDITIONAL INFORMATION
...
    "targets": [
        {
            "arn": "arn:aws:sns:us-east-1:123456789012:MySNSTopic",
            "id": "MySNSTopic",
            "input_transformer": {
                "input_paths_map": {
                    "instance": "$.detail.instance-id",
                    "state": "$.detail.state"
                },
                "input_template": "\"<instance> is in state <state>\""
            }
        }
    ]
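As a rough sketch of the resulting data flow (the instance ID below is invented): an event whose detail is

    {"detail": {"instance-id": "i-0abcd1234", "state": "pending"}}

has instance and state extracted by input_paths_map, and the input_template above renders them, so the target receives the string:

    "i-0abcd1234 is in state pending"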
Reviewed-by: Markus Bergholz
Reviewed-by: Alina Buzachis
Reviewed-by: Avishay Bar
Reviewed-by: Jill R
Reviewed-by: Mark Chappell
---
 cloudwatchevent_rule.py | 49 +++++++++++++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 7 deletions(-)

diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py
index d38db416864..820e6c38543 100644
--- a/cloudwatchevent_rule.py
+++ b/cloudwatchevent_rule.py
@@ -3,8 +3,8 @@
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

 from __future__ import absolute_import, division, print_function
-__metaclass__ = type

+__metaclass__ = type

 DOCUMENTATION = r'''
 ---
@@ -22,8 +22,8 @@
   - A rule must contain at least an I(event_pattern) or I(schedule_expression). A rule can
     have both an I(event_pattern) and a I(schedule_expression), in which case the rule will
     trigger on matching events as well as on a schedule.
-  - When specifying targets, I(input) and I(input_path) are mutually-exclusive
-    and optional parameters.
+  - When specifying targets, I(input), I(input_path), I(input_paths_map) and I(input_template)
+    are mutually-exclusive and optional parameters.
 options:
   name:
     description:
@@ -81,15 +81,31 @@
         type: str
         description:
           - A JSON object that will override the event data when passed to the target.
-          - If neither I(input) nor I(input_path) is specified, then the entire
-            event is passed to the target in JSON form.
+          - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template)
+            is specified, then the entire event is passed to the target in JSON form.
      input_path:
        type: str
        description:
          - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
            passed to the target.
-          - If neither I(input) nor I(input_path) is specified, then the entire
-            event is passed to the target in JSON form.
+          - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template)
+            is specified, then the entire event is passed to the target in JSON form.
+      input_paths_map:
+        type: dict
+        version_added: 4.1.0
+        description:
+          - A dict that specifies the transformation of the event data to
+            custom input parameters.
+          - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template)
+            is specified, then the entire event is passed to the target in JSON form.
+      input_template:
+        type: str
+        version_added: 4.1.0
+        description:
+          - A string template that uses the values extracted by I(input_paths_map) to produce
+            the output you want to be sent to the target.
+          - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template)
+            is specified, then the entire event is passed to the target in JSON form.
      ecs_parameters:
        type: dict
        description:
@@ -123,6 +139,19 @@
       arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
       input: '{"foo": "bar"}'

+- community.aws.cloudwatchevent_rule:
+    name: MyInstanceLaunchEvent
+    description: "Rule for EC2 instance launch"
+    state: present
+    event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}'
+    targets:
+      - id: MyTargetSnsTopic
+        arn: arn:aws:sns:us-east-1:123456789012:MySNSTopic
+        input_paths_map:
+          instance: "$.detail.instance-id"
+          state: "$.detail.state"
+        input_template: "<instance> is in state <state>"
+
 - community.aws.cloudwatchevent_rule:
     name: MyCronTask
     state: absent
@@ -286,6 +315,12 @@ def _targets_request(self, targets):
                 target_request['Input'] = target['input']
             if 'input_path' in target:
                 target_request['InputPath'] = target['input_path']
+            if 'input_paths_map' in target or 'input_template' in target:
+                target_request['InputTransformer'] = {}
+                target_request['InputTransformer']['InputPathsMap'] = target['input_paths_map']
+                target_request['InputTransformer']['InputTemplate'] = '"{0}"'.format(
+                    target['input_template']
+                )
             if 'role_arn' in target:
                 target_request['RoleArn'] = target['role_arn']
             if 'ecs_parameters' in target:

From 4c75100655c0775ccadae99652d2ec416e185fdd Mon Sep 17 00:00:00 2001
From: tamirhad <46893693+tamirhad@users.noreply.github.com>
Date: Wed, 6 Jul 2022 11:42:05 +0300
Subject: [PATCH 534/683] ECS service - add tag+propagate_tags upon creation.
 (#543)

ECS service - add tag+propagate_tags upon creation.

SUMMARY
This PR is a continuation of #242; it will enable the use of tags at creation time, in addition to propagate_tags.

ISSUE TYPE
Feature Pull Request

COMPONENT NAME
ecs_service

ADDITIONAL INFORMATION
This PR is a continuation of #242. I can't contribute to his repo as I am not a maintainer, so I created a new PR; if it's not the right git flow please be gentle, it's my first time :)
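For illustration, a minimal task exercising the new options; tags can also be propagated from the task definition rather than the service (all names here are placeholders):

- community.aws.ecs_service:
    state: present
    name: tags-propagation-example
    cluster: new_cluster
    task_definition: 'new_cluster-task:1'
    desired_count: 1
    tags:
      team: example-team
    propagate_tags: TASK_DEFINITION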
Reviewed-by: Mark Chappell
Reviewed-by: None
Reviewed-by: Alina Buzachis
---
 ecs_service.py | 102 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 91 insertions(+), 11 deletions(-)

diff --git a/ecs_service.py b/ecs_service.py
index 78e352447ee..800d670be21 100644
--- a/ecs_service.py
+++ b/ecs_service.py
@@ -5,7 +5,6 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

-
 DOCUMENTATION = r'''
 ---
 module: ecs_service
@@ -252,6 +251,19 @@
         type: bool
         default: false
         version_added: 4.1.0
+    propagate_tags:
+        description:
+          - Propagate tags from ECS task definition or ECS service to ECS task.
+        required: false
+        choices: ["TASK_DEFINITION", "SERVICE"]
+        type: str
+        version_added: 4.1.0
+    tags:
+        description:
+          - A dictionary of tags to add or remove from the resource.
+        type: dict
+        required: false
+        version_added: 4.1.0
 extends_documentation_fragment:
 - amazon.aws.aws
 - amazon.aws.ec2
@@ -259,7 +271,6 @@

 EXAMPLES = r'''
 # Note: These examples do not set authentication details, see the AWS Guide for details.
-
 # Basic provisioning example
 - community.aws.ecs_service:
     state: present
@@ -328,6 +339,18 @@
       - capacity_provider: test-capacity-provider-1
         weight: 1
         base: 0
+
+# With tags and tag propagation
+- community.aws.ecs_service:
+    state: present
+    name: tags-test-service
+    cluster: new_cluster
+    task_definition: 'new_cluster-task:1'
+    desired_count: 1
+    tags:
+      Firstname: jane
+      lastName: doe
+    propagate_tags: SERVICE
 '''

 RETURN = r'''
@@ -401,6 +424,10 @@
             description: The valid values are ACTIVE, DRAINING, or INACTIVE.
             returned: always
             type: str
+        tags:
+            description: The tags applied to this resource.
+            returned: success
+            type: dict
         taskDefinition:
             description: The ARN of a task definition to use for tasks in the service.
             returned: always
@@ -472,7 +499,10 @@
                     such as attribute:ecs.availability-zone.
                     For the binpack placement strategy, valid values are CPU and MEMORY.
                 returned: always
                 type: str
-
+        propagateTags:
+            description: The type of tag propagation applied to the resource.
+            returned: always
+            type: str
 ansible_facts:
     description: Facts about deleted service.
     returned: when deleting a service
@@ -530,6 +560,11 @@
             description: The valid values are ACTIVE, DRAINING, or INACTIVE.
             returned: always
             type: str
+        tags:
+            description: The tags applied to this resource.
+            returned: when tags found
+            type: list
+            elements: dict
         taskDefinition:
             description: The ARN of a task definition to use for tasks in the service.
             returned: always
@@ -601,6 +636,11 @@
                     such as attribute:ecs.availability-zone.
                     For the binpack placement strategy, valid values are CPU and MEMORY.
                 returned: always
                 type: str
+        propagateTags:
+            description: The type of tag propagation applied to the resource
+            returned: always
+            type: str
+
 '''
 import time

 DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
     'deployment_circuit_breaker': 'dict',
 }

+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict

 try:
     import botocore
@@ -662,8 +707,11 @@ def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
     def describe_service(self, cluster_name, service_name):
         response = self.ecs.describe_services(
             cluster=cluster_name,
-            services=[service_name])
+            services=[service_name],
+            include=['TAGS'],
+        )
         msg = ''
+
         if len(response['failures']) > 0:
             c = self.find_in_array(response['failures'], service_name, 'arn')
             msg += ", failure reason is " + c['reason']
@@ -692,6 +740,12 @@ def is_matching_service(self, expected, existing):
         if (expected['load_balancers'] or []) != existing['loadBalancers']:
             return False

+        if expected['propagate_tags'] != existing['propagateTags']:
+            return False
+
+        if boto3_tag_list_to_ansible_dict(existing['tags']) != expected['tags']:
+            return False
+
         # expected is params. DAEMON scheduling strategy returns desired count equal to
         # number of instances running; don't check desired count if scheduling strat is daemon
         if (expected['scheduling_strategy'] != 'DAEMON'):
@@ -704,7 +758,7 @@ def is_matching_service(self, expected, existing):

     def create_service(self, service_name, cluster_name, task_definition, load_balancers,
                        desired_count, client_token, role, deployment_controller, deployment_configuration,
                        placement_constraints, placement_strategy, health_check_grace_period_seconds,
                        network_configuration, service_registries, launch_type, platform_version,
-                       scheduling_strategy, capacity_provider_strategy):
+                       scheduling_strategy, capacity_provider_strategy, tags, propagate_tags):

         params = dict(
             cluster=cluster_name,
@@ -740,6 +794,14 @@ def create_service(self, service_name, cluster_name, task_definition, load_balancers,
             params['desiredCount'] = desired_count
         if capacity_provider_strategy:
             params['capacityProviderStrategy'] = capacity_provider_strategy
+        if propagate_tags:
+            params['propagateTags'] = propagate_tags
+        # desired count is not required if scheduling strategy is daemon
+        if desired_count is not None:
+            params['desiredCount'] = desired_count
+        if tags:
+            params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+
         if scheduling_strategy:
             params['schedulingStrategy'] = scheduling_strategy
         response = self.ecs.create_service(**params)
@@ -850,7 +912,9 @@ def main():
             weight=dict(type='int'),
             base=dict(type='int')
         )
-    )
+        ),
+        propagate_tags=dict(required=False, choices=['TASK_DEFINITION', 'SERVICE']),
+        tags=dict(required=False, type='dict'),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec,
@@ -888,7 +952,9 @@ def main():
     try:
         existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
     except Exception as e:
-        module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))
+        module.fail_json_aws(e,
+                             msg="Exception describing service '{0}' in cluster '{1}'"
+                                 .format(module.params['name'], module.params['cluster']))

     results = dict(changed=False)

@@ -948,6 +1014,12 @@ def main():
             else:
                 task_definition = module.params['task_definition']

+            if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']:
+                module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service")
+
+            if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']:
+                module.fail_json(msg="It is not currently supported to change tags of an existing service")
+
             # update required
             response = service_mgr.update_service(module.params['name'],
                                                   module.params['cluster'],
@@ -957,7 +1029,7 @@ def main():
                                                   network_configuration,
                                                   module.params['health_check_grace_period_seconds'],
                                                   module.params['force_new_deployment'],
-                                                  capacityProviders
+                                                  capacityProviders,
                                                   )

         else:
@@ -977,13 +1049,17 @@ def main():
                                                       network_configuration,
                                                       serviceRegistries,
                                                       module.params['launch_type'],
                                                       module.params['platform_version'],
                                                       module.params['scheduling_strategy'],
+                                                      capacityProviders,
+                                                      module.params['tags'],
+                                                      module.params['propagate_tags'],
                                                       )
             except botocore.exceptions.ClientError as e:
                 module.fail_json_aws(e, msg="Couldn't create service")

+        if response.get('tags', None):
+            response['tags'] = boto3_tag_list_to_ansible_dict(response['tags'])
         results['service'] = response

     results['changed'] = True

@@ -1044,7 +1121,10 @@ def main():
                     break
                 time.sleep(delay)
             if i is repeat - 1:
-                module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+                module.fail_json(
+                    msg="Service still not deleted after {0} tries of {1} seconds each."
+                        .format(repeat, delay)
+                )
             return

     module.exit_json(**results)

From 183dc8d032e062672942506dd199b3e2df721126 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Wed, 6 Jul 2022 11:37:55 +0200
Subject: [PATCH 535/683] Rename aws_inspector_target module (#1318)

Rename aws_inspector_target module

SUMMARY
Drop the aws_ prefix from aws_inspector_target in line with the updated guidelines

ISSUE TYPE
Feature Pull Request

COMPONENT NAME
plugins/modules/aws_inspector_target.py
plugins/modules/inspector_target.py

ADDITIONAL INFORMATION

Reviewed-by: Alina Buzachis
---
 ...inspector_target.py => inspector_target.py | 31 ++++++++++---------
 1 file changed, 17 insertions(+), 14 deletions(-)
 rename aws_inspector_target.py => inspector_target.py (92%)

diff --git a/aws_inspector_target.py b/inspector_target.py
similarity index 92%
rename from aws_inspector_target.py
rename to inspector_target.py
index a84e245d152..ffcfb343ad5 100644
--- a/aws_inspector_target.py
+++ b/inspector_target.py
@@ -8,18 +8,21 @@

 DOCUMENTATION = '''
 ---
-module: aws_inspector_target
+module: inspector_target
 version_added: 1.0.0
-short_description: Create, Update and Delete Amazon Inspector Assessment
-                   Targets
-description: Creates, updates, or deletes Amazon Inspector Assessment Targets
-             and manages the required Resource Groups.
-author: "Dennis Conrad (@dennisconrad)"
+short_description: Create, Update and Delete Amazon Inspector Assessment Targets
+description:
+  - Creates, updates, or deletes Amazon Inspector Assessment Targets and manages
+    the required Resource Groups.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_inspector_target). + The usage did not change. +author: + - "Dennis Conrad (@dennisconrad)" options: name: description: - - The user-defined name that identifies the assessment target. The name - must be unique within the AWS account. + - The user-defined name that identifies the assessment target. + - The name must be unique within the AWS account. required: true type: str state: @@ -33,29 +36,29 @@ tags: description: - Tags of the EC2 instances to be added to the assessment target. - - Required if C(state=present). + - Required if I(state=present). type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' - name: Create my_target Assessment Target - community.aws.aws_inspector_target: + community.aws.inspector_target: name: my_target tags: role: scan_target - name: Update Existing my_target Assessment Target with Additional Tags - community.aws.aws_inspector_target: + community.aws.inspector_target: name: my_target tags: env: dev role: scan_target - name: Delete my_target Assessment Target - community.aws.aws_inspector_target: + community.aws.inspector_target: name: my_target state: absent ''' From fe175018ccf39c2584f9949406924262e8aaf100 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 6 Jul 2022 11:37:59 +0200 Subject: [PATCH 536/683] Rename aws_secrets (#1315) Rename aws_secrets SUMMARY in line with the new naming guidelines rename aws_secret to secretsmanager_secret ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/aws_secret.py plugins/modules/secretsmanager_secret.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_secret.py => secretsmanager_secret.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) rename aws_secret.py => secretsmanager_secret.py (98%) diff --git a/aws_secret.py b/secretsmanager_secret.py similarity index 98% rename from aws_secret.py rename to secretsmanager_secret.py index 044ba1061ae..2ce3351077b 100644 --- a/aws_secret.py +++ b/secretsmanager_secret.py @@ -8,11 +8,13 @@ DOCUMENTATION = r''' --- -module: aws_secret +module: secretsmanager_secret version_added: 1.0.0 short_description: Manage secrets stored in AWS Secrets Manager description: - Create, update, and delete secrets stored in AWS Secrets Manager. + - Prior to release 5.0.0 this module was called C(community.aws.aws_secret). + The usage did not change. 
author: - "REY Remi (@rrey)" options: @@ -80,14 +82,14 @@ EXAMPLES = r''' - name: Add string to AWS Secrets Manager - community.aws.aws_secret: + community.aws.secretsmanager_secret: name: 'test_secret_string' state: present secret_type: 'string' secret: "{{ super_secret_string }}" - name: Add a secret with resource policy attached - community.aws.aws_secret: + community.aws.secretsmanager_secret: name: 'test_secret_string' state: present secret_type: 'string' @@ -95,7 +97,7 @@ resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}" - name: remove string from AWS Secrets Manager - community.aws.aws_secret: + community.aws.secretsmanager_secret: name: 'test_secret_string' state: absent secret_type: 'string' From 64d4ce3d1acccbbcd706288952d97ef8f5841ca9 Mon Sep 17 00:00:00 2001 From: Josiah Vranyes <35812920+vranyes@users.noreply.github.com> Date: Wed, 6 Jul 2022 09:26:17 -0500 Subject: [PATCH 537/683] Add force_absent parameter to ecs_ecr module (#1316) Add force_absent parameter to ecs_ecr module SUMMARY Adds a force_absent parameter to the ecs_ecr module. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_ecr module ADDITIONAL INFORMATION It would be useful for the ecs_ecr module to have capability for removing repositories which still contain images. This PR adds that ability by adding an additional parameter force_absent which has a default value of false and essentially just gets passed to the boto3 call for repo deletion. The following was run against a repository with an image in it. - name: Add ecr repos and sync with external sources. hosts: localhost connection: local gather_facts: false tasks: - name: test the changes register: state ecs_ecr: state: absent force_absent: true region: us-east-1 name: myimage/test - debug: var: state This was run with force_absent: true, force_absent: false and force_absent not defined. The expected behavior was seen in all three scenarios. I haven't added any new cases to the integration tests because there doesn't seem to be a great way to sync images into the repo that is created as part of the tests. Reviewed-by: Mark Chappell Reviewed-by: Josiah Vranyes --- ecs_ecr.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 294459135fe..aa08e97d239 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -33,6 +33,13 @@ - JSON or dict that represents the new policy. required: false type: json + force_absent: + description: + - If I(force_absent=true), the repository will be removed, even if images are present. 
+ required: false + default: false + type: bool + version_added: 4.1.0 force_set_policy: description: - If I(force_set_policy=false), it prevents setting a policy that would prevent you from @@ -277,10 +284,10 @@ def set_repository_policy(self, registry_id, name, policy_text, force): 'could not find repository {0}'.format(printable)) return - def delete_repository(self, registry_id, name): + def delete_repository(self, registry_id, name, force): if not self.check_mode: repo = self.ecr.delete_repository( - repositoryName=name, **build_kwargs(registry_id)) + repositoryName=name, force=force, **build_kwargs(registry_id)) self.changed = True return repo else: @@ -397,6 +404,7 @@ def run(ecr, params): state = params['state'] policy_text = params['policy'] purge_policy = params['purge_policy'] + force_absent = params['force_absent'] registry_id = params['registry_id'] force_set_policy = params['force_set_policy'] image_tag_mutability = params['image_tag_mutability'].upper() @@ -514,7 +522,7 @@ def run(ecr, params): elif state == 'absent': result['name'] = name if repo: - ecr.delete_repository(registry_id, name) + ecr.delete_repository(registry_id, name, force_absent) result['changed'] = True except Exception as err: @@ -540,6 +548,7 @@ def main(): registry_id=dict(required=False), state=dict(required=False, choices=['present', 'absent'], default='present'), + force_absent=dict(required=False, type='bool', default=False), force_set_policy=dict(required=False, type='bool', default=False), policy=dict(required=False, type='json'), image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], From 3e4672e7f1caa0bf7c88490e06ac21ac08eca20c Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Wed, 6 Jul 2022 11:27:37 -0400 Subject: [PATCH 538/683] glue_connection - Avoid converting connection_parameter keys to lowercase (#518) glue_connection - Avoid converting connection_parameter keys to lowercase SUMMARY This is a follow to my own PR #503. This is a cosmetic change that prevents converting keys in connection_parameters dict to lowercase. ISSUE TYPE Bugfix Pull Request COMPONENT NAME aws_glue_connection ADDITIONAL INFORMATION As an example, this: - community.aws.glue_connection: name: test-connection connection_parameters: JDBC_ENFORCE_SSL: "false" ... is a valid value, while this: - community.aws.glue_connection: name: test-connection connection_parameters: jdbc_enforce_ssl: "false" ... is not. This PR simply aligns the module output to the expected input. Reviewed-by: Mark Chappell --- glue_connection.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/glue_connection.py b/glue_connection.py index c8adb83fc66..1f278d66b3b 100644 --- a/glue_connection.py +++ b/glue_connection.py @@ -109,10 +109,12 @@ RETURN = r''' connection_properties: - description: A dict of key-value pairs used as parameters for this connection. + description: + - (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection. + - This return key has been deprecated, and will be removed in a release after 2024-06-01. returned: when state is present type: dict - sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'} + sample: {'jdbc_connection_url':'jdbc:mysql://mydb:3306/databasename','username':'x','password':'y'} connection_type: description: The type of the connection. 
returned: when state is present @@ -149,6 +151,11 @@ returned: when state is present type: dict sample: {'subnet-id':'subnet-aabbccddee'} +raw_connection_properties: + description: A dict of key-value pairs used as parameters for this connection. + returned: when state is present + type: dict + sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'} ''' # Non-ansible imports @@ -309,7 +316,13 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co if changed and not module.check_mode: glue_connection = _await_glue_connection(connection, module) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {})) + if glue_connection: + module.deprecate("The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. Both values are returned for now.", + date='2024-06-01', collection_name='community.aws') + glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties'] + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties'])) def delete_glue_connection(connection, module, glue_connection): From 3512246142ed0feccfb5a5854022cc1304c36915 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 6 Jul 2022 18:19:51 +0200 Subject: [PATCH 539/683] Add missing note about renames to description of various modules (#1319) Add missing note about renames to description of various modules SUMMARY Minor post-rename docs cleanups Add notes about rename regen docs with FQCN in examples ISSUE TYPE Docs Pull Request COMPONENT NAME docs/community.aws.api_gateway_domain_module.rst docs/community.aws.api_gateway_module.rst docs/community.aws.application_autoscaling_policy_module.rst docs/community.aws.config_aggregation_authorization_module.rst docs/community.aws.config_aggregator_module.rst docs/community.aws.config_delivery_channel_module.rst docs/community.aws.config_recorder_module.rst docs/community.aws.config_rule_module.rst docs/community.aws.directconnect_confirm_connection_module.rst docs/community.aws.directconnect_connection_module.rst docs/community.aws.directconnect_gateway_module.rst docs/community.aws.directconnect_link_aggregation_group_module.rst docs/community.aws.directconnect_virtual_interface_module.rst docs/community.aws.elasticbeanstalk_app_module.rst docs/community.aws.msk_cluster_module.rst docs/community.aws.msk_config_module.rst docs/community.aws.storagegateway_info_module.rst plugins/modules/api_gateway.py plugins/modules/api_gateway_domain.py plugins/modules/config_aggregation_authorization.py plugins/modules/config_aggregator.py plugins/modules/config_delivery_channel.py plugins/modules/config_recorder.py plugins/modules/config_rule.py plugins/modules/directconnect_confirm_connection.py plugins/modules/directconnect_connection.py plugins/modules/directconnect_gateway.py plugins/modules/directconnect_link_aggregation_group.py plugins/modules/directconnect_virtual_interface.py plugins/modules/elasticbeanstalk_app.py plugins/modules/storagegateway_info.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- api_gateway.py | 2 ++ api_gateway_domain.py | 2 ++ config_aggregation_authorization.py | 2 ++ config_aggregator.py | 2 ++ config_delivery_channel.py | 2 ++ config_recorder.py | 2 ++ config_rule.py | 2 ++ directconnect_confirm_connection.py | 2 ++ directconnect_connection.py | 2 ++ directconnect_gateway.py | 2 ++ directconnect_link_aggregation_group.py | 
2 ++ directconnect_virtual_interface.py | 2 ++ elasticbeanstalk_app.py | 2 ++ storagegateway_info.py | 2 ++ 14 files changed, 28 insertions(+) diff --git a/api_gateway.py b/api_gateway.py index 4e467bf677b..787a7f4d1ec 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -22,6 +22,8 @@ - swagger_file and swagger_text are passed directly on to AWS transparently whilst swagger_dict is an ansible dict which is converted to JSON before the API definitions are uploaded. + - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway). + The usage did not change. options: api_id: description: diff --git a/api_gateway_domain.py b/api_gateway_domain.py index 7352dc69477..f5183ae92af 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -14,6 +14,8 @@ - Manages API Gateway custom domains for API GW Rest APIs. - AWS API Gateway custom domain setups use CloudFront behind the scenes. So you will get a CloudFront distribution as a result, configured to be aliased with your domain. + - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway_domain). + The usage did not change. version_added: '3.3.0' author: - 'Stefan Horning (@stefanhorning)' diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py index d2b97fa9a4b..1ebfe0d94d6 100644 --- a/config_aggregation_authorization.py +++ b/config_aggregation_authorization.py @@ -14,6 +14,8 @@ short_description: Manage cross-account AWS Config authorizations description: - Module manages AWS Config aggregation authorizations. + - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregation_authorization). + The usage did not change. author: - "Aaron Smith (@slapula)" options: diff --git a/config_aggregator.py b/config_aggregator.py index a78a2cf84d2..7a9bf4836f7 100644 --- a/config_aggregator.py +++ b/config_aggregator.py @@ -14,6 +14,8 @@ short_description: Manage AWS Config aggregations across multiple accounts description: - Module manages AWS Config aggregator resources. + - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregator). + The usage did not change. author: - "Aaron Smith (@slapula)" options: diff --git a/config_delivery_channel.py b/config_delivery_channel.py index 4a282335e41..333c796fc71 100644 --- a/config_delivery_channel.py +++ b/config_delivery_channel.py @@ -14,6 +14,8 @@ short_description: Manage AWS Config delivery channels description: - This module manages AWS Config delivery locations for rule checks and configuration info. + - Prior to release 5.0.0 this module was called C(community.aws.aws_config_delivery_channel). + The usage did not change. author: - "Aaron Smith (@slapula)" options: diff --git a/config_recorder.py b/config_recorder.py index 6fda4a6df8a..65a8c9d1f9f 100644 --- a/config_recorder.py +++ b/config_recorder.py @@ -14,6 +14,8 @@ short_description: Manage AWS Config Recorders description: - Module manages AWS Config configuration recorder settings. + - Prior to release 5.0.0 this module was called C(community.aws.aws_config_recorder). + The usage did not change. author: - "Aaron Smith (@slapula)" options: diff --git a/config_rule.py b/config_rule.py index ef4e9fab392..36654c735c4 100644 --- a/config_rule.py +++ b/config_rule.py @@ -14,6 +14,8 @@ short_description: Manage AWS Config rule resources description: - Module manages AWS Config rules. + - Prior to release 5.0.0 this module was called C(community.aws.aws_config_rule). + The usage did not change. 
author: - "Aaron Smith (@slapula)" options: diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py index 57934f3a332..531683af2e4 100644 --- a/directconnect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -17,6 +17,8 @@ - Confirms the creation of a hosted DirectConnect, which requires approval before it can be used. - DirectConnect connections that require approval would be in the C(ordering). - After confirmation, they will move to the C(pending) state and finally the C(available) state. + - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_confirm_connection). + The usage did not change. author: - "Matt Traynham (@mtraynham)" extends_documentation_fragment: diff --git a/directconnect_connection.py b/directconnect_connection.py index 20ab393fd75..da2a06b9227 100644 --- a/directconnect_connection.py +++ b/directconnect_connection.py @@ -15,6 +15,8 @@ - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location. - Upon creation the connection may be added to a link aggregation group or established as a standalone connection. - The connection may later be associated or disassociated with a link aggregation group. + - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_connection). + The usage did not change. author: - "Sloane Hertel (@s-hertel)" extends_documentation_fragment: diff --git a/directconnect_gateway.py b/directconnect_gateway.py index bdf388675f6..987419d79c9 100644 --- a/directconnect_gateway.py +++ b/directconnect_gateway.py @@ -17,6 +17,8 @@ - Deletes AWS Direct Connect Gateway. - Attaches Virtual Gateways to Direct Connect Gateway. - Detaches Virtual Gateways to Direct Connect Gateway. + - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway). + The usage did not change. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index 77ff74de6d5..f340ef3fcfb 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -13,6 +13,8 @@ short_description: Manage Direct Connect LAG bundles description: - Create, delete, or modify a Direct Connect link aggregation group. + - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_link_aggregation_group). + The usage did not change. author: - "Sloane Hertel (@s-hertel)" extends_documentation_fragment: diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py index 8cd5fd70f55..4f96459e179 100644 --- a/directconnect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -13,6 +13,8 @@ short_description: Manage Direct Connect virtual interfaces description: - Create, delete, or modify a Direct Connect public or private virtual interface. + - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_virtual_interface). + The usage did not change. author: - "Sloane Hertel (@s-hertel)" options: diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py index 6eee7b13606..de18681a6d0 100644 --- a/elasticbeanstalk_app.py +++ b/elasticbeanstalk_app.py @@ -15,6 +15,8 @@ description: - Creates, updates, deletes Elastic Beanstalk applications if I(app_name) is provided. + - Prior to release 5.0.0 this module was called C(community.aws.aws_elasticbeanstalk_app). + The usage did not change. 
options: app_name: diff --git a/storagegateway_info.py b/storagegateway_info.py index a765300e354..9209dc401b4 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -15,6 +15,8 @@ short_description: Fetch AWS Storage Gateway information description: - Fetch AWS Storage Gateway information + - Prior to release 5.0.0 this module was called C(community.aws.aws_sgw_info). + The usage did not change. author: - Loic Blot (@nerzhul) options: From 226fa099774658ba6abb1ca43bf75aa75580edec Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 7 Jul 2022 00:21:17 -0700 Subject: [PATCH 540/683] route53_health_check: Add feature to create multiple health checks without updating existing health check (#1143) * Add health_check_name / name parameter support to naming health checks during creation * Handle create/update when health_check when name is unique identifier * Fix bug: tags not specified but name is specified resulting in python error * Add integration tests for as unique identifier * code cleanup to remove redundant usage of manage_tags * Feat: ability to Update and Delete health checks by ID * Add integration tests for testing update/delete health check by ID * Minor fix, improve login to handle find_health_check() call * Use required_together param * Added changelogs fragment * use fqdn for examples, fix changelog variable names --- route53_health_check.py | 175 +++++++++++++++++++++++++++++++++++----- 1 file changed, 157 insertions(+), 18 deletions(-) diff --git a/route53_health_check.py b/route53_health_check.py index 5b7cce3c147..83283ecf646 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -44,7 +44,7 @@ description: - The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy. - required: true + - Once health_check is created, type can not be changed. choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ] type: str resource_path: @@ -86,6 +86,28 @@ - Will default to C(3) if not specified on creation. choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] type: int + health_check_name: + description: + - Name of the Health Check. + - Used together with I(use_unique_names) to set/make use of I(health_check_name) as a unique identifier. + type: str + required: False + aliases: ['name'] + version_added: 4.1.0 + use_unique_names: + description: + - Used together with I(health_check_name) to set/make use of I(health_check_name) as a unique identifier. + type: bool + required: False + version_added: 4.1.0 + health_check_id: + description: + - ID of the health check to be update or deleted. + - If provided, a health check can be updated or deleted based on the ID as unique identifier. 
+ type: str + required: False + aliases: ['id'] + version_added: 4.1.0 author: - "zimbatm (@zimbatm)" notes: @@ -120,10 +142,35 @@ weight: 100 health_check: "{{ my_health_check.health_check.id }}" +- name: create a simple health check with health_check_name as unique identifier + community.aws.route53_health_check: + state: present + health_check_name: ansible + fqdn: ansible.com + port: 443 + type: HTTPS + use_unique_names: true + - name: Delete health-check community.aws.route53_health_check: state: absent fqdn: host1.example.com + +- name: Update Health check by ID - update ip_address + community.aws.route53_health_check: + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ip_address: 1.2.3.4 + +- name: Update Health check by ID - update port + community.aws.route53_health_check: + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ip_address: 8080 + +- name: Delete Health check by ID + community.aws.route53_health_check: + state: absent + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ''' RETURN = r''' @@ -249,7 +296,6 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): # Additionally, we can't properly wrap the paginator, so retrying means # starting from scratch with a paginator results = _list_health_checks() - while True: for check in results.get('HealthChecks'): config = check.get('HealthCheckConfig') @@ -268,6 +314,20 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): return None +def get_existing_checks_with_name(): + results = _list_health_checks() + health_checks_with_name = {} + while True: + for check in results.get('HealthChecks'): + if 'Name' in describe_health_check(check['Id'])['tags']: + check_name = describe_health_check(check['Id'])['tags']['Name'] + health_checks_with_name[check_name] = check + if results.get('IsTruncated', False): + results = _list_health_checks(Marker=results.get('NextMarker')) + else: + return health_checks_with_name + + def delete_health_check(check_id): if not check_id: return False, None @@ -348,10 +408,14 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ def update_health_check(existing_check): - # In theory it's also possible to update the IPAddress, Port and - # FullyQualifiedDomainName, however, because we use these in lieu of a - # 'Name' to uniquely identify the health check this isn't currently - # supported. If we accepted an ID it would be possible to modify them. 
+ # It's possible to update the following parameters + # - ResourcePath + # - SearchString + # - FailureThreshold + # - Disabled + # - IPAddress + # - Port + # - FullyQualifiedDomainName changes = dict() existing_config = existing_check.get('HealthCheckConfig') @@ -372,10 +436,23 @@ def update_health_check(existing_check): if disabled is not None and disabled != existing_config.get('Disabled'): changes['Disabled'] = module.params.get('disabled') + # If updating based on Health Check ID or health_check_name, we can update + if module.params.get('health_check_id') or module.params.get('use_unique_names'): + ip_address = module.params.get('ip_address', None) + if ip_address is not None and ip_address != existing_config.get('IPAddress'): + changes['IPAddress'] = module.params.get('ip_address') + + port = module.params.get('port', None) + if port is not None and port != existing_config.get('Port'): + changes['Port'] = module.params.get('port') + + fqdn = module.params.get('fqdn', None) + if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'): + changes['FullyQualifiedDomainName'] = module.params.get('fqdn') + # No changes... if not changes: return False, None - if module.check_mode: return True, 'update' @@ -419,7 +496,7 @@ def main(): disabled=dict(type='bool'), ip_address=dict(), port=dict(type='int'), - type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), + type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), resource_path=dict(), fqdn=dict(), string_match=dict(), @@ -427,16 +504,27 @@ failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool'), + health_check_id=dict(type='str', aliases=['id'], required=False), + health_check_name=dict(type='str', aliases=['name'], required=False), + use_unique_names=dict(type='bool', required=False), ) args_one_of = [ - ['ip_address', 'fqdn'], + ['ip_address', 'fqdn', 'health_check_id'], ] args_if = [ ['type', 'TCP', ('port',)], ] + args_required_together = [ + ['use_unique_names', 'health_check_name'], + ] + + args_mutually_exclusive = [ + ['health_check_id', 'health_check_name'] + ] + global module global client @@ -444,6 +532,8 @@ def main(): argument_spec=argument_spec, required_one_of=args_one_of, required_if=args_if, + required_together=args_required_together, + mutually_exclusive=args_mutually_exclusive, supports_check_mode=True, ) @@ -455,6 +545,9 @@ version='5.0.0', collection_name='community.aws') module.params['purge_tags'] = False + if not module.params.get('health_check_id') and not module.params.get('type'): + module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") + state_in = module.params.get('state') ip_addr_in = module.params.get('ip_address') port_in = module.params.get('port') @@ -464,6 +557,8 @@ string_match_in = module.params.get('string_match') request_interval_in = module.params.get('request_interval') failure_threshold_in = module.params.get('failure_threshold') + health_check_name = module.params.get('health_check_name') + tags = module.params.get('tags') # Default port if port_in is None: @@ -484,22 +579,66 @@ def main(): action = None check_id = None - existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - - if existing_check: - check_id = existing_check.get('Id') - + if module.params.get('use_unique_names') or 
module.params.get('health_check_id'): + module.deprecate( + 'The health_check_name is currently a non-required parameter.' + ' This behavior will change and health_check_name ' + ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', + version='6.0.0', collection_name='community.aws') + + # If updating or deleting a Health Check based on ID + update_delete_by_id = False + if module.params.get('health_check_id'): + update_delete_by_id = True + id_to_update_delete = module.params.get('health_check_id') + try: + existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) + else: + existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + if existing_check: + check_id = existing_check.get('Id') + + # Delete Health Check if state_in == 'absent': - changed, action = delete_health_check(check_id) + if update_delete_by_id: + changed, action = delete_health_check(id_to_update_delete) + else: + changed, action = delete_health_check(check_id) check_id = None + + # Create Health Check elif state_in == 'present': - if existing_check is None: + if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + + # Update Health Check else: - changed, action = update_health_check(existing_check) + # If health_check_name is a unique identifier + if module.params.get('use_unique_names'): + existing_checks_with_name = get_existing_checks_with_name() + # update the health_check if another health check with the same name exists + if health_check_name in existing_checks_with_name: + changed, action = update_health_check(existing_checks_with_name[health_check_name]) + else: + # create a new health_check if another health check with the same name does not exist + changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + # Add a Name tag to the health check + if check_id: + if not tags: + tags = {} + tags['Name'] = health_check_name + + else: + if update_delete_by_id: + changed, action = update_health_check(existing_check) + else: + changed, action = update_health_check(existing_check) + if check_id: changed |= manage_tags(module, client, 'healthcheck', check_id, - module.params.get('tags'), module.params.get('purge_tags')) + tags, module.params.get('purge_tags')) health_check = describe_health_check(id=check_id) health_check['action'] = action From 09e5dc783d9789e685927ca50a7a7e61111da37a Mon Sep 17 00:00:00 2001 From: "Saleh A. Saber" Date: Thu, 7 Jul 2022 16:36:07 +0100 Subject: [PATCH 541/683] Add autoscaling_complete_lifecycle_action module (#479) Add autoscaling_complete_lifecycle_action module SUMMARY I am adding a new Ansible AWS module which provides the ability to complete a lifecycle action for an AWS Auto Scaling Group. It basically implements the CompleteLifecycleAction AWS API.
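As a quick usage sketch for reviewers (a hypothetical task; the ASG, hook and instance identifiers are placeholders rather than values taken from this patch), the new module would be driven from a playbook like so:

- name: Signal the Auto Scaling Group to proceed with a held instance
  community.aws.autoscaling_complete_lifecycle_action:
    asg_name: my-auto-scaling-group        # placeholder ASG name
    lifecycle_hook_name: my-launch-hook    # placeholder hook name
    lifecycle_action_result: CONTINUE      # ABANDON would abort/roll back instead
    instance_id: i-0123456789abcdef0       # placeholder instance ID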
ISSUE TYPE New Module Pull Request COMPONENT NAME aws_asg_complete_lifecycle_action.py Reviewed-by: Mark Chappell --- autoscaling_complete_lifecycle_action.py | 101 +++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 autoscaling_complete_lifecycle_action.py diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py new file mode 100644 index 00000000000..63fa3b63ef8 --- /dev/null +++ b/autoscaling_complete_lifecycle_action.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: autoscaling_complete_lifecycle_action +short_description: Completes the lifecycle action of an instance +description: + - Used to complete the lifecycle action for the specified instance with the specified result. +version_added: "4.1.0" +author: + - Saleh Abbas (@salehabbas) +options: + asg_name: + description: + - The name of the Auto Scaling Group which the instance belongs to. + type: str + required: true + lifecycle_hook_name: + description: + - The name of the lifecycle hook to complete. + type: str + required: true + lifecycle_action_result: + description: + - The action for the lifecycle hook to take. + choices: ['CONTINUE', 'ABANDON'] + type: str + required: true + instance_id: + description: + - The ID of the instance. + type: str + required: true +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +# Complete the lifecycle action +- aws_asg_complete_lifecycle_action: + asg_name: my-auto-scaling-group + lifecycle_hook_name: my-lifecycle-hook + lifecycle_action_result: CONTINUE + instance_id: i-123knm1l2312 +''' + +RETURN = ''' +--- +status: + description: How things went + returned: success + type: str + sample: ["OK"] +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + + +def main(): + argument_spec = dict( + asg_name=dict(required=True, type='str'), + lifecycle_hook_name=dict(required=True, type='str'), + lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']), + instance_id=dict(required=True, type='str') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + asg_name = module.params.get('asg_name') + lifecycle_hook_name = module.params.get('lifecycle_hook_name') + lifecycle_action_result = module.params.get('lifecycle_action_result') + instance_id = module.params.get('instance_id') + + autoscaling = module.client('autoscaling') + try: + results = autoscaling.complete_lifecycle_action( + LifecycleHookName=lifecycle_hook_name, + AutoScalingGroupName=asg_name, + LifecycleActionResult=lifecycle_action_result, + InstanceId=instance_id + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to complete the lifecycle action") + + module.exit_json(results=results) + + +if __name__ == '__main__': + main() From 79dad520ac919bc957706f79ada6d46c2645d8ac Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 7 Jul 2022 13:43:55 -0700 Subject: [PATCH 542/683] route53_info: Add snake_cased return key,values to remaining methods (#1322) route53_info: Add 
snake_cased return key,values to remaining methods SUMMARY Following up on #1236 Found more places where the route53_info module does not return snake_case output. Added snake_case output to checker_ip_range_details, reusable_delegation_set_details, and get_health_check methods. ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_info Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell --- route53_info.py | 123 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) diff --git a/route53_info.py b/route53_info.py index 4e90556a1ec..68b0bb54b73 100644 --- a/route53_info.py +++ b/route53_info.py @@ -360,6 +360,88 @@ type: str sample: HTTPS version_added: 4.0.0 +checker_ip_ranges: + description: A list of IP ranges in CIDR format for Amazon Route 53 health checkers. + returned: when I(query=checker_ip_range) + type: list + elements: str + version_added: 4.1.0 +delegation_sets: + description: A list of dicts that contains information about the reusable delegation set. + returned: when I(query=reusable_delegation_set) + type: list + elements: dict + version_added: 4.1.0 +health_check: + description: A dict of Route53 health check details returned by get_health_check_status in boto3. + type: dict + returned: when I(query=health_check) and I(health_check_method=details) + contains: + id: + description: The identifier that Amazon Route53 assigned to the health check at the time of creation. + type: str + sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' + health_check_version: + description: The version of the health check. + type: str + sample: 1 + caller_reference: + description: A unique string that you specified when you created the health check. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + health_check_config: + description: A dict that contains detailed information about one health check. + type: dict + contains: + disabled: + description: Whether Route53 should stop performing health checks on an endpoint. + type: bool + sample: false + enable_sni: + description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation. + type: bool + sample: true + failure_threshold: + description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change the current status of the endpoint. + type: int + sample: 3 + fully_qualified_domain_name: + description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. + type: str + sample: 'hello' + inverted: + description: Whether Route53 should invert the status of a health check. + type: bool + sample: false + ip_address: + description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. + type: str + sample: 192.0.2.44 + measure_latency: + description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. + type: bool + sample: false + port: + description: The port of the endpoint that Route53 should perform health checks on. + type: int + sample: 80 + request_interval: + description: The number of seconds between the time that Route53 gets a response from the endpoint and the next health check request. + type: int + sample: 30 + resource_path: + description: The path that Route53 requests when performing health checks. + type: str + sample: '/welcome.html' + search_string: + description: The string that Route53 uses to search for in the response body from the specified resource.
+ type: str + sample: 'test-string-to-match' + type: + description: The type of the health check. + type: str + sample: HTTPS + version_added: 4.1.0 ResourceRecordSets: description: A deprecated CamelCased list of resource record sets returned by list_resource_record_sets in boto3. \ This list contains the same elements/parameters as its snake_cased version mentioned above. \ @@ -381,6 +463,26 @@ type: list elements: dict returned: when I(query=health_check) +CheckerIpRanges: + description: A deprecated CamelCased list of IP ranges in CIDR format for Amazon Route 53 health checkers.\ + This list contains the same elements/parameters as its snake_cased version mentioned above. \ + This field is deprecated and will be removed in the 6.0.0 release. + type: list + elements: str + returned: when I(query=checker_ip_range) +DelegationSets: + description: A deprecated CamelCased list of dicts that contains information about the reusable delegation set. \ + This list contains the same elements/parameters as its snake_cased version mentioned above. \ + This field is deprecated and will be removed in the 6.0.0 release. + type: list + elements: dict + returned: when I(query=reusable_delegation_set) +HealthCheck: + description: A deprecated CamelCased dict of Route53 health check details returned by get_health_check_status in boto3. \ + This dict contains the same elements/parameters as its snake_cased version mentioned above. \ + This field is deprecated and will be removed in the 6.0.0 release. + type: dict + returned: when I(query=health_check) and I(health_check_method=details) ''' try: @@ -431,6 +533,12 @@ def reusable_delegation_set_details(): params['DelegationSetId'] = module.params.get('delegation_set_id') results = client.get_reusable_delegation_set(**params) + results['delegation_sets'] = results['DelegationSets'] + module.deprecate("The 'CamelCase' return values with key 'DelegationSets' are deprecated and \ will be replaced by 'snake_case' return values with key 'delegation_sets'. \ Both case values are returned for now.", date='2025-01-01', collection_name='community.aws') + return results @@ -495,7 +603,14 @@ def change_details(): def checker_ip_range_details(): - return client.get_checker_ip_ranges() + results = client.get_checker_ip_ranges() + results['checker_ip_ranges'] = results['CheckerIpRanges'] + module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' are deprecated and \ will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ Both case values are returned for now.", date='2025-01-01', collection_name='community.aws') + + return results def get_count(): @@ -522,6 +637,12 @@ def get_health_check(): elif module.params.get('health_check_method') == 'status': results = client.get_health_check_status(**params) + results['health_check'] = camel_dict_to_snake_dict(results['HealthCheck']) + module.deprecate("The 'CamelCase' return values with key 'HealthCheck' are deprecated and \ will be replaced by 'snake_case' return values with key 'health_check'. 
\ + Both case values are returned for now.", + date='2025-01-01', collection_name='community.aws') + + return results From d02cb39a10141496a41b409f979b6b2bbcd6691f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Jul 2022 22:13:36 +0200 Subject: [PATCH 543/683] s3_sync - Improve error handling when testing for existing files (#1330) s3_sync - Improve error handling when testing for existing files SUMMARY fixes: #58 Simplifies handling of '404' codes (use is_boto3_error_code). Assume 403 files need updating (it's the best we can do, and mimics the aws cli). Allows Boto3 exceptions to fall through to the outer try/except clause and cleanly fail rather than rethrowing them as a generic Exception(). ISSUE TYPE Feature Pull Request COMPONENT NAME s3_sync ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso --- s3_sync.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/s3_sync.py b/s3_sync.py index f0135f6c13a..aeb46b3fb55 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -265,6 +265,7 @@ # import module snippets from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code # the following function, calculate_multipart_etag, is from tlastowka @@ -427,17 +428,12 @@ def head_s3(s3, bucket, s3keys): retkeys = [] for entry in s3keys: retentry = entry.copy() - # don't modify the input dict try: retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) - except botocore.exceptions.ClientError as err: - if (hasattr(err, 'response') and - 'ResponseMetadata' in err.response and - 'HTTPStatusCode' in err.response['ResponseMetadata'] and - str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'): - pass - else: - raise Exception(err) + # 404 (Missing) - File doesn't exist, we'll need to upload + # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload + except is_boto3_error_code(['404', '403']): + pass retkeys.append(retentry) return retkeys From fde8b4a87bda6fc63814f00cf06d20a985086baa Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Jul 2022 22:14:53 +0200 Subject: [PATCH 544/683] ec2_vpc_vgw_info - update to not throw an error when run in check_mode (#1331) ec2_vpc_vgw_info - update to not throw an error when run in check_mode SUMMARY fixes: #137 ec2_vpc_vgw_info currently throws an error when run in check_mode (it's using "DryRun"); this is unexpected and undocumented behaviour, and has the same practical effect as not supporting check_mode at all. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/ec2_vpc_vgw_info.py ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso --- ec2_vpc_vgw_info.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index a84b07bf589..5ddb04d2ae3 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: Gather information about virtual gateways in AWS description: - - Gather information about virtual gateways in AWS. + - Gather information about virtual gateways (VGWs) in AWS. options: filters: description: @@ -21,7 +21,7 @@ type: dict vpn_gateway_ids: description: - - Get details of a specific Virtual Gateway ID. This value should be provided as a list. + - Get details of a specific Virtual Gateway ID. 
type: list elements: str author: @@ -151,7 +151,6 @@ def list_virtual_gateways(client, module): params = dict() params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['DryRun'] = module.check_mode if module.params.get("vpn_gateway_ids"): params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids") @@ -168,7 +167,7 @@ def main(): argument_spec = dict( filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None, elements='str') + vpn_gateway_ids=dict(type='list', default=None, elements='str'), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) From f504b2a45f6fe88ad0946b86916c403e039c8a28 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Jul 2022 22:31:47 +0200 Subject: [PATCH 545/683] kms_key_info - improve AccessDeniedException handling (#1332) kms_key_info - improve AccessDeniedException handling SUMMARY fixes: #206 Because KMS doesn't support server-side filtering of keys we have to pull full metadata for all KMS keys unless querying a specific key. This can result in additional permission denied errors, even though we may have permissions to read many of the keys. Try to handle AccessDeniedException more liberally. ISSUE TYPE Bugfix Pull Request COMPONENT NAME kms_key_info ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso --- kms_key_info.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/kms_key_info.py b/kms_key_info.py index b9ecf80fcc3..1ba01e50d61 100644 --- a/kms_key_info.py +++ b/kms_key_info.py @@ -435,13 +435,19 @@ def get_key_details(connection, module, key_id, tokens=None): key_id = result['Arn'] except is_boto3_error_code('NotFoundException'): return None + except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except + module.warn('Permission denied fetching key metadata ({0})'.format(key_id)) + return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key metadata") result['KeyArn'] = result.pop('Arn') try: aliases = get_kms_aliases_lookup(connection) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except is_boto3_error_code('AccessDeniedException'): + module.warn('Permission denied fetching key aliases') + aliases = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain aliases") # We can only get aliases for our own account, so we don't need the full ARN result['aliases'] = aliases.get(result['KeyId'], []) @@ -452,8 +458,12 @@ def get_key_details(connection, module, key_id, tokens=None): try: result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except is_boto3_error_code('AccessDeniedException'): + module.warn('Permission denied fetching key grants ({0})'.format(key_id)) + result['grants'] = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key grants") + tags = get_kms_tags(connection, module, key_id) result = camel_dict_to_snake_dict(result) From e6b9eb52d8eb742c2ff2dd9d91d0eef16b59c946 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Jul 2022 
22:32:03 +0200 Subject: [PATCH 546/683] Update s3_sync doc (#1329) Update s3_sync doc SUMMARY fixes: #1229 The key returned by the module is uploads, not uploaded. ISSUE TYPE Docs Pull Request COMPONENT NAME s3_sync ADDITIONAL INFORMATION Docs don't match the actual returned value. Reviewed-by: Joseph Torcasso --- s3_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3_sync.py b/s3_sync.py index aeb46b3fb55..2bedaa70da5 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -225,7 +225,7 @@ "whysize": "151 / 151", "whytime": "1477931256 / 1477929260" }] -uploaded: +uploads: description: file listing (dicts) of files that were actually uploaded returned: always type: list From 50e525b24afc02369229de81bc1827d0915514a3 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 9 Jul 2022 22:33:53 +0200 Subject: [PATCH 547/683] Minor fixups (#1328) ecs_service - Minor fixups SUMMARY Bad rebase resulted in a duplicated parameter. Fixup "changed" after adding initial support for tags ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION Bugs aren't in a released version yet, no need for changelog Reviewed-by: Joseph Torcasso --- ecs_service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 800d670be21..95fa43b52b4 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -730,7 +730,7 @@ def is_matching_service(self, expected, existing): # but the user is just entering # ansible-fargate-nginx:3 if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: - if existing['deploymentController']['type'] != 'CODE_DEPLOY': + if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY': return False if expected.get('health_check_grace_period_seconds'): @@ -740,10 +740,10 @@ def is_matching_service(self, expected, existing): if (expected['load_balancers'] or []) != existing['loadBalancers']: return False - if expected['propagate_tags'] != existing['propagateTags']: + if (expected['propagate_tags'] or "NONE") != existing['propagateTags']: return False - if boto3_tag_list_to_ansible_dict(existing['tags']) != expected['tags']: + if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}): return False # expected is params. DAEMON scheduling strategy returns desired count equal to @@ -1049,7 +1049,6 @@ def main(): network_configuration, serviceRegistries, module.params['launch_type'], - module.params['scheduling_strategy'], module.params['platform_version'], module.params['scheduling_strategy'], capacityProviders, From 03d8b6c5297831ef53e5bf20d71b0a54d3c6ee23 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 10 Jul 2022 09:30:07 +0200 Subject: [PATCH 548/683] secretsmanager_secret - Support adding JSON (#1333) secretsmanager_secret - Support adding JSON SUMMARY fixes: #656 Amazon supports passing JSON in as the secret as a mechanism for storing and retrieving more complex structures. While in theory it's possible to pass JSON in as a string to secretsmanager_secret, Ansible often does funky things when templated strings are passed to a parameter (#656), making it non-trivial to pass JSON into secretsmanager_secret in practice.
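A brief sketch of the intended usage (the secret name and keys below are invented for illustration): passing a dict to json_secret lets the module's type='json' handling serialise it, so no hand-crafted JSON string ever goes through templating.

- name: Store a structured secret without hand-rolling a JSON string
  community.aws.secretsmanager_secret:
    name: my-app/database            # hypothetical secret name
    state: present
    json_secret:                     # the dict is serialised to JSON for storage,
      username: app_user             # avoiding the safe_eval mangling described below
      password: "{{ db_password }}"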
ISSUE TYPE Feature Pull Request COMPONENT NAME secretsmanager_secret ADDITIONAL INFORMATION Backstory: If Ansible sees {{ }} within a string it'll trigger the safe_eval handlers, automatically converting the JSON into a complex structure of lists/dicts, which is then converted to the Python string representation of the complex structures - the Python string representation is not valid JSON and breaks the AWS integration. Reviewed-by: Joseph Torcasso --- secretsmanager_secret.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index 2ce3351077b..f35f28762f3 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -54,8 +54,16 @@ secret: description: - Specifies string or binary data that you want to encrypt and store in the new version of the secret. + - Mutually exclusive with the I(json_secret) option. default: "" type: str + json_secret: + description: + - Specifies JSON-formatted data that you want to encrypt and store in the new version of the + secret. + - Mutually exclusive with the I(secret) option. + type: json + version_added: 4.1.0 resource_policy: description: - Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access @@ -423,6 +431,7 @@ def main(): 'kms_key_id': dict(), 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), + 'json_secret': dict(type='json', no_log=True), 'resource_policy': dict(type='json', default=None), 'tags': dict(type='dict', default=None, aliases=['resource_tags']), 'purge_tags': dict(type='bool', default=True), @@ -430,6 +439,7 @@ def main(): 'rotation_interval': dict(type='int', default=30), 'recovery_window': dict(type='int', default=30), }, + mutually_exclusive=[['secret', 'json_secret']], supports_check_mode=True, ) @@ -440,7 +450,7 @@ def main(): secret = Secret( module.params.get('name'), module.params.get('secret_type'), - module.params.get('secret'), + module.params.get('secret') or module.params.get('json_secret'), description=module.params.get('description'), kms_key_id=module.params.get('kms_key_id'), resource_policy=module.params.get('resource_policy'), From 3905518d301e40ba35999ad4c153bb5831c0cf9f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 11 Jul 2022 13:52:40 +0200 Subject: [PATCH 549/683] kms_key - Finish deprecation of policy_grant_types and related keys (#1344) kms_key - Finish deprecation of policy_grant_types and related keys SUMMARY Managing the KMS IAM Policy via policy_mode and policy_grant_types was fragile and previously deprecated. Complete the deprecation and remove the options in favour of the policy option. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/kms_key.py ADDITIONAL INFORMATION Original deprecation: ansible/ansible#60561 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- kms_key.py | 212 ----------------------------------------------------- 1 file changed, 212 deletions(-) diff --git a/kms_key.py b/kms_key.py index 53d691888b6..39ffc9109f9 100644 --- a/kms_key.py +++ b/kms_key.py @@ -43,62 +43,6 @@ - Whether the key should be automatically rotated every year. required: false type: bool - policy_mode: - description: - - (deprecated) Grant or deny access. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. 
Use I(policy) instead. - default: grant - choices: [ grant, deny ] - aliases: - - mode - type: str - policy_role_name: - description: - - (deprecated) Role to allow/deny access. - - One of I(policy_role_name) or I(policy_role_arn) are required. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. - required: false - aliases: - - role_name - type: str - policy_role_arn: - description: - - (deprecated) ARN of role to allow/deny access. - - One of I(policy_role_name) or I(policy_role_arn) are required. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. - type: str - required: false - aliases: - - role_arn - policy_grant_types: - description: - - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". - - Required when I(policy_mode=grant). - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. - required: false - aliases: - - grant_types - type: list - elements: str - policy_clean_invalid_entries: - description: - - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases. - - Only cleans if changes are being made. - - Used for modifying the Key Policy rather than modifying a grant and only - works on the default policy created through the AWS Console. - - This option has been deprecated, and will be removed in a release after 2021-12-01. Use I(policy) instead. - type: bool - default: true - aliases: - - clean_invalid_entries state: description: - Whether a key should be present or absent. @@ -205,22 +149,6 @@ ''' EXAMPLES = r''' -# Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile -# and has been deprecated in favour of the policy option. -- name: grant user-style access to production secrets - community.aws.kms_key: - args: - alias: "alias/my_production_secrets" - policy_mode: grant - policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" - policy_grant_types: "role,role grant" -- name: remove access to production secrets from role - community.aws.kms_key: - args: - alias: "alias/my_production_secrets" - policy_mode: deny - policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L" - # Create a new KMS key - community.aws.kms_key: alias: mykey @@ -488,15 +416,12 @@ } import json -import re try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils.six import string_types - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @@ -987,106 +912,6 @@ def get_arn_from_role_name(iam, rolename): raise Exception('could not find arn for name {0}.'.format(rolename)) -def _clean_statement_principals(statement, clean_invalid_entries): - - # create Principal and 'AWS' so we can safely use them later. 
- if not isinstance(statement.get('Principal'), dict): - statement['Principal'] = dict() - - # If we have a single AWS Principal, ensure we still have a list (to manipulate) - if 'AWS' in statement['Principal'] and isinstance(statement['Principal']['AWS'], string_types): - statement['Principal']['AWS'] = [statement['Principal']['AWS']] - if not isinstance(statement['Principal'].get('AWS'), list): - statement['Principal']['AWS'] = list() - - valid_princ = re.compile('^arn:aws:(iam|sts)::') - - invalid_entries = [item for item in statement['Principal']['AWS'] if not valid_princ.match(item)] - valid_entries = [item for item in statement['Principal']['AWS'] if valid_princ.match(item)] - - if bool(invalid_entries) and clean_invalid_entries: - statement['Principal']['AWS'] = valid_entries - return True - - return False - - -def _do_statement_grant(statement, role_arn, grant_types, mode, grant_type): - - if mode == 'grant': - if grant_type in grant_types: - if role_arn not in statement['Principal']['AWS']: # needs to be added. - statement['Principal']['AWS'].append(role_arn) - return 'add' - elif role_arn in statement['Principal']['AWS']: # not one the places the role should be - statement['Principal']['AWS'].remove(role_arn) - return 'remove' - return None - - if mode == 'deny' and role_arn in statement['Principal']['AWS']: - # we don't selectively deny. that's a grant with a - # smaller list. so deny=remove all of this arn. - statement['Principal']['AWS'].remove(role_arn) - return 'remove' - return None - - -def do_policy_grant(module, kms, keyarn, role_arn, grant_types, mode='grant', dry_run=True, clean_invalid_entries=True): - ret = {} - policy = json.loads(get_key_policy_with_backoff(kms, keyarn, 'default')['Policy']) - - changes_needed = {} - assert_policy_shape(module, policy) - had_invalid_entries = False - for statement in policy['Statement']: - # We already tested that these are the only types in the statements - for grant_type in statement_label: - # Are we on this grant type's statement? - if statement['Sid'] != statement_label[grant_type]: - continue - - had_invalid_entries |= _clean_statement_principals(statement, clean_invalid_entries) - change = _do_statement_grant(statement, role_arn, grant_types, mode, grant_type) - if change: - changes_needed[grant_type] = change - - ret['changes_needed'] = changes_needed - ret['had_invalid_entries'] = had_invalid_entries - ret['new_policy'] = policy - ret['changed'] = bool(changes_needed) - - if dry_run or not ret['changed']: - return ret - - try: - policy_json_string = json.dumps(policy) - kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update key_policy', new_policy=policy_json_string) - - return ret - - -def assert_policy_shape(module, policy): - '''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.''' - errors = [] - if policy['Version'] != "2012-10-17": - errors.append('Unknown version/date ({0}) of policy. 
Things are probably different than we assumed they were.'.format(policy['Version'])) - - found_statement_type = {} - for statement in policy['Statement']: - for label, sidlabel in statement_label.items(): - if statement['Sid'] == sidlabel: - found_statement_type[label] = True - - for statementtype in statement_label: - if not found_statement_type.get(statementtype): - errors.append('Policy is missing {0}.'.format(statementtype)) - - if errors: - module.fail_json(msg='Problems asserting policy shape. Cowardly refusing to modify it', errors=errors, policy=policy) - - def canonicalize_alias_name(alias): if alias is None: return None @@ -1117,38 +942,9 @@ def fetch_key_metadata(connection, module, key_id, alias): module.fail_json_aws(e, 'Failed to fetch key metadata.') -def update_policy_grants(connection, module, key_metadata, mode): - iam = module.client('iam') - key_id = key_metadata['Arn'] - - if module.params.get('policy_role_name') and not module.params.get('policy_role_arn'): - module.params['policy_role_arn'] = get_arn_from_role_name(iam, module.params['policy_role_name']) - if not module.params.get('policy_role_arn'): - module.fail_json(msg='policy_role_arn or policy_role_name is required to {0}'.format(module.params['policy_mode'])) - - # check the grant types for 'grant' only. - if mode == 'grant': - for grant_type in module.params['policy_grant_types']: - if grant_type not in statement_label: - module.fail_json(msg='{0} is an unknown grant type.'.format(grant_type)) - - return do_policy_grant(module, connection, - key_id, - module.params['policy_role_arn'], - module.params['policy_grant_types'], - mode=mode, - dry_run=module.check_mode, - clean_invalid_entries=module.params['policy_clean_invalid_entries']) - - def main(): argument_spec = dict( alias=dict(aliases=['key_alias']), - policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'), - policy_role_name=dict(aliases=['role_name']), - policy_role_arn=dict(aliases=['role_arn']), - policy_grant_types=dict(aliases=['grant_types'], type='list', elements='str'), - policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True), pending_window=dict(aliases=['deletion_delay'], type='int'), key_id=dict(aliases=['key_arn']), description=dict(), @@ -1171,8 +967,6 @@ def main(): required_one_of=[['alias', 'key_id']], ) - mode = module.params['policy_mode'] - kms = module.client('kms') if module.params.get('purge_tags') is None: @@ -1191,12 +985,6 @@ def main(): if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata: module.fail_json(msg="Could not find key with id {0} to update".format(module.params.get('key_id'))) - if module.params.get('policy_grant_types') or mode == 'deny': - module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile' - ' and has been deprecated in favour of the policy option.', date='2021-12-01', collection_name='community.aws') - result = update_policy_grants(kms, module, key_metadata, mode) - module.exit_json(**result) - if module.params.get('state') == 'absent': if key_metadata is None: module.exit_json(changed=False) From 43124cfb51f42d9850455d0bfbc3438055e1f64c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 11 Jul 2022 14:42:21 +0200 Subject: [PATCH 550/683] Update default value of purge_tags to True (#1343) Update default value of purge_tags to True SUMMARY Complete the deprecation cycle and update purge_tags default value to True ISSUE TYPE Feature Pull Request COMPONENT 
NAME plugins/modules/acm_certificate.py plugins/modules/cloudfront_distribution.py plugins/modules/ec2_vpc_vpn.py plugins/modules/kms_key.py plugins/modules/rds_param_group.py plugins/modules/route53_health_check.py plugins/modules/route53_zone.py plugins/modules/sqs_queue.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- acm_certificate.py | 15 +++++---------- cloudfront_distribution.py | 12 ++---------- ec2_vpc_vpn.py | 12 ++---------- kms_key.py | 15 ++------------- rds_param_group.py | 12 ++---------- route53_health_check.py | 12 ++---------- route53_zone.py | 12 ++---------- sqs_queue.py | 12 ++---------- 8 files changed, 19 insertions(+), 83 deletions(-) diff --git a/acm_certificate.py b/acm_certificate.py index 6b48579d5bc..8264404be7e 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -177,7 +177,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags ''' EXAMPLES = ''' @@ -495,7 +495,7 @@ def main(): name_tag=dict(aliases=['name']), private_key=dict(no_log=True), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), ) module = AnsibleAWSModule( @@ -504,14 +504,6 @@ def main(): ) acm = ACMServiceManager(module) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - # Check argument requirements if module.params['state'] == 'present': # at least one of these should be specified. @@ -532,6 +524,9 @@ def main(): desired_tags = None if module.params.get('tags') is not None: desired_tags = module.params['tags'] + else: + # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed + module.params['purge_tags'] = False if module.params.get('name_tag') is not None: # The module was originally implemented to filter certificates based on the 'Name' tag. # Other tags are not used to filter certificates. diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index c07435345ea..2b58ac1e888 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -24,7 +24,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags options: @@ -2109,7 +2109,7 @@ def main(): distribution_id=dict(), e_tag=dict(), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), alias=dict(), aliases=dict(type='list', default=[], elements='str'), purge_aliases=dict(type='bool', default=False), @@ -2148,14 +2148,6 @@ def main(): ] ) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' 
- ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) validation_mgr = CloudFrontValidationManager(module) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 8e22b973c2e..9034dbbe713 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -17,7 +17,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags author: - "Sloane Hertel (@s-hertel)" options: @@ -767,7 +767,7 @@ def main(): static_only=dict(default=False, type='bool'), customer_gateway_id=dict(type='str'), vpn_connection_id=dict(type='str'), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), routes=dict(type='list', default=[], elements='str'), purge_routes=dict(type='bool', default=False), wait_timeout=dict(type='int', default=600), @@ -777,14 +777,6 @@ def main(): supports_check_mode=True) connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - state = module.params.get('state') parameters = dict(module.params) diff --git a/kms_key.py b/kms_key.py index 39ffc9109f9..2f664bbafff 100644 --- a/kms_key.py +++ b/kms_key.py @@ -139,7 +139,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags notes: - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS. @@ -734,9 +734,6 @@ def update_tags(connection, module, key, desired_tags, purge_tags): if desired_tags is None: return False - # purge_tags needs to be explicitly set, so an empty tags list means remove - # all tags - to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags) if not (bool(to_add) or bool(to_remove)): return False @@ -950,7 +947,7 @@ def main(): description=dict(), enabled=dict(type='bool', default=True), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), grants=dict(type='list', default=[], elements='dict'), policy=dict(type='json'), purge_grants=dict(type='bool', default=False), @@ -969,14 +966,6 @@ def main(): kms = module.client('kms') - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. 
Both values are returned for now.", date='2024-05-01', collection_name='community.aws') diff --git a/rds_param_group.py b/rds_param_group.py index 1d52ea51817..d1492779996 100644 --- a/rds_param_group.py +++ b/rds_param_group.py @@ -54,7 +54,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags ''' @@ -315,7 +315,7 @@ def main(): params=dict(aliases=['parameters'], type='dict'), immediate=dict(type='bool', aliases=['apply_immediately']), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -323,14 +323,6 @@ def main(): supports_check_mode=True ) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - try: conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/route53_health_check.py b/route53_health_check.py index 83283ecf646..b07672e9ddb 100644 --- a/route53_health_check.py +++ b/route53_health_check.py @@ -115,7 +115,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags ''' EXAMPLES = ''' @@ -503,7 +503,7 @@ def main(): request_interval=dict(type='int', choices=[10, 30], default=30), failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), health_check_id=dict(type='str', aliases=['id'], required=False), health_check_name=dict(type='str', aliases=['name'], required=False), use_unique_names=dict(type='bool', required=False), @@ -537,14 +537,6 @@ def main(): supports_check_mode=True, ) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - if not module.params.get('health_check_id') and not module.params.get('type'): module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") diff --git a/route53_zone.py b/route53_zone.py index 334233b4e44..e5c6f199b8e 100644 --- a/route53_zone.py +++ b/route53_zone.py @@ -49,7 +49,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. author: @@ -437,7 +437,7 @@ def main(): hosted_zone_id=dict(), delegation_set_id=dict(), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), ) mutually_exclusive = [ @@ -451,14 +451,6 @@ def main(): supports_check_mode=True, ) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' 
- ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - zone_in = module.params.get('zone').lower() state = module.params.get('state').lower() vpc_id = module.params.get('vpc_id') diff --git a/sqs_queue.py b/sqs_queue.py index ba6432e93ac..e83735254f4 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -90,7 +90,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - - amazon.aws.tags.deprecated_purge + - amazon.aws.tags ''' RETURN = r''' @@ -474,18 +474,10 @@ def main(): kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), content_based_deduplication=dict(type='bool'), tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool'), + purge_tags=dict(type='bool', default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - if module.params.get('purge_tags') is None: - module.deprecate( - 'The purge_tags parameter currently defaults to False.' - ' For consistency across the collection, this default value' - ' will change to True in release 5.0.0.', - version='5.0.0', collection_name='community.aws') - module.params['purge_tags'] = False - state = module.params.get('state') retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue']) try: From 694c2cd68b76c63ea65922eb4de365f2c9588e05 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 11 Jul 2022 16:42:08 +0200 Subject: [PATCH 551/683] iam_policy (#1345) iam_policy - complete 5.0.0 deprecation cycles SUMMARY skip_duplicates now defaults to False. policy_document has been dropped. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/iam_policy.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- iam_policy.py | 60 +++++---------------------------------------------- 1 file changed, 5 insertions(+), 55 deletions(-) diff --git a/iam_policy.py b/iam_policy.py index 8989255d3c1..aae6376312b 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -32,17 +32,9 @@ - The name label for the policy to create or remove. required: true type: str - policy_document: - description: - - The path to the properly json formatted policy file. - - Mutually exclusive with I(policy_json). - - This option has been deprecated and will be removed in a release after 2022-06-01. The existing behavior can be - reproduced by using the I(policy_json) option and reading the file using the lookup plugin. - type: str policy_json: description: - A properly json formatted policy as string. - - Mutually exclusive with I(policy_document). - See U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) on how to use it properly. type: json state: @@ -55,9 +47,7 @@ description: - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. If there is a match it will not make a new policy object with the same rules. - - The current default is C(true). However, this behavior can be confusing and as such the default will - change to C(false) in a release after 2022-06-01. To maintain - the existing behavior explicitly set I(skip_duplicates=true). 
+ default: false type: bool author: @@ -70,15 +60,6 @@ ''' EXAMPLES = ''' -# Create a policy with the name of 'Admin' to the group 'administrators' -- name: Assign a policy called Admin to the administrators group - community.aws.iam_policy: - iam_type: group - iam_name: administrators - policy_name: Admin - state: present - policy_document: admin_policy.json - # Advanced example, create two new groups and add a READ-ONLY policy to both # groups. - name: Create Two Groups, Mario and Luigi @@ -139,11 +120,10 @@ class PolicyError(Exception): class Policy: - def __init__(self, client, name, policy_name, policy_document, policy_json, skip_duplicates, state, check_mode): + def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode): self.client = client self.name = name self.policy_name = policy_name - self.policy_document = policy_document self.policy_json = policy_json self.skip_duplicates = skip_duplicates self.state = state @@ -188,25 +168,12 @@ def delete(self): def get_policy_text(self): try: - if self.policy_document is not None: - return self.get_policy_from_document() if self.policy_json is not None: return self.get_policy_from_json() except json.JSONDecodeError as e: raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) return None - def get_policy_from_document(self): - try: - with open(self.policy_document, 'r') as json_data: - pdoc = json.load(json_data) - json_data.close() - except IOError as e: - if e.errno == 2: - raise PolicyError('policy_document {0:!r} does not exist'.format(self.policy_document)) - raise - return pdoc - def get_policy_from_json(self): if isinstance(self.policy_json, string_types): pdoc = json.loads(self.policy_json) @@ -301,42 +268,25 @@ def main(): state=dict(default='present', choices=['present', 'absent']), iam_name=dict(required=True), policy_name=dict(required=True), - policy_document=dict(default=None, required=False), policy_json=dict(type='json', default=None, required=False), - skip_duplicates=dict(type='bool', default=None, required=False) + skip_duplicates=dict(type='bool', default=False, required=False) ) - mutually_exclusive = [['policy_document', 'policy_json']] required_if = [ - ('state', 'present', ('policy_document', 'policy_json'), True), + ('state', 'present', ('policy_json',), True), ] module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True ) - skip_duplicates = module.params.get('skip_duplicates') - - if (skip_duplicates is None): - module.deprecate('The skip_duplicates behaviour has caused confusion and' - ' will be disabled by default in a release after 2022-06-01', - date='2022-06-01', collection_name='community.aws') - skip_duplicates = True - - if module.params.get('policy_document'): - module.deprecate('The policy_document option has been deprecated and' - ' will be removed in a release after 2022-06-01', - date='2022-06-01', collection_name='community.aws') - args = dict( client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), name=module.params.get('iam_name'), policy_name=module.params.get('policy_name'), - policy_document=module.params.get('policy_document'), policy_json=module.params.get('policy_json'), - skip_duplicates=skip_duplicates, + skip_duplicates=module.params.get('skip_duplicates'), state=module.params.get('state'), check_mode=module.check_mode, ) From 91a222d1ce3361dd2f919d5cf95cb44d0ef7c771 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 12 Jul 
2022 11:43:33 +0200 Subject: [PATCH 552/683] ec2_vpc_peer - Fix idempotency when accepter/requester is reversed (#1346) ec2_vpc_peer - Fix idempotency when accepter/requester is reversed SUMMARY fixes: #580 Fixes a bug where a new peering request would be created when the accepter/requester is reversed ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/ec2_vpc_peer.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ec2_vpc_peer.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 79bcbf58b59..2034f234340 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -394,6 +394,11 @@ def describe_peering_connections(params, client): Filters=ansible_dict_to_boto3_filter_list(peer_filter), ) if result['VpcPeeringConnections'] == []: + # Try again with the VPC/Peer relationship reversed + peer_filter = { + 'requester-vpc-info.vpc-id': params['PeerVpcId'], + 'accepter-vpc-info.vpc-id': params['VpcId'], + } result = client.describe_vpc_peering_connections( aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(peer_filter), From 89a5358bced321ec08a2a47df438b9e01359a224 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 13 Jul 2022 17:23:57 +0200 Subject: [PATCH 553/683] Rename cloudfront_info (#1352) Rename cloudfront_info SUMMARY renames cloudfront_info to cloudfront_distribution_info to match its pair cloudfront_distribution ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/cloudfront_info.py plugins/modules/cloudfront_distribution_info.py ADDITIONAL INFORMATION Also cleans up the legacy_missing_tests/aliases file Reviewed-by: Alina Buzachis --- ...info.py => cloudfront_distribution_info.py | 58 ++++++++++--------- 1 file changed, 30 insertions(+), 28 deletions(-) rename cloudfront_info.py => cloudfront_distribution_info.py (94%) diff --git a/cloudfront_info.py b/cloudfront_distribution_info.py similarity index 94% rename from cloudfront_info.py rename to cloudfront_distribution_info.py index b7914dcceb4..a9df0d8a9d6 100644 --- a/cloudfront_info.py +++ b/cloudfront_distribution_info.py @@ -8,12 +8,15 @@ DOCUMENTATION = ''' --- -module: cloudfront_info +module: cloudfront_distribution_info version_added: 1.0.0 short_description: Obtain facts about an AWS CloudFront distribution description: - Gets information about an AWS CloudFront distribution. -author: Willem van Ketwich (@wilvk) + - Prior to release 5.0.0 this module was called C(community.aws.cloudfront_info). The usage did not change. +author: + - Willem van Ketwich (@wilvk) options: distribution_id: description: @@ -79,82 +82,81 @@ type: bool invalidation: description: - - Get information about an invalidation. - - Requires I(invalidation_id) to be specified. + - Get information about an invalidation. + - Requires I(invalidation_id) to be specified. required: false default: false type: bool streaming_distribution: description: - - Get information about a specified RTMP distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. + - Get information about a specified RTMP distribution. + - Requires I(distribution_id) or I(domain_name_alias) to be specified. required: false default: false type: bool streaming_distribution_config: description: - - Get the configuration information about a specified RTMP distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. + - Get the configuration information about a specified RTMP distribution. + - Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false default: false type: bool list_origin_access_identities: description: - - Get a list of CloudFront origin access identities. - - Requires I(origin_access_identity_id) to be set. + - Get a list of CloudFront origin access identities. + - Requires I(origin_access_identity_id) to be set. required: false default: false type: bool list_distributions: description: - - Get a list of CloudFront distributions. + - Get a list of CloudFront distributions. required: false default: false type: bool list_distributions_by_web_acl_id: description: - - Get a list of distributions using web acl id as a filter. - - Requires I(web_acl_id) to be set. + - Get a list of distributions using web acl id as a filter. + - Requires I(web_acl_id) to be set. required: false default: false type: bool list_invalidations: description: - - Get a list of invalidations. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. + - Get a list of invalidations. + - Requires I(distribution_id) or I(domain_name_alias) to be specified. required: false default: false type: bool list_streaming_distributions: description: - - Get a list of streaming distributions. + - Get a list of streaming distributions. required: false default: false type: bool summary: description: - - Returns a summary of all distributions, streaming distributions and origin_access_identities. - - This is the default behaviour if no option is selected. + - Returns a summary of all distributions, streaming distributions and origin_access_identities. + - This is the default behaviour if no option is selected. required: false default: false type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get a summary of distributions - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: summary: true register: result - name: Get information about a distribution - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: distribution: true distribution_id: my-cloudfront-distribution-id register: result_did @@ -162,7 +164,7 @@ msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}" - name: Get information about a distribution using the CNAME of the cloudfront distribution. - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: distribution: true domain_name_alias: www.my-website.com register: result_website @@ -170,23 +172,23 @@ msg: "{{ result_website['cloudfront']['www.my-website.com'] }}" - name: Get all information about an invalidation for a distribution. - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: invalidation: true distribution_id: my-cloudfront-distribution-id invalidation_id: my-cloudfront-invalidation-id - name: Get all information about a CloudFront origin access identity. - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: origin_access_identity: true origin_access_identity_id: my-cloudfront-origin-access-identity-id - name: Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: origin_access_identity: true origin_access_identity_id: my-cloudfront-origin-access-identity-id - name: Get all information about lists not requiring parameters (ie. 
list_origin_access_identities, list_distributions, list_streaming_distributions) - community.aws.cloudfront_info: + community.aws.cloudfront_distribution_info: all_lists: true ''' From f121c12e9d050dcbb99fa590be77ea72a20c43c1 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 14 Jul 2022 23:01:27 +0200 Subject: [PATCH 554/683] Add argspec validation for targets (#1355) cloudwatchevent_rule - Add argspec validation for targets SUMMARY fixes: #201 Targets currently has minimal validation applied. Because of the way Ansible converts JSON strings to dicts/lists, then back to the Python format string representing the dicts/lists, unless we explicitly define a parameter is a JSON string they get corrupted. This also moves the new input_paths_map/input_template parameters under input_transformer. Because we've not released 4.1.0 yet this doesn't cause any breakage. This will make adding other target parameters simpler further down the road. (There's a lot that we don't support today) ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudwatchevent_rule ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- cloudwatchevent_rule.py | 122 ++++++++++++++++++++++++---------------- 1 file changed, 73 insertions(+), 49 deletions(-) diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py index 820e6c38543..4780a4ae43d 100644 --- a/cloudwatchevent_rule.py +++ b/cloudwatchevent_rule.py @@ -39,10 +39,10 @@ type: str event_pattern: description: - - A string pattern (in valid JSON format) that is used to match against - incoming events to determine if the rule should be triggered. + - A string pattern that is used to match against incoming events to determine if the rule + should be triggered. required: false - type: str + type: json state: description: - Whether the rule is present (and enabled), disabled, or absent. @@ -78,34 +78,34 @@ type: str description: The ARN of the IAM role to be used for this target when the rule is triggered. input: - type: str + type: json description: - - A JSON object that will override the event data when passed to the target. - - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template) + - A JSON object that will override the event data passed to the target. + - If neither I(input) nor I(input_path) nor I(input_transformer) is specified, then the entire event is passed to the target in JSON form. input_path: type: str description: - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be passed to the target. - - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template) + - If neither I(input) nor I(input_path) nor I(input_transformer) is specified, then the entire event is passed to the target in JSON form. - input_paths_map: + input_transformer: type: dict - version_added: 4.1.0 description: - - A dict that specifies the transformation of the event data to - custom input parameters. - - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template) - is specified, then the entire event is passed to the target in JSON form. - input_template: - type: str + - Settings to support providing custom input to a target based on certain event data. version_added: 4.1.0 - description: - - A string that templates the values input_paths_map extracted from the event data. - It is used to produce the output you want to be sent to the target. 
- - If neither I(input) nor I(input_path) nor I(input_paths_map) nor I(input_template) - is specified, then the entire event is passed to the target in JSON form. + suboptions: + input_paths_map: + type: dict + description: + - A dict that specifies the transformation of the event data to + custom input parameters. + input_template: + type: json + description: + - A string that templates the values input_paths_map extracted from the event data. + It is used to produce the output you want to be sent to the target. ecs_parameters: type: dict description: @@ -114,6 +114,7 @@ task_definition_arn: type: str description: The full ARN of the task definition. + required: true task_count: type: int description: The number of tasks to create based on I(task_definition). @@ -147,10 +148,11 @@ targets: - id: MyTargetSnsTopic arn: arn:aws:sns:us-east-1:123456789012:MySNSTopic - input_paths_map: - instance: "$.detail.instance-id" - state: "$.detail.state" - input_template: " is in state " + input_transformer: + input_paths_map: + instance: "$.detail.instance-id" + state: "$.detail.state" + input_template: " is in state " - community.aws.cloudwatchevent_rule: name: MyCronTask @@ -175,15 +177,28 @@ sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" ''' +import json + try: import botocore except ImportError: pass # handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + + +def _format_json(json_string): + # When passed a simple string, Ansible doesn't quote it to ensure it's a *quoted* string + try: + json.loads(json_string) + return json_string + except json.decoder.JSONDecodeError: + return str(json.dumps(json_string)) class CloudWatchEventRule(object): @@ -307,29 +322,14 @@ def _targets_request(self, targets): """Formats each target for the request""" targets_request = [] for target in targets: - target_request = { - 'Id': target['id'], - 'Arn': target['arn'] - } - if 'input' in target: - target_request['Input'] = target['input'] - if 'input_path' in target: - target_request['InputPath'] = target['input_path'] - if 'input_paths_map' in target or 'input_template' in target: - target_request['InputTransformer'] = {} - target_request['InputTransformer']['InputPathsMap'] = target['input_paths_map'] - target_request['InputTransformer']['InputTemplate'] = '"{0}"'.format( - target['input_template'] - ) - if 'role_arn' in target: - target_request['RoleArn'] = target['role_arn'] - if 'ecs_parameters' in target: - target_request['EcsParameters'] = {} - ecs_parameters = target['ecs_parameters'] - if 'task_definition_arn' in target['ecs_parameters']: - target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn'] - if 'task_count' in target['ecs_parameters']: - target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count'] + target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) + if target_request.get('Input', None): + target_request['Input'] = _format_json(target_request['Input']) + if target_request.get('InputTransformer', None): + if 
target_request.get('InputTransformer').get('InputTemplate', None): + target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate']) + if target_request.get('InputTransformer').get('InputPathsMap', None): + target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map'] targets_request.append(target_request) return targets_request @@ -450,15 +450,39 @@ def _remote_state(self): def main(): + target_args = dict( + type='list', elements='dict', default=[], + options=dict( + id=dict(type='str', required=True), + arn=dict(type='str', required=True), + role_arn=dict(type='str'), + input=dict(type='json'), + input_path=dict(type='str'), + input_transformer=dict( + type='dict', + options=dict( + input_paths_map=dict(type='dict'), + input_template=dict(type='json'), + ), + ), + ecs_parameters=dict( + type='dict', + options=dict( + task_definition_arn=dict(type='str', required=True), + task_count=dict(type='int'), + ), + ), + ), + ) argument_spec = dict( name=dict(required=True), schedule_expression=dict(), - event_pattern=dict(), + event_pattern=dict(type='json'), state=dict(choices=['present', 'disabled', 'absent'], default='present'), description=dict(), role_arn=dict(), - targets=dict(type='list', default=[], elements='dict'), + targets=target_args, ) module = AnsibleAWSModule(argument_spec=argument_spec) From 809ebd7c0e56244316f72f89e8d5a00ca2a56c5e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 15 Jul 2022 14:13:22 +0200 Subject: [PATCH 555/683] make health_check_protocol required if health_check_path or successful_response_codes is set (#1354) elb_target_group make health_check_protocol required if health_check_path or successful_response_codes is set fixes: #29 SUMMARY health_check_path and successful_response_codes were previously silently dropped on the floor if health_check_protocol wasn't set. 
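As an illustrative sketch of the failure mode (the target group name and VPC ID below are invented for the example), a task such as the following used to have its health check options silently ignored when health_check_protocol was omitted; with the new required_by rule the module instead fails fast and asks for the protocol:

- name: Target group with an HTTP health check
  community.aws.elb_target_group:
    name: example-tg                  # hypothetical name
    protocol: http
    port: 80
    vpc_id: vpc-0123456789abcdef0     # hypothetical VPC
    health_check_protocol: http       # now required whenever health_check_path is set
    health_check_path: /health
    successful_response_codes: "200"
    state: present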
ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_target_group ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- elb_target_group.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index c0a71c3c0e5..b8110ea0876 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -931,14 +931,17 @@ def main(): wait_timeout=dict(type='int', default=200), wait=dict(type='bool', default=False) ) + required_by = dict( + health_check_path=['health_check_protocol'], + successful_response_codes=['health_check_protocol'], + ) + required_if = [ + ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], + ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], + ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], + ] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], - ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], - ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], - ] - ) + module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if) if module.params.get('target_type') is None: module.params['target_type'] = 'instance' From 98076ddfdcaf109cc7e8a2eb1dd6d00cbc34d9c4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 22 Jul 2022 11:41:50 +0200 Subject: [PATCH 556/683] Add documentation for TargetGroupName (#1366) elb_application_lb/elb_network_lb - Add documentation for TargetGroupName SUMMARY fixes: #915 elb_application_lb and elb_network_lb have a poorly documented feature, that you can use TargetGroupName instead of TargetGroupArn. While this is shown in the examples, it's not in the options documentation. While undocumented, the feature's been there since release 1.0.0 ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/elb_application_lb.py plugins/modules/elb_network_lb.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- elb_application_lb.py | 8 +++++++- elb_network_lb.py | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/elb_application_lb.py b/elb_application_lb.py index 2e84242d382..2a2dd771528 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -122,8 +122,14 @@ description: The type of action. type: str TargetGroupArn: - description: The Amazon Resource Name (ARN) of the target group. + description: + - The Amazon Resource Name (ARN) of the target group. + - Mutually exclusive with I(TargetGroupName). type: str + TargetGroupName: + description: + - The name of the target group. + - Mutually exclusive with I(TargetGroupArn). Rules: type: list elements: dict diff --git a/elb_network_lb.py b/elb_network_lb.py index 00b8f466f8a..3c6b283d03f 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -64,8 +64,14 @@ description: The type of action. type: str TargetGroupArn: - description: The Amazon Resource Name (ARN) of the target group. + description: + - The Amazon Resource Name (ARN) of the target group. + - Mutually exclusive with I(TargetGroupName). type: str + TargetGroupName: + description: + - The name of the target group. + - Mutually exclusive with I(TargetGroupArn). name: description: - The name of the load balancer.
This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric From 9feefc2cde75c8ceb6d6cb8a198e8e16928509d2 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 22 Jul 2022 12:30:01 +0200 Subject: [PATCH 557/683] Re-enable integration tests for elb_network_lb (#1365) Re-enable integration tests for elb_network_lb Depends-On: ansible-collections/amazon.aws#940 SUMMARY Re-enables integration tests for elb_network_lb Moves from hard-coded SSL certs to generating them on the fly Fixes bug where ip_address_type in return value wasn't updated ISSUE TYPE Bugfix Pull Request COMPONENT NAME tests/integration/targets/elb_network_lb ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- elb_network_lb.py | 244 ++++++++++++++++++++++++---------------------- 1 file changed, 126 insertions(+), 118 deletions(-) diff --git a/elb_network_lb.py b/elb_network_lb.py index 3c6b283d03f..3b5277d071d 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -189,138 +189,144 @@ ''' RETURN = r''' -availability_zones: - description: The Availability Zones for the load balancer. +load_balancer: + description: A representation of the Network Load Balancer returned: when state is present - type: list - sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]" -canonical_hosted_zone_id: - description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. - returned: when state is present - type: str - sample: ABCDEF12345678 -created_time: - description: The date and time the load balancer was created. - returned: when state is present - type: str - sample: "2015-02-12T02:14:02+00:00" -deletion_protection_enabled: - description: Indicates whether deletion protection is enabled. - returned: when state is present - type: str - sample: true -dns_name: - description: The public DNS name of the load balancer. - returned: when state is present - type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com -idle_timeout_timeout_seconds: - description: The idle timeout value, in seconds. - returned: when state is present - type: str - sample: 60 -ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. - returned: when state is present - type: str - sample: ipv4 -listeners: - description: Information about the listeners. - returned: when state is present - type: complex + type: dict + version_added: 5.0.0 contains: - listener_arn: - description: The Amazon Resource Name (ARN) of the listener. + availability_zones: + description: The Availability Zones for the load balancer. + returned: when state is present + type: list + sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]" + canonical_hosted_zone_id: + description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. returned: when state is present type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. + sample: ABCDEF12345678 + created_time: + description: The date and time the load balancer was created. returned: when state is present type: str - sample: "" - port: - description: The port on which the load balancer is listening. + sample: "2015-02-12T02:14:02+00:00" + deletion_protection_enabled: + description: Indicates whether deletion protection is enabled. 
returned: when state is present - type: int - sample: 80 - protocol: - description: The protocol for connections from clients to the load balancer. + type: str + sample: true + dns_name: + description: The public DNS name of the load balancer. returned: when state is present type: str - sample: HTTPS - certificates: - description: The SSL server certificate. + sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com + idle_timeout_timeout_seconds: + description: The idle timeout value, in seconds. + returned: when state is present + type: str + sample: 60 + ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + returned: when state is present + type: str + sample: ipv4 + listeners: + description: Information about the listeners. returned: when state is present type: complex contains: - certificate_arn: - description: The Amazon Resource Name (ARN) of the certificate. + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. returned: when state is present type: str sample: "" - ssl_policy: - description: The security policy that defines which ciphers and protocols are supported. - returned: when state is present - type: str - sample: "" - default_actions: - description: The default actions for the listener. - returned: when state is present - type: str - contains: - type: - description: The type of action. + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: str sample: "" - target_group_arn: - description: The Amazon Resource Name (ARN) of the target group. + port: + description: The port on which the load balancer is listening. + returned: when state is present + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + returned: when state is present + type: str + sample: HTTPS + certificates: + description: The SSL server certificate. + returned: when state is present + type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + returned: when state is present + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. returned: when state is present type: str sample: "" -load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - returned: when state is present - type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 -load_balancer_name: - description: The name of the load balancer. - returned: when state is present - type: str - sample: my-elb -load_balancing_cross_zone_enabled: - description: Indicates whether cross-zone load balancing is enabled. - returned: when state is present - type: str - sample: true -scheme: - description: Internet-facing or internal load balancer. - returned: when state is present - type: str - sample: internal -state: - description: The state of the load balancer. - returned: when state is present - type: dict - sample: "{'code': 'active'}" -tags: - description: The tags attached to the load balancer. - returned: when state is present - type: dict - sample: "{ - 'Tag': 'Example' - }" -type: - description: The type of load balancer. - returned: when state is present - type: str - sample: network -vpc_id: - description: The ID of the VPC for the load balancer. 
- returned: when state is present - type: str - sample: vpc-0011223344 + default_actions: + description: The default actions for the listener. + returned: when state is present + type: str + contains: + type: + description: The type of action. + returned: when state is present + type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + returned: when state is present + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + returned: when state is present + type: str + sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + load_balancer_name: + description: The name of the load balancer. + returned: when state is present + type: str + sample: my-elb + load_balancing_cross_zone_enabled: + description: Indicates whether cross-zone load balancing is enabled. + returned: when state is present + type: str + sample: true + scheme: + description: Internet-facing or internal load balancer. + returned: when state is present + type: str + sample: internal + state: + description: The state of the load balancer. + returned: when state is present + type: dict + sample: "{'code': 'active'}" + tags: + description: The tags attached to the load balancer. + returned: when state is present + type: dict + sample: "{ + 'Tag': 'Example' + }" + type: + description: The type of load balancer. + returned: when state is present + type: str + sample: network + vpc_id: + description: The ID of the VPC for the load balancer. + returned: when state is present + type: str + sample: vpc-0011223344 ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -385,19 +391,18 @@ def create_or_update_elb(elb_obj): if listeners_obj.changed: elb_obj.changed = True + # Update ELB ip address type only if option has been provided + if elb_obj.module.params.get('ip_address_type') is not None: + elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + + # Update the objects to pickup changes # Get the ELB again elb_obj.update() - # Get the ELB listeners again listeners_obj.update() - # Update the ELB attributes elb_obj.update_elb_attributes() - # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) - # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) @@ -411,7 +416,10 @@ def create_or_update_elb(elb_obj): # ip address type snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() - elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) + elb_obj.module.exit_json( + changed=elb_obj.changed, + load_balancer=snaked_elb, + **snaked_elb) def delete_elb(elb_obj): From 93b9f973e0be09beb042a471227b668163578b99 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 26 Jul 2022 12:01:24 +0200 Subject: [PATCH 558/683] Nudge module to trigger Sanity tests (#1372) aws_region_info - Minor docs linting SUMMARY Nudge a module to trigger the sanity tests ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/aws_region_info.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- aws_region_info.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/aws_region_info.py b/aws_region_info.py index 
66349e318a8..fc4c38b2579 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -8,11 +8,12 @@ DOCUMENTATION = ''' module: aws_region_info -short_description: Gather information about AWS regions. +short_description: Gather information about AWS regions version_added: 1.0.0 description: - - Gather information about AWS regions. -author: 'Henrique Rodrigues (@Sodki)' + - Gather information about AWS regions. +author: + - 'Henrique Rodrigues (@Sodki)' options: filters: description: @@ -25,8 +26,8 @@ default: {} type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' From 1e7d623cea85b3fcd23b108b3a35e6a8c12e4c85 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 26 Jul 2022 12:01:27 +0200 Subject: [PATCH 559/683] New module - accessanalyzer_validate_policy_info (#1370) New module - accessanalyzer_validate_policy_info SUMMARY fixes: #626 Adds a module which supports validating and linting IAM policies ISSUE TYPE New Module Pull Request COMPONENT NAME plugins/modules/accessanalyzer_validate_policy_info.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- accessanalyzer_validate_policy_info.py | 236 +++++++++++++++++++++++++ 1 file changed, 236 insertions(+) create mode 100644 accessanalyzer_validate_policy_info.py diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py new file mode 100644 index 00000000000..218bd3b781e --- /dev/null +++ b/accessanalyzer_validate_policy_info.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: accessanalyzer_validate_policy_info +version_added: 5.0.0 +short_description: Performs validation of IAM policies +description: + - Requests the validation of a policy and returns a list of findings. +options: + policy: + description: + - A properly json formatted policy. + type: json + aliases: ['policy_document'] + required: true + locale: + description: + - The locale to use for localizing the findings. + - Supported locales include C(DE), C(EN), C(ES), C(FR), C(IT), C(JA), C(KO), C(PT_BR), + C(ZH_CN) and C(ZH_TW). + - For more information about supported locales see the AWS Documentation + C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html) + type: str + required: false + default: 'EN' + policy_type: + description: + - The type of policy to validate. + - C(identity) policies grant permissions to IAM principals, including both managed and inline + policies for IAM roles, users, and groups. + - C(resource) policies grant permissions on AWS resources, including trust policies + for IAM roles and bucket policies for S3 buckets. + type: str + choices: ['identity', 'resource', 'service_control'] + default: 'identity' + required: false + resource_type: + description: + - The type of resource to attach to your resource policy. + - Ignored unless I(policy_type=resource). + - Supported resource types include C(AWS::S3::Bucket), C(AWS::S3::AccessPoint), + C(AWS::S3::MultiRegionAccessPoint) and C(AWS::S3ObjectLambda::AccessPoint) + - For resource types not supported as valid values, IAM Access Analyzer runs policy checks + that apply to all resource policies.
+ - For more information about supported resource types see the AWS Documentation + C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html) + type: str + required: false + results_filter: + description: + - Filter the findings and limit them to specific finding types. + type: list + elements: str + choices: ['error', 'security', 'suggestion', 'warning'] + required: false +author: + - Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + +EXAMPLES = r''' +# Validate a policy +- name: Validate a simple IAM policy + community.aws.accessanalyzer_validate_policy_info: + policy: "{{ lookup('template', 'managed_policy.json.j2') }}" +''' + +RETURN = r''' +findings: + description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks. + returned: success + type: list + elements: dict + contains: + finding_details: + description: + - A localized message describing the finding. + type: str + returned: success + sample: 'Resource ARN does not match the expected ARN format. Update the resource portion of the ARN.' + finding_type: + description: + - The severity of the finding. + type: str + returned: success + sample: 'ERROR' + issue_code: + description: + - An identifier for the type of issue found. + type: str + returned: success + sample: 'INVALID_ARN_RESOURCE' + learn_more_link: + description: + - A link to additional information about the finding type. + type: str + returned: success + sample: 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-policy-checks.html' + locations: + description: + - The location of the item resulting in the recommendations. + type: list + returned: success + elements: dict + contains: + path: + description: A path in a policy, represented as a sequence of path elements. + type: list + elements: dict + returned: success + sample: [{"value": "Statement"}, {"index": 0}, {"value": "Resource"}, {"index": 0}] + span: + description: + - Where in the policy the finding refers to. + - Note - when using lookups or passing dictionaries to I(policy) the policy string may be + converted to a single line of JSON, changing the column, line and offset values. + type: dict + contains: + start: + description: The start position of the span. + type: dict + returned: success + contains: + column: + description: The column of the position, starting from C(0). + type: int + returned: success + line: + description: The line of the position, starting from C(1). + type: int + returned: success + offset: + description: The offset within the policy that corresponds to the position, starting from C(0). + type: int + returned: success + end: + description: The end position of the span. + type: dict + returned: success + contains: + column: + description: The column of the position, starting from C(0). + type: int + returned: success + line: + description: The line of the position, starting from C(1). + type: int + returned: success + offset: + description: The offset within the policy that corresponds to the position, starting from C(0).
+ type: int + returned: success +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def filter_findings(findings, type_filter): + if not type_filter: + return findings + + # Convert type_filter to the findingType strings returned by the API + filter_map = dict(error='ERROR', security='SECURITY_WARNING', + suggestion='SUGGESTION', warning='WARNING') + allowed_types = [filter_map[t] for t in type_filter] + + filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types] + return filtered_results + + +def main(): + # Botocore only supports specific values for locale and resource_type, however the supported + # values are likely to be expanded, let's avoid hard coding limits which might not hold true in + # the long term... + argument_spec = dict( + policy=dict(required=True, type='json', aliases=['policy_document']), + locale=dict(required=False, type='str', default='EN'), + policy_type=dict(required=False, type='str', default='identity', + choices=['identity', 'resource', 'service_control']), + resource_type=dict(required=False, type='str'), + results_filter=dict(required=False, type='list', elements='str', + choices=['error', 'security', 'suggestion', 'warning']), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY', + service_control='SERVICE_CONTROL_POLICY') + + policy = module.params.get('policy') + policy_type = policy_type_map[module.params.get('policy_type')] + locale = module.params.get('locale').upper() + resource_type = module.params.get('resource_type') + results_filter = module.params.get('results_filter') + + try: + client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + params = dict(locale=locale, policyDocument=policy, policyType=policy_type) + if policy_type == 'RESOURCE_POLICY' and resource_type: + params['validatePolicyResourceType'] = resource_type + + results = client.validate_policy(aws_retry=True, **params) + + findings = filter_findings(results.get('findings', []), results_filter) + results['findings'] = findings + + results = camel_dict_to_snake_dict(results) + + module.exit_json(changed=False, **results) + + +if __name__ == '__main__': + main() From c49d61ec1958a9647c53e93192fd422c23af3873 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 28 Jul 2022 13:40:27 -0700 Subject: [PATCH 560/683] cloudwatchlogs_log_group: Add check_mode support (#1373) cloudwatchlogs_log_group: Add check_mode support SUMMARY Add check_mode support to cloudwatchlogs_log_group.
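As a minimal sketch of what this enables (the log group name is invented for the example), a task can now be run under check mode to preview the result without creating or deleting anything:

- name: Preview creation of a log group
  community.aws.cloudwatchlogs_log_group:
    log_group_name: /example/check-mode-demo   # hypothetical name
    retention: 14
    state: present
  check_mode: true
  register: preview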
ISSUE TYPE Feature Pull Request COMPONENT NAME cloudwatchlogs_log_group Reviewed-by: Alina Buzachis --- cloudwatchlogs_log_group.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py index f237223901f..31caeb60daf 100644 --- a/cloudwatchlogs_log_group.py +++ b/cloudwatchlogs_log_group.py @@ -148,6 +148,9 @@ def create_log_group(client, log_group_name, kms_key_id, tags, retention, module if tags: request['tags'] = tags + if module.check_mode: + module.exit_json(changed=True, msg="Would have created log group if not in check_mode.") + try: client.create_log_group(**request) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -180,6 +183,9 @@ def input_retention_policy(client, log_group_name, retention, module): def delete_retention_policy(client, log_group_name, module): + if module.check_mode: + return True + try: client.delete_retention_policy(logGroupName=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -187,6 +193,9 @@ def delete_retention_policy(client, log_group_name, module): def delete_log_group(client, log_group_name, module): + if module.check_mode: + module.exit_json(changed=True, msg="Would have deleted log group if not in check_mode.") + try: client.delete_log_group(logGroupName=log_group_name) except is_boto3_error_code('ResourceNotFoundException'): @@ -265,7 +274,7 @@ def main(): ) mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] - module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) + module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) try: logs = module.client('logs') From 263e2d4111e4a81a2a51db4559634c35e4238bae Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 29 Jul 2022 13:58:35 +0200 Subject: [PATCH 561/683] iam_policy - add support for diff mode (#1375) iam_policy - add support for diff mode SUMMARY fixes: #560 Adds support for diff mode renames policies to policy_names so that in future we can return the policies (outside of the diff) too. Attempts to handle AccessDenied more cleanly ISSUE TYPE Feature Pull Request COMPONENT NAME iam_policy ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- iam_policy.py | 74 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 18 deletions(-) diff --git a/iam_policy.py b/iam_policy.py index aae6376312b..06f9e85bf3d 100644 --- a/iam_policy.py +++ b/iam_policy.py @@ -12,9 +12,9 @@ version_added: 1.0.0 short_description: Manage inline IAM policies for users, groups, and roles description: - - Allows uploading or removing inline IAM policies for IAM users, groups or roles. - - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), - M(community.aws.iam_group) and M(community.aws.iam_managed_policy) + - Allows uploading or removing inline IAM policies for IAM users, groups or roles. + - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), + M(community.aws.iam_group) and M(community.aws.iam_managed_policy) options: iam_type: description: @@ -35,7 +35,6 @@ policy_json: description: - A properly json formatted policy as string. - - See U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) on how to use it properly. 
type: json state: description: @@ -54,9 +53,8 @@ - "Jonathan I. Davila (@defionscode)" - "Dennis Podkovyrin (@sbj-ss)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -94,7 +92,7 @@ ''' RETURN = ''' -policies: +policy_names: description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). returned: always type: list @@ -112,6 +110,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code class PolicyError(Exception): @@ -130,6 +129,9 @@ def __init__(self, client, name, policy_name, policy_json, skip_duplicates, stat self.check_mode = check_mode self.changed = False + self.original_policies = self.get_all_policies().copy() + self.updated_policies = {} + @staticmethod def _iam_type(): return '' @@ -138,33 +140,48 @@ def _list(self, name): return {} def list(self): - return self._list(self.name).get('PolicyNames', []) + try: + return self._list(self.name).get('PolicyNames', []) + except is_boto3_error_code('AccessDenied'): + return [] def _get(self, name, policy_name): return '{}' def get(self, policy_name): - return self._get(self.name, policy_name)['PolicyDocument'] + try: + return self._get(self.name, policy_name)['PolicyDocument'] + except is_boto3_error_code('AccessDenied'): + return {} def _put(self, name, policy_name, policy_doc): pass def put(self, policy_doc): - if not self.check_mode: - self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True)) self.changed = True + if self.check_mode: + return + + self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True)) + def _delete(self, name, policy_name): pass def delete(self): + self.updated_policies = self.original_policies.copy() + if self.policy_name not in self.list(): self.changed = False return - if not self.check_mode: - self._delete(self.name, self.policy_name) self.changed = True + self.updated_policies.pop(self.policy_name, None) + + if self.check_mode: + return + + self._delete(self.name, self.policy_name) def get_policy_text(self): try: @@ -181,17 +198,30 @@ def get_policy_from_json(self): pdoc = self.policy_json return pdoc + def get_all_policies(self): + policies = {} + for pol in self.list(): + policies[pol] = self.get(pol) + return policies + def create(self): matching_policies = [] policy_doc = self.get_policy_text() policy_match = False for pol in self.list(): - if not compare_policies(self.get(pol), policy_doc): + if not compare_policies(self.original_policies[pol], policy_doc): matching_policies.append(pol) policy_match = True - if (self.policy_name not in matching_policies) and not (self.skip_duplicates and policy_match): - self.put(policy_doc) + self.updated_policies = self.original_policies.copy() + + if self.policy_name in matching_policies: + return + if self.skip_duplicates and policy_match: + return + + self.put(policy_doc) + self.updated_policies[self.policy_name] = policy_doc def run(self): if self.state == 'present': @@ -201,7 +231,12 @@ def run(self): return { 'changed': self.changed, self._iam_type() + '_name': self.name, - 'policies': self.list() + 'policies': self.list(), + 'policy_names': self.list(), + 'diff': dict( + before=self.original_policies, + 
after=self.updated_policies, + ), + } @@ -300,6 +335,9 @@ def main(): elif iam_type == 'group': policy = GroupPolicy(**args) + module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.", + date='2024-08-01', collection_name='community.aws') + module.exit_json(**(policy.run())) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) From 5a505b397ef7ae0af134f1e30975ba2dcd7006f2 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Tue, 2 Aug 2022 10:46:02 +0100 Subject: [PATCH 562/683] route53: Restore support for zero weighted DNS records (#1379) route53: Restore support for zero weighted DNS records SUMMARY In #1117 (comment) and https://github.com/ansible-collections/community.aws/pull/1117/files#r869391659 this line was recommended to be simplified, but the simplified expression, not any(...), will also return true if weight_in has a value of 0, not only when it is None. Fixes #1378 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53 ADDITIONAL INFORMATION Previously it was possible to create weighted records with a weight of 0. Currently the playbook below returns the error: You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover. - name: Bug demo hosts: localhost tasks: - name: Set 0 weight for old env route53: wait: yes ttl: '5' type: 'CNAME' identifier: old overwrite: yes record: 'record.example.com.' zone: 'example.com.' value: 'record-old.example.com.' weight: '0' state: present Reviewed-by: Mark Chappell --- route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route53.py b/route53.py index bebdacdbf9a..db97197ec6b 100644 --- a/route53.py +++ b/route53.py @@ -628,7 +628,7 @@ def main(): if command_in == 'create' or command_in == 'delete': if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None: + if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") retry_decorator = AWSRetry.jittered_backoff( From f41976c79ec6010759f636dbfffb30ef27dd1a48 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 2 Aug 2022 14:10:35 +0200 Subject: [PATCH 563/683] lambda - Add support for setting supported architectures (#1377) lambda - Add support for setting supported architectures SUMMARY fixes: #744 Adds support for setting supported architectures - needs botocore 1.21.51 ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/lambda.py plugins/modules/lambda_info.py ADDITIONAL INFORMATION Return docs changes have no version_added because they only depend on the botocore version rather than the module version. (1.0.0 would return the values if botocore was new enough) Reviewed-by: Alina Buzachis --- lambda.py | 160 ++++++++++++++++++++++++++++++++----------- lambda_info.py | 16 +++-- 2 files changed, 115 insertions(+), 61 deletions(-) diff --git a/lambda.py b/lambda.py index 22629754e65..6849e5af67b 100644 --- a/lambda.py +++ b/lambda.py @@ -12,7 +12,7 @@ version_added: 1.0.0 short_description: Manage AWS Lambda functions description: - - Allows for the management of Lambda functions. + - Allows for the management of Lambda functions.
options: name: description: @@ -108,13 +108,21 @@ - The KMS key ARN used to encrypt the function's environment variables. type: str version_added: 3.3.0 + architecture: + description: + - The instruction set architecture that the function supports. + - Requires one of I(s3_bucket) or I(zip_file). + - Requires botocore >= 1.21.51. + type: str + choices: ['x86_64', 'arm64'] + aliases: ['architectures'] + version_added: 5.0.0 author: - - 'Steyn Huizinga (@steynovich)' + - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags - + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = r''' @@ -192,6 +200,12 @@ returned: success type: dict contains: + architectures: + description: The architectures supported by the function. + returned: successful run where botocore >= 1.21.51 + type: list + elements: str + sample: ['arm64'] code_sha256: description: The SHA256 hash of the function's deployment package. returned: success @@ -452,6 +466,66 @@ def format_response(response): return result +def _zip_args(zip_file, current_config, ignore_checksum): + if not zip_file: + return {} + + # If there's another change that needs to happen, we always re-upload the code + if not ignore_checksum: + local_checksum = sha256sum(zip_file) + remote_checksum = current_config.get('CodeSha256', '') + if local_checksum == remote_checksum: + return {} + + with open(zip_file, 'rb') as f: + zip_content = f.read() + return {'ZipFile': zip_content} + + +def _s3_args(s3_bucket, s3_key, s3_object_version): + if not s3_bucket: + return {} + if not s3_key: + return {} + + code = {'S3Bucket': s3_bucket, + 'S3Key': s3_key} + if s3_object_version: + code.update({'S3ObjectVersion': s3_object_version}) + + return code + + +def _code_args(module, current_config): + s3_bucket = module.params.get('s3_bucket') + s3_key = module.params.get('s3_key') + s3_object_version = module.params.get('s3_object_version') + zip_file = module.params.get('zip_file') + architectures = module.params.get('architecture') + checksum_match = False + + code_kwargs = {} + + if architectures and current_config.get('Architectures', None) != [architectures]: + module.warn('Arch Change') + code_kwargs.update({'Architectures': [architectures]}) + + try: + code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs))) + except IOError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version)) + + if not code_kwargs: + return {} + + if not architectures and current_config.get('Architectures', None): + code_kwargs.update({'Architectures': current_config.get('Architectures', None)}) + + return code_kwargs + + def main(): argument_spec = dict( name=dict(required=True), @@ -472,6 +546,7 @@ def main(): dead_letter_arn=dict(), kms_key_arn=dict(type='str', no_log=False), tracing_mode=dict(choices=['Active', 'PassThrough']), + architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']), tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), ) @@ -483,7 +558,11 @@ def main(): required_together = [['s3_key', 's3_bucket'], ['vpc_subnet_ids', 'vpc_security_group_ids']] - required_if = [['state', 'present', ['runtime', 'handler', 'role']]] + required_if = [ + ['state', 'present', ['runtime', 'handler', 'role']], + ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True], + ['architecture', 'arm64', ['zip_file', 's3_bucket'], True], + ] 
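+ # The trailing True in the two architecture rules above means that any one of
+ # 'zip_file' and 's3_bucket' satisfies the requirement, not both together.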
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, @@ -511,10 +590,15 @@ def main(): tags = module.params.get('tags') purge_tags = module.params.get('purge_tags') kms_key_arn = module.params.get('kms_key_arn') + architectures = module.params.get('architecture') check_mode = module.check_mode changed = False + if architectures: + module.require_botocore_at_least( + '1.21.51', reason='to configure the architectures that the function supports.') + try: client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) except (ClientError, BotoCoreError) as e: @@ -602,39 +686,17 @@ def main(): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to update lambda configuration") - # Update code configuration - code_kwargs = {'FunctionName': name, 'Publish': True} - - # Update S3 location - if s3_bucket and s3_key: - # If function is stored on S3 always update - code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key}) - - # If S3 Object Version is given - if s3_object_version: - code_kwargs.update({'S3ObjectVersion': s3_object_version}) - - # Compare local checksum, update remote code when different - elif zip_file: - local_checksum = sha256sum(zip_file) - remote_checksum = current_config['CodeSha256'] - - # Only upload new code when local code is different compared to the remote code - if local_checksum != remote_checksum: - try: - with open(zip_file, 'rb') as f: - encoded_zip = f.read() - code_kwargs.update({'ZipFile': encoded_zip}) - except IOError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - # Tag Function if tags is not None: if set_tag(client, module, tags, current_function, purge_tags): changed = True - # Upload new code if needed (e.g. code checksum has changed) - if len(code_kwargs) > 2: + code_kwargs = _code_args(module, current_config) + if code_kwargs: + + # Update code configuration + code_kwargs.update({'FunctionName': name, 'Publish': True}) + if not check_mode: wait_for_lambda(client, module, name) @@ -652,38 +714,26 @@ def main(): module.fail_json(msg='Unable to get function information after updating') response = format_response(response) # We're done - module.exit_json(changed=changed, **response) + module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response) # Function doesn't exists, create new Lambda function elif state == 'present': - if s3_bucket and s3_key: - # If function is stored on S3 - code = {'S3Bucket': s3_bucket, - 'S3Key': s3_key} - if s3_object_version: - code.update({'S3ObjectVersion': s3_object_version}) - elif zip_file: - # If function is stored in local zipfile - try: - with open(zip_file, 'rb') as f: - zip_content = f.read() - - code = {'ZipFile': zip_content} - except IOError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - - else: - module.fail_json(msg='Either S3 object or path to zipfile required') func_kwargs = {'FunctionName': name, 'Publish': True, 'Runtime': runtime, 'Role': role_arn, - 'Code': code, 'Timeout': timeout, 'MemorySize': memory_size, } + code = _code_args(module, {}) + if not code: + module.fail_json(msg='Either S3 object or path to zipfile required') + if 'Architectures' in code: + func_kwargs.update({'Architectures': code.pop('Architectures')}) + func_kwargs.update({'Code': code}) + if description is not None: func_kwargs.update({'Description': description}) diff --git a/lambda_info.py b/lambda_info.py index e3d00ab08cc..2f091e6e295 100644 --- a/lambda_info.py +++ 
b/lambda_info.py @@ -15,8 +15,6 @@ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases, M(community.aws.lambda_event) to manage lambda event source mappings, and M(community.aws.lambda_policy) to manage policy statements. - - options: query: description: @@ -34,11 +32,11 @@ description: - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. type: str -author: Pierre Jodouin (@pjodouin) +author: + - Pierre Jodouin (@pjodouin) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = ''' @@ -94,6 +92,12 @@ returned: when C(query) is I(aliases) or I(all) type: list elements: str + architectures: + description: The architectures supported by the function. + returned: successful run where botocore >= 1.21.51 + type: list + elements: str + sample: ['arm64'] code_sha256: description: The SHA256 hash of the function's deployment package. returned: success From c4edf10f44ae9831896617acdc8fcf0f56a567d9 Mon Sep 17 00:00:00 2001 From: Joseph Spearritt Date: Wed, 3 Aug 2022 00:03:16 +1000 Subject: [PATCH 564/683] route53_info - fix max_items when not paginating (#1384) route53_info - fix max_items when not paginating SUMMARY As reported in #1383, the route53_info module presently fails to run with a boto3 parameter validation error if run with particular combinations of parameters, specifically: query: hosted_zone parameter with hosted_zone_method: list_by_name query: reusable_delegation_set without specifying a delegation_set_id I believe this is a regression introduced in #813 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_info ADDITIONAL INFORMATION Some further information is described in the issue but tl;dr the prior PR converted all cases in the module where params['MaxItems'] was set to instead pass a params['PaginationConfig'], however this should only be done if a boto3 paginator is actually being used, and will fail (as noted above, due to parameter validation) if called with a regular boto3 client method. Hence this PR switches back to directly setting MaxItems on the methods that do not use a paginator. 
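To make the failure mode concrete, a minimal boto3 sketch (hosted zone counts and limits here are arbitrary): PaginationConfig is an argument of paginate() on a paginator object, while a plain client method validates its own parameters and expects MaxItems directly, as a string.

import boto3

client = boto3.client('route53')

# Paginated call: the limit is wrapped in PaginationConfig.
paginator = client.get_paginator('list_hosted_zones')
for page in paginator.paginate(PaginationConfig={'MaxItems': 10}):
    print(len(page['HostedZones']))

# Plain call: passing PaginationConfig here trips parameter validation;
# MaxItems must go on the call itself, and the API wants it as a string.
response = client.list_hosted_zones_by_name(MaxItems='10')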
Reviewed-by: Mark Chappell --- route53_info.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/route53_info.py b/route53_info.py index 68b0bb54b73..a331fae9319 100644 --- a/route53_info.py +++ b/route53_info.py @@ -519,11 +519,8 @@ def reusable_delegation_set_details(): params = dict() if not module.params.get('delegation_set_id'): - # Set PaginationConfig with max_items if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + params['MaxItems'] = str(module.params.get('max_items')) if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') @@ -581,11 +578,8 @@ def list_hosted_zones_by_name(): if module.params.get('dns_name'): params['DNSName'] = module.params.get('dns_name') - # Set PaginationConfig with max_items if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + params['MaxItems'] = str(module.params.get('max_items')) return client.list_hosted_zones_by_name(**params) From 63f18872760f724cb9b18da569ddb3b67046ad74 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 3 Aug 2022 11:04:35 +0200 Subject: [PATCH 565/683] Remove deprecated @AWSRetry.backoff usage (#1386) Remove deprecated AWSRetry.backoff usage Depends-On: ansible-collections/amazon.aws#946 SUMMARY AWSRetry.backoff was deprecated (and originally slated for removal in 4.0.0) remove usage. ISSUE TYPE Feature Pull Request COMPONENT NAME acm_certificate_info acm_certificate api_gateway_domain waf_condition waf_info waf_rule waf_web_acl ADDITIONAL INFORMATION WAF and ACM changes are coming from amazon.aws (linked PR), this just drops in a changelog fragment Reviewed-by: Alina Buzachis --- acm_certificate_info.py | 8 ++++---- api_gateway_domain.py | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 8e16162cedb..70f641e61df 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -20,9 +20,9 @@ description: - If provided, the results will be filtered to show only the certificate with this ARN. - If no certificate with this ARN exists, this task will fail. - - If a certificate with this ARN exists in a different region, this task will fail + - If a certificate with this ARN exists in a different region, this task will fail. 
aliases: - - arn + - arn type: str domain_name: description: @@ -43,8 +43,8 @@ author: - Will Thames (@willthames) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' diff --git a/api_gateway_domain.py b/api_gateway_domain.py index f5183ae92af..6a3a6c3ace1 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -229,20 +229,20 @@ def delete_domain(module, client): return camel_dict_to_snake_dict(result) -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2} +retry_params = {"delay": 5, "backoff": 1.2} -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def get_domain_name(client, domain_name): return client.get_domain_name(domainName=domain_name) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def get_domain_mappings(client, domain_name): return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', []) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): endpoint_configuration = {'types': [endpoint_type]} @@ -263,12 +263,12 @@ def create_domain_name(module, client, domain_name, certificate_arn, endpoint_ty ) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def update_domain_name(client, domain_name, **kwargs): patch_operations = [] @@ -281,12 +281,12 @@ def update_domain_name(client, domain_name, **kwargs): return client.update_domain_name(domainName=domain_name, patchOperations=patch_operations) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def delete_domain_name(client, domain_name): return client.delete_domain_name(domainName=domain_name) -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def delete_domain_mapping(client, domain_name, base_path): return client.delete_base_path_mapping(domainName=domain_name, basePath=base_path) From 8cc4910718ef57abb07ac933f44aa8d873e39b7c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 3 Aug 2022 11:04:39 +0200 Subject: [PATCH 566/683] s3_sync - fix when running in FIPS mode (#1385) s3_sync - fix when running in FIPS mode SUMMARY fixes: #757 pass usedforsecurity=False (the MD5 sum isn't used as a cryptographic hash) and attempt to handle FIPS errors more gracefully. 
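A minimal sketch of the FIPS-safe pattern (an assumption about the approach, not a copy of the shared helper): the usedforsecurity keyword, added in Python 3.9, tells a FIPS-enabled OpenSSL that the digest is a plain checksum, while a bare hashlib.md5() raises ValueError on such builds.

import hashlib

def etag_md5(data):
    # usedforsecurity is only accepted on Python 3.9+; older interpreters
    # raise TypeError for the unknown keyword, so fall back gracefully.
    try:
        return hashlib.md5(data, usedforsecurity=False)
    except TypeError:
        return hashlib.md5(data)  # may still raise ValueError under FIPS

print(etag_md5(b'example').hexdigest())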
ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_sync ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- s3_sync.py | 65 ++++++------------------------------------------------ 1 file changed, 7 insertions(+), 58 deletions(-) diff --git a/s3_sync.py b/s3_sync.py index 2bedaa70da5..602df0aec36 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -242,7 +242,6 @@ import datetime import fnmatch -import hashlib import mimetypes import os import stat as osstat # os.stat constants @@ -255,10 +254,7 @@ try: import botocore - from boto3.s3.transfer import TransferConfig - DEFAULT_CHUNK_SIZE = TransferConfig().multipart_chunksize except ImportError: - DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024 pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_text @@ -267,59 +263,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - -# the following function, calculate_multipart_etag, is from tlastowka -# on github and is used under its (compatible) GPL license. So this -# license applies to the following function. -# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py -# -# calculate_multipart_etag Copyright (C) 2015 -# Tony Lastowka -# https://github.com/tlastowka -# -# -# calculate_multipart_etag is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# calculate_multipart_etag is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with calculate_multipart_etag. If not, see . -def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): - """ - calculates a multipart upload etag for amazon s3 - - Arguments: - - source_path -- The file to calculate the etag for - chunk_size -- The chunk size to calculate for. - """ - - md5s = [] - - with open(source_path, 'rb') as fp: - while True: - - data = fp.read(chunk_size) - - if not data: - break - md5s.append(hashlib.md5(data)) - - if len(md5s) == 1: - new_etag = '"{0}"'.format(md5s[0].hexdigest()) - else: # > 1 - digests = b"".join(m.digest() for m in md5s) - - new_md5 = hashlib.md5(digests) - new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s)) - - return new_etag +from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag def gather_files(fileroot, include=None, exclude=None): @@ -565,7 +509,12 @@ def main(): result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include']) result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map')) result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix']) - result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + try: + result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + except ValueError as e: + if module.params['file_change_strategy'] == 'checksum': + module.fail_json_aws(e, 'Unable to calculate checksum. 
If running in FIPS mode, you may need to use another file_change_strategy') + result['filelist_local_etag'] = result['filelist_s3'].copy() result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy']) result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params) From dd2adef3657d25d7551276444703f552d65e9b96 Mon Sep 17 00:00:00 2001 From: Seena Fallah Date: Mon, 8 Aug 2022 15:37:22 +0200 Subject: [PATCH 567/683] s3-lifecycle: fix remove rule with empty prefix (#1398) s3-lifecycle: fix remove rule with empty prefix SUMMARY In case of removing a lifecycle policy without a prefix, there will be no prefix key in the existing_rule filter Signed-off-by: Seena Fallah seenafallah@gmail.com ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_lifecycle Reviewed-by: Mark Chappell --- s3_lifecycle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index a9a5f5dbf65..9a2ea51e92f 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -345,9 +345,9 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru if current_lifecycle_rules: # If rule ID exists, use that for comparison otherwise compare based on prefix for existing_rule in current_lifecycle_rules: - if rule.get('ID') == existing_rule.get('ID') and rule['Filter']['Prefix'] != existing_rule.get('Filter', {}).get('Prefix', ''): + if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''): existing_rule.pop('ID') - elif rule_id is None and rule['Filter']['Prefix'] == existing_rule.get('Filter', {}).get('Prefix', ''): + elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''): existing_rule.pop('ID') if rule.get('ID') == existing_rule.get('ID'): changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration) @@ -407,7 +407,7 @@ def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): lifecycle_configuration['Rules'].append(existing_rule) else: for existing_rule in current_lifecycle_rules: - if prefix == existing_rule['Filter']['Prefix']: + if prefix == existing_rule['Filter'].get('Prefix', ''): # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: From 35738acc07577bcdf665a88753cd8205c564923c Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 15 Aug 2022 20:39:10 +0200 Subject: [PATCH 568/683] Minor sanity test fixes. (#1410) Minor sanity test fixes (new devel) SUMMARY ansible-devel has added a new PEP test (missing whitespace after keyword), this adds the fixes before the devel sanity tests are 'voting'. Additionally fixes: unused variables broad catching of Exception ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/autoscaling_group_info.py plugins/modules/cloudfront_distribution.py plugins/modules/cloudfront_origin_access_identity.py plugins/modules/cloudtrail.py plugins/modules/ec2_vpc_nacl.py plugins/modules/eks_fargate_profile.py plugins/modules/redshift.py plugins/modules/s3_bucket_info.py ADDITIONAL INFORMATION cloudfront_distribution still has a lot of catch Exception but it's part of parameter validation which should be overhauled separately, unfortunately the tests are rather b0rked. 
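Side by side, two of the patterns being cleaned up (toy snippets, not lifted from any one module): del is a statement rather than a function, and handlers should catch the specific botocore errors instead of a bare Exception.

import botocore.exceptions

config = {'distribution_config': {}, 'id': 'E123ABC'}
del config['distribution_config']  # statement form; del(...) only adds redundant parens

def create_oai(client, module, **params):
    # Narrow catch: only the AWS transport/client errors we can report on.
    try:
        return client.create_cloud_front_origin_access_identity(**params)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't create Origin Access Identity")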
Reviewed-by: Alina Buzachis --- autoscaling_group_info.py | 4 ++-- cloudfront_distribution.py | 12 ++++++------ cloudfront_origin_access_identity.py | 3 +-- cloudtrail.py | 6 ++---- ec2_vpc_nacl.py | 5 ++--- eks_fargate_profile.py | 2 +- redshift.py | 13 +++++-------- s3_bucket_info.py | 29 ++++++++++------------------ 8 files changed, 29 insertions(+), 45 deletions(-) diff --git a/autoscaling_group_info.py b/autoscaling_group_info.py index 4db9ac26a37..e8ec13a12ba 100644 --- a/autoscaling_group_info.py +++ b/autoscaling_group_info.py @@ -381,7 +381,7 @@ def find_asgs(conn, module, name=None, tags=None): try: elbv2 = module.client('elbv2') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # This is nice to have, not essential elbv2 = None matched_asgs = [] @@ -409,7 +409,7 @@ def find_asgs(conn, module, name=None, tags=None): # workaround for https://github.com/ansible/ansible/pull/25015 if 'target_group_ar_ns' in asg: asg['target_group_arns'] = asg['target_group_ar_ns'] - del(asg['target_group_ar_ns']) + del asg['target_group_ar_ns'] if asg.get('target_group_arns'): if elbv2: try: diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 2b58ac1e888..a2d439c7d93 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1676,7 +1676,7 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, if purge_origins: for domain in list(all_origins.keys()): if domain not in new_domains: - del(all_origins[domain]) + del all_origins[domain] return ansible_list_to_cloudfront_list(list(all_origins.values())) except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution origins") @@ -1694,7 +1694,7 @@ def validate_s3_origin_configuration(self, client, existing_config, origin): cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment)) oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id'] - except Exception as e: + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id']) return "origin-access-identity/cloudfront/%s" % oai @@ -1717,7 +1717,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): else: s3_origin_config = None - del(origin["s3_origin_access_identity_enabled"]) + del origin["s3_origin_access_identity_enabled"] if s3_origin_config: oai = s3_origin_config @@ -1766,7 +1766,7 @@ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior if purge_cache_behaviors: for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]): - del(all_cache_behaviors[target_origin_id]) + del all_cache_behaviors[target_origin_id] return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors") @@ -1954,7 +1954,7 @@ def validate_custom_error_responses(self, config, custom_error_responses, purge_ if 'response_code' in custom_error_response: custom_error_response['response_code'] = str(custom_error_response['response_code']) if custom_error_response['error_code'] in existing_responses: - 
del(existing_responses[custom_error_response['error_code']]) + del existing_responses[custom_error_response['error_code']] result.append(custom_error_response) if not purge_custom_error_responses: result.extend(existing_responses.values()) @@ -2261,7 +2261,7 @@ def main(): if 'distribution_config' in result: result.update(result['distribution_config']) - del(result['distribution_config']) + del result['distribution_config'] module.exit_json(changed=changed, **result) diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 9fc83f64820..dc79c9bd1b2 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -256,8 +256,7 @@ def main(): else: result = service_mgr.create_origin_access_identity(caller_reference, comment) changed = True - elif(state == 'absent' and origin_access_identity_id is not None and - e_tag is not None): + elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None: result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) changed = True diff --git a/cloudtrail.py b/cloudtrail.py index df95d5bfb7b..aa3b637cee5 100644 --- a/cloudtrail.py +++ b/cloudtrail.py @@ -266,7 +266,7 @@ def get_kms_key_aliases(module, client, keyId): """ try: key_resp = client.list_aliases(KeyId=keyId) - except (BotoCoreError, ClientError) as err: + except (BotoCoreError, ClientError): # Don't fail here, just return [] to maintain backwards compat # in case user doesn't have kms:ListAliases permissions return [] @@ -558,9 +558,7 @@ def main(): # all aliases for a match. initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id) for a in initial_aliases: - if(a['AliasName'] == new_key or - a['AliasArn'] == new_key or - a['TargetKeyId'] == new_key): + if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key: results['changed'] = False # Check if we need to start/stop logging diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 9968e2929ff..03cdef89c39 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -235,7 +235,6 @@ def tags_changed(nacl_id, client, module): tags = module.params.get('tags') name = module.params.get('name') purge_tags = module.params.get('purge_tags') - changed = False if name is None and tags is None: return False @@ -337,14 +336,14 @@ def setup_network_acl(client, module): replace_network_acl_association(nacl_id, subnets, client, module) construct_acl_entries(nacl, client, module) changed = True - return(changed, nacl['NetworkAcl']['NetworkAclId']) + return changed, nacl['NetworkAcl']['NetworkAclId'] else: changed = False nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] changed |= subnets_changed(nacl, client, module) changed |= nacls_changed(nacl, client, module) changed |= tags_changed(nacl_id, client, module) - return (changed, nacl_id) + return changed, nacl_id def remove_network_acl(client, module): diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 72164a36fea..4eae0983acc 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -185,7 +185,7 @@ def validate_tags(client, module, fargate_profile): try: existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) - except(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) if tags_to_remove: diff --git a/redshift.py b/redshift.py index ca3e1a45052..475c2101a5e 100644 --- a/redshift.py +++ b/redshift.py @@ -453,7 +453,7 @@ def create_cluster(module, redshift): changed = True resource = _describe_cluster(redshift, identifier) - return(changed, _collect_facts(resource)) + return changed, _collect_facts(resource) def describe_cluster(module, redshift): @@ -470,7 +470,7 @@ def describe_cluster(module, redshift): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Error describing cluster") - return(True, _collect_facts(resource)) + return True, _collect_facts(resource) def delete_cluster(module, redshift): @@ -499,7 +499,7 @@ def delete_cluster(module, redshift): ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True)) except is_boto3_error_code('ClusterNotFound'): - return(False, {}) + return False, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete cluster") @@ -514,7 +514,7 @@ def delete_cluster(module, redshift): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout deleting the cluster") - return(True, {}) + return True, {} def modify_cluster(module, redshift): @@ -528,9 +528,6 @@ def modify_cluster(module, redshift): identifier = module.params.get('identifier') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - region = region = module.params.get('region') # Package up the optional parameters params = {} @@ -594,7 +591,7 @@ def modify_cluster(module, redshift): if _ensure_tags(redshift, identifier, resource['Tags'], module): resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] - return(True, _collect_facts(resource)) + return True, _collect_facts(resource) def main(): diff --git a/s3_bucket_info.py b/s3_bucket_info.py index d164fde5d16..81e1cd7217b 100644 --- a/s3_bucket_info.py +++ b/s3_bucket_info.py @@ -444,7 +444,7 @@ def get_bucket_list(module, connection, name="", name_filter=""): final_buckets = filtered_buckets else: final_buckets = buckets - return(final_buckets) + return final_buckets def get_buckets_facts(connection, buckets, requested_facts, transform_location): @@ -457,7 +457,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location): bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location)) full_bucket_list.append(bucket) - return(full_bucket_list) + return full_bucket_list def get_bucket_details(connection, name, requested_facts, transform_location): @@ -490,7 +490,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location): except botocore.exceptions.ClientError: pass - return(all_facts) + return all_facts @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) @@ -508,11 +508,8 @@ def get_bucket_location(name, connection, transform_location=False): except KeyError: pass # Strip response metadata (not needed) - try: - data.pop('ResponseMetadata') - return(data) - except KeyError: - return(data) + data.pop('ResponseMetadata', None) + return data 
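The repeated simplification above leans on dict.pop's second argument, which turns the missing-key case into a no-op (toy data standing in for a boto3 response):

data = {'LocationConstraint': 'eu-west-1', 'ResponseMetadata': {'RequestId': 'abc123'}}

# With a default supplied, pop() never raises KeyError, so the
# try/except wrapping becomes unnecessary.
data.pop('ResponseMetadata', None)
data.pop('ResponseMetadata', None)  # already gone; still no error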
@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) @@ -524,14 +521,11 @@ def get_bucket_tagging(name, connection): try: bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet']) - return(bucket_tags) + return bucket_tags except KeyError: # Strip response metadata (not needed) - try: - data.pop('ResponseMetadata') - return(data) - except KeyError: - return(data) + data.pop('ResponseMetadata', None) + return data @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) @@ -544,11 +538,8 @@ def get_bucket_property(name, connection, get_api_name): data = api_function(Bucket=name) # Strip response metadata (not needed) - try: - data.pop('ResponseMetadata') - return(data) - except KeyError: - return(data) + data.pop('ResponseMetadata', None) + return data def main(): From 70a9eb61905d747c491315ebeec06b7095aa5823 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 17 Aug 2022 20:57:54 +0200 Subject: [PATCH 569/683] Minor linting fixups - 2022-08-12 (#1408) Minor linting fixups - 2022-08-12 SUMMARY Various linting and unit test fixups unused variables overly broad Exception catching (highlighted some broken tests) removes direct use of unittest in favour of pytest (see also ansible-collections/amazon.aws#961) cleans up skipping of tests when botocore/boto3 aren't installed passes error message from VPNConnectionException into its super to make testing easier, should never be directly exposed to the user Removes tests for 3 modules which now have integration tests, they're either recording based (fragile) or test things which are separately tested in the integration tests. lambda s3_bucket_notifications route53_zone ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/ec2_vpc_vpn.py tests/unit ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ec2_vpc_vpn.py | 1 + lambda_event.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 9034dbbe713..4d19a8327d3 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -305,6 +305,7 @@ class VPNConnectionException(Exception): def __init__(self, msg, exception=None): + super(VPNConnectionException, self).__init__(msg) self.msg = msg self.exception = exception diff --git a/lambda_event.py b/lambda_event.py index 28d1d7bdd67..cd99ceb199f 100644 --- a/lambda_event.py +++ b/lambda_event.py @@ -63,7 +63,7 @@ enabled: description: - Indicates whether AWS Lambda should begin polling or readin from the event source. - default: true.
+ default: true type: bool batch_size: description: From aace667fbca028b121d4952236a82ec68b5d288c Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Fri, 26 Aug 2022 12:14:21 +0200 Subject: [PATCH 570/683] adjust booleans (#1420) adjust booleans: use true/false Depends-On: #1423 SUMMARY ansible-community/community-topics#116 ISSUE TYPE Docs Pull Request Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- application_autoscaling_policy.py | 22 +++++++++--------- autoscaling_group.py | 10 ++++---- autoscaling_launch_config.py | 4 ++-- cloudformation_stack_set.py | 2 +- cloudfront_distribution.py | 4 ++-- config_aggregator.py | 2 +- ec2_ami_copy.py | 10 ++++---- ec2_snapshot_copy.py | 10 ++++---- ec2_transit_gateway.py | 18 +++++++-------- ec2_vpc_vpn.py | 6 ++--- ec2_win_password.py | 2 +- ecs_ecr.py | 6 ++--- ecs_task.py | 2 +- eks_cluster.py | 2 +- eks_fargate_profile.py | 4 ++-- elasticache_parameter_group.py | 2 +- elb_application_lb.py | 10 ++++---- elb_instance.py | 4 ++-- elb_target_group.py | 2 +- elb_target_group_info.py | 4 ++-- iam_policy_info.py | 6 ++--- kinesis_stream.py | 12 +++++----- lambda_alias.py | 2 +- lambda_execute.py | 6 ++--- opensearch.py | 2 +- rds_cluster_snapshot.py | 2 +- rds_instance.py | 8 +++---- rds_instance_snapshot.py | 2 +- route53.py | 4 ++-- s3_lifecycle.py | 4 ++-- s3_sync.py | 2 +- ses_rule_set.py | 6 ++--- sqs_queue.py | 2 +- waf_condition.py | 2 +- waf_info.py | 2 +- waf_rule.py | 8 +++---- waf_web_acl.py | 6 ++--- wafv2_ip_set.py | 2 +- wafv2_rule_group.py | 18 +++++++-------- wafv2_web_acl.py | 38 +++++++++++++++---------------- 40 files changed, 130 insertions(+), 130 deletions(-) diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index 5d11fe47c0d..6a636e8a7cd 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -24,25 +24,25 @@ options: state: description: Whether a policy should be C(present) or C(absent). - required: yes + required: true choices: ['absent', 'present'] type: str policy_name: description: The name of the scaling policy. - required: yes + required: true type: str service_namespace: description: The namespace of the AWS service. - required: yes + required: true choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb'] type: str resource_id: description: The identifier of the resource associated with the scalable target. - required: yes + required: true type: str scalable_dimension: description: The scalable dimension associated with the scalable target. - required: yes + required: true choices: [ 'ecs:service:DesiredCount', 'ec2:spot-fleet-request:TargetCapacity', 'elasticmapreduce:instancegroup:InstanceCount', @@ -54,19 +54,19 @@ type: str policy_type: description: The policy type. - required: yes + required: true choices: ['StepScaling', 'TargetTrackingScaling'] type: str step_scaling_policy_configuration: description: A step scaling policy. This parameter is required if you are creating a policy and I(policy_type=StepScaling). - required: no + required: false type: dict target_tracking_scaling_policy_configuration: description: - A target tracking policy. This parameter is required if you are creating a new policy and I(policy_type=TargetTrackingScaling). 
- 'Full documentation of the suboptions can be found in the API documentation:' - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)' - required: no + required: false type: dict suboptions: CustomizedMetricSpecification: @@ -90,18 +90,18 @@ minimum_tasks: description: The minimum value to scale to in response to a scale in event. This parameter is required if you are creating a first new policy for the specified service. - required: no + required: false type: int maximum_tasks: description: The maximum value to scale to in response to a scale out event. This parameter is required if you are creating a first new policy for the specified service. - required: no + required: false type: int override_task_capacity: description: - Whether or not to override values of minimum and/or maximum tasks if it's already set. - Defaults to C(false). - required: no + required: false type: bool extends_documentation_fragment: - amazon.aws.aws diff --git a/autoscaling_group.py b/autoscaling_group.py index 753f2a08727..84db04bce9c 100644 --- a/autoscaling_group.py +++ b/autoscaling_group.py @@ -339,7 +339,7 @@ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production - propagate_at_launch: no + propagate_at_launch: false # Rolling ASG Updates @@ -361,14 +361,14 @@ region: us-east-1 security_groups: sg-23423 instance_type: m1.small - assign_public_ip: yes + assign_public_ip: true - community.aws.autoscaling_group: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB - replace_all_instances: yes + replace_all_instances: true min_size: 5 max_size: 5 desired_capacity: 5 @@ -406,7 +406,7 @@ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production - propagate_at_launch: no + propagate_at_launch: false # Basic Configuration with Launch Template using mixed instance policy @@ -432,7 +432,7 @@ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production - propagate_at_launch: no + propagate_at_launch: false ''' RETURN = r''' diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index f1a014a563e..d353afe3b9f 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -240,13 +240,13 @@ snapshot: snap-XXXX volume_type: gp2 delete_on_termination: true - encrypted: no + encrypted: false - name: Create launch configuration community.aws.autoscaling_launch_config: name: lc1 image_id: ami-xxxx - assign_public_ip: yes + assign_public_ip: true instance_type: t2.medium key_name: my-key security_groups: "['sg-xxxx']" diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 750dceb2bf7..c3f631b1a91 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -337,7 +337,7 @@ def update_stack_set(module, stack_params, cfn): except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except module.fail_json_aws( err, msg="Another operation is already in progress on this stack set - please try again later. 
When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.") + "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.") except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") if module.params.get('wait'): diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index a2d439c7d93..48ff7247e1c 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -75,7 +75,7 @@ purge_aliases: description: - Specifies whether existing aliases will be removed before adding new aliases. - - When I(purge_aliases=yes), existing aliases are removed and I(aliases) are added. + - When I(purge_aliases=true), existing aliases are removed and I(aliases) are added. default: false type: bool @@ -624,7 +624,7 @@ - tested.com tags: Project: distribution 1.2 - purge_tags: yes + purge_tags: true - name: create a distribution with an origin, logging and default cache behavior community.aws.cloudfront_distribution: diff --git a/config_aggregator.py b/config_aggregator.py index 7a9bf4836f7..e4c23b9b5fa 100644 --- a/config_aggregator.py +++ b/config_aggregator.py @@ -85,7 +85,7 @@ - 1234567890 - 0123456789 - 9012345678 - all_aws_regions: yes + all_aws_regions: true ''' RETURN = r'''#''' diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index ecb723dfea6..46be5ec2024 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -47,7 +47,7 @@ description: - Wait for the copied AMI to be in state C(available) before returning. type: bool - default: 'no' + default: false wait_timeout: description: - How long before wait gives up, in seconds. @@ -87,7 +87,7 @@ source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx - wait: yes + wait: true wait_timeout: 1200 # Default timeout is 600 register: image_id @@ -107,21 +107,21 @@ tags: Name: My-Super-AMI Patch: 1.2.3 - tag_equality: yes + tag_equality: true - name: Encrypted AMI copy community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx - encrypted: yes + encrypted: true - name: Encrypted AMI copy with specified key community.aws.ec2_ami_copy: source_region: us-east-1 region: eu-west-1 source_image_id: ami-xxxxxxx - encrypted: yes + encrypted: true kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b ''' diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 2d0d40546e7..62612ad0a0a 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -33,7 +33,7 @@ description: - Whether or not the destination Snapshot should be encrypted. type: bool - default: 'no' + default: false kms_key_id: description: - KMS key id used to encrypt snapshot. If not specified, AWS defaults to C(alias/aws/ebs). @@ -42,7 +42,7 @@ description: - Wait for the copied Snapshot to be in the C(Available) state before returning. type: bool - default: 'no' + default: false wait_timeout: description: - How long before wait gives up, in seconds. 
@@ -72,7 +72,7 @@ source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx - wait: yes + wait: true wait_timeout: 1200 # Default timeout is 600 register: snapshot_id @@ -89,14 +89,14 @@ source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx - encrypted: yes + encrypted: true - name: Encrypted Snapshot copy with specified key community.aws.ec2_snapshot_copy: source_region: eu-central-1 region: eu-west-1 source_snapshot_id: snap-xxxxxxx - encrypted: yes + encrypted: true kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b ''' diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index be1082768fa..73822ebd87d 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -90,8 +90,8 @@ - name: Create a new transit gateway with options community.aws.ec2_transit_gateway: asn: 64514 - auto_associate: no - auto_propagate: no + auto_associate: false + auto_propagate: false dns_support: True description: "nonprod transit gateway" purge_tags: False @@ -470,17 +470,17 @@ def setup_module_object(): argument_spec = dict( asn=dict(type='int'), - auto_associate=dict(type='bool', default='yes'), - auto_attach=dict(type='bool', default='no'), - auto_propagate=dict(type='bool', default='yes'), + auto_associate=dict(type='bool', default=True), + auto_attach=dict(type='bool', default=False), + auto_propagate=dict(type='bool', default=True), description=dict(type='str'), - dns_support=dict(type='bool', default='yes'), - purge_tags=dict(type='bool', default='yes'), + dns_support=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='dict', aliases=['resource_tags']), transit_gateway_id=dict(type='str'), - vpn_ecmp_support=dict(type='bool', default='yes'), - wait=dict(type='bool', default='yes'), + vpn_ecmp_support=dict(type='bool', default=True), + wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300) ) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 4d19a8327d3..039796701f1 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -26,7 +26,7 @@ - The desired state of the VPN connection. choices: ['present', 'absent'] default: present - required: no + required: false type: str customer_gateway_id: description: @@ -51,13 +51,13 @@ - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP. default: False type: bool - required: no + required: false tunnel_options: description: - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr) and/or I(PreSharedKey) keys with appropriate string values. AWS defaults will apply in absence of either of the aforementioned keys. - required: no + required: false type: list elements: dict suboptions: diff --git a/ec2_win_password.py b/ec2_win_password.py index 7f977360e80..2889f334aa8 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -89,7 +89,7 @@ instance_id: i-XXXXXX region: us-east-1 key_file: "~/aws-creds/my_test_key.pem" - wait: yes + wait: true wait_timeout: 45 ''' diff --git a/ecs_ecr.py b/ecs_ecr.py index aa08e97d239..a7194659974 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -49,7 +49,7 @@ type: bool purge_policy: description: - - If yes, remove the policy from the repository. + - If C(true), remove the policy from the repository. - Defaults to C(false). 
required: false type: bool @@ -133,7 +133,7 @@ - name: delete-policy community.aws.ecs_ecr: name: needs-no-policy - purge_policy: yes + purge_policy: true - name: create immutable ecr-repo community.aws.ecs_ecr: @@ -143,7 +143,7 @@ - name: set-lifecycle-policy community.aws.ecs_ecr: name: needs-lifecycle-policy - scan_on_push: yes + scan_on_push: true lifecycle_policy: rules: - rulePriority: 1 diff --git a/ecs_task.py b/ecs_task.py index 3db08a5b2af..9da2dcbf45e 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -165,7 +165,7 @@ started_by: ansible_user launch_type: FARGATE network_configuration: - assign_public_ip: yes + assign_public_ip: true subnets: - subnet-abcd1234 register: task_output diff --git a/eks_cluster.py b/eks_cluster.py index 0794efef16d..abdaee4ff95 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -80,7 +80,7 @@ - name: Remove an EKS cluster community.aws.eks_cluster: name: my_cluster - wait: yes + wait: true state: absent ''' diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 4eae0983acc..313f8ad33aa 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -88,13 +88,13 @@ labels: - label1: test state: present - wait: yes + wait: true - name: Remove an EKS Fargate Profile community.aws.eks_fargate_profile: name: test_fargate cluster_name: test_cluster - wait: yes + wait: true state: absent ''' diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 4cb553931f0..588dcf12214 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -29,7 +29,7 @@ name: description: - A user-specified name for the cache parameter group. - required: yes + required: true type: str description: description: diff --git a/elb_application_lb.py b/elb_application_lb.py index 2a2dd771528..37f771355d1 100644 --- a/elb_application_lb.py +++ b/elb_application_lb.py @@ -157,9 +157,9 @@ type: str purge_listeners: description: - - If C(yes), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter. + - If C(true), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter. - If the I(listeners) parameter is not set then listeners will not be modified. - default: yes + default: true type: bool subnets: description: @@ -191,7 +191,7 @@ description: - Wait for the load balancer to have a state of 'active' before completing. A status check is performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. - default: no + default: false type: bool wait_timeout: description: @@ -200,7 +200,7 @@ purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. - default: yes + default: true type: bool ip_address_type: description: @@ -249,7 +249,7 @@ # Create an ALB and attach a listener with logging enabled - community.aws.elb_application_lb: - access_logs_enabled: yes + access_logs_enabled: true access_logs_s3_bucket: mybucket access_logs_s3_prefix: "logs" name: myalb diff --git a/elb_instance.py b/elb_instance.py index b0dafecb9ee..dc79cd6ca9e 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -40,12 +40,12 @@ been enabled. - If I(enable_availability_zone=no), the task will fail if the availability zone is not enabled on the ELB. type: bool - default: 'yes' + default: true wait: description: - Wait for instance registration or deregistration to complete successfully before returning. 
type: bool - default: 'yes' + default: true wait_timeout: description: - Number of seconds to wait for an instance to change state. diff --git a/elb_target_group.py b/elb_target_group.py index b8110ea0876..cd750be188b 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -69,7 +69,7 @@ description: - Whether or not to alter existing targets in the group to match what is passed with the module required: false - default: yes + default: true type: bool name: description: diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 88d3491077b..5fd8a9b6c39 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -34,9 +34,9 @@ elements: str collect_targets_health: description: - - When set to "yes", output contains targets health description + - When set to C(True), output contains targets health description required: false - default: no + default: false type: bool extends_documentation_fragment: diff --git a/iam_policy_info.py b/iam_policy_info.py index 19c5a01885b..b408f01b450 100644 --- a/iam_policy_info.py +++ b/iam_policy_info.py @@ -17,18 +17,18 @@ iam_type: description: - Type of IAM resource you wish to retrieve inline policies for. - required: yes + required: true choices: [ "user", "group", "role"] type: str iam_name: description: - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name. - required: yes + required: true type: str policy_name: description: - Name of a specific IAM inline policy you with to retrieve. - required: no + required: false type: str author: diff --git a/kinesis_stream.py b/kinesis_stream.py index f3ff171b421..530bc0b7d3b 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -86,7 +86,7 @@ community.aws.kinesis_stream: name: test-stream shards: 10 - wait: yes + wait: true wait_timeout: 600 register: test_stream @@ -97,7 +97,7 @@ shards: 10 tags: Env: development - wait: yes + wait: true wait_timeout: 600 register: test_stream @@ -109,7 +109,7 @@ shards: 10 tags: Env: development - wait: yes + wait: true wait_timeout: 600 register: test_stream @@ -118,7 +118,7 @@ community.aws.kinesis_stream: name: test-stream state: absent - wait: yes + wait: true wait_timeout: 600 register: test_stream @@ -131,7 +131,7 @@ encryption_state: enabled encryption_type: KMS key_id: alias/aws/kinesis - wait: yes + wait: true wait_timeout: 600 register: test_stream @@ -144,7 +144,7 @@ encryption_state: disabled encryption_type: KMS key_id: alias/aws/kinesis - wait: yes + wait: true wait_timeout: 600 register: test_stream ''' diff --git a/lambda_alias.py b/lambda_alias.py index adc89f4cb99..f1722ab5ab5 100644 --- a/lambda_alias.py +++ b/lambda_alias.py @@ -56,7 +56,7 @@ --- # Simple example to create a lambda function and publish a version - hosts: localhost - gather_facts: no + gather_facts: false vars: state: present project_folder: /path/to/deployment/package diff --git a/lambda_execute.py b/lambda_execute.py index 880ad4cb036..1d652466d6b 100644 --- a/lambda_execute.py +++ b/lambda_execute.py @@ -38,9 +38,9 @@ type: str tail_log: description: - - If I(tail_log=yes), the result of the task will include the last 4 KB + - If I(tail_log=true), the result of the task will include the last 4 KB of the CloudWatch log for the function execution. Log tailing only - works if you use synchronous invocation I(wait=yes). This is usually + works if you use synchronous invocation I(wait=true). This is usually used for development or testing Lambdas. 
type: bool default: false @@ -48,7 +48,7 @@ description: - Whether to wait for the function results or not. If I(wait=no) the task will not return any results. To wait for the Lambda function - to complete, set I(wait=yes) and the result will be available in the + to complete, set I(wait=true) and the result will be available in the I(output) key. type: bool default: true diff --git a/opensearch.py b/opensearch.py index 0035352b2a5..1cd9dd51e67 100644 --- a/opensearch.py +++ b/opensearch.py @@ -381,7 +381,7 @@ description: - Whether or not to wait for completion of OpenSearch creation, modification or deletion. type: bool - default: 'no' + default: false wait_timeout: description: - how long before wait gives up, in seconds. diff --git a/rds_cluster_snapshot.py b/rds_cluster_snapshot.py index 09077c9638b..2386f5589d7 100644 --- a/rds_cluster_snapshot.py +++ b/rds_cluster_snapshot.py @@ -94,7 +94,7 @@ region: us-east-1 source_id: "{{ snapshot.db_snapshot_arn }}" source_region: us-east-2 - copy_tags: yes + copy_tags: true ''' RETURN = r''' diff --git a/rds_instance.py b/rds_instance.py index f5e3aca4bbc..5996ec2b2cf 100644 --- a/rds_instance.py +++ b/rds_instance.py @@ -221,12 +221,12 @@ description: - The name of the feature associated with the IAM role. type: str - required: yes + required: true role_arn: description: - The ARN of the IAM role to associate with the DB instance. type: str - required: yes + required: true version_added: 3.3.0 iops: description: @@ -519,7 +519,7 @@ community.aws.rds_instance: id: "my-instance-id" state: present - purge_iam_roles: yes + purge_iam_roles: true # Restore DB instance from snapshot - name: Create a snapshot and wait until completion @@ -527,7 +527,7 @@ instance_id: 'my-instance-id' snapshot_id: 'my-new-snapshot' state: present - wait: yes + wait: true register: snapshot - name: Restore DB from snapshot diff --git a/rds_instance_snapshot.py b/rds_instance_snapshot.py index fc32ef75e4c..e9430fa1af4 100644 --- a/rds_instance_snapshot.py +++ b/rds_instance_snapshot.py @@ -92,7 +92,7 @@ region: us-east-1 source_id: "{{ snapshot.db_snapshot_arn }}" source_region: us-east-2 - copy_tags: yes + copy_tags: true - name: Delete snapshot community.aws.rds_instance_snapshot: diff --git a/route53.py b/route53.py index db97197ec6b..620d1833b98 100644 --- a/route53.py +++ b/route53.py @@ -256,7 +256,7 @@ type: A ttl: 7200 value: 1.1.1.1,2.2.2.2,3.3.3.3 - wait: yes + wait: true - name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated community.aws.route53: state: present @@ -268,7 +268,7 @@ - 1.1.1.1 - 2.2.2.2 - 3.3.3.3 - wait: yes + wait: true - name: Retrieve the details for new.foo.com community.aws.route53: state: get diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 9a2ea51e92f..0e74feec7c1 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -138,7 +138,7 @@ - Wait for the configuration to complete before returning. 
version_added: 1.5.0 type: bool - default: no + default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -577,7 +577,7 @@ def main(): transition_days=dict(type='int'), transition_date=dict(), transitions=dict(type='list', elements='dict'), - purge_transitions=dict(default='yes', type='bool'), + purge_transitions=dict(default=True, type='bool'), wait=dict(type='bool', default=False) ) diff --git a/s3_sync.py b/s3_sync.py index 602df0aec36..0a1797c1133 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -122,7 +122,7 @@ description: - Remove remote files that exist in bucket but are not present in the file root. required: false - default: no + default: false type: bool author: Ted Timmons (@tedder) diff --git a/ses_rule_set.py b/ses_rule_set.py index a16a0b2b047..cf516048356 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -58,7 +58,7 @@ community.aws.ses_rule_set: name: default-rule-set state: present - active: yes + active: true - name: Create some arbitrary rule set but do not activate it community.aws.ses_rule_set: @@ -69,7 +69,7 @@ community.aws.ses_rule_set: name: default-rule-set state: present - active: no + active: false - name: Remove an arbitrary inactive rule set community.aws.ses_rule_set: @@ -80,7 +80,7 @@ community.aws.ses_rule_set: name: default-rule-set state: absent - force: yes + force: true """ RETURN = """ diff --git a/sqs_queue.py b/sqs_queue.py index e83735254f4..d4b159bbab9 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -187,7 +187,7 @@ name: fifo-queue region: ap-southeast-2 queue_type: fifo - content_based_deduplication: yes + content_based_deduplication: true - name: Tag queue community.aws.sqs_queue: diff --git a/waf_condition.py b/waf_condition.py index b948ec9a81f..e44e889a8fc 100644 --- a/waf_condition.py +++ b/waf_condition.py @@ -127,7 +127,7 @@ waf_regional: description: Whether to use C(waf-regional) module. default: false - required: no + required: false type: bool state: description: Whether the condition should be C(present) or C(absent). diff --git a/waf_info.py b/waf_info.py index 81538e62923..e91a6d62672 100644 --- a/waf_info.py +++ b/waf_info.py @@ -22,7 +22,7 @@ waf_regional: description: Whether to use the C(waf-regional) module. default: false - required: no + required: false type: bool author: diff --git a/waf_rule.py b/waf_rule.py index 188c6de9df6..201529f25d1 100644 --- a/waf_rule.py +++ b/waf_rule.py @@ -27,7 +27,7 @@ options: name: description: Name of the Web Application Firewall rule. - required: yes + required: true type: str metric_name: description: @@ -79,13 +79,13 @@ conditions: - name: my_regex_condition type: regex - negated: no + negated: false - name: my_geo_condition type: geo - negated: no + negated: false - name: my_byte_condition type: byte - negated: yes + negated: true - name: remove WAF rule community.aws.waf_rule: diff --git a/waf_web_acl.py b/waf_web_acl.py index c4958a7c41f..d814736ad32 100644 --- a/waf_web_acl.py +++ b/waf_web_acl.py @@ -26,7 +26,7 @@ options: name: description: Name of the Web Application Firewall ACL to manage. - required: yes + required: true type: str default_action: description: The action that you want AWS WAF to take when a request doesn't @@ -82,7 +82,7 @@ waf_regional: description: Whether to use C(waf-regional) module. 
default: false - required: no + required: false type: bool ''' @@ -95,7 +95,7 @@ priority: 1 action: block default_action: block - purge_rules: yes + purge_rules: true state: present - name: delete the web acl diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index b4b3e4f8609..33fb7c32f68 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -56,7 +56,7 @@ purge_addresses: description: - When set to C(no), keep the existing addresses in place. Will modify and add, but will not delete. - default: yes + default: true type: bool notes: diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 5a6cafdf1dd..44dc9ba88b5 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -63,7 +63,7 @@ purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. - default: yes + default: true type: bool extends_documentation_fragment: @@ -87,13 +87,13 @@ action: allow: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: fsd statement: ip_set_reference_statement: arn: "{{ IPSET.arn }}" - cloudwatch_metrics: yes + cloudwatch_metrics: true tags: A: B C: D @@ -112,8 +112,8 @@ action: allow: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: fsd statement: ip_set_reference_statement: @@ -123,8 +123,8 @@ action: block: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: ddos statement: or_statement: @@ -144,7 +144,7 @@ text_transformations: - type: NONE priority: 0 - cloudwatch_metrics: yes + cloudwatch_metrics: true tags: A: B C: D diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index c51a04e49e7..df4a01b5034 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -95,7 +95,7 @@ purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. 
- default: yes + default: true type: bool notes: @@ -115,8 +115,8 @@ description: hallo eins scope: REGIONAL default_action: Allow - sampled_requests: no - cloudwatch_metrics: yes + sampled_requests: false + cloudwatch_metrics: true metric_name: test05-acl-metric rules: - name: zwei @@ -124,8 +124,8 @@ action: block: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: ddos statement: xss_match_statement: @@ -139,8 +139,8 @@ override_action: none: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: fsd statement: managed_rule_group_statement: @@ -154,8 +154,8 @@ override_action: none: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: bad_input_protect statement: managed_rule_group_statement: @@ -171,8 +171,8 @@ action: block: {} visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: mydomain-ratelimit statement: rate_based_statement: @@ -199,7 +199,7 @@ text_transformations: - type: LOWERCASE priority: 0 - purge_rules: yes + purge_rules: true tags: A: B C: D @@ -211,8 +211,8 @@ description: ACL that filters web traffic based on rate limits and whitelists some IPs scope: REGIONAL default_action: Allow - sampled_requests: yes - cloudwatch_metrics: yes + sampled_requests: true + cloudwatch_metrics: true metric_name: ip-filtering-traffic rules: - name: whitelist-own-IPs @@ -223,8 +223,8 @@ ip_set_reference_statement: arn: 'arn:aws:wafv2:us-east-1:520789123123:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: waf-acl-rule-whitelist-own-IPs - name: rate-limit-per-IP priority: 1 @@ -238,10 +238,10 @@ limit: 5000 aggregate_key_type: IP visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes + sampled_requests_enabled: true + cloud_watch_metrics_enabled: true metric_name: waf-acl-rule-rate-limit-per-IP - purge_rules: yes + purge_rules: true custom_response_bodies: too_many_requests: content_type: APPLICATION_JSON From b201ae45cc2e8b3580dba14ede9224ee4739f82d Mon Sep 17 00:00:00 2001 From: Igshaan Mesias Date: Mon, 5 Sep 2022 10:52:10 +0200 Subject: [PATCH 571/683] Fix KeyError when Cluster Parameter Group is specified in rds_cluster.py (#1417) Fix KeyError when Cluster Parameter Group is specified in rds_cluster.py SUMMARY Fix KeyError when comparing state. 
Fixes: #1409 ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_cluster.py Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- rds_cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rds_cluster.py b/rds_cluster.py index 68e0ef17fc9..e774e7a66cd 100644 --- a/rds_cluster.py +++ b/rds_cluster.py @@ -833,6 +833,11 @@ def changing_cluster_options(modify_params, current_cluster): if desired_vpc_sgs: changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs + desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None) + if desired_db_cluster_parameter_group: + if desired_db_cluster_parameter_group != current_cluster["DBClusterParameterGroup"]: + changing_params["DBClusterParameterGroupName"] = desired_db_cluster_parameter_group + for param in modify_params: if modify_params[param] != current_cluster[param]: changing_params[param] = modify_params[param] From 41e7a00e406e3831848e5d439e3084f66b56f292 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Fri, 9 Sep 2022 17:29:59 -0400 Subject: [PATCH 572/683] Remove cloudtrail module (#1438) Remove cloudtrail module Depends-On: ansible-collections/amazon.aws#997 Since ansible-collections/amazon.aws#997, cloudtrail is now part of the amazon.aws collection. --- cloudtrail.py | 639 -------------------------------------------- 1 file changed, 639 deletions(-) delete mode 100644 cloudtrail.py diff --git a/cloudtrail.py b/cloudtrail.py deleted file mode 100644 index aa3b637cee5..00000000000 --- a/cloudtrail.py +++ /dev/null @@ -1,639 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloudtrail -version_added: 1.0.0 -short_description: manage CloudTrail create, delete, update -description: - - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled. -author: - - Ansible Core Team - - Ted Timmons (@tedder) - - Daniel Shepherd (@shepdelacreme) -options: - state: - description: - - Add or remove CloudTrail configuration. - - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).' - - I(state=enabled) is equivalent to I(state=present). - - I(state=disabled) is equivalent to I(state=absent). - type: str - choices: ['present', 'absent', 'enabled', 'disabled'] - default: present - name: - description: - - Name for the CloudTrail. - - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account. - type: str - default: default - enable_logging: - description: - - Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files. - default: true - type: bool - s3_bucket_name: - description: - - An existing S3 bucket where CloudTrail will deliver log files. - - This bucket should exist and have the proper policy. - - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html). - - Required when I(state=present). - type: str - s3_key_prefix: - description: - - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed. - type: str - is_multi_region_trail: - description: - - Specify whether the trail belongs only to one region or exists in all regions.
- default: false - type: bool - enable_log_file_validation: - description: - - Specifies whether log file integrity validation is enabled. - - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered. - type: bool - aliases: [ "log_file_validation_enabled" ] - include_global_events: - description: - - Record API calls from global services such as IAM and STS. - default: true - type: bool - aliases: [ "include_global_service_events" ] - sns_topic_name: - description: - - SNS Topic name to send notifications to when a log file is delivered. - type: str - cloudwatch_logs_role_arn: - description: - - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group. - - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html). - - Required when C(cloudwatch_logs_log_group_arn). - type: str - cloudwatch_logs_log_group_arn: - description: - - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist. - - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html). - - Required when C(cloudwatch_logs_role_arn). - type: str - kms_key_id: - description: - - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption. - - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. - - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html). - type: str -notes: - - The I(purge_tags) option was added in release 4.0.0 - -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -''' - -EXAMPLES = ''' -- name: create single region cloudtrail - community.aws.cloudtrail: - state: present - name: default - s3_bucket_name: mylogbucket - s3_key_prefix: cloudtrail - region: us-east-1 - -- name: create multi-region trail with validation and tags - community.aws.cloudtrail: - state: present - name: default - s3_bucket_name: mylogbucket - region: us-east-1 - is_multi_region_trail: true - enable_log_file_validation: true - cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role" - cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*" - kms_key_id: "alias/MyAliasName" - tags: - environment: dev - Name: default - -- name: show another valid kms_key_id - community.aws.cloudtrail: - state: present - name: default - s3_bucket_name: mylogbucket - kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012" - # simply "12345678-1234-1234-1234-123456789012" would be valid too. 
- -- name: pause logging the trail we just created - community.aws.cloudtrail: - state: present - name: default - enable_logging: false - s3_bucket_name: mylogbucket - region: us-east-1 - is_multi_region_trail: true - enable_log_file_validation: true - tags: - environment: dev - Name: default - -- name: delete a trail - community.aws.cloudtrail: - state: absent - name: default -''' - -RETURN = ''' -exists: - description: whether the resource exists - returned: always - type: bool - sample: true -trail: - description: CloudTrail resource details - returned: always - type: complex - sample: hash/dictionary of values - contains: - trail_arn: - description: Full ARN of the CloudTrail resource - returned: success - type: str - sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default - name: - description: Name of the CloudTrail resource - returned: success - type: str - sample: default - is_logging: - description: Whether logging is turned on or paused for the Trail - returned: success - type: bool - sample: True - s3_bucket_name: - description: S3 bucket name where log files are delivered - returned: success - type: str - sample: myBucket - s3_key_prefix: - description: Key prefix in bucket where log files are delivered (if any) - returned: success when present - type: str - sample: myKeyPrefix - log_file_validation_enabled: - description: Whether log file validation is enabled on the trail - returned: success - type: bool - sample: true - include_global_service_events: - description: Whether global services (IAM, STS) are logged with this trail - returned: success - type: bool - sample: true - is_multi_region_trail: - description: Whether the trail applies to all regions or just one - returned: success - type: bool - sample: true - has_custom_event_selectors: - description: Whether any custom event selectors are used for this trail. - returned: success - type: bool - sample: False - home_region: - description: The home region where the trail was originally created and must be edited. - returned: success - type: str - sample: us-east-1 - sns_topic_name: - description: The SNS topic name where log delivery notifications are sent. - returned: success when present - type: str - sample: myTopic - sns_topic_arn: - description: Full ARN of the SNS topic where log delivery notifications are sent. - returned: success when present - type: str - sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic - cloud_watch_logs_log_group_arn: - description: Full ARN of the CloudWatch Logs log group where events are delivered. - returned: success when present - type: str - sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:* - cloud_watch_logs_role_arn: - description: Full ARN of the IAM role that CloudTrail assumes to deliver events. - returned: success when present - type: str - sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role - kms_key_id: - description: Full ARN of the KMS Key used to encrypt log files. 
- returned: success when present - type: str - sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012 - tags: - description: hash/dictionary of tags applied to this resource - returned: success - type: dict - sample: {'environment': 'dev', 'Name': 'default'} -''' - -try: - from botocore.exceptions import ClientError, BotoCoreError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - - -def get_kms_key_aliases(module, client, keyId): - """ - get list of key aliases - - module : AnsibleAWSModule object - client : boto3 client connection object for kms - keyId : keyId to get aliases for - """ - try: - key_resp = client.list_aliases(KeyId=keyId) - except (BotoCoreError, ClientError): - # Don't fail here, just return [] to maintain backwards compat - # in case user doesn't have kms:ListAliases permissions - return [] - - return key_resp['Aliases'] - - -def create_trail(module, client, ct_params): - """ - Creates a CloudTrail - - module : AnsibleAWSModule object - client : boto3 client connection object - ct_params : The parameters for the Trail to create - """ - resp = {} - try: - resp = client.create_trail(**ct_params) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to create Trail") - - return resp - - -def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): - """ - Creates, updates, removes tags on a CloudTrail resource - - module : AnsibleAWSModule object - client : boto3 client connection object - tags : Dict of tags converted from ansible_dict to boto3 list of dicts - trail_arn : The ARN of the CloudTrail to operate on - curr_tags : Dict of the current tags on resource, if any - dry_run : true/false to determine if changes will be made if needed - """ - - if tags is None: - return False - - curr_tags = curr_tags or {} - - tags_to_add, tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags) - if not tags_to_add and not tags_to_remove: - return False - - if module.check_mode: - return True - - if tags_to_remove: - remove = {k: curr_tags[k] for k in tags_to_remove} - tags_to_remove = ansible_dict_to_boto3_tag_list(remove) - try: - client.remove_tags(ResourceId=trail_arn, TagsList=tags_to_remove) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to remove tags from Trail") - - if tags_to_add: - tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add) - try: - client.add_tags(ResourceId=trail_arn, TagsList=tags_to_add) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to add tags to Trail") - - return True - - -def get_tag_list(keys, tags): - """ - Returns a list of dicts with tags to act on - keys : set of keys to get the values for - tags : the dict of tags to turn into a list - """ - tag_list = [] - for k in keys: - tag_list.append({'Key': k, 'Value': tags[k]}) - - return tag_list - - -def set_logging(module, client, name, action): - """ - Starts or stops logging based on given state - - module : AnsibleAWSModule object - client : boto3 client 
connection object - name : The name or ARN of the CloudTrail to operate on - action : start or stop - """ - if action == 'start': - try: - client.start_logging(Name=name) - return client.get_trail_status(Name=name) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to start logging") - elif action == 'stop': - try: - client.stop_logging(Name=name) - return client.get_trail_status(Name=name) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to stop logging") - else: - module.fail_json(msg="Unsupported logging action") - - -def get_trail_facts(module, client, name): - """ - Describes existing trail in an account - - module : AnsibleAWSModule object - client : boto3 client connection object - name : Name of the trail - """ - # get Trail info - try: - trail_resp = client.describe_trails(trailNameList=[name]) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to describe Trail") - - # Now check to see if our trail exists and get status and tags - if len(trail_resp['trailList']): - trail = trail_resp['trailList'][0] - try: - status_resp = client.get_trail_status(Name=trail['Name']) - tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']]) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to describe Trail") - - trail['IsLogging'] = status_resp['IsLogging'] - trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList']) - # Check for non-existent values and populate with None - optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId']) - for v in optional_vals - set(trail.keys()): - trail[v] = None - return trail - - else: - # trail doesn't exist return None - return None - - -def delete_trail(module, client, trail_arn): - """ - Delete a CloudTrail - - module : AnsibleAWSModule object - client : boto3 client connection object - trail_arn : Full CloudTrail ARN - """ - try: - client.delete_trail(Name=trail_arn) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to delete Trail") - - -def update_trail(module, client, ct_params): - """ - Update a CloudTrail - - module : AnsibleAWSModule object - client : boto3 client connection object - ct_params : The parameters for the Trail to update - """ - try: - client.update_trail(**ct_params) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to update Trail") - - -def main(): - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), - name=dict(default='default'), - enable_logging=dict(default=True, type='bool'), - s3_bucket_name=dict(), - s3_key_prefix=dict(no_log=False), - sns_topic_name=dict(), - is_multi_region_trail=dict(default=False, type='bool'), - enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']), - include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']), - cloudwatch_logs_role_arn=dict(), - cloudwatch_logs_log_group_arn=dict(), - kms_key_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool') - ) - - required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])] - required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')] - - module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True, required_together=required_together, required_if=required_if) - - # collect parameters - if module.params['state'] in ('present', 'enabled'): - state = 'present' - elif module.params['state'] in ('absent', 'disabled'): - state = 'absent' - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - enable_logging = module.params['enable_logging'] - ct_params = dict( - Name=module.params['name'], - S3BucketName=module.params['s3_bucket_name'], - IncludeGlobalServiceEvents=module.params['include_global_events'], - IsMultiRegionTrail=module.params['is_multi_region_trail'], - ) - - if module.params['s3_key_prefix']: - ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/') - - if module.params['sns_topic_name']: - ct_params['SnsTopicName'] = module.params['sns_topic_name'] - - if module.params['cloudwatch_logs_role_arn']: - ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn'] - - if module.params['cloudwatch_logs_log_group_arn']: - ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn'] - - if module.params['enable_log_file_validation'] is not None: - ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation'] - - if module.params['kms_key_id']: - ct_params['KmsKeyId'] = module.params['kms_key_id'] - - client = module.client('cloudtrail') - region = module.region - - results = dict( - changed=False, - exists=False - ) - - # Get existing trail facts - trail = get_trail_facts(module, client, ct_params['Name']) - - # If the trail exists set the result exists variable - if trail is not None: - results['exists'] = True - initial_kms_key_id = trail.get('KmsKeyId') - - if state == 'absent' and results['exists']: - # If Trail exists go ahead and delete - results['changed'] = True - results['exists'] = False - results['trail'] = dict() - if not module.check_mode: - delete_trail(module, client, trail['TrailARN']) - - elif state == 'present' and results['exists']: - # If Trail exists see if we need to update it - do_update = False - for key in ct_params: - tkey = str(key) - # boto3 has inconsistent parameter naming so we handle it here - if key == 'EnableLogFileValidation': - tkey = 'LogFileValidationEnabled' - # We need to make an empty string equal None - if ct_params.get(key) == '': - val = None - else: - val = ct_params.get(key) - if val != trail.get(tkey): - do_update = True - if tkey != 'KmsKeyId': - # We'll check if the KmsKeyId causes changes later since - # user could've provided a key alias, alias arn, or key id - # and trail['KmsKeyId'] is always a key arn - results['changed'] = True - # If we are in check mode copy the changed values to the trail facts in result output to show what would change. - if module.check_mode: - trail.update({tkey: ct_params.get(key)}) - - if not module.check_mode and do_update: - update_trail(module, client, ct_params) - trail = get_trail_facts(module, client, ct_params['Name']) - - # Determine if KmsKeyId changed - if not module.check_mode: - if initial_kms_key_id != trail.get('KmsKeyId'): - results['changed'] = True - else: - new_key = ct_params.get('KmsKeyId') - if initial_kms_key_id != new_key: - # Assume changed for a moment - results['changed'] = True - - # However, new_key could be a key id, alias arn, or alias name - # that maps back to the key arn in initial_kms_key_id. So check - all aliases for a match.
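# A hedged illustration of why the loop below checks three fields: a
# kms:ListAliases entry (field names per the KMS API, values hypothetical)
# carries every identifier form a user may have passed as kms_key_id --
#   {'AliasName': 'alias/MyAliasName',
#    'AliasArn': 'arn:aws:kms:us-east-1:123456789012:alias/MyAliasName',
#    'TargetKeyId': '12345678-1234-1234-1234-123456789012'}
# -- so matching new_key against any of them means the key did not change.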
- initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id) - for a in initial_aliases: - if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key: - results['changed'] = False - - # Check if we need to start/stop logging - if enable_logging and not trail['IsLogging']: - results['changed'] = True - trail['IsLogging'] = True - if not module.check_mode: - set_logging(module, client, name=ct_params['Name'], action='start') - if not enable_logging and trail['IsLogging']: - results['changed'] = True - trail['IsLogging'] = False - if not module.check_mode: - set_logging(module, client, name=ct_params['Name'], action='stop') - - # Check if we need to update tags on resource - tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], - purge_tags=purge_tags) - if tags_changed: - updated_tags = dict() - if not purge_tags: - updated_tags = trail['tags'] - updated_tags.update(tags) - results['changed'] = True - trail['tags'] = updated_tags - - # Populate trail facts in output - results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) - - elif state == 'present' and not results['exists']: - # Trail doesn't exist just go create it - results['changed'] = True - results['exists'] = True - if not module.check_mode: - if tags: - ct_params['TagList'] = ansible_dict_to_boto3_tag_list(tags) - # If we aren't in check_mode then actually create it - created_trail = create_trail(module, client, ct_params) - # Get the trail status - try: - status_resp = client.get_trail_status(Name=created_trail['Name']) - except (BotoCoreError, ClientError) as err: - module.fail_json_aws(err, msg="Failed to fetch Trail status") - # Set the logging state for the trail to desired value - if enable_logging and not status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='start') - if not enable_logging and status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='stop') - # Get facts for newly created Trail - trail = get_trail_facts(module, client, ct_params['Name']) - - # If we are in check mode create a fake return structure for the newly minted trail - if module.check_mode: - acct_id = '123456789012' - try: - sts_client = module.client('sts') - acct_id = sts_client.get_caller_identity()['Account'] - except (BotoCoreError, ClientError): - pass - trail = dict() - trail.update(ct_params) - if 'EnableLogFileValidation' not in ct_params: - ct_params['EnableLogFileValidation'] = False - trail['EnableLogFileValidation'] = ct_params['EnableLogFileValidation'] - trail.pop('EnableLogFileValidation') - fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name'] - trail['HasCustomEventSelectors'] = False - trail['HomeRegion'] = region - trail['TrailARN'] = fake_arn - trail['IsLogging'] = enable_logging - trail['tags'] = tags - # Populate trail facts in output - results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) - - module.exit_json(**results) - - -if __name__ == '__main__': - main() From 2eefdf52e2d9c08c03c95fb2fbfa4d243c44dd85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Fri, 16 Sep 2022 16:04:54 -0400 Subject: [PATCH 573/683] ec2_placement_group: Handle a potential race condition during create (#1477) ec2_placement_group: Handle a potential race condition during create Address a race condition during the creation of a new Placement Group.
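A rough sketch of the window involved (hypothetical boto3 calls, not the module code):

    connection.create_placement_group(GroupName='my-pg', Strategy='cluster')
    # An immediate lookup can still raise a ClientError with the code
    # 'InvalidPlacementGroup.Unknown' until the new group becomes visible,
    # so the get_placement_group_information() helper added below retries
    # on exactly that error code instead of treating the group as absent.
    connection.describe_placement_groups(GroupNames=['my-pg'])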
The consequence was a "placement_group": null in output after a successful new creation. e.g: https://af0ac3e5f4e1620a8d63-ed3785e7f94a59162d05eede0959ab4b.ssl.cf5.rackcdn.com/1459/71e1a28c8ab7c12471b7f1cce5a37846ff642439/check/integration-community.aws-12/aa426fe/job-output.txt Reviewed-by: Mark Chappell --- ec2_placement_group.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 9ca3bb02ab9..4b90baf57f4 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -112,7 +112,10 @@ @AWSRetry.exponential_backoff() -def get_placement_group_details(connection, module): +def search_placement_group(connection, module): + """ + Check if a placement group exists. + """ name = module.params.get("name") try: response = connection.describe_placement_groups( @@ -136,6 +139,22 @@ def get_placement_group_details(connection, module): } +@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown']) +def get_placement_group_information(connection, name): + """ + Retrieve information about a placement group. + """ + response = connection.describe_placement_groups( + GroupNames=[name] + ) + placement_group = response['PlacementGroups'][0] + return { + "name": placement_group['GroupName'], + "state": placement_group['State'], + "strategy": placement_group['Strategy'], + } + + @AWSRetry.exponential_backoff() def create_placement_group(connection, module): name = module.params.get("name") @@ -167,9 +186,7 @@ def create_placement_group(connection, module): msg="Couldn't create placement group [%s]" % name) module.exit_json(changed=True, - placement_group=get_placement_group_details( - connection, module - )) + placement_group=get_placement_group_information(connection, name)) @AWSRetry.exponential_backoff() @@ -205,7 +222,7 @@ def main(): state = module.params.get("state") if state == 'present': - placement_group = get_placement_group_details(connection, module) + placement_group = search_placement_group(connection, module) if placement_group is None: create_placement_group(connection, module) else: @@ -223,7 +240,7 @@ def main(): strategy)) elif state == 'absent': - placement_group = get_placement_group_details(connection, module) + placement_group = search_placement_group(connection, module) if placement_group is None: module.exit_json(changed=False) else: From c0e13361ca1f3cbb387dcd8147ea8944dc7f1863 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Wed, 21 Sep 2022 13:05:30 -0400 Subject: [PATCH 574/683] Migrate elb_application_lb module to amazon.aws (#1506) Migrate elb_application_lb module to amazon.aws SUMMARY Remove elb_application_lb* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis --- elb_application_lb.py | 821 ------------------------------------- elb_application_lb_info.py | 340 --------------- 2 files changed, 1161 deletions(-) delete mode 100644 elb_application_lb.py delete mode 100644 elb_application_lb_info.py diff --git a/elb_application_lb.py b/elb_application_lb.py deleted file mode 100644 index 37f771355d1..00000000000 --- a/elb_application_lb.py +++ /dev/null @@ -1,821 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 
3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: elb_application_lb -version_added: 1.0.0 -short_description: Manage an Application Load Balancer -description: - - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. -author: - - "Rob White (@wimnat)" -options: - access_logs_enabled: - description: - - Whether or not to enable access logs. - - When set, I(access_logs_s3_bucket) must also be set. - type: bool - access_logs_s3_bucket: - description: - - The name of the S3 bucket for the access logs. - - The bucket must exist in the same - region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket. - - Required if access logs in Amazon S3 are enabled. - - When set, I(access_logs_enabled) must also be set. - type: str - access_logs_s3_prefix: - description: - - The prefix for the log location in the S3 bucket. - - If you don't specify a prefix, the access logs are stored in the root of the bucket. - - Cannot begin or end with a slash. - type: str - deletion_protection: - description: - - Indicates whether deletion protection for the ALB is enabled. - - Defaults to C(False). - type: bool - http2: - description: - - Indicates whether to enable HTTP2 routing. - - Defaults to C(True). - type: bool - http_desync_mitigation_mode: - description: - - Determines how the load balancer handles requests that might pose a security risk to an application. - - Defaults to C('defensive') - type: str - choices: ['monitor', 'defensive', 'strictest'] - version_added: 3.2.0 - http_drop_invalid_header_fields: - description: - - Indicates whether HTTP headers with invalid header fields are removed by the load balancer C(True) or routed to targets C(False). - - Defaults to C(False). - type: bool - version_added: 3.2.0 - http_x_amzn_tls_version_and_cipher_suite: - description: - - Indicates whether the two headers are added to the client request before sending it to the target. - - Defaults to C(False). - type: bool - version_added: 3.2.0 - http_xff_client_port: - description: - - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. - - Defaults to C(False). - type: bool - version_added: 3.2.0 - idle_timeout: - description: - - The number of seconds to wait before an idle connection is closed. - type: int - listeners: - description: - - A list of dicts containing listeners to attach to the ALB. See examples for detail of the dict required. Note that listener keys - are CamelCased. - type: list - elements: dict - suboptions: - Port: - description: The port on which the load balancer is listening. - required: true - type: int - Protocol: - description: The protocol for connections from clients to the load balancer. - required: true - type: str - Certificates: - description: The SSL server certificate. 
- type: list - elements: dict - suboptions: - CertificateArn: - description: The Amazon Resource Name (ARN) of the certificate. - type: str - SslPolicy: - description: The security policy that defines which ciphers and protocols are supported. - type: str - DefaultActions: - description: The default actions for the listener. - required: true - type: list - elements: dict - suboptions: - Type: - description: The type of action. - type: str - TargetGroupArn: - description: - - The Amazon Resource Name (ARN) of the target group. - - Mutually exclusive with I(TargetGroupName). - type: str - TargetGroupName: - description: - - The name of the target group. - - Mutually exclusive with I(TargetGroupArn). - Rules: - type: list - elements: dict - description: - - A list of ALB Listener Rules. - - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:' - - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule' - suboptions: - Conditions: - type: list - description: Conditions which must be met for the actions to be applied. - elements: dict - Priority: - type: int - description: The rule priority. - Actions: - type: list - description: Actions to apply if all of the rule's conditions are met. - elements: dict - name: - description: - - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric - characters or hyphens, and must not begin or end with a hyphen. - required: true - type: str - purge_listeners: - description: - - If C(true), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter. - - If the I(listeners) parameter is not set then listeners will not be modified. - default: true - type: bool - subnets: - description: - - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from - at least two Availability Zones. - - Required if I(state=present). - type: list - elements: str - security_groups: - description: - - A list of the names or IDs of the security groups to assign to the load balancer. - - Required if I(state=present). - - If C([]), the VPC's default security group will be used. - type: list - elements: str - scheme: - description: - - Internet-facing or internal load balancer. An ALB scheme can not be modified after creation. - default: internet-facing - choices: [ 'internet-facing', 'internal' ] - type: str - state: - description: - - Create or destroy the load balancer. - default: present - choices: [ 'present', 'absent' ] - type: str - wait: - description: - - Wait for the load balancer to have a state of 'active' before completing. A status check is - performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. - default: false - type: bool - wait_timeout: - description: - - The time in seconds to use in conjunction with I(wait). - type: int - purge_rules: - description: - - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. - default: true - type: bool - ip_address_type: - description: - - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. 
- choices: [ 'ipv4', 'dualstack' ] - type: str - waf_fail_open: - description: - - Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. - - Defaults to C(False). - type: bool - version_added: 3.2.0 -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -notes: - - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Create an ALB and attach a listener -- community.aws.elb_application_lb: - name: myalb - security_groups: - - sg-12345678 - - my-sec-group - subnets: - - subnet-012345678 - - subnet-abcdef000 - listeners: - - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). - Port: 80 # Required. The port on which the load balancer is listening. - # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy. - SslPolicy: ELBSecurityPolicy-2015-05 - Certificates: # The ARN of the certificate (only one certificate ARN should be provided) - - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com - DefaultActions: - - Type: forward # Required. - TargetGroupName: # Required. The name of the target group - state: present - -# Create an ALB and attach a listener with logging enabled -- community.aws.elb_application_lb: - access_logs_enabled: true - access_logs_s3_bucket: mybucket - access_logs_s3_prefix: "logs" - name: myalb - security_groups: - - sg-12345678 - - my-sec-group - subnets: - - subnet-012345678 - - subnet-abcdef000 - listeners: - - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). - Port: 80 # Required. The port on which the load balancer is listening. - # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy. - SslPolicy: ELBSecurityPolicy-2015-05 - Certificates: # The ARN of the certificate (only one certificate ARN should be provided) - - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com - DefaultActions: - - Type: forward # Required. - TargetGroupName: # Required.
The name of the target group - state: present - -# Create an ALB with listeners and rules -- community.aws.elb_application_lb: - name: test-alb - subnets: - - subnet-12345678 - - subnet-87654321 - security_groups: - - sg-12345678 - scheme: internal - listeners: - - Protocol: HTTPS - Port: 443 - DefaultActions: - - Type: forward - TargetGroupName: test-target-group - Certificates: - - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com - SslPolicy: ELBSecurityPolicy-2015-05 - Rules: - - Conditions: - - Field: path-pattern - Values: - - '/test' - Priority: '1' - Actions: - - TargetGroupName: test-target-group - Type: forward - - Conditions: - - Field: path-pattern - Values: - - "/redirect-path/*" - Priority: '2' - Actions: - - Type: redirect - RedirectConfig: - Host: "#{host}" - Path: "/example/redir" # or /#{path} - Port: "#{port}" - Protocol: "#{protocol}" - Query: "#{query}" - StatusCode: "HTTP_302" # or HTTP_301 - - Conditions: - - Field: path-pattern - Values: - - "/fixed-response-path/" - Priority: '3' - Actions: - - Type: fixed-response - FixedResponseConfig: - ContentType: "text/plain" - MessageBody: "This is the page you're looking for" - StatusCode: "200" - - Conditions: - - Field: host-header - Values: - - "hostname.domain.com" - - "alternate.domain.com" - Priority: '4' - Actions: - - TargetGroupName: test-target-group - Type: forward - state: present - -# Remove an ALB -- community.aws.elb_application_lb: - name: myalb - state: absent - -''' - -RETURN = r''' -access_logs_s3_bucket: - description: The name of the S3 bucket for the access logs. - returned: when state is present - type: str - sample: "mys3bucket" -access_logs_s3_enabled: - description: Indicates whether access logs stored in Amazon S3 are enabled. - returned: when state is present - type: bool - sample: true -access_logs_s3_prefix: - description: The prefix for the location in the S3 bucket. - returned: when state is present - type: str - sample: "my/logs" -availability_zones: - description: The Availability Zones for the load balancer. - returned: when state is present - type: list - sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] -canonical_hosted_zone_id: - description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. - returned: when state is present - type: str - sample: "ABCDEF12345678" -changed: - description: Whether an ALB was created/updated/deleted - returned: always - type: bool - sample: true -created_time: - description: The date and time the load balancer was created. - returned: when state is present - type: str - sample: "2015-02-12T02:14:02+00:00" -deletion_protection_enabled: - description: Indicates whether deletion protection is enabled. - returned: when state is present - type: bool - sample: true -dns_name: - description: The public DNS name of the load balancer. - returned: when state is present - type: str - sample: "internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com" -idle_timeout_timeout_seconds: - description: The idle timeout value, in seconds. - returned: when state is present - type: int - sample: 60 -ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. - returned: when state is present - type: str - sample: "ipv4" -listeners: - description: Information about the listeners. - returned: when state is present - type: complex - contains: - listener_arn: - description: The Amazon Resource Name (ARN) of the listener. 
- returned: when state is present - type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - returned: when state is present - type: str - sample: "" - port: - description: The port on which the load balancer is listening. - returned: when state is present - type: int - sample: 80 - protocol: - description: The protocol for connections from clients to the load balancer. - returned: when state is present - type: str - sample: "HTTPS" - certificates: - description: The SSL server certificate. - returned: when state is present - type: complex - contains: - certificate_arn: - description: The Amazon Resource Name (ARN) of the certificate. - returned: when state is present - type: str - sample: "" - ssl_policy: - description: The security policy that defines which ciphers and protocols are supported. - returned: when state is present - type: str - sample: "" - default_actions: - description: The default actions for the listener. - returned: when state is present - type: str - contains: - type: - description: The type of action. - returned: when state is present - type: str - sample: "" - target_group_arn: - description: The Amazon Resource Name (ARN) of the target group. - returned: when state is present - type: str - sample: "" -load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - returned: when state is present - type: str - sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" -load_balancer_name: - description: The name of the load balancer. - returned: when state is present - type: str - sample: "my-alb" -routing_http2_enabled: - description: Indicates whether HTTP/2 is enabled. - returned: when state is present - type: bool - sample: true -routing_http_desync_mitigation_mode: - description: Determines how the load balancer handles requests that might pose a security risk to an application. - returned: when state is present - type: str - sample: "defensive" -routing_http_drop_invalid_header_fields_enabled: - description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). - returned: when state is present - type: bool - sample: false -routing_http_x_amzn_tls_version_and_cipher_suite_enabled: - description: Indicates whether the two headers are added to the client request before sending it to the target. - returned: when state is present - type: bool - sample: false -routing_http_xff_client_port_enabled: - description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. - returned: when state is present - type: bool - sample: false -scheme: - description: Internet-facing or internal load balancer. - returned: when state is present - type: str - sample: "internal" -security_groups: - description: The IDs of the security groups for the load balancer. - returned: when state is present - type: list - sample: ['sg-0011223344'] -state: - description: The state of the load balancer. - returned: when state is present - type: dict - sample: {'code': 'active'} -tags: - description: The tags attached to the load balancer. - returned: when state is present - type: dict - sample: { - 'Tag': 'Example' - } -type: - description: The type of load balancer. - returned: when state is present - type: str - sample: "application" -vpc_id: - description: The ID of the VPC for the load balancer. 
- returned: when state is present - type: str - sample: "vpc-0011223344" -waf_fail_open_enabled: - description: Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. - returned: when state is present - type: bool - sample: false -''' -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( - ApplicationLoadBalancer, - ELBListener, - ELBListenerRule, - ELBListenerRules, - ELBListeners, -) -from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules - - -@AWSRetry.jittered_backoff() -def describe_sgs_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_security_groups') - return paginator.paginate(**params).build_full_result()['SecurityGroups'] - - -def find_default_sg(connection, module, vpc_id): - """ - Finds the default security group for the given VPC ID. - """ - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'}) - try: - sg = describe_sgs_with_backoff(connection, Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id)) - if len(sg) == 1: - return sg[0]['GroupId'] - elif len(sg) == 0: - module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id)) - else: - module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) - - -def create_or_update_alb(alb_obj): - """Create ALB or modify main attributes. 
json_exit here""" - if alb_obj.elb: - # ALB exists so check subnets, security groups and tags match what has been passed - # Subnets - if not alb_obj.compare_subnets(): - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - alb_obj.modify_subnets() - - # Security Groups - if not alb_obj.compare_security_groups(): - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - alb_obj.modify_security_groups() - - # ALB attributes - if not alb_obj.compare_elb_attributes(): - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - alb_obj.update_elb_attributes() - alb_obj.modify_elb_attributes() - - # Tags - only need to play with tags if tags parameter has been set to something - if alb_obj.tags is not None: - - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) - - # Exit on check_mode - if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - - # Delete necessary tags - if tags_to_delete: - alb_obj.delete_tags(tags_to_delete) - - # Add/update tags - if tags_need_modify: - alb_obj.modify_tags() - - else: - # Create load balancer - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.') - alb_obj.create_elb() - - # Listeners - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) - listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() - - # Exit on check_mode - if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - - # Delete listeners - for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) - listener_obj.delete() - listeners_obj.changed = True - - # Add listeners - for listener_to_add in listeners_to_add: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) - listener_obj.add() - listeners_obj.changed = True - - # Modify listeners - for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) - listener_obj.modify() - listeners_obj.changed = True - - # If listeners changed, mark ALB as changed - if listeners_obj.changed: - alb_obj.changed = True - - # Rules of each listener - for listener in listeners_obj.listeners: - if 'Rules' in listener: - rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) - rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() - - # Exit on check_mode - if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - - # Delete rules - if alb_obj.module.params['purge_rules']: - for rule in rules_to_delete: - rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, 
{'RuleArn': rule}, rules_obj.listener_arn) - rule_obj.delete() - alb_obj.changed = True - - # Add rules - for rule in rules_to_add: - rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) - rule_obj.create() - alb_obj.changed = True - - # Modify rules - for rule in rules_to_modify: - rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) - rule_obj.modify() - alb_obj.changed = True - - # Update ALB ip address type only if option has been provided - if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): - # Exit on check_mode - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - - alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) - - # Exit on check_mode - no changes - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') - - # Get the ALB again - alb_obj.update() - - # Get the ALB listeners again - listeners_obj.update() - - # Update the ALB attributes - alb_obj.update_elb_attributes() - - # Convert to snake_case and merge in everything we want to return to the user - snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) - snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) - snaked_alb['listeners'] = [] - for listener in listeners_obj.current_listeners: - # For each listener, get listener rules - listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) - snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) - - # Change tags to ansible friendly dict - snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) - - # ip address type - snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() - - alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) - - -def delete_alb(alb_obj): - - if alb_obj.elb: - - # Exit on check_mode - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') - - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) - for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) - listener_obj.delete() - - alb_obj.delete() - - else: - - # Exit on check_mode - no changes - if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') - - alb_obj.module.exit_json(changed=alb_obj.changed) - - -def main(): - - argument_spec = dict( - access_logs_enabled=dict(type='bool'), - access_logs_s3_bucket=dict(type='str'), - access_logs_s3_prefix=dict(type='str'), - deletion_protection=dict(type='bool'), - http2=dict(type='bool'), - http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']), - http_drop_invalid_header_fields=dict(type='bool'), - http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'), - http_xff_client_port=dict(type='bool'), - idle_timeout=dict(type='int'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - 
DefaultActions=dict(type='list', required=True, elements='dict'), - Rules=dict(type='list', elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - waf_fail_open=dict(type='bool'), - wait_timeout=dict(type='int'), - wait=dict(default=False, type='bool'), - purge_rules=dict(default=True, type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['subnets', 'security_groups']) - ], - required_together=[ - ['access_logs_enabled', 'access_logs_s3_bucket'] - ], - supports_check_mode=True, - ) - - # Quick check of listeners parameters - listeners = module.params.get("listeners") - if listeners is not None: - for listener in listeners: - for key in listener.keys(): - if key == 'Protocol' and listener[key] == 'HTTPS': - if listener.get('SslPolicy') is None: - module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS") - - if listener.get('Certificates') is None: - module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS") - - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') - - state = module.params.get("state") - - alb = ApplicationLoadBalancer(connection, connection_ec2, module) - - # Update security group if default is specified - if alb.elb and module.params.get('security_groups') == []: - module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])] - alb = ApplicationLoadBalancer(connection, connection_ec2, module) - - if state == 'present': - create_or_update_alb(alb) - elif state == 'absent': - delete_alb(alb) - - -if __name__ == '__main__': - main() diff --git a/elb_application_lb_info.py b/elb_application_lb_info.py deleted file mode 100644 index 9a6e817469f..00000000000 --- a/elb_application_lb_info.py +++ /dev/null @@ -1,340 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: elb_application_lb_info -version_added: 1.0.0 -short_description: Gather information about Application Load Balancers in AWS -description: - - Gather information about Application Load Balancers in AWS -author: Rob White (@wimnat) -options: - load_balancer_arns: - description: - - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call. - required: false - type: list - elements: str - names: - description: - - The names of the load balancers. - required: false - type: list - elements: str - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: Gather information about all ALBs - community.aws.elb_application_lb_info: - -- name: Gather information about a particular ALB given its ARN - community.aws.elb_application_lb_info: - load_balancer_arns: - - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-alb/aabbccddeeff" - -- name: Gather information about ALBs named 'alb1' and 'alb2' - community.aws.elb_application_lb_info: - names: - - alb1 - - alb2 - -- name: Gather information about specific ALB - community.aws.elb_application_lb_info: - names: "alb-name" - region: "aws-region" - register: alb_info -- ansible.builtin.debug: - var: alb_info -''' - -RETURN = r''' -load_balancers: - description: a list of load balancers - returned: always - type: complex - contains: - access_logs_s3_bucket: - description: The name of the S3 bucket for the access logs. - type: str - sample: "mys3bucket" - access_logs_s3_enabled: - description: Indicates whether access logs stored in Amazon S3 are enabled. - type: bool - sample: true - access_logs_s3_prefix: - description: The prefix for the location in the S3 bucket. - type: str - sample: "my/logs" - availability_zones: - description: The Availability Zones for the load balancer. - type: list - sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] - canonical_hosted_zone_id: - description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. - type: str - sample: "ABCDEF12345678" - created_time: - description: The date and time the load balancer was created. - type: str - sample: "2015-02-12T02:14:02+00:00" - deletion_protection_enabled: - description: Indicates whether deletion protection is enabled. - type: bool - sample: true - dns_name: - description: The public DNS name of the load balancer. - type: str - sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" - idle_timeout_timeout_seconds: - description: The idle timeout value, in seconds. - type: int - sample: 60 - ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. - type: str - sample: "ipv4" - listeners: - description: Information about the listeners. - type: complex - contains: - listener_arn: - description: The Amazon Resource Name (ARN) of the listener. - type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - type: str - sample: "" - port: - description: The port on which the load balancer is listening. - type: int - sample: 80 - protocol: - description: The protocol for connections from clients to the load balancer. - type: str - sample: "HTTPS" - certificates: - description: The SSL server certificate. - type: complex - contains: - certificate_arn: - description: The Amazon Resource Name (ARN) of the certificate. - type: str - sample: "" - ssl_policy: - description: The security policy that defines which ciphers and protocols are supported. - type: str - sample: "" - default_actions: - description: The default actions for the listener. - type: str - contains: - type: - description: The type of action. - type: str - sample: "" - target_group_arn: - description: The Amazon Resource Name (ARN) of the target group. - type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. 
- type: str - sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" - load_balancer_name: - description: The name of the load balancer. - type: str - sample: "my-alb" - routing_http2_enabled: - description: Indicates whether HTTP/2 is enabled. - type: bool - sample: true - routing_http_desync_mitigation_mode: - description: Determines how the load balancer handles requests that might pose a security risk to an application. - type: str - sample: "defensive" - routing_http_drop_invalid_header_fields_enabled: - description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). - type: bool - sample: false - routing_http_x_amzn_tls_version_and_cipher_suite_enabled: - description: Indicates whether the two headers are added to the client request before sending it to the target. - type: bool - sample: false - routing_http_xff_client_port_enabled: - description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. - type: bool - sample: false - scheme: - description: Internet-facing or internal load balancer. - type: str - sample: "internal" - security_groups: - description: The IDs of the security groups for the load balancer. - type: list - sample: ['sg-0011223344'] - state: - description: The state of the load balancer. - type: dict - sample: {'code': 'active'} - tags: - description: The tags attached to the load balancer. - type: dict - sample: { - 'Tag': 'Example' - } - type: - description: The type of load balancer. - type: str - sample: "application" - vpc_id: - description: The ID of the VPC for the load balancer. - type: str - sample: "vpc-0011223344" - waf_fail_open_enabled: - description: Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets - if it is unable to forward the request to AWS WAF. 
- type: bool - sample: false -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict - - -@AWSRetry.jittered_backoff(retries=10) -def get_paginator(connection, **kwargs): - paginator = connection.get_paginator('describe_load_balancers') - return paginator.paginate(**kwargs).build_full_result() - - -def get_alb_listeners(connection, module, alb_arn): - - try: - return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe alb listeners") - - -def get_listener_rules(connection, module, listener_arn): - - try: - return connection.describe_rules(ListenerArn=listener_arn)['Rules'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe listener rules") - - -def get_load_balancer_attributes(connection, module, load_balancer_arn): - - try: - load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer attributes") - - # Replace '.' with '_' in attribute key names to make it more Ansibley - for k, v in list(load_balancer_attributes.items()): - load_balancer_attributes[k.replace('.', '_')] = v - del load_balancer_attributes[k] - - return load_balancer_attributes - - -def get_load_balancer_tags(connection, module, load_balancer_arn): - - try: - return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer tags") - - -def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): - try: - return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer ip address type") - - -def list_load_balancers(connection, module): - load_balancer_arns = module.params.get("load_balancer_arns") - names = module.params.get("names") - - try: - if not load_balancer_arns and not names: - load_balancers = get_paginator(connection) - if load_balancer_arns: - load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns) - if names: - load_balancers = get_paginator(connection, Names=names) - except is_boto3_error_code('LoadBalancerNotFound'): - module.exit_json(load_balancers=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to list load balancers") - - for load_balancer in load_balancers['LoadBalancers']: - # Get the attributes for each alb - load_balancer.update(get_load_balancer_attributes(connection, module, 
load_balancer['LoadBalancerArn'])) - - # Get the listeners for each alb - load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) - - # For each listener, get listener rules - for listener in load_balancer['listeners']: - listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) - - # Get ALB ip address type - load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) - - # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] - - # Get tags for each load balancer - for snaked_load_balancer in snaked_load_balancers: - snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn']) - - module.exit_json(load_balancers=snaked_load_balancers) - - -def main(): - - argument_spec = dict( - load_balancer_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str') - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arns', 'names']], - supports_check_mode=True, - ) - - try: - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - list_load_balancers(connection, module) - - -if __name__ == '__main__': - main() From d4a66cf5d1b4904e9961fc20849c744d35c15a3d Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Wed, 21 Sep 2022 14:35:09 -0400 Subject: [PATCH 575/683] Migrate iam_policy* (#1508) Migrate iam_policy* SUMMARY Remove iam_policy* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Alina Buzachis --- iam_policy.py | 349 --------------------------------------------- iam_policy_info.py | 209 --------------------------- 2 files changed, 558 deletions(-) delete mode 100644 iam_policy.py delete mode 100644 iam_policy_info.py diff --git a/iam_policy.py b/iam_policy.py deleted file mode 100644 index 06f9e85bf3d..00000000000 --- a/iam_policy.py +++ /dev/null @@ -1,349 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_policy -version_added: 1.0.0 -short_description: Manage inline IAM policies for users, groups, and roles -description: - - Allows uploading or removing inline IAM policies for IAM users, groups or roles. - - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), - M(community.aws.iam_group) and M(community.aws.iam_managed_policy) -options: - iam_type: - description: - - Type of IAM resource. - required: true - choices: [ "user", "group", "role"] - type: str - iam_name: - description: - - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name. - required: true - type: str - policy_name: - description: - - The name label for the policy to create or remove. - required: true - type: str - policy_json: - description: - - A properly json formatted policy as string. 
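The elb_application_lb_info module above pairs a boto3 paginator with AWSRetry's jittered backoff, so throttled describe calls are retried transparently. A standalone sketch of that pattern, assuming AWS credentials are already configured (the client name and retry count mirror the module):

    import boto3
    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

    @AWSRetry.jittered_backoff(retries=10)
    def describe_all_albs(client):
        # build_full_result() walks every page and merges them into one response dict
        paginator = client.get_paginator('describe_load_balancers')
        return paginator.paginate().build_full_result()

    albs = describe_all_albs(boto3.client('elbv2'))['LoadBalancers']
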
- type: json - state: - description: - - Whether to create or delete the IAM policy. - choices: [ "present", "absent"] - default: present - type: str - skip_duplicates: - description: - - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. - If there is a match it will not make a new policy object with the same rules. - default: false - type: bool - -author: - - "Jonathan I. Davila (@defionscode)" - - "Dennis Podkovyrin (@sbj-ss)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = ''' -# Advanced example, create two new groups and add a READ-ONLY policy to both -# groups. -- name: Create Two Groups, Mario and Luigi - community.aws.iam_group: - name: "{{ item }}" - state: present - loop: - - Mario - - Luigi - register: new_groups - -- name: Apply READ-ONLY policy to new groups that have been recently created - community.aws.iam_policy: - iam_type: group - iam_name: "{{ item.iam_group.group.group_name }}" - policy_name: "READ-ONLY" - policy_json: "{{ lookup('template', 'readonly.json.j2') }}" - state: present - loop: "{{ new_groups.results }}" - -# Create a new S3 policy with prefix per user -- name: Create S3 policy from template - community.aws.iam_policy: - iam_type: user - iam_name: "{{ item.user }}" - policy_name: "s3_limited_access_{{ item.prefix }}" - state: present - policy_json: "{{ lookup('template', 's3_policy.json.j2') }}" - loop: - - user: s3_user - prefix: s3_user_prefix - -''' -RETURN = ''' -policy_names: - description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). - returned: always - type: list - elements: str -''' - -import json - -try: - from botocore.exceptions import BotoCoreError, ClientError -except ImportError: - pass - -from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - - -class PolicyError(Exception): - pass - - -class Policy: - - def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode): - self.client = client - self.name = name - self.policy_name = policy_name - self.policy_json = policy_json - self.skip_duplicates = skip_duplicates - self.state = state - self.check_mode = check_mode - self.changed = False - - self.original_policies = self.get_all_policies().copy() - self.updated_policies = {} - - @staticmethod - def _iam_type(): - return '' - - def _list(self, name): - return {} - - def list(self): - try: - return self._list(self.name).get('PolicyNames', []) - except is_boto3_error_code('AccessDenied'): - return [] - - def _get(self, name, policy_name): - return '{}' - - def get(self, policy_name): - try: - return self._get(self.name, policy_name)['PolicyDocument'] - except is_boto3_error_code('AccessDenied'): - return {} - - def _put(self, name, policy_name, policy_doc): - pass - - def put(self, policy_doc): - self.changed = True - - if self.check_mode: - return - - self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True)) - - def _delete(self, name, policy_name): - pass - - def delete(self): - self.updated_policies = self.original_policies.copy() - - if self.policy_name not in self.list(): - self.changed = False - return - - 
self.changed = True - self.updated_policies.pop(self.policy_name, None) - - if self.check_mode: - return - - self._delete(self.name, self.policy_name) - - def get_policy_text(self): - try: - if self.policy_json is not None: - return self.get_policy_from_json() - except json.JSONDecodeError as e: - raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) - return None - - def get_policy_from_json(self): - if isinstance(self.policy_json, string_types): - pdoc = json.loads(self.policy_json) - else: - pdoc = self.policy_json - return pdoc - - def get_all_policies(self): - policies = {} - for pol in self.list(): - policies[pol] = self.get(pol) - return policies - - def create(self): - matching_policies = [] - policy_doc = self.get_policy_text() - policy_match = False - for pol in self.list(): - if not compare_policies(self.original_policies[pol], policy_doc): - matching_policies.append(pol) - policy_match = True - - self.updated_policies = self.original_policies.copy() - - if self.policy_name in matching_policies: - return - if self.skip_duplicates and policy_match: - return - - self.put(policy_doc) - self.updated_policies[self.policy_name] = policy_doc - - def run(self): - if self.state == 'present': - self.create() - elif self.state == 'absent': - self.delete() - return { - 'changed': self.changed, - self._iam_type() + '_name': self.name, - 'policies': self.list(), - 'policy_names': self.list(), - 'diff': dict( - before=self.original_policies, - after=self.updated_policies, - ), - } - - -class UserPolicy(Policy): - - @staticmethod - def _iam_type(): - return 'user' - - def _list(self, name): - return self.client.list_user_policies(aws_retry=True, UserName=name) - - def _get(self, name, policy_name): - return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) - - def _put(self, name, policy_name, policy_doc): - return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) - - def _delete(self, name, policy_name): - return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) - - -class RolePolicy(Policy): - - @staticmethod - def _iam_type(): - return 'role' - - def _list(self, name): - return self.client.list_role_policies(aws_retry=True, RoleName=name) - - def _get(self, name, policy_name): - return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) - - def _put(self, name, policy_name, policy_doc): - return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) - - def _delete(self, name, policy_name): - return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) - - -class GroupPolicy(Policy): - - @staticmethod - def _iam_type(): - return 'group' - - def _list(self, name): - return self.client.list_group_policies(aws_retry=True, GroupName=name) - - def _get(self, name, policy_name): - return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) - - def _put(self, name, policy_name, policy_doc): - return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) - - def _delete(self, name, policy_name): - return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) - - -def main(): - argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), - state=dict(default='present', choices=['present', 
'absent']), - iam_name=dict(required=True), - policy_name=dict(required=True), - policy_json=dict(type='json', default=None, required=False), - skip_duplicates=dict(type='bool', default=False, required=False) - ) - required_if = [ - ('state', 'present', ('policy_json',), True), - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True - ) - - args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), - policy_json=module.params.get('policy_json'), - skip_duplicates=module.params.get('skip_duplicates'), - state=module.params.get('state'), - check_mode=module.check_mode, - ) - iam_type = module.params.get('iam_type') - - try: - if iam_type == 'user': - policy = UserPolicy(**args) - elif iam_type == 'role': - policy = RolePolicy(**args) - elif iam_type == 'group': - policy = GroupPolicy(**args) - - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.", - date='2024-08-01', collection_name='community.aws') - - module.exit_json(**(policy.run())) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e) - except PolicyError as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/iam_policy_info.py b/iam_policy_info.py deleted file mode 100644 index b408f01b450..00000000000 --- a/iam_policy_info.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_policy_info -version_added: 1.0.0 -short_description: Retrieve inline IAM policies for users, groups, and roles -description: - - Supports fetching of inline IAM policies for IAM users, groups and roles. -options: - iam_type: - description: - - Type of IAM resource you wish to retrieve inline policies for. - required: true - choices: [ "user", "group", "role"] - type: str - iam_name: - description: - - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name. - required: true - type: str - policy_name: - description: - - Name of a specific IAM inline policy you with to retrieve. 
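The create() method of iam_policy above gets its idempotency from amazon.aws's compare_policies helper, which normalises both documents before comparing and returns True only when they genuinely differ. A sketch under that assumption (the policy statements are illustrative):

    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies

    existing = {'Version': '2012-10-17',
                'Statement': [{'Effect': 'Allow', 'Action': 's3:Get*', 'Resource': '*'}]}
    desired = {'Statement': [{'Resource': '*', 'Action': 's3:Get*', 'Effect': 'Allow'}],
               'Version': '2012-10-17'}

    # Key order is irrelevant after normalisation, so this prints False (no difference),
    # and the module would skip the put_*_policy call and report changed=False.
    print(compare_policies(existing, desired))
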
- required: false - type: str - -author: - - Mark Chappell (@tremble) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -- name: Describe all inline IAM policies on an IAM User - community.aws.iam_policy_info: - iam_type: user - iam_name: example_user - -- name: Describe a specific inline policy on an IAM Role - community.aws.iam_policy_info: - iam_type: role - iam_name: example_role - policy_name: example_policy - -''' -RETURN = ''' -policies: - description: A list containing the matching IAM inline policy names and their data - returned: success - type: complex - contains: - policy_name: - description: The Name of the inline policy - returned: success - type: str - policy_document: - description: The JSON document representing the inline IAM policy - returned: success - type: list -policy_names: - description: A list of matching names of the IAM inline policies on the queried object - returned: success - type: list -all_policy_names: - description: A list of names of all of the IAM inline policies on the queried object - returned: success - type: list -''' - -try: - import botocore -except ImportError: - pass - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -class Policy: - - def __init__(self, client, name, policy_name): - self.client = client - self.name = name - self.policy_name = policy_name - self.changed = False - - @staticmethod - def _iam_type(): - return '' - - def _list(self, name): - return {} - - def list(self): - return self._list(self.name).get('PolicyNames', []) - - def _get(self, name, policy_name): - return '{}' - - def get(self, policy_name): - return self._get(self.name, policy_name)['PolicyDocument'] - - def get_all(self): - policies = list() - for policy in self.list(): - policies.append({"policy_name": policy, "policy_document": self.get(policy)}) - return policies - - def run(self): - policy_list = self.list() - ret_val = { - 'changed': False, - self._iam_type() + '_name': self.name, - 'all_policy_names': policy_list - } - if self.policy_name is None: - ret_val.update(policies=self.get_all()) - ret_val.update(policy_names=policy_list) - elif self.policy_name in policy_list: - ret_val.update(policies=[{ - "policy_name": self.policy_name, - "policy_document": self.get(self.policy_name)}]) - ret_val.update(policy_names=[self.policy_name]) - return ret_val - - -class UserPolicy(Policy): - - @staticmethod - def _iam_type(): - return 'user' - - def _list(self, name): - return self.client.list_user_policies(aws_retry=True, UserName=name) - - def _get(self, name, policy_name): - return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) - - -class RolePolicy(Policy): - - @staticmethod - def _iam_type(): - return 'role' - - def _list(self, name): - return self.client.list_role_policies(aws_retry=True, RoleName=name) - - def _get(self, name, policy_name): - return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) - - -class GroupPolicy(Policy): - - @staticmethod - def _iam_type(): - return 'group' - - def _list(self, name): - return self.client.list_group_policies(aws_retry=True, GroupName=name) - - def _get(self, name, policy_name): - return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) - - -def main(): - argument_spec 
= dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), - iam_name=dict(required=True), - policy_name=dict(default=None, required=False), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), - ) - iam_type = module.params.get('iam_type') - - try: - if iam_type == 'user': - policy = UserPolicy(**args) - elif iam_type == 'role': - policy = RolePolicy(**args) - elif iam_type == 'group': - policy = GroupPolicy(**args) - - module.exit_json(**(policy.run())) - except is_boto3_error_code('NoSuchEntity') as e: - module.exit_json(changed=False, msg=e.response['Error']['Message']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -if __name__ == '__main__': - main() From 8b1a5cf5c8bfc402b17f4cc41521a7e6f0188423 Mon Sep 17 00:00:00 2001 From: Levi Notik Date: Thu, 22 Sep 2022 02:53:08 -0400 Subject: [PATCH 576/683] Fix spelling (#1522) Fix spelling SUMMARY ISSUE TYPE Docs Pull Request COMPONENT NAME community.aws.ecs_service module ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- ecs_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ecs_service.py b/ecs_service.py index 95fa43b52b4..f355fa32bed 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -157,7 +157,7 @@ type: str force_deletion: description: - - Forcabily delete the service. Required when deleting a service with >0 scale, or no target group. + - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group. default: False type: bool version_added: 2.1.0 From 40b25f14bf27c01fced85049e26f4db61aa90dee Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 14:55:11 +0200 Subject: [PATCH 577/683] Migrate ec2_eip* modules and tests (#1491) Migrate ec2_eip* modules and tests Depends-On: ansible-collections/amazon.aws#1032 Remove ec2_eip* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mark Chappell Reviewed-by: Bikouo Aubin --- ec2_eip.py | 664 ------------------------------------------------ ec2_eip_info.py | 144 ----------- 2 files changed, 808 deletions(-) delete mode 100644 ec2_eip.py delete mode 100644 ec2_eip_info.py diff --git a/ec2_eip.py b/ec2_eip.py deleted file mode 100644 index 531af689792..00000000000 --- a/ec2_eip.py +++ /dev/null @@ -1,664 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ec2_eip -version_added: 1.0.0 -short_description: manages EC2 elastic IP (EIP) addresses. -description: - - This module can allocate or release an EIP. - - This module can associate/disassociate an EIP with instances or network interfaces. -options: - device_id: - description: - - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. - - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01. 
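The instance_id deprecation above is implemented with AnsibleModule's deprecated_aliases mechanism: using the alias still works but emits a deprecation warning until the stated removal date. The relevant argument_spec entry, as it appears in the module's main() further below:

    argument_spec = dict(
        device_id=dict(required=False,
                       aliases=['instance_id'],
                       deprecated_aliases=[dict(name='instance_id',
                                                date='2022-12-01',
                                                collection_name='community.aws')]),
    )
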
- required: false - aliases: [ instance_id ] - type: str - public_ip: - description: - - The IP address of a previously allocated EIP. - - When I(state=present) and device is specified, the EIP is associated with the device. - - When I(state=absent) and device is specified, the EIP is disassociated from the device. - aliases: [ ip ] - type: str - state: - description: - - When C(state=present), allocate an EIP or associate an existing EIP with a device. - - When C(state=absent), disassociate the EIP from the device and optionally release it. - choices: ['present', 'absent'] - default: present - type: str - in_vpc: - description: - - Allocate an EIP inside a VPC or not. - - Required if specifying an ENI with I(device_id). - default: false - type: bool - reuse_existing_ip_allowed: - description: - - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one. - default: false - type: bool - release_on_disassociation: - description: - - Whether or not to automatically release the EIP when it is disassociated. - default: false - type: bool - private_ip_address: - description: - - The primary or secondary private IP address to associate with the Elastic IP address. - type: str - allow_reassociation: - description: - - Specify this option to allow an Elastic IP address that is already associated with another - network interface or instance to be re-associated with the specified instance or interface. - default: false - type: bool - tag_name: - description: - - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse - an Elastic IP if it is tagged with I(tag_name). - type: str - tag_value: - description: - - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value). - type: str - public_ipv4_pool: - description: - - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP) - only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). - type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -author: - - "Rick Mendes (@rickmendes) " -notes: - - There may be a delay between the time the EIP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and - pause to delay further playbook execution until the instance is reachable, - if necessary. - - This module returns multiple changed statuses on disassociation or release. - It returns an overall status based on any changes occurring. It also returns - individual changed statuses for disassociation and release. - - Support for I(tags) and I(purge_tags) was added in release 2.1.0. -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
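When associating, the module ultimately calls ec2.associate_address, passing AllocationId for VPC-domain addresses and PublicIp for EC2-Classic ones. A direct boto3 sketch of the VPC case (the IDs are placeholders):

    import boto3

    ec2 = boto3.client('ec2')
    ec2.associate_address(
        InstanceId='i-1212f003',           # the device to attach the EIP to
        AllocationId='eipalloc-64de1b01',  # VPC addresses are identified by allocation ID
        AllowReassociation=True,           # permit stealing the EIP from a prior association
    )
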
- -- name: associate an elastic IP with an instance - community.aws.ec2_eip: - device_id: i-1212f003 - ip: 93.184.216.119 - -- name: associate an elastic IP with a device - community.aws.ec2_eip: - device_id: eni-c8ad70f3 - ip: 93.184.216.119 - -- name: associate an elastic IP with a device and allow reassociation - community.aws.ec2_eip: - device_id: eni-c8ad70f3 - public_ip: 93.184.216.119 - allow_reassociation: true - -- name: disassociate an elastic IP from an instance - community.aws.ec2_eip: - device_id: i-1212f003 - ip: 93.184.216.119 - state: absent - -- name: disassociate an elastic IP with a device - community.aws.ec2_eip: - device_id: eni-c8ad70f3 - ip: 93.184.216.119 - state: absent - -- name: allocate a new elastic IP and associate it with an instance - community.aws.ec2_eip: - device_id: i-1212f003 - -- name: allocate a new elastic IP without associating it to anything - community.aws.ec2_eip: - state: present - register: eip - -- name: output the IP - ansible.builtin.debug: - msg: "Allocated IP is {{ eip.public_ip }}" - -- name: provision new instances with ec2 - amazon.aws.ec2: - keypair: mykey - instance_type: c1.medium - image: ami-40603AD1 - wait: true - group: webserver - count: 3 - register: ec2 - -- name: associate new elastic IPs with each of the instances - community.aws.ec2_eip: - device_id: "{{ item }}" - loop: "{{ ec2.instance_ids }}" - -- name: allocate a new elastic IP inside a VPC in us-west-2 - community.aws.ec2_eip: - region: us-west-2 - in_vpc: true - register: eip - -- name: output the IP - ansible.builtin.debug: - msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" - -- name: allocate eip - reuse unallocated ips (if found) with FREE tag - community.aws.ec2_eip: - region: us-east-1 - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: FREE - -- name: allocate eip - reuse unallocated ips if tag reserved is nope - community.aws.ec2_eip: - region: us-east-1 - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: reserved - tag_value: nope - -- name: allocate new eip - from servers given ipv4 pool - community.aws.ec2_eip: - region: us-east-1 - in_vpc: true - public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 - -- name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic) - community.aws.ec2_eip: - region: us-east-1 - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: dev-servers - public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 - -- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname - community.aws.ec2_eip: - region: us-east-1 - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: reserved_for - tag_value: "{{ inventory_hostname }}" - public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 -''' - -RETURN = ''' -allocation_id: - description: allocation_id of the elastic ip - returned: on success - type: str - sample: eipalloc-51aa3a6c -public_ip: - description: an elastic ip address - returned: on success - type: str - sample: 52.88.159.209 -''' - -try: - import botocore.exceptions -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
ensure_ec2_tags - - -def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): - if address_is_associated_with_device(ec2, module, address, device_id, is_instance): - return {'changed': False} - - # If we're in check mode, nothing else to do - if not check_mode: - if is_instance: - try: - params = dict( - InstanceId=device_id, - AllowReassociation=allow_reassociation, - ) - if private_ip_address: - params['PrivateIpAddress'] = private_ip_address - if address['Domain'] == 'vpc': - params['AllocationId'] = address['AllocationId'] - else: - params['PublicIp'] = address['PublicIp'] - res = ec2.associate_address(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) - module.fail_json_aws(e, msg=msg) - else: - params = dict( - NetworkInterfaceId=device_id, - AllocationId=address['AllocationId'], - AllowReassociation=allow_reassociation, - ) - - if private_ip_address: - params['PrivateIpAddress'] = private_ip_address - - try: - res = ec2.associate_address(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id) - module.fail_json_aws(e, msg=msg) - if not res: - module.fail_json_aws(e, msg='Association failed.') - - return {'changed': True} - - -def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True): - if not address_is_associated_with_device(ec2, module, address, device_id, is_instance): - return {'changed': False} - - # If we're in check mode, nothing else to do - if not check_mode: - try: - if address['Domain'] == 'vpc': - res = ec2.disassociate_address( - AssociationId=address['AssociationId'], aws_retry=True - ) - else: - res = ec2.disassociate_address( - PublicIp=address['PublicIp'], aws_retry=True - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Dissassociation of Elastic IP failed") - - return {'changed': True} - - -@AWSRetry.jittered_backoff() -def find_address(ec2, module, public_ip, device_id, is_instance=True): - """ Find an existing Elastic IP address """ - filters = [] - kwargs = {} - - if public_ip: - kwargs["PublicIps"] = [public_ip] - elif device_id: - if is_instance: - filters.append({"Name": 'instance-id', "Values": [device_id]}) - else: - filters.append({'Name': 'network-interface-id', "Values": [device_id]}) - - if len(filters) > 0: - kwargs["Filters"] = filters - elif len(filters) == 0 and public_ip is None: - return None - - try: - addresses = ec2.describe_addresses(**kwargs) - except is_boto3_error_code('InvalidAddress.NotFound') as e: - # If we're releasing and we can't find it, it's already gone... 
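The except clause here relies on is_boto3_error_code, which builds an exception class matching only the named ClientError code; any other error still propagates. A standalone sketch of the same pattern (the address is a placeholder):

    import boto3
    from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code

    ec2 = boto3.client('ec2')
    try:
        addresses = ec2.describe_addresses(PublicIps=['203.0.113.10'])['Addresses']
    except is_boto3_error_code('InvalidAddress.NotFound'):
        # Only this specific error code is swallowed; other ClientErrors still raise.
        addresses = []
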
- if module.params.get('state') == 'absent': - module.exit_json(changed=False, disassociated=False, released=False) - module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") - - addresses = addresses["Addresses"] - if len(addresses) == 1: - return addresses[0] - elif len(addresses) > 1: - msg = "Found more than one address using args {0}".format(kwargs) - msg += "Addresses found: {0}".format(addresses) - module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) - - -def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): - """ Check if the elastic IP is currently associated with the device """ - address = find_address(ec2, module, address["PublicIp"], device_id, is_instance) - if address: - if is_instance: - if "InstanceId" in address and address["InstanceId"] == device_id: - return address - else: - if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id: - return address - return False - - -def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None): - """ Allocate a new elastic IP address (when needed) and return it """ - if not domain: - domain = 'standard' - - if reuse_existing_ip_allowed: - filters = [] - filters.append({'Name': 'domain', "Values": [domain]}) - - if tag_dict is not None: - filters += ansible_dict_to_boto3_filter_list(tag_dict) - - try: - all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") - - all_addresses = all_addresses["Addresses"] - - if domain == 'vpc': - unassociated_addresses = [a for a in all_addresses - if not a.get('AssociationId', None)] - else: - unassociated_addresses = [a for a in all_addresses - if not a['InstanceId']] - if unassociated_addresses: - return unassociated_addresses[0], False - - if public_ipv4_pool: - return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True - - try: - if check_mode: - return None, True - result = ec2.allocate_address(Domain=domain, aws_retry=True), True - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") - return result - - -def release_address(ec2, module, address, check_mode): - """ Release a previously allocated elastic IP address """ - - # If we're in check mode, nothing else to do - if not check_mode: - try: - result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't release Elastic IP address") - - return {'changed': True} - - -@AWSRetry.jittered_backoff() -def describe_eni_with_backoff(ec2, module, device_id): - try: - return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id]) - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e: - module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") - - -def find_device(ec2, module, device_id, is_instance=True): - """ Attempt to find the EC2 instance and return it """ - - if is_instance: - try: - paginator = ec2.get_paginator('describe_instances') - reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]')) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) 
as e: - module.fail_json_aws(e, msg="Couldn't get list of instances") - - if len(reservations) == 1: - instances = reservations[0]['Instances'] - if len(instances) == 1: - return instances[0] - else: - try: - interfaces = describe_eni_with_backoff(ec2, module, device_id) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") - if len(interfaces) == 1: - return interfaces[0] - - -def ensure_present(ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True): - changed = False - - # Return the EIP object since we've been given a public IP - if not address: - if check_mode: - return {'changed': True} - - address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode) - - if device_id: - # Allocate an IP for instance since no public_ip was provided - if is_instance: - instance = find_device(ec2, module, device_id) - if reuse_existing_ip_allowed: - if instance['VpcId'] and len(instance['VpcId']) > 0 and domain is None: - msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc" - module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) - - # Associate address object (provided or allocated) with instance - assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode - ) - else: - instance = find_device(ec2, module, device_id, is_instance=False) - # Associate address object (provided or allocated) with instance - assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode, is_instance=False - ) - - changed = changed or assoc_result['changed'] - - return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']} - - -def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True): - if not address: - return {'changed': False} - - # disassociating address from instance - if device_id: - if is_instance: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode - ) - else: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode, is_instance=False - ) - # releasing address - else: - return release_address(ec2, module, address, check_mode) - - -def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): - # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address - """ Overrides botocore's allocate_address function to support BYOIP """ - if check_mode: - return None - - params = {} - - if domain is not None: - params['Domain'] = domain - - if public_ipv4_pool is not None: - params['PublicIpv4Pool'] = public_ipv4_pool - - try: - result = ec2.allocate_address(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") - return result - - -def generate_tag_dict(module, tag_name, tag_value): - # type: (AnsibleAWSModule, str, str) -> Optional[Dict] - """ Generates a dictionary to be passed as a filter to Amazon """ - if tag_name and not tag_value: - if tag_name.startswith('tag:'): - tag_name = tag_name.strip('tag:') - return {'tag-key': tag_name} - - elif tag_name and tag_value: - if not tag_name.startswith('tag:'): - tag_name = 'tag:' + tag_name - return {tag_name: 
tag_value} - - elif tag_value and not tag_name: - module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')") - - -def main(): - argument_spec = dict( - device_id=dict(required=False, aliases=['instance_id'], - deprecated_aliases=[dict(name='instance_id', - date='2022-12-01', - collection_name='community.aws')]), - public_ip=dict(required=False, aliases=['ip']), - state=dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc=dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed=dict(required=False, type='bool', - default=False), - release_on_disassociation=dict(required=False, type='bool', default=False), - allow_reassociation=dict(type='bool', default=False), - private_ip_address=dict(), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - tag_name=dict(), - tag_value=dict(), - public_ipv4_pool=dict() - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_by={ - 'private_ip_address': ['device_id'], - }, - ) - - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - - device_id = module.params.get('device_id') - instance_id = module.params.get('instance_id') - public_ip = module.params.get('public_ip') - private_ip_address = module.params.get('private_ip_address') - state = module.params.get('state') - in_vpc = module.params.get('in_vpc') - domain = 'vpc' if in_vpc else None - reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - release_on_disassociation = module.params.get('release_on_disassociation') - allow_reassociation = module.params.get('allow_reassociation') - tag_name = module.params.get('tag_name') - tag_value = module.params.get('tag_value') - public_ipv4_pool = module.params.get('public_ipv4_pool') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if instance_id: - is_instance = True - device_id = instance_id - else: - if device_id and device_id.startswith('i-'): - is_instance = True - elif device_id: - if device_id.startswith('eni-') and not in_vpc: - module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") - is_instance = False - - # Tags for *searching* for an EIP. 
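generate_tag_dict, called on the next line, maps the tag_name/tag_value parameters onto EC2's filter syntax; for example (hypothetical tag values):

    # tag_name='reserved_for', tag_value='web01' -> {'tag:reserved_for': 'web01'}
    # tag_name='FREE', tag_value=None            -> {'tag-key': 'FREE'}
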
- tag_dict = generate_tag_dict(module, tag_name, tag_value) - - try: - if device_id: - address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance) - else: - address = find_address(ec2, module, public_ip, None) - - if state == 'present': - if device_id: - result = ensure_present( - ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, - module.check_mode, is_instance=is_instance - ) - if 'allocation_id' not in result: - # Don't check tags on check_mode here - no EIP to pass through - module.exit_json(**result) - else: - if address: - result = { - 'changed': False, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] - } - else: - address, changed = allocate_address( - ec2, module, domain, reuse_existing_ip_allowed, - module.check_mode, tag_dict, public_ipv4_pool - ) - if address: - result = { - 'changed': changed, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] - } - else: - # Don't check tags on check_mode here - no EIP to pass through - result = { - 'changed': changed - } - module.exit_json(**result) - - result['changed'] |= ensure_ec2_tags( - ec2, module, result['allocation_id'], - resource_type='elastic-ip', tags=tags, purge_tags=purge_tags) - else: - if device_id: - disassociated = ensure_absent( - ec2, module, address, device_id, module.check_mode, is_instance=is_instance - ) - - if release_on_disassociation and disassociated['changed']: - released = release_address(ec2, module, address, module.check_mode) - result = { - 'changed': True, - 'disassociated': disassociated['changed'], - 'released': released['changed'] - } - else: - result = { - 'changed': disassociated['changed'], - 'disassociated': disassociated['changed'], - 'released': False - } - else: - released = release_address(ec2, module, address, module.check_mode) - result = { - 'changed': released['changed'], - 'disassociated': False, - 'released': released['changed'] - } - - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(str(e)) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ec2_eip_info.py b/ec2_eip_info.py deleted file mode 100644 index 31d8145742b..00000000000 --- a/ec2_eip_info.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ec2_eip_info -version_added: 1.0.0 -short_description: List EC2 EIP details -description: - - List details of EC2 Elastic IP addresses. -author: "Brad Macpherson (@iiibrad)" -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and filter - value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options) - for possible filters. Filter names and values are case sensitive. - required: false - default: {} - type: dict -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details or the AWS region, -# see the AWS Guide for details. - -- name: List all EIP addresses in the current region. - community.aws.ec2_eip_info: - register: regional_eip_addresses - -- name: List all EIP addresses for a VM. 
- community.aws.ec2_eip_info: - filters: - instance-id: i-123456789 - register: my_vm_eips - -- ansible.builtin.debug: - msg: "{{ my_vm_eips.addresses | selectattr('private_ip_address', 'equalto', '10.0.0.5') }}" - -- name: List all EIP addresses for several VMs. - community.aws.ec2_eip_info: - filters: - instance-id: - - i-123456789 - - i-987654321 - register: my_vms_eips - -- name: List all EIP addresses using the 'Name' tag as a filter. - community.aws.ec2_eip_info: - filters: - tag:Name: www.example.com - register: my_vms_eips - -- name: List all EIP addresses using the Allocation-id as a filter - community.aws.ec2_eip_info: - filters: - allocation-id: eipalloc-64de1b01 - register: my_vms_eips - -# Set the variable eip_alloc to the value of the first allocation_id -# and set the variable my_pub_ip to the value of the first public_ip -- ansible.builtin.set_fact: - eip_alloc: my_vms_eips.addresses[0].allocation_id - my_pub_ip: my_vms_eips.addresses[0].public_ip - -''' - - -RETURN = ''' -addresses: - description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP. - returned: on success - type: list - sample: [{ - "allocation_id": "eipalloc-64de1b01", - "association_id": "eipassoc-0fe9ce90d6e983e97", - "domain": "vpc", - "instance_id": "i-01020cfeb25b0c84f", - "network_interface_id": "eni-02fdeadfd4beef9323b", - "network_interface_owner_id": "0123456789", - "private_ip_address": "10.0.0.1", - "public_ip": "54.81.104.1", - "tags": { - "Name": "test-vm-54.81.104.1" - } - }] - -''' - -try: - from botocore.exceptions import (BotoCoreError, ClientError) -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - - -def get_eips_details(module): - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - filters = module.params.get("filters") - try: - response = connection.describe_addresses( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(filters) - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Error retrieving EIPs") - - addresses = camel_dict_to_snake_dict(response)['addresses'] - for address in addresses: - if 'tags' in address: - address['tags'] = boto3_tag_list_to_ansible_dict(address['tags']) - return addresses - - -def main(): - module = AnsibleAWSModule( - argument_spec=dict( - filters=dict(type='dict', default={}) - ), - supports_check_mode=True - ) - - module.exit_json(changed=False, addresses=get_eips_details(module)) - - -if __name__ == '__main__': - main() From 464458e3eb524774221f7d2dfc2616e29d11f198 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 15:32:25 +0200 Subject: [PATCH 578/683] Migrate rds_snapshot* modules and tests (#1453) Migrate rds_snapshot* modules and tests Depends-On: ansible-collections/amazon.aws#1012 Depends-On: #1481 Remove rds_snapshot* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mike Graves Reviewed-by: Bikouo Aubin 
Reviewed-by: Mark Chappell --- rds_snapshot_info.py | 387 ------------------------------------------- 1 file changed, 387 deletions(-) delete mode 100644 rds_snapshot_info.py diff --git a/rds_snapshot_info.py b/rds_snapshot_info.py deleted file mode 100644 index 7abc0cae675..00000000000 --- a/rds_snapshot_info.py +++ /dev/null @@ -1,387 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2014-2017 Ansible Project -# Copyright (c) 2017, 2018 Will Thames -# Copyright (c) 2017, 2018 Michael De La Rue -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rds_snapshot_info -version_added: 1.0.0 -short_description: obtain information about one or more RDS snapshots -description: - - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora). - - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters are passed. -options: - db_snapshot_identifier: - description: - - Name of an RDS (unclustered) snapshot. - - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier) - required: false - aliases: - - snapshot_name - type: str - db_instance_identifier: - description: - - RDS instance name for which to find snapshots. - - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier) - required: false - type: str - db_cluster_identifier: - description: - - RDS cluster name for which to find snapshots. - - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier) - required: false - type: str - db_cluster_snapshot_identifier: - description: - - Name of an RDS cluster snapshot. - - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier) - required: false - type: str - snapshot_type: - description: - - Type of snapshot to find. - - By default both automated and manual snapshots will be returned. 
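Back in ec2_eip_info above, get_eips_details converts the user's filter dict into boto3's Filters shape, snake-cases the response, and flattens the tag list. A sketch of those three helpers together (sample values mirror the module's RETURN docs):

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
        ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict)

    filters = ansible_dict_to_boto3_filter_list({'instance-id': 'i-123456789'})
    # -> [{'Name': 'instance-id', 'Values': ['i-123456789']}]

    raw = {'PublicIp': '54.81.104.1', 'AllocationId': 'eipalloc-64de1b01',
           'Tags': [{'Key': 'Name', 'Value': 'test-vm-54.81.104.1'}]}
    address = camel_dict_to_snake_dict(raw)
    address['tags'] = boto3_tag_list_to_ansible_dict(raw['Tags'])
    # -> {'public_ip': '54.81.104.1', 'allocation_id': 'eipalloc-64de1b01',
    #     'tags': {'Name': 'test-vm-54.81.104.1'}}
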
- required: false - choices: ['automated', 'manual', 'shared', 'public'] - type: str -author: - - "Will Thames (@willthames)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -- name: Get information about an snapshot - community.aws.rds_snapshot_info: - db_snapshot_identifier: snapshot_name - register: new_database_info - -- name: Get all RDS snapshots for an RDS instance - community.aws.rds_snapshot_info: - db_instance_identifier: helloworld-rds-master -''' - -RETURN = ''' -snapshots: - description: List of non-clustered snapshots - returned: When cluster parameters are not passed - type: complex - contains: - allocated_storage: - description: How many gigabytes of storage are allocated - returned: always - type: int - sample: 10 - availability_zone: - description: The availability zone of the database from which the snapshot was taken - returned: always - type: str - sample: us-west-2b - db_instance_identifier: - description: Database instance identifier - returned: always - type: str - sample: hello-world-rds - db_snapshot_arn: - description: Snapshot ARN - returned: always - type: str - sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03 - db_snapshot_identifier: - description: Snapshot name - returned: always - type: str - sample: rds:hello-world-rds-us1-2018-05-16-04-03 - encrypted: - description: Whether the snapshot was encrypted - returned: always - type: bool - sample: true - engine: - description: Database engine - returned: always - type: str - sample: postgres - engine_version: - description: Database engine version - returned: always - type: str - sample: 9.5.10 - iam_database_authentication_enabled: - description: Whether database authentication through IAM is enabled - returned: always - type: bool - sample: false - instance_create_time: - description: Time the Instance was created - returned: always - type: str - sample: '2017-10-10T04:00:07.434000+00:00' - kms_key_id: - description: ID of the KMS Key encrypting the snapshot - returned: always - type: str - sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab - license_model: - description: License model - returned: always - type: str - sample: postgresql-license - master_username: - description: Database master username - returned: always - type: str - sample: dbadmin - option_group_name: - description: Database option group name - returned: always - type: str - sample: default:postgres-9-5 - percent_progress: - description: Percent progress of snapshot - returned: always - type: int - sample: 100 - snapshot_create_time: - description: Time snapshot was created - returned: always - type: str - sample: '2018-05-16T04:03:33.871000+00:00' - snapshot_type: - description: Type of snapshot - returned: always - type: str - sample: automated - status: - description: Status of snapshot - returned: always - type: str - sample: available - storage_type: - description: Storage type of underlying DB - returned: always - type: str - sample: gp2 - tags: - description: Snapshot tags - returned: when snapshot is not shared - type: complex - contains: {} - vpc_id: - description: ID of VPC containing the DB - returned: always - type: str - sample: vpc-abcd1234 -cluster_snapshots: - description: List of cluster snapshots - returned: always - type: complex - contains: - allocated_storage: - description: How many gigabytes of storage are allocated - returned: always - type: int - sample: 1 - availability_zones: - description: 
The availability zones of the database from which the snapshot was taken - returned: always - type: list - sample: - - ca-central-1a - - ca-central-1b - cluster_create_time: - description: Date and time the cluster was created - returned: always - type: str - sample: '2018-05-17T00:13:40.223000+00:00' - db_cluster_identifier: - description: Database cluster identifier - returned: always - type: str - sample: test-aurora-cluster - db_cluster_snapshot_arn: - description: ARN of the database snapshot - returned: always - type: str - sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot - db_cluster_snapshot_identifier: - description: Snapshot identifier - returned: always - type: str - sample: test-aurora-snapshot - engine: - description: Database engine - returned: always - type: str - sample: aurora - engine_version: - description: Database engine version - returned: always - type: str - sample: 5.6.10a - iam_database_authentication_enabled: - description: Whether database authentication through IAM is enabled - returned: always - type: bool - sample: false - kms_key_id: - description: ID of the KMS Key encrypting the snapshot - returned: always - type: str - sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab - license_model: - description: License model - returned: always - type: str - sample: aurora - master_username: - description: Database master username - returned: always - type: str - sample: shertel - percent_progress: - description: Percent progress of snapshot - returned: always - type: int - sample: 0 - port: - description: Database port - returned: always - type: int - sample: 0 - snapshot_create_time: - description: Date and time when the snapshot was created - returned: always - type: str - sample: '2018-05-17T00:23:23.731000+00:00' - snapshot_type: - description: Type of snapshot - returned: always - type: str - sample: manual - status: - description: Status of snapshot - returned: always - type: str - sample: creating - storage_encrypted: - description: Whether the snapshot is encrypted - returned: always - type: bool - sample: true - tags: - description: Tags of the snapshot - returned: when snapshot is not shared - type: complex - contains: {} - vpc_id: - description: VPC of the database - returned: always - type: str - sample: vpc-abcd1234 -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - - -def common_snapshot_info(module, conn, method, prefix, params): - paginator = conn.get_paginator(method) - try: - results = paginator.paginate(**params).build_full_result()['%ss' % prefix] - except is_boto3_error_code('%sNotFound' % prefix): - results = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "trying to get snapshot information") - - for snapshot in results: - try: - if snapshot['SnapshotType'] != 'shared': - snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], - aws_retry=True)['TagList']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % 
prefix]) - - return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results] - - -def cluster_snapshot_info(module, conn): - snapshot_name = module.params.get('db_cluster_snapshot_identifier') - snapshot_type = module.params.get('snapshot_type') - instance_name = module.params.get('db_cluster_identifier') - - params = dict() - if snapshot_name: - params['DBClusterSnapshotIdentifier'] = snapshot_name - if instance_name: - params['DBClusterIdentifier'] = instance_name - if snapshot_type: - params['SnapshotType'] = snapshot_type - if snapshot_type == 'public': - params['IncludePublic'] = True - elif snapshot_type == 'shared': - params['IncludeShared'] = True - - return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params) - - -def standalone_snapshot_info(module, conn): - snapshot_name = module.params.get('db_snapshot_identifier') - snapshot_type = module.params.get('snapshot_type') - instance_name = module.params.get('db_instance_identifier') - - params = dict() - if snapshot_name: - params['DBSnapshotIdentifier'] = snapshot_name - if instance_name: - params['DBInstanceIdentifier'] = instance_name - if snapshot_type: - params['SnapshotType'] = snapshot_type - if snapshot_type == 'public': - params['IncludePublic'] = True - elif snapshot_type == 'shared': - params['IncludeShared'] = True - - return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params) - - -def main(): - argument_spec = dict( - db_snapshot_identifier=dict(aliases=['snapshot_name']), - db_instance_identifier=dict(), - db_cluster_identifier=dict(), - db_cluster_snapshot_identifier=dict(), - snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']) - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] - ) - - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - results = dict() - if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']: - results['snapshots'] = standalone_snapshot_info(module, conn) - if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']: - results['cluster_snapshots'] = cluster_snapshot_info(module, conn) - - module.exit_json(changed=False, **results) - - -if __name__ == '__main__': - main() From 9fe6d978f4365f7271a5395a9d2e6bb0436ab458 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 15:32:29 +0200 Subject: [PATCH 579/683] Migrate cloudwatchevent* modules and tests (#1517) Migrate cloudwatchevent* modules and tests Depends-On: ansible-collections/amazon.aws#1052 Remove cloudwatchevent* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mike Graves Reviewed-by: Mark Chappell --- cloudwatchevent_rule.py | 512 ---------------------------------------- 1 file changed, 512 deletions(-) delete mode 100644 cloudwatchevent_rule.py diff --git a/cloudwatchevent_rule.py b/cloudwatchevent_rule.py deleted file mode 100644 index 4780a4ae43d..00000000000 --- a/cloudwatchevent_rule.py +++ /dev/null @@ -1,512 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, 
print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: cloudwatchevent_rule
-version_added: 1.0.0
-short_description: Manage CloudWatch Event rules and targets
-description:
-  - This module creates and manages CloudWatch event rules and targets.
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-
-author: "Jim Dalton (@jsdalton)"
-notes:
-  - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
-    rule can have both an I(event_pattern) and a I(schedule_expression), in which
-    case the rule will trigger on matching events as well as on a schedule.
-  - When specifying targets, I(input), I(input_path), I(input_paths_map) and I(input_template)
-    are mutually exclusive and optional parameters.
-options:
-  name:
-    description:
-      - The name of the rule you are creating, updating or deleting. No spaces
-        or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
-    required: true
-    type: str
-  schedule_expression:
-    description:
-      - A cron or rate expression that defines the schedule the rule will
-        trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
-    required: false
-    type: str
-  event_pattern:
-    description:
-      - A string pattern that is used to match against incoming events to determine if the rule
-        should be triggered.
-    required: false
-    type: json
-  state:
-    description:
-      - Whether the rule is present (and enabled), disabled, or absent.
-    choices: ["present", "disabled", "absent"]
-    default: present
-    required: false
-    type: str
-  description:
-    description:
-      - A description of the rule.
-    required: false
-    type: str
-  role_arn:
-    description:
-      - The Amazon Resource Name (ARN) of the IAM role associated with the rule.
-    required: false
-    type: str
-  targets:
-    type: list
-    elements: dict
-    description:
-      - A list of targets to add to or update for the rule.
-    suboptions:
-      id:
-        type: str
-        required: true
-        description: The unique target assignment ID.
-      arn:
-        type: str
-        required: true
-        description: The ARN associated with the target.
-      role_arn:
-        type: str
-        description: The ARN of the IAM role to be used for this target when the rule is triggered.
-      input:
-        type: json
-        description:
-          - A JSON object that will override the event data passed to the target.
-          - If neither I(input) nor I(input_path) nor I(input_transformer)
-            is specified, then the entire event is passed to the target in JSON form.
-      input_path:
-        type: str
-        description:
-          - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
-            passed to the target.
-          - If neither I(input) nor I(input_path) nor I(input_transformer)
-            is specified, then the entire event is passed to the target in JSON form.
-      input_transformer:
-        type: dict
-        description:
-          - Settings to support providing custom input to a target based on certain event data.
-        version_added: 4.1.0
-        suboptions:
-          input_paths_map:
-            type: dict
-            description:
-              - A dict that specifies the transformation of the event data to
-                custom input parameters.
-          input_template:
-            type: json
-            description:
-              - A string that templates the values extracted from the event data via I(input_paths_map).
-                It is used to produce the output you want to be sent to the target.
-      ecs_parameters:
-        type: dict
-        description:
-          - Contains the ECS task definition and task count to be used, if the event target is an ECS task.
-        suboptions:
-          task_definition_arn:
-            type: str
-            description: The full ARN of the task definition.
-            required: true
-          task_count:
-            type: int
-            description: The number of tasks to create based on I(task_definition).
-            required: false
-'''
-
-EXAMPLES = r'''
-- community.aws.cloudwatchevent_rule:
-    name: MyCronTask
-    schedule_expression: "cron(0 20 * * ? *)"
-    description: Run my scheduled task
-    targets:
-      - id: MyTargetId
-        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
-
-- community.aws.cloudwatchevent_rule:
-    name: MyDisabledCronTask
-    schedule_expression: "rate(5 minutes)"
-    description: Run my disabled scheduled task
-    state: disabled
-    targets:
-      - id: MyOtherTargetId
-        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
-        input: '{"foo": "bar"}'
-
-- community.aws.cloudwatchevent_rule:
-    name: MyInstanceLaunchEvent
-    description: "Rule for EC2 instance launch"
-    state: present
-    event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}'
-    targets:
-      - id: MyTargetSnsTopic
-        arn: arn:aws:sns:us-east-1:123456789012:MySNSTopic
-        input_transformer:
-          input_paths_map:
-            instance: "$.detail.instance-id"
-            state: "$.detail.state"
-          input_template: "<instance> is in state <state>"
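-
-# An illustrative addition, not among the original examples: pass only part of
-# the matched event to the target with input_path. The rule name and JSONPath
-# below are assumptions.
-- community.aws.cloudwatchevent_rule:
-    name: MyDetailOnlyEvent
-    state: present
-    event_pattern: '{"source":["aws.ec2"]}'
-    targets:
-      - id: MyTargetId
-        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
-        input_path: "$.detail"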
-
-- community.aws.cloudwatchevent_rule:
-    name: MyCronTask
-    state: absent
-'''
-
-RETURN = r'''
-rule:
-  description: CloudWatch Event rule data.
-  returned: success
-  type: dict
-  sample:
-    arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
-    description: 'Run my scheduled task'
-    name: 'MyCronTask'
-    schedule_expression: 'cron(0 20 * * ? *)'
-    state: 'ENABLED'
-targets:
-  description: CloudWatch Event target(s) assigned to the rule.
-  returned: success
-  type: list
-  sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
-'''
-
-import json
-
-try:
-    import botocore
-except ImportError:
-    pass  # handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
-
-
-def _format_json(json_string):
-    # When passed a simple string, Ansible doesn't quote it to ensure it's a *quoted* string
-    try:
-        json.loads(json_string)
-        return json_string
-    except json.decoder.JSONDecodeError:
-        return str(json.dumps(json_string))
-
-
-class CloudWatchEventRule(object):
-    def __init__(self, module, name, client, schedule_expression=None,
-                 event_pattern=None, description=None, role_arn=None):
-        self.name = name
-        self.client = client
-        self.changed = False
-        self.schedule_expression = schedule_expression
-        self.event_pattern = event_pattern
-        self.description = description
-        self.role_arn = role_arn
-        self.module = module
-
-    def describe(self):
-        """Returns the existing details of the rule in AWS"""
-        try:
-            rule_info = self.client.describe_rule(Name=self.name)
-        except is_boto3_error_code('ResourceNotFoundException'):
-            return {}
-        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-            self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
-        return self._snakify(rule_info)
-
-    def put(self, enabled=True):
-        """Creates or updates the rule in AWS"""
-        request = {
-            'Name': self.name,
-            'State':
"ENABLED" if enabled else "DISABLED", - } - if self.schedule_expression: - request['ScheduleExpression'] = self.schedule_expression - if self.event_pattern: - request['EventPattern'] = self.event_pattern - if self.description: - request['Description'] = self.description - if self.role_arn: - request['RoleArn'] = self.role_arn - try: - response = self.client.put_rule(**request) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) - self.changed = True - return response - - def delete(self): - """Deletes the rule in AWS""" - self.remove_all_targets() - - try: - response = self.client.delete_rule(Name=self.name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) - self.changed = True - return response - - def enable(self): - """Enables the rule in AWS""" - try: - response = self.client.enable_rule(Name=self.name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) - self.changed = True - return response - - def disable(self): - """Disables the rule in AWS""" - try: - response = self.client.disable_rule(Name=self.name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) - self.changed = True - return response - - def list_targets(self): - """Lists the existing targets for the rule in AWS""" - try: - targets = self.client.list_targets_by_rule(Rule=self.name) - except is_boto3_error_code('ResourceNotFoundException'): - return [] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) - return self._snakify(targets)['targets'] - - def put_targets(self, targets): - """Creates or updates the provided targets on the rule in AWS""" - if not targets: - return - request = { - 'Rule': self.name, - 'Targets': self._targets_request(targets), - } - try: - response = self.client.put_targets(**request) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) - self.changed = True - return response - - def remove_targets(self, target_ids): - """Removes the provided targets from the rule in AWS""" - if not target_ids: - return - request = { - 'Rule': self.name, - 'Ids': target_ids - } - try: - response = self.client.remove_targets(**request) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) - self.changed = True - return response - - def remove_all_targets(self): - """Removes all targets on rule""" - targets = self.list_targets() - return self.remove_targets([t['id'] for t in targets]) - - def _targets_request(self, targets): - """Formats each target for the request""" - targets_request = [] - for target in targets: - target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) - if target_request.get('Input', None): - target_request['Input'] = _format_json(target_request['Input']) - if target_request.get('InputTransformer', None): - if 
target_request.get('InputTransformer').get('InputTemplate', None): - target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate']) - if target_request.get('InputTransformer').get('InputPathsMap', None): - target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map'] - targets_request.append(target_request) - return targets_request - - def _snakify(self, dict): - """Converts camel case to snake case""" - return camel_dict_to_snake_dict(dict) - - -class CloudWatchEventRuleManager(object): - RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn'] - - def __init__(self, rule, targets): - self.rule = rule - self.targets = targets - - def ensure_present(self, enabled=True): - """Ensures the rule and targets are present and synced""" - rule_description = self.rule.describe() - if rule_description: - # Rule exists so update rule, targets and state - self._sync_rule(enabled) - self._sync_targets() - self._sync_state(enabled) - else: - # Rule does not exist, so create new rule and targets - self._create(enabled) - - def ensure_disabled(self): - """Ensures the rule and targets are present, but disabled, and synced""" - self.ensure_present(enabled=False) - - def ensure_absent(self): - """Ensures the rule and targets are absent""" - rule_description = self.rule.describe() - if not rule_description: - # Rule doesn't exist so don't need to delete - return - self.rule.delete() - - def fetch_aws_state(self): - """Retrieves rule and target state from AWS""" - aws_state = { - 'rule': {}, - 'targets': [], - 'changed': self.rule.changed - } - rule_description = self.rule.describe() - if not rule_description: - return aws_state - - # Don't need to include response metadata noise in response - del rule_description['response_metadata'] - - aws_state['rule'] = rule_description - aws_state['targets'].extend(self.rule.list_targets()) - return aws_state - - def _sync_rule(self, enabled=True): - """Syncs local rule state with AWS""" - if not self._rule_matches_aws(): - self.rule.put(enabled) - - def _sync_targets(self): - """Syncs local targets with AWS""" - # Identify and remove extraneous targets on AWS - target_ids_to_remove = self._remote_target_ids_to_remove() - if target_ids_to_remove: - self.rule.remove_targets(target_ids_to_remove) - - # Identify targets that need to be added or updated on AWS - targets_to_put = self._targets_to_put() - if targets_to_put: - self.rule.put_targets(targets_to_put) - - def _sync_state(self, enabled=True): - """Syncs local rule state with AWS""" - remote_state = self._remote_state() - if enabled and remote_state != 'ENABLED': - self.rule.enable() - elif not enabled and remote_state != 'DISABLED': - self.rule.disable() - - def _create(self, enabled=True): - """Creates rule and targets on AWS""" - self.rule.put(enabled) - self.rule.put_targets(self.targets) - - def _rule_matches_aws(self): - """Checks if the local rule data matches AWS""" - aws_rule_data = self.rule.describe() - - # The rule matches AWS only if all rule data fields are equal - # to their corresponding local value defined in the task - return all( - getattr(self.rule, field) == aws_rule_data.get(field, None) - for field in self.RULE_FIELDS - ) - - def _targets_to_put(self): - """Returns a list of targets that need to be updated or added remotely""" - remote_targets = self.rule.list_targets() - return [t for t in self.targets if t not in remote_targets] - - def 
_remote_target_ids_to_remove(self):
-        """Returns a list of targets that need to be removed remotely"""
-        target_ids = [t['id'] for t in self.targets]
-        remote_targets = self.rule.list_targets()
-        return [
-            rt['id'] for rt in remote_targets if rt['id'] not in target_ids
-        ]
-
-    def _remote_state(self):
-        """Returns the remote state from AWS"""
-        description = self.rule.describe()
-        if not description:
-            return
-        return description['state']
-
-
-def main():
-    target_args = dict(
-        type='list', elements='dict', default=[],
-        options=dict(
-            id=dict(type='str', required=True),
-            arn=dict(type='str', required=True),
-            role_arn=dict(type='str'),
-            input=dict(type='json'),
-            input_path=dict(type='str'),
-            input_transformer=dict(
-                type='dict',
-                options=dict(
-                    input_paths_map=dict(type='dict'),
-                    input_template=dict(type='json'),
-                ),
-            ),
-            ecs_parameters=dict(
-                type='dict',
-                options=dict(
-                    task_definition_arn=dict(type='str', required=True),
-                    task_count=dict(type='int'),
-                ),
-            ),
-        ),
-    )
-    argument_spec = dict(
-        name=dict(required=True),
-        schedule_expression=dict(),
-        event_pattern=dict(type='json'),
-        state=dict(choices=['present', 'disabled', 'absent'],
-                   default='present'),
-        description=dict(),
-        role_arn=dict(),
-        targets=target_args,
-    )
-    module = AnsibleAWSModule(argument_spec=argument_spec)
-
-    rule_data = dict(
-        [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
-    )
-    targets = module.params.get('targets')
-    state = module.params.get('state')
-    client = module.client('events')
-
-    cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
-    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
-
-    if state == 'present':
-        cwe_rule_manager.ensure_present()
-    elif state == 'disabled':
-        cwe_rule_manager.ensure_disabled()
-    elif state == 'absent':
-        cwe_rule_manager.ensure_absent()
-    else:
-        module.fail_json(msg="Invalid state '{0}' provided".format(state))
-
-    module.exit_json(**cwe_rule_manager.fetch_aws_state())
-
-
-if __name__ == '__main__':
-    main()

From b81f284bc5dc96a3c0f17b7b3424018fdb6d8bca Mon Sep 17 00:00:00 2001
From: GomathiselviS
Date: Thu, 22 Sep 2022 09:52:37 -0400
Subject: [PATCH 580/683] Promote autoscaling_group* module (#1514)

Promote autoscaling_group* module

SUMMARY
Remove autoscaling_group* modules and tests
These modules have been migrated to amazon.aws
Update runtime.yml with redirects to that collection
Update ignore files

Reviewed-by: Mark Chappell
---
 autoscaling_group.py | 1950 -------------------------------------
 autoscaling_group_info.py | 458 ---------
 2 files changed, 2408 deletions(-)
 delete mode 100644 autoscaling_group.py
 delete mode 100644 autoscaling_group_info.py

diff --git a/autoscaling_group.py b/autoscaling_group.py
deleted file mode 100644
index 84db04bce9c..00000000000
--- a/autoscaling_group.py
+++ /dev/null
@@ -1,1950 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: autoscaling_group
-version_added: 1.0.0
-short_description: Create or delete AWS AutoScaling Groups (ASGs)
-description:
-  - Can create or delete AWS AutoScaling Groups.
-  - Can be used with the M(community.aws.autoscaling_launch_config) module to manage Launch Configurations.
-  - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg).
-    The usage did not change.
-author:
-  - "Gareth Rushgrove (@garethr)"
-options:
-  state:
-    description:
-      - Register or deregister the instance.
-    choices: ['present', 'absent']
-    default: present
-    type: str
-  name:
-    description:
-      - Unique name for group to be created or deleted.
-    required: true
-    type: str
-  load_balancers:
-    description:
-      - List of ELB names to use for the group. Use for classic load balancers.
-    type: list
-    elements: str
-  target_group_arns:
-    description:
-      - List of target group ARNs to use for the group. Use for application load balancers.
-    type: list
-    elements: str
-  availability_zones:
-    description:
-      - List of availability zone names in which to create the group.
-      - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set.
-    type: list
-    elements: str
-  launch_config_name:
-    description:
-      - Name of the Launch configuration to use for the group. See the M(community.aws.autoscaling_launch_config) module for managing these.
-      - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided.
-    type: str
-  launch_template:
-    description:
-      - Dictionary describing the Launch Template to use.
-    suboptions:
-      version:
-        description:
-          - The version number of the launch template to use.
-          - Defaults to latest version if not provided.
-        type: str
-      launch_template_name:
-        description:
-          - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
-        type: str
-      launch_template_id:
-        description:
-          - The id of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
-        type: str
-    type: dict
-  min_size:
-    description:
-      - Minimum number of instances in group, if unspecified then the current group value will be used.
-    type: int
-  max_size:
-    description:
-      - Maximum number of instances in group, if unspecified then the current group value will be used.
-    type: int
-  max_instance_lifetime:
-    description:
-      - The maximum amount of time, in seconds, that an instance can be in service.
-      - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
-      - Value of 0 removes lifetime restriction.
-    type: int
-  mixed_instances_policy:
-    description:
-      - A mixed instance policy to use for the ASG.
-      - Only used when the ASG is configured to use a Launch Template (I(launch_template)).
-      - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)'
-    required: false
-    suboptions:
-      instance_types:
-        description:
-          - A list of instance_types.
-        type: list
-        elements: str
-        required: false
-      instances_distribution:
-        description:
-          - >-
-            Specifies the distribution of On-Demand Instances and Spot Instances, the maximum price
-            to pay for Spot Instances, and how the Auto Scaling group allocates instance types
-            to fulfill On-Demand and Spot capacity.
-          - 'See also U(https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_InstancesDistribution.html)'
-        required: false
-        type: dict
-        version_added: 1.5.0
-        suboptions:
-          on_demand_allocation_strategy:
-            description:
-              - Indicates how to allocate instance types to fulfill On-Demand capacity.
- type: str - required: false - version_added: 1.5.0 - on_demand_base_capacity: - description: - - >- - The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand - Instances. This base portion is provisioned first as your group scales. - - >- - Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a - percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting. - type: int - required: false - version_added: 1.5.0 - on_demand_percentage_above_base_capacity: - description: - - Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. - - Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances. - - 'Valid range: 0 to 100' - type: int - required: false - version_added: 1.5.0 - spot_allocation_strategy: - description: - - Indicates how to allocate instances across Spot Instance pools. - type: str - required: false - version_added: 1.5.0 - spot_instance_pools: - description: - - >- - The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from - the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2. - - Used only when the Spot allocation strategy is lowest-price. - - 'Valid Range: Minimum value of 1. Maximum value of 20.' - type: int - required: false - version_added: 1.5.0 - spot_max_price: - description: - - The maximum price per unit hour that you are willing to pay for a Spot Instance. - - If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price. - - To remove a value that you previously set, include the parameter but leave the value blank. - type: str - required: false - version_added: 1.5.0 - type: dict - placement_group: - description: - - Physical location of your cluster placement group created in Amazon EC2. - type: str - desired_capacity: - description: - - Desired number of instances in group, if unspecified then the current group value will be used. - type: int - replace_all_instances: - description: - - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration. - It increases the ASG size by I(replace_batch_size), waits for the new instances to be up and running. - After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced. - Once that's done the ASG size is reduced back to the expected size. - default: false - type: bool - replace_batch_size: - description: - - Number of instances you'd like to replace at a time. Used with I(replace_all_instances). - required: false - default: 1 - type: int - replace_instances: - description: - - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances - matching the current launch configuration. - type: list - elements: str - detach_instances: - description: - - Removes one or more instances from the specified AutoScalingGroup. - - If I(decrement_desired_capacity) flag is not set, new instance(s) are launched to replace the detached instance(s). - - If a Classic Load Balancer is attached to the AutoScalingGroup, the instances are also deregistered from the load balancer. 
-      - If there are target groups attached to the AutoScalingGroup, the instances are also deregistered from the target groups.
-    type: list
-    elements: str
-    version_added: 3.2.0
-  decrement_desired_capacity:
-    description:
-      - Indicates whether the AutoScalingGroup decrements the desired capacity value by the number of instances detached.
-    default: false
-    type: bool
-    version_added: 3.2.0
-  lc_check:
-    description:
-      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config).
-    default: true
-    type: bool
-  lt_check:
-    description:
-      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
-        I(launch_template) or I(launch_template) I(version).
-    default: true
-    type: bool
-  vpc_zone_identifier:
-    description:
-      - List of VPC subnets to use.
-    type: list
-    elements: str
-  tags:
-    description:
-      - A list of tags to add to the Auto Scale Group.
-      - Optional key is I(propagate_at_launch), which defaults to true.
-      - When I(propagate_at_launch) is true the tags will be propagated to the Instances created.
-    type: list
-    elements: dict
-  purge_tags:
-    description:
-      - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
-      - If the I(tags) parameter is not set then tags will not be modified.
-    default: false
-    type: bool
-    version_added: 3.2.0
-  health_check_period:
-    description:
-      - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
-    required: false
-    default: 300
-    type: int
-  health_check_type:
-    description:
-      - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
-    required: false
-    default: EC2
-    choices: ['EC2', 'ELB']
-    type: str
-  default_cooldown:
-    description:
-      - The number of seconds after a scaling activity completes before another can begin.
-    default: 300
-    type: int
-  wait_timeout:
-    description:
-      - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
-        try increasing this value.
-    default: 300
-    type: int
-  wait_for_instances:
-    description:
-      - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
-        instances have a lifecycle_state of "InService" and a health_status of "Healthy".
-    default: true
-    type: bool
-  termination_policies:
-    description:
-      - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
-      - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained
-        instead of changed to C(Default).
-      - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)'
-      - 'Full documentation of valid values can be found in the AWS documentation:'
-      - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)'
-    default: Default
-    type: list
-    elements: str
-  notification_topic:
-    description:
-      - An SNS topic ARN to send auto scaling notifications to.
-    type: str
-  notification_types:
-    description:
-      - A list of auto scaling events to trigger notifications on.
- default: - - 'autoscaling:EC2_INSTANCE_LAUNCH' - - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR' - - 'autoscaling:EC2_INSTANCE_TERMINATE' - - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' - required: false - type: list - elements: str - suspend_processes: - description: - - A list of scaling processes to suspend. - - 'Valid values include:' - - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer) - - 'Full documentation of valid values can be found in the AWS documentation:' - - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)' - default: [] - type: list - elements: str - metrics_collection: - description: - - Enable ASG metrics collection. - type: bool - default: false - metrics_granularity: - description: - - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch. - default: "1Minute" - type: str - metrics_list: - description: - - List of autoscaling metrics to collect when I(metrics_collection=true). - default: - - 'GroupMinSize' - - 'GroupMaxSize' - - 'GroupDesiredCapacity' - - 'GroupInServiceInstances' - - 'GroupPendingInstances' - - 'GroupStandbyInstances' - - 'GroupTerminatingInstances' - - 'GroupTotalInstances' - type: list - elements: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = r''' -# Basic configuration with Launch Configuration - -- community.aws.autoscaling_group: - name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] - launch_config_name: 'lc-1' - min_size: 1 - max_size: 10 - desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] - tags: - - environment: production - propagate_at_launch: false - -# Rolling ASG Updates - -# Below is an example of how to assign a new launch config to an ASG and terminate old instances. -# -# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in -# a rolling fashion with instances using the current launch configuration, "my_new_lc". -# -# This could also be considered a rolling deploy of a pre-baked AMI. -# -# If this is a newly created group, the instances will not be replaced since all instances -# will have the current launch configuration. 
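-#
-# The batch size for this rolling replacement is controlled by replace_batch_size (default 1).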
- -- name: create launch config - community.aws.autoscaling_launch_config: - name: my_new_lc - image_id: ami-lkajsf - key_name: mykey - region: us-east-1 - security_groups: sg-23423 - instance_type: m1.small - assign_public_ip: true - -- community.aws.autoscaling_group: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_all_instances: true - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 - -# To only replace a couple of instances instead of all of them, supply a list -# to "replace_instances": - -- community.aws.autoscaling_group: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_instances: - - i-b345231 - - i-24c2931 - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 - -# Basic Configuration with Launch Template - -- community.aws.autoscaling_group: - name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] - launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' - min_size: 1 - max_size: 10 - desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] - tags: - - environment: production - propagate_at_launch: false - -# Basic Configuration with Launch Template using mixed instance policy - -- community.aws.autoscaling_group: - name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] - launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' - mixed_instances_policy: - instance_types: - - t3a.large - - t3.large - - t2.large - instances_distribution: - on_demand_percentage_above_base_capacity: 0 - spot_allocation_strategy: capacity-optimized - min_size: 1 - max_size: 10 - desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] - tags: - - environment: production - propagate_at_launch: false -''' - -RETURN = r''' ---- -auto_scaling_group_name: - description: The unique name of the auto scaling group - returned: success - type: str - sample: "myasg" -auto_scaling_group_arn: - description: The unique ARN of the autoscaling group - returned: success - type: str - sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg" -availability_zones: - description: The availability zones for the auto scaling group - returned: success - type: list - sample: [ - "us-east-1d" - ] -created_time: - description: Timestamp of create time of the auto scaling group - returned: success - type: str - sample: "2017-11-08T14:41:48.272000+00:00" -default_cooldown: - description: The default cooldown time in seconds. - returned: success - type: int - sample: 300 -desired_capacity: - description: The number of EC2 instances that should be running in this group. - returned: success - type: int - sample: 3 -healthcheck_period: - description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. - returned: success - type: int - sample: 30 -healthcheck_type: - description: The service you want the health status from, one of "EC2" or "ELB". 
- returned: success - type: str - sample: "ELB" -healthy_instances: - description: Number of instances in a healthy state - returned: success - type: int - sample: 5 -in_service_instances: - description: Number of instances in service - returned: success - type: int - sample: 3 -instance_facts: - description: Dictionary of EC2 instances and their status as it relates to the ASG. - returned: success - type: dict - sample: { - "i-0123456789012": { - "health_status": "Healthy", - "launch_config_name": "public-webapp-production-1", - "lifecycle_state": "InService" - } - } -instances: - description: list of instance IDs in the ASG - returned: success - type: list - sample: [ - "i-0123456789012" - ] -launch_config_name: - description: > - Name of launch configuration associated with the ASG. Same as launch_configuration_name, - provided for compatibility with M(community.aws.autoscaling_group) module. - returned: success - type: str - sample: "public-webapp-production-1" -load_balancers: - description: List of load balancers names attached to the ASG. - returned: success - type: list - sample: ["elb-webapp-prod"] -max_instance_lifetime: - description: The maximum amount of time, in seconds, that an instance can be in service. - returned: success - type: int - sample: 604800 -max_size: - description: Maximum size of group - returned: success - type: int - sample: 3 -min_size: - description: Minimum size of group - returned: success - type: int - sample: 1 -mixed_instances_policy: - description: Returns the list of instance types if a mixed instances policy is set. - returned: success - type: list - sample: ["t3.micro", "t3a.micro"] -mixed_instances_policy_full: - description: Returns the full dictionary representation of the mixed instances policy if a mixed instances policy is set. - returned: success - type: dict - sample: { - "instances_distribution": { - "on_demand_allocation_strategy": "prioritized", - "on_demand_base_capacity": 0, - "on_demand_percentage_above_base_capacity": 0, - "spot_allocation_strategy": "capacity-optimized" - }, - "launch_template": { - "launch_template_specification": { - "launch_template_id": "lt-53c2425cffa544c23", - "launch_template_name": "random-LaunchTemplate", - "version": "2" - }, - "overrides": [ - { - "instance_type": "m5.xlarge" - }, - { - "instance_type": "m5a.xlarge" - }, - ] - } - } -pending_instances: - description: Number of instances in pending state - returned: success - type: int - sample: 1 -tags: - description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. 
-  returned: success
-  type: list
-  sample: [
-    {
-      "key": "Name",
-      "value": "public-webapp-production-1",
-      "resource_id": "public-webapp-production-1",
-      "resource_type": "auto-scaling-group",
-      "propagate_at_launch": "true"
-    },
-    {
-      "key": "env",
-      "value": "production",
-      "resource_id": "public-webapp-production-1",
-      "resource_type": "auto-scaling-group",
-      "propagate_at_launch": "true"
-    }
-  ]
-target_group_arns:
-  description: List of ARNs of the target groups that the ASG populates
-  returned: success
-  type: list
-  sample: [
-    "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
-    "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
-  ]
-target_group_names:
-  description: List of names of the target groups that the ASG populates
-  returned: success
-  type: list
-  sample: [
-    "target-group-host-hello",
-    "target-group-path-world"
-  ]
-termination_policies:
-  description: A list of termination policies for the group.
-  returned: success
-  type: list
-  sample: ["Default"]
-unhealthy_instances:
-  description: Number of instances in an unhealthy state
-  returned: success
-  type: int
-  sample: 0
-viable_instances:
-  description: Number of instances in a viable state
-  returned: success
-  type: int
-  sample: 1
-vpc_zone_identifier:
-  description: VPC zone ID / subnet ID for the auto scaling group
-  returned: success
-  type: str
-  sample: "subnet-a31ef45f"
-metrics_collection:
-  description: List of enabled AutoScalingGroup metrics
-  returned: success
-  type: list
-  sample: [
-    {
-      "Granularity": "1Minute",
-      "Metric": "GroupInServiceInstances"
-    }
-  ]
-'''
-
-import time
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils._text import to_native
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-
-ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
-                  'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
-                  'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
-                  'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
-                  'VPCZoneIdentifier')
-
-INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
-
-backoff_params = dict(retries=10, delay=3, backoff=1.5)
-
-
-@AWSRetry.jittered_backoff(**backoff_params)
-def describe_autoscaling_groups(connection, group_name):
-    pg = connection.get_paginator('describe_auto_scaling_groups')
-    return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
-
-
-@AWSRetry.jittered_backoff(**backoff_params)
-def deregister_lb_instances(connection, lb_name, instance_id):
-    connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
-
-
-@AWSRetry.jittered_backoff(**backoff_params)
-def describe_instance_health(connection, lb_name, instances):
-    params = dict(LoadBalancerName=lb_name)
-    if instances:
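-        # Only pass Instances when a subset was requested; otherwise the call
-        # describes every instance registered with the load balancer.
-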
params.update(Instances=instances) - return connection.describe_instance_health(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def describe_target_health(connection, target_group_arn, instances): - return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) - - -@AWSRetry.jittered_backoff(**backoff_params) -def suspend_asg_processes(connection, asg_name, processes): - connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) - - -@AWSRetry.jittered_backoff(**backoff_params) -def resume_asg_processes(connection, asg_name, processes): - connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) - - -@AWSRetry.jittered_backoff(**backoff_params) -def describe_launch_configurations(connection, launch_config_name): - pg = connection.get_paginator('describe_launch_configurations') - return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() - - -@AWSRetry.jittered_backoff(**backoff_params) -def describe_launch_templates(connection, launch_template): - if launch_template['launch_template_id'] is not None: - try: - lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) - return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) - else: - try: - lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) - return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) - - -@AWSRetry.jittered_backoff(**backoff_params) -def create_asg(connection, **params): - connection.create_auto_scaling_group(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def put_notification_config(connection, asg_name, topic_arn, notification_types): - connection.put_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn, - NotificationTypes=notification_types - ) - - -@AWSRetry.jittered_backoff(**backoff_params) -def del_notification_config(connection, asg_name, topic_arn): - connection.delete_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn - ) - - -@AWSRetry.jittered_backoff(**backoff_params) -def attach_load_balancers(connection, asg_name, load_balancers): - connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) - - -@AWSRetry.jittered_backoff(**backoff_params) -def detach_load_balancers(connection, asg_name, load_balancers): - connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) - - -@AWSRetry.jittered_backoff(**backoff_params) -def attach_lb_target_groups(connection, asg_name, target_group_arns): - connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) - - -@AWSRetry.jittered_backoff(**backoff_params) -def detach_lb_target_groups(connection, asg_name, target_group_arns): - connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) - - -@AWSRetry.jittered_backoff(**backoff_params) -def update_asg(connection, **params): - connection.update_auto_scaling_group(**params) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) -def delete_asg(connection, asg_name, force_delete): - 
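    # Retried on ScalingActivityInProgress (see catch_extra_error_codes on the
-    # decorator above), since deletion can race with in-flight scaling activity.
-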
connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) - - -@AWSRetry.jittered_backoff(**backoff_params) -def terminate_asg_instance(connection, instance_id, decrement_capacity): - connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, - ShouldDecrementDesiredCapacity=decrement_capacity) - - -@AWSRetry.jittered_backoff(**backoff_params) -def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity): - connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, - ShouldDecrementDesiredCapacity=decrement_capacity) - - -def enforce_required_arguments_for_create(): - ''' As many arguments are not required for autoscale group deletion - they cannot be mandatory arguments for the module, so we enforce - them here ''' - missing_args = [] - if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: - module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") - for arg in ('min_size', 'max_size'): - if module.params[arg] is None: - missing_args.append(arg) - if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) - - -def get_properties(autoscaling_group): - properties = dict( - healthy_instances=0, - in_service_instances=0, - unhealthy_instances=0, - pending_instances=0, - viable_instances=0, - terminating_instances=0 - ) - instance_facts = dict() - autoscaling_group_instances = autoscaling_group.get('Instances') - - if autoscaling_group_instances: - properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] - for i in autoscaling_group_instances: - instance_facts[i['InstanceId']] = { - 'health_status': i['HealthStatus'], - 'lifecycle_state': i['LifecycleState'] - } - if 'LaunchConfigurationName' in i: - instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName'] - elif 'LaunchTemplate' in i: - instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate'] - - if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': - properties['viable_instances'] += 1 - - if i['HealthStatus'] == 'Healthy': - properties['healthy_instances'] += 1 - else: - properties['unhealthy_instances'] += 1 - - if i['LifecycleState'] == 'InService': - properties['in_service_instances'] += 1 - if i['LifecycleState'] == 'Terminating': - properties['terminating_instances'] += 1 - if i['LifecycleState'] == 'Pending': - properties['pending_instances'] += 1 - else: - properties['instances'] = [] - - properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') - properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') - properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') - properties['created_time'] = autoscaling_group.get('CreatedTime') - properties['instance_facts'] = instance_facts - properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') - if 'LaunchConfigurationName' in autoscaling_group: - properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') - else: - properties['launch_template'] = autoscaling_group.get('LaunchTemplate') - properties['tags'] = autoscaling_group.get('Tags') - properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime') - properties['min_size'] = autoscaling_group.get('MinSize') - properties['max_size'] = 
autoscaling_group.get('MaxSize') - properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod') - properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['termination_policies'] = autoscaling_group.get('TerminationPolicies') - properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs') - properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') - raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy') - if raw_mixed_instance_object: - properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object) - properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')] - - metrics = autoscaling_group.get('EnabledMetrics') - if metrics: - metrics.sort(key=lambda x: x["Metric"]) - properties['metrics_collection'] = metrics - - if properties['target_group_arns']: - elbv2_connection = module.client('elbv2') - tg_paginator = elbv2_connection.get_paginator('describe_target_groups') - tg_result = tg_paginator.paginate( - TargetGroupArns=properties['target_group_arns'] - ).build_full_result() - target_groups = tg_result['TargetGroups'] - else: - target_groups = [] - - properties['target_group_names'] = [ - tg['TargetGroupName'] - for tg in target_groups - ] - - return properties - - -def get_launch_object(connection, ec2_connection): - launch_object = dict() - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') - if launch_config_name is None and launch_template is None: - return launch_object - elif launch_config_name: - try: - launch_configs = describe_launch_configurations(connection, launch_config_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe launch configurations") - if len(launch_configs['LaunchConfigurations']) == 0: - module.fail_json(msg="No launch config found with name %s" % launch_config_name) - launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} - return launch_object - elif launch_template: - lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] - if launch_template['version'] is not None: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} - else: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} - - if mixed_instances_policy: - instance_types = mixed_instances_policy.get('instance_types', []) - instances_distribution = mixed_instances_policy.get('instances_distribution', {}) - policy = { - 'LaunchTemplate': { - 'LaunchTemplateSpecification': launch_object['LaunchTemplate'] - } - } - if instance_types: - policy['LaunchTemplate']['Overrides'] = [] - for instance_type in instance_types: - instance_type_dict = {'InstanceType': instance_type} - policy['LaunchTemplate']['Overrides'].append(instance_type_dict) - if instances_distribution: - instances_distribution_params = 
scrub_none_parameters(instances_distribution) - policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True) - launch_object['MixedInstancesPolicy'] = policy - return launch_object - - -def elb_dreg(asg_connection, group_name, instance_id): - as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - wait_timeout = module.params.get('wait_timeout') - count = 1 - if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': - elb_connection = module.client('elb') - else: - return - - for lb in as_group['LoadBalancerNames']: - deregister_lb_instances(elb_connection, lb, instance_id) - module.debug("De-registering %s from ELB %s" % (instance_id, lb)) - - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and count > 0: - count = 0 - for lb in as_group['LoadBalancerNames']: - lb_instances = describe_instance_health(elb_connection, lb, []) - for i in lb_instances['InstanceStates']: - if i['InstanceId'] == instance_id and i['State'] == "InService": - count += 1 - module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description'])) - time.sleep(10) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime())) - - -def elb_healthy(asg_connection, elb_connection, group_name): - healthy_instances = set() - as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - props = get_properties(as_group) - # get healthy, inservice instances from ASG - instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': - instances.append(dict(InstanceId=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) - module.debug("ELB instance status:") - lb_instances = list() - for lb in as_group.get('LoadBalancerNames'): - # we catch a race condition that sometimes happens if the instance exists in the ASG - # but has not yet shown up in the ELB - try: - lb_instances = describe_instance_health(elb_connection, lb, instances) - except is_boto3_error_code('InvalidInstance'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get load balancer.") - - for i in lb_instances.get('InstanceStates'): - if i['State'] == "InService": - healthy_instances.add(i['InstanceId']) - module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State'])) - return len(healthy_instances) - - -def tg_healthy(asg_connection, elbv2_connection, group_name): - healthy_instances = set() - as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - props = get_properties(as_group) - # get healthy, inservice instances from ASG - instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': - instances.append(dict(Id=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) - module.debug("Target Group instance status:") - tg_instances = list() - for tg in as_group.get('TargetGroupARNs'): - # we catch a race condition that sometimes happens if the instance exists in the ASG - # but has not yet shown up in the ELB - try: - tg_instances = describe_target_health(elbv2_connection, tg, instances) - 
except is_boto3_error_code('InvalidInstance'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get target group.") - - for i in tg_instances.get('TargetHealthDescriptions'): - if i['TargetHealth']['State'] == "healthy": - healthy_instances.add(i['Target']['Id']) - module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State'])) - return len(healthy_instances) - - -def wait_for_elb(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') - - # if the health_check_type is ELB, we want to query the ELBs directly for instance - # status so as to avoid the health_check_grace period that is awarded to ASG instances - as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - - if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB': - module.debug("Waiting for ELB to consider instances healthy.") - elb_connection = module.client('elb') - - wait_timeout = time.time() + wait_timeout - healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): - healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - module.debug("ELB thinks %s instances are healthy." % healthy_instances) - time.sleep(10) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances) - - -def wait_for_target_group(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') - - # if the health_check_type is ELB, we want to query the ELBs directly for instance - # status so as to avoid the health_check_grace period that is awarded to ASG instances - as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - - if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB': - module.debug("Waiting for Target Group to consider instances healthy.") - elbv2_connection = module.client('elbv2') - - wait_timeout = time.time() + wait_timeout - healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): - healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - module.debug("Target Group thinks %s instances are healthy." % healthy_instances) - time.sleep(10) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for target group instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. Target Group thinks %s instances are healthy." 
% healthy_instances) - - -def suspend_processes(ec2_connection, as_group): - suspend_processes = set(module.params.get('suspend_processes')) - - try: - suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) - except AttributeError: - # New ASG being created, no suspended_processes defined yet - suspended_processes = set() - - if suspend_processes == suspended_processes: - return False - - resume_processes = list(suspended_processes - suspend_processes) - if resume_processes: - resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) - - if suspend_processes: - suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) - - return True - - -def create_autoscaling_group(connection): - group_name = module.params.get('name') - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - availability_zones = module.params['availability_zones'] - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') - min_size = module.params['min_size'] - max_size = module.params['max_size'] - max_instance_lifetime = module.params.get('max_instance_lifetime') - placement_group = module.params.get('placement_group') - desired_capacity = module.params.get('desired_capacity') - vpc_zone_identifier = module.params.get('vpc_zone_identifier') - set_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - health_check_period = module.params.get('health_check_period') - health_check_type = module.params.get('health_check_type') - default_cooldown = module.params.get('default_cooldown') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') - termination_policies = module.params.get('termination_policies') - notification_topic = module.params.get('notification_topic') - notification_types = module.params.get('notification_types') - metrics_collection = module.params.get('metrics_collection') - metrics_granularity = module.params.get('metrics_granularity') - metrics_list = module.params.get('metrics_list') - - try: - as_groups = describe_autoscaling_groups(connection, group_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe auto scaling groups.") - - ec2_connection = module.client('ec2') - - if vpc_zone_identifier: - vpc_zone_identifier = ','.join(vpc_zone_identifier) - - asg_tags = [] - for tag in set_tags: - for k, v in tag.items(): - if k != 'propagate_at_launch': - asg_tags.append(dict(Key=k, - Value=to_native(v), - PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), - ResourceType='auto-scaling-group', - ResourceId=group_name)) - if not as_groups: - if module.check_mode: - module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.") - - if not vpc_zone_identifier and not availability_zones: - availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for - zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] - - enforce_required_arguments_for_create() - - if desired_capacity is None: - desired_capacity = min_size - ag = dict( - AutoScalingGroupName=group_name, - MinSize=min_size, - MaxSize=max_size, - DesiredCapacity=desired_capacity, - Tags=asg_tags, - HealthCheckGracePeriod=health_check_period, - 
HealthCheckType=health_check_type, - DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) - if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier - if availability_zones: - ag['AvailabilityZones'] = availability_zones - if placement_group: - ag['PlacementGroup'] = placement_group - if load_balancers: - ag['LoadBalancerNames'] = load_balancers - if target_group_arns: - ag['TargetGroupARNs'] = target_group_arns - if max_instance_lifetime: - ag['MaxInstanceLifetime'] = max_instance_lifetime - - launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] - else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] - else: - module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate") - - try: - create_asg(connection, **ag) - if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) - - all_ag = describe_autoscaling_groups(connection, group_name) - if len(all_ag) == 0: - module.fail_json(msg="No auto scaling group found with the name %s" % group_name) - as_group = all_ag[0] - suspend_processes(connection, as_group) - if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') - if load_balancers: - wait_for_elb(connection, group_name) - # Wait for target group health if target group(s) defined - if target_group_arns: - wait_for_target_group(connection, group_name) - if notification_topic: - put_notification_config(connection, group_name, notification_topic, notification_types) - as_group = describe_autoscaling_groups(connection, group_name)[0] - asg_properties = get_properties(as_group) - changed = True - return changed, asg_properties - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create Autoscaling Group.") - else: - if module.check_mode: - module.exit_json(changed=True, msg="Would have modified AutoScalingGroup if required, were it not in check_mode.") - - as_group = as_groups[0] - initial_asg_properties = get_properties(as_group) - changed = False - - if suspend_processes(connection, as_group): - changed = True - - # process tag changes - have_tags = as_group.get('Tags') - want_tags = asg_tags - if purge_tags and not want_tags and have_tags: - connection.delete_tags(Tags=list(have_tags)) - - if len(set_tags) > 0: - if have_tags: - have_tags.sort(key=lambda x: x["Key"]) - if want_tags: - want_tags.sort(key=lambda x: x["Key"]) - dead_tags = [] - have_tag_keyvals = [x['Key'] for x in have_tags] - want_tag_keyvals = [x['Key'] for x in want_tags] - - for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals): - changed = True - if purge_tags: - dead_tags.append(dict( - ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag)) - have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag] - - if dead_tags: - connection.delete_tags(Tags=dead_tags) - - zipped = zip(have_tags, want_tags) - if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped): - changed = True - connection.create_or_update_tags(Tags=asg_tags) - - # Handle load balancer 
attachments/detachments - # Attach load balancers if they are specified but none currently exist - if load_balancers and not as_group['LoadBalancerNames']: - changed = True - try: - attach_load_balancers(connection, group_name, load_balancers) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") - - # Update load balancers if they are specified and one or more already exists - elif as_group['LoadBalancerNames']: - change_load_balancers = load_balancers is not None - # Get differences - if not load_balancers: - load_balancers = list() - wanted_elbs = set(load_balancers) - - has_elbs = set(as_group['LoadBalancerNames']) - # check if all requested are already existing - if has_elbs - wanted_elbs and change_load_balancers: - # if wanted contains fewer than existing, then we need to delete some - elbs_to_detach = has_elbs.difference(wanted_elbs) - if elbs_to_detach: - changed = True - try: - detach_load_balancers(connection, group_name, list(elbs_to_detach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach)) - if wanted_elbs - has_elbs: - # if existing contains fewer than wanted, then we need to add some - elbs_to_attach = wanted_elbs.difference(has_elbs) - if elbs_to_attach: - changed = True - try: - attach_load_balancers(connection, group_name, list(elbs_to_attach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach)) - - # Handle target group attachments/detachments - # Attach target groups if they are specified but none currently exist - if target_group_arns and not as_group['TargetGroupARNs']: - changed = True - try: - attach_lb_target_groups(connection, group_name, target_group_arns) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") - # Update target groups if they are specified and one or more already exists - elif target_group_arns is not None and as_group['TargetGroupARNs']: - # Get differences - wanted_tgs = set(target_group_arns) - has_tgs = set(as_group['TargetGroupARNs']) - - tgs_to_detach = has_tgs.difference(wanted_tgs) - if tgs_to_detach: - changed = True - try: - detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) - - tgs_to_attach = wanted_tgs.difference(has_tgs) - if tgs_to_attach: - changed = True - try: - attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) - - # check for attributes that aren't required for updating an existing ASG - # check if min_size/max_size/desired capacity have been specified and if not use ASG values - if min_size is None: - min_size = as_group['MinSize'] - if max_size is None: - max_size = as_group['MaxSize'] - if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] - ag = dict( - AutoScalingGroupName=group_name, - MinSize=min_size, - MaxSize=max_size, - DesiredCapacity=desired_capacity, - 
HealthCheckGracePeriod=health_check_period, - HealthCheckType=health_check_type, - DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) - - # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not. - launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] - else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] - else: - try: - ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName'] - except Exception: - launch_template = as_group['LaunchTemplate'] - # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg. - ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']} - - if availability_zones: - ag['AvailabilityZones'] = availability_zones - if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier - if max_instance_lifetime is not None: - ag['MaxInstanceLifetime'] = max_instance_lifetime - - try: - update_asg(connection, **ag) - - if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) - else: - connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update autoscaling group") - - if notification_topic: - try: - put_notification_config(connection, group_name, notification_topic, notification_types) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.") - if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') - # Wait for ELB health if ELB(s) defined - if load_balancers: - module.debug('\tWAITING FOR ELB HEALTH') - wait_for_elb(connection, group_name) - # Wait for target group health if target group(s) defined - - if target_group_arns: - module.debug('\tWAITING FOR TG HEALTH') - wait_for_target_group(connection, group_name) - - try: - as_group = describe_autoscaling_groups(connection, group_name)[0] - asg_properties = get_properties(as_group) - if asg_properties != initial_asg_properties: - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to read existing Autoscaling Groups.") - return changed, asg_properties - - -def delete_autoscaling_group(connection): - group_name = module.params.get('name') - notification_topic = module.params.get('notification_topic') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') - - if notification_topic: - del_notification_config(connection, group_name, notification_topic) - groups = describe_autoscaling_groups(connection, group_name) - if groups: - if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup if not in check_mode.") - wait_timeout = time.time() + wait_timeout - if not wait_for_instances: - delete_asg(connection, group_name, 
force_delete=True) - else: - updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0) - update_asg(connection, **updated_params) - instances = True - while instances and wait_for_instances and wait_timeout >= time.time(): - tmp_groups = describe_autoscaling_groups(connection, group_name) - if tmp_groups: - tmp_group = tmp_groups[0] - if not tmp_group.get('Instances'): - instances = False - time.sleep(10) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) - - delete_asg(connection, group_name, force_delete=False) - while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): - time.sleep(5) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) - return True - - return False - - -def get_chunks(l, n): - for i in range(0, len(l), n): - yield l[i:i + n] - - -def update_size(connection, group, max_size, min_size, dc): - module.debug("setting ASG sizes") - module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) - updated_group = dict() - updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] - updated_group['MinSize'] = min_size - updated_group['MaxSize'] = max_size - updated_group['DesiredCapacity'] = dc - update_asg(connection, **updated_group) - - -def replace(connection): - batch_size = module.params.get('replace_batch_size') - wait_timeout = module.params.get('wait_timeout') - wait_for_instances = module.params.get('wait_for_instances') - group_name = module.params.get('name') - max_size = module.params.get('max_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - launch_config_name = module.params.get('launch_config_name') - - # Required to maintain the default value being set to 'true' - if launch_config_name: - lc_check = module.params.get('lc_check') - else: - lc_check = False - # Mirror above behavior for Launch Templates - launch_template = module.params.get('launch_template') - if launch_template: - lt_check = module.params.get('lt_check') - else: - lt_check = False - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - - as_group = describe_autoscaling_groups(connection, group_name)[0] - if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] - - if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances') - - props = get_properties(as_group) - instances = props['instances'] - if replace_all_instances: - # If replacing all instances, then set replace_instances to current set - # This allows replace_instances and replace_all_instances to behave same - replace_instances = instances - if replace_instances: - instances = replace_instances - - # check to see if instances are replaceable if checking launch configs - if launch_config_name: - new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances) - elif launch_template: - new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances) - - num_new_inst_needed = desired_capacity - len(new_instances) - - if lc_check or lt_check: - if num_new_inst_needed == 0 and old_instances: - module.debug("No new instances needed, but old instances are present. 
Removing old instances") - terminate_batch(connection, old_instances, instances, True) - as_group = describe_autoscaling_groups(connection, group_name)[0] - props = get_properties(as_group) - changed = True - return changed, props - - # we don't want to spin up extra instances if not necessary - if num_new_inst_needed < batch_size: - module.debug("Overriding batch size to %s" % num_new_inst_needed) - batch_size = num_new_inst_needed - - if not old_instances: - changed = False - return changed, props - - # check if min_size/max_size/desired capacity have been specified and if not use ASG values - if min_size is None: - min_size = as_group['MinSize'] - if max_size is None: - max_size = as_group['MaxSize'] - - # set temporary settings and wait for them to be reached - # This should get overwritten if the number of instances left is less than the batch size. - - as_group = describe_autoscaling_groups(connection, group_name)[0] - update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) - - if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances') - wait_for_elb(connection, group_name) - wait_for_target_group(connection, group_name) - - as_group = describe_autoscaling_groups(connection, group_name)[0] - props = get_properties(as_group) - instances = props['instances'] - if replace_instances: - instances = replace_instances - - module.debug("beginning main loop") - for i in get_chunks(instances, batch_size): - # break out of this loop if we have enough new instances - break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False) - - if wait_for_instances: - wait_for_term_inst(connection, term_instances) - wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances') - wait_for_elb(connection, group_name) - wait_for_target_group(connection, group_name) - - if break_early: - module.debug("breaking loop") - break - - update_size(connection, as_group, max_size, min_size, desired_capacity) - as_group = describe_autoscaling_groups(connection, group_name)[0] - asg_properties = get_properties(as_group) - module.debug("Rolling update complete.") - changed = True - return changed, asg_properties - - -def detach(connection): - group_name = module.params.get('name') - detach_instances = module.params.get('detach_instances') - as_group = describe_autoscaling_groups(connection, group_name)[0] - decrement_desired_capacity = module.params.get('decrement_desired_capacity') - min_size = module.params.get('min_size') - props = get_properties(as_group) - instances = props['instances'] - - # check if provided instance exists in asg, create list of instances to detach which exist in asg - instances_to_detach = [] - for instance_id in detach_instances: - if instance_id in instances: - instances_to_detach.append(instance_id) - - # check if setting decrement_desired_capacity will make desired_capacity smaller - # than the currently set minimum size in ASG configuration - if decrement_desired_capacity: - decremented_desired_capacity = len(instances) - len(instances_to_detach) - if min_size and min_size > decremented_desired_capacity: - module.fail_json( - msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ - which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size)) - - if instances_to_detach: - try: - 
detach_asg_instances(connection, instances_to_detach, group_name, decrement_desired_capacity) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach instances from AutoScaling Group") - - asg_properties = get_properties(as_group) - return True, asg_properties - - -def get_instances_by_launch_config(props, lc_check, initial_instances): - new_instances = [] - old_instances = [] - # old instances are those that have the old launch config - if lc_check: - for i in props['instances']: - # Check if migrating from launch_template to launch_config first - if 'launch_template' in props['instance_facts'][i]: - old_instances.append(i) - elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']: - new_instances.append(i) - else: - old_instances.append(i) - - else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: - if i not in initial_instances: - new_instances.append(i) - else: - old_instances.append(i) - - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) - - return new_instances, old_instances - - -def get_instances_by_launch_template(props, lt_check, initial_instances): - new_instances = [] - old_instances = [] - # old instances are those that have the old launch template or version of the same launch template - if lt_check: - for i in props['instances']: - # Check if migrating from launch_config_name to launch_template_name first - if 'launch_config_name' in props['instance_facts'][i]: - old_instances.append(i) - elif props['instance_facts'][i].get('launch_template') == props['launch_template']: - new_instances.append(i) - else: - old_instances.append(i) - else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: - if i not in initial_instances: - new_instances.append(i) - else: - old_instances.append(i) - - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) - - return new_instances, old_instances - - -def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances): - instances_to_terminate = [] - instances = (inst_id for inst_id in replace_instances if inst_id in props['instances']) - # check to make sure instances given are actually in the given ASG - # and they have a non-current launch config - if 'launch_config_name' in module.params: - if lc_check: - for i in instances: - if ( - 'launch_template' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name'] - ): - instances_to_terminate.append(i) - else: - for i in instances: - if i in initial_instances: - instances_to_terminate.append(i) - elif 'launch_template' in module.params: - if lt_check: - for i in instances: - if ( - 'launch_config_name' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_template'] != props['launch_template'] - ): - instances_to_terminate.append(i) - else: - for i in instances: - if i in initial_instances: - instances_to_terminate.append(i) - - return instances_to_terminate - - -def terminate_batch(connection, replace_instances, initial_instances, leftovers=False): - batch_size = module.params.get('replace_batch_size') - min_size = module.params.get('min_size') - desired_capacity = 
module.params.get('desired_capacity') - group_name = module.params.get('name') - lc_check = module.params.get('lc_check') - lt_check = module.params.get('lt_check') - decrement_capacity = False - break_loop = False - - as_group = describe_autoscaling_groups(connection, group_name)[0] - if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] - - props = get_properties(as_group) - desired_size = as_group['MinSize'] - if module.params.get('launch_config_name'): - new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances) - else: - new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances) - num_new_inst_needed = desired_capacity - len(new_instances) - - # check to make sure instances given are actually in the given ASG - # and they have a non-current launch config - instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) - - module.debug("new instances needed: %s" % num_new_inst_needed) - module.debug("new instances: %s" % new_instances) - module.debug("old instances: %s" % old_instances) - module.debug("batch instances: %s" % ",".join(instances_to_terminate)) - - if num_new_inst_needed == 0: - decrement_capacity = True - if as_group['MinSize'] != min_size: - if min_size is None: - min_size = as_group['MinSize'] - updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size) - update_asg(connection, **updated_params) - module.debug("Updating minimum size back to original of %s" % min_size) - # if there are some leftover old instances, but we are already at capacity with new ones - # we don't want to decrement capacity - if leftovers: - decrement_capacity = False - break_loop = True - instances_to_terminate = old_instances - desired_size = min_size - module.debug("No new instances needed") - - if num_new_inst_needed < batch_size and num_new_inst_needed != 0: - instances_to_terminate = instances_to_terminate[:num_new_inst_needed] - decrement_capacity = False - break_loop = False - module.debug("%s new instances needed" % num_new_inst_needed) - - module.debug("decrementing capacity: %s" % decrement_capacity) - - for instance_id in instances_to_terminate: - elb_dreg(connection, group_name, instance_id) - module.debug("terminating instance: %s" % instance_id) - terminate_asg_instance(connection, instance_id, decrement_capacity) - - # we wait to make sure the machines we marked as Unhealthy are - # no longer in the list - - return break_loop, desired_size, instances_to_terminate - - -def wait_for_term_inst(connection, term_instances): - wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('name') - as_group = describe_autoscaling_groups(connection, group_name)[0] - count = 1 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and count > 0: - module.debug("waiting for instances to terminate") - count = 0 - as_group = describe_autoscaling_groups(connection, group_name)[0] - props = get_properties(as_group) - instance_facts = props['instance_facts'] - instances = (i for i in instance_facts if i in term_instances) - for i in instances: - lifecycle = instance_facts[i]['lifecycle_state'] - health = instance_facts[i]['health_status'] - module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) - if lifecycle.startswith('Terminating') or health == 'Unhealthy': - count += 1 - time.sleep(10) - - if wait_timeout <= time.time(): - # waiting took too long - 
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) - - -def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): - # make sure we have the latest stats after that last loop. - as_group = describe_autoscaling_groups(connection, group_name)[0] - props = get_properties(as_group) - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) - # now we make sure that we have enough instances in a viable state - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and desired_size > props[prop]: - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) - time.sleep(10) - as_group = describe_autoscaling_groups(connection, group_name)[0] - props = get_properties(as_group) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) - module.debug("Reached %s: %s" % (prop, desired_size)) - return props - - -def asg_exists(connection): - group_name = module.params.get('name') - as_group = describe_autoscaling_groups(connection, group_name) - return bool(len(as_group)) - - -def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - load_balancers=dict(type='list', elements='str'), - target_group_arns=dict(type='list', elements='str'), - availability_zones=dict(type='list', elements='str'), - launch_config_name=dict(type='str'), - launch_template=dict( - type='dict', - default=None, - options=dict( - version=dict(type='str'), - launch_template_name=dict(type='str'), - launch_template_id=dict(type='str'), - ) - ), - min_size=dict(type='int'), - max_size=dict(type='int'), - max_instance_lifetime=dict(type='int'), - mixed_instances_policy=dict( - type='dict', - default=None, - options=dict( - instance_types=dict( - type='list', - elements='str' - ), - instances_distribution=dict( - type='dict', - default=None, - options=dict( - on_demand_allocation_strategy=dict(type='str'), - on_demand_base_capacity=dict(type='int'), - on_demand_percentage_above_base_capacity=dict(type='int'), - spot_allocation_strategy=dict(type='str'), - spot_instance_pools=dict(type='int'), - spot_max_price=dict(type='str'), - ) - ) - ) - ), - placement_group=dict(type='str'), - desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='list', elements='str'), - replace_batch_size=dict(type='int', default=1), - replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[], elements='str'), - detach_instances=dict(type='list', default=[], elements='str'), - decrement_desired_capacity=dict(type='bool', default=False), - lc_check=dict(type='bool', default=True), - lt_check=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[], elements='dict'), - purge_tags=dict(type='bool', default=False), - health_check_period=dict(type='int', default=300), - health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - default_cooldown=dict(type='int', default=300), - wait_for_instances=dict(type='bool', default=True), - termination_policies=dict(type='list', default='Default', elements='str'), - notification_topic=dict(type='str', default=None), - notification_types=dict( - type='list', - default=[ - 'autoscaling:EC2_INSTANCE_LAUNCH', - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', - 'autoscaling:EC2_INSTANCE_TERMINATE', 
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' - ], - elements='str' - ), - suspend_processes=dict(type='list', default=[], elements='str'), - metrics_collection=dict(type='bool', default=False), - metrics_granularity=dict(type='str', default='1Minute'), - metrics_list=dict( - type='list', - default=[ - 'GroupMinSize', - 'GroupMaxSize', - 'GroupDesiredCapacity', - 'GroupInServiceInstances', - 'GroupPendingInstances', - 'GroupStandbyInstances', - 'GroupTerminatingInstances', - 'GroupTotalInstances' - ], - elements='str' - ) - ) - - global module - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['replace_all_instances', 'replace_instances'], - ['replace_all_instances', 'detach_instances'], - ['launch_config_name', 'launch_template'], - ] - ) - - state = module.params.get('state') - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - detach_instances = module.params.get('detach_instances') - - connection = module.client('autoscaling') - changed = create_changed = replace_changed = detach_changed = False - exists = asg_exists(connection) - - if state == 'present': - create_changed, asg_properties = create_autoscaling_group(connection) - elif state == 'absent': - changed = delete_autoscaling_group(connection) - module.exit_json(changed=changed) - - # Only replace instances if asg existed at start of call - if ( - exists - and (replace_all_instances or replace_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) - ): - replace_changed, asg_properties = replace(connection) - - # Only detach instances if asg existed at start of call - if ( - exists - and (detach_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) - ): - detach_changed, asg_properties = detach(connection) - - if create_changed or replace_changed or detach_changed: - changed = True - - module.exit_json(changed=changed, **asg_properties) - - -if __name__ == '__main__': - main() diff --git a/autoscaling_group_info.py b/autoscaling_group_info.py deleted file mode 100644 index e8ec13a12ba..00000000000 --- a/autoscaling_group_info.py +++ /dev/null @@ -1,458 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: autoscaling_group_info -version_added: 1.0.0 -short_description: Gather information about EC2 Auto Scaling Groups (ASGs) in AWS -description: - - Gather information about EC2 Auto Scaling Groups (ASGs) in AWS. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_info). - The usage did not change. -author: - - "Rob White (@wimnat)" -options: - name: - description: - - The prefix or name of the auto scaling group(s) you are searching for. - - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match." - type: str - required: false - tags: - description: - - > - A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling - group(s) you are searching for. 
- required: false - type: dict -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Find all groups - community.aws.autoscaling_group_info: - register: asgs - -- name: Find a group with matching name/prefix - community.aws.autoscaling_group_info: - name: public-webserver-asg - register: asgs - -- name: Find a group with matching tags - community.aws.autoscaling_group_info: - tags: - project: webapp - env: production - register: asgs - -- name: Find a group with matching name/prefix and tags - community.aws.autoscaling_group_info: - name: myproject - tags: - env: production - register: asgs - -- name: Fail if no groups are found - community.aws.autoscaling_group_info: - name: public-webserver-asg - register: asgs - failed_when: "{{ asgs.results | length == 0 }}" - -- name: Fail if more than 1 group is found - community.aws.autoscaling_group_info: - name: public-webserver-asg - register: asgs - failed_when: "{{ asgs.results | length > 1 }}" -''' - -RETURN = ''' ---- -auto_scaling_group_arn: - description: The Amazon Resource Name of the ASG - returned: success - type: str - sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1" -auto_scaling_group_name: - description: Name of autoscaling group - returned: success - type: str - sample: "public-webapp-production-1" -availability_zones: - description: List of Availability Zones that are enabled for this ASG. - returned: success - type: list - sample: ["us-west-2a", "us-west-2b", "us-west-2a"] -created_time: - description: The date and time this ASG was created, in ISO 8601 format. - returned: success - type: str - sample: "2015-11-25T00:05:36.309Z" -default_cooldown: - description: The default cooldown time in seconds. - returned: success - type: int - sample: 300 -desired_capacity: - description: The number of EC2 instances that should be running in this group. - returned: success - type: int - sample: 3 -health_check_period: - description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. - returned: success - type: int - sample: 30 -health_check_type: - description: The service you want the health status from, one of "EC2" or "ELB". - returned: success - type: str - sample: "ELB" -instances: - description: List of EC2 instances and their status as it relates to the ASG. - returned: success - type: list - sample: [ - { - "availability_zone": "us-west-2a", - "health_status": "Healthy", - "instance_id": "i-es22ad25", - "launch_configuration_name": "public-webapp-production-1", - "lifecycle_state": "InService", - "protected_from_scale_in": "false" - } - ] -launch_config_name: - description: > - Name of launch configuration associated with the ASG. Same as launch_configuration_name, - provided for compatibility with M(community.aws.autoscaling_group) module. - returned: success - type: str - sample: "public-webapp-production-1" -launch_configuration_name: - description: Name of launch configuration associated with the ASG. - returned: success - type: str - sample: "public-webapp-production-1" -lifecycle_hooks: - description: List of lifecycle hooks for the ASG. 
- returned: success - type: list - sample: [ - { - "AutoScalingGroupName": "public-webapp-production-1", - "DefaultResult": "ABANDON", - "GlobalTimeout": 172800, - "HeartbeatTimeout": 3600, - "LifecycleHookName": "instance-launch", - "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING" - }, - { - "AutoScalingGroupName": "public-webapp-production-1", - "DefaultResult": "ABANDON", - "GlobalTimeout": 172800, - "HeartbeatTimeout": 3600, - "LifecycleHookName": "instance-terminate", - "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" - } - ] -load_balancer_names: - description: List of load balancer names attached to the ASG. - returned: success - type: list - sample: ["elb-webapp-prod"] -max_size: - description: Maximum size of group - returned: success - type: int - sample: 3 -min_size: - description: Minimum size of group - returned: success - type: int - sample: 1 -new_instances_protected_from_scale_in: - description: Whether or not new instances are protected from automatic scaling in. - returned: success - type: bool - sample: "false" -placement_group: - description: Placement group into which instances are launched, if any. - returned: success - type: str - sample: None -status: - description: The current state of the group when DeleteAutoScalingGroup is in progress. - returned: success - type: str - sample: None -tags: - description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. - returned: success - type: list - sample: [ - { - "key": "Name", - "value": "public-webapp-production-1", - "resource_id": "public-webapp-production-1", - "resource_type": "auto-scaling-group", - "propagate_at_launch": "true" - }, - { - "key": "env", - "value": "production", - "resource_id": "public-webapp-production-1", - "resource_type": "auto-scaling-group", - "propagate_at_launch": "true" - } - ] -target_group_arns: - description: List of ARNs of the target groups that the ASG populates - returned: success - type: list - sample: [ - "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b", - "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234" - ] -target_group_names: - description: List of names of the target groups that the ASG populates - returned: success - type: list - sample: [ - "target-group-host-hello", - "target-group-path-world" - ] -termination_policies: - description: A list of termination policies for the group. - returned: success - type: list - sample: ["Default"] -''' - -import re - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - - -def match_asg_tags(tags_to_match, asg): - for key, value in tags_to_match.items(): - for tag in asg['Tags']: - if key == tag['Key'] and value == tag['Value']: - break - else: - return False - return True - - -def find_asgs(conn, module, name=None, tags=None): - """ - Args: - conn (boto3.AutoScaling.Client): Valid Boto3 ASG client. - module (AnsibleAWSModule): Initialized AnsibleAWSModule object. - name (str): Optional name of the ASG you are looking for. - tags (dict): Optional dictionary of tags and values to search for. 
- - Basic Usage: - >>> name = 'public-webapp-production' - >>> tags = { 'env': 'production' } - >>> conn = boto3.client('autoscaling', region_name='us-west-2') - >>> results = find_asgs(conn, module, name=name, tags=tags) - - Returns: - List - [ - { - "auto_scaling_group_arn": ( - "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:" - "autoScalingGroupName/public-webapp-production" - ), - "auto_scaling_group_name": "public-webapp-production", - "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"], - "created_time": "2016-02-02T23:28:42.481000+00:00", - "default_cooldown": 300, - "desired_capacity": 2, - "enabled_metrics": [], - "health_check_grace_period": 300, - "health_check_type": "ELB", - "instances": - [ - { - "availability_zone": "us-west-2c", - "health_status": "Healthy", - "instance_id": "i-047a12cb", - "launch_configuration_name": "public-webapp-production-1", - "lifecycle_state": "InService", - "protected_from_scale_in": false - }, - { - "availability_zone": "us-west-2a", - "health_status": "Healthy", - "instance_id": "i-7a29df2c", - "launch_configuration_name": "public-webapp-production-1", - "lifecycle_state": "InService", - "protected_from_scale_in": false - } - ], - "launch_config_name": "public-webapp-production-1", - "launch_configuration_name": "public-webapp-production-1", - "lifecycle_hooks": - [ - { - "AutoScalingGroupName": "public-webapp-production-1", - "DefaultResult": "ABANDON", - "GlobalTimeout": 172800, - "HeartbeatTimeout": 3600, - "LifecycleHookName": "instance-launch", - "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING" - }, - { - "AutoScalingGroupName": "public-webapp-production-1", - "DefaultResult": "ABANDON", - "GlobalTimeout": 172800, - "HeartbeatTimeout": 3600, - "LifecycleHookName": "instance-terminate", - "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" - } - ], - "load_balancer_names": ["public-webapp-production-lb"], - "max_size": 4, - "min_size": 2, - "new_instances_protected_from_scale_in": false, - "placement_group": None, - "status": None, - "suspended_processes": [], - "tags": - [ - { - "key": "Name", - "propagate_at_launch": true, - "resource_id": "public-webapp-production", - "resource_type": "auto-scaling-group", - "value": "public-webapp-production" - }, - { - "key": "env", - "propagate_at_launch": true, - "resource_id": "public-webapp-production", - "resource_type": "auto-scaling-group", - "value": "production" - } - ], - "target_group_names": [], - "target_group_arns": [], - "termination_policies": - [ - "Default" - ], - "vpc_zone_identifier": - [ - "subnet-a1b1c1d1", - "subnet-a2b2c2d2", - "subnet-a3b3c3d3" - ] - } - ] - """ - - try: - asgs_paginator = conn.get_paginator('describe_auto_scaling_groups') - asgs = asgs_paginator.paginate().build_full_result() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups') - - if not asgs: - return asgs - - try: - elbv2 = module.client('elbv2') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): - # This is nice to have, not essential - elbv2 = None - matched_asgs = [] - - if name is not None: - # the user specified a name; compile it as an implicitly-anchored regex - name_prog = re.compile(r'^' + name) - - for asg in asgs['AutoScalingGroups']: - if name: - matched_name = name_prog.search(asg['AutoScalingGroupName']) - else: - matched_name = True - - if tags: - matched_tags = match_asg_tags(tags, asg) - else: - matched_tags = True - - if matched_name and 
matched_tags: - asg = camel_dict_to_snake_dict(asg) - # compatibility with autoscaling_group module - if 'launch_configuration_name' in asg: - asg['launch_config_name'] = asg['launch_configuration_name'] - # workaround for https://github.com/ansible/ansible/pull/25015 - if 'target_group_ar_ns' in asg: - asg['target_group_arns'] = asg['target_group_ar_ns'] - del asg['target_group_ar_ns'] - if asg.get('target_group_arns'): - if elbv2: - try: - tg_paginator = elbv2.get_paginator('describe_target_groups') - tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result() - asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']] - except is_boto3_error_code('TargetGroupNotFound'): - asg['target_group_names'] = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe Target Groups") - else: - asg['target_group_names'] = [] - # get asg lifecycle hooks if any - try: - asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name']) - asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks") - matched_asgs.append(asg) - - return matched_asgs - - -def main(): - - argument_spec = dict( - name=dict(type='str'), - tags=dict(type='dict'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - asg_name = module.params.get('name') - asg_tags = module.params.get('tags') - - autoscaling = module.client('autoscaling') - - results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) - module.exit_json(results=results) - - -if __name__ == '__main__': - main() From 000f8fe0fcc530d1313dae65317676e4499f988e Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 15:58:40 +0200 Subject: [PATCH 581/683] Migrate cloudwatchlogs* modules and tests (#1489) Migrate cloudwatchlogs* modules and tests Depends-On: ansible-collections/amazon.aws#1031 Remove cloudwatchlogs* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- cloudwatchlogs_log_group.py | 348 ---------------------- cloudwatchlogs_log_group_info.py | 136 --------- cloudwatchlogs_log_group_metric_filter.py | 216 -------------- 3 files changed, 700 deletions(-) delete mode 100644 cloudwatchlogs_log_group.py delete mode 100644 cloudwatchlogs_log_group_info.py delete mode 100644 cloudwatchlogs_log_group_metric_filter.py diff --git a/cloudwatchlogs_log_group.py b/cloudwatchlogs_log_group.py deleted file mode 100644 index 31caeb60daf..00000000000 --- a/cloudwatchlogs_log_group.py +++ /dev/null @@ -1,348 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloudwatchlogs_log_group -version_added: 1.0.0 -short_description: create or delete log_group in CloudWatchLogs -notes: - - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html). - - Support for I(purge_tags) was added in release 4.0.0. 
-description: - - Create or delete log_group in CloudWatchLogs. -author: - - Willian Ricardo (@willricardo) -options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - default: present - required: false - type: str - log_group_name: - description: - - The name of the log group. - required: true - type: str - kms_key_id: - description: - - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. - required: false - type: str - retention: - description: - - The number of days to retain the log events in the specified log group. - - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]" - - Mutually exclusive with I(purge_retention_policy). - required: false - type: int - purge_retention_policy: - description: - - "Whether to purge the retention policy or not." - - "Mutually exclusive with I(retention) and I(overwrite)." - default: false - required: false - type: bool - overwrite: - description: - - Whether an existing log group should be overwritten on create. - - Mutually exclusive with I(purge_retention_policy). - default: false - required: false - type: bool -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- community.aws.cloudwatchlogs_log_group: - log_group_name: test-log-group - -- community.aws.cloudwatchlogs_log_group: - state: present - log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } - -- community.aws.cloudwatchlogs_log_group: - state: present - log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } - kms_key_id: arn:aws:kms:region:account-id:key/key-id - -- community.aws.cloudwatchlogs_log_group: - state: absent - log_group_name: test-log-group - -''' - -RETURN = ''' -log_groups: - description: Return the list of complex objects representing log groups - returned: success - type: complex - version_added: 4.0.0 - contains: - log_group_name: - description: The name of the log group. - returned: always - type: str - creation_time: - description: The creation time of the log group. - returned: always - type: int - retention_in_days: - description: The number of days to retain the log events in the specified log group. - returned: always - type: int - metric_filter_count: - description: The number of metric filters. - returned: always - type: int - arn: - description: The Amazon Resource Name (ARN) of the log group. - returned: always - type: str - stored_bytes: - description: The number of bytes stored. - returned: always - type: str - kms_key_id: - description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. - returned: always - type: str - tags: - description: A dictionary representing the tags on the log group. 
-            returned: always
-            type: dict
-'''
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
-
-
-def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
-    request = {'logGroupName': log_group_name}
-    if kms_key_id:
-        request['kmsKeyId'] = kms_key_id
-    if tags:
-        request['tags'] = tags
-
-    if module.check_mode:
-        module.exit_json(changed=True, msg="Would have created log group if not in check_mode.")
-
-    try:
-        client.create_log_group(**request)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable to create log group")
-
-    if retention:
-        input_retention_policy(client=client,
-                               log_group_name=log_group_name,
-                               retention=retention, module=module)
-
-    found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module)
-
-    if not found_log_group:
-        module.fail_json(msg="The CloudWatch Logs log group was not created. Please try again.")
-    return found_log_group
-
-
-def input_retention_policy(client, log_group_name, retention, module):
-    try:
-        permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
-
-        if retention in permitted_values:
-            client.put_retention_policy(logGroupName=log_group_name,
-                                        retentionInDays=retention)
-        else:
-            # Roll back the freshly created group before failing on a bad value.
-            delete_log_group(client=client, log_group_name=log_group_name, module=module)
-            module.fail_json(msg="Invalid retention value.
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) - - -def delete_retention_policy(client, log_group_name, module): - if module.check_mode: - return True - - try: - client.delete_retention_policy(logGroupName=log_group_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) - - -def delete_log_group(client, log_group_name, module): - if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted log group if not in check_mode.") - - try: - client.delete_log_group(logGroupName=log_group_name) - except is_boto3_error_code('ResourceNotFoundException'): - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) - - -def describe_log_group(client, log_group_name, module): - try: - desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) - - matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name] - - if not matching_logs: - return {} - - found_log_group = matching_logs[0] - - try: - tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): - tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - - found_log_group['tags'] = tags.get('tags', {}) - return found_log_group - - -def format_result(found_log_group): - # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group - # Return both to avoid a breaking change. 
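-    # Illustrative result shape (values abridged) for a group named
-    # "test-log-group":
-    #   {"log_groups": [{"log_group_name": "test-log-group", ...}],
-    #    "log_group_name": "test-log-group", ...}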
- log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags']) - return dict(log_groups=[log_group], **log_group) - - -def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): - if desired_tags is None: - return False - - group_name = module.params.get('log_group_name') - current_tags = found_log_group.get('tags', {}) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags) - - if not tags_to_add and not tags_to_remove: - return False - if module.check_mode: - return True - - try: - if tags_to_remove: - client.untag_log_group(logGroupName=group_name, tags=tags_to_remove) - if tags_to_add: - client.tag_log_group(logGroupName=group_name, tags=tags_to_add) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update tags') - - return True - - -def main(): - argument_spec = dict( - log_group_name=dict(required=True, type='str'), - state=dict(choices=['present', 'absent'], - default='present'), - kms_key_id=dict(required=False, type='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - retention=dict(required=False, type='int'), - purge_retention_policy=dict(required=False, type='bool', default=False), - overwrite=dict(required=False, type='bool', default=False), - ) - - mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] - module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) - - try: - logs = module.client('logs') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - state = module.params.get('state') - changed = False - - # Determine if the log group exists - found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) - - if state == 'present': - if found_log_group: - if module.params['overwrite'] is True: - changed = True - delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - tags=module.params['tags'], - retention=module.params['retention'], - module=module) - else: - changed |= ensure_tags(client=logs, - found_log_group=found_log_group, - desired_tags=module.params['tags'], - purge_tags=module.params['purge_tags'], - module=module) - if module.params['purge_retention_policy']: - if found_log_group.get('retentionInDays'): - changed = True - delete_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - elif module.params['retention'] != found_log_group.get('retentionInDays'): - if module.params['retention'] is not None: - changed = True - input_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - retention=module.params['retention'], - module=module) - if changed: - found_log_group = describe_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - - elif not found_log_group: - changed = True - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - tags=module.params['tags'], - retention=module.params['retention'], - module=module) - - 
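-        # Whichever branch ran, found_log_group now reflects the final state
-        # of the group (it is re-described after any change), so the result
-        # built below is current.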
result = format_result(found_log_group) - module.exit_json(changed=changed, **result) - - elif state == 'absent': - if found_log_group: - changed = True - delete_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/cloudwatchlogs_log_group_info.py b/cloudwatchlogs_log_group_info.py deleted file mode 100644 index b3d0ca223b2..00000000000 --- a/cloudwatchlogs_log_group_info.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloudwatchlogs_log_group_info -version_added: 1.0.0 -short_description: Get information about log_group in CloudWatchLogs -description: - - Lists the specified log groups. You can list all your log groups or filter the results by prefix. -author: - - Willian Ricardo (@willricardo) -options: - log_group_name: - description: - - The name or prefix of the log group to filter by. - type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. -- community.aws.cloudwatchlogs_log_group_info: - log_group_name: test-log-group -''' - -RETURN = ''' -log_groups: - description: Return the list of complex objects representing log groups - returned: success - type: complex - contains: - log_group_name: - description: The name of the log group. - returned: always - type: str - creation_time: - description: The creation time of the log group. - returned: always - type: int - retention_in_days: - description: The number of days to retain the log events in the specified log group. - returned: always - type: int - metric_filter_count: - description: The number of metric filters. - returned: always - type: int - arn: - description: The Amazon Resource Name (ARN) of the log group. - returned: always - type: str - stored_bytes: - description: The number of bytes stored. - returned: always - type: str - kms_key_id: - description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. - returned: always - type: str - tags: - description: A dictionary representing the tags on the log group. 
- returned: always - type: dict - version_added: 4.0.0 -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - - -def describe_log_group(client, log_group_name, module): - params = {} - if log_group_name: - params['logGroupNamePrefix'] = log_group_name - try: - paginator = client.get_paginator('describe_log_groups') - desc_log_group = paginator.paginate(**params).build_full_result() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) - - for log_group in desc_log_group['logGroups']: - log_group_name = log_group['logGroupName'] - try: - tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): - tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - log_group['tags'] = tags.get('tags', {}) - - return desc_log_group - - -def main(): - argument_spec = dict( - log_group_name=dict(), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - try: - logs = module.client('logs') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - desc_log_group = describe_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - final_log_group_snake = [] - - for log_group in desc_log_group['logGroups']: - final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags'])) - - desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) - module.exit_json(**desc_log_group_result) - - -if __name__ == '__main__': - main() diff --git a/cloudwatchlogs_log_group_metric_filter.py b/cloudwatchlogs_log_group_metric_filter.py deleted file mode 100644 index 26cbc65ce34..00000000000 --- a/cloudwatchlogs_log_group_metric_filter.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloudwatchlogs_log_group_metric_filter -version_added: 1.0.0 -author: - - "Markus Bergholz (@markuman)" -short_description: Manage CloudWatch log group metric filter -description: - - Create, modify and delete CloudWatch log group metric filter. - - CloudWatch log group metric filter can be use with M(community.aws.ec2_metric_alarm). -options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - required: true - type: str - log_group_name: - description: - - The name of the log group where the metric filter is applied on. - required: true - type: str - filter_name: - description: - - A name for the metric filter you create. 
- required: true - type: str - filter_pattern: - description: - - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present). - type: str - metric_transformation: - description: - - A collection of information that defines how metric data gets emitted. Required when I(state=present). - type: dict - suboptions: - metric_name: - description: - - The name of the cloudWatch metric. - type: str - metric_namespace: - description: - - The namespace of the cloudWatch metric. - type: str - metric_value: - description: - - The value to publish to the cloudWatch metric when a filter pattern matches a log event. - type: str - default_value: - description: - - The value to emit when a filter pattern does not match a log event. - type: float -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -- name: set metric filter on log group /fluentd/testcase - community.aws.cloudwatchlogs_log_group_metric_filter: - log_group_name: /fluentd/testcase - filter_name: BoxFreeStorage - filter_pattern: '{($.value = *) && ($.hostname = "box")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: "$.value" - -- name: delete metric filter on log group /fluentd/testcase - community.aws.cloudwatchlogs_log_group_metric_filter: - log_group_name: /fluentd/testcase - filter_name: BoxFreeStorage - state: absent -''' - -RETURN = """ -metric_filters: - description: Return the origin response value - returned: success - type: list - sample: [ - { - "default_value": 3.1415, - "metric_name": "box_free_space", - "metric_namespace": "made_with_ansible", - "metric_value": "$.value" - } - ] - -""" - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - - -def metricTransformationHandler(metricTransformations, originMetricTransformations=None): - - if originMetricTransformations: - change = False - originMetricTransformations = camel_dict_to_snake_dict( - originMetricTransformations) - for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]: - if metricTransformations.get(item) != originMetricTransformations.get(item): - change = True - else: - change = True - - defaultValue = metricTransformations.get("default_value") - if isinstance(defaultValue, int) or isinstance(defaultValue, float): - retval = [ - { - 'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), - 'defaultValue': defaultValue - } - ] - else: - retval = [ - { - 'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), - } - ] - - return retval, change - - -def main(): - - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - log_group_name=dict(type='str', required=True), - filter_name=dict(type='str', required=True), - filter_pattern=dict(type='str'), - metric_transformation=dict(type='dict', options=dict( - metric_name=dict(type='str'), - metric_namespace=dict(type='str'), - metric_value=dict(type='str'), - default_value=dict(type='float') - )), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - required_if=[('state', 'present', 
['metric_transformation', 'filter_pattern'])] - ) - - log_group_name = module.params.get("log_group_name") - filter_name = module.params.get("filter_name") - filter_pattern = module.params.get("filter_pattern") - metric_transformation = module.params.get("metric_transformation") - state = module.params.get("state") - - cwl = module.client('logs') - - # check if metric filter exists - response = cwl.describe_metric_filters( - logGroupName=log_group_name, - filterNamePrefix=filter_name - ) - - if len(response.get("metricFilters")) == 1: - originMetricTransformations = response.get( - "metricFilters")[0].get("metricTransformations")[0] - originFilterPattern = response.get("metricFilters")[ - 0].get("filterPattern") - else: - originMetricTransformations = None - originFilterPattern = None - change = False - metricTransformation = None - - if state == "absent" and originMetricTransformations: - if not module.check_mode: - response = cwl.delete_metric_filter( - logGroupName=log_group_name, - filterName=filter_name - ) - change = True - metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]] - - elif state == "present": - metricTransformation, change = metricTransformationHandler( - metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations) - - change = change or filter_pattern != originFilterPattern - - if change: - if not module.check_mode: - response = cwl.put_metric_filter( - logGroupName=log_group_name, - filterName=filter_name, - filterPattern=filter_pattern, - metricTransformations=metricTransformation - ) - - metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation] - - module.exit_json(changed=change, metric_filters=metricTransformation) - - -if __name__ == '__main__': - main() From 390ff04ba25c1745d58f2017e509418d6439a4ba Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 16:38:23 +0200 Subject: [PATCH 582/683] Migrate rds_cluster* modules and tests (#1479) Migrate rds_cluster* modules and tests Depends-On: ansible-collections/amazon.aws#1027 Depends-On: #1478 Remove rds_cluster* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mike Graves Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- rds_cluster.py | 1022 --------------------------------------- rds_cluster_info.py | 307 ------------ rds_cluster_snapshot.py | 372 -------------- 3 files changed, 1701 deletions(-) delete mode 100644 rds_cluster.py delete mode 100644 rds_cluster_info.py delete mode 100644 rds_cluster_snapshot.py diff --git a/rds_cluster.py b/rds_cluster.py deleted file mode 100644 index e774e7a66cd..00000000000 --- a/rds_cluster.py +++ /dev/null @@ -1,1022 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2022 Ansible Project -# Copyright (c) 2022 Alina Buzachis (@alinabuzachis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_cluster -version_added: "3.2.0" -short_description: rds_cluster module -description: - - Create, modify, and delete RDS clusters. 
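-# For orientation, a minimal boto3 sketch of the plain create path this
-# module wraps; the identifier and password are illustrative, and the waiter
-# name assumes a botocore release that ships the db_cluster_available waiter:
-#
-#   import boto3
-#
-#   rds = boto3.client('rds')
-#   rds.create_db_cluster(
-#       DBClusterIdentifier='example-cluster',
-#       Engine='aurora-mysql',
-#       MasterUsername='admin',
-#       MasterUserPassword='an-example-password',
-#   )
-#   # Block until the cluster is usable, as the module does when I(wait=true).
-#   rds.get_waiter('db_cluster_available').wait(DBClusterIdentifier='example-cluster')
-#
-# Depending on I(state) and I(creation_source), the module instead selects
-# modify_db_cluster, delete_db_cluster, or one of the restore_db_cluster_*
-# calls (see get_rds_method_attribute_name() below).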
-extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.tags
-author:
-  - Sloane Hertel (@s-hertel)
-  - Alina Buzachis (@alinabuzachis)
-options:
-  # General module options
-  state:
-    description: Whether the cluster should exist or not.
-    choices: ['present', 'absent']
-    default: 'present'
-    type: str
-  creation_source:
-    description: Which source to use if creating from a template (an existing cluster, S3 bucket, or snapshot).
-    choices: ['snapshot', 's3', 'cluster']
-    type: str
-  force_update_password:
-    description:
-      - Set to C(true) to update your cluster password with I(master_user_password).
-      - Since comparing passwords to determine if it needs to be updated is not possible this is set to C(false) by default to allow idempotence.
-    type: bool
-    default: false
-  promote:
-    description: Set to C(true) to promote a read replica cluster.
-    type: bool
-    default: false
-  purge_cloudwatch_logs_exports:
-    description:
-      - Whether or not to disable CloudWatch logs enabled for the DB cluster that are not provided in I(enable_cloudwatch_logs_exports).
-        Set I(enable_cloudwatch_logs_exports) to an empty list to disable all.
-    type: bool
-    default: true
-  purge_security_groups:
-    description:
-      - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the cluster.
-      - Can be applied to I(vpc_security_group_ids).
-    type: bool
-    default: true
-  wait:
-    description: Whether to wait for the cluster to be available or deleted.
-    type: bool
-    default: true
-  # Options that have a corresponding boto3 parameter
-  apply_immediately:
-    description:
-      - A value that specifies whether modifying a cluster with I(new_db_cluster_identifier) and I(master_user_password)
-        should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes
-        are applied during the next maintenance window.
-    type: bool
-    default: false
-  availability_zones:
-    description:
-      - A list of EC2 Availability Zones that instances in the DB cluster can be created in.
-        May be used when creating a cluster or when restoring from S3 or a snapshot.
-    aliases:
-      - zones
-      - az
-    type: list
-    elements: str
-  backtrack_to:
-    description:
-      - The timestamp of the time to backtrack the DB cluster to in ISO 8601 format, such as "2017-07-08T18:00Z".
-    type: str
-  backtrack_window:
-    description:
-      - The target backtrack window, in seconds. To disable backtracking, set this value to C(0).
-      - If specified, this value must be set to a number from C(0) to C(259,200) (72 hours).
-    default: 0
-    type: int
-  backup_retention_period:
-    description:
-      - The number of days for which automated backups are retained (must be within C(1) to C(35)).
-        May be used when creating a new cluster, when restoring from S3, or when modifying a cluster.
-    type: int
-    default: 1
-  character_set_name:
-    description:
-      - The character set to associate with the DB cluster.
-    type: str
-  database_name:
-    description:
-      - The name for your database. If a name is not provided Amazon RDS will not create a database.
-    aliases:
-      - db_name
-    type: str
-  db_cluster_identifier:
-    description:
-      - The DB cluster (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
-        hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
- aliases: - - cluster_id - - id - - cluster_name - type: str - required: true - db_cluster_parameter_group_name: - description: - - The name of the DB cluster parameter group to associate with this DB cluster. - If this argument is omitted when creating a cluster, the default DB cluster parameter group for the specified DB engine and version is used. - type: str - db_subnet_group_name: - description: - - A DB subnet group to associate with this DB cluster if not using the default. - type: str - enable_cloudwatch_logs_exports: - description: - - A list of log types that need to be enabled for exporting to CloudWatch Logs. - - Engine aurora-mysql supports C(audit), C(error), C(general) and C(slowquery). - - Engine aurora-postgresql supports C(postgresql). - type: list - elements: str - deletion_protection: - description: - - A value that indicates whether the DB cluster has deletion protection enabled. - The database can't be deleted when deletion protection is enabled. - By default, deletion protection is disabled. - type: bool - global_cluster_identifier: - description: - - The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster. - type: str - enable_http_endpoint: - description: - - A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. - By default, the HTTP endpoint is disabled. - type: bool - copy_tags_to_snapshot: - description: - - Indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. - The default is not to copy them. - type: bool - domain: - description: - - The Active Directory directory ID to create the DB cluster in. - type: str - domain_iam_role_name: - description: - - Specify the name of the IAM role to be used when making API calls to the Directory Service. - type: str - enable_global_write_forwarding: - description: - - A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database. - By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. - - This value can be only set on Aurora DB clusters that are members of an Aurora global database. - type: bool - enable_iam_database_authentication: - description: - - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. - If this option is omitted when creating the cluster, Amazon RDS sets this to C(false). - type: bool - engine: - description: - - The name of the database engine to be used for this DB cluster. This is required to create a cluster. - choices: - - aurora - - aurora-mysql - - aurora-postgresql - type: str - engine_version: - description: - - The version number of the database engine to use. - - For Aurora MySQL that could be C(5.6.10a), C(5.7.12). - - Aurora PostgreSQL example, C(9.6.3). - type: str - final_snapshot_identifier: - description: - - The DB cluster snapshot identifier of the new DB cluster snapshot created when I(skip_final_snapshot=false). - type: str - force_backtrack: - description: - - A boolean to indicate if the DB cluster should be forced to backtrack when binary logging is enabled. - Otherwise, an error occurs when binary logging is enabled. - type: bool - kms_key_id: - description: - - The AWS KMS key identifier (the ARN, unless you are creating a cluster in the same account that owns the - KMS key, in which case the KMS key alias may be used). 
-      - If I(replication_source_identifier) specifies an encrypted source Amazon RDS will use the key used to encrypt the source.
-      - If I(storage_encrypted=true) and I(replication_source_identifier) is not provided, the default encryption key is used.
-    type: str
-  master_user_password:
-    description:
-      - An 8-41 character password for the master database user.
-      - The password can contain any printable ASCII character except "/", """, or "@".
-      - To modify the password use I(force_update_password). Use I(apply_immediately) to change
-        the password immediately, otherwise it is updated during the next maintenance window.
-    aliases:
-      - password
-    type: str
-  master_username:
-    description:
-      - The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter.
-    aliases:
-      - username
-    type: str
-  new_db_cluster_identifier:
-    description:
-      - The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB cluster.
-      - The identifier must contain from 1 to 63 letters, numbers, or hyphens and the first character must be a
-        letter and may not end in a hyphen or contain consecutive hyphens.
-      - Use I(apply_immediately) to rename immediately, otherwise it is updated during the next maintenance window.
-    aliases:
-      - new_cluster_id
-      - new_id
-      - new_cluster_name
-    type: str
-  option_group_name:
-    description:
-      - The option group to associate with the DB cluster.
-    type: str
-  port:
-    description:
-      - The port number on which the instances in the DB cluster accept connections. If not specified, Amazon RDS
-        defaults this to C(3306) if the I(engine) is C(aurora) and C(5432) if the I(engine) is C(aurora-postgresql).
-    type: int
-  preferred_backup_window:
-    description:
-      - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
-        enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
-        I(preferred_maintenance_window).
-    aliases:
-      - backup_window
-    type: str
-  preferred_maintenance_window:
-    description:
-      - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
-        be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
-    aliases:
-      - maintenance_window
-    type: str
-  replication_source_identifier:
-    description:
-      - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.
-    aliases:
-      - replication_src_id
-    type: str
-  restore_to_time:
-    description:
-      - The UTC date and time to restore the DB cluster to. Must be in the format "2015-03-07T23:45:00Z".
-      - If this is not provided while restoring a cluster, I(use_latest_restorable_time) must be provided.
-        May not be specified if I(restore_type) is C(copy-on-write).
-    type: str
-  restore_type:
-    description:
-      - The type of restore to be performed. If not provided, Amazon RDS uses full-copy.
-    choices:
-      - full-copy
-      - copy-on-write
-    type: str
-  role_arn:
-    description:
-      - The Amazon Resource Name (ARN) of the IAM role to associate with the Aurora DB cluster, for example
-        "arn:aws:iam::123456789012:role/AuroraAccessRole".
-    type: str
-  s3_bucket_name:
-    description:
-      - The name of the Amazon S3 bucket that contains the data used to create the Amazon Aurora DB cluster.
- type: str - s3_ingestion_role_arn: - description: - - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access - the Amazon S3 bucket on your behalf. - type: str - s3_prefix: - description: - - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster. - - If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket. - type: str - skip_final_snapshot: - description: - - Whether a final DB cluster snapshot is created before the DB cluster is deleted. - - If this is C(false), I(final_snapshot_identifier) must be provided. - type: bool - default: false - snapshot_identifier: - description: - - The identifier for the DB snapshot or DB cluster snapshot to restore from. - - You can use either the name or the ARN to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot. - type: str - source_db_cluster_identifier: - description: - - The identifier of the source DB cluster from which to restore. - type: str - source_engine: - description: - - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. - choices: - - mysql - type: str - source_engine_version: - description: - - The version of the database that the backup files were created from. - type: str - source_region: - description: - - The ID of the region that contains the source for the DB cluster. - type: str - storage_encrypted: - description: - - Whether the DB cluster is encrypted. - type: bool - use_earliest_time_on_point_in_time_unavailable: - description: - - If I(backtrack_to) is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to - the earliest possible backtrack time. Otherwise, an error occurs. - type: bool - use_latest_restorable_time: - description: - - Whether to restore the DB cluster to the latest restorable backup time. Only one of I(use_latest_restorable_time) - and I(restore_to_time) may be provided. - type: bool - vpc_security_group_ids: - description: - - A list of EC2 VPC security groups to associate with the DB cluster. - type: list - elements: str -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
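-# Passwords are write-only in the RDS API: re-running a task with the same
-# master_user_password reports no change unless force_update_password=true is
-# also set (see the "Modify password" example below).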
-- name: Create minimal aurora cluster in default VPC and default subnet group - community.aws.rds_cluster: - cluster_id: "{{ cluster_id }}" - engine: "aurora" - password: "{{ password }}" - username: "{{ username }}" - -- name: Add a new security group without purge - community.aws.rds_cluster: - id: "{{ cluster_id }}" - state: present - vpc_security_group_ids: - - sg-0be17ba10c9286b0b - purge_security_groups: false - -- name: Modify password - community.aws.rds_cluster: - id: "{{ cluster_id }}" - state: present - password: "{{ new_password }}" - force_update_password: true - apply_immediately: true - -- name: Rename the cluster - community.aws.rds_cluster: - engine: aurora - password: "{{ password }}" - username: "{{ username }}" - cluster_id: "cluster-{{ resource_prefix }}" - new_cluster_id: "cluster-{{ resource_prefix }}-renamed" - apply_immediately: true - -- name: Delete aurora cluster without creating a final snapshot - community.aws.rds_cluster: - engine: aurora - password: "{{ password }}" - username: "{{ username }}" - cluster_id: "{{ cluster_id }}" - skip_final_snapshot: True - tags: - Name: "cluster-{{ resource_prefix }}" - Created_By: "Ansible_rds_cluster_integration_test" - state: absent - -- name: Restore cluster from source snapshot - community.aws.rds_cluster: - engine: aurora - password: "{{ password }}" - username: "{{ username }}" - cluster_id: "cluster-{{ resource_prefix }}-restored" - snapshot_identifier: "cluster-{{ resource_prefix }}-snapshot" -''' - -RETURN = r''' -activity_stream_status: - description: The status of the database activity stream. - returned: always - type: str - sample: stopped -allocated_storage: - description: - - The allocated storage size in gigabytes. Since aurora storage size is not fixed this is - always 1 for aurora database engines. - returned: always - type: int - sample: 1 -associated_roles: - description: - - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated - with the DB cluster. Each dictionary contains the role_arn and the status of the role. - returned: always - type: list - sample: [] -availability_zones: - description: The list of availability zones that instances in the DB cluster can be created in. - returned: always - type: list - sample: - - us-east-1c - - us-east-1a - - us-east-1e -backup_retention_period: - description: The number of days for which automatic DB snapshots are retained. - returned: always - type: int - sample: 1 -changed: - description: If the RDS cluster has changed. - returned: always - type: bool - sample: true -cluster_create_time: - description: The time in UTC when the DB cluster was created. - returned: always - type: str - sample: '2018-06-29T14:08:58.491000+00:00' -copy_tags_to_snapshot: - description: - - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. - returned: always - type: bool - sample: false -cross_account_clone: - description: - - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. - returned: always - type: bool - sample: false -db_cluster_arn: - description: The Amazon Resource Name (ARN) for the DB cluster. - returned: always - type: str - sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo -db_cluster_identifier: - description: The lowercase user-supplied DB cluster identifier. 
- returned: always - type: str - sample: rds-cluster-demo -db_cluster_members: - description: - - A list of dictionaries containing information about the instances in the cluster. - Each dictionary contains the db_instance_identifier, is_cluster_writer (bool), - db_cluster_parameter_group_status, and promotion_tier (int). - returned: always - type: list - sample: [] -db_cluster_parameter_group: - description: The parameter group associated with the DB cluster. - returned: always - type: str - sample: default.aurora5.6 -db_cluster_resource_id: - description: The AWS Region-unique, immutable identifier for the DB cluster. - returned: always - type: str - sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU -db_subnet_group: - description: The name of the subnet group associated with the DB Cluster. - returned: always - type: str - sample: default -deletion_protection: - description: - - Indicates if the DB cluster has deletion protection enabled. - The database can't be deleted when deletion protection is enabled. - returned: always - type: bool - sample: false -domain_memberships: - description: - - The Active Directory Domain membership records associated with the DB cluster. - returned: always - type: list - sample: [] -earliest_restorable_time: - description: The earliest time to which a database can be restored with point-in-time restore. - returned: always - type: str - sample: '2018-06-29T14:09:34.797000+00:00' -endpoint: - description: The connection endpoint for the primary instance of the DB cluster. - returned: always - type: str - sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com -engine: - description: The database engine of the DB cluster. - returned: always - type: str - sample: aurora -engine_mode: - description: The DB engine mode of the DB cluster. - returned: always - type: str - sample: provisioned -engine_version: - description: The database engine version. - returned: always - type: str - sample: 5.6.10a -hosted_zone_id: - description: The ID that Amazon Route 53 assigns when you create a hosted zone. - returned: always - type: str - sample: Z2R2ITUGPM61AM -http_endpoint_enabled: - description: - - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. - returned: always - type: bool - sample: false -iam_database_authentication_enabled: - description: Whether IAM accounts may be mapped to database accounts. - returned: always - type: bool - sample: false -latest_restorable_time: - description: The latest time to which a database can be restored with point-in-time restore. - returned: always - type: str - sample: '2018-06-29T14:09:34.797000+00:00' -master_username: - description: The master username for the DB cluster. - returned: always - type: str - sample: username -multi_az: - description: Whether the DB cluster has instances in multiple availability zones. - returned: always - type: bool - sample: false -port: - description: The port that the database engine is listening on. - returned: always - type: int - sample: 3306 -preferred_backup_window: - description: The UTC weekly time range during which system maintenance can occur. - returned: always - type: str - sample: 10:18-10:48 -preferred_maintenance_window: - description: The UTC weekly time range during which system maintenance can occur. - returned: always - type: str - sample: tue:03:23-tue:03:53 -read_replica_identifiers: - description: A list of read replica ID strings associated with the DB cluster. 
- returned: always - type: list - sample: [] -reader_endpoint: - description: The reader endpoint for the DB cluster. - returned: always - type: str - sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com -status: - description: The status of the DB cluster. - returned: always - type: str - sample: available -storage_encrypted: - description: Whether the DB cluster is storage encrypted. - returned: always - type: bool - sample: false -tag_list: - description: A list of tags consisting of key-value pairs. - returned: always - type: list - elements: dict - sample: [ - { - "key": "Created_By", - "value": "Ansible_rds_cluster_integration_test" - } - ] -tags: - description: A dictionary of key value pairs. - returned: always - type: dict - sample: { - "Name": "rds-cluster-demo" - } -vpc_security_groups: - description: A list of the DB cluster's security groups and their status. - returned: always - type: complex - contains: - status: - description: Status of the security group. - returned: always - type: str - sample: active - vpc_security_group_id: - description: Security group of the cluster. - returned: always - type: str - sample: sg-12345678 -''' - - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_db_clusters(**params): - try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'][0] - except is_boto3_error_code('DBClusterNotFoundFault'): - return {} - - -def get_add_role_options(params_dict, cluster): - current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])] - role = params_dict['RoleArn'] - if role is not None and role not in current_role_arns: - return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']} - return {} - - -def get_backtrack_options(params_dict): - options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable'] - if params_dict['BacktrackTo'] is not None: - options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None) - if 'ForceBacktrack' in params_dict: - options['Force'] = params_dict['ForceBacktrack'] - return options - return {} - - -def get_create_options(params_dict): - options = [ - 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', - 'CharacterSetName', 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', - 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'KmsKeyId', - 'Engine', 'EngineVersion', 'PreferredMaintenanceWindow', 
'MasterUserPassword', 'MasterUsername', - 'OptionGroupName', 'Port', 'ReplicationSourceIdentifier', 'SourceRegion', 'StorageEncrypted', - 'Tags', 'VpcSecurityGroupIds', 'EngineMode', 'ScalingConfiguration', 'DeletionProtection', - 'EnableHttpEndpoint', 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', - 'EnableGlobalWriteForwarding', - ] - - return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) - - -def get_modify_options(params_dict, force_update_password): - options = [ - 'ApplyImmediately', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'EnableIAMDatabaseAuthentication', - 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'NewDBClusterIdentifier', - 'OptionGroupName', 'Port', 'VpcSecurityGroupIds', 'EnableIAMDatabaseAuthentication', - 'CloudwatchLogsExportConfiguration', 'DeletionProtection', 'EnableHttpEndpoint', - 'CopyTagsToSnapshot', 'EnableGlobalWriteForwarding', 'Domain', 'DomainIAMRoleName', - ] - modify_options = dict((k, v) for k, v in params_dict.items() if k in options and v is not None) - if not force_update_password: - modify_options.pop('MasterUserPassword', None) - return modify_options - - -def get_delete_options(params_dict): - options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot'] - return dict((k, params_dict[k]) for k in options if params_dict[k] is not None) - - -def get_restore_s3_options(params_dict): - options = [ - 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName', - 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion', - 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port', - 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn', - 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags', - 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot', - 'Domain', 'DomainIAMRoleName', - ] - - return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) - - -def get_restore_snapshot_options(params_dict): - options = [ - 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', - 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', - 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier', - 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection', - 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', - ] - return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) - - -def get_restore_cluster_options(params_dict): - options = [ - 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports', - 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime', - 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime', - 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain', - 'DomainIAMRoleName', - ] - return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) - - -def get_rds_method_attribute_name(cluster): - state = module.params['state'] - creation_source = module.params['creation_source'] - method_name = None - method_options_name = None - - if 
state == 'absent': - if cluster and cluster['Status'] not in ['deleting', 'deleted']: - method_name = 'delete_db_cluster' - method_options_name = 'get_delete_options' - else: - if cluster: - method_name = 'modify_db_cluster' - method_options_name = 'get_modify_options' - elif creation_source == 'snapshot': - method_name = 'restore_db_cluster_from_snapshot' - method_options_name = 'get_restore_snapshot_options' - elif creation_source == 's3': - method_name = 'restore_db_cluster_from_s3' - method_options_name = 'get_restore_s3_options' - elif creation_source == 'cluster': - method_name = 'restore_db_cluster_to_point_in_time' - method_options_name = 'get_restore_cluster_options' - else: - method_name = 'create_db_cluster' - method_options_name = 'get_create_options' - - return method_name, method_options_name - - -def add_role(params): - if not module.check_mode: - try: - client.add_role_to_db_cluster(**params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') - - -def backtrack_cluster(params): - if not module.check_mode: - try: - client.backtrack_db_cluster(**params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=F"Unable to backtrack cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') - - -def get_cluster(db_cluster_id): - try: - return _describe_db_clusters(DBClusterIdentifier=db_cluster_id) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe DB clusters") - - -def changing_cluster_options(modify_params, current_cluster): - changing_params = {} - apply_immediately = modify_params.pop('ApplyImmediately') - db_cluster_id = modify_params.pop('DBClusterIdentifier') - - enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None) - if enable_cloudwatch_logs_export is not None: - desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []} - provided_cloudwatch_logs = set(enable_cloudwatch_logs_export) - current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports']) - - desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export)) - if module.params['purge_cloudwatch_logs_exports']: - desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs)) - changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration - - password = modify_params.pop('MasterUserPassword', None) - if password: - changing_params['MasterUserPassword'] = password - - new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None) - if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']: - changing_params['NewDBClusterIdentifier'] = new_cluster_id - - option_group = modify_params.pop('OptionGroupName', None) - if ( - option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']] - ): - changing_params['OptionGroupName'] = option_group - - vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None) - if vpc_sgs: - desired_vpc_sgs 
= []
-        provided_vpc_sgs = set(vpc_sgs)
-        current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']])
-        if module.params['purge_security_groups']:
-            desired_vpc_sgs = vpc_sgs
-        else:
-            if provided_vpc_sgs - current_vpc_sgs:
-                desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs)
-
-        if desired_vpc_sgs:
-            changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs
-
-    desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None)
-    if desired_db_cluster_parameter_group:
-        if desired_db_cluster_parameter_group != current_cluster["DBClusterParameterGroup"]:
-            changing_params["DBClusterParameterGroupName"] = desired_db_cluster_parameter_group
-
-    for param in modify_params:
-        if modify_params[param] != current_cluster[param]:
-            changing_params[param] = modify_params[param]
-
-    if changing_params:
-        changing_params['DBClusterIdentifier'] = db_cluster_id
-        if apply_immediately is not None:
-            changing_params['ApplyImmediately'] = apply_immediately
-
-    return changing_params
-
-
-def ensure_present(cluster, parameters, method_name, method_options_name):
-    changed = False
-
-    if not cluster:
-        if parameters.get('Tags') is not None:
-            parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
-        call_method(client, module, method_name, eval(method_options_name)(parameters))
-        changed = True
-    else:
-        if get_backtrack_options(parameters):
-            # backtrack_cluster() (like add_role() below) reads the module-level
-            # client/module globals, so it only takes the request parameters.
-            backtrack_cluster(get_backtrack_options(parameters))
-            changed = True
-        else:
-            modifiable_options = eval(method_options_name)(parameters,
-                                                           force_update_password=module.params['force_update_password'])
-            modify_options = changing_cluster_options(modifiable_options, cluster)
-            if modify_options:
-                call_method(client, module, method_name, modify_options)
-                changed = True
-            if module.params['tags'] is not None:
-                existing_tags = get_tags(client, module, cluster['DBClusterArn'])
-                changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'],
-                                       module.params['purge_tags'])
-
-    add_role_params = get_add_role_options(parameters, cluster)
-    if add_role_params:
-        add_role(add_role_params)
-        changed = True
-
-    if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'):
-        call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']})
-        changed = True
-
-    return changed
-
-
-def main():
-    global module
-    global client
-
-    arg_spec = dict(
-        state=dict(choices=['present', 'absent'], default='present'),
-        creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']),
-        force_update_password=dict(type='bool', default=False),
-        promote=dict(type='bool', default=False),
-        purge_cloudwatch_logs_exports=dict(type='bool', default=True),
-        purge_tags=dict(type='bool', default=True),
-        wait=dict(type='bool', default=True),
-        purge_security_groups=dict(type='bool', default=True),
-    )
-
-    parameter_options = dict(
-        apply_immediately=dict(type='bool', default=False),
-        availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']),
-        backtrack_to=dict(),
-        backtrack_window=dict(type='int'),
-        backup_retention_period=dict(type='int', default=1),
-        character_set_name=dict(),
-        database_name=dict(aliases=['db_name']),
-        db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']),
-        db_cluster_parameter_group_name=dict(),
-        db_subnet_group_name=dict(),
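-        # The snake_case options below are converted back into the CamelCase
-        # parameters the RDS API expects by arg_spec_to_rds_params() further
-        # down in main().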
enable_cloudwatch_logs_exports=dict(type='list', elements='str'), - deletion_protection=dict(type='bool'), - global_cluster_identifier=dict(), - enable_http_endpoint=dict(type='bool'), - copy_tags_to_snapshot=dict(type='bool'), - domain=dict(), - domain_iam_role_name=dict(), - enable_global_write_forwarding=dict(type='bool'), - enable_iam_database_authentication=dict(type='bool'), - engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql"]), - engine_version=dict(), - final_snapshot_identifier=dict(), - force_backtrack=dict(type='bool'), - kms_key_id=dict(), - master_user_password=dict(aliases=['password'], no_log=True), - master_username=dict(aliases=['username']), - new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']), - option_group_name=dict(), - port=dict(type='int'), - preferred_backup_window=dict(aliases=['backup_window']), - preferred_maintenance_window=dict(aliases=['maintenance_window']), - replication_source_identifier=dict(aliases=['replication_src_id']), - restore_to_time=dict(), - restore_type=dict(choices=['full-copy', 'copy-on-write']), - role_arn=dict(), - s3_bucket_name=dict(), - s3_ingestion_role_arn=dict(), - s3_prefix=dict(), - skip_final_snapshot=dict(type='bool', default=False), - snapshot_identifier=dict(), - source_db_cluster_identifier=dict(), - source_engine=dict(choices=['mysql']), - source_engine_version=dict(), - source_region=dict(), - storage_encrypted=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), - use_latest_restorable_time=dict(type='bool'), - vpc_security_group_ids=dict(type='list', elements='str'), - ) - arg_spec.update(parameter_options) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - required_if=[ - ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), - ('creation_source', 's3', ( - 's3_bucket_name', 'engine', 'master_username', 'master_user_password', - 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), - ], - mutually_exclusive=[ - ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'), - ('use_latest_restorable_time', 'restore_to_time'), - ], - supports_check_mode=True - ) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - - try: - client = module.client('rds', retry_decorator=retry_decorator) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') - - module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower() - cluster = get_cluster(module.params['db_cluster_identifier']) - - if module.params['new_db_cluster_identifier']: - module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower() - - if get_cluster(module.params['new_db_cluster_identifier']): - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists") - if not cluster: - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist") - - if ( - module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and - module.params['final_snapshot_identifier'] is None - ): - module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier') - - parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in 
parameter_options)) - - changed = False - method_name, method_options_name = get_rds_method_attribute_name(cluster) - - if method_name: - if method_name == 'delete_db_cluster': - call_method(client, module, method_name, eval(method_options_name)(parameters)) - changed = True - else: - changed |= ensure_present(cluster, parameters, method_name, method_options_name) - - if not module.check_mode and module.params['new_db_cluster_identifier'] and module.params['apply_immediately']: - cluster_id = module.params['new_db_cluster_identifier'] - else: - cluster_id = module.params['db_cluster_identifier'] - - result = camel_dict_to_snake_dict(get_cluster(cluster_id)) - - if result: - result['tags'] = get_tags(client, module, result['db_cluster_arn']) - - module.exit_json(changed=changed, **result) - - -if __name__ == '__main__': - main() diff --git a/rds_cluster_info.py b/rds_cluster_info.py deleted file mode 100644 index c53d661bd8b..00000000000 --- a/rds_cluster_info.py +++ /dev/null @@ -1,307 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2022 Ansible Project -# Copyright (c) 2022 Alina Buzachis (@alinabuzachis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: rds_cluster_info -version_added: 3.2.0 -short_description: Obtain information about one or more RDS clusters -description: - - Obtain information about one or more RDS clusters. -options: - db_cluster_identifier: - description: - - The user-supplied DB cluster identifier. - - If this parameter is specified, information from only the specific DB cluster is returned. - aliases: - - cluster_id - - id - - cluster_name - type: str - filters: - description: - - A filter that specifies one or more DB clusters to describe. - See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html). - type: dict -author: - - Alina Buzachis (@alinabuzachis) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -- name: Get info of all existing DB clusters - community.aws.rds_cluster_info: - register: _result_cluster_info - -- name: Get info on a specific DB cluster - community.aws.rds_cluster_info: - cluster_id: "{{ cluster_id }}" - register: _result_cluster_info - -- name: Get info of all DB clusters with a specific engine - community.aws.rds_cluster_info: - filters: - engine: "aurora" - register: _result_cluster_info -''' - -RETURN = r''' -clusters: - description: List of RDS clusters. - returned: always - type: list - contains: - activity_stream_status: - description: The status of the database activity stream. - type: str - sample: stopped - allocated_storage: - description: - - The allocated storage size in gigabytes. Since aurora storage size is not fixed this is - always 1 for aurora database engines. - type: int - sample: 1 - associated_roles: - description: - - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated - with the DB cluster. Each dictionary contains the role_arn and the status of the role. - type: list - sample: [] - availability_zones: - description: The list of availability zones that instances in the DB cluster can be created in. - type: list - sample: - - us-east-1c - - us-east-1a - - us-east-1e - backup_retention_period: - description: The number of days for which automatic DB snapshots are retained.
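Aside: the state=absent path in rds_cluster above ultimately issues a single DeleteDBCluster call, and the skip_final_snapshot/final_snapshot_identifier guard mirrors what that API enforces. A minimal standalone boto3 sketch of that pairing (illustrative only, not part of the removed module; assumes configured credentials):

import boto3

def delete_cluster(cluster_id, skip_final_snapshot=False, final_snapshot_id=None):
    # Mirror the module's guard: a final snapshot name is mandatory unless the
    # caller explicitly opts out of taking one.
    if not skip_final_snapshot and final_snapshot_id is None:
        raise ValueError("final_snapshot_identifier is required when skip_final_snapshot is false")
    params = {"DBClusterIdentifier": cluster_id, "SkipFinalSnapshot": skip_final_snapshot}
    if not skip_final_snapshot:
        params["FinalDBSnapshotIdentifier"] = final_snapshot_id
    return boto3.client("rds").delete_db_cluster(**params)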
- type: int - sample: 1 - cluster_create_time: - description: The time in UTC when the DB cluster was created. - type: str - sample: '2018-06-29T14:08:58.491000+00:00' - copy_tags_to_snapshot: - description: - - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. - type: bool - sample: false - cross_account_clone: - description: - - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. - type: bool - sample: false - db_cluster_arn: - description: The Amazon Resource Name (ARN) for the DB cluster. - type: str - sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo - db_cluster_identifier: - description: The lowercase user-supplied DB cluster identifier. - type: str - sample: rds-cluster-demo - db_cluster_members: - description: - - A list of dictionaries containing information about the instances in the cluster. - Each dictionary contains the I(db_instance_identifier), I(is_cluster_writer) (bool), - I(db_cluster_parameter_group_status), and I(promotion_tier) (int). - type: list - sample: [] - db_cluster_parameter_group: - description: The parameter group associated with the DB cluster. - type: str - sample: default.aurora5.6 - db_cluster_resource_id: - description: The AWS Region-unique, immutable identifier for the DB cluster. - type: str - sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU - db_subnet_group: - description: The name of the subnet group associated with the DB Cluster. - type: str - sample: default - deletion_protection: - description: - - Indicates if the DB cluster has deletion protection enabled. - The database can't be deleted when deletion protection is enabled. - type: bool - sample: false - domain_memberships: - description: - - The Active Directory Domain membership records associated with the DB cluster. - type: list - sample: [] - earliest_restorable_time: - description: The earliest time to which a database can be restored with point-in-time restore. - type: str - sample: '2018-06-29T14:09:34.797000+00:00' - endpoint: - description: The connection endpoint for the primary instance of the DB cluster. - type: str - sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com - engine: - description: The database engine of the DB cluster. - type: str - sample: aurora - engine_mode: - description: The DB engine mode of the DB cluster. - type: str - sample: provisioned - engine_version: - description: The database engine version. - type: str - sample: 5.6.10a - hosted_zone_id: - description: The ID that Amazon Route 53 assigns when you create a hosted zone. - type: str - sample: Z2R2ITUGPM61AM - http_endpoint_enabled: - description: - - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. - type: bool - sample: false - iam_database_authentication_enabled: - description: Whether IAM accounts may be mapped to database accounts. - type: bool - sample: false - latest_restorable_time: - description: The latest time to which a database can be restored with point-in-time restore. - type: str - sample: '2018-06-29T14:09:34.797000+00:00' - master_username: - description: The master username for the DB cluster. - type: str - sample: username - multi_az: - description: Whether the DB cluster has instances in multiple availability zones. - type: bool - sample: false - port: - description: The port that the database engine is listening on. 
- type: int - sample: 3306 - preferred_backup_window: - description: The daily time range (in UTC) during which automated backups are created if automated backups are enabled. - type: str - sample: 10:18-10:48 - preferred_maintenance_window: - description: The UTC weekly time range during which system maintenance can occur. - type: str - sample: tue:03:23-tue:03:53 - read_replica_identifiers: - description: A list of read replica ID strings associated with the DB cluster. - type: list - sample: [] - reader_endpoint: - description: The reader endpoint for the DB cluster. - type: str - sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com - status: - description: The status of the DB cluster. - type: str - sample: available - storage_encrypted: - description: Whether the DB cluster is storage encrypted. - type: bool - sample: false - tag_list: - description: A list of tags consisting of key-value pairs. - type: list - elements: dict - sample: [ - { - "key": "Created_By", - "value": "Ansible_rds_cluster_integration_test" - } - ] - tags: - description: A dictionary of key value pairs. - type: dict - sample: { - "Name": "rds-cluster-demo" - } - vpc_security_groups: - description: A list of the DB cluster's security groups and their status. - type: complex - contains: - status: - description: Status of the security group. - type: str - sample: active - vpc_security_group_id: - description: Security group of the cluster. - type: str - sample: sg-12345678 -''' - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_db_clusters(client, **params): - try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'] - except is_boto3_error_code('DBClusterNotFoundFault'): - return [] - - -def cluster_info(client, module): - cluster_id = module.params.get('db_cluster_identifier') - filters = module.params.get('filters') - - params = dict() - if cluster_id: - params['DBClusterIdentifier'] = cluster_id - if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) - - try: - result = _describe_db_clusters(client, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get RDS cluster information.") - - for cluster in result: - cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn']) - - return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result]) - - -def main(): - argument_spec = dict( - db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']), - filters=dict(type='dict'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e,
msg='Failed to connect to AWS.') - - module.exit_json(**cluster_info(client, module)) - - -if __name__ == '__main__': - main() diff --git a/rds_cluster_snapshot.py b/rds_cluster_snapshot.py deleted file mode 100644 index 2386f5589d7..00000000000 --- a/rds_cluster_snapshot.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2014 Ansible Project -# Copyright (c) 2021 Alina Buzachis (@alinabuzachis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_cluster_snapshot -version_added: 4.0.0 -short_description: Manage Amazon RDS snapshots of DB clusters -description: - - Create, modify and delete RDS snapshots of DB clusters. -options: - state: - description: - - Specify the desired state of the snapshot. - default: present - choices: [ 'present', 'absent'] - type: str - db_cluster_snapshot_identifier: - description: - - The identifier of the DB cluster snapshot. - required: true - aliases: - - snapshot_id - - id - - snapshot_name - type: str - db_cluster_identifier: - description: - - The identifier of the DB cluster to create a snapshot for. - - Required when I(state=present). - aliases: - - cluster_id - - cluster_name - type: str - source_db_cluster_snapshot_identifier: - description: - - The identifier of the DB cluster snapshot to copy. - - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier. - - If the source snapshot is in a different AWS region than the copy, specify the snapshot's ARN. - aliases: - - source_id - - source_snapshot_id - type: str - source_region: - description: - - The region that contains the snapshot to be copied. - type: str - copy_tags: - description: - - Whether to copy all tags from I(source_db_cluster_snapshot_identifier) to I(db_cluster_snapshot_identifier). - type: bool - default: False - wait: - description: - - Whether or not to wait for snapshot creation or deletion. - type: bool - default: false - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - type: int -notes: - - Retrieving the information about a specific DB cluster or listing the DB cluster snapshots for a specific DB cluster - can be done using M(community.aws.rds_snapshot_info). -author: - - Alina Buzachis (@alinabuzachis) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = r''' -- name: Create a DB cluster snapshot - community.aws.rds_cluster_snapshot: - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: new-cluster-snapshot - -- name: Delete a DB cluster snapshot - community.aws.rds_cluster_snapshot: - db_cluster_snapshot_identifier: new-cluster-snapshot - state: absent - -- name: Copy snapshot from a different region and copy its tags - community.aws.rds_cluster_snapshot: - id: new-database-snapshot-copy - region: us-east-1 - source_id: "{{ snapshot.db_snapshot_arn }}" - source_region: us-east-2 - copy_tags: true -''' - -RETURN = r''' -availability_zone: - description: Availability zone of the database from which the snapshot was created. - returned: always - type: str - sample: us-west-2a -db_cluster_snapshot_identifier: - description: Specifies the identifier for the DB cluster snapshot.
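Aside: cluster_info() above boils down to a paginated DescribeDBClusters call with an optional identifier and filter list. A minimal sketch of that translation (boto3 assumed; the helper name is illustrative and not part of the removed module):

import boto3

def describe_db_clusters(cluster_id=None, filters=None):
    client = boto3.client("rds")
    params = {}
    if cluster_id:
        params["DBClusterIdentifier"] = cluster_id
    if filters:
        # An Ansible-style dict such as {"engine": "aurora"} becomes the
        # [{"Name": ..., "Values": [...]}] shape the RDS API expects.
        params["Filters"] = [
            {"Name": name, "Values": value if isinstance(value, list) else [value]}
            for name, value in filters.items()
        ]
    paginator = client.get_paginator("describe_db_clusters")
    return paginator.paginate(**params).build_full_result()["DBClusters"]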
- returned: always - type: str - sample: ansible-test-16638696-test-snapshot -db_cluster_identifier: - description: Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from. - returned: always - type: str - sample: ansible-test-16638696 -snapshot_create_time: - description: Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). - returned: always - type: str - sample: '2019-06-15T10:46:23.776000+00:00' -engine: - description: Specifies the name of the database engine for this DB cluster snapshot. - returned: always - type: str - sample: "aurora" -engine_mode: - description: Provides the engine mode of the database engine for this DB cluster snapshot. - returned: always - type: str - sample: "provisioned" -allocated_storage: - description: Specifies the allocated storage size in gibibytes (GiB). - returned: always - type: int - sample: 20 -status: - description: Specifies the status of this DB cluster snapshot. - returned: always - type: str - sample: available -port: - description: Port on which the database is listening. - returned: always - type: int - sample: 3306 -vpc_id: - description: ID of the VPC in which the DB lives. - returned: always - type: str - sample: vpc-09ff232e222710ae0 -cluster_create_time: - description: Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC). - returned: always - type: str - sample: '2019-06-15T10:15:56.221000+00:00' -master_username: - description: Provides the master username for this DB cluster snapshot. - returned: always - type: str - sample: test -engine_version: - description: Version of the cluster from which the snapshot was created. - returned: always - type: str - sample: "5.6.mysql_aurora.1.22.5" -license_model: - description: Provides the license model information for this DB cluster snapshot. - returned: always - type: str - sample: general-public-license -snapshot_type: - description: How the snapshot was created (always manual for this module!). - returned: always - type: str - sample: manual -percent_progress: - description: Specifies the percentage of the estimated data that has been transferred. - returned: always - type: int - sample: 100 -storage_encrypted: - description: Specifies whether the DB cluster snapshot is encrypted. - returned: always - type: bool - sample: false -kms_key_id: - description: The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. - returned: always - type: str -db_cluster_snapshot_arn: - description: Amazon Resource Name for the snapshot. - returned: always - type: str - sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot -source_db_cluster_snapshot_arn: - description: If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot, otherwise, null. - returned: always - type: str - sample: null -iam_database_authentication_enabled: - description: Whether IAM database authentication is enabled. - returned: always - type: bool - sample: false -tag_list: - description: A list of tags. - returned: always - type: list - sample: [] -tags: - description: Tags applied to the snapshot.
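Aside: the cross-region copy shown in the EXAMPLES section corresponds to a single CopyDBClusterSnapshot call issued in the destination region. A hedged boto3 sketch (identifiers are placeholders):

import boto3

# Run in the destination region; SourceRegion tells boto3 to generate the
# pre-signed URL that cross-region snapshot copies require.
client = boto3.client("rds", region_name="us-east-1")
client.copy_db_cluster_snapshot(
    SourceDBClusterSnapshotIdentifier="arn:aws:rds:us-east-2:123456789012:cluster-snapshot:my-snap",
    TargetDBClusterSnapshotIdentifier="new-database-snapshot-copy",
    CopyTags=True,
    SourceRegion="us-east-2",
)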
- returned: always - type: complex - contains: {} -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params - - -def get_snapshot(snapshot_id): - try: - snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0] - snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"]) - except is_boto3_error_code("DBClusterSnapshotNotFound"): - return {} - except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except - return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) - return snapshot - - -def get_parameters(parameters, method_name): - if method_name == 'copy_db_cluster_snapshot': - parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier'] - - required_options = get_boto3_client_method_parameters(client, method_name, required=True) - if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) - options = get_boto3_client_method_parameters(client, method_name) - parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) - - return parameters - - -def ensure_snapshot_absent(): - snapshot_name = module.params.get("db_cluster_snapshot_identifier") - params = {"DBClusterSnapshotIdentifier": snapshot_name} - changed = False - - snapshot = get_snapshot(snapshot_name) - if not snapshot: - module.exit_json(changed=changed) - elif snapshot and snapshot["Status"] != "deleting": - snapshot, changed = call_method(client, module, "delete_db_cluster_snapshot", params) - - module.exit_json(changed=changed) - - -def copy_snapshot(params): - changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') - snapshot = get_snapshot(snapshot_id) - - if not snapshot: - method_params = get_parameters(params, 'copy_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params) - - return changed - - -def ensure_snapshot_present(params): - source_id = module.params.get('source_db_cluster_snapshot_identifier') - snapshot_name = 
module.params.get("db_cluster_snapshot_identifier") - changed = False - - snapshot = get_snapshot(snapshot_name) - - # Copy snapshot - if source_id: - changed |= copy_snapshot(params) - - # Create snapshot - elif not snapshot: - changed |= create_snapshot(params) - - # Snapshot exists and we're not creating a copy - modify exising snapshot - else: - changed |= modify_snapshot() - - snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) - - -def create_snapshot(params): - method_params = get_parameters(params, 'create_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params) - - return changed - - -def modify_snapshot(): - # TODO - add other modifications aside from purely tags - changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') - snapshot = get_snapshot(snapshot_id) - - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) - - return changed - - -def main(): - global client - global module - - argument_spec = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True), - db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']), - source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - try: - client = module.client('rds', retry_decorator=retry_decorator) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS.") - - state = module.params.get("state") - - if state == "absent": - ensure_snapshot_absent() - elif state == "present": - params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) - ensure_snapshot_present(params) - - -if __name__ == '__main__': - main() From 0dce9b1201e5f8785a4a1d9c0958cde90a0870ff Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 22 Sep 2022 17:23:05 +0200 Subject: [PATCH 583/683] Migrate cloudwatch_metric_alarm* modules and tests (#1483) * Remove modules * Update runtime * Add changelog fragment --- cloudwatch_metric_alarm.py | 352 ------------------------------------- 1 file changed, 352 deletions(-) delete mode 100644 cloudwatch_metric_alarm.py diff --git a/cloudwatch_metric_alarm.py b/cloudwatch_metric_alarm.py deleted file mode 100644 index dbe6bf43f94..00000000000 --- a/cloudwatch_metric_alarm.py +++ /dev/null @@ -1,352 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any 
later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <https://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: cloudwatch_metric_alarm -short_description: "Create/update or delete AWS CloudWatch 'metric alarms'" -version_added: 1.0.0 -description: - - Can create or delete AWS CloudWatch metric alarms. - - Metrics you wish to alarm on must already exist. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_metric_alarm). - The usage did not change. -author: - - "Zacharie Eakin (@Zeekin)" -options: - state: - description: - - Register or deregister the alarm. - choices: ['present', 'absent'] - default: 'present' - type: str - name: - description: - - Unique name for the alarm. - required: true - type: str - metric: - description: - - Name of the monitored metric (e.g. C(CPUUtilization)). - - Metric must already exist. - required: false - type: str - namespace: - description: - - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in CloudWatch. - required: false - type: str - statistic: - description: - - Operation applied to the metric. - - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value. - required: false - choices: ['SampleCount','Average','Sum','Minimum','Maximum'] - type: str - comparison: - description: - - Determines how the threshold value is compared. - required: false - type: str - choices: - - 'GreaterThanOrEqualToThreshold' - - 'GreaterThanThreshold' - - 'LessThanThreshold' - - 'LessThanOrEqualToThreshold' - threshold: - description: - - Sets the min/max bound for triggering the alarm. - required: false - type: float - period: - description: - - The time (in seconds) between metric evaluations. - required: false - type: int - evaluation_periods: - description: - - The number of times in which the metric is evaluated before final calculation. - required: false - type: int - unit: - description: - - The threshold's unit of measurement. - required: false - type: str - choices: - - 'Seconds' - - 'Microseconds' - - 'Milliseconds' - - 'Bytes' - - 'Kilobytes' - - 'Megabytes' - - 'Gigabytes' - - 'Terabytes' - - 'Bits' - - 'Kilobits' - - 'Megabits' - - 'Gigabits' - - 'Terabits' - - 'Percent' - - 'Count' - - 'Bytes/Second' - - 'Kilobytes/Second' - - 'Megabytes/Second' - - 'Gigabytes/Second' - - 'Terabytes/Second' - - 'Bits/Second' - - 'Kilobits/Second' - - 'Megabits/Second' - - 'Gigabits/Second' - - 'Terabits/Second' - - 'Count/Second' - - 'None' - description: - description: - - A longer description of the alarm. - required: false - type: str - dimensions: - description: - - A dictionary describing which metric the alarm is applied to. - - 'For more information see the AWS documentation:' - - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension) - required: false - type: dict - alarm_actions: - description: - - A list of the names of action(s) to take when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s).
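Aside: the options in this block map almost one-to-one onto CloudWatch's PutMetricAlarm API, which is what the module calls under the hood. A minimal boto3 sketch with illustrative values (the comments name the corresponding module options):

import boto3

cloudwatch = boto3.client("cloudwatch")
cloudwatch.put_metric_alarm(
    AlarmName="cpu-low",                              # name
    MetricName="CPUUtilization",                      # metric
    Namespace="AWS/EC2",                              # namespace
    Statistic="Average",                              # statistic
    ComparisonOperator="LessThanOrEqualToThreshold",  # comparison
    Threshold=5.0,                                    # threshold
    Period=300,                                       # period
    EvaluationPeriods=3,                              # evaluation_periods
    Unit="Percent",                                   # unit
    Dimensions=[{"Name": "InstanceId", "Value": "i-0123456789abcdef0"}],
    TreatMissingData="missing",                       # treat_missing_data
)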
- required: false - type: list - elements: str - insufficient_data_actions: - description: - - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status. - required: false - type: list - elements: str - ok_actions: - description: - - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s). - required: false - type: list - elements: str - treat_missing_data: - description: - - Sets how the alarm handles missing data points. - required: false - type: str - choices: - - 'breaching' - - 'notBreaching' - - 'ignore' - - 'missing' - default: 'missing' -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = r''' - - name: create alarm - community.aws.cloudwatch_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metric: "CPUUtilization" - namespace: "AWS/EC2" - statistic: Average - comparison: "LessThanOrEqualToThreshold" - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: "Percent" - description: "This will alarm when an instance's CPU usage average is lower than 5% for 15 minutes" - dimensions: {'InstanceId':'i-XXX'} - alarm_actions: ["action1","action2"] - - - name: Create an alarm to recover a failed instance - community.aws.cloudwatch_metric_alarm: - state: present - region: us-west-1 - name: "recover-instance" - metric: "StatusCheckFailed_System" - namespace: "AWS/EC2" - statistic: "Minimum" - comparison: "GreaterThanOrEqualToThreshold" - threshold: 1.0 - period: 60 - evaluation_periods: 2 - unit: "Count" - description: "This will recover an instance when it fails" - dimensions: {"InstanceId":'i-XXX'} - alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] -''' - -try: - from botocore.exceptions import ClientError -except ImportError: - pass # protected by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - - -def create_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - - if not isinstance(params['Dimensions'], list): - fixed_dimensions = [] - for key, value in params['Dimensions'].items(): - fixed_dimensions.append({'Name': key, 'Value': value}) - params['Dimensions'] = fixed_dimensions - - if not alarms['MetricAlarms']: - try: - if not module.check_mode: - connection.put_metric_alarm(**params) - changed = True - except ClientError as e: - module.fail_json_aws(e) - - else: - changed = False - alarm = alarms['MetricAlarms'][0] - - # Workaround for alarms created before TreatMissingData was introduced - if 'TreatMissingData' not in alarm.keys(): - alarm['TreatMissingData'] = 'missing' - - for key in ['ActionsEnabled', 'StateValue', 'StateReason', - 'StateReasonData', 'StateUpdatedTimestamp', - 'AlarmArn', 'AlarmConfigurationUpdatedTimestamp']: - alarm.pop(key, None) - if alarm != params: - changed = True - alarm = params - - try: - if changed: - if not module.check_mode: - connection.put_metric_alarm(**alarm) - except ClientError as e: - module.fail_json_aws(e) - - try: - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - except ClientError as e: - module.fail_json_aws(e) - - result = {} - if alarms['MetricAlarms']: - result = alarms['MetricAlarms'][0] - - module.exit_json(changed=changed, - name=result.get('AlarmName'), - actions_enabled=result.get('ActionsEnabled'), - alarm_actions=result.get('AlarmActions'), - alarm_arn=result.get('AlarmArn'), -
comparison=result.get('ComparisonOperator'), - description=result.get('AlarmDescription'), - dimensions=result.get('Dimensions'), - evaluation_periods=result.get('EvaluationPeriods'), - insufficient_data_actions=result.get('InsufficientDataActions'), - last_updated=result.get('AlarmConfigurationUpdatedTimestamp'), - metric=result.get('MetricName'), - namespace=result.get('Namespace'), - ok_actions=result.get('OKActions'), - period=result.get('Period'), - state_reason=result.get('StateReason'), - state_value=result.get('StateValue'), - statistic=result.get('Statistic'), - threshold=result.get('Threshold'), - treat_missing_data=result.get('TreatMissingData'), - unit=result.get('Unit')) - - -def delete_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - - if alarms['MetricAlarms']: - try: - if not module.check_mode: - connection.delete_alarms(AlarmNames=[params['AlarmName']]) - module.exit_json(changed=True) - except (ClientError) as e: - module.fail_json_aws(e) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - metric=dict(type='str'), - namespace=dict(type='str'), - statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), - comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', - 'GreaterThanOrEqualToThreshold']), - threshold=dict(type='float'), - period=dict(type='int'), - unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', - 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', - 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', - 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', - 'Terabits/Second', 'Count/Second', 'None']), - evaluation_periods=dict(type='int'), - description=dict(type='str'), - dimensions=dict(type='dict', default={}), - alarm_actions=dict(type='list', default=[], elements='str'), - insufficient_data_actions=dict(type='list', default=[], elements='str'), - ok_actions=dict(type='list', default=[], elements='str'), - treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), - state=dict(default='present', choices=['present', 'absent']), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - state = module.params.get('state') - - params = dict() - params['AlarmName'] = module.params.get('name') - params['MetricName'] = module.params.get('metric') - params['Namespace'] = module.params.get('namespace') - params['Statistic'] = module.params.get('statistic') - params['ComparisonOperator'] = module.params.get('comparison') - params['Threshold'] = module.params.get('threshold') - params['Period'] = module.params.get('period') - params['EvaluationPeriods'] = module.params.get('evaluation_periods') - if module.params.get('unit'): - params['Unit'] = module.params.get('unit') - params['AlarmDescription'] = module.params.get('description') - params['Dimensions'] = module.params.get('dimensions') - params['AlarmActions'] = module.params.get('alarm_actions', []) - params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', []) - params['OKActions'] = module.params.get('ok_actions', []) - params['TreatMissingData'] = module.params.get('treat_missing_data') - - connection = 
module.client('cloudwatch') - - if state == 'present': - create_metric_alarm(connection, module, params) - elif state == 'absent': - delete_metric_alarm(connection, module, params) - - -if __name__ == '__main__': - main() From 97108ae4743fff7fd2c1f2307bd383fe78abb56f Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Sep 2022 09:42:48 +0200 Subject: [PATCH 584/683] Migrate rds_instance* modules and tests (#1452) Migrate rds_instance* modules and tests Depends-On: ansible-collections/amazon.aws#1011 Depends-On: #1480 Remove rds_instance* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mike Graves Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- rds_instance.py | 1426 -------------------------------------- rds_instance_info.py | 421 ----------- rds_instance_snapshot.py | 380 ---------- 3 files changed, 2227 deletions(-) delete mode 100644 rds_instance.py delete mode 100644 rds_instance_info.py delete mode 100644 rds_instance_snapshot.py diff --git a/rds_instance.py b/rds_instance.py deleted file mode 100644 index 5996ec2b2cf..00000000000 --- a/rds_instance.py +++ /dev/null @@ -1,1426 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_instance -version_added: 1.0.0 -short_description: Manage RDS instances -description: - - Create, modify, and delete RDS instances. -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -author: - - Sloane Hertel (@s-hertel) - -options: - # General module options - state: - description: - - Whether the DB instance should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state - and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state, - (running if creating the DB instance). - - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note - rebooting the instance - is not idempotent. - choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'] - default: 'present' - type: str - creation_source: - description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot). - choices: ['snapshot', 's3', 'instance'] - type: str - force_update_password: - description: - - Set to C(True) to update your instance password with I(master_user_password). Since comparing passwords to determine - if it needs to be updated is not possible this is set to False by default to allow idempotence. - type: bool - default: False - purge_cloudwatch_logs_exports: - description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. - type: bool - default: True - read_replica: - description: - - Set to C(False) to promote a read replica instance or C(True) to create one. When creating a read replica C(creation_source) should - be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. - type: bool - wait: - description: - - Whether to wait for the instance to be available, stopped, or deleted. At a later time a I(wait_timeout) option may be added.
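Aside: the waiter behaviour the I(wait) description refers to next is, for most states, a standard boto3 RDS waiter (the stopped state uses a custom waiter in the collection's module_utils). A sketch of the equivalent direct call with the 60 second delay and 30 attempts described below (identifier is a placeholder):

import boto3

client = boto3.client("rds")
waiter = client.get_waiter("db_instance_available")  # or "db_instance_deleted"
waiter.wait(
    DBInstanceIdentifier="ansible-test",
    WaiterConfig={"Delay": 60, "MaxAttempts": 30},
)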
- Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches - the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the - instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). - If setting this to False, task retries and delays may make your playbook execution better handle timeouts for major modifications. - type: bool - default: True - - # Options that have a corresponding boto3 parameter - allocated_storage: - description: - - The amount of storage (in gibibytes) to allocate for the DB instance. - type: int - allow_major_version_upgrade: - description: - - Whether to allow major version upgrades. - type: bool - apply_immediately: - description: - - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password) - should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes - are applied during the next maintenance window. - type: bool - default: False - auto_minor_version_upgrade: - description: - - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window. - type: bool - availability_zone: - description: - - The EC2 Availability Zone that the DB instance is created in. - May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az). - aliases: - - az - - zone - type: str - backup_retention_period: - description: - - The number of days for which automated backups are retained. - - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source to read replicas) - - May be used when creating a new instance, when restoring from S3, or when modifying an instance. - type: int - ca_certificate_identifier: - description: - - The identifier of the CA certificate for the DB instance. - type: str - character_set_name: - description: - - The character set to associate with the DB instance. - type: str - copy_tags_to_snapshot: - description: - - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating - a DB instance the RDS API defaults this to false if unspecified. - type: bool - db_cluster_identifier: - description: - - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to - 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or - contain consecutive hyphens. - aliases: - - cluster_id - type: str - db_instance_class: - description: - - The compute and memory capacity of the DB instance, for example db.t2.micro. - aliases: - - class - - instance_type - type: str - db_instance_identifier: - description: - - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or - hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. - aliases: - - instance_id - - id - required: True - type: str - db_name: - description: - - The name for your database. If a name is not provided Amazon RDS will not create a database. - type: str - db_parameter_group_name: - description: - - The name of the DB parameter group to associate with this DB instance.
When creating the DB instance if this - argument is omitted the default DBParameterGroup for the specified engine is used. - type: str - db_security_groups: - description: - - (EC2-Classic platform) A list of DB security groups to associate with this DB instance. - type: list - elements: str - db_snapshot_identifier: - description: - - The identifier or ARN of the DB snapshot to restore from when using I(creation_source=snapshot). - type: str - aliases: - - snapshot_identifier - - snapshot_id - db_subnet_group_name: - description: - - The DB subnet group name to use for the DB instance. - aliases: - - subnet_group - type: str - deletion_protection: - description: - - A value that indicates whether the DB instance has deletion protection enabled. - The database can't be deleted when deletion protection is enabled. - By default, deletion protection is disabled. - type: bool - version_added: 3.3.0 - domain: - description: - - The Active Directory Domain to restore the instance in. - type: str - domain_iam_role_name: - description: - - The name of the IAM role to be used when making API calls to the Directory Service. - type: str - enable_cloudwatch_logs_exports: - description: - - A list of log types that need to be enabled for exporting to CloudWatch Logs. - aliases: - - cloudwatch_log_exports - type: list - elements: str - enable_iam_database_authentication: - description: - - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. - If this option is omitted when creating the instance, Amazon RDS sets this to False. - type: bool - enable_performance_insights: - description: - - Whether to enable Performance Insights for the DB instance. - type: bool - engine: - description: - - The name of the database engine to be used for this DB instance. This is required to create an instance. - choices: ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', - 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] - type: str - engine_version: - description: - - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a or 5.7.12. - An Aurora PostgreSQL example is 9.6.3. - type: str - final_db_snapshot_identifier: - description: - - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false. - aliases: - - final_snapshot_identifier - type: str - force_failover: - description: - - Set to true to conduct the reboot through a MultiAZ failover. - type: bool - iam_roles: - description: - - List of Amazon Web Services Identity and Access Management (IAM) roles to associate with DB instance. - type: list - elements: dict - suboptions: - feature_name: - description: - - The name of the feature associated with the IAM role. - type: str - required: true - role_arn: - description: - - The ARN of the IAM role to associate with the DB instance. - type: str - required: true - version_added: 3.3.0 - iops: - description: - - The Provisioned IOPS (I/O operations per second) value. Only set when I(storage_type) is set to C(io1). - type: int - kms_key_id: - description: - - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the - same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key - alias instead of the ARN for the KMS encryption key.
- If I(storage_encrypted) is true and this option is not provided, the default encryption key is used. - type: str - license_model: - description: - - The license model for the DB instance. - - Several options are license-included, bring-your-own-license, and general-public-license. - - This option can also be omitted to default to an accepted value. - type: str - master_user_password: - description: - - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except "/", '"', or "@". To modify the password use I(force_update_password). Use I(apply_immediately) to change - the password immediately, otherwise it is updated during the next maintenance window. - aliases: - - password - type: str - master_username: - description: - - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter. - aliases: - - username - type: str - max_allocated_storage: - description: - - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance. - type: int - monitoring_interval: - description: - - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting - metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. - type: int - monitoring_role_arn: - description: - - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. - type: str - multi_az: - description: - - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone). - type: bool - new_db_instance_identifier: - description: - - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain - from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or - contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the - next maintenance window. - aliases: - - new_instance_id - - new_id - type: str - option_group_name: - description: - - The option group to associate with the DB instance. - type: str - performance_insights_kms_key_id: - description: - - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data. - type: str - performance_insights_retention_period: - description: - - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731. - type: int - port: - description: - - The port number on which the instances accept connections. - type: int - preferred_backup_window: - description: - - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are - enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with - I(preferred_maintenance_window). - aliases: - - backup_window - type: str - preferred_maintenance_window: - description: - - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must - be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun. - aliases: - - maintenance_window - type: str - processor_features: - description: - - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the - DB instance class of the DB instance.
Names are threadsPerCore and coreCount. - Set this option to an empty dictionary to use the default processor features. - suboptions: - threadsPerCore: - description: The number of threads per core. - coreCount: - description: The number of CPU cores. - type: dict - promotion_tier: - description: - - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of - the existing primary instance. - type: str - publicly_accessible: - description: - - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with - a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal - instance with a DNS name that resolves to a private IP address. - type: bool - purge_iam_roles: - description: - - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance. - type: bool - default: False - version_added: 3.3.0 - restore_time: - description: - - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. - For example, "2009-09-07T23:45:00Z". - - May alternatively set I(use_latest_restorable_time=True). - - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided. - type: str - s3_bucket_name: - description: - - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance. - type: str - s3_ingestion_role_arn: - description: - - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access - the Amazon S3 bucket on your behalf. - type: str - s3_prefix: - description: - - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not - specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket. - type: str - skip_final_snapshot: - description: - - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier) - must be provided. - type: bool - default: false - source_db_instance_identifier: - description: - - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time - DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN. - type: str - source_engine: - description: - - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. - choices: - - mysql - type: str - source_engine_version: - description: - - The version of the database that the backup files were created from. - type: str - source_region: - description: - - The region of the DB instance from which the replica is created. - type: str - storage_encrypted: - description: - - Whether the DB instance is encrypted. - type: bool - storage_type: - description: - - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances. - choices: - - standard - - gp2 - - io1 - type: str - tde_credential_arn: - description: - - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is - supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted) - though it might slightly affect the performance of your database.
- aliases: - - transparent_data_encryption_arn - type: str - tde_credential_password: - description: - - The password for the given ARN from the key store in order to access the device. - aliases: - - transparent_data_encryption_password - type: str - timezone: - description: - - The time zone of the DB instance. - type: str - use_latest_restorable_time: - description: - - Whether to restore the DB instance to the latest restorable backup time. - - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided. - type: bool - aliases: - - restore_from_latest - vpc_security_group_ids: - description: - - A list of EC2 VPC security groups to associate with the DB instance. - type: list - elements: str - purge_security_groups: - description: - - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance. - - Can be applied to I(vpc_security_group_ids) and I(db_security_groups) - type: bool - default: True - version_added: 1.5.0 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. -- name: create minimal aurora instance in default VPC and default subnet group - community.aws.rds_instance: - engine: aurora - db_instance_identifier: ansible-test-aurora-db-instance - instance_type: db.t2.small - password: "{{ password }}" - username: "{{ username }}" - cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it - -- name: Create a DB instance using the default AWS KMS encryption key - community.aws.rds_instance: - id: test-encrypted-db - state: present - engine: mariadb - storage_encrypted: True - db_instance_class: db.t2.medium - username: "{{ username }}" - password: "{{ password }}" - allocated_storage: "{{ allocated_storage }}" - -- name: remove the DB instance without a final snapshot - community.aws.rds_instance: - id: "{{ instance_id }}" - state: absent - skip_final_snapshot: True - -- name: remove the DB instance with a final snapshot - community.aws.rds_instance: - id: "{{ instance_id }}" - state: absent - final_snapshot_identifier: "{{ snapshot_id }}" - -- name: Add a new security group without purge - community.aws.rds_instance: - id: "{{ instance_id }}" - state: present - vpc_security_group_ids: - - sg-0be17ba10c9286b0b - purge_security_groups: false - register: result - -# Add IAM role to db instance -- name: Create IAM policy - community.aws.iam_managed_policy: - policy_name: "my-policy" - policy: "{{ lookup('file','files/policy.json') }}" - state: present - register: iam_policy - -- name: Create IAM role - community.aws.iam_role: - assume_role_policy_document: "{{ lookup('file','files/assume_policy.json') }}" - name: "my-role" - state: present - managed_policy: "{{ iam_policy.policy.arn }}" - register: iam_role - -- name: Create DB instance with added IAM role - community.aws.rds_instance: - id: "my-instance-id" - state: present - engine: postgres - engine_version: 14.2 - username: "{{ username }}" - password: "{{ password }}" - db_instance_class: db.m6g.large - allocated_storage: "{{ allocated_storage }}" - iam_roles: - - role_arn: "{{ iam_role.arn }}" - feature_name: 's3Export' - -- name: Remove IAM role from DB instance - community.aws.rds_instance: - id: "my-instance-id" - state: present - purge_iam_roles: true - -# Restore DB instance from snapshot -- name: Create a snapshot and wait until completion - community.aws.rds_instance_snapshot: - instance_id: 'my-instance-id' - snapshot_id: 'my-new-snapshot' - state: 
present - wait: true - register: snapshot - -- name: Restore DB from snapshot - community.aws.rds_instance: - id: 'my-restored-db' - creation_source: snapshot - snapshot_identifier: 'my-new-snapshot' - engine: mariadb - state: present - register: restored_db -''' - -RETURN = r''' -allocated_storage: - description: The allocated storage size in gigabytes. This is always 1 for aurora database engines. - returned: always - type: int - sample: 20 -associated_roles: - description: The list of currently associated roles. - returned: always - type: list - sample: [] -auto_minor_version_upgrade: - description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. - returned: always - type: bool - sample: true -availability_zone: - description: The availability zone for the DB instance. - returned: always - type: str - sample: us-east-1f -backup_retention_period: - description: The number of days for which automated backups are retained. - returned: always - type: int - sample: 1 -ca_certificate_identifier: - description: The identifier of the CA certificate for the DB instance. - returned: always - type: str - sample: rds-ca-2015 -copy_tags_to_snapshot: - description: Whether tags are copied from the DB instance to snapshots of the DB instance. - returned: always - type: bool - sample: false -db_instance_arn: - description: The Amazon Resource Name (ARN) for the DB instance. - returned: always - type: str - sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test -db_instance_class: - description: The name of the compute and memory capacity class of the DB instance. - returned: always - type: str - sample: db.m4.large -db_instance_identifier: - description: The identifier of the DB instance. - returned: always - type: str - sample: ansible-test -db_instance_port: - description: The port that the DB instance listens on. - returned: always - type: int - sample: 0 -db_instance_status: - description: The current state of this database. - returned: always - type: str - sample: stopped -db_parameter_groups: - description: The list of DB parameter groups applied to this DB instance. - returned: always - type: complex - contains: - db_parameter_group_name: - description: The name of the DB parameter group. - returned: always - type: str - sample: default.mariadb10.0 - parameter_apply_status: - description: The status of parameter updates. - returned: always - type: str - sample: in-sync -db_security_groups: - description: A list of DB security groups associated with this DB instance. - returned: always - type: list - sample: [] -db_subnet_group: - description: The subnet group associated with the DB instance. - returned: always - type: complex - contains: - db_subnet_group_description: - description: The description of the DB subnet group. - returned: always - type: str - sample: default - db_subnet_group_name: - description: The name of the DB subnet group. - returned: always - type: str - sample: default - subnet_group_status: - description: The status of the DB subnet group. - returned: always - type: str - sample: Complete - subnets: - description: A list of Subnet elements. - returned: always - type: complex - contains: - subnet_availability_zone: - description: The availability zone of the subnet. - returned: always - type: complex - contains: - name: - description: The name of the Availability Zone. - returned: always - type: str - sample: us-east-1c - subnet_identifier: - description: The ID of the subnet.
- returned: always - type: str - sample: subnet-12345678 - subnet_status: - description: The status of the subnet. - returned: always - type: str - sample: Active - vpc_id: - description: The VpcId of the DB subnet group. - returned: always - type: str - sample: vpc-12345678 -dbi_resource_id: - description: The AWS Region-unique, immutable identifier for the DB instance. - returned: always - type: str - sample: db-UHV3QRNWX4KB6GALCIGRML6QFA -deletion_protection: - description: C(True) if the DB instance has deletion protection enabled, C(False) if not. - returned: always - type: bool - sample: False - version_added: 3.3.0 -domain_memberships: - description: The Active Directory Domain membership records associated with the DB instance. - returned: always - type: list - sample: [] -endpoint: - description: The connection endpoint. - returned: always - type: complex - contains: - address: - description: The DNS address of the DB instance. - returned: always - type: str - sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com - hosted_zone_id: - description: The ID that Amazon Route 53 assigns when you create a hosted zone. - returned: always - type: str - sample: ZTR2ITUGPA61AM - port: - description: The port that the database engine is listening on. - returned: always - type: int - sample: 3306 -engine: - description: The database engine. - returned: always - type: str - sample: mariadb -engine_version: - description: The database engine version. - returned: always - type: str - sample: 10.0.35 -iam_database_authentication_enabled: - description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. - returned: always - type: bool - sample: false -instance_create_time: - description: The date and time the DB instance was created. - returned: always - type: str - sample: '2018-07-04T16:48:35.332000+00:00' -kms_key_id: - description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true. - returned: When storage_encrypted is true - type: str - sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33 -latest_restorable_time: - description: The latest time to which a database can be restored with point-in-time restore. - returned: always - type: str - sample: '2018-07-04T16:50:50.642000+00:00' -license_model: - description: The License model information for this DB instance. - returned: always - type: str - sample: general-public-license -master_username: - description: The master username for the DB instance. - returned: always - type: str - sample: test -max_allocated_storage: - description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance. - returned: When max allocated storage is present. - type: int - sample: 100 -monitoring_interval: - description: - - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. - 0 means collecting Enhanced Monitoring metrics is disabled. - returned: always - type: int - sample: 0 -multi_az: - description: Whether the DB instance is a Multi-AZ deployment. - returned: always - type: bool - sample: false -option_group_memberships: - description: The list of option group memberships for this DB instance. - returned: always - type: complex - contains: - option_group_name: - description: The name of the option group that the instance belongs to.
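The I(endpoint), I(engine) and I(engine_version) values documented here come straight from C(DescribeDBInstances); a hedged sketch of reading the same fields with plain boto3, using a placeholder instance identifier:

    import boto3

    client = boto3.client('rds')
    db = client.describe_db_instances(DBInstanceIdentifier='ansible-test')['DBInstances'][0]

    # The module returns these same fields, converted to snake_case
    print(db['Engine'], db['EngineVersion'])
    print('%s:%s' % (db['Endpoint']['Address'], db['Endpoint']['Port']))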
- returned: always - type: str - sample: default:mariadb-10-0 - status: - description: The status of the DB instance's option group membership. - returned: always - type: str - sample: in-sync -pending_modified_values: - description: The changes to the DB instance that are pending. - returned: always - type: complex - contains: {} -performance_insights_enabled: - description: True if Performance Insights is enabled for the DB instance, and otherwise false. - returned: always - type: bool - sample: false -preferred_backup_window: - description: The daily time range during which automated backups are created if automated backups are enabled. - returned: always - type: str - sample: 07:01-07:31 -preferred_maintenance_window: - description: The weekly time range (in UTC) during which system maintenance can occur. - returned: always - type: str - sample: sun:09:31-sun:10:01 -publicly_accessible: - description: - - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an - internal instance with a DNS name that resolves to a private IP address. - returned: always - type: bool - sample: true -read_replica_db_instance_identifiers: - description: Identifiers of the Read Replicas associated with this DB instance. - returned: always - type: list - sample: [] -storage_encrypted: - description: Whether the DB instance is encrypted. - returned: always - type: bool - sample: false -storage_type: - description: The storage type to be associated with the DB instance. - returned: always - type: str - sample: standard -tags: - description: A dictionary of tags associated with the DB instance. - returned: always - type: complex - contains: {} -vpc_security_groups: - description: A list of VPC security group elements that the DB instance belongs to. - returned: always - type: complex - contains: - status: - description: The status of the VPC security group. - returned: always - type: str - sample: active - vpc_security_group_id: - description: The name of the VPC security group. 
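Every key in this RETURN block is the boto3 response converted to snake_case. A self-contained sketch of that conversion using the same helper the module imports, run here on a hard-coded fragment so it needs no AWS credentials:

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

    raw = {
        'DBInstanceIdentifier': 'ansible-test',
        'Endpoint': {'Address': 'ansible-test.example.rds.amazonaws.com', 'Port': 3306},
        'MultiAZ': False,
    }
    print(camel_dict_to_snake_dict(raw))
    # -> {'db_instance_identifier': ..., 'endpoint': {'address': ..., 'port': 3306}, 'multi_az': False}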
- returned: always - type: str - sample: sg-12345678 -''' - -from time import sleep - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - - -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.six import string_types - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params -from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -from ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_identifier -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles - - -valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', - 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] - -valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb', - 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] - - -def get_rds_method_attribute_name(instance, state, creation_source, read_replica): - method_name = None - if state == 'absent' or state == 'terminated': - if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']: - method_name = 'delete_db_instance' - else: - if instance: - method_name = 'modify_db_instance' - elif read_replica is True: - method_name = 'create_db_instance_read_replica' - elif creation_source == 'snapshot': - method_name = 'restore_db_instance_from_db_snapshot' - elif creation_source == 's3': - method_name = 'restore_db_instance_from_s3' - elif creation_source == 'instance': - method_name = 'restore_db_instance_to_point_in_time' - else: - method_name = 'create_db_instance' - return method_name - - -def get_instance(client, module, db_instance_id): - try: - for i in range(3): - try: - instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0] - instance['Tags'] = get_tags(client, module, instance['DBInstanceArn']) - if instance.get('ProcessorFeatures'): - instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures']) - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - instance['PendingModifiedValues']['ProcessorFeatures'] = dict( - (feature['Name'], feature['Value']) - for feature in instance['PendingModifiedValues']['ProcessorFeatures'] - ) - break - 
except is_boto3_error_code('DBInstanceNotFound'): - sleep(3) - else: - instance = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe DB instances') - return instance - - -def get_final_snapshot(client, module, snapshot_identifier): - try: - snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier) - if len(snapshots.get('DBSnapshots', [])) == 1: - return snapshots['DBSnapshots'][0] - return {} - except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') - - -def get_parameters(client, module, parameters, method_name): - if method_name == 'restore_db_instance_to_point_in_time': - parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] - - required_options = get_boto3_client_method_parameters(client, method_name, required=True) - if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) - options = get_boto3_client_method_parameters(client, method_name) - parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) - - if parameters.get('ProcessorFeatures') is not None: - parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()] - - # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures) - if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': - parameters.pop('ProcessorFeatures') - - if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']: - if parameters.get('Tags'): - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) - - if method_name == 'modify_db_instance': - parameters = get_options_with_changing_values(client, module, parameters) - - return parameters - - -def get_options_with_changing_values(client, module, parameters): - instance_id = module.params['db_instance_identifier'] - purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports'] - force_update_password = module.params['force_update_password'] - port = module.params['port'] - apply_immediately = parameters.pop('ApplyImmediately', None) - cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] - purge_security_groups = module.params['purge_security_groups'] - - if port: - parameters['DBPortNumber'] = port - if not force_update_password: - parameters.pop('MasterUserPassword', None) - if cloudwatch_logs_enabled: - parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled - if not module.params['storage_type']: - parameters.pop('Iops', None) - - instance = get_instance(client, module, instance_id) - updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups) - updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) - parameters = updated_parameters - - if instance.get('StorageType') == 'io1': - # Bundle 
Iops and AllocatedStorage while updating io1 RDS Instance - current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) - current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) - new_iops = module.params.get('iops') - new_allocated_storage = module.params.get('allocated_storage') - - if current_iops != new_iops or current_allocated_storage != new_allocated_storage: - parameters['AllocatedStorage'] = new_allocated_storage - parameters['Iops'] = new_iops - - if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): - if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: - parameters.pop('NewDBInstanceIdentifier') - - if parameters: - parameters['DBInstanceIdentifier'] = instance_id - if apply_immediately is not None: - parameters['ApplyImmediately'] = apply_immediately - - return parameters - - -def get_current_attributes_with_inconsistent_keys(instance): - options = {} - if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []): - current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable'] - current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable'] - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled} - else: - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []} - if instance.get('PendingModifiedValues', {}).get('Port'): - options['DBPortNumber'] = instance['PendingModifiedValues']['Port'] - else: - options['DBPortNumber'] = instance['Endpoint']['Port'] - if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'): - options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName'] - else: - options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName'] - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures'] - else: - options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {}) - options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']] - options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] - options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] - # PerformanceInsightsEnabled is not returned on older RDS instances it seems - options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False) - options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] - - # Neither of these are returned via describe_db_instances, so if either is specified during a check_mode run, changed=True - options['AllowMajorVersionUpgrade'] = None - options['MasterUserPassword'] = None - - return options - - -def get_changing_options_with_inconsistent_keys(modify_params, 
instance, purge_cloudwatch_logs, purge_security_groups): - changing_params = {} - current_options = get_current_attributes_with_inconsistent_keys(instance) - for option in current_options: - current_option = current_options[option] - desired_option = modify_params.pop(option, None) - if desired_option is None: - continue - - # TODO: allow other purge_option module parameters rather than just checking for things to add - if isinstance(current_option, list): - if isinstance(desired_option, list): - if ( - set(desired_option) < set(current_option) and - option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups - ): - changing_params[option] = desired_option - elif set(desired_option) <= set(current_option): - continue - elif isinstance(desired_option, string_types): - if desired_option in current_option: - continue - - # Current option and desired option are the same - continue loop - if option != 'ProcessorFeatures' and current_option == desired_option: - continue - - if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'): - continue - - # Current option and desired option are different - add to changing_params list - if option == 'ProcessorFeatures' and desired_option == []: - changing_params['UseDefaultProcessorFeatures'] = True - elif option == 'CloudwatchLogsExportConfiguration': - current_option = set(current_option.get('LogTypesToEnable', [])) - desired_option = set(desired_option) - format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} - format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) - if purge_cloudwatch_logs: - format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) - if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: - changing_params[option] = format_option - elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',): - if purge_security_groups: - changing_params[option] = desired_option - else: - changing_params[option] = list(set(current_option) | set(desired_option)) - else: - changing_params[option] = desired_option - - return changing_params - - -def get_changing_options_with_consistent_keys(modify_params, instance): - changing_params = {} - - for param in modify_params: - current_option = instance.get('PendingModifiedValues', {}).get(param, None) - if current_option is None: - current_option = instance.get(param, None) - if modify_params[param] != current_option: - changing_params[param] = modify_params[param] - - return changing_params - - -def validate_options(client, module, instance): - state = module.params['state'] - skip_final_snapshot = module.params['skip_final_snapshot'] - snapshot_id = module.params['final_db_snapshot_identifier'] - modified_id = module.params['new_db_instance_identifier'] - engine = module.params['engine'] - tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) - read_replica = module.params['read_replica'] - creation_source = module.params['creation_source'] - source_instance = module.params['source_db_instance_identifier'] - if module.params['source_region'] is not None: - same_region = bool(module.params['source_region'] == module.params['region']) - else: - same_region = True - - if modified_id: - modified_instance = get_instance(client, module, modified_id) - else: - modified_instance = {} - - if modified_id and instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but it already 
exists'.format(modified_id)) - if modified_id and not instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id)) - if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: - module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier') - if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: - module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') - if read_replica is True and not instance and creation_source not in [None, 'instance']: - module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) - if read_replica is True and not instance and not source_instance: - module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') - - -def update_instance(client, module, instance, instance_id): - changed = False - - # Get newly created DB instance - if not instance: - instance = get_instance(client, module, instance_id) - - # Check tagging/promoting/rebooting/starting/stopping instance - changed |= ensure_tags( - client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] - ) - changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) - changed |= update_instance_state(client, module, instance, module.params['state']) - - return changed - - -def promote_replication_instance(client, module, instance, read_replica): - changed = False - if read_replica is False: - # 'StatusInfos' only exists when the instance is a read replica - # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html - if bool(instance.get('StatusInfos')): - try: - result, changed = call_method(client, module, method_name='promote_read_replica', - parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) - except is_boto3_error_message('DB Instance is not a read replica'): - pass - return changed - - -def ensure_iam_roles(client, module, instance_id): - ''' - Ensure specified IAM roles are associated with DB instance - - Parameters: - client: RDS client - module: AWSModule - instance_id: DB's instance ID - - Returns: - changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not - ''' - instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures']) - - # Ensure engine type supports associating IAM roles - engine = instance.get('engine') - if engine not in valid_engines_iam_roles: - module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. 
Valid engines are {1}'.format(engine, valid_engines_iam_roles)) - - changed = False - purge_iam_roles = module.params.get('purge_iam_roles') - target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else [] - existing_roles = instance.get('associated_roles', []) - roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles) - if bool(roles_to_add or roles_to_remove): - changed = True - # Don't update on check_mode - if module.check_mode: - module.exit_json(changed=changed, **instance) - else: - update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove) - return changed - - -def update_instance_state(client, module, instance, state): - changed = False - if state in ['rebooted', 'restarted']: - changed |= reboot_running_db_instance(client, module, instance) - if state in ['started', 'running', 'stopped']: - changed |= start_or_stop_instance(client, module, instance, state) - return changed - - -def reboot_running_db_instance(client, module, instance): - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if instance['DBInstanceStatus'] in ['stopped', 'stopping']: - call_method(client, module, 'start_db_instance', parameters) - if module.params.get('force_failover') is not None: - parameters['ForceFailover'] = module.params['force_failover'] - results, changed = call_method(client, module, 'reboot_db_instance', parameters) - return changed - - -def start_or_stop_instance(client, module, instance, state): - changed = False - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']: - if module.params['db_snapshot_identifier']: - parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] - result, changed = call_method(client, module, 'stop_db_instance', parameters) - elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: - result, changed = call_method(client, module, 'start_db_instance', parameters) - return changed - - -def main(): - arg_spec = dict( - state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), - creation_source=dict(choices=['snapshot', 's3', 'instance']), - force_update_password=dict(type='bool', default=False, no_log=False), - purge_cloudwatch_logs_exports=dict(type='bool', default=True), - purge_iam_roles=dict(type='bool', default=False), - purge_tags=dict(type='bool', default=True), - read_replica=dict(type='bool'), - wait=dict(type='bool', default=True), - purge_security_groups=dict(type='bool', default=True), - ) - - parameter_options = dict( - allocated_storage=dict(type='int'), - allow_major_version_upgrade=dict(type='bool'), - apply_immediately=dict(type='bool', default=False), - auto_minor_version_upgrade=dict(type='bool'), - availability_zone=dict(aliases=['az', 'zone']), - backup_retention_period=dict(type='int'), - ca_certificate_identifier=dict(), - character_set_name=dict(), - copy_tags_to_snapshot=dict(type='bool'), - db_cluster_identifier=dict(aliases=['cluster_id']), - db_instance_class=dict(aliases=['class', 'instance_type']), - db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), - db_name=dict(), - db_parameter_group_name=dict(), - db_security_groups=dict(type='list', elements='str'), - db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']), - 
db_subnet_group_name=dict(aliases=['subnet_group']), - deletion_protection=dict(type='bool'), - domain=dict(), - domain_iam_role_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'), - enable_iam_database_authentication=dict(type='bool'), - enable_performance_insights=dict(type='bool'), - engine=dict(type='str', choices=valid_engines), - engine_version=dict(), - final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), - force_failover=dict(type='bool'), - iam_roles=dict(type='list', elements='dict'), - iops=dict(type='int'), - kms_key_id=dict(), - license_model=dict(), - master_user_password=dict(aliases=['password'], no_log=True), - master_username=dict(aliases=['username']), - max_allocated_storage=dict(type='int'), - monitoring_interval=dict(type='int'), - monitoring_role_arn=dict(), - multi_az=dict(type='bool'), - new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']), - option_group_name=dict(), - performance_insights_kms_key_id=dict(), - performance_insights_retention_period=dict(type='int'), - port=dict(type='int'), - preferred_backup_window=dict(aliases=['backup_window']), - preferred_maintenance_window=dict(aliases=['maintenance_window']), - processor_features=dict(type='dict'), - promotion_tier=dict(), - publicly_accessible=dict(type='bool'), - restore_time=dict(), - s3_bucket_name=dict(), - s3_ingestion_role_arn=dict(), - s3_prefix=dict(), - skip_final_snapshot=dict(type='bool', default=False), - source_db_instance_identifier=dict(), - source_engine=dict(choices=['mysql']), - source_engine_version=dict(), - source_region=dict(), - storage_encrypted=dict(type='bool'), - storage_type=dict(choices=['standard', 'gp2', 'io1']), - tags=dict(type='dict', aliases=['resource_tags']), - tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']), - tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), - timezone=dict(), - use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']), - vpc_security_group_ids=dict(type='list', elements='str') - ) - arg_spec.update(parameter_options) - - required_if = [ - ('engine', 'aurora', ('db_cluster_identifier',)), - ('engine', 'aurora-mysql', ('db_cluster_identifier',)), - ('engine', 'aurora-postgresql', ('db_cluster_identifier',)), - ('storage_type', 'io1', ('iops', 'allocated_storage')), - ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')), - ('creation_source', 's3', ( - 's3_bucket_name', 'engine', 'master_username', 'master_user_password', - 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), - ] - mutually_exclusive = [ - ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'), - ('use_latest_restorable_time', 'restore_time'), - ('availability_zone', 'multi_az'), - ] - - module = AnsibleAWSModule( - argument_spec=arg_spec, - required_if=required_if, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True - ) - - # Sanitize instance identifiers - module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower() - if module.params['new_db_instance_identifier']: - module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower() - - # Sanitize processor features - if module.params['processor_features'] is not None: - module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items()) -
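Further down, main() feeds the collected snake_case options through C(arg_spec_to_rds_params) to build the CamelCase parameters boto3 expects. A rough, standalone approximation of that mapping; the real helper in amazon.aws also corrects AWS initialisms (C(DB), C(IAM), C(AZ)) that this naive version gets wrong:

    def snake_to_camel_params(params):
        # Naive snake_case -> CamelCase conversion, for illustration only.
        # Note it yields 'DbInstanceIdentifier' where boto3 wants
        # 'DBInstanceIdentifier' - exactly the fix-up the real helper performs.
        return {
            ''.join(part.capitalize() for part in key.split('_')): value
            for key, value in params.items()
            if value is not None
        }

    print(snake_to_camel_params({'db_instance_identifier': 'demo', 'allocated_storage': 20}))
    # -> {'DbInstanceIdentifier': 'demo', 'AllocatedStorage': 20}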
# Ensure day names in the maintenance window are lowercase - if module.params['preferred_maintenance_window']: - module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower() - - # Warn when allow_major_version_upgrade is specified in check_mode - # describe_rds_instance never returns this value, so on check_mode, it will always return changed=True - # In non-check mode runs, changed will return the correct value, so no need to warn there. - # see: amazon.aws.module_utils.rds.handle_errors. - if module.params.get('allow_major_version_upgrade') and module.check_mode: - module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.') - - client = module.client('rds') - changed = False - state = module.params['state'] - instance_id = module.params['db_instance_identifier'] - instance = get_instance(client, module, instance_id) - validate_options(client, module, instance) - method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica']) - - if method_name: - - # Exit on create/delete if check_mode - if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']: - module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) - - raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) - parameters_to_modify = get_parameters(client, module, raw_parameters, method_name) - - if parameters_to_modify: - # Exit on check_mode when parameters to modify - if module.check_mode: - module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) - result, changed = call_method(client, module, method_name, parameters_to_modify) - - instance_id = get_final_identifier(method_name, module) - - if state != 'absent': - # Check tagging/promoting/rebooting/starting/stopping instance - if not module.check_mode or instance: - changed |= update_instance(client, module, instance, instance_id) - - # Check IAM roles - if module.params.get('iam_roles') or module.params.get('purge_iam_roles'): - changed |= ensure_iam_roles(client, module, instance_id) - - if changed: - instance = get_instance(client, module, instance_id) - if state != 'absent' and (instance or not module.check_mode): - for attempt_to_wait in range(0, 10): - instance = get_instance(client, module, instance_id) - if instance: - break - else: - sleep(5) - - if state == 'absent' and changed and not module.params['skip_final_snapshot']: - instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier'])) - - pending_processor_features = None - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures') - instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']) - if pending_processor_features is not None: - instance['pending_modified_values']['processor_features'] = pending_processor_features - - module.exit_json(changed=changed, **instance) - - -if __name__ == '__main__': - main() diff --git a/rds_instance_info.py b/rds_instance_info.py deleted file mode 100644 index e26e0f680a6..00000000000 --- a/rds_instance_info.py +++ /dev/null @@ -1,421 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017, 2018 Michael De La Rue -# Copyright (c) 2017, 2018 Will Thames -# Copyright (c)
2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rds_instance_info -version_added: 1.0.0 -short_description: obtain information about one or more RDS instances -description: - - Obtain information about one or more RDS instances. -options: - db_instance_identifier: - description: - - The RDS instance's unique identifier. - required: false - aliases: - - id - type: str - filters: - description: - - A filter that specifies one or more DB instances to describe. - See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) - type: dict -author: - - "Will Thames (@willthames)" - - "Michael De La Rue (@mikedlr)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -- name: Get information about an instance - community.aws.rds_instance_info: - db_instance_identifier: new-database - register: new_database_info - -- name: Get all RDS instances - community.aws.rds_instance_info: -''' - -RETURN = ''' -instances: - description: List of RDS instances - returned: always - type: complex - contains: - allocated_storage: - description: Gigabytes of storage allocated to the database - returned: always - type: int - sample: 10 - auto_minor_version_upgrade: - description: Whether minor version upgrades happen automatically - returned: always - type: bool - sample: true - availability_zone: - description: Availability Zone in which the database resides - returned: always - type: str - sample: us-west-2b - backup_retention_period: - description: Days for which backups are retained - returned: always - type: int - sample: 7 - ca_certificate_identifier: - description: ID for the CA certificate - returned: always - type: str - sample: rds-ca-2015 - copy_tags_to_snapshot: - description: Whether DB tags should be copied to the snapshot - returned: always - type: bool - sample: false - db_instance_arn: - description: ARN of the database instance - returned: always - type: str - sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds - db_instance_class: - description: Instance class of the database instance - returned: always - type: str - sample: db.t2.small - db_instance_identifier: - description: Database instance identifier - returned: always - type: str - sample: helloworld-rds - db_instance_port: - description: Port used by the database instance - returned: always - type: int - sample: 0 - db_instance_status: - description: Status of the database instance - returned: always - type: str - sample: available - db_name: - description: Name of the database - returned: always - type: str - sample: management - db_parameter_groups: - description: List of database parameter groups - returned: always - type: complex - contains: - db_parameter_group_name: - description: Name of the database parameter group - returned: always - type: str - sample: psql-pg-helloworld - parameter_apply_status: - description: Whether the parameter group has been applied - returned: always - type: str - sample: in-sync - db_security_groups: - description: List of security groups used by the database instance - returned: always - type: list - sample: [] - db_subnet_group: - description: list of subnet groups - returned: always - type: complex - contains: - db_subnet_group_description: - description: Description of the DB subnet group - returned: always - type: str - sample: 
My database subnet group - db_subnet_group_name: - description: Name of the database subnet group - returned: always - type: str - sample: my-subnet-group - subnet_group_status: - description: Subnet group status - returned: always - type: str - sample: Complete - subnets: - description: List of subnets in the subnet group - returned: always - type: complex - contains: - subnet_availability_zone: - description: Availability zone of the subnet - returned: always - type: complex - contains: - name: - description: Name of the availability zone - returned: always - type: str - sample: us-west-2c - subnet_identifier: - description: Subnet ID - returned: always - type: str - sample: subnet-abcd1234 - subnet_status: - description: Subnet status - returned: always - type: str - sample: Active - vpc_id: - description: VPC id of the subnet group - returned: always - type: str - sample: vpc-abcd1234 - dbi_resource_id: - description: AWS Region-unique, immutable identifier for the DB instance - returned: always - type: str - sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA - deletion_protection: - description: C(True) if the DB instance has deletion protection enabled, C(False) if not. - returned: always - type: bool - sample: False - version_added: 3.3.0 - domain_memberships: - description: List of domain memberships - returned: always - type: list - sample: [] - endpoint: - description: Database endpoint - returned: always - type: complex - contains: - address: - description: Database endpoint address - returned: always - type: str - sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com - hosted_zone_id: - description: Route53 hosted zone ID - returned: always - type: str - sample: Z1PABCD0000000 - port: - description: Database endpoint port - returned: always - type: int - sample: 5432 - engine: - description: Database engine - returned: always - type: str - sample: postgres - engine_version: - description: Database engine version - returned: always - type: str - sample: 9.5.10 - iam_database_authentication_enabled: - description: Whether database authentication through IAM is enabled - returned: always - type: bool - sample: false - instance_create_time: - description: Date and time the instance was created - returned: always - type: str - sample: '2017-10-10T04:00:07.434000+00:00' - iops: - description: The Provisioned IOPS value for the DB instance. 
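The I(filters) option documented above is passed straight through to C(DescribeDBInstances); a hedged boto3 sketch of the equivalent call, with a placeholder filter value (C(engine) is one of the filter names the API accepts):

    import boto3

    client = boto3.client('rds')
    response = client.describe_db_instances(
        Filters=[{'Name': 'engine', 'Values': ['postgres']}]
    )
    for db in response['DBInstances']:
        print(db['DBInstanceIdentifier'], db['DBInstanceStatus'])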
- returned: always - type: int - sample: 1000 - kms_key_id: - description: KMS Key ID - returned: always - type: str - sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab - latest_restorable_time: - description: Latest time to which a database can be restored with point-in-time restore - returned: always - type: str - sample: '2018-05-17T00:03:56+00:00' - license_model: - description: License model - returned: always - type: str - sample: postgresql-license - master_username: - description: Database master username - returned: always - type: str - sample: dbadmin - monitoring_interval: - description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance - returned: always - type: int - sample: 0 - multi_az: - description: Whether Multi-AZ is on - returned: always - type: bool - sample: false - option_group_memberships: - description: List of option groups - returned: always - type: complex - contains: - option_group_name: - description: Option group name - returned: always - type: str - sample: default:postgres-9-5 - status: - description: Status of option group - returned: always - type: str - sample: in-sync - pending_modified_values: - description: Modified values pending application - returned: always - type: complex - contains: {} - performance_insights_enabled: - description: Whether performance insights are enabled - returned: always - type: bool - sample: false - preferred_backup_window: - description: Preferred backup window - returned: always - type: str - sample: 04:00-05:00 - preferred_maintenance_window: - description: Preferred maintenance window - returned: always - type: str - sample: mon:05:00-mon:05:30 - publicly_accessible: - description: Whether the DB is publicly accessible - returned: always - type: bool - sample: false - read_replica_db_instance_identifiers: - description: List of database instance read replicas - returned: always - type: list - sample: [] - storage_encrypted: - description: Whether the storage is encrypted - returned: always - type: bool - sample: true - storage_type: - description: Storage type of the Database instance - returned: always - type: str - sample: gp2 - tags: - description: Tags used by the database instance - returned: always - type: complex - contains: {} - vpc_security_groups: - description: List of VPC security groups - returned: always - type: complex - contains: - status: - description: Status of the VPC security group - returned: always - type: str - sample: active - vpc_security_group_id: - description: VPC Security Group ID - returned: always - type: str - sample: sg-abcd1234 -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - AWSRetry, - camel_dict_to_snake_dict, - ) - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - - -@AWSRetry.jittered_backoff() -def _describe_db_instances(conn, **params): - paginator = conn.get_paginator('describe_db_instances') - try: - results = paginator.paginate(**params).build_full_result()['DBInstances'] - except is_boto3_error_code('DBInstanceNotFound'): - results = [] - - return results - - -def instance_info(module, conn): - instance_name = module.params.get('db_instance_identifier') - filters = module.params.get('filters') - - params = dict() - if instance_name: - 
params['DBInstanceIdentifier'] = instance_name - if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) - - try: - results = _describe_db_instances(conn, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get instance information") - - for instance in results: - try: - instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], - aws_retry=True)['TagList']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier']) - - return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results]) - - -def main(): - argument_spec = dict( - db_instance_identifier=dict(aliases=['id']), - filters=dict(type='dict') - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - module.exit_json(**instance_info(module, conn)) - - -if __name__ == '__main__': - main() diff --git a/rds_instance_snapshot.py b/rds_instance_snapshot.py deleted file mode 100644 index e9430fa1af4..00000000000 --- a/rds_instance_snapshot.py +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2014 Ansible Project -# Copyright (c) 2017, 2018, 2019 Will Thames -# Copyright (c) 2017, 2018 Michael De La Rue -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_instance_snapshot -version_added: 1.0.0 -short_description: Manage Amazon RDS instance snapshots -description: - - Creates or deletes RDS snapshots. -options: - state: - description: - - Specify the desired state of the snapshot. - default: present - choices: [ 'present', 'absent'] - type: str - db_snapshot_identifier: - description: - - The snapshot to manage. - required: true - aliases: - - id - - snapshot_id - type: str - db_instance_identifier: - description: - - Database instance identifier. Required when creating a snapshot. - aliases: - - instance_id - type: str - source_db_snapshot_identifier: - description: - - The identifier of the source DB snapshot. - - Required when copying a snapshot. - - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier. - - If the source snapshot is in a different AWS region as the copy, specify the snapshot's ARN. - aliases: - - source_id - - source_snapshot_id - type: str - version_added: 3.3.0 - source_region: - description: - - The region that contains the snapshot to be copied. - type: str - version_added: 3.3.0 - copy_tags: - description: - - Whether to copy all tags from I(source_db_snapshot_identifier) to I(db_instance_identifier). - type: bool - default: False - version_added: 3.3.0 - wait: - description: - - Whether or not to wait for snapshot creation or deletion. - type: bool - default: False - wait_timeout: - description: - - how long before wait gives up, in seconds. 
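Under I(wait)/I(wait_timeout), this module family leans on boto3 waiters; a minimal sketch of waiting for snapshot availability with a polling budget roughly equal to the 300 second default above (identifier and polling interval are assumptions):

    import boto3

    client = boto3.client('rds')
    waiter = client.get_waiter('db_snapshot_available')
    # Poll every 15 seconds; 20 attempts ~= a 300 second wait_timeout
    waiter.wait(
        DBSnapshotIdentifier='new-database-snapshot',
        WaiterConfig={'Delay': 15, 'MaxAttempts': 20},
    )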
- default: 300 - type: int -author: - - "Will Thames (@willthames)" - - "Michael De La Rue (@mikedlr)" - - "Alina Buzachis (@alinabuzachis)" - - "Joseph Torcasso (@jatorcasso)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = r''' -- name: Create snapshot - community.aws.rds_instance_snapshot: - db_instance_identifier: new-database - db_snapshot_identifier: new-database-snapshot - register: snapshot - -- name: Copy snapshot from a different region and copy its tags - community.aws.rds_instance_snapshot: - id: new-database-snapshot-copy - region: us-east-1 - source_id: "{{ snapshot.db_snapshot_arn }}" - source_region: us-east-2 - copy_tags: true - -- name: Delete snapshot - community.aws.rds_instance_snapshot: - db_snapshot_identifier: new-database-snapshot - state: absent -''' - -RETURN = r''' -allocated_storage: - description: How much storage is allocated in GB. - returned: always - type: int - sample: 20 -availability_zone: - description: Availability zone of the database from which the snapshot was created. - returned: always - type: str - sample: us-west-2a -db_instance_identifier: - description: Database from which the snapshot was created. - returned: always - type: str - sample: ansible-test-16638696 -db_snapshot_arn: - description: Amazon Resource Name for the snapshot. - returned: always - type: str - sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot -db_snapshot_identifier: - description: Name of the snapshot. - returned: always - type: str - sample: ansible-test-16638696-test-snapshot -dbi_resource_id: - description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region. - returned: always - type: str - sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U -encrypted: - description: Whether the snapshot is encrypted. - returned: always - type: bool - sample: false -engine: - description: Engine of the database from which the snapshot was created. - returned: always - type: str - sample: mariadb -engine_version: - description: Version of the database from which the snapshot was created. - returned: always - type: str - sample: 10.2.21 -iam_database_authentication_enabled: - description: Whether IAM database authentication is enabled. - returned: always - type: bool - sample: false -instance_create_time: - description: Creation time of the instance from which the snapshot was created. - returned: always - type: str - sample: '2019-06-15T10:15:56.221000+00:00' -license_model: - description: License model of the database. - returned: always - type: str - sample: general-public-license -master_username: - description: Master username of the database. - returned: always - type: str - sample: test -option_group_name: - description: Option group of the database. - returned: always - type: str - sample: default:mariadb-10-2 -percent_progress: - description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot. - returned: always - type: int - sample: 100 -port: - description: Port on which the database is listening. - returned: always - type: int - sample: 3306 -processor_features: - description: List of processor features of the database. - returned: always - type: list - sample: [] -source_db_snapshot_identifier: - description: The DB snapshot ARN that the DB snapshot was copied from. 
- returned: when snapshot is a copy - type: str - sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot-source - version_added: 3.3.0 -snapshot_create_time: - description: Creation time of the snapshot. - returned: always - type: str - sample: '2019-06-15T10:46:23.776000+00:00' -snapshot_type: - description: How the snapshot was created (always manual for this module!). - returned: always - type: str - sample: manual -status: - description: Status of the snapshot. - returned: always - type: str - sample: available -storage_type: - description: Storage type of the database. - returned: always - type: str - sample: gp2 -tags: - description: Tags applied to the snapshot. - returned: always - type: complex - contains: {} -vpc_id: - description: ID of the VPC in which the DB lives. - returned: always - type: str - sample: vpc-09ff232e222710ae0 -''' - -try: - import botocore -except ImportError: - pass # protected by AnsibleAWSModule - -# import module snippets -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params -from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags - - -def get_snapshot(snapshot_id): - try: - snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0] - snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn']) - except is_boto3_error_code("DBSnapshotNotFound"): - return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) - return snapshot - - -def get_parameters(parameters, method_name): - if method_name == 'copy_db_snapshot': - parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] - - required_options = get_boto3_client_method_parameters(client, method_name, required=True) - if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) - options = get_boto3_client_method_parameters(client, method_name) - parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) - - return parameters - - -def ensure_snapshot_absent(): - snapshot_name = module.params.get("db_snapshot_identifier") - params = {"DBSnapshotIdentifier": snapshot_name} - changed = False - - snapshot = get_snapshot(snapshot_name) - if not snapshot: - module.exit_json(changed=changed) - elif snapshot and snapshot["Status"] != "deleting": - snapshot, changed = call_method(client, module, "delete_db_snapshot", params) - - 
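The deletion path above is idempotent: describe first, then delete only if the snapshot exists and is not already deleting. A standalone boto3 sketch of the same pattern, with error handling trimmed and a placeholder identifier:

    import boto3

    client = boto3.client('rds')
    name = 'new-database-snapshot'
    try:
        snap = client.describe_db_snapshots(DBSnapshotIdentifier=name)['DBSnapshots'][0]
    except client.exceptions.DBSnapshotNotFoundFault:
        snap = None  # already gone: nothing to do, changed stays False

    if snap and snap['Status'] != 'deleting':
        client.delete_db_snapshot(DBSnapshotIdentifier=name)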
module.exit_json(changed=changed) - - -def ensure_snapshot_present(params): - source_id = module.params.get('source_db_snapshot_identifier') - snapshot_name = module.params.get('db_snapshot_identifier') - changed = False - snapshot = get_snapshot(snapshot_name) - - # Copy snapshot - if source_id: - changed |= copy_snapshot(params) - - # Create snapshot - elif not snapshot: - changed |= create_snapshot(params) - - # Snapshot exists and we're not creating a copy - modify existing snapshot - else: - changed |= modify_snapshot() - - snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) - - -def create_snapshot(params): - method_params = get_parameters(params, 'create_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params) - - return changed - - -def copy_snapshot(params): - changed = False - snapshot_id = module.params.get('db_snapshot_identifier') - snapshot = get_snapshot(snapshot_id) - - if not snapshot: - method_params = get_parameters(params, 'copy_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_snapshot', method_params) - - return changed - - -def modify_snapshot(): - # TODO - add other modifications aside from purely tags - changed = False - snapshot_id = module.params.get('db_snapshot_identifier') - snapshot = get_snapshot(snapshot_id) - - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) - - return changed - - -def main(): - global client - global module - - argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), - db_instance_identifier=dict(aliases=['instance_id']), - source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - try: - client = module.client('rds', retry_decorator=retry_decorator) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS.") - - state = module.params.get("state") - if state == 'absent': - ensure_snapshot_absent() - - elif state == 'present': - params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) - ensure_snapshot_present(params) - - -if __name__ == '__main__': - main() From c3e99d5e0f08fb35b4d6fabb9812c8029ee5b558 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Fri, 23 Sep 2022 03:44:14 -0400 Subject: [PATCH 585/683] Promote kms_key* modules and tests (#1520) Promote kms_key* modules and tests SUMMARY Remove kms_key* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files ISSUE TYPE Bugfix Pull Request Docs Pull
Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- kms_key.py | 993 ------------------------------------------------ kms_key_info.py | 525 ------------------------- 2 files changed, 1518 deletions(-) delete mode 100644 kms_key.py delete mode 100644 kms_key_info.py diff --git a/kms_key.py b/kms_key.py deleted file mode 100644 index 2f664bbafff..00000000000 --- a/kms_key.py +++ /dev/null @@ -1,993 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -* -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: kms_key -version_added: 1.0.0 -short_description: Perform various KMS key management tasks -description: - - Manage role/user access to a KMS key. - - Not designed for encrypting/decrypting. - - Prior to release 5.0.0 this module was called C(community.aws.aws_kms). - The usage did not change. -options: - alias: - description: - - An alias for a key. - - For safety, even though KMS does not require keys to have an alias, this module expects all - new keys to be given an alias to make them easier to manage. Existing keys without an alias - may be referred to by I(key_id). Use M(community.aws.kms_key_info) to find key ids. - - Note that passing a I(key_id) and I(alias) will only cause a new alias to be added, an alias will never be renamed. - - The C(alias/) prefix is optional. - - Required if I(key_id) is not given. - required: false - aliases: - - key_alias - type: str - key_id: - description: - - Key ID or ARN of the key. - - One of I(alias) or I(key_id) are required. - required: false - aliases: - - key_arn - type: str - enable_key_rotation: - description: - - Whether the key should be automatically rotated every year. - required: false - type: bool - state: - description: - - Whether a key should be present or absent. - - Note that making an existing key C(absent) only schedules a key for deletion. - - Passing a key that is scheduled for deletion with I(state=present) will cancel key deletion. - required: False - choices: - - present - - absent - default: present - type: str - enabled: - description: Whether or not a key is enabled. - default: True - type: bool - description: - description: - - A description of the CMK. - - Use a description that helps you decide whether the CMK is appropriate for a task. - type: str - pending_window: - description: - - The number of days between requesting deletion of the CMK and when it will actually be deleted. - - Only used when I(state=absent) and the CMK has not yet been deleted. - - Valid values are between 7 and 30 (inclusive). - - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)' - type: int - aliases: ['deletion_delay'] - version_added: 1.4.0 - purge_grants: - description: - - Whether the I(grants) argument should cause grants not in the list to be removed. - default: False - type: bool - grants: - description: - - A list of grants to apply to the key. Each item must contain I(grantee_principal). - Each item can optionally contain I(retiring_principal), I(operations), I(constraints), - I(name). 
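A hedged boto3 sketch of what a single I(grants) entry translates to, an encryption-context-constrained C(CreateGrant) call; the key id and ARN are placeholders:

    import boto3

    kms = boto3.client('kms')
    kms.create_grant(
        KeyId='abcd1234-abcd-1234-5678-ef1234567890',
        Name='billing_prod',
        GranteePrincipal='arn:aws:iam::123456789012:role/billing_prod',
        Operations=['Decrypt', 'RetireGrant'],
        Constraints={
            'EncryptionContextEquals': {'environment': 'production', 'application': 'billing'},
        },
    )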
- - I(grantee_principal) and I(retiring_principal) must be ARNs - - 'For full documentation of suboptions see the boto3 documentation:' - - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)' - type: list - elements: dict - suboptions: - grantee_principal: - description: The full ARN of the principal being granted permissions. - required: true - type: str - retiring_principal: - description: The full ARN of the principal permitted to revoke/retire the grant. - type: str - operations: - type: list - elements: str - description: - - A list of operations that the grantee may perform using the CMK. - choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo', - 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign'] - constraints: - description: - - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals), - either or both being a dict specifying an encryption context match. - See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or - U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant) - type: dict - policy: - description: - - policy to apply to the KMS key. - - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - type: json - key_spec: - aliases: - - customer_master_key_spec - description: - - Specifies the type of KMS key to create. - - The specification is not changeable once the key is created. - type: str - default: SYMMETRIC_DEFAULT - choices: ['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1'] - version_added: 2.1.0 - key_usage: - description: - - Determines the cryptographic operations for which you can use the KMS key. - - The usage is not changeable once the key is created. - type: str - default: ENCRYPT_DECRYPT - choices: ['ENCRYPT_DECRYPT', 'SIGN_VERIFY'] - version_added: 2.1.0 -author: - - Ted Timmons (@tedder) - - Will Thames (@willthames) - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -notes: - - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS. - This can cause issues when running duplicate tasks in succession or using the M(community.aws.kms_key_info) module to fetch key metadata - shortly after modifying keys. - For this reason, it is recommended to use the return data from this module (M(community.aws.kms_key)) to fetch a key's metadata. 
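As background for the I(grants) option above, here is a minimal raw-boto3 sketch (illustrative only, not part of the module; the client setup and every ARN below are placeholders) of how a single grant item maps onto the underlying API call:

    import boto3

    kms = boto3.client('kms')  # assumes credentials and region are already configured
    kms.create_grant(
        KeyId='arn:aws:kms:us-east-1:111111111111:key/abcd1234-abcd-1234-5678-ef1234567890',
        GranteePrincipal='arn:aws:iam::111111111111:role/billing_prod',
        Name='billing_prod',
        Operations=['Decrypt', 'RetireGrant'],
        Constraints={'EncryptionContextEquals': {'environment': 'production'}},
    )

The module's convert_grant_params() helper further down in this file performs exactly this snake_case-to-CamelCase translation before calling create_grant.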
-''' - -EXAMPLES = r''' -# Create a new KMS key -- community.aws.kms_key: - alias: mykey - tags: - Name: myKey - Purpose: protect_stuff - -# Update previous key with more tags -- community.aws.kms_key: - alias: mykey - tags: - Name: myKey - Purpose: protect_stuff - Owner: security_team - -# Update a known key with grants allowing an instance with the billing-prod IAM profile -# to decrypt data encrypted with the environment: production, application: billing -# encryption context -- community.aws.kms_key: - key_id: abcd1234-abcd-1234-5678-ef1234567890 - grants: - - name: billing_prod - grantee_principal: arn:aws:iam::1234567890123:role/billing_prod - constraints: - encryption_context_equals: - environment: production - application: billing - operations: - - Decrypt - - RetireGrant - -- name: Update IAM policy on an existing KMS key - community.aws.kms_key: - alias: my-kms-key - policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { } ]}' - state: present - -- name: Example using lookup for policy json - community.aws.kms_key: - alias: my-kms-key - policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" - state: present -''' - -RETURN = r''' -key_id: - description: ID of key. - type: str - returned: always - sample: abcd1234-abcd-1234-5678-ef1234567890 -key_arn: - description: ARN of key. - type: str - returned: always - sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 -key_state: - description: - - The state of the key. - - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), - C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). - type: str - returned: always - sample: PendingDeletion -key_usage: - description: The cryptographic operations for which you can use the key. - type: str - returned: always - sample: ENCRYPT_DECRYPT -origin: - description: The source of the key's key material. When this value is C(AWS_KMS), - AWS KMS created the key material. When this value is C(EXTERNAL), the - key material was imported or the CMK lacks key material. - type: str - returned: always - sample: AWS_KMS -aws_account_id: - description: The AWS Account ID that the key belongs to. - type: str - returned: always - sample: 1234567890123 -creation_date: - description: Date and time of creation of the key. - type: str - returned: always - sample: "2017-04-18T15:12:08.551000+10:00" -deletion_date: - description: Date and time after which KMS deletes this KMS key. - type: str - returned: when key_state is PendingDeletion - sample: "2017-04-18T15:12:08.551000+10:00" - version_added: 3.3.0 -description: - description: Description of the key. - type: str - returned: always - sample: "My Key for Protecting important stuff" -enabled: - description: Whether the key is enabled. True if I(key_state) is C(Enabled). - type: bool - returned: always - sample: false -enable_key_rotation: - description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. - type: bool - returned: always - sample: false -aliases: - description: List of aliases associated with the key. - type: list - returned: always - sample: - - aws/acm - - aws/ebs -policies: - description: List of policy documents for the key. Empty when access is denied even if there are policies. 
- type: list - returned: always - elements: str - sample: - Version: "2012-10-17" - Id: "auto-ebs-2" - Statement: - - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" - Effect: "Allow" - Principal: - AWS: "*" - Action: - - "kms:Encrypt" - - "kms:Decrypt" - - "kms:ReEncrypt*" - - "kms:GenerateDataKey*" - - "kms:CreateGrant" - - "kms:DescribeKey" - Resource: "*" - Condition: - StringEquals: - kms:CallerAccount: "111111111111" - kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" - - Sid: "Allow direct access to key metadata to the account" - Effect: "Allow" - Principal: - AWS: "arn:aws:iam::111111111111:root" - Action: - - "kms:Describe*" - - "kms:Get*" - - "kms:List*" - - "kms:RevokeGrant" - Resource: "*" -key_policies: - description: List of policy documents for the key. Empty when access is denied even if there are policies. - type: list - returned: always - elements: dict - sample: - Version: "2012-10-17" - Id: "auto-ebs-2" - Statement: - - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" - Effect: "Allow" - Principal: - AWS: "*" - Action: - - "kms:Encrypt" - - "kms:Decrypt" - - "kms:ReEncrypt*" - - "kms:GenerateDataKey*" - - "kms:CreateGrant" - - "kms:DescribeKey" - Resource: "*" - Condition: - StringEquals: - kms:CallerAccount: "111111111111" - kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" - - Sid: "Allow direct access to key metadata to the account" - Effect: "Allow" - Principal: - AWS: "arn:aws:iam::111111111111:root" - Action: - - "kms:Describe*" - - "kms:Get*" - - "kms:List*" - - "kms:RevokeGrant" - Resource: "*" - version_added: 3.3.0 -tags: - description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags. - type: dict - returned: always - sample: - Name: myKey - Purpose: protecting_stuff -grants: - description: List of grants associated with a key. - type: list - elements: dict - returned: always - contains: - constraints: - description: Constraints on the encryption context that the grant allows. - See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details - type: dict - returned: always - sample: - encryption_context_equals: - "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" - creation_date: - description: Date of creation of the grant. - type: str - returned: always - sample: "2017-04-18T15:12:08+10:00" - grant_id: - description: The unique ID for the grant. - type: str - returned: always - sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 - grantee_principal: - description: The principal that receives the grant's permissions. - type: str - returned: always - sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz - issuing_account: - description: The AWS account under which the grant was issued. - type: str - returned: always - sample: arn:aws:iam::01234567890:root - key_id: - description: The key ARN to which the grant applies. - type: str - returned: always - sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 - name: - description: The friendly name that identifies the grant. - type: str - returned: always - sample: xyz - operations: - description: The list of operations permitted by the grant. - type: list - returned: always - sample: - - Decrypt - - RetireGrant - retiring_principal: - description: The principal that can retire the grant. 
- type: str - returned: always - sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz -changes_needed: - description: Grant types that would be changed/were changed. - type: dict - returned: always - sample: { "role": "add", "role grant": "add" } -had_invalid_entries: - description: Whether there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made. - type: bool - returned: always -''' - -# these mappings are used to go from simple labels to the actual 'Sid' values returned -# by get_policy. They seem to be magic values. -statement_label = { - 'role': 'Allow use of the key', - 'role grant': 'Allow attachment of persistent resources', - 'admin': 'Allow access for Key Administrators' -} - -import json - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_iam_roles_with_backoff(connection): - paginator = connection.get_paginator('list_roles') - return paginator.paginate().build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') - return paginator.paginate().build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') - return paginator.paginate().build_full_result() - - -def get_kms_aliases_lookup(connection): - _aliases = dict() - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: - # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: - # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) - else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] - return _aliases - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_tags_with_backoff(connection, key_id, **kwargs): - return connection.list_resource_tags(KeyId=key_id, **kwargs) - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_grants_with_backoff(connection, key_id): - params = dict(KeyId=key_id) - paginator = connection.get_paginator('list_grants') - return paginator.paginate(**params).build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_metadata_with_backoff(connection, key_id): - return connection.describe_key(KeyId=key_id) - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') - return 
paginator.paginate(KeyId=key_id).build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_key_policy_with_backoff(connection, key_id, policy_name): - return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) - - -def get_kms_tags(connection, module, key_id): - # Handle pagination here as list_resource_tags does not have - # a paginator - kwargs = {} - tags = [] - more = True - while more: - try: - tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): - tag_response = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] - else: - more = False - return tags - - -def get_kms_policies(connection, module, key_id): - try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for - policy in policies] - except is_boto3_error_code('AccessDeniedException'): - return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key policies") - - -def camel_to_snake_grant(grant): - ''' camel_to_snake_grant snakifies everything except the encryption context ''' - constraints = grant.get('Constraints', {}) - result = camel_dict_to_snake_dict(grant) - if 'EncryptionContextEquals' in constraints: - result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals'] - if 'EncryptionContextSubset' in constraints: - result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset'] - return result - - -def get_key_details(connection, module, key_id): - try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') - - try: - aliases = get_kms_aliases_lookup(connection) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain aliases") - - try: - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: - result['enable_key_rotation'] = None - result['aliases'] = aliases.get(result['KeyId'], []) - - result = camel_dict_to_snake_dict(result) - - # grants and tags get snakified differently - try: - result['grants'] = [camel_to_snake_grant(grant) for grant in - get_kms_grants_with_backoff(connection, key_id)['Grants']] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain key grants") - tags = get_kms_tags(connection, module, key_id) - result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] - return result - - -def get_kms_facts(connection, module): - try: - keys = 
get_kms_keys_with_backoff(connection)['Keys'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain keys") - - return [get_key_details(connection, module, key['KeyId']) for key in keys] - - -def convert_grant_params(grant, key): - grant_params = dict(KeyId=key['key_arn'], - GranteePrincipal=grant['grantee_principal']) - if grant.get('operations'): - grant_params['Operations'] = grant['operations'] - if grant.get('retiring_principal'): - grant_params['RetiringPrincipal'] = grant['retiring_principal'] - if grant.get('name'): - grant_params['Name'] = grant['name'] - if grant.get('constraints'): - grant_params['Constraints'] = dict() - if grant['constraints'].get('encryption_context_subset'): - grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset'] - if grant['constraints'].get('encryption_context_equals'): - grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals'] - return grant_params - - -def different_grant(existing_grant, desired_grant): - if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'): - return True - if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'): - return True - if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations', [])): - return True - if existing_grant.get('constraints') != desired_grant.get('constraints'): - return True - return False - - -def compare_grants(existing_grants, desired_grants, purge_grants=False): - existing_dict = dict((eg['name'], eg) for eg in existing_grants) - desired_dict = dict((dg['name'], dg) for dg in desired_grants) - to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys()) - if purge_grants: - to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys()) - else: - to_remove_keys = set() - to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys()) - for candidate in to_change_candidates: - if different_grant(existing_dict[candidate], desired_dict[candidate]): - to_add_keys.add(candidate) - to_remove_keys.add(candidate) - - to_add = [] - to_remove = [] - for key in to_add_keys: - grant = desired_dict[key] - to_add.append(grant) - for key in to_remove_keys: - grant = existing_dict[key] - to_remove.append(grant) - return to_add, to_remove - - -def start_key_deletion(connection, module, key_metadata): - if key_metadata['KeyState'] == 'PendingDeletion': - return False - - if module.check_mode: - return True - - deletion_params = {'KeyId': key_metadata['Arn']} - if module.params.get('pending_window'): - deletion_params['PendingWindowInDays'] = module.params.get('pending_window') - - try: - connection.schedule_key_deletion(**deletion_params) - return True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to schedule key for deletion") - - -def cancel_key_deletion(connection, module, key): - key_id = key['key_arn'] - if key['key_state'] != 'PendingDeletion': - return False - - if module.check_mode: - return True - - try: - connection.cancel_key_deletion(KeyId=key_id) - # key is disabled after deletion cancellation - # set this so that ensure_enabled_disabled works correctly - key['key_state'] = 'Disabled' - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to cancel key deletion") - - return True - - -def
ensure_enabled_disabled(connection, module, key, enabled): - desired_state = 'Enabled' - if not enabled: - desired_state = 'Disabled' - - if key['key_state'] == desired_state: - return False - - key_id = key['key_arn'] - if not module.check_mode: - if enabled: - try: - connection.enable_key(KeyId=key_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to enable key") - else: - try: - connection.disable_key(KeyId=key_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to disable key") - - return True - - -def update_alias(connection, module, key, alias): - alias = canonicalize_alias_name(alias) - - if alias is None: - return False - - key_id = key['key_arn'] - aliases = get_kms_aliases_with_backoff(connection)['Aliases'] - # We will only add new aliases, not rename existing ones - if alias in [_alias['AliasName'] for _alias in aliases]: - return False - - if not module.check_mode: - try: - connection.create_alias(TargetKeyId=key_id, AliasName=alias) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create key alias") - - return True - - -def update_description(connection, module, key, description): - if description is None: - return False - if key['description'] == description: - return False - - key_id = key['key_arn'] - if not module.check_mode: - try: - connection.update_key_description(KeyId=key_id, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update key description") - - return True - - -def update_tags(connection, module, key, desired_tags, purge_tags): - if desired_tags is None: - return False - - to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags) - if not (bool(to_add) or bool(to_remove)): - return False - - key_id = key['key_arn'] - if not module.check_mode: - if to_remove: - try: - connection.untag_resource(KeyId=key_id, TagKeys=to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove tag") - if to_add: - try: - tags = ansible_dict_to_boto3_tag_list(desired_tags, tag_name_key_name='TagKey', tag_value_key_name='TagValue') - connection.tag_resource(KeyId=key_id, Tags=tags) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to add tag to key") - - return True - - -def update_policy(connection, module, key, policy): - if policy is None: - return False - try: - new_policy = json.loads(policy) - except ValueError as e: - module.fail_json_aws(e, msg="Unable to parse new policy as JSON") - - key_id = key['key_arn'] - try: - keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default') - original_policy = json.loads(keyret['Policy']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): - # If we can't fetch the current policy assume we're making a change - # Could occur if we have PutKeyPolicy without GetKeyPolicy - original_policy = {} - - if not compare_policies(original_policy, new_policy): - return False - - if not module.check_mode: - try: - connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to
update key policy") - - return True - - -def update_key_rotation(connection, module, key, enable_key_rotation): - if enable_key_rotation is None: - return False - key_id = key['key_arn'] - - try: - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: - return False - except is_boto3_error_code('AccessDeniedException'): - pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get current key rotation status") - - if not module.check_mode: - try: - if enable_key_rotation: - connection.enable_key_rotation(KeyId=key_id) - else: - connection.disable_key_rotation(KeyId=key_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to enable/disable key rotation") - - return True - - -def update_grants(connection, module, key, desired_grants, purge_grants): - existing_grants = key['grants'] - - to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants) - if not (bool(to_add) or bool(to_remove)): - return False - - key_id = key['key_arn'] - if not module.check_mode: - for grant in to_remove: - try: - connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to retire grant") - for grant in to_add: - grant_params = convert_grant_params(grant, key) - try: - connection.create_grant(**grant_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create grant") - - return True - - -def update_key(connection, module, key): - changed = False - - changed |= cancel_key_deletion(connection, module, key) - changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled']) - changed |= update_alias(connection, module, key, module.params['alias']) - changed |= update_description(connection, module, key, module.params['description']) - changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags')) - changed |= update_policy(connection, module, key, module.params.get('policy')) - changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants')) - changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) - - # make results consistent with kms_facts before returning - result = get_key_details(connection, module, key['key_arn']) - result['changed'] = changed - return result - - -def create_key(connection, module): - key_usage = module.params.get('key_usage') - key_spec = module.params.get('key_spec') - tags_list = ansible_dict_to_boto3_tag_list( - module.params['tags'] or {}, - # KMS doesn't use "Key" and "Value" as other APIs do. 
- tag_name_key_name='TagKey', tag_value_key_name='TagValue' - ) - params = dict(BypassPolicyLockoutSafetyCheck=False, - Tags=tags_list, - KeyUsage=key_usage, - CustomerMasterKeySpec=key_spec, - Origin='AWS_KMS') - - if module.check_mode: - return {'changed': True} - - if module.params.get('description'): - params['Description'] = module.params['description'] - if module.params.get('policy'): - params['Policy'] = module.params['policy'] - - try: - result = connection.create_key(**params)['KeyMetadata'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create initial key") - - key = get_key_details(connection, module, result['KeyId']) - update_alias(connection, module, key, module.params['alias']) - update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) - - ensure_enabled_disabled(connection, module, key, module.params.get('enabled')) - update_grants(connection, module, key, module.params.get('grants'), False) - - # make results consistent with kms_facts - result = get_key_details(connection, module, key['key_id']) - result['changed'] = True - return result - - -def delete_key(connection, module, key_metadata): - changed = False - - changed |= start_key_deletion(connection, module, key_metadata) - - result = get_key_details(connection, module, key_metadata['Arn']) - result['changed'] = changed - return result - - -def get_arn_from_role_name(iam, rolename): - ret = iam.get_role(RoleName=rolename) - if ret.get('Role') and ret['Role'].get('Arn'): - return ret['Role']['Arn'] - raise Exception('could not find arn for name {0}.'.format(rolename)) - - -def canonicalize_alias_name(alias): - if alias is None: - return None - if alias.startswith('alias/'): - return alias - return 'alias/' + alias - - -def fetch_key_metadata(connection, module, key_id, alias): - # Note - fetching a key's metadata is very inconsistent shortly after any sort of update to a key has occurred. - # Combinations of manual waiters, checking expecting key values to actual key value, and static sleeps - # have all been exhausted, but none of those available options have solved the problem. - # Integration tests will wait for 10 seconds to combat this issue. - # See https://github.com/ansible-collections/community.aws/pull/1052. 
- - alias = canonicalize_alias_name(module.params.get('alias')) - - try: - # Fetch by key_id where possible - if key_id: - return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] - # Or try alias as a backup - return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata'] - - except connection.exceptions.NotFoundException: - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Failed to fetch key metadata.') - - -def main(): - argument_spec = dict( - alias=dict(aliases=['key_alias']), - pending_window=dict(aliases=['deletion_delay'], type='int'), - key_id=dict(aliases=['key_arn']), - description=dict(), - enabled=dict(type='bool', default=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - grants=dict(type='list', default=[], elements='dict'), - policy=dict(type='json'), - purge_grants=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - enable_key_rotation=(dict(type='bool')), - key_spec=dict(type='str', default='SYMMETRIC_DEFAULT', aliases=['customer_master_key_spec'], - choices=['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1']), - key_usage=dict(type='str', default='ENCRYPT_DECRYPT', choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY']), - ) - - module = AnsibleAWSModule( - supports_check_mode=True, - argument_spec=argument_spec, - required_one_of=[['alias', 'key_id']], - ) - - kms = module.client('kms') - - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', collection_name='community.aws') - - key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias')) - # We can't create keys with a specific ID, if we can't access the key we'll have to fail - if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata: - module.fail_json(msg="Could not find key with id {0} to update".format(module.params.get('key_id'))) - - if module.params.get('state') == 'absent': - if key_metadata is None: - module.exit_json(changed=False) - result = delete_key(kms, module, key_metadata) - module.exit_json(**result) - - if key_metadata: - key_details = get_key_details(kms, module, key_metadata['Arn']) - result = update_key(kms, module, key_details) - module.exit_json(**result) - - result = create_key(kms, module) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/kms_key_info.py b/kms_key_info.py deleted file mode 100644 index 1ba01e50d61..00000000000 --- a/kms_key_info.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: kms_key_info -version_added: 1.0.0 -short_description: Gather information about AWS KMS keys -description: - - Gather information about AWS KMS keys including tags and grants. - - Prior to release 5.0.0 this module was called C(community.aws.aws_kms_info). - The usage did not change. -author: - - "Will Thames (@willthames)" -options: - alias: - description: - - Alias for key. - - Mutually exclusive with I(key_id) and I(filters). 
- required: false - aliases: - - key_alias - type: str - version_added: 1.4.0 - key_id: - description: - - Key ID or ARN of the key. - - Mutually exclusive with I(alias) and I(filters). - required: false - aliases: - - key_arn - type: str - version_added: 1.4.0 - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filters aren't natively supported by boto3, but are emulated by this module to provide functionality similar to other modules. Standard tag filters (C(tag-key), C(tag-value) and C(tag:tagName)) are available, as are C(key-id) and C(alias). - - Mutually exclusive with I(alias) and I(key_id). - type: dict - pending_deletion: - description: Whether to get full details (tags, grants etc.) of keys pending deletion. - default: False - type: bool -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Gather information about all KMS keys -- community.aws.kms_key_info: - -# Gather information about all keys with a Name tag -- community.aws.kms_key_info: - filters: - tag-key: Name - -# Gather information about all keys with a specific name -- community.aws.kms_key_info: - filters: - "tag:Name": Example -''' - -RETURN = r''' -kms_keys: - description: List of keys. - type: complex - returned: always - contains: - key_id: - description: ID of key. - type: str - returned: always - sample: abcd1234-abcd-1234-5678-ef1234567890 - key_arn: - description: ARN of key. - type: str - returned: always - sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 - key_state: - description: - - The state of the key. - - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). - type: str - returned: always - sample: PendingDeletion - key_usage: - description: The cryptographic operations for which you can use the key. - type: str - returned: always - sample: ENCRYPT_DECRYPT - origin: - description: The source of the key's key material. When this value is C(AWS_KMS), AWS KMS created the key material. When this value is C(EXTERNAL), the key material was imported or the CMK lacks key material. - type: str - returned: always - sample: AWS_KMS - aws_account_id: - description: The AWS Account ID that the key belongs to. - type: str - returned: always - sample: 1234567890123 - creation_date: - description: Date and time of creation of the key. - type: str - returned: always - sample: "2017-04-18T15:12:08.551000+10:00" - deletion_date: - description: Date and time after which KMS deletes this KMS key. - type: str - returned: when key_state is PendingDeletion - sample: "2017-04-18T15:12:08.551000+10:00" - version_added: 3.3.0 - description: - description: Description of the key. - type: str - returned: always - sample: "My Key for Protecting important stuff" - enabled: - description: Whether the key is enabled. True if I(key_state) is C(Enabled). - type: bool - returned: always - sample: false - enable_key_rotation: - description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. - type: bool - returned: always - sample: false - aliases: - description: List of aliases associated with the key. - type: list - returned: always - sample: - - aws/acm - - aws/ebs - tags: - description: Dictionary of tags applied to the key.
Empty when access is denied even if there are tags. - type: dict - returned: always - sample: - Name: myKey - Purpose: protecting_stuff - policies: - description: List of policy documents for the key. Empty when access is denied even if there are policies. - type: list - returned: always - elements: str - sample: - Version: "2012-10-17" - Id: "auto-ebs-2" - Statement: - - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" - Effect: "Allow" - Principal: - AWS: "*" - Action: - - "kms:Encrypt" - - "kms:Decrypt" - - "kms:ReEncrypt*" - - "kms:GenerateDataKey*" - - "kms:CreateGrant" - - "kms:DescribeKey" - Resource: "*" - Condition: - StringEquals: - kms:CallerAccount: "111111111111" - kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" - - Sid: "Allow direct access to key metadata to the account" - Effect: "Allow" - Principal: - AWS: "arn:aws:iam::111111111111:root" - Action: - - "kms:Describe*" - - "kms:Get*" - - "kms:List*" - - "kms:RevokeGrant" - Resource: "*" - key_policies: - description: List of policy documents for the key. Empty when access is denied even if there are policies. - type: list - returned: always - elements: dict - sample: - Version: "2012-10-17" - Id: "auto-ebs-2" - Statement: - - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" - Effect: "Allow" - Principal: - AWS: "*" - Action: - - "kms:Encrypt" - - "kms:Decrypt" - - "kms:ReEncrypt*" - - "kms:GenerateDataKey*" - - "kms:CreateGrant" - - "kms:DescribeKey" - Resource: "*" - Condition: - StringEquals: - kms:CallerAccount: "111111111111" - kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" - - Sid: "Allow direct access to key metadata to the account" - Effect: "Allow" - Principal: - AWS: "arn:aws:iam::111111111111:root" - Action: - - "kms:Describe*" - - "kms:Get*" - - "kms:List*" - - "kms:RevokeGrant" - Resource: "*" - version_added: 3.3.0 - grants: - description: List of grants associated with a key. - type: list - elements: dict - returned: always - contains: - constraints: - description: Constraints on the encryption context that the grant allows. - See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details - type: dict - returned: always - sample: - encryption_context_equals: - "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz" - creation_date: - description: Date of creation of the grant. - type: str - returned: always - sample: "2017-04-18T15:12:08+10:00" - grant_id: - description: The unique ID for the grant. - type: str - returned: always - sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 - grantee_principal: - description: The principal that receives the grant's permissions. - type: str - returned: always - sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz - issuing_account: - description: The AWS account under which the grant was issued. - type: str - returned: always - sample: arn:aws:iam::01234567890:root - key_id: - description: The key ARN to which the grant applies. - type: str - returned: always - sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 - name: - description: The friendly name that identifies the grant. - type: str - returned: always - sample: xyz - operations: - description: The list of operations permitted by the grant. 
- type: list - returned: always - sample: - - Decrypt - - RetireGrant - retiring_principal: - description: The principal that can retire the grant. - type: str - returned: always - sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz -''' - -import json - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - -# Caching lookup for aliases -_aliases = dict() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') - return paginator.paginate().build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') - return paginator.paginate().build_full_result() - - -def get_kms_aliases_lookup(connection): - if not _aliases: - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: - # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: - # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) - else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] - return _aliases - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_tags_with_backoff(connection, key_id, **kwargs): - return connection.list_resource_tags(KeyId=key_id, **kwargs) - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_grants_with_backoff(connection, key_id, **kwargs): - params = dict(KeyId=key_id) - if kwargs.get('tokens'): - params['GrantTokens'] = kwargs['tokens'] - paginator = connection.get_paginator('list_grants') - return paginator.paginate(**params).build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_kms_metadata_with_backoff(connection, key_id): - return connection.describe_key(KeyId=key_id) - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') - return paginator.paginate(KeyId=key_id).build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_key_policy_with_backoff(connection, key_id, policy_name): - return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def get_enable_key_rotation_with_backoff(connection, key_id): - try: - current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: - return None - - return current_rotation_status.get('KeyRotationEnabled') - - -def canonicalize_alias_name(alias): - if alias is None: - return None - if alias.startswith('alias/'): - return alias - return 'alias/' + alias - - -def get_kms_tags(connection, module, key_id): - # Handle pagination here as list_resource_tags does not have - # a paginator - kwargs 
= {} - tags = [] - more = True - while more: - try: - tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): - tag_response = {} - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] - else: - more = False - return tags - - -def get_kms_policies(connection, module, key_id): - try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for - policy in policies] - except is_boto3_error_code('AccessDeniedException'): - return [] - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key policies") - - -def key_matches_filter(key, filtr): - if filtr[0] == 'key-id': - return filtr[1] == key['key_id'] - if filtr[0] == 'tag-key': - return filtr[1] in key['tags'] - if filtr[0] == 'tag-value': - return filtr[1] in key['tags'].values() - if filtr[0] == 'alias': - return filtr[1] in key['aliases'] - if filtr[0].startswith('tag:'): - tag_key = filtr[0][4:] - if tag_key not in key['tags']: - return False - return key['tags'].get(tag_key) == filtr[1] - - -def key_matches_filters(key, filters): - if not filters: - return True - else: - return all(key_matches_filter(key, filtr) for filtr in filters.items()) - - -def get_key_details(connection, module, key_id, tokens=None): - if not tokens: - tokens = [] - try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] - # Make sure we have the canonical ARN, we might have been passed an alias - key_id = result['Arn'] - except is_boto3_error_code('NotFoundException'): - return None - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - module.warn('Permission denied fetching key metadata ({0})'.format(key_id)) - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') - - try: - aliases = get_kms_aliases_lookup(connection) - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key aliases') - aliases = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain aliases") - # We can only get aliases for our own account, so we don't need the full ARN - result['aliases'] = aliases.get(result['KeyId'], []) - result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) - - if module.params.get('pending_deletion'): - return camel_dict_to_snake_dict(result) - - try: - result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key grants ({0})'.format(key_id)) - result['grants'] = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to obtain key grants") - - tags = get_kms_tags(connection, module, key_id) - - result = camel_dict_to_snake_dict(result) - result['tags'] = 
boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] - return result - - -def get_kms_info(connection, module): - if module.params.get('key_id'): - key_id = module.params.get('key_id') - details = get_key_details(connection, module, key_id) - if details: - return [details] - return [] - elif module.params.get('alias'): - alias = canonicalize_alias_name(module.params.get('alias')) - details = get_key_details(connection, module, alias) - if details: - return [details] - return [] - else: - try: - keys = get_kms_keys_with_backoff(connection)['Keys'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain keys") - return [get_key_details(connection, module, key['KeyId']) for key in keys] - - -def main(): - argument_spec = dict( - alias=dict(aliases=['key_alias']), - key_id=dict(aliases=['key_arn']), - filters=dict(type='dict'), - pending_deletion=dict(type='bool', default=False), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['alias', 'filters', 'key_id']], - supports_check_mode=True) - - try: - connection = module.client('kms') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', collection_name='community.aws') - - all_keys = get_kms_info(connection, module) - filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] - ret_params = dict(kms_keys=filtered_keys) - - module.exit_json(**ret_params) - - -if __name__ == '__main__': - main() From f3c68339a4242faa4bbacc8675a04a32d3f18927 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Sep 2022 10:24:34 +0200 Subject: [PATCH 586/683] Migrate rds_subnet_group* modules and tests (#1532) Migrate rds_subnet_group* modules and tests Depends-On: ansible-collections/amazon.aws#1058 Remove rds_subnet_group* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mark Chappell --- rds_subnet_group.py | 367 -------------------------------------------- 1 file changed, 367 deletions(-) delete mode 100644 rds_subnet_group.py diff --git a/rds_subnet_group.py b/rds_subnet_group.py deleted file mode 100644 index 3ce90a5d863..00000000000 --- a/rds_subnet_group.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_subnet_group -version_added: 1.0.0 -short_description: manage RDS database subnet groups -description: - - Creates, modifies, and deletes RDS database subnet groups. -options: - state: - description: - - Specifies whether the subnet should be present or absent. - required: true - choices: [ 'present' , 'absent' ] - type: str - name: - description: - - Database subnet group identifier. - required: true - type: str - description: - description: - - Database subnet group description. - - Required when I(state=present). 
- type: str - subnets: - description: - - List of subnet IDs that make up the database subnet group. - - Required when I(state=present). - type: list - elements: str -notes: - - Support for I(tags) and I(purge_tags) was added in release 3.2.0. -author: - - "Scott Anderson (@tastychutney)" - - "Alina Buzachis (@alinabuzachis)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -''' - -EXAMPLES = r''' -- name: Add or change a subnet group - community.aws.rds_subnet_group: - state: present - name: norwegian-blue - description: My Fancy Ex Parrot Subnet Group - subnets: - - subnet-aaaaaaaa - - subnet-bbbbbbbb - -- name: Add or change a subnet group and associate tags - community.aws.rds_subnet_group: - state: present - name: norwegian-blue - description: My Fancy Ex Parrot Subnet Group - subnets: - - subnet-aaaaaaaa - - subnet-bbbbbbbb - tags: - tag1: Tag1 - tag2: Tag2 - -- name: Remove a subnet group - community.aws.rds_subnet_group: - state: absent - name: norwegian-blue -''' - -RETURN = r''' -changed: - description: True if listing the RDS subnet group succeeds. - type: bool - returned: always - sample: "false" -subnet_group: - description: Dictionary of DB subnet group values - returned: I(state=present) - type: complex - contains: - name: - description: The name of the DB subnet group (maintained for backward compatibility) - returned: I(state=present) - type: str - sample: "ansible-test-mbp-13950442" - db_subnet_group_name: - description: The name of the DB subnet group - returned: I(state=present) - type: str - sample: "ansible-test-mbp-13950442" - description: - description: The description of the DB subnet group (maintained for backward compatibility) - returned: I(state=present) - type: str - sample: "Simple description." - db_subnet_group_description: - description: The description of the DB subnet group - returned: I(state=present) - type: str - sample: "Simple description." - vpc_id: - description: The VpcId of the DB subnet group - returned: I(state=present) - type: str - sample: "vpc-0acb0ba033ff2119c" - subnet_ids: - description: Contains a list of Subnet IDs - returned: I(state=present) - type: list - sample: - "subnet-08c94870f4480797e" - subnets: - description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa - returned: I(state=present) - type: list - contains: - subnet_availability_zone: - description: Contains Availability Zone information. - returned: I(state=present) - type: dict - version_added: 3.2.0 - sample: - name: "eu-north-1b" - subnet_identifier: - description: The identifier of the subnet. - returned: I(state=present) - type: str - version_added: 3.2.0 - sample: "subnet-08c94870f4480797e" - subnet_outpost: - description: This value specifies the Outpost. - returned: I(state=present) - type: dict - version_added: 3.2.0 - sample: {} - subnet_status: - description: The status of the subnet. 
- returned: I(state=present) - type: str - version_added: 3.2.0 - sample: "Active" - status: - description: The status of the DB subnet group (maintained for backward compatibility) - returned: I(state=present) - type: str - sample: "Complete" - subnet_group_status: - description: The status of the DB subnet group - returned: I(state=present) - type: str - sample: "Complete" - db_subnet_group_arn: - description: The ARN of the DB subnet group - returned: I(state=present) - type: str - sample: "arn:aws:rds:eu-north-1:721066863947:subgrp:ansible-test-13950442" - tags: - description: The tags associated with the subnet group - returned: I(state=present) - type: dict - version_added: 3.2.0 - sample: - tag1: Tag1 - tag2: Tag2 -''' - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags - - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - - -def create_result(changed, subnet_group=None): - if subnet_group is None: - return dict( - changed=changed - ) - result_subnet_group = dict(subnet_group) - result_subnet_group['name'] = result_subnet_group.get( - 'db_subnet_group_name') - result_subnet_group['description'] = result_subnet_group.get( - 'db_subnet_group_description') - result_subnet_group['status'] = result_subnet_group.get( - 'subnet_group_status') - result_subnet_group['subnet_ids'] = create_subnet_list( - subnet_group.get('subnets')) - return dict( - changed=changed, - subnet_group=result_subnet_group - ) - - -@AWSRetry.jittered_backoff() -def _describe_db_subnet_groups_with_backoff(client, **kwargs): - paginator = client.get_paginator('describe_db_subnet_groups') - return paginator.paginate(**kwargs).build_full_result() - - -def get_subnet_group(client, module): - params = dict() - params['DBSubnetGroupName'] = module.params.get('name').lower() - - try: - _result = _describe_db_subnet_groups_with_backoff(client, **params) - except is_boto3_error_code('DBSubnetGroupNotFoundFault'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't describe subnet groups.") - - if _result: - result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0]) - result['tags'] = get_tags(client, module, result['db_subnet_group_arn']) - - return result - - -def create_subnet_list(subnets): - r''' - Construct a list of subnet ids from a list of subnets dicts returned by boto3. - Parameters: - subnets (list): A list of subnets definitions. 
- @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups - Returns: - (list): List of subnet ids (str) - ''' - subnets_ids = [] - for subnet in subnets: - subnets_ids.append(subnet.get('subnet_identifier')) - return subnets_ids - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - name=dict(required=True), - description=dict(required=False), - subnets=dict(required=False, type='list', elements='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - ) - required_if = [('state', 'present', ['description', 'subnets'])] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True - ) - - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or [] - - try: - connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to instantiate AWS connection.') - - # Default. - changed = None - result = create_result(False) - tags_update = False - subnet_update = False - - if module.params.get("tags") is not None: - _tags = ansible_dict_to_boto3_tag_list(module.params.get("tags")) - else: - _tags = list() - - matching_groups = get_subnet_group(connection, module) - - if state == 'present': - if matching_groups: - # We have one or more subnets at this point. - - # Check if there is any tags update - tags_update = ensure_tags( - connection, - module, - matching_groups['db_subnet_group_arn'], - matching_groups['tags'], - module.params.get("tags"), - module.params['purge_tags'] - ) - - # Sort the subnet groups before we compare them - existing_subnets = create_subnet_list(matching_groups['subnets']) - existing_subnets.sort() - group_subnets.sort() - - # See if anything changed. - if ( - matching_groups['db_subnet_group_name'] != group_name or - matching_groups['db_subnet_group_description'] != group_description or - existing_subnets != group_subnets - ): - if not module.check_mode: - # Modify existing group. 
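- # Note: modify_db_subnet_group updates the description and subnet
- # membership in place; the group name is the identifier and cannot be
- # renamed through this call.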
- try: - connection.modify_db_subnet_group( - aws_retry=True, - DBSubnetGroupName=group_name, - DBSubnetGroupDescription=group_description, - SubnetIds=group_subnets - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to update a subnet group.') - subnet_update = True - else: - if not module.check_mode: - try: - connection.create_db_subnet_group( - aws_retry=True, - DBSubnetGroupName=group_name, - DBSubnetGroupDescription=group_description, - SubnetIds=group_subnets, - Tags=_tags - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to create a new subnet group.') - subnet_update = True - elif state == 'absent': - if not module.check_mode: - try: - connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name) - except is_boto3_error_code('DBSubnetGroupNotFoundFault'): - module.exit_json(**result) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 'Failed to delete a subnet group.') - else: - subnet_group = get_subnet_group(connection, module) - if subnet_group: - subnet_update = True - result = create_result(subnet_update, subnet_group) - module.exit_json(**result) - - subnet_update = True - - subnet_group = get_subnet_group(connection, module) - changed = tags_update or subnet_update - result = create_result(changed, subnet_group) - module.exit_json(**result) - - -if __name__ == '__main__': - main() From 2996665ec98f22097bfecc8a2f7462409a1ce57b Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Sep 2022 10:49:54 +0200 Subject: [PATCH 587/683] Migrate rds_param_group* modules and tests (#1530) Migrate rds_param_group* modules and tests Depends-On: ansible-collections/amazon.aws#1057 Remove rds_param_group* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mark Chappell --- rds_param_group.py | 339 --------------------------------------------- 1 file changed, 339 deletions(-) delete mode 100644 rds_param_group.py diff --git a/rds_param_group.py b/rds_param_group.py deleted file mode 100644 index d1492779996..00000000000 --- a/rds_param_group.py +++ /dev/null @@ -1,339 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rds_param_group -version_added: 1.0.0 -short_description: manage RDS parameter groups -description: - - Creates, modifies, and deletes RDS parameter groups. -options: - state: - description: - - Specifies whether the group should be present or absent. - required: true - choices: [ 'present' , 'absent' ] - type: str - name: - description: - - Database parameter group identifier. - required: true - type: str - description: - description: - - Database parameter group description. Only set when a new group is added. - type: str - engine: - description: - - The type of database for this group. - - Please use following command to get list of all supported db engines and their respective versions. - - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"' - - Required for I(state=present). 
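- For example, C(mysql5.6).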
- type: str - immediate: - description: - - Whether to apply the changes immediately, or after the next reboot of any associated instances. - aliases: - - apply_immediately - type: bool - params: - description: - - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), - or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. - aliases: [parameters] - type: dict -author: - - "Scott Anderson (@tastychutney)" - - "Will Thames (@willthames)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags - -''' - -EXAMPLES = ''' -- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 - community.aws.rds_param_group: - state: present - name: norwegian-blue - description: 'My Fancy Ex Parrot Group' - engine: 'mysql5.6' - params: - auto_increment_increment: "42K" - tags: - Environment: production - Application: parrot - -- name: Remove a parameter group - community.aws.rds_param_group: - state: absent - name: norwegian-blue -''' - -RETURN = ''' -db_parameter_group_name: - description: Name of DB parameter group - type: str - returned: when state is present -db_parameter_group_family: - description: DB parameter group family that this DB parameter group is compatible with. - type: str - returned: when state is present -db_parameter_group_arn: - description: ARN of the DB parameter group - type: str - returned: when state is present -description: - description: description of the DB parameter group - type: str - returned: when state is present -errors: - description: list of errors from attempting to modify parameters that are not modifiable - type: list - returned: when state is present -tags: - description: dictionary of tags - type: dict - returned: when state is present -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE -from ansible.module_utils.six import string_types -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - -INT_MODIFIERS = { - 'K': 1024, - 'M': pow(1024, 2), - 'G': pow(1024, 3), - 'T': pow(1024, 4), -} - - -@AWSRetry.jittered_backoff() -def _describe_db_parameters(connection, **params): - try: - paginator = connection.get_paginator('describe_db_parameters') - return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('DBParameterGroupNotFound'): - return None - - -def convert_parameter(param, value): - """ - Allows setting parameters with 10M = 10* 1024 * 1024 and so on. 
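- e.g. an integer value of "10M" is expanded to 10485760 (10 * 1024 * 1024) before being sent to AWS.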
- """ - converted_value = value - - if param['DataType'] == 'integer': - if isinstance(value, string_types): - try: - for modifier in INT_MODIFIERS.keys(): - if value.endswith(modifier): - converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] - except ValueError: - # may be based on a variable (ie. {foo*3/4}) so - # just pass it on through to the AWS SDK - pass - elif isinstance(value, bool): - converted_value = 1 if value else 0 - - elif param['DataType'] == 'boolean': - if isinstance(value, string_types): - converted_value = value in BOOLEANS_TRUE - # convert True/False to 1/0 - converted_value = 1 if converted_value else 0 - return str(converted_value) - - -def update_parameters(module, connection): - groupname = module.params['name'] - desired = module.params['params'] - apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot' - errors = [] - modify_list = [] - existing = {} - try: - _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname) - if _existing: - existing = _existing['Parameters'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe existing parameter groups") - lookup = dict((param['ParameterName'], param) for param in existing) - for param_key, param_value in desired.items(): - if param_key not in lookup: - errors.append("Parameter %s is not an available parameter for the %s engine" % - (param_key, module.params.get('engine'))) - else: - converted_value = convert_parameter(lookup[param_key], param_value) - # engine-default parameters do not have a ParameterValue, so we'll always override those. - if converted_value != lookup[param_key].get('ParameterValue'): - if lookup[param_key]['IsModifiable']: - modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method)) - else: - errors.append("Parameter %s is not modifiable" % param_key) - - # modify_db_parameters takes at most 20 parameters - if modify_list and not module.check_mode: - try: - from itertools import izip_longest as zip_longest # python 2 - except ImportError: - from itertools import zip_longest # python 3 - for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None): - non_empty_slice = [item for item in modify_slice if item] - try: - connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't update parameters") - return True, errors - return False, errors - - -def update_tags(module, connection, group, tags): - if tags is None: - return False - changed = False - - existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList'] - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), - tags, module.params['purge_tags']) - - if module.check_mode: - if not to_update and not to_delete: - return False - else: - return True - - if to_update: - try: - connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], - Tags=ansible_dict_to_boto3_tag_list(to_update)) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add tags to parameter group") - if to_delete: - try: - connection.remove_tags_from_resource(aws_retry=True, 
ResourceName=group['DBParameterGroupArn'], - TagKeys=to_delete) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove tags from parameter group") - return changed - - -def ensure_present(module, connection): - groupname = module.params['name'] - tags = module.params.get('tags') - changed = False - errors = [] - try: - response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) - except is_boto3_error_code('DBParameterGroupNotFound'): - response = None - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't access parameter group information") - if not response: - params = dict(DBParameterGroupName=groupname, - DBParameterGroupFamily=module.params['engine'], - Description=module.params['description']) - if tags: - params['Tags'] = ansible_dict_to_boto3_tag_list(tags) - if not module.check_mode: - try: - response = connection.create_db_parameter_group(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create parameter group") - else: - group = response['DBParameterGroups'][0] - if tags: - changed = update_tags(module, connection, group, tags) - - if module.params.get('params'): - params_changed, errors = update_parameters(module, connection) - changed = changed or params_changed - - try: - response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) - group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) - except is_boto3_error_code('DBParameterGroupNotFound'): - module.exit_json(changed=True, errors=errors) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain parameter group information") - try: - tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't obtain parameter group tags") - group['tags'] = boto3_tag_list_to_ansible_dict(tags) - - module.exit_json(changed=changed, errors=errors, **group) - - -def ensure_absent(module, connection): - group = module.params['name'] - try: - response = connection.describe_db_parameter_groups(DBParameterGroupName=group) - except is_boto3_error_code('DBParameterGroupNotFound'): - module.exit_json(changed=False) - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't access parameter group information") - - if response and module.check_mode: - module.exit_json(changed=True) - - try: - response = connection.delete_db_parameter_group(aws_retry=True, DBParameterGroupName=group) - module.exit_json(changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete parameter group") - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), - name=dict(required=True), - engine=dict(), - description=dict(), - params=dict(aliases=['parameters'], type='dict'), - immediate=dict(type='bool', aliases=['apply_immediately']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - ) - module = 
AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[['state', 'present', ['description', 'engine']]], - supports_check_mode=True - ) - - try: - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - state = module.params.get('state') - if state == 'present': - ensure_present(module, conn) - if state == 'absent': - ensure_absent(module, conn) - - -if __name__ == '__main__': - main() From e9a1c5a23e6236311a92b79c438335ff28925004 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Fri, 23 Sep 2022 04:51:44 -0400 Subject: [PATCH 588/683] Promote iam_user* modules and test (#1518) Promote iam_user* modules and test SUMMARY Depends-On: ansible-collections/amazon.aws#1053 Remove iam_user* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- iam_user.py | 571 ----------------------------------------------- iam_user_info.py | 197 ---------------- 2 files changed, 768 deletions(-) delete mode 100644 iam_user.py delete mode 100644 iam_user_info.py diff --git a/iam_user.py b/iam_user.py deleted file mode 100644 index b6b3ce34873..00000000000 --- a/iam_user.py +++ /dev/null @@ -1,571 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: iam_user -version_added: 1.0.0 -short_description: Manage AWS IAM users -description: - - A module to manage AWS IAM users. - - The module does not manage groups that users belong to; group memberships can be managed using M(community.aws.iam_group). -author: Josh Souza (@joshsouza) -options: - name: - description: - - The name of the user to create. - required: true - type: str - password: - description: - - The password to apply to the user. - required: false - type: str - version_added: 2.2.0 - password_reset_required: - description: - - Defines if the user is required to set a new password after login. - required: false - type: bool - default: false - version_added: 3.1.0 - update_password: - default: always - choices: ['always', 'on_create'] - description: - - When to update user passwords. - - I(update_password=always) will ensure the password is set to I(password). - - I(update_password=on_create) will only set the password for newly created users. - type: str - version_added: 2.2.0 - remove_password: - description: - - Option to delete user login passwords. - - This field is mutually exclusive with I(password). - type: 'bool' - version_added: 2.2.0 - managed_policies: - description: - - A list of managed policy ARNs or friendly names to attach to the user. - - To embed an inline policy, use M(community.aws.iam_policy). - required: false - type: list - elements: str - aliases: ['managed_policy'] - state: - description: - - Create or remove the IAM user. - required: true - choices: [ 'present', 'absent' ] - type: str - purge_policies: - description: - - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
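- When I(purge_policies=false) (the default), policies attached outside of this module are left untouched.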
- required: false - default: false - type: bool - aliases: ['purge_policy', 'purge_managed_policies'] - wait: - description: - - When I(wait=True) the module will wait for up to I(wait_timeout) seconds - for IAM user creation before returning. - default: True - type: bool - version_added: 2.2.0 - wait_timeout: - description: - - How long (in seconds) to wait for creation / updates to complete. - default: 120 - type: int - version_added: 2.2.0 -notes: - - Support for I(tags) and I(purge_tags) was added in release 2.1.0. -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. -# Note: This module does not allow management of groups that users belong to. -# Groups should manage their membership directly using community.aws.iam_group, -# as users belong to them. - -- name: Create a user - community.aws.iam_user: - name: testuser1 - state: present - -- name: Create a user with a password - community.aws.iam_user: - name: testuser1 - password: SomeSecurePassword - state: present - -- name: Create a user and attach a managed policy using its ARN - community.aws.iam_user: - name: testuser1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - state: present - -- name: Remove all managed policies from an existing user with an empty list - community.aws.iam_user: - name: testuser1 - state: present - purge_policies: true - -- name: Create user with tags - community.aws.iam_user: - name: testuser1 - state: present - tags: - Env: Prod - -- name: Delete the user - community.aws.iam_user: - name: testuser1 - state: absent - -''' -RETURN = r''' -user: - description: dictionary containing all the user information - returned: success - type: complex - contains: - arn: - description: the Amazon Resource Name (ARN) specifying the user - type: str - sample: "arn:aws:iam::1234567890:user/testuser1" - create_date: - description: the date and time, in ISO 8601 date-time format, when the user was created - type: str - sample: "2017-02-08T04:36:28+00:00" - user_id: - description: the stable and unique string identifying the user - type: str - sample: "AGPAIDBWE12NSFINE55TM" - user_name: - description: the friendly name that identifies the user - type: str - sample: "testuser1" - path: - description: the path to the user - type: str - sample: "/" - tags: - description: user tags - type: dict - returned: always - sample: {"Env": "Prod"} -''' - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -def compare_attached_policies(current_attached_policies, new_attached_policies): - - # If new_attached_policies is None it means we want to remove all policies - if len(current_attached_policies) > 0 and new_attached_policies is None: - return False - - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - 
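- # Collect the ARNs of the currently attached policies for the set comparison below.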
current_attached_policies_arn_list.append(policy['PolicyArn']) - - if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)): - return True - else: - return False - - -def convert_friendly_names_to_arns(connection, module, policy_names): - - # List comprehension that looks for any policy in the 'policy_names' list - # that does not begin with 'arn'. If there aren't any, short circuit. - # If there are, translate friendly name to the full arn - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): - return policy_names - allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] - - for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] - try: - return [allpolicies[policy] for policy in policy_names] - except KeyError as e: - module.fail_json(msg="Couldn't find policy: " + str(e)) - - -def wait_iam_exists(connection, module): - - user_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - - try: - waiter = connection.get_waiter('user_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - UserName=user_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM user creation') - - -def create_or_update_login_profile(connection, module): - - # Apply new password / update password for the user - user_params = dict() - user_params['UserName'] = module.params.get('name') - user_params['Password'] = module.params.get('password') - user_params['PasswordResetRequired'] = module.params.get('password_reset_required') - retval = {} - - try: - retval = connection.update_login_profile(**user_params) - except is_boto3_error_code('NoSuchEntity'): - # Login profile does not yet exist - create it - try: - retval = connection.create_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user login profile") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to update user login profile") - - return True, retval - - -def delete_login_profile(connection, module): - ''' - Deletes a user's login profile.
- Parameters: - connection: IAM client - module: AWSModule - Returns: - (bool): True if login profile deleted, False if no login profile found to delete - ''' - user_params = dict() - user_params['UserName'] = module.params.get('name') - - # User does not have login profile - nothing to delete - if not user_has_login_profile(connection, module, user_params['UserName']): - return False - - if not module.check_mode: - try: - connection.delete_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete user login profile") - - return True - - -def create_or_update_user(connection, module): - - params = dict() - params['UserName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - purge_policies = module.params.get('purge_policies') - - if module.params.get('tags') is not None: - params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) - - changed = False - - if managed_policies: - managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) - - # Get user - user = get_user(connection, module, params['UserName']) - - # If user is None, create it - new_login_profile = False - if user is None: - # Check mode means we would create the user - if module.check_mode: - module.exit_json(changed=True) - - try: - connection.create_user(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user") - - # Wait for user to be fully available before continuing - if module.params.get('wait'): - wait_iam_exists(connection, module) - - if module.params.get('password') is not None: - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) - - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True - else: - login_profile_result = None - update_result = update_user_tags(connection, module, params, user) - - if module.params['update_password'] == "always" and module.params.get('password') is not None: - # Can't compare passwords, so just return changed on check mode runs - if module.check_mode: - module.exit_json(changed=True) - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) - - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True - - elif module.params.get('remove_password'): - login_profile_result = delete_login_profile(connection, module) - - changed = bool(update_result) or bool(login_profile_result) - - # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) - if not compare_attached_policies(current_attached_policies, managed_policies): - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) - - # If managed_policies has a single empty element we want to remove all attached policies - if purge_policies: - # Detach policies not present - for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): - changed = True - if not module.check_mode: - try: - connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format( - policy_arn, params['UserName'])) - - # If there are policies to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(managed_policies).difference(set(current_attached_policies_arn_list)): - changed = True - # If there are policies in managed_policies attach each policy - if managed_policies != [None] and not module.check_mode: - for policy_arn in managed_policies: - try: - connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( - policy_arn, params['UserName'])) - - if module.check_mode: - module.exit_json(changed=changed) - - # Get the user again - user = get_user(connection, module, params['UserName']) - if changed and new_login_profile: - # `LoginProfile` is only returned on `create_login_profile` method - user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) - - module.exit_json(changed=changed, iam_user=user, user=user['user']) - - -def destroy_user(connection, module): - - user_name = module.params.get('name') - - user = get_user(connection, module, user_name) - # User is not present - if not user: - module.exit_json(changed=False) - - # Check mode means we would remove this user - if module.check_mode: - module.exit_json(changed=True) - - # Remove any attached policies otherwise deletion fails - try: - for policy in get_attached_policy_list(connection, module, user_name): - connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) - - try: - # Remove user's access keys - access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"] - for access_key in access_keys: - connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) - - # Remove user's login profile (console password) - delete_login_profile(connection, module) - - # Remove user's ssh public keys - ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] - for ssh_public_key in ssh_public_keys: - connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"]) - - # Remove user's service specific credentials - service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"] - for service_specific_credential in service_credentials: - connection.delete_service_specific_credential( - UserName=user_name, - ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"] - ) - - # Remove user's signing certificates - signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"] - for signing_certificate in signing_certificates: - connection.delete_signing_certificate( - UserName=user_name, - CertificateId=signing_certificate["CertificateId"] - ) - - # Remove user's MFA devices - mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"] - for mfa_device in mfa_devices: - connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"]) - - # Remove user's inline policies - 
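- # (inline policies must be deleted one at a time; IAM provides no bulk-delete call)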
inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"] - for policy_name in inline_policies: - connection.delete_user_policy(UserName=user_name, PolicyName=policy_name) - - # Remove user's group membership - user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"] - for group in user_groups: - connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) - - connection.delete_user(UserName=user_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) - - module.exit_json(changed=True) - - -def get_user(connection, module, name): - - params = dict() - params['UserName'] = name - - try: - user = connection.get_user(**params) - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) - - tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', [])) - user = camel_dict_to_snake_dict(user) - user['user']['tags'] = tags - return user - - -def get_attached_policy_list(connection, module, name): - - try: - return connection.list_attached_user_policies(UserName=name)['AttachedPolicies'] - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) - - -def user_has_login_profile(connection, module, name): - ''' - Returns whether or not given user has a login profile. - Parameters: - connection: IAM client - module: AWSModule - name (str): Username of user - Returns: - (bool): True if user had login profile, False if not - ''' - try: - connection.get_login_profile(UserName=name) - except is_boto3_error_code('NoSuchEntity'): - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name)) - return True - - -def update_user_tags(connection, module, params, user): - user_name = params['UserName'] - existing_tags = user['user']['tags'] - new_tags = params.get('Tags') - if new_tags is None: - return False - new_tags = boto3_tag_list_to_ansible_dict(new_tags) - - purge_tags = module.params.get('purge_tags') - - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) - - if not module.check_mode: - try: - if tags_to_remove: - connection.untag_user(UserName=user_name, TagKeys=tags_to_remove) - if tags_to_add: - connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for user %s' % user_name) - - changed = bool(tags_to_add) or bool(tags_to_remove) - return changed - - -def main(): - - argument_spec = dict( - name=dict(required=True, type='str'), - password=dict(type='str', no_log=True), - password_reset_required=dict(type='bool', default=False, no_log=False), - update_password=dict(default='always', choices=['always', 'on_create'], no_log=False), - remove_password=dict(type='bool', no_log=False), - managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), - 
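- # 'managed_policies' accepts friendly names as well as ARNs; names are resolved by convert_friendly_names_to_arns() above.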
state=dict(choices=['present', 'absent'], required=True), - purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=120, type='int'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['password', 'remove_password']], - ) - - module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.", - date='2024-05-01', collection_name='community.aws') - - connection = module.client('iam') - - state = module.params.get("state") - - if state == 'present': - create_or_update_user(connection, module) - else: - destroy_user(connection, module) - - -if __name__ == '__main__': - main() diff --git a/iam_user_info.py b/iam_user_info.py deleted file mode 100644 index ee6224880cd..00000000000 --- a/iam_user_info.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python - -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: iam_user_info -version_added: 1.0.0 -short_description: Gather IAM user(s) facts in AWS -description: - - This module can be used to gather IAM user(s) facts in AWS. -author: - - Constantin Bugneac (@Constantin07) - - Abhijeet Kasurde (@Akasurde) -options: - name: - description: - - The name of the IAM user to look for. - required: false - type: str - group: - description: - - The group name of the IAM user to look for. Mutually exclusive with C(path). - required: false - type: str - path: - description: - - The path to the IAM user. Mutually exclusive with C(group). - - If specified, all users whose path starts with the provided value are returned. - required: false - default: '/' - type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. -# Gather facts about "test" user. -- name: Get IAM user info - community.aws.iam_user_info: - name: "test" - -# Gather facts about all users in the "dev" group. -- name: Get IAM user info - community.aws.iam_user_info: - group: "dev" - -# Gather facts about all users with "/division_abc/subdivision_xyz/" path. 
-- name: Get IAM user info - community.aws.iam_user_info: - path: "/division_abc/subdivision_xyz/" -''' - -RETURN = r''' -iam_users: - description: list of matching IAM users - returned: success - type: complex - contains: - arn: - description: the ARN of the user - returned: if user exists - type: str - sample: "arn:aws:iam::156360693172:user/dev/test_user" - create_date: - description: the datetime user was created - returned: if user exists - type: str - sample: "2016-05-24T12:24:59+00:00" - password_last_used: - description: the last datetime the password was used by user - returned: if password was used at least once - type: str - sample: "2016-05-25T13:39:11+00:00" - path: - description: the path to user - returned: if user exists - type: str - sample: "/dev/" - user_id: - description: the unique user id - returned: if user exists - type: str - sample: "AIDUIOOCQKTUGI6QJLGH2" - user_name: - description: the user name - returned: if user exists - type: str - sample: "test_user" - tags: - description: User tags. - type: dict - returned: if user exists - sample: '{"Env": "Prod"}' -''' - -try: - from botocore.exceptions import BotoCoreError, ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - - -@AWSRetry.exponential_backoff() -def list_iam_users_with_backoff(client, operation, **kwargs): - paginator = client.get_paginator(operation) - return paginator.paginate(**kwargs).build_full_result() - - -def describe_iam_user(user): - tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', [])) - user = camel_dict_to_snake_dict(user) - user['tags'] = tags - return user - - -def list_iam_users(connection, module): - - name = module.params.get('name') - group = module.params.get('group') - path = module.params.get('path') - - params = dict() - iam_users = [] - - if not group and not path: - if name: - params['UserName'] = name - try: - iam_users.append(connection.get_user(**params)['User']) - except is_boto3_error_code('NoSuchEntity'): - pass - except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name) - - if group: - params['GroupName'] = group - try: - iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users'] - except is_boto3_error_code('NoSuchEntity'): - pass - except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group) - if name: - iam_users = [user for user in iam_users if user['UserName'] == name] - - if path and not group: - params['PathPrefix'] = path - try: - iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users'] - except is_boto3_error_code('NoSuchEntity'): - pass - except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path) - if name: - iam_users = [user for user in iam_users if user['UserName'] == name] - - module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users]) - - -def 
main(): - argument_spec = dict( - name=dict(), - group=dict(), - path=dict(default='/') - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['group', 'path'] - ], - supports_check_mode=True - ) - - connection = module.client('iam') - - list_iam_users(connection, module) - - -if __name__ == '__main__': - main() From b1980c3cb9a75fc34869f95d717bbaf275c0bae7 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Sep 2022 11:46:16 +0200 Subject: [PATCH 589/683] Migrate rds_option_group* modules and tests (#1528) Migrate rds_option_group* modules and tests Depends-On: ansible-collections/amazon.aws#1056 Remove rds_option_group* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Mark Chappell --- rds_option_group.py | 665 --------------------------------------- rds_option_group_info.py | 326 ------------------- 2 files changed, 991 deletions(-) delete mode 100644 rds_option_group.py delete mode 100644 rds_option_group_info.py diff --git a/rds_option_group.py b/rds_option_group.py deleted file mode 100644 index 1efc80cf55f..00000000000 --- a/rds_option_group.py +++ /dev/null @@ -1,665 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: rds_option_group -short_description: Manages the creation, modification, and deletion of RDS option groups -version_added: 2.1.0 -description: - - Manages the creation, modification, and deletion of RDS option groups. -author: - - "Nick Aslanidis (@naslanidis)" - - "Will Thames (@willthames)" - - "Alina Buzachis (@alinabuzachis)" -options: - state: - description: - - Specifies whether the option group should be C(present) or C(absent). - required: true - choices: [ 'present', 'absent' ] - type: str - option_group_name: - description: - - Specifies the name of the option group to be created. - required: true - type: str - engine_name: - description: - - Specifies the name of the engine that this option group should be associated with. - type: str - major_engine_version: - description: - - Specifies the major version of the engine that this option group should be associated with. - type: str - option_group_description: - description: - - The description of the option group. - type: str - apply_immediately: - description: - - Indicates whether the changes should be applied immediately, or during the next maintenance window. - required: false - type: bool - default: false - options: - description: - - Options in this list are added to the option group. - - If already present, the specified configuration is used to update the existing configuration. - - If none are supplied, any existing options are removed. - type: list - elements: dict - suboptions: - option_name: - description: The name of the option to include in the group. - required: false - type: str - port: - description: The optional port for the option. - required: false - type: int - option_version: - description: The version for the option. - required: false - type: str - option_settings: - description: The option settings to include in an option group. - required: false - type: list - elements: dict - suboptions: - name: - description: The name of the option that has settings that you can set.
- required: false - type: str - value: - description: The current value of the option setting. - required: false - type: str - default_value: - description: The default value of the option setting. - required: false - type: str - description: - description: The description of the option setting. - required: false - type: str - apply_type: - description: The DB engine specific parameter type. - required: false - type: str - data_type: - description: The data type of the option setting. - required: false - type: str - allowed_values: - description: The allowed values of the option setting. - required: false - type: str - is_modifiable: - description: A Boolean value that, when C(true), indicates the option setting can be modified from the default. - required: false - type: bool - is_collection: - description: Indicates if the option setting is part of a collection. - required: false - type: bool - db_security_group_memberships: - description: A list of C(DBSecurityGroupMembership) name strings used for this option. - required: false - type: list - elements: str - vpc_security_group_memberships: - description: A list of C(VpcSecurityGroupMembership) name strings used for this option. - required: false - type: list - elements: str - wait: - description: Whether to wait for the option group to be available or deleted. - type: bool - default: True -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = r''' -# Create an RDS Mysql Option group -- name: Create an RDS Mysql option group - community.aws.rds_option_group: - state: present - option_group_name: test-mysql-option-group - engine_name: mysql - major_engine_version: 5.6 - option_group_description: test mysql option group - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - "sg-d188c123" - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: "20" - - name: CHUNK_SIZE_GROWTH_FACTOR - value: "1.25" - register: new_rds_mysql_option_group - -# Remove currently configured options for an option group by removing options argument -- name: Create an RDS Mysql option group - community.aws.rds_option_group: - state: present - option_group_name: test-mysql-option-group - engine_name: mysql - major_engine_version: 5.6 - option_group_description: test mysql option group - apply_immediately: true - register: rds_mysql_option_group - -- name: Create an RDS Mysql option group using tags - community.aws.rds_option_group: - state: present - option_group_name: test-mysql-option-group - engine_name: mysql - major_engine_version: 5.6 - option_group_description: test mysql option group - apply_immediately: true - tags: - Tag1: tag1 - Tag2: tag2 - register: rds_mysql_option_group - -# Delete an RDS Mysql Option group -- name: Delete an RDS Mysql option group - community.aws.rds_option_group: - state: absent - option_group_name: test-mysql-option-group - register: deleted_rds_mysql_option_group -''' - -RETURN = r''' -allows_vpc_and_non_vpc_instance_memberships: - description: Indicates whether this option group can be applied to both VPC and non-VPC instances. - returned: always - type: bool - sample: false -changed: - description: If the Option Group has changed. - type: bool - returned: always - sample: true -engine_name: - description: Indicates the name of the engine that this option group can be applied to. 
- returned: always - type: str - sample: "mysql" -major_engine_version: - description: Indicates the major engine version associated with this option group. - returned: always - type: str - sample: "5.6" -option_group_arn: - description: The Amazon Resource Name (ARN) for the option group. - returned: always - type: str - sample: "arn:aws:rds:ap-southeast-2:721066863947:og:ansible-test-option-group" -option_group_description: - description: Provides a description of the option group. - returned: always - type: str - sample: "test mysql option group" -option_group_name: - description: Specifies the name of the option group. - returned: always - type: str - sample: "test-mysql-option-group" -options: - description: Indicates what options are available in the option group. - returned: always - type: list - elements: dict - contains: - db_security_group_memberships: - description: If the option requires access to a port, then this DB security group allows access to the port. - returned: always - type: list - elements: dict - contains: - status: - description: The status of the DB security group. - returned: always - type: str - sample: "available" - db_security_group_name: - description: The name of the DB security group. - returned: always - type: str - sample: "mydbsecuritygroup" - option_description: - description: The description of the option. - returned: always - type: str - sample: "Innodb Memcached for MySQL" - option_name: - description: The name of the option. - returned: always - type: str - sample: "MEMCACHED" - option_settings: - description: The settings belonging to this option. - returned: always - type: list - contains: - allowed_values: - description: The allowed values of the option setting. - returned: always - type: str - sample: "1-2048" - apply_type: - description: The DB engine specific parameter type. - returned: always - type: str - sample: "STATIC" - data_type: - description: The data type of the option setting. - returned: always - type: str - sample: "INTEGER" - default_value: - description: The default value of the option setting. - returned: always - type: str - sample: "1024" - description: - description: The description of the option setting. - returned: always - type: str - sample: "Verbose level for memcached." - is_collection: - description: Indicates if the option setting is part of a collection. - returned: always - type: bool - sample: true - is_modifiable: - description: A Boolean value that, when true, indicates the option setting can be modified from the default. - returned: always - type: bool - sample: true - name: - description: The name of the option that has settings that you can set. - returned: always - type: str - sample: "INNODB_API_ENABLE_MDL" - value: - description: The current value of the option setting. - returned: always - type: str - sample: "0" - permanent: - description: Indicates if this option is permanent. - returned: always - type: bool - sample: true - persistent: - description: Indicates if this option is persistent. - returned: always - type: bool - sample: true - port: - description: If required, the port configured for this option to use. - returned: always - type: int - sample: 11211 - vpc_security_group_memberships: - description: If the option requires access to a port, then this VPC security group allows access to the port. - returned: always - type: list - elements: dict - contains: - status: - description: The status of the VPC security group. 
- returned: always - type: str - sample: "available" - vpc_security_group_id: - description: The name of the VPC security group. - returned: always - type: str - sample: "sg-0cd636a23ae76e9a4" -vpc_id: - description: If present, this option group can only be applied to instances that are in the VPC indicated by this field. - returned: always - type: str - sample: "vpc-bf07e9d6" -tags: - description: The tags associated with the option group. - type: dict - returned: always - sample: { - "Ansible": "Test" - } -''' - - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_option_groups(client, **params): - try: - paginator = client.get_paginator('describe_option_groups') - return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0] - except is_boto3_error_code('OptionGroupNotFoundFault'): - return {} - - -def get_option_group(client, module): - params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - - try: - result = camel_dict_to_snake_dict(_describe_option_groups(client, **params)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't describe option groups.") - - if result: - result['tags'] = get_tags(client, module, result['option_group_arn']) - - return result - - -def create_option_group_options(client, module): - changed = True - params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - options_to_include = module.params.get('options') - params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True) - - if module.params.get('apply_immediately'): - params['ApplyImmediately'] = module.params.get('apply_immediately') - - if module.check_mode: - return changed - - try: - client.modify_option_group(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update Option Group.") - - return changed - - -def remove_option_group_options(client, module, options_to_remove): - changed = True - params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - params['OptionsToRemove'] = options_to_remove - - if module.params.get('apply_immediately'): - params['ApplyImmediately'] = module.params.get('apply_immediately') - - if module.check_mode: - return changed - - try: - client.modify_option_group(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - return changed - - -def create_option_group(client, module): - changed = True - params = dict() - 
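- # Build the CreateOptionGroup request; when no tags are supplied an empty tag list is sent.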
params['OptionGroupName'] = module.params.get('option_group_name') - params['EngineName'] = module.params.get('engine_name') - params['MajorEngineVersion'] = str(module.params.get('major_engine_version')) - params['OptionGroupDescription'] = module.params.get('option_group_description') - - if module.params.get('tags'): - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) - else: - params['Tags'] = list() - - if module.check_mode: - return changed - try: - client.create_option_group(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to create Option Group.') - - return changed - - -def match_option_group_options(client, module): - requires_update = False - new_options = module.params.get('options') - - # Get existing option groups and compare to our new options spec - current_option = get_option_group(client, module) - - if current_option['options'] == [] and new_options: - requires_update = True - else: - for option in current_option['options']: - for setting_name in new_options: - if setting_name['option_name'] == option['option_name']: - - # Security groups need to be handled separately due to different keys on request and what is - # returned by the API - if any( - name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and - setting_name[name] != option[name] - for name in setting_name - ): - requires_update = True - - if any( - name in option and name == 'vpc_security_group_memberships' - for name in setting_name - ): - current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships']) - new_sg = set(setting_name['vpc_security_group_memberships']) - if current_sg != new_sg: - requires_update = True - - if any( - new_option_setting['name'] == current_option_setting['name'] and - new_option_setting['value'] != current_option_setting['value'] - for new_option_setting in setting_name['option_settings'] - for current_option_setting in option['option_settings'] - ): - requires_update = True - else: - requires_update = True - - return requires_update - - -def compare_option_group(client, module): - to_be_added = None - to_be_removed = None - current_option = get_option_group(client, module) - new_options = module.params.get('options') - new_settings = set([item['option_name'] for item in new_options]) - old_settings = set([item['option_name'] for item in current_option['options']]) - - if new_settings != old_settings: - to_be_added = list(new_settings - old_settings) - to_be_removed = list(old_settings - new_settings) - - return to_be_added, to_be_removed - - -def setup_option_group(client, module): - results = [] - changed = False - to_be_added = None - to_be_removed = None - - # Check if there is an existing options group - existing_option_group = get_option_group(client, module) - - if existing_option_group: - results = existing_option_group - - # Check tagging - changed |= update_tags(client, module, existing_option_group) - - if module.params.get('options'): - # Check if existing options require updating - update_required = match_option_group_options(client, module) - - # Check if there are options to be added or removed - if update_required: - to_be_added, to_be_removed = compare_option_group(client, module) - - if to_be_added or update_required: - changed |= create_option_group_options(client, module) - - if to_be_removed: - changed |= remove_option_group_options(client, module, to_be_removed) - - # If changed, 
get updated version of option group - if changed: - results = get_option_group(client, module) - else: - # No options were supplied. If options exist, remove them - current_option_group = get_option_group(client, module) - - if current_option_group['options'] != []: - # Here we would call our remove options function - options_to_remove = [] - - for option in current_option_group['options']: - options_to_remove.append(option['option_name']) - - changed |= remove_option_group_options(client, module, options_to_remove) - - # If changed, get updated version of option group - if changed: - results = get_option_group(client, module) - else: - changed = create_option_group(client, module) - - if module.params.get('options'): - changed = create_option_group_options(client, module) - - results = get_option_group(client, module) - - return changed, results - - -def remove_option_group(client, module): - changed = False - params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - - # Check if there is an existing options group - existing_option_group = get_option_group(client, module) - - if existing_option_group: - - if module.check_mode: - return True, {} - - changed = True - try: - client.delete_option_group(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete option group.") - - return changed, {} - - -def update_tags(client, module, option_group): - if module.params.get('tags') is None: - return False - - try: - existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't obtain option group tags.") - - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), - module.params['tags'], module.params['purge_tags']) - changed = bool(to_update or to_delete) - - if to_update: - try: - if module.check_mode: - return changed - client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], - Tags=ansible_dict_to_boto3_tag_list(to_update)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add tags to option group.") - if to_delete: - try: - if module.check_mode: - return changed - client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], - TagKeys=to_delete) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove tags from option group.") - - return changed - - -def main(): - argument_spec = dict( - option_group_name=dict(required=True, type='str'), - engine_name=dict(type='str'), - major_engine_version=dict(type='str'), - option_group_description=dict(type='str'), - options=dict(required=False, type='list', elements='dict'), - apply_immediately=dict(type='bool', default=False), - state=dict(required=True, choices=['present', 'absent']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]], - ) - - try: - client = module.client('rds', 
retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') - - state = module.params.get('state') - - if state == 'present': - changed, results = setup_option_group(client, module) - else: - changed, results = remove_option_group(client, module) - - module.exit_json(changed=changed, **results) - - -if __name__ == '__main__': - main() diff --git a/rds_option_group_info.py b/rds_option_group_info.py deleted file mode 100644 index 37e848032c8..00000000000 --- a/rds_option_group_info.py +++ /dev/null @@ -1,326 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rds_option_group_info -short_description: Gather information about RDS option groups -version_added: 2.1.0 -description: - - Gather information about RDS option groups. -author: "Alina Buzachis (@alinabuzachis)" -options: - option_group_name: - description: - - The name of the option group to describe. - - Can't be supplied together with I(engine_name) or I(major_engine_version). - default: '' - required: false - type: str - marker: - description: - - If this parameter is specified, the response includes only records beyond the marker, up to the value specified by I(max_records). - - Allowed values are between C(20) and C(100). - default: '' - required: false - type: str - max_records: - description: - - The maximum number of records to include in the response. - type: int - default: 100 - required: false - engine_name: - description: Filters the list of option groups to only include groups associated with a specific database engine. - type: str - default: '' - required: false - major_engine_version: - description: - - Filters the list of option groups to only include groups associated with a specific database engine version. - - If specified, then I(engine_name) must also be specified. - type: str - default: '' - required: false -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -''' - -EXAMPLES = r''' -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: List an option group - community.aws.rds_option_group_info: - option_group_name: test-mysql-option-group - register: option_group - -- name: List all the option groups - community.aws.rds_option_group_info: - region: ap-southeast-2 - profile: production - register: option_group -''' - -RETURN = r''' -changed: - description: True if listing the RDS option groups succeeds. - type: bool - returned: always - sample: false -option_groups_list: - description: The available RDS option groups. - returned: always - type: complex - contains: - allows_vpc_and_non_vpc_instance_memberships: - description: Indicates whether this option group can be applied to both VPC and non-VPC instances. - returned: always - type: bool - sample: false - engine_name: - description: Indicates the name of the engine that this option group can be applied to. - returned: always - type: str - sample: "mysql" - major_engine_version: - description: Indicates the major engine version associated with this option group. - returned: always - type: str - sample: "5.6" - option_group_arn: - description: The Amazon Resource Name (ARN) for the option group.
- returned: always - type: str - sample: "arn:aws:rds:ap-southeast-2:721066863947:og:ansible-test-option-group" - option_group_description: - description: Provides a description of the option group. - returned: always - type: str - sample: "test mysql option group" - option_group_name: - description: Specifies the name of the option group. - returned: always - type: str - sample: "test-mysql-option-group" - options: - description: Indicates what options are available in the option group. - returned: always - type: complex - contains: - db_security_group_memberships: - description: If the option requires access to a port, then this DB security group allows access to the port. - returned: always - type: complex - sample: list - elements: dict - contains: - status: - description: The status of the DB security group. - returned: always - type: str - sample: "available" - db_security_group_name: - description: The name of the DB security group. - returned: always - type: str - sample: "mydbsecuritygroup" - option_description: - description: The description of the option. - returned: always - type: str - sample: "Innodb Memcached for MySQL" - option_name: - description: The name of the option. - returned: always - type: str - sample: "MEMCACHED" - option_settings: - description: The option settings for this option. - returned: always - type: complex - contains: - allowed_values: - description: The allowed values of the option setting. - returned: always - type: str - sample: "1-2048" - apply_type: - description: The DB engine specific parameter type. - returned: always - type: str - sample: "STATIC" - data_type: - description: The data type of the option setting. - returned: always - type: str - sample: "INTEGER" - default_value: - description: The default value of the option setting. - returned: always - type: str - sample: "1024" - description: - description: The description of the option setting. - returned: always - type: str - sample: "Verbose level for memcached." - is_collection: - description: Indicates if the option setting is part of a collection. - returned: always - type: bool - sample: true - is_modifiable: - description: A Boolean value that, when true, indicates the option setting can be modified from the default. - returned: always - type: bool - sample: true - name: - description: The name of the option that has settings that you can set. - returned: always - type: str - sample: "INNODB_API_ENABLE_MDL" - value: - description: The current value of the option setting. - returned: always - type: str - sample: "0" - permanent: - description: Indicates whether this option is permanent. - returned: always - type: bool - sample: true - persistent: - description: Indicates whether this option is persistent. - returned: always - type: bool - sample: true - port: - description: If required, the port configured for this option to use. - returned: always - type: int - sample: 11211 - vpc_security_group_memberships: - description: If the option requires access to a port, then this VPC security group allows access to the port. - returned: always - type: list - elements: dict - contains: - status: - description: The status of the VPC security group. - returned: always - type: str - sample: "available" - vpc_security_group_id: - description: The ID of the VPC security group. - returned: always - type: str - sample: "sg-0cd636a23ae76e9a4" - vpc_id: - description: If present, this option group can only be applied to instances that are in the VPC indicated by this field.
- returned: always - type: str - sample: "vpc-bf07e9d6" - tags: - description: The tags associated with the option group. - type: dict - returned: always - sample: { - "Ansible": "Test" - } - -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags - - -@AWSRetry.jittered_backoff(retries=10) -def _describe_option_groups(client, **params): - try: - paginator = client.get_paginator('describe_option_groups') - return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('OptionGroupNotFoundFault'): - return {} - - -def list_option_groups(client, module): - option_groups = list() - params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - - if module.params.get('marker'): - params['Marker'] = module.params.get('marker') - if int(params['Marker']) < 20 or int(params['Marker']) > 100: - module.fail_json(msg="marker must be between 20 and 100") - - if module.params.get('max_records'): - params['MaxRecords'] = module.params.get('max_records') - if params['MaxRecords'] > 100: - module.fail_json(msg="The maximum number of records to include in the response is 100.") - - params['EngineName'] = module.params.get('engine_name') - params['MajorEngineVersion'] = module.params.get('major_engine_version') - - try: - result = _describe_option_groups(client, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't describe option groups.") - - for option_group in result['OptionGroupsList']: - # Turn the boto3 result into ansible_friendly_snaked_names - converted_option_group = camel_dict_to_snake_dict(option_group) - converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn']) - option_groups.append(converted_option_group) - - return option_groups - - -def main(): - argument_spec = dict( - option_group_name=dict(default='', type='str'), - marker=dict(type='str'), - max_records=dict(type='int', default=100), - engine_name=dict(type='str', default=''), - major_engine_version=dict(type='str', default=''), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['option_group_name', 'engine_name'], - ['option_group_name', 'major_engine_version'], - ], - required_together=[ - ['engine_name', 'major_engine_version'], - ], - ) - - # Validate Requirements - try: - connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - results = list_option_groups(connection, module) - - module.exit_json(result=results) - - -if __name__ == '__main__': - main() From d599943e4c493bc3829add1165ce7a1512373328 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 23 Sep 2022 12:40:28 +0200 Subject: [PATCH 590/683] Migrate route53* modules and tests (#1485) Migrate route53* modules and tests Depends-On: ansible-collections/amazon.aws#1029 Remove route53* modules and tests These
modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- route53.py | 793 -------------------------------- route53_health_check.py | 644 ------------------------------- route53_info.py | 827 ---------------------------------------- route53_zone.py | 479 ----------------------- 4 files changed, 2743 deletions(-) delete mode 100644 route53.py delete mode 100644 route53_health_check.py delete mode 100644 route53_info.py delete mode 100644 route53_zone.py diff --git a/route53.py b/route53.py deleted file mode 100644 index 620d1833b98..00000000000 --- a/route53.py +++ /dev/null @@ -1,793 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: route53 -version_added: 1.0.0 -short_description: Add or delete entries in Amazon's Route 53 DNS service -description: - - Creates and deletes DNS records in Amazon's Route 53 service. -options: - state: - description: - - Specifies the state of the resource record. - required: true - aliases: [ 'command' ] - choices: [ 'present', 'absent', 'get', 'create', 'delete' ] - type: str - zone: - description: - - The DNS zone to modify. - - This is a required parameter if I(hosted_zone_id) is not supplied. - type: str - hosted_zone_id: - description: - - The Hosted Zone ID of the DNS zone to modify. - - This is a required parameter if I(zone) is not supplied. - type: str - record: - description: - - The full DNS record to create or delete. - required: true - type: str - ttl: - description: - - The TTL, in seconds, to give the new record. - - Mutually exclusive with I(alias). - default: 3600 - type: int - type: - description: - - The type of DNS record to create. - required: true - choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ] - type: str - alias: - description: - - Indicates if this is an alias record. - - Mutually exclusive with I(ttl). - - Defaults to C(false). - type: bool - alias_hosted_zone_id: - description: - - The hosted zone identifier. - type: str - alias_evaluate_target_health: - description: - - Whether or not to evaluate an alias target's health. Useful for aliases to Elastic Load Balancers. - type: bool - default: false - value: - description: - - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records. - type: list - elements: str - overwrite: - description: - - Whether an existing record should be overwritten on create if values do not match. - type: bool - retry_interval: - description: - - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds. - If you have many domain names, the default of C(500) seconds may be too long. - default: 500 - type: int - private_zone: - description: - - If set to C(true), the private zone matching the requested name within the domain will be used if there are both public and private zones. - - The default is to use the public zone. - type: bool - default: false - identifier: - description: - - Must be specified for weighted, latency-based and failover resource record sets only.
- An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. - type: str - weight: - description: - - Weighted resource record sets only. Among resource record sets that - have the same combination of DNS name and type, a value that - determines what portion of traffic for the current resource record set - is routed to the associated location. - - Mutually exclusive with I(region) and I(failover). - type: int - region: - description: - - Latency-based resource record sets only. Among resource record sets - that have the same combination of DNS name and type, a value that - determines which region this should be associated with for the - latency-based routing. - - Mutually exclusive with I(weight) and I(failover). - type: str - geo_location: - description: - - Allows you to control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. - - Two geolocation resource record sets that specify the same geographic location cannot be created. - - Non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation - resource record sets cannot be created. - suboptions: - continent_code: - description: - - The two-letter code for the continent. - - Specifying I(continent_code) with either I(country_code) or I(subdivision_code) returns an InvalidInput error. - type: str - country_code: - description: - - The two-letter code for a country. - - Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2. - type: str - subdivision_code: - description: - - The two-letter code for a state of the United States. - - To specify I(subdivision_code), I(country_code) must be set to C(US). - type: str - type: dict - version_added: 3.3.0 - health_check: - description: - - Health check to associate with this record. - type: str - failover: - description: - - Failover resource record sets only. Whether this is the primary or - secondary resource record set. Allowed values are PRIMARY and SECONDARY. - - Mutually exclusive with I(weight) and I(region). - type: str - choices: ['SECONDARY', 'PRIMARY'] - vpc_id: - description: - - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC." - - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs. - type: str - wait: - description: - - Wait until the changes have been replicated to all Amazon Route 53 DNS servers. - type: bool - default: false - wait_timeout: - description: - - How long to wait for the changes to be replicated, in seconds. - default: 300 - type: int -author: -- Bruce Pennypacker (@bpennypacker) -- Mike Buzzetti (@jimbydamonk) -extends_documentation_fragment: -- amazon.aws.aws -''' - -RETURN = r''' -nameservers: - description: Nameservers associated with the zone. - returned: when state is 'get' - type: list - sample: - - ns-1036.awsdns-00.org. - - ns-516.awsdns-00.net. - - ns-1504.awsdns-00.co.uk. - - ns-1.awsdns-00.com. -set: - description: Info specific to the resource record. - returned: when state is 'get' - type: complex - contains: - alias: - description: Whether this is an alias. - returned: always - type: bool - sample: false - failover: - description: Whether this is the primary or secondary resource record set. - returned: always - type: str - sample: PRIMARY - geo_location: - description: Geographic location based on which Route 53 responds to DNS queries.
- returned: when configured - type: dict - sample: { continent_code: "NA", country_code: "US", subdivision_code: "CA" } - version_added: 3.3.0 - health_check: - description: health_check associated with this record. - returned: always - type: str - identifier: - description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. - returned: always - type: str - record: - description: Domain name for the record set. - returned: always - type: str - sample: new.foo.com. - region: - description: Which region this should be associated with for latency-based routing. - returned: always - type: str - sample: us-west-2 - ttl: - description: Resource record cache TTL. - returned: always - type: str - sample: '3600' - type: - description: Resource record set type. - returned: always - type: str - sample: A - value: - description: Record value. - returned: always - type: str - sample: 52.43.18.27 - values: - description: Record Values. - returned: always - type: list - sample: - - 52.43.18.27 - weight: - description: Weight of the record. - returned: always - type: str - sample: '3' - zone: - description: Zone this record set belongs to. - returned: always - type: str - sample: foo.bar.com. -''' - -EXAMPLES = r''' -- name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated - community.aws.route53: - state: present - zone: foo.com - record: new.foo.com - type: A - ttl: 7200 - value: 1.1.1.1,2.2.2.2,3.3.3.3 - wait: true -- name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated - community.aws.route53: - state: present - zone: foo.com - record: new.foo.com - type: A - ttl: 7200 - value: - - 1.1.1.1 - - 2.2.2.2 - - 3.3.3.3 - wait: true -- name: Retrieve the details for new.foo.com - community.aws.route53: - state: get - zone: foo.com - record: new.foo.com - type: A - register: rec -- name: Delete new.foo.com A record using the results from the get command - community.aws.route53: - state: absent - zone: foo.com - record: "{{ rec.set.record }}" - ttl: "{{ rec.set.ttl }}" - type: "{{ rec.set.type }}" - value: "{{ rec.set.value }}" -# Add an AAAA record. Note that because there are colons in the value -# that the IPv6 address must be quoted. Also shows using the old form command=create. -- name: Add an AAAA record - community.aws.route53: - command: create - zone: foo.com - record: localhost.foo.com - type: AAAA - ttl: 7200 - value: "::1" -# For more information on SRV records see: -# https://en.wikipedia.org/wiki/SRV_record -- name: Add a SRV record with multiple fields for a service on port 22222 - community.aws.route53: - state: present - zone: foo.com - record: "_example-service._tcp.foo.com" - type: SRV - value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com" -# Note that TXT and SPF records must be surrounded -# by quotes when sent to Route 53: -- name: Add a TXT record. 
- community.aws.route53: - state: present - zone: foo.com - record: localhost.foo.com - type: TXT - ttl: 7200 - value: '"bar"' -- name: Add an alias record that points to an Amazon ELB - community.aws.route53: - state: present - zone: foo.com - record: elb.foo.com - type: A - value: "{{ elb_dns_name }}" - alias: True - alias_hosted_zone_id: "{{ elb_zone_id }}" -- name: Retrieve the details for elb.foo.com - community.aws.route53: - state: get - zone: foo.com - record: elb.foo.com - type: A - register: rec -- name: Delete an alias record using the results from the get command - community.aws.route53: - state: absent - zone: foo.com - record: "{{ rec.set.record }}" - ttl: "{{ rec.set.ttl }}" - type: "{{ rec.set.type }}" - value: "{{ rec.set.value }}" - alias: True - alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}" -- name: Add an alias record that points to an Amazon ELB and evaluates its health - community.aws.route53: - state: present - zone: foo.com - record: elb.foo.com - type: A - value: "{{ elb_dns_name }}" - alias: True - alias_hosted_zone_id: "{{ elb_zone_id }}" - alias_evaluate_target_health: True -- name: Add an AAAA record with Hosted Zone ID - community.aws.route53: - state: present - zone: foo.com - hosted_zone_id: Z2AABBCCDDEEFF - record: localhost.foo.com - type: AAAA - ttl: 7200 - value: "::1" -- name: Use a routing policy to distribute traffic - community.aws.route53: - state: present - zone: foo.com - record: www.foo.com - type: CNAME - value: host1.foo.com - ttl: 30 - # Routing policy - identifier: "host1@www" - weight: 100 - health_check: "d994b780-3150-49fd-9205-356abdd42e75" -- name: Add a CAA record (RFC 6844) - community.aws.route53: - state: present - zone: example.com - record: example.com - type: CAA - value: - - 0 issue "ca.example.net" - - 0 issuewild ";" - - 0 iodef "mailto:security@example.com" -- name: Create a record with geo_location - country_code - community.aws.route53: - state: present - zone: '{{ zone_one }}' - record: 'geo-test.{{ zone_one }}' - identifier: "geohost@www" - type: A - value: 1.1.1.1 - ttl: 30 - geo_location: - country_code: US -- name: Create a record with geo_location - subdivision code - community.aws.route53: - state: present - zone: '{{ zone_one }}' - record: 'geo-test.{{ zone_one }}' - identifier: "geohost@www" - type: A - value: 1.1.1.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX -''' - -from operator import itemgetter - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing -WAIT_RETRY = 5 # how many seconds to wait between propagation status polls - - -@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) -def _list_record_sets(route53, **kwargs): - paginator = route53.get_paginator('list_resource_record_sets') - return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets'] - -
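-# Illustrative usage of the retry-wrapped pagination helpers above and below (the zone ID is a made-up placeholder): the jittered-backoff decorator wraps the whole pagination, so a throttled call restarts the listing from scratch rather than resuming mid-paginator. -# records = _list_record_sets(route53, HostedZoneId='Z0000000EXAMPLE') -# zones = _list_hosted_zones(route53)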
-@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) -def _list_hosted_zones(route53, **kwargs): - paginator = route53.get_paginator('list_hosted_zones') - return paginator.paginate(**kwargs).build_full_result()['HostedZones'] - - -def get_record(route53, zone_id, record_name, record_type, record_identifier): - record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id) - - for record_set in record_sets_results: - record_set['Name'] = record_set['Name'].encode().decode('unicode_escape') - # If the record name and type is not equal, move to the next record - if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']): - continue - - if record_identifier and record_identifier != record_set.get("SetIdentifier"): - continue - - return record_set - - return None - - -def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): - """Finds a zone by name or zone_id""" - hosted_zones_results = _list_hosted_zones(route53) - - for zone in hosted_zones_results: - # only save this zone id if the private status of the zone matches - # the private_zone_in boolean specified in the params - private_zone = module.boolean(zone['Config'].get('PrivateZone', False)) - zone_id = zone['Id'].replace("/hostedzone/", "") - - if private_zone == want_private and zone['Name'] == zone_name: - if want_vpc_id: - # NOTE: These details aren't available in other boto3 methods, hence the necessary - # extra API call - hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) - if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]: - return zone_id - else: - return zone_id - return None - - -def format_record(record_in, zone_in, zone_id): - """ - Formats a record in a way that's consistent with the pre-boto3 migration values - as well as returning the 'normal' boto3 style values - """ - if not record_in: - return None - - record = dict(record_in) - record['zone'] = zone_in - record['hosted_zone_id'] = zone_id - - record['type'] = record_in.get('Type', None) - record['record'] = record_in.get('Name').encode().decode('unicode_escape') - record['ttl'] = record_in.get('TTL', None) - record['identifier'] = record_in.get('SetIdentifier', None) - record['weight'] = record_in.get('Weight', None) - record['region'] = record_in.get('Region', None) - record['failover'] = record_in.get('Failover', None) - record['health_check'] = record_in.get('HealthCheckId', None) - - if record['ttl']: - record['ttl'] = str(record['ttl']) - if record['weight']: - record['weight'] = str(record['weight']) - if record['region']: - record['region'] = str(record['region']) - - if record_in.get('AliasTarget'): - record['alias'] = True - record['value'] = record_in['AliasTarget'].get('DNSName') - record['values'] = [record_in['AliasTarget'].get('DNSName')] - record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId') - record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth') - else: - record['alias'] = False - records = [r.get('Value') for r in record_in.get('ResourceRecords')] - record['value'] = ','.join(sorted(records)) - record['values'] = sorted(records) - - return record - - -def get_hosted_zone_nameservers(route53, zone_id): - hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name'] - resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id) - - nameservers_records = list( - filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', 
resource_records_sets) - )[0]['ResourceRecords'] - - return [ns_record['Value'] for ns_record in nameservers_records] - - -def main(): - argument_spec = dict( - state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']), - zone=dict(type='str'), - hosted_zone_id=dict(type='str'), - record=dict(type='str', required=True), - ttl=dict(type='int', default=3600), - type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']), - alias=dict(type='bool'), - alias_hosted_zone_id=dict(type='str'), - alias_evaluate_target_health=dict(type='bool', default=False), - value=dict(type='list', elements='str'), - overwrite=dict(type='bool'), - retry_interval=dict(type='int', default=500), - private_zone=dict(type='bool', default=False), - identifier=dict(type='str'), - weight=dict(type='int'), - region=dict(type='str'), - geo_location=dict(type='dict', - options=dict( - continent_code=dict(type="str"), - country_code=dict(type="str"), - subdivision_code=dict(type="str")), - required=False), - health_check=dict(type='str'), - failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']), - vpc_id=dict(type='str'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[['zone', 'hosted_zone_id']], - # If alias is True then you must specify alias_hosted_zone as well - required_together=[['alias', 'alias_hosted_zone_id']], - # state=present, absent, create, delete THEN value is required - required_if=( - ('state', 'present', ['value']), - ('state', 'create', ['value']), - ), - # failover, region and weight are mutually exclusive - mutually_exclusive=[ - ('failover', 'region', 'weight'), - ('alias', 'ttl'), - ], - # failover, region, weight and geo_location require identifier - required_by=dict( - failover=('identifier',), - region=('identifier',), - weight=('identifier',), - geo_location=('identifier'), - ), - ) - - if module.params['state'] in ('present', 'create'): - command_in = 'create' - elif module.params['state'] in ('absent', 'delete'): - command_in = 'delete' - elif module.params['state'] == 'get': - command_in = 'get' - - zone_in = (module.params.get('zone') or '').lower() - hosted_zone_id_in = module.params.get('hosted_zone_id') - ttl_in = module.params.get('ttl') - record_in = module.params.get('record').lower() - type_in = module.params.get('type') - value_in = module.params.get('value') or [] - alias_in = module.params.get('alias') - alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') - alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health') - retry_interval_in = module.params.get('retry_interval') - - if module.params['vpc_id'] is not None: - private_zone_in = True - else: - private_zone_in = module.params.get('private_zone') - - identifier_in = module.params.get('identifier') - weight_in = module.params.get('weight') - region_in = module.params.get('region') - health_check_in = module.params.get('health_check') - failover_in = module.params.get('failover') - vpc_id_in = module.params.get('vpc_id') - wait_in = module.params.get('wait') - wait_timeout_in = module.params.get('wait_timeout') - geo_location = module.params.get('geo_location') - - if zone_in[-1:] != '.': - zone_in += "." - - if record_in[-1:] != '.': - record_in += "." 
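-# For example (illustrative values): a user-supplied zone of 'foo.com' and record of 'new.foo.com' become 'foo.com.' and 'new.foo.com.' here, matching the canonical trailing-dot form that Route 53 returns from list_resource_record_sets.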
- - if command_in == 'create' or command_in == 'delete': - if alias_in and len(value_in) != 1: - module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: - module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") - - retry_decorator = AWSRetry.jittered_backoff( - retries=MAX_AWS_RETRIES, - delay=retry_interval_in, - catch_extra_error_codes=['PriorRequestNotComplete'], - max_delay=max(60, retry_interval_in), - ) - - # connect to the route53 endpoint - try: - route53 = module.client('route53', retry_decorator=retry_decorator) - except botocore.exceptions.HTTPClientError as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - # Find the named zone ID - zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in) - - # Verify that the requested zone is already defined in Route53 - if zone_id is None: - errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in) - module.fail_json(msg=errmsg) - - aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in) - - resource_record_set = scrub_none_parameters({ - 'Name': record_in, - 'Type': type_in, - 'Weight': weight_in, - 'Region': region_in, - 'Failover': failover_in, - 'TTL': ttl_in, - 'ResourceRecords': [dict(Value=value) for value in value_in], - 'HealthCheckId': health_check_in, - 'SetIdentifier': identifier_in, - }) - - if geo_location: - continent_code = geo_location.get('continent_code') - country_code = geo_location.get('country_code') - subdivision_code = geo_location.get('subdivision_code') - - if continent_code and (country_code or subdivision_code): - module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.') - - if not any([continent_code, country_code, subdivision_code]): - module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.') - - if geo_location.get('subdivision_code') and geo_location.get('country_code').lower() != 'us': - module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.') - - # Build geo_location suboptions specification - resource_record_set['GeoLocation'] = {} - if continent_code: - resource_record_set['GeoLocation']['ContinentCode'] = continent_code - if country_code: - resource_record_set['GeoLocation']['CountryCode'] = country_code - if subdivision_code: - resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code - - if command_in == 'delete' and aws_record is not None: - resource_record_set['TTL'] = aws_record.get('TTL') - if not resource_record_set['ResourceRecords']: - resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords') - - if alias_in: - resource_record_set['AliasTarget'] = dict( - HostedZoneId=alias_hosted_zone_id_in, - DNSName=value_in[0], - EvaluateTargetHealth=alias_evaluate_target_health_in - ) - if 'ResourceRecords' in resource_record_set: - del resource_record_set['ResourceRecords'] - if 'TTL' in resource_record_set: - del resource_record_set['TTL'] - - # On CAA records order doesn't matter - if type_in == 'CAA': - resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value')) - if 
aws_record: - aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value')) - - if command_in == 'create' and aws_record == resource_record_set: - rr_sets = [camel_dict_to_snake_dict(resource_record_set)] - module.exit_json(changed=False, resource_records_sets=rr_sets) - - if command_in == 'get': - if type_in == 'NS': - ns = aws_record.get('values', []) - else: - # Retrieve name servers associated to the zone. - ns = get_hosted_zone_nameservers(route53, zone_id) - - formatted_aws = format_record(aws_record, zone_in, zone_id) - - if formatted_aws is None: - # record does not exist - module.exit_json(changed=False, set=[], nameservers=ns, resource_record_sets=[]) - - rr_sets = [camel_dict_to_snake_dict(aws_record)] - module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets) - - if command_in == 'delete' and not aws_record: - module.exit_json(changed=False) - - if command_in == 'create' or command_in == 'delete': - if command_in == 'create' and aws_record: - if not module.params['overwrite']: - module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it") - command = 'UPSERT' - else: - command = command_in.upper() - - if not module.check_mode: - try: - change_resource_record_sets = route53.change_resource_record_sets( - aws_retry=True, - HostedZoneId=zone_id, - ChangeBatch=dict( - Changes=[ - dict( - Action=command, - ResourceRecordSet=resource_record_set - ) - ] - ) - ) - - if wait_in: - waiter = get_waiter(route53, 'resource_record_sets_changed') - waiter.wait( - Id=change_resource_record_sets['ChangeInfo']['Id'], - WaiterConfig=dict( - Delay=WAIT_RETRY, - MaxAttempts=wait_timeout_in // WAIT_RETRY, - ) - ) - except is_boto3_error_message('but it already exists'): - module.exit_json(changed=False) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to update records') - except Exception as e: - module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) - - rr_sets = [camel_dict_to_snake_dict(resource_record_set)] - formatted_aws = format_record(aws_record, zone_in, zone_id) - formatted_record = format_record(resource_record_set, zone_in, zone_id) - - module.exit_json( - changed=True, - diff=dict( - before=formatted_aws, - after=formatted_record if command_in != 'delete' else {}, - resource_record_sets=rr_sets, - ), - ) - - -if __name__ == '__main__': - main() diff --git a/route53_health_check.py b/route53_health_check.py deleted file mode 100644 index b07672e9ddb..00000000000 --- a/route53_health_check.py +++ /dev/null @@ -1,644 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: route53_health_check -version_added: 1.0.0 -short_description: Manage health-checks in Amazon's Route 53 DNS service -description: - - Creates and deletes DNS health checks in Amazon's Route 53 service. - - Only the port, resource_path, string_match and request_interval are - considered when updating existing health-checks. -options: - state: - description: - - Specifies the action to take.
- choices: [ 'present', 'absent' ] - type: str - default: 'present' - disabled: - description: - - Stops Route 53 from performing health checks. - - See the AWS documentation for more details on the exact implications. - U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html) - - Defaults to C(true) when creating a new health check. - type: bool - version_added: 2.1.0 - ip_address: - description: - - IP address of the endpoint to check. Either this or I(fqdn) has to be provided. - - IP addresses must be publicly routable. - type: str - port: - description: - - The port on the endpoint on which you want Amazon Route 53 to perform - health checks. Required for TCP checks. - type: int - type: - description: - - The type of health check that you want to create, which indicates how - Amazon Route 53 determines whether an endpoint is healthy. - - Once health_check is created, type cannot be changed. - choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ] - type: str - resource_path: - description: - - The path that you want Amazon Route 53 to request when performing - health checks. The path can be any value for which your endpoint will - return an HTTP status code of 2xx or 3xx when the endpoint is healthy, - for example the file /docs/route53-health-check.html. - - Mutually exclusive with I(type='TCP'). - - The path must begin with a /. - - Maximum 255 characters. - type: str - fqdn: - description: - - Domain name of the endpoint to check. Either this or I(ip_address) has - to be provided. When both are given the I(fqdn) is used in the C(Host:) - header of the HTTP request. - type: str - string_match: - description: - - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string - that you want Amazon Route 53 to search for in the response body from - the specified resource. If the string appears in the first 5120 bytes - of the response body, Amazon Route 53 considers the resource healthy. - type: str - request_interval: - description: - - The number of seconds between the time that Amazon Route 53 gets a - response from your endpoint and the time that it sends the next - health-check request. - default: 30 - choices: [ 10, 30 ] - type: int - failure_threshold: - description: - - The number of consecutive health checks that an endpoint must pass or - fail for Amazon Route 53 to change the current status of the endpoint - from unhealthy to healthy or vice versa. - - Will default to C(3) if not specified on creation. - choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] - type: int - health_check_name: - description: - - Name of the Health Check. - - Used together with I(use_unique_names) to set/make use of I(health_check_name) as a unique identifier. - type: str - required: False - aliases: ['name'] - version_added: 4.1.0 - use_unique_names: - description: - - Used together with I(health_check_name) to set/make use of I(health_check_name) as a unique identifier. - type: bool - required: False - version_added: 4.1.0 - health_check_id: - description: - - ID of the health check to be updated or deleted. - - If provided, a health check can be updated or deleted based on the ID as a unique identifier. - type: str - required: False - aliases: ['id'] - version_added: 4.1.0 -author: - - "zimbatm (@zimbatm)" -notes: - - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
-extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = ''' -- name: Create a health-check for host1.example.com and use it in record - community.aws.route53_health_check: - state: present - fqdn: host1.example.com - type: HTTP_STR_MATCH - resource_path: / - string_match: "Hello" - request_interval: 10 - failure_threshold: 2 - register: my_health_check - -- community.aws.route53: - action: create - zone: "example.com" - type: CNAME - record: "www.example.com" - value: host1.example.com - ttl: 30 - # Routing policy - identifier: "host1@www" - weight: 100 - health_check: "{{ my_health_check.health_check.id }}" - -- name: create a simple health check with health_check_name as unique identifier - community.aws.route53_health_check: - state: present - health_check_name: ansible - fqdn: ansible.com - port: 443 - type: HTTPS - use_unique_names: true - -- name: Delete health-check - community.aws.route53_health_check: - state: absent - fqdn: host1.example.com - -- name: Update Health check by ID - update ip_address - community.aws.route53_health_check: - id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx - ip_address: 1.2.3.4 - -- name: Update Health check by ID - update port - community.aws.route53_health_check: - id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx - port: 8080 - -- name: Delete Health check by ID - community.aws.route53_health_check: - state: absent - id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx - -''' - -RETURN = r''' -health_check: - description: Information about the health check. - returned: success - type: dict - contains: - action: - description: The action performed by the module. - type: str - returned: When a change is or would be made. - sample: 'updated' - id: - description: The Unique ID assigned by AWS to the health check. - type: str - returned: When the health check exists. - sample: 50ec8a13-9623-4c66-9834-dd8c5aedc9ba - health_check_version: - description: The version number of the health check. - type: int - returned: When the health check exists. - sample: 14 - health_check_config: - description: - - Detailed information about the health check. - - May contain additional values from Route 53 health check - features not yet supported by this module. - type: dict - returned: When the health check exists. - contains: - type: - description: The type of the health check. - type: str - returned: When the health check exists. - sample: 'HTTPS_STR_MATCH' - failure_threshold: - description: - - The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to - change the current status of the endpoint from unhealthy to healthy or vice versa. - type: int - returned: When the health check exists. - sample: 3 - fully_qualified_domain_name: - description: The FQDN configured for the health check to test. - type: str - returned: When the health check exists and an FQDN is configured. - sample: 'host1.example.com' - ip_address: - description: The IPv4 or IPv6 IP address of the endpoint to be queried. - type: str - returned: When the health check exists and a specific IP address is configured. - sample: '' - port: - description: The port on the endpoint that the health check will query. - type: int - returned: When the health check exists. - sample: 443 - request_interval: - description: The number of seconds between health check queries. - type: int - returned: When the health check exists. - sample: 30 - resource_path: - description: The URI path to query when performing an HTTP/HTTPS based health check.
- type: str - returned: When the health check exists and a resource path has been configured. - sample: '/healthz' - search_string: - description: A string that must be present in the response for a health check to be considered successful. - type: str - returned: When the health check exists and a search string has been configured. - sample: 'ALIVE' - disabled: - description: Whether the health check has been disabled or not. - type: bool - returned: When the health check exists. - sample: false - tags: - description: A dictionary representing the tags on the health check. - type: dict - returned: When the health check exists. - sample: '{"my_key": "my_value"}' -''' - -import uuid - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags -from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags - - -def _list_health_checks(**params): - try: - results = client.list_health_checks(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to list health checks') - return results - - -def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): - """Searches for health checks that have the exact same set of immutable values""" - - # In lieu of an Id we perform matches against the following values: - # - ip_addr - # - fqdn - # - type (immutable) - # - request_interval - # - port - - # Because list_health_checks provides no 'filter' mechanism, using - # a paginator would result in (on average) double the - # number of API calls and can get really slow.
- # Additionally, we can't properly wrap the paginator, so retrying means - # starting from scratch with a paginator - results = _list_health_checks() - while True: - for check in results.get('HealthChecks'): - config = check.get('HealthCheckConfig') - if ( - config.get('IPAddress', None) == ip_addr and - config.get('FullyQualifiedDomainName', None) == fqdn and - config.get('Type') == hc_type and - config.get('RequestInterval') == request_interval and - config.get('Port', None) == port - ): - return check - - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) - else: - return None - - -def get_existing_checks_with_name(): - results = _list_health_checks() - health_checks_with_name = {} - while True: - for check in results.get('HealthChecks'): - if 'Name' in describe_health_check(check['Id'])['tags']: - check_name = describe_health_check(check['Id'])['tags']['Name'] - health_checks_with_name[check_name] = check - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) - else: - return health_checks_with_name - - -def delete_health_check(check_id): - if not check_id: - return False, None - - if module.check_mode: - return True, 'delete' - - try: - client.delete_health_check( - aws_retry=True, - HealthCheckId=check_id, - ) - except is_boto3_error_code('NoSuchHealthCheck'): - # Handle the deletion race condition as cleanly as possible - return False, None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete health check') - - return True, 'delete' - - -def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in): - - # In general, if a request is repeated with the same CallerRef it won't - # result in a duplicate check appearing.
This means we can safely use our - # retry decorators - caller_ref = str(uuid.uuid4()) - missing_args = [] - - health_check = dict( - Type=type_in, - RequestInterval=request_interval_in, - Port=port_in, - ) - if module.params.get('disabled') is not None: - health_check['Disabled'] = module.params.get('disabled') - if ip_addr_in: - health_check['IPAddress'] = ip_addr_in - if fqdn_in: - health_check['FullyQualifiedDomainName'] = fqdn_in - - if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - resource_path = module.params.get('resource_path') - # if not resource_path: - # missing_args.append('resource_path') - if resource_path: - health_check['ResourcePath'] = resource_path - if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - string_match = module.params.get('string_match') - if not string_match: - missing_args.append('string_match') - health_check['SearchString'] = module.params.get('string_match') - - failure_threshold = module.params.get('failure_threshold') - if not failure_threshold: - failure_threshold = 3 - health_check['FailureThreshold'] = failure_threshold - - if missing_args: - module.fail_json(msg='missing required arguments for creation: {0}'.format( - ', '.join(missing_args)), - ) - - if module.check_mode: - return True, 'create', None - - try: - result = client.create_health_check( - aws_retry=True, - CallerReference=caller_ref, - HealthCheckConfig=health_check, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check) - - check_id = result.get('HealthCheck').get('Id') - return True, 'create', check_id - - -def update_health_check(existing_check): - # It's possible to update the following parameters - # - ResourcePath - # - SearchString - # - FailureThreshold - # - Disabled - # - IPAddress - # - Port - # - FullyQualifiedDomainName - - changes = dict() - existing_config = existing_check.get('HealthCheckConfig') - - resource_path = module.params.get('resource_path', None) - if resource_path and resource_path != existing_config.get('ResourcePath'): - changes['ResourcePath'] = resource_path - - search_string = module.params.get('string_match', None) - if search_string and search_string != existing_config.get('SearchString'): - changes['SearchString'] = search_string - - failure_threshold = module.params.get('failure_threshold', None) - if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'): - changes['FailureThreshold'] = failure_threshold - - disabled = module.params.get('disabled', None) - if disabled is not None and disabled != existing_config.get('Disabled'): - changes['Disabled'] = module.params.get('disabled') - - # If updating based on Health Check ID or health_check_name, we can update - if module.params.get('health_check_id') or module.params.get('use_unique_names'): - ip_address = module.params.get('ip_address', None) - if ip_address is not None and ip_address != existing_config.get('IPAddress'): - changes['IPAddress'] = module.params.get('ip_address') - - port = module.params.get('port', None) - if port is not None and port != existing_config.get('Port'): - changes['Port'] = module.params.get('port') - - fqdn = module.params.get('fqdn', None) - if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'): - changes['FullyQualifiedDomainName'] = module.params.get('fqdn') - - # No changes...
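-# (that is, the computed 'changes' dict is empty, so report ok below without calling update_health_check)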
- if not changes: - return False, None - if module.check_mode: - return True, 'update' - - check_id = existing_check.get('Id') - # This makes sure we're starting from the version we think we are... - version_id = existing_check.get('HealthCheckVersion', 1) - try: - client.update_health_check( - HealthCheckId=check_id, - HealthCheckVersion=version_id, - **changes, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to update health check.', id=check_id) - - return True, 'update' - - -def describe_health_check(id): - if not id: - return dict() - - try: - result = client.get_health_check( - aws_retry=True, - HealthCheckId=id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get health check.', id=id) - - health_check = result.get('HealthCheck', {}) - health_check = camel_dict_to_snake_dict(health_check) - tags = get_tags(module, client, 'healthcheck', id) - health_check['tags'] = tags - return health_check - - -def main(): - argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - disabled=dict(type='bool'), - ip_address=dict(), - port=dict(type='int'), - type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), - resource_path=dict(), - fqdn=dict(), - string_match=dict(), - request_interval=dict(type='int', choices=[10, 30], default=30), - failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - health_check_id=dict(type='str', aliases=['id'], required=False), - health_check_name=dict(type='str', aliases=['name'], required=False), - use_unique_names=dict(type='bool', required=False), - ) - - args_one_of = [ - ['ip_address', 'fqdn', 'health_check_id'], - ] - - args_if = [ - ['type', 'TCP', ('port',)], - ] - - args_required_together = [ - ['use_unique_names', 'health_check_name'], - ] - - args_mutually_exclusive = [ - ['health_check_id', 'health_check_name'] - ] - - global module - global client - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=args_one_of, - required_if=args_if, - required_together=args_required_together, - mutually_exclusive=args_mutually_exclusive, - supports_check_mode=True, - ) - - if not module.params.get('health_check_id') and not module.params.get('type'): - module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") - - state_in = module.params.get('state') - ip_addr_in = module.params.get('ip_address') - port_in = module.params.get('port') - type_in = module.params.get('type') - resource_path_in = module.params.get('resource_path') - fqdn_in = module.params.get('fqdn') - string_match_in = module.params.get('string_match') - request_interval_in = module.params.get('request_interval') - failure_threshold_in = module.params.get('failure_threshold') - health_check_name = module.params.get('health_check_name') - tags = module.params.get('tags') - - # Default port - if port_in is None: - if type_in in ['HTTP', 'HTTP_STR_MATCH']: - port_in = 80 - elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']: - port_in = 443 - - if string_match_in: - if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - module.fail_json(msg="parameter 'string_match' is only valid for the HTTP(S)_STR_MATCH types") - if len(string_match_in) > 255: - module.fail_json(msg="parameter 'string_match' is limited to
255 characters max") - - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) - - changed = False - action = None - check_id = None - - if module.params.get('use_unique_names') or module.params.get('health_check_id'): - module.deprecate( - 'The health_check_name is currently non required parameter.' - ' This behavior will change and health_check_name ' - ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', - version='6.0.0', collection_name='community.aws') - - # If update or delete Health Check based on ID - update_delete_by_id = False - if module.params.get('health_check_id'): - update_delete_by_id = True - id_to_update_delete = module.params.get('health_check_id') - try: - existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) - else: - existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - if existing_check: - check_id = existing_check.get('Id') - - # Delete Health Check - if state_in == 'absent': - if update_delete_by_id: - changed, action = delete_health_check(id_to_update_delete) - else: - changed, action = delete_health_check(check_id) - check_id = None - - # Create Health Check - elif state_in == 'present': - if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - - # Update Health Check - else: - # If health_check_name is a unique identifier - if module.params.get('use_unique_names'): - existing_checks_with_name = get_existing_checks_with_name() - # update the health_check if another health check with same name exists - if health_check_name in existing_checks_with_name: - changed, action = update_health_check(existing_checks_with_name[health_check_name]) - else: - # create a new health_check if another health check with same name does not exists - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - # Add tag to add name to health check - if check_id: - if not tags: - tags = {} - tags['Name'] = health_check_name - - else: - if update_delete_by_id: - changed, action = update_health_check(existing_check) - else: - changed, action = update_health_check(existing_check) - - if check_id: - changed |= manage_tags(module, client, 'healthcheck', check_id, - tags, module.params.get('purge_tags')) - - health_check = describe_health_check(id=check_id) - health_check['action'] = action - module.exit_json( - changed=changed, - health_check=health_check, - ) - - -if __name__ == '__main__': - main() diff --git a/route53_info.py b/route53_info.py deleted file mode 100644 index a331fae9319..00000000000 --- a/route53_info.py +++ /dev/null @@ -1,827 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: route53_info -short_description: Retrieves route53 details using AWS methods -version_added: 1.0.0 -description: - - Gets various details related to Route53 zone, record set or health check details. 
-options: - query: - description: - - Specifies the query action to take. - required: True - choices: [ - 'change', - 'checker_ip_range', - 'health_check', - 'hosted_zone', - 'record_sets', - 'reusable_delegation_set', - ] - type: str - change_id: - description: - - The ID of the change batch request. - - The value that you specify here is the value that - ChangeResourceRecordSets returned in the Id element - when you submitted the request. - - Required if I(query=change). - required: false - type: str - hosted_zone_id: - description: - - The Hosted Zone ID of the DNS zone. - - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details). - - Required if I(query) is set to I(record_sets). - required: false - type: str - max_items: - description: - - Maximum number of items to return for various get/list requests. - required: false - type: int - next_marker: - description: - - "Some requests such as list_command: hosted_zones will return a maximum - number of entries - EG 100 or the number specified by I(max_items). - If the number of entries exceeds this maximum another request can be sent - using the NextMarker entry from the first response to get the next page - of results." - required: false - type: str - delegation_set_id: - description: - - The DNS Zone delegation set ID. - required: false - type: str - start_record_name: - description: - - "The first name in the lexicographic ordering of domain names that you want - the list_command: record_sets to start listing from." - required: false - type: str - type: - description: - - The type of DNS record. - required: false - choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' ] - type: str - dns_name: - description: - - The first name in the lexicographic ordering of domain names that you want - the list_command to start listing from. - required: false - type: str - resource_id: - description: - - The ID/s of the specified resource/s. - - Required if I(query=health_check) and I(health_check_method=tags). - - Required if I(query=hosted_zone) and I(hosted_zone_method=tags). - required: false - aliases: ['resource_ids'] - type: list - elements: str - health_check_id: - description: - - The ID of the health check. - - Required if C(query) is set to C(health_check) and - C(health_check_method) is set to C(details) or C(status) or C(failure_reason). - required: false - type: str - hosted_zone_method: - description: - - "This is used in conjunction with query: hosted_zone. - It allows for listing details, counts or tags of various - hosted zone details." - required: false - choices: [ - 'details', - 'list', - 'list_by_name', - 'count', - 'tags', - ] - default: 'list' - type: str - health_check_method: - description: - - "This is used in conjunction with query: health_check. - It allows for listing details, counts or tags of various - health check details." 
- required: false - choices: [ - 'list', - 'details', - 'status', - 'failure_reason', - 'count', - 'tags', - ] - default: 'list' - type: str -author: Karen Cheng (@Etherdaemon) -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = r''' -# Simple example of listing all hosted zones -- name: List all hosted zones - community.aws.route53_info: - query: hosted_zone - register: hosted_zones - -# Getting a count of hosted zones -- name: Return a count of all hosted zones - community.aws.route53_info: - query: hosted_zone - hosted_zone_method: count - register: hosted_zone_count - -- name: List the first 20 resource record sets in a given hosted zone - community.aws.route53_info: - profile: account_name - query: record_sets - hosted_zone_id: ZZZ1111112222 - max_items: 20 - register: record_sets - -- name: List first 20 health checks - community.aws.route53_info: - query: health_check - health_check_method: list - max_items: 20 - register: health_checks - -- name: Get health check last failure_reason - community.aws.route53_info: - query: health_check - health_check_method: failure_reason - health_check_id: 00000000-1111-2222-3333-12345678abcd - register: health_check_failure_reason - -- name: Retrieve reusable delegation set details - community.aws.route53_info: - query: reusable_delegation_set - delegation_set_id: delegation id - register: delegation_sets - -- name: setup of example for using next_marker - community.aws.route53_info: - query: hosted_zone - max_items: 1 - register: first_info - -- name: example for using next_marker - community.aws.route53_info: - query: hosted_zone - next_marker: "{{ first_info.NextMarker }}" - max_items: 1 - when: "{{ 'NextMarker' in first_info }}" - -- name: retrieve host entries starting with host1.workshop.test.io - block: - - name: grab zone id - community.aws.route53_zone: - zone: "test.io" - register: AWSINFO - - - name: grab Route53 record information - community.aws.route53_info: - type: A - query: record_sets - hosted_zone_id: "{{ AWSINFO.zone_id }}" - start_record_name: "host1.workshop.test.io" - register: RECORDS -''' - -RETURN = r''' -resource_record_sets: - description: A list of resource record sets returned by list_resource_record_sets in boto3. - returned: when I(query=record_sets) - type: list - elements: dict - contains: - name: - description: The name of a record in the specified hosted zone. - type: str - sample: 'www.example.com' - type: - description: The DNS record type. - type: str - sample: 'A' - ttl: - description: The resource record cache time to live (TTL), in seconds. - type: int - sample: 60 - set_identifier: - description: An identifier that differentiates among multiple resource record sets that have the same combination of name and type. - type: str - sample: 'abcd' - resource_records: - description: Information about the resource records. - type: list - elements: dict - contains: - value: - description: The current or new DNS record value. - type: str - sample: 'ns-12.awsdns-34.com.' - geo_location: - description: The specified geographic location for which the Route53 responds to based on location. - type: dict - elements: str - contains: - continent_code: - description: The two-letter code for the continent. - type: str - sample: 'NA' - country_code: - description: The two-letter code for a country. 
- type: str - sample: 'US' - subdivision_code: - description: The two-letter code for a state of the United States - type: str - sample: 'NY' - version_added: 4.0.0 -hosted_zones: - description: A list of hosted zones returned by list_hosted_zones in boto3. - returned: when I(query=hosted_zone) - type: list - elements: dict - contains: - id: - description: The ID of the hosted zone assigned by Amazon Route53 to the hosted zone at the creation time. - type: str - sample: '/hostedzone/Z01234567AB1234567890' - name: - description: The name of the domain. - type: str - sample: 'example.io' - resource_record_set_count: - description: The number of resource record sets in the hosted zone. - type: int - sample: 3 - caller_reference: - description: The value specified for CallerReference at the time of hosted zone creation. - type: str - sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' - config: - description: A dict that contains Comment and PrivateZone elements. - type: dict - contains: - comment: - description: Any comments that included about in the hosted zone. - type: str - sample: 'HostedZone created by Route53 Registrar' - private_zone: - description: A value that indicates whether this is a private hosted zone or not. - type: bool - sample: false - version_added: 4.0.0 -health_checks: - description: A list of Route53 health checks returned by list_health_checks in boto3. - type: list - elements: dict - returned: when I(query=health_check) - contains: - id: - description: The identifier that Amazon Route53 assigned to the health check at the time of creation. - type: str - sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' - health_check_version: - description: The version of the health check. - type: str - sample: 1 - caller_reference: - description: A unique string that you specified when you created the health check. - type: str - sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' - health_check_config: - description: A dict that contains detailed information about one health check. - type: dict - contains: - disabled: - description: Whether Route53 should stop performing health checks on a endpoint. - type: bool - sample: false - enable_sni: - description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation. - type: bool - sample: true - failure_threshold: - description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change current status of endpoint. - type: int - sample: 3 - fully_qualified_domain_name: - description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. - type: str - sample: 'hello' - inverted: - description: Whether Route53 should invert the status of a health check. - type: bool - sample: false - ip_address: - description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. - type: str - sample: 192.0.2.44 - measure_latency: - description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. - type: bool - sample: false - port: - description: The port of the endpoint that Route53 should perform health checks on. - type: int - sample: 80 - request_interval: - description: The number of seconds between the time that Route53 gets a response from endpoint and the next health check request. - type: int - sample: 30 - resource_path: - description: The path that Route53 requests when performing health checks. 
- type: str - sample: '/welcome.html' - search_string: - description: The string that Route53 uses to search for in the response body from specified resource. - type: str - sample: 'test-string-to-match' - type: - description: The type of the health check. - type: str - sample: HTTPS - version_added: 4.0.0 -checker_ip_ranges: - description: A list of IP ranges in CIDR format for Amazon Route 53 health checkers. - returned: when I(query=checker_ip_range) - type: list - elements: str - version_added: 4.1.0 -delegation_sets: - description: A list of dicts that contains information about the reusable delegation set. - returned: when I(query=reusable_delegation_set) - type: list - elements: dict - version_added: 4.1.0 -health_check: - description: A dict of Route53 health check details returned by get_health_check_status in boto3. - type: dict - returned: when I(query=health_check) and I(health_check_method=details) - contains: - id: - description: The identifier that Amazon Route53 assigned to the health check at the time of creation. - type: str - sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' - health_check_version: - description: The version of the health check. - type: str - sample: 1 - caller_reference: - description: A unique string that you specified when you created the health check. - type: str - sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' - health_check_config: - description: A dict that contains detailed information about one health check. - type: dict - contains: - disabled: - description: Whether Route53 should stop performing health checks on a endpoint. - type: bool - sample: false - enable_sni: - description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation. - type: bool - sample: true - failure_threshold: - description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change current status of endpoint. - type: int - sample: 3 - fully_qualified_domain_name: - description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. - type: str - sample: 'hello' - inverted: - description: Whether Route53 should invert the status of a health check. - type: bool - sample: false - ip_address: - description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. - type: str - sample: 192.0.2.44 - measure_latency: - description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. - type: bool - sample: false - port: - description: The port of the endpoint that Route53 should perform health checks on. - type: int - sample: 80 - request_interval: - description: The number of seconds between the time that Route53 gets a response from endpoint and the next health check request. - type: int - sample: 30 - resource_path: - description: The path that Route53 requests when performing health checks. - type: str - sample: '/welcome.html' - search_string: - description: The string that Route53 uses to search for in the response body from specified resource. - type: str - sample: 'test-string-to-match' - type: - description: The type of the health check. - type: str - sample: HTTPS - version_added: 4.1.0 -ResourceRecordSets: - description: A deprecated CamelCased list of resource record sets returned by list_resource_record_sets in boto3. \ - This list contains same elements/parameters as it's snake_cased version mentioned above. 
\ - This field is deprecated and will be removed in 6.0.0 version release. - returned: when I(query=record_sets) - type: list - elements: dict -HostedZones: - description: A deprecated CamelCased list of hosted zones returned by list_hosted_zones in boto3. \ - This list contains the same elements/parameters as its snake_cased version mentioned above. \ - This field is deprecated and will be removed in 6.0.0 version release. - returned: when I(query=hosted_zone) - type: list - elements: dict -HealthChecks: - description: A deprecated CamelCased list of Route53 health checks returned by list_health_checks in boto3. \ - This list contains the same elements/parameters as its snake_cased version mentioned above. \ - This field is deprecated and will be removed in 6.0.0 version release. - type: list - elements: dict - returned: when I(query=health_check) -CheckerIpRanges: - description: A deprecated CamelCased list of IP ranges in CIDR format for Amazon Route 53 health checkers.\ - This list contains the same elements/parameters as its snake_cased version mentioned above. \ - This field is deprecated and will be removed in 6.0.0 version release. - type: list - elements: str - returned: when I(query=checker_ip_range) -DelegationSets: - description: A deprecated CamelCased list of dicts that contains information about the reusable delegation set. \ - This list contains the same elements/parameters as its snake_cased version mentioned above. \ - This field is deprecated and will be removed in 6.0.0 version release. - type: list - elements: dict - returned: when I(query=reusable_delegation_set) -HealthCheck: - description: A deprecated CamelCased dict of Route53 health check details returned by get_health_check_status in boto3. \ - This dict contains the same elements/parameters as its snake_cased version mentioned above. \ - This field is deprecated and will be removed in 6.0.0 version release. 
- type: dict - returned: when I(query=health_check) and I(health_check_method=details) -''' - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - - -# Split out paginator to allow for the backoff decorator to function -@AWSRetry.jittered_backoff() -def _paginated_result(paginator_name, **params): - paginator = client.get_paginator(paginator_name) - return paginator.paginate(**params).build_full_result() - - -def get_hosted_zone(): - params = dict() - - if module.params.get('hosted_zone_id'): - params['Id'] = module.params.get('hosted_zone_id') - else: - module.fail_json(msg="Hosted Zone Id is required") - - return client.get_hosted_zone(**params) - - -def reusable_delegation_set_details(): - params = dict() - - if not module.params.get('delegation_set_id'): - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - results = client.list_reusable_delegation_sets(**params) - else: - params['DelegationSetId'] = module.params.get('delegation_set_id') - results = client.get_reusable_delegation_set(**params) - - results['delegation_sets'] = results['DelegationSets'] - module.deprecate("The 'CamelCase' return values with key 'DelegationSets' is deprecated and \ - will be replaced by 'snake_case' return values with key 'delegation_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return results - - -def list_hosted_zones(): - params = dict() - - # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - if module.params.get('delegation_set_id'): - params['DelegationSetId'] = module.params.get('delegation_set_id') - - zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] - snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] - - module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'hosted_zones'. 
\ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return { - "HostedZones": zones, - "list": zones, - "hosted_zones": snaked_zones, - } - - -def list_hosted_zones_by_name(): - params = dict() - - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') - - if module.params.get('dns_name'): - params['DNSName'] = module.params.get('dns_name') - - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) - - return client.list_hosted_zones_by_name(**params) - - -def change_details(): - params = dict() - - if module.params.get('change_id'): - params['Id'] = module.params.get('change_id') - else: - module.fail_json(msg="change_id is required") - - results = client.get_change(**params) - return results - - -def checker_ip_range_details(): - results = client.get_checker_ip_ranges() - results['checker_ip_ranges'] = results['CheckerIpRanges'] - module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and \ - will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return results - - -def get_count(): - if module.params.get('query') == 'health_check': - results = client.get_health_check_count() - else: - results = client.get_hosted_zone_count() - - return results - - -def get_health_check(): - params = dict() - - if not module.params.get('health_check_id'): - module.fail_json(msg="health_check_id is required") - else: - params['HealthCheckId'] = module.params.get('health_check_id') - - if module.params.get('health_check_method') == 'details': - results = client.get_health_check(**params) - elif module.params.get('health_check_method') == 'failure_reason': - results = client.get_health_check_last_failure_reason(**params) - elif module.params.get('health_check_method') == 'status': - results = client.get_health_check_status(**params) - - results['health_check'] = camel_dict_to_snake_dict(results['HealthCheck']) - module.deprecate("The 'CamelCase' return values with key 'HealthCheck' is deprecated and \ - will be replaced by 'snake_case' return values with key 'health_check'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return results - - -def get_resource_tags(): - params = dict() - - if module.params.get('resource_id'): - params['ResourceIds'] = module.params.get('resource_id') - else: - module.fail_json(msg="resource_id or resource_ids is required") - - if module.params.get('query') == 'health_check': - params['ResourceType'] = 'healthcheck' - else: - params['ResourceType'] = 'hostedzone' - - return client.list_tags_for_resources(**params) - - -def list_health_checks(): - params = dict() - - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') - - # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) - - health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] - snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] - - module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'health_checks'. 
\ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return { - "HealthChecks": health_checks, - "list": health_checks, - "health_checks": snaked_health_checks, - } - - -def record_sets_details(): - params = dict() - - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') - else: - module.fail_json(msg="Hosted Zone Id is required") - - if module.params.get('start_record_name'): - params['StartRecordName'] = module.params.get('start_record_name') - - # Check that both params are set if type is applied - if module.params.get('type') and not module.params.get('start_record_name'): - module.fail_json(msg="start_record_name must be specified if type is set") - - if module.params.get('type'): - params['StartRecordType'] = module.params.get('type') - - # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) - - record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] - snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] - - module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'resource_record_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='community.aws') - - return { - "ResourceRecordSets": record_sets, - "list": record_sets, - "resource_record_sets": snaked_record_sets, - } - - -def health_check_details(): - health_check_invocations = { - 'list': list_health_checks, - 'details': get_health_check, - 'status': get_health_check, - 'failure_reason': get_health_check, - 'count': get_count, - 'tags': get_resource_tags, - } - - results = health_check_invocations[module.params.get('health_check_method')]() - return results - - -def hosted_zone_details(): - hosted_zone_invocations = { - 'details': get_hosted_zone, - 'list': list_hosted_zones, - 'list_by_name': list_hosted_zones_by_name, - 'count': get_count, - 'tags': get_resource_tags, - } - - results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() - return results - - -def main(): - global module - global client - - argument_spec = dict( - query=dict(choices=[ - 'change', - 'checker_ip_range', - 'health_check', - 'hosted_zone', - 'record_sets', - 'reusable_delegation_set', - ], required=True), - change_id=dict(), - hosted_zone_id=dict(), - max_items=dict(type='int'), - next_marker=dict(), - delegation_set_id=dict(), - start_record_name=dict(), - type=dict(type='str', choices=[ - 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' - ]), - dns_name=dict(), - resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), - health_check_id=dict(), - hosted_zone_method=dict(choices=[ - 'details', - 'list', - 'list_by_name', - 'count', - 'tags' - ], default='list'), - health_check_method=dict(choices=[ - 'list', - 'details', - 'status', - 'failure_reason', - 'count', - 'tags', - ], default='list'), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['hosted_zone_method', 'health_check_method'], - ], - check_boto3=False, - ) - - try: - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg='Failed to connect to AWS') - - invocations = { - 'change': change_details, - 'checker_ip_range': checker_ip_range_details, - 'health_check': health_check_details, - 'hosted_zone': hosted_zone_details, - 'record_sets': record_sets_details, - 'reusable_delegation_set': reusable_delegation_set_details, - } - - results = dict(changed=False) - try: - results = invocations[module.params.get('query')]() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/route53_zone.py b/route53_zone.py deleted file mode 100644 index e5c6f199b8e..00000000000 --- a/route53_zone.py +++ /dev/null @@ -1,479 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' -module: route53_zone -short_description: add or delete Route53 zones -version_added: 1.0.0 -description: - - Creates and deletes Route53 private and public zones. -options: - zone: - description: - - "The DNS zone record (eg: foo.com.)" - required: true - type: str - state: - description: - - Whether or not the zone should exist or not. - default: present - choices: [ "present", "absent" ] - type: str - vpc_id: - description: - - The VPC ID the zone should be a part of (if this is going to be a private zone). - type: str - vpc_region: - description: - - The VPC Region the zone should be a part of (if this is going to be a private zone). - type: str - comment: - description: - - Comment associated with the zone. - default: '' - type: str - hosted_zone_id: - description: - - The unique zone identifier you want to delete or "all" if there are many zones with the same domain name. - - Required if there are multiple zones identified with the above options. - type: str - delegation_set_id: - description: - - The reusable delegation set ID to be associated with the zone. - - Note that you can't associate a reusable delegation set with a private hosted zone. - type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -notes: - - Support for I(tags) and I(purge_tags) was added in release 2.1.0. 
-author: - - "Christopher Troup (@minichate)" -''' - -EXAMPLES = r''' -- name: create a public zone - community.aws.route53_zone: - zone: example.com - comment: this is an example - -- name: delete a public zone - community.aws.route53_zone: - zone: example.com - state: absent - -- name: create a private zone - community.aws.route53_zone: - zone: devel.example.com - vpc_id: '{{ myvpc_id }}' - vpc_region: us-west-2 - comment: developer domain - -- name: create a public zone associated with a specific reusable delegation set - community.aws.route53_zone: - zone: example.com - comment: reusable delegation set example - delegation_set_id: A1BCDEF2GHIJKL - -- name: create a public zone with tags - community.aws.route53_zone: - zone: example.com - comment: this is an example - tags: - Owner: Ansible Team - -- name: modify a public zone, removing all previous tags and adding a new one - community.aws.route53_zone: - zone: example.com - comment: this is an example - tags: - Support: Ansible Community - purge_tags: true -''' - -RETURN = r''' -comment: - description: optional hosted zone comment - returned: when hosted zone exists - type: str - sample: "Private zone" -name: - description: hosted zone name - returned: when hosted zone exists - type: str - sample: "private.local." -private_zone: - description: whether hosted zone is private or public - returned: when hosted zone exists - type: bool - sample: true -vpc_id: - description: id of vpc attached to private hosted zone - returned: for private hosted zone - type: str - sample: "vpc-1d36c84f" -vpc_region: - description: region of vpc attached to private hosted zone - returned: for private hosted zone - type: str - sample: "eu-west-1" -zone_id: - description: hosted zone id - returned: when hosted zone exists - type: str - sample: "Z6JQG9820BEFMW" -delegation_set_id: - description: id of the associated reusable delegation set - returned: for public hosted zones, if they have been associated with a reusable delegation set - type: str - sample: "A1BCDEF2GHIJKL" -tags: - description: tags associated with the zone - returned: when tags are defined - type: dict -''' - -import time -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags -from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags - -try: - from botocore.exceptions import BotoCoreError, ClientError -except ImportError: - pass # caught by AnsibleAWSModule - - -@AWSRetry.jittered_backoff() -def _list_zones(): - paginator = client.get_paginator('list_hosted_zones') - return paginator.paginate().build_full_result() - - -def find_zones(zone_in, private_zone): - try: - results = _list_zones() - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not list current hosted zones") - zones = [] - for r53zone in results['HostedZones']: - if r53zone['Name'] != zone_in: - continue - # only save zone names that match the public/private setting - if (r53zone['Config']['PrivateZone'] and private_zone) or \ - (not r53zone['Config']['PrivateZone'] and not private_zone): - zones.append(r53zone) - - return zones - - -def create(matching_zones): - zone_in = module.params.get('zone').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - comment = module.params.get('comment') - delegation_set_id = 
module.params.get('delegation_set_id') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if not zone_in.endswith('.'): - zone_in += "." - - private_zone = bool(vpc_id and vpc_region) - - record = { - 'private_zone': private_zone, - 'vpc_id': vpc_id, - 'vpc_region': vpc_region, - 'comment': comment, - 'name': zone_in, - 'delegation_set_id': delegation_set_id, - 'zone_id': None, - } - - if private_zone: - changed, result = create_or_update_private(matching_zones, record) - else: - changed, result = create_or_update_public(matching_zones, record) - - zone_id = result.get('zone_id') - if zone_id: - if tags is not None: - changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags) - result['tags'] = get_tags(module, client, 'hostedzone', zone_id) - else: - result['tags'] = tags - - return changed, result - - -def create_or_update_private(matching_zones, record): - for z in matching_zones: - try: - result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] - current_vpc_id = None - current_vpc_region = None - if isinstance(vpc_details, dict): - if vpc_details['VPC']['VPCId'] == record['vpc_id']: - current_vpc_id = vpc_details['VPC']['VPCId'] - current_vpc_region = vpc_details['VPC']['VPCRegion'] - else: - if record['vpc_id'] in [v['VPCId'] for v in vpc_details]: - current_vpc_id = record['vpc_id'] - if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]: - current_vpc_region = record['vpc_region'] - - if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: - if not module.check_mode: - try: - client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) - return True, record - else: - record['msg'] = "There is already a private hosted zone in the same region with the same VPC \ - you chose. Unable to create a new private hosted zone in the same name space." 
- return False, record - - if not module.check_mode: - try: - result = client.create_hosted_zone( - Name=record['name'], - HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': True, - }, - VPC={ - 'VPCRegion': record['vpc_region'], - 'VPCId': record['vpc_id'], - }, - CallerReference="%s-%s" % (record['name'], time.time()), - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not create hosted zone") - - hosted_zone = result['HostedZone'] - zone_id = hosted_zone['Id'].replace('/hostedzone/', '') - record['zone_id'] = zone_id - - changed = True - return changed, record - - -def create_or_update_public(matching_zones, record): - zone_details, zone_delegation_set_details = None, {} - for matching_zone in matching_zones: - try: - zone = client.get_hosted_zone(Id=matching_zone['Id']) - zone_details = zone['HostedZone'] - zone_delegation_set_details = zone.get('DelegationSet', {}) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id']) - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: - if not module.check_mode: - try: - client.update_hosted_zone_comment( - Id=zone_details['Id'], - Comment=record['comment'] - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) - changed = True - else: - changed = False - break - - if zone_details is None: - if not module.check_mode: - try: - params = dict( - Name=record['name'], - HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': False, - }, - CallerReference="%s-%s" % (record['name'], time.time()), - ) - - if record.get('delegation_set_id') is not None: - params['DelegationSetId'] = record['delegation_set_id'] - - result = client.create_hosted_zone(**params) - zone_details = result['HostedZone'] - zone_delegation_set_details = result.get('DelegationSet', {}) - - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not create hosted zone") - changed = True - - if module.check_mode: - if zone_details: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - else: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - record['name'] = zone_details['Name'] - record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') - - return changed, record - - -def delete_private(matching_zones, vpc_id, vpc_region): - for z in matching_zones: - try: - result = client.get_hosted_zone(Id=z['Id']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] - if isinstance(vpc_details, dict): - if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']: - if not module.check_mode: - try: - client.delete_hosted_zone(Id=z['Id']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - return True, "Successfully deleted %s" % zone_details['Name'] - else: - if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]: - if not module.check_mode: - try: - client.delete_hosted_zone(Id=z['Id']) - except (BotoCoreError, ClientError) as e: - 
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - return True, "Successfully deleted %s" % zone_details['Name'] - - return False, "The vpc_id and the vpc_region do not match a private hosted zone." - - -def delete_public(matching_zones): - if len(matching_zones) > 1: - changed = False - msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone." - else: - if not module.check_mode: - try: - client.delete_hosted_zone(Id=matching_zones[0]['Id']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id']) - changed = True - msg = "Successfully deleted %s" % matching_zones[0]['Id'] - return changed, msg - - -def delete_hosted_id(hosted_zone_id, matching_zones): - if hosted_zone_id == "all": - deleted = [] - for z in matching_zones: - deleted.append(z['Id']) - if not module.check_mode: - try: - client.delete_hosted_zone(Id=z['Id']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - changed = True - msg = "Successfully deleted zones: %s" % deleted - elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]: - if not module.check_mode: - try: - client.delete_hosted_zone(Id=hosted_zone_id) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id) - changed = True - msg = "Successfully deleted zone: %s" % hosted_zone_id - else: - changed = False - msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id - return changed, msg - - -def delete(matching_zones): - zone_in = module.params.get('zone').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - hosted_zone_id = module.params.get('hosted_zone_id') - - if not zone_in.endswith('.'): - zone_in += "." - - private_zone = bool(vpc_id and vpc_region) - - if zone_in in [z['Name'] for z in matching_zones]: - if hosted_zone_id: - changed, result = delete_hosted_id(hosted_zone_id, matching_zones) - else: - if private_zone: - changed, result = delete_private(matching_zones, vpc_id, vpc_region) - else: - changed, result = delete_public(matching_zones) - else: - changed = False - result = "No zone to delete." - - return changed, result - - -def main(): - global module - global client - - argument_spec = dict( - zone=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - vpc_id=dict(default=None), - vpc_region=dict(default=None), - comment=dict(default=''), - hosted_zone_id=dict(), - delegation_set_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - ) - - mutually_exclusive = [ - ['delegation_set_id', 'vpc_id'], - ['delegation_set_id', 'vpc_region'], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, - ) - - zone_in = module.params.get('zone').lower() - state = module.params.get('state').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - - if not zone_in.endswith('.'): - zone_in += "." 
- - private_zone = bool(vpc_id and vpc_region) - - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) - - zones = find_zones(zone_in, private_zone) - if state == 'present': - changed, result = create(matching_zones=zones) - elif state == 'absent': - changed, result = delete(matching_zones=zones) - - if isinstance(result, dict): - module.exit_json(changed=changed, result=result, **result) - else: - module.exit_json(changed=changed, result=result) - - -if __name__ == '__main__': - main() From 0b8155f9e8985b0dc51791f94a8d6e5c02853bc5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 28 Sep 2022 13:40:43 +0200 Subject: [PATCH 591/683] Make example AWS UUIDs follow a specific pattern (#1539) Make example AWS UUIDs follow a specific pattern SUMMARY Various AWS IAM resources have UUIDs which follow a specific pattern. Similarly, AWS accounts are all 12-digit numbers (text aliases in a couple of cases). To minimize the risk of accidental data leaks, use a consistent Account ID in examples (123456789012), and a specific format for the UUIDs: (AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)12345EXAMPLE54321 While this does nothing about historic data, having consistency makes it easier to prevent future leaks. Note: We should follow this up with an update to the developer docs; however, I'd like to get this in prior to 5.0.0 ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/acm_certificate_info.py plugins/modules/application_autoscaling_policy.py plugins/modules/autoscaling_launch_config.py plugins/modules/autoscaling_launch_config_info.py plugins/modules/codecommit_repository.py plugins/modules/directconnect_link_aggregation_group.py plugins/modules/dms_endpoint.py plugins/modules/dynamodb_table.py plugins/modules/ec2_transit_gateway_info.py plugins/modules/ec2_transit_gateway_vpc_attachment.py plugins/modules/ec2_transit_gateway_vpc_attachment_info.py plugins/modules/ec2_vpc_peer.py plugins/modules/ec2_vpc_peering_info.py plugins/modules/ec2_vpc_vpn_info.py plugins/modules/ecs_cluster.py plugins/modules/ecs_ecr.py plugins/modules/ecs_service.py plugins/modules/ecs_service_info.py plugins/modules/ecs_task.py plugins/modules/efs.py plugins/modules/efs_info.py plugins/modules/eks_cluster.py plugins/modules/elasticache_subnet_group.py plugins/modules/elb_network_lb.py plugins/modules/elb_target_group.py plugins/modules/elb_target_group_info.py plugins/modules/elb_target_info.py plugins/modules/iam_group.py plugins/modules/iam_managed_policy.py plugins/modules/iam_mfa_device_info.py plugins/modules/iam_server_certificate_info.py plugins/modules/lightsail.py plugins/modules/lightsail_static_ip.py plugins/modules/msk_cluster.py plugins/modules/s3_bucket_notification.py plugins/modules/sns_topic.py plugins/modules/sns_topic_info.py plugins/modules/sqs_queue.py plugins/modules/stepfunctions_state_machine.py plugins/modules/stepfunctions_state_machine_execution.py plugins/modules/storagegateway_info.py plugins/modules/wafv2_web_acl.py ADDITIONAL INFORMATION While the 'secret' nature of these UUIDs is debatable (they're closer to user names than passwords), deliberately mangling them makes it easier for InfoSec teams to spot when their secret counterparts may have been leaked in combination with a real 'public' part. 
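As a concrete companion to the convention above, here is a minimal sketch of a check that flags IDs in example text which do not use the sanctioned placeholder values. It assumes only the two patterns named in the SUMMARY (the (AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)12345EXAMPLE54321 key format and the 123456789012 account ID); the suspicious_ids helper is illustrative and not part of this patch.

import re

# Sanctioned example values from the commit summary above (assumed, for illustration).
KEY_ID = re.compile(r"\b(?:AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16,}\b")
EXAMPLE_KEY_ID = re.compile(r"\b(?:AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)12345EXAMPLE54321\b")
ACCOUNT_ID = re.compile(r"\b\d{12}\b")
EXAMPLE_ACCOUNT_ID = "123456789012"

def suspicious_ids(text):
    # Flag IAM key-style IDs that do not use the documented placeholder ...
    flagged = [m.group(0) for m in KEY_ID.finditer(text)
               if not EXAMPLE_KEY_ID.match(m.group(0))]
    # ... and 12-digit account IDs other than the shared example account.
    flagged += [m.group(0) for m in ACCOUNT_ID.finditer(text)
                if m.group(0) != EXAMPLE_ACCOUNT_ID]
    return flagged

print(suspicious_ids("arn:aws:iam::999999999999:role/demo"))     # ['999999999999']
print(suspicious_ids("AKIA12345EXAMPLE54321 and 123456789012"))  # []

Anything such a check returns would need to be swapped for the example values before a docs change lands; per the note above, it does nothing for account aliases or for IDs already present in the history.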
--- acm_certificate_info.py | 2 +- application_autoscaling_policy.py | 2 +- autoscaling_launch_config.py | 4 ++-- autoscaling_launch_config_info.py | 2 +- codecommit_repository.py | 4 ++-- directconnect_link_aggregation_group.py | 2 +- dms_endpoint.py | 4 ++-- ec2_transit_gateway_info.py | 4 ++-- ec2_transit_gateway_vpc_attachment.py | 2 +- ec2_transit_gateway_vpc_attachment_info.py | 2 +- ec2_vpc_peer.py | 12 ++++++------ ec2_vpc_peering_info.py | 6 +++--- ec2_vpc_vpn_info.py | 2 +- ecs_cluster.py | 4 ++-- ecs_ecr.py | 8 ++++---- ecs_service.py | 14 ++++++++------ ecs_service_info.py | 5 ++++- ecs_task.py | 10 +++++----- efs.py | 2 +- efs_info.py | 2 +- eks_cluster.py | 4 ++-- elasticache_subnet_group.py | 2 +- elb_network_lb.py | 2 +- elb_target_group.py | 2 +- elb_target_group_info.py | 4 ++-- elb_target_info.py | 2 +- iam_group.py | 4 ++-- iam_managed_policy.py | 2 +- iam_mfa_device_info.py | 8 ++++---- iam_server_certificate_info.py | 4 ++-- lightsail.py | 4 ++-- lightsail_static_ip.py | 4 ++-- msk_cluster.py | 2 +- s3_bucket_notification.py | 4 ++-- sns_topic.py | 10 +++++----- sns_topic_info.py | 10 +++++----- sqs_queue.py | 2 +- stepfunctions_state_machine.py | 4 ++-- stepfunctions_state_machine_execution.py | 6 +++--- storagegateway_info.py | 6 +++--- wafv2_web_acl.py | 4 ++-- 41 files changed, 94 insertions(+), 89 deletions(-) diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 70f641e61df..f546bf2c1be 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -70,7 +70,7 @@ # The output is still a list of certificates, just one item long. - name: obtain information about a certificate with a particular ARN community.aws.aws_acm_info: - certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" + certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" ''' diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index 6a636e8a7cd..51f27ed580c 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -216,7 +216,7 @@ description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present). returned: when state present type: str - sample: arn:aws:iam::123456789123:role/roleName + sample: arn:aws:iam::123456789012:role/roleName step_scaling_policy_configuration: description: The step scaling policy. returned: when state present and the policy type is StepScaling diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index d353afe3b9f..ea50eb7a57e 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -259,7 +259,7 @@ description: The Amazon Resource Name of the launch configuration. returned: when I(state=present) type: str - sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name + sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name changed: description: Whether the state of the launch configuration has changed. returned: always @@ -391,7 +391,7 @@ description: The Amazon Resource Name (ARN) of the launch configuration. 
returned: when I(state=present) type: str - sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name + sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name member: description: "" returned: when I(state=present) diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py index 7a9cfae9ad1..1606201c999 100644 --- a/autoscaling_launch_config_info.py +++ b/autoscaling_launch_config_info.py @@ -126,7 +126,7 @@ description: Amazon Resource Name (ARN) of the launch configuration. type: str returned: always - sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app" + sample: "arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app" launch_configuration_name: description: Name of the launch configuration. type: str diff --git a/codecommit_repository.py b/codecommit_repository.py index 4eae0d90fb1..689a1c053d7 100644 --- a/codecommit_repository.py +++ b/codecommit_repository.py @@ -52,12 +52,12 @@ description: "The ID of the AWS account associated with the repository." returned: when state is present type: str - sample: "268342293637" + sample: "123456789012" arn: description: "The Amazon Resource Name (ARN) of the repository." returned: when state is present type: str - sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username" + sample: "arn:aws:codecommit:ap-northeast-1:123456789012:username" clone_url_http: description: "The URL to use for cloning the repository over HTTPS." returned: when state is present diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index f340ef3fcfb..ab8a04a9d98 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -115,7 +115,7 @@ "connection_state": "down", "lag_id": "dxlag-fgnsp4rq", "location": "EqSe2", - "owner_account": "448830907657", + "owner_account": "123456789012", "region": "us-west-2" } ] diff --git a/dms_endpoint.py b/dms_endpoint.py index 6dcbcc8868c..bc2d6160af9 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -183,7 +183,7 @@ - The ARN that uniquely identifies the endpoint. type: str returned: success - example: "arn:aws:dms:us-east-1:012345678901:endpoint:1234556789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + example: "arn:aws:dms:us-east-1:123456789012:endpoint:1234556789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" endpoint_identifier: description: - The database endpoint identifier. @@ -213,7 +213,7 @@ - An KMS key ID that is used to encrypt the connection parameters for the endpoint. type: str returned: success - example: "arn:aws:kms:us-east-1:012345678901:key/01234567-abcd-12ab-98fe-123456789abc" + example: "arn:aws:kms:us-east-1:123456789012:key/01234567-abcd-12ab-98fe-123456789abc" port: description: - The port used to access the endpoint. diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 024aa5dcec9..8739815693b 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -136,7 +136,7 @@ description: The AWS account number ID which owns the transit gateway. returned: always type: str - sample: "1234567654323" + sample: "123456789012" state: description: The state of the transit gateway. 
returned: always @@ -153,7 +153,7 @@ description: The Amazon Resource Name (ARN) of the transit gateway. returned: always type: str - sample: "arn:aws:ec2:us-west-2:1234567654323:transit-gateway/tgw-02c42332e6b7da829" + sample: "arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-02c42332e6b7da829" transit_gateway_id: description: The ID of the transit gateway. returned: always diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py index 7f2fc2988ba..20178ed5f19 100644 --- a/ec2_transit_gateway_vpc_attachment.py +++ b/ec2_transit_gateway_vpc_attachment.py @@ -214,7 +214,7 @@ - The ID of the account that the VPC belongs to. type: str returned: success - example: '012345678901' + example: '123456789012' ''' diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py index a0a07ce87d7..9e51ad19bda 100644 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ b/ec2_transit_gateway_vpc_attachment_info.py @@ -142,7 +142,7 @@ - The ID of the account that the VPC belongs to. type: str returned: success - example: '012345678901' + example: '123456789012' ''' diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 2034f234340..4abf9e990e9 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -109,7 +109,7 @@ region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789102 + peer_owner_id: 123456789012 state: present tags: Name: Peering connection for VPC 21 to VPC 22 @@ -171,7 +171,7 @@ region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789102 + peer_owner_id: 123456789012 state: present tags: Name: Peering connection for VPC 21 to VPC 22 @@ -196,7 +196,7 @@ region: ap-southeast-2 vpc_id: vpc-12345678 peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789102 + peer_owner_id: 123456789012 state: present tags: Name: Peering connection for VPC 21 to VPC 22 @@ -247,7 +247,7 @@ description: The AWS account that owns the VPC. returned: success type: str - example: 012345678901 + example: 123456789012 peering_options: description: Additional peering configuration. returned: when connection is in the accepted state. @@ -299,7 +299,7 @@ description: The AWS account that owns the VPC. returned: success type: str - example: 012345678901 + example: 123456789012 peering_options: description: Additional peering configuration. returned: when connection is not in the deleted state. @@ -341,7 +341,7 @@ description: Additional information about the status of the connection. returned: success type: str - example: Pending Acceptance by 012345678901 + example: Pending Acceptance by 123456789012 tags: description: Tags applied to the connection. returned: success diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index e7d8264c8d5..cdb8f8ca8b0 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -88,7 +88,7 @@ description: The AWS account that owns the VPC. returned: success type: str - example: 012345678901 + example: 123456789012 peering_options: description: Additional peering configuration. returned: when connection is in the accepted state. @@ -140,7 +140,7 @@ description: The AWS account that owns the VPC. returned: success type: str - example: 012345678901 + example: 123456789012 peering_options: description: Additional peering configuration. returned: when connection is not in the deleted state. @@ -182,7 +182,7 @@ description: Additional information about the status of the connection. 
returned: success type: str - example: Pending Acceptance by 012345678901 + example: Pending Acceptance by 123456789012 tags: description: Tags applied to the connection. returned: success diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 57ebb17e852..b1d2bbee43e 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -144,7 +144,7 @@ description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate. returned: when a private certificate is used for authentication type: str - sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example" + sample: "arn:aws:acm:us-east-1:123456789012:certificate/c544d8ce-20b8-4fff-98b0-example" vpn_connection_id: description: The ID of the VPN connection. returned: always diff --git a/ecs_cluster.py b/ecs_cluster.py index 3074e8914f2..c186c0da5c5 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -78,12 +78,12 @@ description: the ARN of the cluster just created type: str returned: 0 if a new cluster - sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok + sample: arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster clusterName: description: name of the cluster just created (should match the input argument) type: str returned: always - sample: test-cluster-mfshcdok + sample: test-cluster pendingTasksCount: description: how many tasks are waiting to run in this cluster returned: 0 if a new cluster diff --git a/ecs_ecr.py b/ecs_ecr.py index a7194659974..d2947753f38 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -107,7 +107,7 @@ - name: Cross account ecr-repo community.aws.ecs_ecr: - registry_id: 999999999999 + registry_id: 123456789012 name: cross/account - name: set-policy as object @@ -186,10 +186,10 @@ returned: I(state=present) sample: createdAt: '2017-01-17T08:41:32-06:00' - registryId: '999999999999' - repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090 + registryId: '123456789012' + repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090 repositoryName: ecr-test-1484664090 - repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 + repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 ''' import json diff --git a/ecs_service.py b/ecs_service.py index f355fa32bed..e04f296060d 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -411,9 +411,10 @@ returned: always type: int serviceArn: - description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region - of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, - arn:aws:ecs:region :012345678910 :service/my-service . + description: + - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the C(arn:aws:ecs) namespace, followed by + the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. + sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' returned: always type: str serviceName: @@ -547,9 +548,10 @@ returned: always type: int serviceArn: - description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region - of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, - arn:aws:ecs:region :012345678910 :service/my-service . 
+ description: + - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region + of the service, the AWS account ID of the service owner, the service namespace, and then the service name. + sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' returned: always type: str serviceName: diff --git a/ecs_service_info.py b/ecs_service_info.py index b04f94241f5..49c2676c7e1 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -103,7 +103,10 @@ returned: always type: int serviceArn: - description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service . + description: + - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the + service, the AWS account ID of the service owner, the service namespace, and then the service name. + sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' returned: always type: str serviceName: diff --git a/ecs_task.py b/ecs_task.py index 9da2dcbf45e..bdc5cc98718 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -122,14 +122,14 @@ operation: start cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" tags: resourceName: a_task_for_ansible_to_run type: long_running_task network: internal version: 1.4 container_instances: - - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 + - arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 started_by: ansible_user network_configuration: subnets: @@ -144,7 +144,7 @@ operation: run cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" started_by: ansible_user launch_type: FARGATE network_configuration: @@ -161,7 +161,7 @@ count: 2 cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" started_by: ansible_user launch_type: FARGATE network_configuration: @@ -175,7 +175,7 @@ operation: stop cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" ''' RETURN = r''' task: diff --git a/efs.py b/efs.py index a78f832d971..68ebe46d395 100644 --- a/efs.py +++ b/efs.py @@ -196,7 +196,7 @@ "life_cycle_state": "available", "mount_target_id": "fsmt-d8907871", "network_interface_id": "eni-6e387e26", - "owner_id": "740748460359", + "owner_id": "123456789012", "security_groups": [ "sg-a30b22c6" ], diff --git a/efs_info.py b/efs_info.py index 8f616d15dda..a44f402ac64 100644 --- a/efs_info.py +++ b/efs_info.py @@ -108,7 +108,7 @@ 
"life_cycle_state": "available", "mount_target_id": "fsmt-d8907871", "network_interface_id": "eni-6e387e26", - "owner_id": "740748460359", + "owner_id": "123456789012", "security_groups": [ "sg-a30b22c6" ], diff --git a/eks_cluster.py b/eks_cluster.py index abdaee4ff95..f71e1514a87 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -89,7 +89,7 @@ description: ARN of the EKS cluster returned: when state is present type: str - sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster + sample: arn:aws:eks:us-west-2:123456789012:cluster/my-eks-cluster certificate_authority: description: Dictionary containing Certificate Authority Data for cluster returned: after creation @@ -143,7 +143,7 @@ description: ARN of the IAM role used by the cluster returned: when state is present type: str - sample: arn:aws:iam::111111111111:role/eks_cluster_role + sample: arn:aws:iam::123456789012:role/eks_cluster_role status: description: status of the EKS cluster returned: when state is present diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index eda678205d0..6816cc364d3 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -70,7 +70,7 @@ description: The Amazon Resource Name (ARN) of the cache subnet group. returned: when the subnet group exists type: str - sample: arn:aws:elasticache:us-east-1:012345678901:subnetgroup:norwegian-blue + sample: arn:aws:elasticache:us-east-1:123456789012:subnetgroup:norwegian-blue description: description: The description of the cache subnet group. returned: when the cache subnet group exists diff --git a/elb_network_lb.py b/elb_network_lb.py index 3b5277d071d..a65c42a2c4c 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -289,7 +289,7 @@ description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + sample: arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/001122334455 load_balancer_name: description: The name of the load balancer. returned: when state is present diff --git a/elb_target_group.py b/elb_target_group.py index cd750be188b..c11f622226f 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -403,7 +403,7 @@ description: The Amazon Resource Name (ARN) of the target group. returned: when state present type: str - sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211" + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211" target_group_name: description: The name of the target group. returned: when state present diff --git a/elb_target_group_info.py b/elb_target_group_info.py index 5fd8a9b6c39..bb27bc30ec4 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -53,7 +53,7 @@ - name: Gather information about the target group attached to a particular ELB community.aws.elb_target_group_info: - load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff" + load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/aabbccddeeff" - name: Gather information about a target groups named 'tg1' and 'tg2' community.aws.elb_target_group_info: @@ -152,7 +152,7 @@ description: The Amazon Resource Name (ARN) of the target group. 
returned: always type: str - sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211" + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211" targets_health_description: description: Targets health description. returned: when collect_targets_health is enabled diff --git a/elb_target_info.py b/elb_target_info.py index dc71adbc72f..ad2f0702879 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -137,7 +137,7 @@ type: str returned: always sample: - - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef" + - "arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/target-group/deadbeefdeadbeef" target_group_type: description: Which target type is used for this group returned: always diff --git a/iam_group.py b/iam_group.py index 5f85c4bfc8c..759be42a74d 100644 --- a/iam_group.py +++ b/iam_group.py @@ -140,7 +140,7 @@ group_id: description: the stable and unique string identifying the group type: str - sample: AGPAIDBWE12NSFINE55TM + sample: AGPA12345EXAMPLE54321 group_name: description: the friendly name that identifies the group type: str @@ -165,7 +165,7 @@ user_id: description: the stable and unique string identifying the user type: str - sample: AIDAIZTPY123YQRS22YU2 + sample: AIDA12345EXAMPLE54321 user_name: description: the friendly name that identifies the user type: str diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 3e30c4a667c..4b3dbfebda4 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -116,7 +116,7 @@ "default_version_id": "v1", "is_attachable": true, "path": "/", - "policy_id": "ANPALM4KLDMTFXGOOJIHL", + "policy_id": "ANPA1245EXAMPLE54321", "policy_name": "AdministratorAccess", "update_date": "2017-03-01T15:42:55.981000+00:00" }' diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 4cd27875273..a2801ca9482 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -31,11 +31,11 @@ type: list sample: - enable_date: "2016-03-11T23:25:36+00:00" - serial_number: arn:aws:iam::085120003701:mfa/pwnall - user_name: pwnall + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example - enable_date: "2016-03-11T23:25:37+00:00" - serial_number: arn:aws:iam::085120003702:mfa/pwnall - user_name: pwnall + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example """ EXAMPLES = r''' diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index 1356a5ec15e..cbe2ab5459c 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -58,7 +58,7 @@ description: The Amazon resource name of the server certificate returned: success type: str - sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name" + sample: "arn:aws:iam::123456789012:server-certificate/server-cert-name" path: description: The path of the server certificate returned: success @@ -106,7 +106,7 @@ def get_server_certs(iam, name=None): "server_certificate_name": "server-cert-name", "expiration": "2017-06-15T12:00:00+00:00", "path": "/", - "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name" + "arn": "arn:aws:iam::123456789012:server-certificate/server-cert-name" } """ results = dict() diff --git a/lightsail.py b/lightsail.py index 75796774580..40e058219ce 100644 --- a/lightsail.py +++ b/lightsail.py @@ -107,7 +107,7 @@ returned: always type: dict sample: - arn: 
"arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87" + arn: "arn:aws:lightsail:us-east-1:123456789012:Instance/1fef0175-d6c8-480e-84fa-214f969cda87" blueprint_id: "ubuntu_16_04" blueprint_name: "Ubuntu" bundle_id: "nano_1_0" @@ -145,7 +145,7 @@ state: code: 16 name: running - support_code: "588307843083/i-0997c97831ee21e33" + support_code: "123456789012/i-0997c97831ee21e33" username: "ubuntu" ''' diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py index 7f5d5377c0a..4f3463652b4 100644 --- a/lightsail_static_ip.py +++ b/lightsail_static_ip.py @@ -53,7 +53,7 @@ returned: always type: dict sample: - arn: "arn:aws:lightsail:ap-southeast-2:184297340509:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a" + arn: "arn:aws:lightsail:ap-southeast-2:123456789012:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a" created_at: "2021-02-28T00:04:05.202000+10:30" ip_address: "192.0.2.5" is_attached: false @@ -62,7 +62,7 @@ region_name: ap-southeast-2 name: "static_ip" resource_type: StaticIp - support_code: "677585553206/192.0.2.5" + support_code: "123456789012/192.0.2.5" ''' try: diff --git a/msk_cluster.py b/msk_cluster.py index 8a1774b25ef..1dccf4558f3 100644 --- a/msk_cluster.py +++ b/msk_cluster.py @@ -225,7 +225,7 @@ - subnet-d9fbeaf46c54bfab6 wait: true wait_timeout: 1800 - configuration_arn: arn:aws:kafka:us-east-1:000000000001:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1 + configuration_arn: arn:aws:kafka:us-east-1:123456789012:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1 configuration_revision: 1 - community.aws.msk_cluster: diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index ee876405b20..21b598d3bd9 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -112,7 +112,7 @@ state: present event_name: on_file_add_or_remove bucket_name: test-bucket - lambda_function_arn: arn:aws:lambda:us-east-2:526810320200:function:test-lambda + lambda_function_arn: arn:aws:lambda:us-east-2:123456789012:function:test-lambda events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] prefix: images/ suffix: .jpg @@ -122,7 +122,7 @@ state: present event_name: on_file_add_or_remove bucket_name: test-bucket - queue_arn: arn:aws:sqs:us-east-2:526810320200:test-queue + queue_arn: arn:aws:sqs:us-east-2:123456789012:test-queue events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] prefix: images/ suffix: .jpg diff --git a/sns_topic.py b/sns_topic.py index 59ace8b051d..e569397e88c 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -210,7 +210,7 @@ description: The ARN of the topic you are modifying type: str returned: always - sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name" + sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name" sns_topic: description: Dict of sns topic details type: complex @@ -247,14 +247,14 @@ description: AWS account that owns the topic returned: when topic is owned by this AWS account type: str - sample: '111111111111' + sample: '123456789012' policy: description: Policy for the SNS topic returned: when topic is owned by this AWS account type: str sample: > - {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"}, - "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} + 
{"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"}, + "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} state: description: whether the topic is present or absent returned: always @@ -304,7 +304,7 @@ description: ARN of the SNS topic (equivalent to sns_arn) returned: when topic is owned by this AWS account type: str - sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic + sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic topic_created: description: Whether the topic was created returned: always diff --git a/sns_topic_info.py b/sns_topic_info.py index 380d712820b..8d5a3d01532 100644 --- a/sns_topic_info.py +++ b/sns_topic_info.py @@ -47,7 +47,7 @@ description: The ARN of the topic. type: str returned: always - sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name" + sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name" sns_topic: description: Dict of sns topic details. type: complex @@ -69,14 +69,14 @@ description: AWS account that owns the topic. returned: when topic is owned by this AWS account type: str - sample: '111111111111' + sample: '123456789012' policy: description: Policy for the SNS topic. returned: when topic is owned by this AWS account type: str sample: > - {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"}, - "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} + {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"}, + "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} subscriptions: description: List of subscribers to the topic in this AWS account. returned: always @@ -121,7 +121,7 @@ description: ARN of the SNS topic (equivalent to sns_arn). returned: when topic is owned by this AWS account type: str - sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic + sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic topic_type: description: The type of topic. type: str diff --git a/sqs_queue.py b/sqs_queue.py index d4b159bbab9..3e8931265ab 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -138,7 +138,7 @@ description: The queue's Amazon resource name (ARN). 
type: str returned: on success - sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0' + sample: 'arn:aws:sqs:us-east-1:123456789012:queuename-987d2de0' queue_url: description: URL to access the queue type: str diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py index 227ec6f86c3..80e617b3e1c 100644 --- a/stepfunctions_state_machine.py +++ b/stepfunctions_state_machine.py @@ -58,7 +58,7 @@ community.aws.stepfunctions_state_machine: name: "HelloWorldStateMachine" definition: "{{ lookup('file','state_machine.json') }}" - role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole + role_arn: arn:aws:iam::123456789012:role/service-role/invokeLambdaStepFunctionsRole tags: project: helloWorld @@ -67,7 +67,7 @@ community.aws.stepfunctions_state_machine: name: HelloWorldStateMachine definition: "{{ lookup('file','state_machine.json') }}" - role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole + role_arn: arn:aws:iam::123456789012:role/service-role/anotherStepFunctionsRole tags: otherTag: aDifferentTag diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py index 23c47af1d27..fbd2c7b164e 100644 --- a/stepfunctions_state_machine_execution.py +++ b/stepfunctions_state_machine_execution.py @@ -60,12 +60,12 @@ community.aws.stepfunctions_state_machine_execution: name: an_execution_name execution_input: '{ "IsHelloWorldExample": true }' - state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine" + state_machine_arn: "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine" - name: Stop an execution of a state machine community.aws.stepfunctions_state_machine_execution: action: stop - execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" + execution_arn: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" cause: "cause of task failure" error: "error code of the failure" ''' @@ -75,7 +75,7 @@ description: ARN of the AWS Step Functions state machine execution. type: str returned: if action == start and changed == True - sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" + sample: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" start_date: description: The date the execution is started. 
type: str diff --git a/storagegateway_info.py b/storagegateway_info.py index 9209dc401b4..87825711e1f 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -59,7 +59,7 @@ description: "Storage Gateway ARN" returned: always type: str - sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888" + sample: "arn:aws:storagegateway:eu-west-1:123456789012:gateway/sgw-9999F888" gateway_id: description: "Storage Gateway ID" returned: always @@ -89,7 +89,7 @@ description: "File share ARN" returned: always type: str - sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88" + sample: "arn:aws:storagegateway:eu-west-1:123456789012:share/share-AF999C88" file_share_id: description: "File share ID" returned: always @@ -109,7 +109,7 @@ description: "Tape ARN" returned: always type: str - sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88" + sample: "arn:aws:storagegateway:eu-west-1:123456789012:tape/tape-AF999C88" tape_barcode: description: "Tape ARN" returned: always diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index df4a01b5034..518234bbf5d 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -221,7 +221,7 @@ allow: {} statement: ip_set_reference_statement: - arn: 'arn:aws:wafv2:us-east-1:520789123123:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' + arn: 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' visibility_config: sampled_requests_enabled: true cloud_watch_metrics_enabled: true @@ -254,7 +254,7 @@ RETURN = """ arn: description: web acl arn - sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 + sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 type: str returned: Always, as long as the web acl exists description: From fe5eab3b7ce64ed3a17fc1f1bcb732a143e7cad8 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 28 Sep 2022 16:00:21 +0200 Subject: [PATCH 592/683] Migrate lambda* modules and tests (#1487) Migrate lambda* modules and tests Depends-On: ansible-collections/amazon.aws#1030 Remove lambda* modules and tests These modules have been migrated to amazon.aws Update runtime.yml with redirects to that collection Update ignore files Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- lambda.py | 800 ---------------------------------------------- lambda_alias.py | 327 ------------------- lambda_event.py | 429 ------------------------- lambda_execute.py | 283 ---------------- lambda_info.py | 535 ------------------------------- lambda_policy.py | 426 ------------------------ 6 files changed, 2800 deletions(-) delete mode 100644 lambda.py delete mode 100644 lambda_alias.py delete mode 100644 lambda_event.py delete mode 100644 lambda_execute.py delete mode 100644 lambda_info.py delete mode 100644 lambda_policy.py diff --git a/lambda.py b/lambda.py deleted file mode 100644 index 6849e5af67b..00000000000 --- a/lambda.py +++ /dev/null @@ -1,800 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: lambda -version_added: 1.0.0 -short_description: Manage AWS Lambda functions -description: - - Allows for the management of Lambda functions. 
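The module is, at bottom, an orchestration layer over the boto3 Lambda client. A minimal standalone sketch of the existence probe it performs (assuming configured AWS credentials; the function name is illustrative):

    import boto3
    from botocore.exceptions import ClientError

    client = boto3.client("lambda")
    try:
        # Mirrors the module's get_current_function() probe further below.
        config = client.get_function(FunctionName="myFunction")["Configuration"]
        print(config["FunctionArn"], config["Runtime"])
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            print("function does not exist yet")
        else:
            raise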
-options: - name: - description: - - The name you want to assign to the function you are uploading. Cannot be changed. - required: true - type: str - state: - description: - - Create or delete Lambda function. - default: present - choices: [ 'present', 'absent' ] - type: str - runtime: - description: - - The runtime environment for the Lambda function you are uploading. - - Required when creating a function. Uses parameters as described in boto3 docs. - - Required when I(state=present). - - For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). - type: str - role: - description: - - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) - resources. You may use the bare ARN if the role belongs to the same AWS account. - - Required when I(state=present). - type: str - handler: - description: - - The function within your code that Lambda calls to begin execution. - type: str - zip_file: - description: - - A .zip file containing your deployment package - - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. - aliases: [ 'src' ] - type: str - s3_bucket: - description: - - Amazon S3 bucket name where the .zip file containing your deployment package is stored. - - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. - - I(s3_bucket) and I(s3_key) are required together. - type: str - s3_key: - description: - - The Amazon S3 object (the deployment package) key name you want to upload. - - I(s3_bucket) and I(s3_key) are required together. - type: str - s3_object_version: - description: - - The Amazon S3 object (the deployment package) version you want to upload. - type: str - description: - description: - - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit. - type: str - timeout: - description: - - The function maximum execution time in seconds after which Lambda should terminate the function. - default: 3 - type: int - memory_size: - description: - - The amount of memory, in MB, your Lambda function is given. - default: 128 - type: int - vpc_subnet_ids: - description: - - List of subnet IDs to run Lambda function in. - - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC. - - If set, I(vpc_security_group_ids) must also be set. - type: list - elements: str - vpc_security_group_ids: - description: - - List of VPC security group IDs to associate with the Lambda function. - - Required when I(vpc_subnet_ids) is used. - type: list - elements: str - environment_variables: - description: - - A dictionary of environment variables the Lambda function is given. - type: dict - dead_letter_arn: - description: - - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. - type: str - tracing_mode: - description: - - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default. - choices: ['Active', 'PassThrough'] - type: str - kms_key_arn: - description: - - The KMS key ARN used to encrypt the function's environment variables. - type: str - version_added: 3.3.0 - architecture: - description: - - The instruction set architecture that the function supports. - - Requires one of I(s3_bucket) or I(zip_file). - - Requires botocore >= 1.21.51. 
- type: str - choices: ['x86_64', 'arm64'] - aliases: ['architectures'] - version_added: 5.0.0 -author: - - 'Steyn Huizinga (@steynovich)' -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.tags -''' - -EXAMPLES = r''' -# Create Lambda functions -- name: looped creation - community.aws.lambda: - name: '{{ item.name }}' - state: present - zip_file: '{{ item.zip_file }}' - runtime: 'python2.7' - role: 'arn:aws:iam::987654321012:role/lambda_basic_execution' - handler: 'hello_python.my_handler' - vpc_subnet_ids: - - subnet-123abcde - - subnet-edcba321 - vpc_security_group_ids: - - sg-123abcde - - sg-edcba321 - environment_variables: '{{ item.env_vars }}' - tags: - key1: 'value1' - loop: - - name: HelloWorld - zip_file: hello-code.zip - env_vars: - key1: "first" - key2: "second" - - name: ByeBye - zip_file: bye-code.zip - env_vars: - key1: "1" - key2: "2" - -# To remove previously added tags pass an empty dict -- name: remove tags - community.aws.lambda: - name: 'Lambda function' - state: present - zip_file: 'code.zip' - runtime: 'python2.7' - role: 'arn:aws:iam::987654321012:role/lambda_basic_execution' - handler: 'hello_python.my_handler' - tags: {} - -# Basic Lambda function deletion -- name: Delete Lambda functions HelloWorld and ByeBye - community.aws.lambda: - name: '{{ item }}' - state: absent - loop: - - HelloWorld - - ByeBye -''' - -RETURN = r''' -code: - description: The lambda function's code returned by get_function in boto3. - returned: success - type: dict - contains: - location: - description: - - The presigned URL you can use to download the function's .zip file that you previously uploaded. - - The URL is valid for up to 10 minutes. - returned: success - type: str - sample: 'https://prod-04-2014-tasks.s3.us-east-1.amazonaws.com/snapshots/sample' - repository_type: - description: The repository from which you can download the function. - returned: success - type: str - sample: 'S3' -configuration: - description: the lambda function's configuration metadata returned by get_function in boto3 - returned: success - type: dict - contains: - architectures: - description: The architectures supported by the function. - returned: successful run where botocore >= 1.21.51 - type: list - elements: str - sample: ['arm64'] - code_sha256: - description: The SHA256 hash of the function's deployment package. - returned: success - type: str - sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' - code_size: - description: The size of the function's deployment package in bytes. - returned: success - type: int - sample: 123 - dead_letter_config: - description: The function's dead letter queue. - returned: when the function has a dead letter queue configured - type: dict - sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } - contains: - target_arn: - description: The ARN of an SQS queue or SNS topic. - returned: when the function has a dead letter queue configured - type: str - sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 - description: - description: The function's description. - returned: success - type: str - sample: 'My function' - environment: - description: The function's environment variables. - returned: when environment variables exist - type: dict - contains: - variables: - description: Environment variable key-value pairs. - returned: when environment variables exist - type: dict - sample: {'key': 'value'} - error: - description: Error message for environment variables that could not be applied. 
- returned: when there is an error applying environment variables - type: dict - contains: - error_code: - description: The error code. - returned: when there is an error applying environment variables - type: str - message: - description: The error message. - returned: when there is an error applying environment variables - type: str - function_arn: - description: The function's Amazon Resource Name (ARN). - returned: on success - type: str - sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' - function_name: - description: The function's name. - returned: on success - type: str - sample: 'myFunction' - handler: - description: The function Lambda calls to begin executing your function. - returned: on success - type: str - sample: 'index.handler' - last_modified: - description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). - returned: on success - type: str - sample: '2017-08-01T00:00:00.000+0000' - memory_size: - description: The memory allocated to the function. - returned: on success - type: int - sample: 128 - revision_id: - description: The latest updated revision of the function or alias. - returned: on success - type: str - sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c' - role: - description: The function's execution role. - returned: on success - type: str - sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution' - runtime: - description: The runtime environment for the Lambda function. - returned: on success - type: str - sample: 'nodejs6.10' - tracing_config: - description: The function's AWS X-Ray tracing configuration. - returned: on success - type: dict - sample: { 'mode': 'Active' } - contains: - mode: - description: The tracing mode. - returned: on success - type: str - sample: 'Active' - timeout: - description: The amount of time that Lambda allows a function to run before terminating it. - returned: on success - type: int - sample: 3 - version: - description: The version of the Lambda function. - returned: on success - type: str - sample: '1' - vpc_config: - description: The function's networking configuration. - returned: on success - type: dict - sample: { - 'security_group_ids': [], - 'subnet_ids': [], - 'vpc_id': '123' - } -''' - -import base64 -import hashlib -import traceback -import re - -try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError -except ImportError: - pass # protected by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -def get_account_info(module): - """return the account information (account id and partition) we are currently working on - - get_account_info tries to find out the account that we are working - on. It's not guaranteed that this will be easy so we try in - several different ways. Giving either IAM or STS privileges to - the account should be enough to permit this.
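Outside Ansible, the same account discovery can be sketched directly against boto3 (assuming working credentials; the IAM fallback in the function body below is only needed when STS access is denied):

    import boto3

    caller = boto3.client("sts").get_caller_identity()
    account_id = caller["Account"]             # e.g. "123456789012"
    partition = caller["Arn"].split(":")[1]    # "aws", "aws-cn", "aws-us-gov", ...
    print(account_id, partition)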
- """ - account_id = None - partition = None - try: - sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) - caller_id = sts_client.get_caller_identity(aws_retry=True) - account_id = caller_id.get('Account') - partition = caller_id.get('Arn').split(':')[1] - except (BotoCoreError, ClientError): - try: - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') - except is_boto3_error_code('AccessDenied') as e: - try: - except_msg = to_native(e.message) - except AttributeError: - except_msg = to_native(e) - m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg) - if m is None: - module.fail_json_aws(e, msg="getting account information") - account_id = m.group(4) - partition = m.group(1) - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="getting account information") - - return account_id, partition - - -def get_current_function(connection, function_name, qualifier=None): - try: - if qualifier is not None: - return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True) - return connection.get_function(FunctionName=function_name, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException'): - return None - - -def sha256sum(filename): - hasher = hashlib.sha256() - with open(filename, 'rb') as f: - hasher.update(f.read()) - - code_hash = hasher.digest() - code_b64 = base64.b64encode(code_hash) - hex_digest = code_b64.decode('utf-8') - - return hex_digest - - -def set_tag(client, module, tags, function, purge_tags): - - if tags is None: - return False - - changed = False - arn = function['Configuration']['FunctionArn'] - - try: - current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {}) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to list tags") - - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) - - if not tags_to_remove and not tags_to_add: - return False - - if module.check_mode: - return True - - try: - if tags_to_remove: - client.untag_resource( - Resource=arn, - TagKeys=tags_to_remove, - aws_retry=True - ) - changed = True - - if tags_to_add: - client.tag_resource( - Resource=arn, - Tags=tags_to_add, - aws_retry=True - ) - changed = True - - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) - - return changed - - -def wait_for_lambda(client, module, name): - try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') - client_active_waiter.wait(FunctionName=name) - client_updated_waiter.wait(FunctionName=name) - except WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') - - -def format_response(response): - tags = response.get("Tags", {}) - result = camel_dict_to_snake_dict(response) - # Lambda returns a dict rather than the usual boto3 list of dicts - result["tags"] = tags - return result - - -def _zip_args(zip_file, current_config, ignore_checksum): - if not zip_file: - return {} - - # If there's another change that needs to happen, we always re-upload the code - if not ignore_checksum: - 
local_checksum = sha256sum(zip_file) - remote_checksum = current_config.get('CodeSha256', '') - if local_checksum == remote_checksum: - return {} - - with open(zip_file, 'rb') as f: - zip_content = f.read() - return {'ZipFile': zip_content} - - -def _s3_args(s3_bucket, s3_key, s3_object_version): - if not s3_bucket: - return {} - if not s3_key: - return {} - - code = {'S3Bucket': s3_bucket, - 'S3Key': s3_key} - if s3_object_version: - code.update({'S3ObjectVersion': s3_object_version}) - - return code - - -def _code_args(module, current_config): - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - architectures = module.params.get('architecture') - checksum_match = False - - code_kwargs = {} - - if architectures and current_config.get('Architectures', None) != [architectures]: - module.warn('Arch Change') - code_kwargs.update({'Architectures': [architectures]}) - - try: - code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs))) - except IOError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - - code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version)) - - if not code_kwargs: - return {} - - if not architectures and current_config.get('Architectures', None): - code_kwargs.update({'Architectures': current_config.get('Architectures', None)}) - - return code_kwargs - - -def main(): - argument_spec = dict( - name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - runtime=dict(), - role=dict(), - handler=dict(), - zip_file=dict(aliases=['src']), - s3_bucket=dict(), - s3_key=dict(no_log=False), - s3_object_version=dict(), - description=dict(default=''), - timeout=dict(type='int', default=3), - memory_size=dict(type='int', default=128), - vpc_subnet_ids=dict(type='list', elements='str'), - vpc_security_group_ids=dict(type='list', elements='str'), - environment_variables=dict(type='dict'), - dead_letter_arn=dict(), - kms_key_arn=dict(type='str', no_log=False), - tracing_mode=dict(choices=['Active', 'PassThrough']), - architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - ) - - mutually_exclusive = [['zip_file', 's3_key'], - ['zip_file', 's3_bucket'], - ['zip_file', 's3_object_version']] - - required_together = [['s3_key', 's3_bucket'], - ['vpc_subnet_ids', 'vpc_security_group_ids']] - - required_if = [ - ['state', 'present', ['runtime', 'handler', 'role']], - ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True], - ['architecture', 'arm64', ['zip_file', 's3_bucket'], True], - ] - - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_together=required_together, - required_if=required_if) - - name = module.params.get('name') - state = module.params.get('state').lower() - runtime = module.params.get('runtime') - role = module.params.get('role') - handler = module.params.get('handler') - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - description = module.params.get('description') - timeout = module.params.get('timeout') - memory_size = module.params.get('memory_size') - vpc_subnet_ids = module.params.get('vpc_subnet_ids') - 
vpc_security_group_ids = module.params.get('vpc_security_group_ids') - environment_variables = module.params.get('environment_variables') - dead_letter_arn = module.params.get('dead_letter_arn') - tracing_mode = module.params.get('tracing_mode') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - kms_key_arn = module.params.get('kms_key_arn') - architectures = module.params.get('architecture') - - check_mode = module.check_mode - changed = False - - if architectures: - module.require_botocore_at_least( - '1.21.51', reason='to configure the architectures that the function supports.') - - try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Trying to connect to AWS") - - if state == 'present': - if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): - role_arn = role - else: - # get account ID and assemble ARN - account_id, partition = get_account_info(module) - role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) - - # Get function configuration if present, False otherwise - current_function = get_current_function(client, name) - - # Update existing Lambda function - if state == 'present' and current_function: - - # Get current state - current_config = current_function['Configuration'] - current_version = None - - # Update function configuration - func_kwargs = {'FunctionName': name} - - # Update configuration if needed - if role_arn and current_config['Role'] != role_arn: - func_kwargs.update({'Role': role_arn}) - if handler and current_config['Handler'] != handler: - func_kwargs.update({'Handler': handler}) - if description and current_config['Description'] != description: - func_kwargs.update({'Description': description}) - if timeout and current_config['Timeout'] != timeout: - func_kwargs.update({'Timeout': timeout}) - if memory_size and current_config['MemorySize'] != memory_size: - func_kwargs.update({'MemorySize': memory_size}) - if runtime and current_config['Runtime'] != runtime: - func_kwargs.update({'Runtime': runtime}) - if (environment_variables is not None) and (current_config.get( - 'Environment', {}).get('Variables', {}) != environment_variables): - func_kwargs.update({'Environment': {'Variables': environment_variables}}) - if dead_letter_arn is not None: - if current_config.get('DeadLetterConfig'): - if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) - else: - if dead_letter_arn != "": - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) - if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode): - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) - if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) - - # If VPC configuration is desired - if vpc_subnet_ids: - - if 'VpcConfig' in current_config: - # Compare VPC config with current config - current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] - current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] - - subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) - vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) - - if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: - new_vpc_config = {'SubnetIds': vpc_subnet_ids, - 
'SecurityGroupIds': vpc_security_group_ids} - func_kwargs.update({'VpcConfig': new_vpc_config}) - else: - # No VPC configuration is desired, assure VPC config is empty when present in current config - if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'): - func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}}) - - # Upload new configuration if configuration has changed - if len(func_kwargs) > 1: - if not check_mode: - wait_for_lambda(client, module, name) - - try: - if not check_mode: - response = client.update_function_configuration(aws_retry=True, **func_kwargs) - current_version = response['Version'] - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Trying to update lambda configuration") - - # Tag Function - if tags is not None: - if set_tag(client, module, tags, current_function, purge_tags): - changed = True - - code_kwargs = _code_args(module, current_config) - if code_kwargs: - - # Update code configuration - code_kwargs.update({'FunctionName': name, 'Publish': True}) - - if not check_mode: - wait_for_lambda(client, module, name) - - try: - if not check_mode: - response = client.update_function_code(aws_retry=True, **code_kwargs) - current_version = response['Version'] - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Trying to upload new code") - - # Describe function code and configuration - response = get_current_function(client, name, qualifier=current_version) - if not response: - module.fail_json(msg='Unable to get function information after updating') - response = format_response(response) - # We're done - module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response) - - # Function doesn't exists, create new Lambda function - elif state == 'present': - - func_kwargs = {'FunctionName': name, - 'Publish': True, - 'Runtime': runtime, - 'Role': role_arn, - 'Timeout': timeout, - 'MemorySize': memory_size, - } - - code = _code_args(module, {}) - if not code: - module.fail_json(msg='Either S3 object or path to zipfile required') - if 'Architectures' in code: - func_kwargs.update({'Architectures': code.pop('Architectures')}) - func_kwargs.update({'Code': code}) - - if description is not None: - func_kwargs.update({'Description': description}) - - if handler is not None: - func_kwargs.update({'Handler': handler}) - - if environment_variables: - func_kwargs.update({'Environment': {'Variables': environment_variables}}) - - if dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) - - if tracing_mode: - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) - - if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) - - # If VPC configuration is given - if vpc_subnet_ids: - func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, - 'SecurityGroupIds': vpc_security_group_ids}}) - - # Tag Function - if tags: - func_kwargs.update({'Tags': tags}) - - # Function would have been created if not check mode - if check_mode: - module.exit_json(changed=True) - - # Finally try to create function - current_version = None - try: - response = client.create_function(aws_retry=True, **func_kwargs) - current_version = response['Version'] - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Trying to create function") - - response = get_current_function(client, name, qualifier=current_version) - if not response: - module.fail_json(msg='Unable to 
get function information after creating') - response = format_response(response) - module.exit_json(changed=changed, **response) - - # Delete existing Lambda function - if state == 'absent' and current_function: - try: - if not check_mode: - client.delete_function(FunctionName=name, aws_retry=True) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Trying to delete Lambda function") - - module.exit_json(changed=changed) - - # Function already absent, do nothing - elif state == 'absent': - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/lambda_alias.py b/lambda_alias.py deleted file mode 100644 index f1722ab5ab5..00000000000 --- a/lambda_alias.py +++ /dev/null @@ -1,327 +0,0 @@ -#!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_alias -version_added: 1.0.0 -short_description: Creates, updates or deletes AWS Lambda function aliases -description: - - This module allows the management of AWS Lambda function aliases via the Ansible - framework. It is idempotent and supports "Check" mode. Use module M(community.aws.lambda) to manage the lambda function - itself and M(community.aws.lambda_event) to manage event source mappings. - - -author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb) -options: - function_name: - description: - - The name of the lambda function. - required: true - type: str - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - name: - description: - - Name of the function alias. - required: true - aliases: ['alias_name'] - type: str - description: - description: - - A short, user-defined function alias description. - type: str - function_version: - description: - - Version associated with the Lambda function alias. - A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
- aliases: ['version'] - type: int -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' ---- -# Simple example to create a lambda function and publish a version -- hosts: localhost - gather_facts: false - vars: - state: present - project_folder: /path/to/deployment/package - deployment_package: lambda.zip - account: 123456789012 - production_version: 5 - tasks: - - name: AWS Lambda Function - lambda: - state: "{{ state | default('present') }}" - name: myLambdaFunction - publish: True - description: lambda function description - code_s3_bucket: package-bucket - code_s3_key: "lambda/{{ deployment_package }}" - local_path: "{{ project_folder }}/{{ deployment_package }}" - runtime: python2.7 - timeout: 5 - handler: lambda.handler - memory_size: 128 - role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" - - - name: Get information - lambda_info: - name: myLambdaFunction - register: lambda_info - - name: show results - ansible.builtin.debug: - msg: "{{ lambda_info['lambda_facts'] }}" - -# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) - - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " - community.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Dev - description: Development is $LATEST version - -# The QA alias will only be created when a new version is published (i.e. not = '$LATEST') - - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " - community.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: QA - version: "{{ lambda_info.lambda_facts.Version }}" - description: "QA is version {{ lambda_info.lambda_facts.Version }}" - when: lambda_info.lambda_facts.Version != "$LATEST" - -# The Prod alias will have a fixed version based on a variable - - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " - community.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Prod - version: "{{ production_version }}" - description: "Production is version {{ production_version }}" -''' - -RETURN = ''' ---- -alias_arn: - description: Full ARN of the function, including the alias - returned: success - type: str - sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev -description: - description: A short description of the alias - returned: success - type: str - sample: The development stage for my hot new app -function_version: - description: The qualifier that the alias refers to - returned: success - type: str - sample: $LATEST -name: - description: The name of the alias assigned - returned: success - type: str - sample: dev -revision_id: - description: A unique identifier that changes when you update the alias. 
- returned: success - type: str - sample: 12345678-1234-1234-1234-123456789abc -''' - -import re - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -def set_api_params(module, module_params): - """ - Sets non-None module parameters to those expected by the boto3 API. - - :param module: - :param module_params: - :return: - """ - - api_params = dict() - - for param in module_params: - module_param = module.params.get(param, None) - if module_param: - api_params[param] = module_param - - return snake_dict_to_camel_dict(api_params, capitalize_first=True) - - -def validate_params(module): - """ - Performs basic parameter validation. - - :param module: AnsibleAWSModule reference - :return: - """ - - function_name = module.params['function_name'] - - # validate function name - if not re.search(r'^[\w\-:]+$', function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) - ) - if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string - if module.params['function_version'] == 0: - module.params['function_version'] = '$LATEST' - else: - module.params['function_version'] = str(module.params['function_version']) - - return - - -def get_lambda_alias(module, client): - """ - Returns the lambda function alias if it exists. - - :param module: AnsibleAWSModule - :param client: (wrapped) boto3 lambda client - :return: - """ - - # set API parameters - api_params = set_api_params(module, ('function_name', 'name')) - - # check if alias exists and get facts - try: - results = client.get_alias(aws_retry=True, **api_params) - except is_boto3_error_code('ResourceNotFoundException'): - results = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error retrieving function alias') - - return results - - -def lambda_alias(module, client): - """ - Adds, updates or deletes lambda function aliases. 
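The branches below reduce to three boto3 calls; a hedged standalone sketch of the create-or-update path (function and alias names are illustrative):

    import boto3
    from botocore.exceptions import ClientError

    client = boto3.client("lambda")
    try:
        client.get_alias(FunctionName="myFunction", Name="Dev")
        client.update_alias(FunctionName="myFunction", Name="Dev",
                            FunctionVersion="$LATEST")
    except ClientError as e:
        if e.response["Error"]["Code"] != "ResourceNotFoundException":
            raise
        client.create_alias(FunctionName="myFunction", Name="Dev",
                            FunctionVersion="$LATEST",
                            Description="Development alias")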
- - :param module: AnsibleAWSModule - :param client: (wrapped) boto3 lambda client - :return dict: - """ - results = dict() - changed = False - current_state = 'absent' - state = module.params['state'] - - facts = get_lambda_alias(module, client) - if facts: - current_state = 'present' - - if state == 'present': - if current_state == 'present': - snake_facts = camel_dict_to_snake_dict(facts) - - # check if alias has changed -- only version and description can change - alias_params = ('function_version', 'description') - for param in alias_params: - if module.params.get(param) is None: - continue - if module.params.get(param) != snake_facts.get(param): - changed = True - break - - if changed: - api_params = set_api_params(module, ('function_name', 'name')) - api_params.update(set_api_params(module, alias_params)) - - if not module.check_mode: - try: - results = client.update_alias(aws_retry=True, **api_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error updating function alias') - - else: - # create new function alias - api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) - - try: - if not module.check_mode: - results = client.create_alias(aws_retry=True, **api_params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating function alias') - - else: # state = 'absent' - if current_state == 'present': - # delete the function - api_params = set_api_params(module, ('function_name', 'name')) - - try: - if not module.check_mode: - results = client.delete_alias(aws_retry=True, **api_params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error deleting function alias') - - return dict(changed=changed, **dict(results or facts or {})) - - -def main(): - """ - Main entry point. - - :return dict: ansible facts - """ - argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - function_name=dict(required=True), - name=dict(required=True, aliases=['alias_name']), - function_version=dict(type='int', required=False, default=0, aliases=['version']), - description=dict(required=False, default=None), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[], - required_together=[], - ) - - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - - validate_params(module) - results = lambda_alias(module, client) - - module.exit_json(**camel_dict_to_snake_dict(results)) - - -if __name__ == '__main__': - main() diff --git a/lambda_event.py b/lambda_event.py deleted file mode 100644 index cd99ceb199f..00000000000 --- a/lambda_event.py +++ /dev/null @@ -1,429 +0,0 @@ -#!/usr/bin/python -# (c) 2016, Pierre Jodouin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_event -version_added: 1.0.0 -short_description: Creates, updates or deletes AWS Lambda function event mappings -description: - - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream - events via the Ansible framework. 
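The set_api_params() helper in lambda_alias above delegates the snake_case to PascalCase translation to ansible-core's dict transformation utilities. A minimal standalone sketch of that translation, assuming only that ansible-core is installed; the parameter values are illustrative:

from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

# Snake_case parameters as an Ansible module would collect them; None stands
# in for options the user did not set.
module_params = {
    'function_name': 'myLambdaFunction',
    'name': 'Dev',
    'function_version': None,
    'description': 'Development alias',
}

# Mirror the filtering loop in set_api_params(): drop unset parameters first.
api_params = {key: value for key, value in module_params.items() if value}

print(snake_dict_to_camel_dict(api_params, capitalize_first=True))
# {'FunctionName': 'myLambdaFunction', 'Name': 'Dev', 'Description': 'Development alias'}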
These event source mappings are relevant only in the AWS Lambda pull model, where - AWS Lambda invokes the function. - It is idempotent and supports "Check" mode. Use module M(community.aws.lambda) to manage the lambda - function itself and M(community.aws.lambda_alias) to manage function aliases. - - -author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb) -options: - lambda_function_arn: - description: - - The name or ARN of the lambda function. - required: true - aliases: ['function_name', 'function_arn'] - type: str - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - alias: - description: - - Name of the function alias. - - Mutually exclusive with I(version). - type: str - version: - description: - - Version of the Lambda function. - - Mutually exclusive with I(alias). - type: int - event_source: - description: - - Source of the event that triggers the lambda function. - - For DynamoDB and Kinesis events, select C(stream). - - For SQS queues, select C(sqs). - default: stream - choices: ['stream', 'sqs'] - type: str - source_params: - description: - - Sub-parameters required for event source. - suboptions: - source_arn: - description: - - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source. - type: str - required: true - enabled: - description: - - Indicates whether AWS Lambda should begin polling or reading from the event source. - default: true - type: bool - batch_size: - description: - - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. - default: 100 - type: int - starting_position: - description: - - The position in the stream where AWS Lambda should start reading. - - Required when I(event_source=stream).
- choices: [TRIM_HORIZON, LATEST] - type: str - required: true - type: dict -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' -# Example that creates a lambda event notification for a DynamoDB stream -- name: DynamoDB stream event mapping - community.aws.lambda_event: - state: present - event_source: stream - function_name: "{{ function_name }}" - alias: Dev - source_params: - source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 - enabled: True - batch_size: 100 - starting_position: TRIM_HORIZON - register: event - -- name: Show source event - ansible.builtin.debug: - var: event.lambda_stream_events -''' - -RETURN = ''' ---- -lambda_stream_events: - description: list of dictionaries returned by the API describing stream event mappings - returned: success - type: list -''' - -import re - -try: - from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - - -# --------------------------------------------------------------------------------------------------- -# -# Helper Functions & classes -# -# --------------------------------------------------------------------------------------------------- - - -class AWSConnection: - """ - Create the connection object and client objects as required. - """ - - def __init__(self, ansible_obj, resources, use_boto3=True): - - try: - self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3) - - self.resource_client = dict() - if not resources: - resources = ['lambda'] - - resources.append('iam') - - for resource in resources: - aws_connect_kwargs.update(dict(region=self.region, - endpoint=self.endpoint, - conn_type='client', - resource=resource - )) - self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) - - # if region is not provided, then get default profile/session region - if not self.region: - self.region = self.resource_client['lambda'].meta.region_name - - except (ClientError, ParamValidationError, MissingParametersError) as e: - ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) - - # set account ID - try: - self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] - except (ClientError, ValueError, KeyError, IndexError): - self.account_id = '' - - def client(self, resource='lambda'): - return self.resource_client[resource] - - -def pc(key): - """ - Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. - - :param key: - :return: - """ - - return "".join([token.capitalize() for token in key.split('_')]) - - -def ordered_obj(obj): - """ - Order object for comparison purposes - - :param obj: - :return: - """ - - if isinstance(obj, dict): - return sorted((k, ordered_obj(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered_obj(x) for x in obj) - else: - return obj - - -def set_api_sub_params(params): - """ - Sets module sub-parameters to those expected by the boto3 API.
- - :param params: - :return: - """ - - api_params = dict() - - for param in params.keys(): - param_value = params.get(param, None) - if param_value: - api_params[pc(param)] = param_value - - return api_params - - -def validate_params(module, aws): - """ - Performs basic parameter validation. - - :param module: - :param aws: - :return: - """ - - function_name = module.params['lambda_function_arn'] - - # validate function name - if not re.search(r'^[\w\-:]+$', function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) - ) - if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name)) - - # check if 'function_name' needs to be expanded into full ARN format - if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'): - function_name = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name) - - qualifier = get_qualifier(module) - if qualifier: - function_arn = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) - - return - - -def get_qualifier(module): - """ - Returns the function qualifier as a version or alias or None. - - :param module: - :return: - """ - - qualifier = None - if module.params['version'] > 0: - qualifier = str(module.params['version']) - elif module.params['alias']: - qualifier = str(module.params['alias']) - - return qualifier - - -# --------------------------------------------------------------------------------------------------- -# -# Lambda Event Handlers -# -# This section defines a lambda_event_X function where X is an AWS service capable of initiating -# the execution of a Lambda function (pull only). -# -# --------------------------------------------------------------------------------------------------- - -def lambda_event_stream(module, aws): - """ - Adds, updates or deletes lambda stream (DynamoDB, Kinesis) event notifications.
- :param module: - :param aws: - :return: - """ - - client = aws.client('lambda') - facts = dict() - changed = False - current_state = 'absent' - state = module.params['state'] - - api_params = dict(FunctionName=module.params['lambda_function_arn']) - - # check if required sub-parameters are present and valid - source_params = module.params['source_params'] - - source_arn = source_params.get('source_arn') - if source_arn: - api_params.update(EventSourceArn=source_arn) - else: - module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") - - # check if optional sub-parameters are valid, if present - batch_size = source_params.get('batch_size') - if batch_size: - try: - source_params['batch_size'] = int(batch_size) - except ValueError: - module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) - - # optional boolean value needs special treatment as not present does not imply False - source_param_enabled = module.boolean(source_params.get('enabled', 'True')) - - # check if event mapping exists - try: - facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] - if facts: - current_state = 'present' - except ClientError as e: - module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) - - if state == 'present': - if current_state == 'absent': - - starting_position = source_params.get('starting_position') - if starting_position: - api_params.update(StartingPosition=starting_position) - elif module.params.get('event_source') == 'sqs': - # starting position is not required for SQS - pass - else: - module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.") - - if source_arn: - api_params.update(Enabled=source_param_enabled) - if source_params.get('batch_size'): - api_params.update(BatchSize=source_params.get('batch_size')) - - try: - if not module.check_mode: - facts = client.create_event_source_mapping(**api_params) - changed = True - except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) - - else: - # current_state is 'present' - api_params = dict(FunctionName=module.params['lambda_function_arn']) - current_mapping = facts[0] - api_params.update(UUID=current_mapping['UUID']) - mapping_changed = False - - # check if anything changed - if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: - api_params.update(BatchSize=source_params['batch_size']) - mapping_changed = True - - if source_param_enabled is not None: - if source_param_enabled: - if current_mapping['State'] not in ('Enabled', 'Enabling'): - api_params.update(Enabled=True) - mapping_changed = True - else: - if current_mapping['State'] not in ('Disabled', 'Disabling'): - api_params.update(Enabled=False) - mapping_changed = True - - if mapping_changed: - try: - if not module.check_mode: - facts = client.update_event_source_mapping(**api_params) - changed = True - except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) - - else: - if current_state == 'present': - # remove the stream event mapping - api_params = dict(UUID=facts[0]['UUID']) - - try: - if not module.check_mode: - facts = client.delete_event_source_mapping(**api_params) - changed = True - except (ClientError, ParamValidationError,
MissingParametersError) as e: - module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e)) - - return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) - - -def main(): - """Main entry point: validates parameters and manages the Lambda event source mapping.""" - source_choices = ["stream", "sqs"] - - argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), - event_source=dict(required=False, default="stream", choices=source_choices), - source_params=dict(type='dict', required=True), - alias=dict(required=False, default=None), - version=dict(type='int', required=False, default=0), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['alias', 'version']], - required_together=[], - ) - - aws = AWSConnection(module, ['lambda']) - - validate_params(module, aws) - - if module.params['event_source'].lower() in ('stream', 'sqs'): - results = lambda_event_stream(module, aws) - else: - module.fail_json(msg='Please select `stream` or `sqs` as the event type') - - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/lambda_execute.py b/lambda_execute.py deleted file mode 100644 index 1d652466d6b..00000000000 --- a/lambda_execute.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_execute -version_added: 1.0.0 -short_description: Execute an AWS Lambda function -description: - - This module executes AWS Lambda functions, allowing synchronous and asynchronous - invocation. - - Prior to release 5.0.0 this module was called C(community.aws.execute_lambda). - The usage did not change. -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -author: - - "Ryan Scott Brown (@ryansb)" -notes: - - Async invocation will always return an empty C(output) key. - - Synchronous invocation may result in a function timeout, resulting in an - empty C(output) key. -options: - name: - description: - - The name of the function to be invoked. This can only be used for - invocations within the calling account. To invoke a function in another - account, use I(function_arn) to specify the full ARN. - type: str - function_arn: - description: - - The full ARN of the function to be invoked. - type: str - tail_log: - description: - - If I(tail_log=true), the result of the task will include the last 4 KB - of the CloudWatch log for the function execution. Log tailing only - works if you use synchronous invocation I(wait=true). This is usually - used for development or testing Lambdas. - type: bool - default: false - wait: - description: - - Whether to wait for the function results or not. If I(wait=no) - the task will not return any results. To wait for the Lambda function - to complete, set I(wait=true) and the result will be available in the - I(output) key. - type: bool - default: true - dry_run: - description: - - Do not *actually* invoke the function. A C(DryRun) call will check that - the caller has permissions to call the function, especially for - checking cross-account permissions. - type: bool - default: false - version_qualifier: - description: - - Which version/alias of the function to run.
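To make the name handling in lambda_event's validate_params() above concrete, here is a small standalone sketch of the ARN expansion applied when a bare function name is supplied; the region, account id and alias are placeholder values, not real resources:

region = 'us-east-1'          # resolved from the connection in the module
account_id = '123456789012'   # resolved via IAM get_user() in the module
function_name = 'myFunction'
qualifier = 'Dev'             # an alias name, or a stringified version number

# Expand the bare name into a full Lambda ARN, then append the qualifier.
function_arn = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account_id, function_name)
if qualifier:
    function_arn = '{0}:{1}'.format(function_arn, qualifier)

print(function_arn)  # arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev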
This defaults to the - C(LATEST) revision, but can be set to any existing version or alias. - See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html) - for details. - type: str - payload: - description: - - A dictionary in any form to be provided as input to the Lambda function. - default: {} - type: dict -''' - -EXAMPLES = ''' -- community.aws.lambda_execute: - name: test-function - # the payload is automatically serialized and sent to the function - payload: - foo: bar - value: 8 - register: response - -# Test that you have sufficient permissions to execute a Lambda function in -# another account -- community.aws.lambda_execute: - function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function - dry_run: true - -- community.aws.lambda_execute: - name: test-function - payload: - foo: bar - value: 8 - wait: true - tail_log: true - register: response - # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda - -# Pass the Lambda event payload as a json file. -- community.aws.lambda_execute: - name: test-function - payload: "{{ lookup('file','lambda_event.json') }}" - register: response - -- community.aws.lambda_execute: - name: test-function - version_qualifier: PRODUCTION -''' - -RETURN = ''' -result: - description: Resulting data structure from a successful task execution. - returned: success - type: dict - contains: - output: - description: Function output if wait=true and the function returns a value - returned: success - type: dict - sample: "{ 'output': 'something' }" - logs: - description: The last 4KB of the function logs. Only provided if I(tail_log) is C(true) - type: str - returned: if I(tail_log) == true - status: - description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async) - type: int - sample: 200 - returned: always -''' - -import base64 -import json - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -def main(): - argument_spec = dict( - name=dict(), - function_arn=dict(), - wait=dict(default=True, type='bool'), - tail_log=dict(default=False, type='bool'), - dry_run=dict(default=False, type='bool'), - version_qualifier=dict(), - payload=dict(default={}, type='dict'), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['name', 'function_arn'], - ], - required_one_of=[ - ('name', 'function_arn') - ], - ) - - name = module.params.get('name') - function_arn = module.params.get('function_arn') - await_return = module.params.get('wait') - dry_run = module.params.get('dry_run') - tail_log = module.params.get('tail_log') - version_qualifier = module.params.get('version_qualifier') - payload = module.params.get('payload') - - try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - invoke_params = {} - - if await_return: - # await response - invoke_params['InvocationType'] = 'RequestResponse' - else: - # fire and forget - invoke_params['InvocationType'] = 'Event' - if dry_run or module.check_mode: - # dry_run overrides invocation type - 
invoke_params['InvocationType'] = 'DryRun' - - if tail_log and await_return: - invoke_params['LogType'] = 'Tail' - elif tail_log and not await_return: - module.fail_json(msg="The `tail_log` parameter is only available if " - "the invocation waits for the function to complete. " - "Set `wait` to true or turn off `tail_log`.") - else: - invoke_params['LogType'] = 'None' - - if version_qualifier: - invoke_params['Qualifier'] = version_qualifier - - if payload: - invoke_params['Payload'] = json.dumps(payload) - - if function_arn: - invoke_params['FunctionName'] = function_arn - elif name: - invoke_params['FunctionName'] = name - - if module.check_mode: - module.exit_json(changed=True) - - try: - wait_for_lambda(client, module, name) - response = client.invoke(**invoke_params, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException') as nfe: - module.fail_json_aws(nfe, msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.") - except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except - module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") - except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except - module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") - except Exception as e: - module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") - - results = { - 'logs': '', - 'status': response['StatusCode'], - 'output': '', - } - - if response.get('LogResult'): - try: - # logs are base64 encoded in the API response - results['logs'] = base64.b64decode(response.get('LogResult', '')) - except Exception as e: - module.fail_json_aws(e, msg="Failed while decoding logs") - - if invoke_params['InvocationType'] == 'RequestResponse': - try: - results['output'] = json.loads(response['Payload'].read().decode('utf8')) - except Exception as e: - module.fail_json_aws(e, msg="Failed while decoding function return value") - - if isinstance(results.get('output'), dict) and any( - [results['output'].get('stackTrace'), results['output'].get('errorMessage')]): - # AWS sends back stack traces and error messages when a function failed - # in a RequestResponse (synchronous) context. - template = ("Function executed, but there was an error in the Lambda function. 
" - "Message: {errmsg}, Type: {type}, Stack Trace: {trace}") - error_data = { - # format the stacktrace sent back as an array into a multiline string - 'trace': '\n'.join( - [' '.join([ - str(x) for x in line # cast line numbers to strings - ]) for line in results.get('output', {}).get('stackTrace', [])] - ), - 'errmsg': results['output'].get('errorMessage'), - 'type': results['output'].get('errorType') - } - module.fail_json(msg=template.format(**error_data), result=results) - - module.exit_json(changed=True, result=results) - - -def wait_for_lambda(client, module, name): - try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') - client_active_waiter.wait(FunctionName=name) - client_updated_waiter.wait(FunctionName=name) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') - - -if __name__ == '__main__': - main() diff --git a/lambda_info.py b/lambda_info.py deleted file mode 100644 index 2f091e6e295..00000000000 --- a/lambda_info.py +++ /dev/null @@ -1,535 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_info -version_added: 1.0.0 -short_description: Gathers AWS Lambda function details -description: - - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. - - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases, - M(community.aws.lambda_event) to manage lambda event source mappings, and M(community.aws.lambda_policy) to manage policy statements. -options: - query: - description: - - Specifies the resource type for which to gather information. - - Defaults to C(all) when I(function_name) is specified. - - Defaults to C(config) when I(function_name) is NOT specified. - choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ] - type: str - function_name: - description: - - The name of the lambda function for which information is requested. - aliases: [ "function", "name"] - type: str - event_source_arn: - description: - - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. - type: str -author: - - Pierre Jodouin (@pjodouin) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 -''' - -EXAMPLES = ''' ---- -# Simple example of listing all info for a function -- name: List all for a specific function - community.aws.lambda_info: - query: all - function_name: myFunction - register: my_function_details - -# List all versions of a function -- name: List function versions - community.aws.lambda_info: - query: versions - function_name: myFunction - register: my_function_versions - -# List all info for all functions -- name: List all functions - community.aws.lambda_info: - query: all - register: output - -- name: show Lambda information - ansible.builtin.debug: - msg: "{{ output['function'] }}" -''' - -RETURN = ''' ---- -function: - description: - - lambda function list. 
- - C(function) has been deprecated and will be removed in the next major release after 2025-01-01. - returned: success - type: dict -function.TheName: - description: - - lambda function information, including event, mapping, and version information. - - C(function) has been deprecated and will be removed in the next major release after 2025-01-01. - returned: success - type: dict -functions: - description: List of information for each lambda function matching the query. - returned: always - type: list - elements: dict - version_added: 4.1.0 - contains: - aliases: - description: The aliases associated with the function. - returned: when C(query) is I(aliases) or I(all) - type: list - elements: str - architectures: - description: The architectures supported by the function. - returned: successful run where botocore >= 1.21.51 - type: list - elements: str - sample: ['arm64'] - code_sha256: - description: The SHA256 hash of the function's deployment package. - returned: success - type: str - sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' - code_size: - description: The size of the function's deployment package in bytes. - returned: success - type: int - sample: 123 - dead_letter_config: - description: The function's dead letter queue. - returned: when the function has a dead letter queue configured - type: dict - sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } - contains: - target_arn: - description: The ARN of an SQS queue or SNS topic. - returned: when the function has a dead letter queue configured - type: str - sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 - description: - description: The function's description. - returned: success - type: str - sample: 'My function' - environment: - description: The function's environment variables. - returned: when environment variables exist - type: dict - contains: - variables: - description: Environment variable key-value pairs. - returned: when environment variables exist - type: dict - sample: {'key': 'value'} - error: - description: Error message for environment variables that could not be applied. - returned: when there is an error applying environment variables - type: dict - contains: - error_code: - description: The error code. - returned: when there is an error applying environment variables - type: str - message: - description: The error message. - returned: when there is an error applying environment variables - type: str - function_arn: - description: The function's Amazon Resource Name (ARN). - returned: on success - type: str - sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' - function_name: - description: The function's name. - returned: on success - type: str - sample: 'myFunction' - handler: - description: The function Lambda calls to begin executing your function. - returned: on success - type: str - sample: 'index.handler' - last_modified: - description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). - returned: on success - type: str - sample: '2017-08-01T00:00:00.000+0000' - mappings: - description: List of configuration information for each event source mapping. - returned: when C(query) is I(all) or I(mappings) - type: list - elements: dict - contains: - uuid: - description: The AWS Lambda assigned opaque identifier for the mapping.
- returned: on success - type: str - batch_size: - description: The largest number of records that AWS Lambda will retrieve from the event source at the time of invoking the function. - returned: on success - type: int - event_source_arn: - description: The ARN of the Amazon Kinesis or DynamoDB stream that is the source of events. - returned: on success - type: str - function_arn: - description: The Lambda function to invoke when AWS Lambda detects an event on the poll-based source. - returned: on success - type: str - last_modified: - description: The UTC time string indicating the last time the event mapping was updated. - returned: on success - type: str - last_processing_result: - description: The result of the last AWS Lambda invocation of your Lambda function. - returned: on success - type: str - state: - description: The state of the event source mapping. - returned: on success - type: str - state_transition_reason: - description: The reason the event source mapping is in its current state. - returned: on success - type: str - memory_size: - description: The memory allocated to the function. - returned: on success - type: int - sample: 128 - policy: - description: The policy associated with the function. - returned: when C(query) is I(all) or I(policy) - type: dict - revision_id: - description: The latest updated revision of the function or alias. - returned: on success - type: str - sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c' - role: - description: The function's execution role. - returned: on success - type: str - sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution' - runtime: - description: The runtime environment for the Lambda function. - returned: on success - type: str - sample: 'nodejs6.10' - tracing_config: - description: The function's AWS X-Ray tracing configuration. - returned: on success - type: dict - sample: { 'mode': 'Active' } - contains: - mode: - description: The tracing mode. - returned: on success - type: str - sample: 'Active' - timeout: - description: The amount of time that Lambda allows a function to run before terminating it. - returned: on success - type: int - sample: 3 - version: - description: The version of the Lambda function. - returned: on success - type: str - sample: '1' - versions: - description: List of Lambda function versions. - returned: when C(query) is I(all) or I(versions) - type: list - elements: dict - vpc_config: - description: The function's networking configuration. - returned: on success - type: dict - sample: { - 'security_group_ids': [], - 'subnet_ids': [], - 'vpc_id': '123' - } -''' -import json -import re - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -@AWSRetry.jittered_backoff() -def _paginate(client, function, **params): - paginator = client.get_paginator(function) - return paginator.paginate(**params).build_full_result() - - -def alias_details(client, module, function_name): - """ - Returns list of aliases for a specified function.
- - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - - try: - lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get aliases") - - return camel_dict_to_snake_dict(lambda_info) - - -def list_functions(client, module): - """ - Returns queried facts for a specified function (or all functions). - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - """ - - function_name = module.params.get('function_name') - if function_name: - # Function name is specified - retrieve info on that function - function_names = [function_name] - - else: - # Function name is not specified - retrieve all function names - all_function_info = _paginate(client, 'list_functions')['Functions'] - function_names = [function_info['FunctionName'] for function_info in all_function_info] - - query = module.params['query'] - functions = [] - - # keep returning deprecated response (dict of dicts) until removed - all_facts = {} - - for function_name in function_names: - function = {} - - # query = 'config' returns info such as FunctionName, FunctionArn, Description, etc - # these details should be returned regardless of the query - function.update(config_details(client, module, function_name)) - - if query in ['all', 'aliases']: - function.update(alias_details(client, module, function_name)) - - if query in ['all', 'policy']: - function.update(policy_details(client, module, function_name)) - - if query in ['all', 'versions']: - function.update(version_details(client, module, function_name)) - - if query in ['all', 'mappings']: - function.update(mapping_details(client, module, function_name)) - - if query in ['all', 'tags']: - function.update(tags_details(client, module, function_name)) - - all_facts[function['function_name']] = function - - # add current lambda to list of lambdas - functions.append(function) - - # return info - module.exit_json(function=all_facts, functions=functions, changed=False) - - -def config_details(client, module, function_name): - """ - Returns configuration details for a lambda function. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - - try: - lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - - return camel_dict_to_snake_dict(lambda_info) - - -def mapping_details(client, module, function_name): - """ - Returns all lambda event source mappings. 
- - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - params = dict() - - params['FunctionName'] = function_name - - if module.params.get('event_source_arn'): - params['EventSourceArn'] = module.params.get('event_source_arn') - - try: - lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(mappings=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get source event mappings") - - return camel_dict_to_snake_dict(lambda_info) - - -def policy_details(client, module, function_name): - """ - Returns policy attached to a lambda function. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - - try: - # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - - return camel_dict_to_snake_dict(lambda_info) - - -def version_details(client, module, function_name): - """ - Returns all lambda function versions. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - - try: - lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - - return camel_dict_to_snake_dict(lambda_info) - - -def tags_details(client, module, function_name): - """ - Returns tag details for a lambda function. - - :param client: AWS API client reference (boto3) - :param module: Ansible module reference - :param function_name (str): Name of Lambda function to query - :return dict: - """ - - lambda_info = dict() - - try: - lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) - - return camel_dict_to_snake_dict(lambda_info) - - -def main(): - """ - Main entry point. 
- - :return dict: ansible facts - """ - argument_spec = dict( - function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), - event_source_arn=dict(required=False, default=None), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[], - required_together=[] - ) - - # validate function_name if present - function_name = module.params['function_name'] - if function_name: - if not re.search(r"^[\w\-:]+$", function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) - ) - if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - # create default values for query if not specified. - # if function name exists, query should default to 'all'. - # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. - if not module.params.get('query'): - if function_name: - module.params['query'] = 'all' - else: - module.params['query'] = 'config' - - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - - # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts - module.deprecate( - "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," - " which returns a list of dictionaries. Both keys are returned for now.", - date='2025-01-01', - collection_name='community.aws' - ) - - list_functions(client, module) - - -if __name__ == '__main__': - main() diff --git a/lambda_policy.py b/lambda_policy.py deleted file mode 100644 index 4fc5b084ed9..00000000000 --- a/lambda_policy.py +++ /dev/null @@ -1,426 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2016, Pierre Jodouin -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lambda_policy -version_added: 1.0.0 -short_description: Creates, updates or deletes AWS Lambda policy statements. -description: - - This module allows the management of AWS Lambda policy statements. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases, - M(community.aws.lambda_event) to manage event source mappings such as Kinesis streams, M(community.aws.execute_lambda) to execute a - lambda function and M(community.aws.lambda_info) to gather information relating to one or more lambda functions. - - -author: - - Pierre Jodouin (@pjodouin) - - Michael De La Rue (@mikedlr) -options: - function_name: - description: - - "Name of the Lambda function whose resource policy you are updating by adding a new permission." - - "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the" - - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail) ). AWS Lambda also allows you to" - - "specify partial ARN (for example, C(account-id:Thumbnail) ). Note that the length constraint applies only to the" - - "ARN. 
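The _paginate() helper in lambda_info above is a thin, retry-decorated wrapper over boto3's paginators. A minimal sketch of the same pagination pattern without the retry decorator, assuming boto3 is installed and credentials are configured:

import boto3

client = boto3.client('lambda', region_name='us-east-1')

# build_full_result() walks every page and merges the per-page lists,
# which is what _paginate() hands back to the various *_details() helpers.
paginator = client.get_paginator('list_functions')
all_functions = paginator.paginate().build_full_result()['Functions']

for function in all_functions:
    print(function['FunctionName'])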
If you specify only the function name, it is limited to 64 characters in length." - required: true - aliases: ['lambda_function_arn', 'function_arn'] - type: str - - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - - alias: - description: - - Name of the function alias. Mutually exclusive with I(version). - type: str - - version: - description: - - Version of the Lambda function. Mutually exclusive with I(alias). - type: int - - statement_id: - description: - - A unique statement identifier. - required: true - aliases: ['sid'] - type: str - - action: - description: - - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with - lambda: followed by the API name (see Operations). For example, C(lambda:CreateFunction). You can use wildcard - (C(lambda:*)) to grant permission for all AWS Lambda actions." - required: true - type: str - - principal: - description: - - "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com) if - you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or - any valid AWS service principal such as sns.amazonaws.com. For example, you might want to allow a custom - application in another AWS account to push events to AWS Lambda by invoking your function." - required: true - type: str - - source_arn: - description: - - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this - field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from - the specified bucket can invoke the function. - type: str - - source_account: - description: - - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket, - then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you - specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS - account created the bucket). You can also use this condition to specify all sources (that is, you don't - specify the I(source_arn)) owned by a specific account. - type: str - - event_source_token: - description: - - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account).
- type: str - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -''' - -EXAMPLES = ''' - -- name: Lambda S3 event notification - community.aws.lambda_policy: - state: present - function_name: functionName - alias: Dev - statement_id: lambda-s3-myBucket-create-data-log - action: lambda:InvokeFunction - principal: s3.amazonaws.com - source_arn: arn:aws:s3:::bucketName - source_account: 123456789012 - register: lambda_policy_action - -- name: show results - ansible.builtin.debug: - var: lambda_policy_action -''' - -RETURN = ''' ---- -lambda_policy_action: - description: describes what action was taken - returned: success - type: str -''' - -import json -import re - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - - -def pc(key): - """ - Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. - - :param key: - :return: - """ - - return "".join([token.capitalize() for token in key.split('_')]) - - -def policy_equal(module, current_statement): - for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'): - if module.params.get(param) != current_statement.get(param): - return False - - return True - - -def set_api_params(module, module_params): - """ - Sets module parameters to those expected by the boto3 API. - - :param module: - :param module_params: - :return: - """ - - api_params = dict() - - for param in module_params: - module_param = module.params.get(param) - if module_param is not None: - api_params[pc(param)] = module_param - - return api_params - - -def validate_params(module): - """ - Performs parameter validation beyond the module framework's validation. - - :param module: - :return: - """ - - function_name = module.params['function_name'] - - # validate function name - if function_name.startswith('arn:'): - if not re.search(r'^[\w\-:]+$', function_name): - module.fail_json( - msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name) - ) - if len(function_name) > 140: - module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) - else: - if not re.search(r'^[\w\-]+$', function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format( - function_name) - ) - if len(function_name) > 64: - module.fail_json( - msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - -def get_qualifier(module): - """ - Returns the function qualifier as a version or alias or None. - - :param module: - :return: - """ - - if module.params.get('version') is not None: - return to_native(module.params['version']) - elif module.params['alias']: - return to_native(module.params['alias']) - - return None - - -def extract_statement(policy, sid): - """return flattened single policy statement from a policy - - If a policy statement is present in the policy extract it and - return it in a flattened form. Otherwise return an empty - dictionary. - """ - if 'Statement' not in policy: - return {} - policy_statement = {} - # Now that we have the policy, check if required permission statement is present and flatten to - # simple dictionary if found.
- for statement in policy['Statement']: - if statement['Sid'] == sid: - policy_statement['action'] = statement['Action'] - try: - policy_statement['principal'] = statement['Principal']['Service'] - except KeyError: - pass - try: - policy_statement['principal'] = statement['Principal']['AWS'] - except KeyError: - pass - try: - policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn'] - except KeyError: - pass - try: - policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount'] - except KeyError: - pass - try: - policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken'] - except KeyError: - pass - break - - return policy_statement - - -def get_policy_statement(module, client): - """Checks that policy exists and if so, that statement ID is present or absent. - - :param module: - :param client: - :return: - """ - sid = module.params['statement_id'] - - # set API parameters - api_params = set_api_params(module, ('function_name', )) - qualifier = get_qualifier(module) - if qualifier: - api_params.update(Qualifier=qualifier) - - policy_results = None - # check if function policy exists - try: - policy_results = client.get_policy(**api_params) - except is_boto3_error_code('ResourceNotFoundException'): - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="retrieving function policy") - - # get_policy returns a JSON string so must convert to dict before reassigning to its key - policy = json.loads(policy_results.get('Policy', '{}')) - return extract_statement(policy, sid) - - -def add_policy_permission(module, client): - """ - Adds a permission statement to the policy. - - :param module: - :param aws: - :return: - """ - - changed = False - - # set API parameters - params = ( - 'function_name', - 'statement_id', - 'action', - 'principal', - 'source_arn', - 'source_account', - 'event_source_token') - api_params = set_api_params(module, params) - qualifier = get_qualifier(module) - if qualifier: - api_params.update(Qualifier=qualifier) - - if not module.check_mode: - try: - client.add_permission(**api_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="adding permission to policy") - changed = True - - return changed - - -def remove_policy_permission(module, client): - """ - Removes a permission statement from the policy.
- - :param module: - :param aws: - :return: - """ - - changed = False - - # set API parameters - api_params = set_api_params(module, ('function_name', 'statement_id')) - qualifier = get_qualifier(module) - if qualifier: - api_params.update(Qualifier=qualifier) - - try: - if not module.check_mode: - client.remove_permission(**api_params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="removing permission from policy") - - return changed - - -def manage_state(module, lambda_client): - changed = False - current_state = 'absent' - state = module.params['state'] - action_taken = 'none' - - # check if the policy exists - current_policy_statement = get_policy_statement(module, lambda_client) - if current_policy_statement: - current_state = 'present' - - if state == 'present': - if current_state == 'present' and not policy_equal(module, current_policy_statement): - remove_policy_permission(module, lambda_client) - changed = add_policy_permission(module, lambda_client) - action_taken = 'updated' - if not current_state == 'present': - changed = add_policy_permission(module, lambda_client) - action_taken = 'added' - elif current_state == 'present': - # remove the policy statement - changed = remove_policy_permission(module, lambda_client) - action_taken = 'deleted' - - return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken)) - - -def setup_module_object(): - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']), - statement_id=dict(required=True, aliases=['sid']), - alias=dict(), - version=dict(type='int'), - action=dict(required=True, ), - principal=dict(required=True, ), - source_arn=dict(), - source_account=dict(), - event_source_token=dict(no_log=False), - ) - - return AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['alias', 'version'], - ['event_source_token', 'source_arn'], - ['event_source_token', 'source_account']], - ) - - -def main(): - """ - Main entry point. - - :return dict: ansible facts - """ - - module = setup_module_object() - client = module.client('lambda') - validate_params(module) - results = manage_state(module, client) - - module.exit_json(**results) - - -if __name__ == '__main__': - main() From f739446571ea6bb1975a260c8f4519c1198a970c Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 5 Oct 2022 17:04:40 +0200 Subject: [PATCH 593/683] Update extends_documentation_fragment with amazon.aws.boto3 (#1459) Update extends_documentation_fragment with amazon.aws.boto3 Depends-On: ansible/ansible-zuul-jobs#1654 SUMMARY As per ansible-collections/amazon.aws#985 add amazon.aws.boto3. 
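Returning to extract_statement() in lambda_policy above, here is a made-up policy document and the flattened dictionary the function would produce from it; the Sid, ARN and account values are purely illustrative:

policy = {
    'Statement': [{
        'Sid': 'lambda-s3-myBucket-create-data-log',
        'Action': 'lambda:InvokeFunction',
        'Principal': {'Service': 's3.amazonaws.com'},
        'Condition': {
            'ArnLike': {'AWS:SourceArn': 'arn:aws:s3:::bucketName'},
            'StringEquals': {'AWS:SourceAccount': '123456789012'},
        },
    }]
}

# extract_statement(policy, 'lambda-s3-myBucket-create-data-log') would return
# the flattened form that policy_equal() compares against the module params:
# {'action': 'lambda:InvokeFunction',
#  'principal': 's3.amazonaws.com',
#  'source_arn': 'arn:aws:s3:::bucketName',
#  'source_account': '123456789012'}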
ISSUE TYPE Docs Pull Request COMPONENT NAME several Reviewed-by: Jill R Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- accessanalyzer_validate_policy_info.py | 1 + acm_certificate.py | 1 + acm_certificate_info.py | 1 + api_gateway.py | 1 + api_gateway_domain.py | 1 + application_autoscaling_policy.py | 1 + autoscaling_complete_lifecycle_action.py | 1 + autoscaling_instance_refresh.py | 1 + autoscaling_instance_refresh_info.py | 1 + autoscaling_launch_config.py | 1 + autoscaling_launch_config_find.py | 1 + autoscaling_launch_config_info.py | 1 + autoscaling_lifecycle_hook.py | 1 + autoscaling_policy.py | 1 + autoscaling_scheduled_action.py | 1 + aws_region_info.py | 1 + batch_compute_environment.py | 1 + batch_job_definition.py | 1 + batch_job_queue.py | 1 + cloudformation_exports_info.py | 2 +- cloudformation_stack_set.py | 1 + cloudfront_distribution.py | 1 + cloudfront_distribution_info.py | 1 + cloudfront_invalidation.py | 1 + cloudfront_origin_access_identity.py | 1 + cloudfront_response_headers_policy.py | 1 + codebuild_project.py | 1 + codecommit_repository.py | 1 + codepipeline.py | 1 + config_aggregation_authorization.py | 1 + config_aggregator.py | 1 + config_delivery_channel.py | 1 + config_recorder.py | 1 + config_rule.py | 1 + data_pipeline.py | 1 + directconnect_confirm_connection.py | 1 + directconnect_connection.py | 1 + directconnect_gateway.py | 1 + directconnect_link_aggregation_group.py | 1 + directconnect_virtual_interface.py | 1 + dms_endpoint.py | 1 + dms_replication_subnet_group.py | 1 + dynamodb_table.py | 1 + dynamodb_ttl.py | 1 + ec2_ami_copy.py | 1 + ec2_customer_gateway.py | 1 + ec2_customer_gateway_info.py | 1 + ec2_launch_template.py | 1 + ec2_placement_group.py | 2 +- ec2_placement_group_info.py | 1 + ec2_snapshot_copy.py | 1 + ec2_transit_gateway.py | 1 + ec2_transit_gateway_info.py | 1 + ec2_transit_gateway_vpc_attachment.py | 1 + ec2_transit_gateway_vpc_attachment_info.py | 1 + ec2_vpc_egress_igw.py | 1 + ec2_vpc_nacl.py | 1 + ec2_vpc_nacl_info.py | 2 +- ec2_vpc_peer.py | 1 + ec2_vpc_peering_info.py | 1 + ec2_vpc_vgw.py | 1 + ec2_vpc_vgw_info.py | 1 + ec2_vpc_vpn.py | 1 + ec2_vpc_vpn_info.py | 1 + ec2_win_password.py | 1 + ecs_attribute.py | 1 + ecs_cluster.py | 1 + ecs_ecr.py | 1 + ecs_service.py | 1 + ecs_service_info.py | 1 + ecs_tag.py | 1 + ecs_task.py | 1 + ecs_taskdefinition.py | 1 + ecs_taskdefinition_info.py | 1 + efs.py | 1 + efs_info.py | 1 + efs_tag.py | 1 + eks_cluster.py | 2 ++ eks_fargate_profile.py | 1 + elasticache.py | 1 + elasticache_info.py | 1 + elasticache_parameter_group.py | 1 + elasticache_snapshot.py | 1 + elasticache_subnet_group.py | 1 + elasticbeanstalk_app.py | 1 + elb_classic_lb_info.py | 1 + elb_instance.py | 1 + elb_network_lb.py | 1 + elb_target.py | 1 + elb_target_group.py | 1 + elb_target_group_info.py | 1 + elb_target_info.py | 1 + glue_connection.py | 1 + glue_crawler.py | 1 + glue_job.py | 1 + iam_access_key.py | 1 + iam_access_key_info.py | 1 + iam_group.py | 2 +- iam_managed_policy.py | 1 + iam_mfa_device_info.py | 1 + iam_password_policy.py | 1 + iam_role.py | 1 + iam_role_info.py | 1 + iam_saml_federation.py | 1 + iam_server_certificate.py | 1 + iam_server_certificate_info.py | 1 + inspector_target.py | 1 + kinesis_stream.py | 1 + lightsail.py | 1 + lightsail_static_ip.py | 1 + msk_cluster.py | 1 + msk_config.py | 1 + networkfirewall.py | 1 + networkfirewall_info.py | 1 + networkfirewall_policy.py | 1 + networkfirewall_policy_info.py | 1 + networkfirewall_rule_group.py | 1 + 
networkfirewall_rule_group_info.py | 1 + opensearch.py | 1 + opensearch_info.py | 1 + redshift.py | 1 + redshift_cross_region_snapshots.py | 1 + redshift_info.py | 1 + redshift_subnet_group.py | 1 + s3_bucket_info.py | 1 + s3_bucket_notification.py | 1 + s3_cors.py | 1 + s3_lifecycle.py | 1 + s3_logging.py | 1 + s3_metrics_configuration.py | 1 + s3_sync.py | 1 + s3_website.py | 1 + secretsmanager_secret.py | 1 + ses_identity.py | 1 + ses_identity_policy.py | 1 + ses_rule_set.py | 1 + sns.py | 1 + sns_topic.py | 1 + sns_topic_info.py | 1 + sqs_queue.py | 1 + ssm_parameter.py | 1 + stepfunctions_state_machine.py | 1 + stepfunctions_state_machine_execution.py | 1 + storagegateway_info.py | 1 + sts_assume_role.py | 1 + sts_session_token.py | 1 + waf_condition.py | 1 + waf_info.py | 1 + waf_rule.py | 1 + waf_web_acl.py | 1 + wafv2_ip_set.py | 1 + wafv2_ip_set_info.py | 1 + wafv2_resources.py | 1 + wafv2_resources_info.py | 1 + wafv2_rule_group.py | 1 + wafv2_rule_group_info.py | 1 + wafv2_web_acl.py | 1 + wafv2_web_acl_info.py | 1 + 158 files changed, 159 insertions(+), 4 deletions(-) diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py index 218bd3b781e..e589d0cb011 100644 --- a/accessanalyzer_validate_policy_info.py +++ b/accessanalyzer_validate_policy_info.py @@ -65,6 +65,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/acm_certificate.py b/acm_certificate.py index 8264404be7e..abdecadcc78 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -177,6 +177,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/acm_certificate_info.py b/acm_certificate_info.py index f546bf2c1be..a84d7c0b065 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -45,6 +45,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/api_gateway.py b/api_gateway.py index 787a7f4d1ec..6627c762bd9 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -105,6 +105,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 notes: - A future version of this module will probably use tags or another diff --git a/api_gateway_domain.py b/api_gateway_domain.py index 6a3a6c3ace1..9b4ec85724a 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -60,6 +60,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 notes: - Does not create a DNS entry on Route53, for that use the M(community.aws.route53) module. 
- Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index 51f27ed580c..d20c107de9c 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -106,6 +106,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py index 63fa3b63ef8..8f585a10288 100644 --- a/autoscaling_complete_lifecycle_action.py +++ b/autoscaling_complete_lifecycle_action.py @@ -39,6 +39,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_instance_refresh.py b/autoscaling_instance_refresh.py index 43ef665e3f3..94c2bb38c37 100644 --- a/autoscaling_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -63,6 +63,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_instance_refresh_info.py b/autoscaling_instance_refresh_info.py index 4d9cb7e05b7..3037d0b5295 100644 --- a/autoscaling_instance_refresh_info.py +++ b/autoscaling_instance_refresh_info.py @@ -43,6 +43,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index ea50eb7a57e..59972a10e4b 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -184,6 +184,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/autoscaling_launch_config_find.py b/autoscaling_launch_config_find.py index e1f8ec5308b..ae8f187c05f 100644 --- a/autoscaling_launch_config_find.py +++ b/autoscaling_launch_config_find.py @@ -42,6 +42,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py index 1606201c999..1c98d7588d0 100644 --- a/autoscaling_launch_config_info.py +++ b/autoscaling_launch_config_info.py @@ -50,6 +50,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/autoscaling_lifecycle_hook.py b/autoscaling_lifecycle_hook.py index 55d28932338..cf07b7681ac 100644 --- a/autoscaling_lifecycle_hook.py +++ b/autoscaling_lifecycle_hook.py @@ -76,6 +76,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/autoscaling_policy.py b/autoscaling_policy.py index 3fd63abc52b..a29389b0e06 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -191,6 +191,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' - name: Simple Scale Down policy diff --git a/autoscaling_scheduled_action.py b/autoscaling_scheduled_action.py index 60c91403ff0..f1433c522bc 100644 --- a/autoscaling_scheduled_action.py +++ b/autoscaling_scheduled_action.py @@ -71,6 +71,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/aws_region_info.py b/aws_region_info.py index fc4c38b2579..126455a8cff 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -28,6 +28,7 @@ extends_documentation_fragment: - 
amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/batch_compute_environment.py b/batch_compute_environment.py index fbe69139457..555cfccbe55 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -122,6 +122,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/batch_job_definition.py b/batch_job_definition.py index c62f37fd8ba..2af9093609e 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -176,6 +176,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/batch_job_queue.py b/batch_job_queue.py index 8a6224dfb68..ef48896a473 100644 --- a/batch_job_queue.py +++ b/batch_job_queue.py @@ -65,6 +65,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index dc8caae55a4..f7e71e2f8d1 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -17,7 +17,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +- amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index c3f631b1a91..2bb0befc0a4 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -172,6 +172,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 48ff7247e1c..9735e9db3c6 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -24,6 +24,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags options: diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py index a9df0d8a9d6..cb97664fab2 100644 --- a/cloudfront_distribution_info.py +++ b/cloudfront_distribution_info.py @@ -145,6 +145,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index 0599a71de3e..767a1d18182 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -21,6 +21,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 options: diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index dc79c9bd1b2..c6879d0c5a7 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -24,6 +24,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 options: diff --git a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py index 813f8c657a9..01b38a3bdad 100644 --- a/cloudfront_response_headers_policy.py +++ b/cloudfront_response_headers_policy.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 options: diff --git a/codebuild_project.py b/codebuild_project.py index bef5d410748..873b7401005 100644 --- a/codebuild_project.py +++ b/codebuild_project.py @@ -188,6 +188,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' diff --git a/codecommit_repository.py b/codecommit_repository.py index 689a1c053d7..5e05de9a61f 100644 --- a/codecommit_repository.py +++ b/codecommit_repository.py @@ -40,6 
+40,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' RETURN = ''' diff --git a/codepipeline.py b/codepipeline.py index f95094bcbdc..5c5935cb9ac 100644 --- a/codepipeline.py +++ b/codepipeline.py @@ -77,6 +77,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py index 1ebfe0d94d6..7b92abb7f4a 100644 --- a/config_aggregation_authorization.py +++ b/config_aggregation_authorization.py @@ -38,6 +38,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/config_aggregator.py b/config_aggregator.py index e4c23b9b5fa..3dc4c6faaf7 100644 --- a/config_aggregator.py +++ b/config_aggregator.py @@ -73,6 +73,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/config_delivery_channel.py b/config_delivery_channel.py index 333c796fc71..371bd6685c1 100644 --- a/config_delivery_channel.py +++ b/config_delivery_channel.py @@ -51,6 +51,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/config_recorder.py b/config_recorder.py index 65a8c9d1f9f..d90ce46cd22 100644 --- a/config_recorder.py +++ b/config_recorder.py @@ -64,6 +64,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/config_rule.py b/config_rule.py index 36654c735c4..d14f4d16ca9 100644 --- a/config_rule.py +++ b/config_rule.py @@ -88,6 +88,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/data_pipeline.py b/data_pipeline.py index e0ddaa936de..d354a3c4e22 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -18,6 +18,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 description: - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) given to the datapipeline. 
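The amazon.aws.boto3 entry added throughout this patch refers to a documentation fragment shipped in the amazon.aws collection; ansible-doc merges the fragment's contents into each module's rendered documentation. The fragment plugin follows the standard shape sketched below — the requirements listed here are an assumption for illustration; the authoritative version pins live in amazon.aws:

# amazon.aws/plugins/doc_fragments/boto3.py -- standard doc-fragment shape;
# the DOCUMENTATION contents below are illustrative, not copied from amazon.aws.
class ModuleDocFragment(object):
    DOCUMENTATION = r'''
requirements:
  - python >= 3.6       # assumed minimum version
  - boto3 >= 1.18.0     # assumed minimum version
  - botocore >= 1.21.0  # assumed minimum version
'''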
diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py index 531683af2e4..45180ac6c16 100644 --- a/directconnect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -24,6 +24,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: name: description: diff --git a/directconnect_connection.py b/directconnect_connection.py index da2a06b9227..28d86717d7a 100644 --- a/directconnect_connection.py +++ b/directconnect_connection.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: state: description: diff --git a/directconnect_gateway.py b/directconnect_gateway.py index 987419d79c9..1433b387b4d 100644 --- a/directconnect_gateway.py +++ b/directconnect_gateway.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: state: description: diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index ab8a04a9d98..cc7122712e6 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -20,6 +20,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: state: description: diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py index 4f96459e179..059cd74250c 100644 --- a/directconnect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -88,6 +88,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' RETURN = r''' diff --git a/dms_endpoint.py b/dms_endpoint.py index bc2d6160af9..fb899d6690a 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -145,6 +145,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 917f27438ff..fb5d596134b 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -45,6 +45,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/dynamodb_table.py b/dynamodb_table.py index 943cdea02b1..28d334fc9c4 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -144,6 +144,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index 2bdd9a21d45..9cbbb3e5e77 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -36,6 +36,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 46be5ec2024..d88983f8452 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -73,6 +73,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index f07e92f4f7c..3b176b5ee63 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -51,6 +51,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index 7b55d433b99..f6d15464089 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -28,6 +28,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git 
a/ec2_launch_template.py b/ec2_launch_template.py index 85fd8d6e9c5..7907be4b5d9 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -19,6 +19,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 author: - Ryan Scott Brown (@ryansb) options: diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 4b90baf57f4..c27917df9f3 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -47,7 +47,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +- amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index f4ee9b753b0..d22f133ae6e 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -26,6 +26,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 62612ad0a0a..57de4e43bdc 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -58,6 +58,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 73822ebd87d..298646cf819 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -76,6 +76,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 8739815693b..8ee0705f8f0 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -29,6 +29,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py index 20178ed5f19..5540590219b 100644 --- a/ec2_transit_gateway_vpc_attachment.py +++ b/ec2_transit_gateway_vpc_attachment.py @@ -100,6 +100,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py index 9e51ad19bda..ea9aec0419c 100644 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ b/ec2_transit_gateway_vpc_attachment_info.py @@ -43,6 +43,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 23c2f86abd0..dbcf15b12b5 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -29,6 +29,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 03cdef89c39..85f8d8baa92 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -76,6 +76,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags notes: - Support for I(purge_tags) was added in release 4.0.0. 
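A detail worth noting while reading these hunks: some files gain the entry flush with the key (+- amazon.aws.boto3) and others gain it indented (+  - amazon.aws.boto3). Both are equivalent YAML, since a block sequence may sit at the same indentation as its mapping key. A quick sketch demonstrating the equivalence:

# Both indentation styles seen in these hunks parse to the same structure
# (requires PyYAML).
import yaml

flush = yaml.safe_load("extends_documentation_fragment:\n- amazon.aws.boto3\n")
indented = yaml.safe_load("extends_documentation_fragment:\n  - amazon.aws.boto3\n")
assert flush == indented == {"extends_documentation_fragment": ["amazon.aws.boto3"]}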
diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index b8d256470ee..b85c9423663 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -36,7 +36,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 4abf9e990e9..f23ffae1952 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -59,6 +59,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index cdb8f8ca8b0..3996596aec1 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -28,6 +28,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 126f5ff920d..8332e10063d 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -57,6 +57,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 5ddb04d2ae3..3f07a735390 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -29,6 +29,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 039796701f1..462c662e51a 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -17,6 +17,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws + - amazon.aws.boto3 - amazon.aws.tags author: - "Sloane Hertel (@s-hertel)" diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index b1d2bbee43e..94a6dcc9a91 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -30,6 +30,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ec2_win_password.py b/ec2_win_password.py index 2889f334aa8..9b92c3e4f92 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -51,6 +51,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 requirements: - cryptography diff --git a/ecs_attribute.py b/ecs_attribute.py index 7384dfb4692..6efe701d1e3 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -56,6 +56,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ecs_cluster.py b/ecs_cluster.py index c186c0da5c5..3866c1f0fae 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -44,6 +44,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ecs_ecr.py b/ecs_ecr.py index d2947753f38..b392f794a3e 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -90,6 +90,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ecs_service.py b/ecs_service.py index e04f296060d..90785ee20c7 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -267,6 +267,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/ecs_service_info.py b/ecs_service_info.py index 49c2676c7e1..f174a31cddf 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -44,6 +44,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/ecs_tag.py b/ecs_tag.py index 87c000cd342..8698a7bbd43 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -55,6 +55,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' 
diff --git a/ecs_task.py b/ecs_task.py index bdc5cc98718..54948ce213a 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -101,6 +101,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 1c0c863750d..6a994919855 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -561,6 +561,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index b3772a7f7e3..77f5a476ea4 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -29,6 +29,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/efs.py b/efs.py index 68ebe46d395..7f855c6db2f 100644 --- a/efs.py +++ b/efs.py @@ -104,6 +104,7 @@ - amazon.aws.aws - amazon.aws.ec2 - amazon.aws.tags + - amazon.aws.boto3 ''' diff --git a/efs_info.py b/efs_info.py index a44f402ac64..eb1d23224d5 100644 --- a/efs_info.py +++ b/efs_info.py @@ -39,6 +39,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/efs_tag.py b/efs_tag.py index 209c2a276d6..1529fa94489 100644 --- a/efs_tag.py +++ b/efs_tag.py @@ -46,6 +46,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/eks_cluster.py b/eks_cluster.py index f71e1514a87..1b8e7a866fe 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -60,6 +60,8 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 + ''' EXAMPLES = r''' diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 313f8ad33aa..0e6fabc0844 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -69,6 +69,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/elasticache.py b/elasticache.py index 3b3196dd133..75aedb9a301 100644 --- a/elasticache.py +++ b/elasticache.py @@ -94,6 +94,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/elasticache_info.py b/elasticache_info.py index 154567ac581..1f8af9a330c 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 588dcf12214..24738eb1121 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -18,6 +18,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 options: group_family: diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index d9e11345a69..fa18b80c0d3 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -18,6 +18,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 options: name: description: diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 6816cc364d3..0f5f5e75e13 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -42,6 +42,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py index de18681a6d0..b5b32c178b2 100644 --- a/elasticbeanstalk_app.py +++ b/elasticbeanstalk_app.py @@ -45,6 +45,7 @@ 
extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 25d4eadbf63..4b28fafd388 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -36,6 +36,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/elb_instance.py b/elb_instance.py index dc79cd6ca9e..ecea32a6309 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -60,6 +60,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r""" diff --git a/elb_network_lb.py b/elb_network_lb.py index a65c42a2c4c..6dcdfd209c3 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -129,6 +129,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. diff --git a/elb_target.py b/elb_target.py index 4e3601a70a2..cff46a62ace 100644 --- a/elb_target.py +++ b/elb_target.py @@ -71,6 +71,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 notes: - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it. diff --git a/elb_target_group.py b/elb_target_group.py index c11f622226f..ad7e223c57f 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -199,6 +199,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags notes: diff --git a/elb_target_group_info.py b/elb_target_group_info.py index bb27bc30ec4..86cc03782f8 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -42,6 +42,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/elb_target_info.py b/elb_target_info.py index ad2f0702879..4f91ac7f3f5 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -28,6 +28,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/glue_connection.py b/glue_connection.py index 1f278d66b3b..bcfacb1712a 100644 --- a/glue_connection.py +++ b/glue_connection.py @@ -74,6 +74,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/glue_crawler.py b/glue_crawler.py index ffe6efd1636..a47b8eb3f93 100644 --- a/glue_crawler.py +++ b/glue_crawler.py @@ -79,6 +79,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/glue_job.py b/glue_job.py index 3fc2ba929fb..47d6156d764 100644 --- a/glue_job.py +++ b/glue_job.py @@ -105,6 +105,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/iam_access_key.py b/iam_access_key.py index 1d5701e9d74..3207741ab94 100644 --- a/iam_access_key.py +++ b/iam_access_key.py @@ -56,6 +56,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/iam_access_key_info.py b/iam_access_key_info.py index 9251cb846f6..91429eff940 100644 --- a/iam_access_key_info.py +++ b/iam_access_key_info.py @@ -26,6 +26,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/iam_group.py b/iam_group.py index 
759be42a74d..0bc19d002ee 100644 --- a/iam_group.py +++ b/iam_group.py @@ -70,7 +70,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 - +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 4b3dbfebda4..f86f019d536 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -49,6 +49,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index a2801ca9482..16abae17087 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' RETURN = """ diff --git a/iam_password_policy.py b/iam_password_policy.py index 51291092b0b..19614d26da6 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -77,6 +77,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/iam_role.py b/iam_role.py index 76cd04950d3..cccc062a494 100644 --- a/iam_role.py +++ b/iam_role.py @@ -93,6 +93,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/iam_role_info.py b/iam_role_info.py index 561b9f92d70..84e9a31718e 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -31,6 +31,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 70bd4461d10..f79e4c2c64c 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -45,6 +45,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 author: - Tony (@axc450) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 8520c5bff0e..f3d5c5808df 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -78,6 +78,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index cbe2ab5459c..ee0dc590dc5 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/inspector_target.py b/inspector_target.py index ffcfb343ad5..a664f2f2dd5 100644 --- a/inspector_target.py +++ b/inspector_target.py @@ -41,6 +41,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/kinesis_stream.py b/kinesis_stream.py index 530bc0b7d3b..e4c5d76df80 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -75,6 +75,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/lightsail.py b/lightsail.py index 40e058219ce..c01954a06b9 100644 --- a/lightsail.py +++ b/lightsail.py @@ -70,6 +70,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py index 4f3463652b4..799ff629df8 100644 --- a/lightsail_static_ip.py +++ b/lightsail_static_ip.py @@ -31,6 +31,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' diff --git a/msk_cluster.py b/msk_cluster.py index 1dccf4558f3..651cd94e6af 100644 --- a/msk_cluster.py +++ b/msk_cluster.py @@ -202,6 +202,7 @@ extends_documentation_fragment: 
- amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags notes: - All operations are time consuming, for example create takes 20-30 minutes, diff --git a/msk_config.py b/msk_config.py index 0547a2a8bbd..2a51414aa67 100644 --- a/msk_config.py +++ b/msk_config.py @@ -44,6 +44,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 """ EXAMPLES = r""" diff --git a/networkfirewall.py b/networkfirewall.py index 9e9b02d0edc..9bb6ebb753e 100644 --- a/networkfirewall.py +++ b/networkfirewall.py @@ -106,6 +106,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/networkfirewall_info.py b/networkfirewall_info.py index 48db97ea67a..85df6b026ba 100644 --- a/networkfirewall_info.py +++ b/networkfirewall_info.py @@ -38,6 +38,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py index 18a5565129b..1026138a6b4 100644 --- a/networkfirewall_policy.py +++ b/networkfirewall_policy.py @@ -145,6 +145,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/networkfirewall_policy_info.py b/networkfirewall_policy_info.py index a91536b6e0c..1f170f5b304 100644 --- a/networkfirewall_policy_info.py +++ b/networkfirewall_policy_info.py @@ -30,6 +30,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py index fef080bcd3c..c8e2ea38bd3 100644 --- a/networkfirewall_rule_group.py +++ b/networkfirewall_rule_group.py @@ -269,6 +269,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py index b21e060795b..a9cec3778bb 100644 --- a/networkfirewall_rule_group_info.py +++ b/networkfirewall_rule_group_info.py @@ -47,6 +47,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/opensearch.py b/opensearch.py index 1cd9dd51e67..95fc45a2f68 100644 --- a/opensearch.py +++ b/opensearch.py @@ -392,6 +392,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags """ diff --git a/opensearch_info.py b/opensearch_info.py index 50b6ddc08d8..700ad26fd75 100644 --- a/opensearch_info.py +++ b/opensearch_info.py @@ -33,6 +33,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 """ EXAMPLES = ''' diff --git a/redshift.py b/redshift.py index 475c2101a5e..27e95989347 100644 --- a/redshift.py +++ b/redshift.py @@ -172,6 +172,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index 3e3653473f2..1c42ea80208 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -56,6 +56,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws +- amazon.aws.boto3 ''' diff --git a/redshift_info.py b/redshift_info.py index a6a8a578a37..ff4da774ea1 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -32,6 +32,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws +- amazon.aws.boto3 ''' diff --git 
a/redshift_subnet_group.py b/redshift_subnet_group.py index 89e8bfa8042..3c7ca31f500 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -42,6 +42,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 author: - "Jens Carl (@j-carl), Hothead Games Inc." ''' diff --git a/s3_bucket_info.py b/s3_bucket_info.py index 81e1cd7217b..541a02b0f93 100644 --- a/s3_bucket_info.py +++ b/s3_bucket_info.py @@ -116,6 +116,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 21b598d3bd9..8df48fa56ed 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -102,6 +102,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/s3_cors.py b/s3_cors.py index 5500fecab64..753e395f9eb 100644 --- a/s3_cors.py +++ b/s3_cors.py @@ -38,6 +38,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 0e74feec7c1..b434c09b051 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -142,6 +142,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/s3_logging.py b/s3_logging.py index a9359ca2d3b..011baa951da 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -38,6 +38,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index 2480d1d7560..97eee52daed 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -49,6 +49,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/s3_sync.py b/s3_sync.py index 0a1797c1133..686f7cee488 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -129,6 +129,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/s3_website.py b/s3_website.py index 4b9e911662f..81d3169cdb1 100644 --- a/s3_website.py +++ b/s3_website.py @@ -46,6 +46,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index f35f28762f3..453135e2412 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -83,6 +83,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws + - amazon.aws.boto3 - amazon.aws.tags notes: - Support for I(purge_tags) was added in release 4.0.0. 
diff --git a/ses_identity.py b/ses_identity.py index 4f64b2be89b..997692df6a0 100644 --- a/ses_identity.py +++ b/ses_identity.py @@ -90,6 +90,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ses_identity_policy.py b/ses_identity_policy.py index 0b93921ec5a..16d9f1deda2 100644 --- a/ses_identity_policy.py +++ b/ses_identity_policy.py @@ -43,6 +43,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/ses_rule_set.py b/ses_rule_set.py index cf516048356..b42ac8088f3 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -48,6 +48,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = """ diff --git a/sns.py b/sns.py index fc400bac5e0..44160e53a52 100644 --- a/sns.py +++ b/sns.py @@ -81,6 +81,7 @@ extends_documentation_fragment: - amazon.aws.ec2 - amazon.aws.aws +- amazon.aws.boto3 ''' EXAMPLES = """ diff --git a/sns_topic.py b/sns_topic.py index e569397e88c..8ef63690fea 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -152,6 +152,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r""" diff --git a/sns_topic_info.py b/sns_topic_info.py index 8d5a3d01532..d3180ed6584 100644 --- a/sns_topic_info.py +++ b/sns_topic_info.py @@ -23,6 +23,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' EXAMPLES = r''' diff --git a/sqs_queue.py b/sqs_queue.py index 3e8931265ab..371b1f514a5 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -90,6 +90,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/ssm_parameter.py b/ssm_parameter.py index 7a6e7eb7c20..af4de1c15fb 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -89,6 +89,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py index 80e617b3e1c..c141610bbe0 100644 --- a/stepfunctions_state_machine.py +++ b/stepfunctions_state_machine.py @@ -47,6 +47,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags author: - Tom De Keyser (@tdekeyser) diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py index fbd2c7b164e..aacfa987f4a 100644 --- a/stepfunctions_state_machine_execution.py +++ b/stepfunctions_state_machine_execution.py @@ -50,6 +50,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 author: - Prasad Katti (@prasadkatti) diff --git a/storagegateway_info.py b/storagegateway_info.py index 87825711e1f..3f3c3ae2f94 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -47,6 +47,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' RETURN = ''' diff --git a/sts_assume_role.py b/sts_assume_role.py index c7435ad6fdc..8e5a3b4fed2 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -55,6 +55,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' RETURN = ''' diff --git a/sts_session_token.py b/sts_session_token.py index 137d03c8e0d..03df560e9ce 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -34,6 +34,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' RETURN = """ diff --git 
a/waf_condition.py b/waf_condition.py index e44e889a8fc..63585d50cbb 100644 --- a/waf_condition.py +++ b/waf_condition.py @@ -23,6 +23,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: name: diff --git a/waf_info.py b/waf_info.py index e91a6d62672..6a49a886e9c 100644 --- a/waf_info.py +++ b/waf_info.py @@ -31,6 +31,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' EXAMPLES = ''' diff --git a/waf_rule.py b/waf_rule.py index 201529f25d1..a994b183149 100644 --- a/waf_rule.py +++ b/waf_rule.py @@ -23,6 +23,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: name: diff --git a/waf_web_acl.py b/waf_web_acl.py index d814736ad32..9d5ad59e46f 100644 --- a/waf_web_acl.py +++ b/waf_web_acl.py @@ -22,6 +22,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 options: name: diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index 33fb7c32f68..7a9011e9b3c 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -65,6 +65,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 - amazon.aws.tags ''' diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index 6e3c1075257..b92c9a816d9 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -30,6 +30,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/wafv2_resources.py b/wafv2_resources.py index bbed06a0499..527ee108732 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -39,6 +39,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index d45c274d481..3a2a7b5dd32 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -30,6 +30,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 +- amazon.aws.boto3 ''' diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 44dc9ba88b5..8e46853c8d8 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -70,6 +70,7 @@ - amazon.aws.aws - amazon.aws.ec2 - amazon.aws.tags +- amazon.aws.boto3 ''' diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index 1daa2dd1cf7..a42bea0c2e6 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -35,6 +35,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 518234bbf5d..f91fe64e608 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -105,6 +105,7 @@ - amazon.aws.aws - amazon.aws.ec2 - amazon.aws.tags + - amazon.aws.boto3 ''' diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 86fd603e7cc..13be05db5c3 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -30,6 +30,7 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 + - amazon.aws.boto3 ''' From d4518b39eaeff28f3b11538f848df515aa3d72b1 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 7 Oct 2022 14:19:12 +0200 Subject: [PATCH 594/683] ecs_ecr - Fix `RepositoryNotFoundException` when creating Repositories in check mode (#1550) ecs_ecr - Fix `RepositoryNotFoundException` when creating Repositories in check mode SUMMARY When trying to create a repository in check mode, the module throws a RepositoryNotFoundException while trying to access policy objects on the repositories. This change also fixes and re-enables the basic integration tests. ISSUE TYPE Bugfix Pull
Request COMPONENT NAME ecs_ecr ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- ecs_ecr.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index b392f794a3e..1323bc6c35a 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -246,7 +246,7 @@ def get_repository_policy(self, registry_id, name): repositoryName=name, **build_kwargs(registry_id)) text = res.get('policyText') return text and json.loads(text) - except is_boto3_error_code('RepositoryPolicyNotFoundException'): + except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): return None def create_repository(self, registry_id, name, image_tag_mutability): @@ -334,7 +334,7 @@ def get_lifecycle_policy(self, registry_id, name): repositoryName=name, **build_kwargs(registry_id)) text = res.get('lifecyclePolicyText') return text and json.loads(text) - except is_boto3_error_code('LifecyclePolicyNotFoundException'): + except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']): return None def put_lifecycle_policy(self, registry_id, name, policy_text): From 86cd5287bbac0416af2e7dac11da3d6414fce3db Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 7 Oct 2022 14:19:15 +0200 Subject: [PATCH 595/683] ecs_ecr - Use compare_policies instead of naive dict sort (#1551) ecs_ecr - Use compare_policies instead of naive dict sort Depends-On: #1550 SUMMARY When comparing policies on the repos ecs_ecr currently uses a very naive sort function. Since we have something more comprehensive, use it. ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/modules/ecs_ecr.py ADDITIONAL INFORMATION ecs_ecr is currently the only module using sort_json_policy_dict and it has poor test coverage. Reviewed-by: Markus Bergholz --- ecs_ecr.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 1323bc6c35a..d472af2756c 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -207,7 +207,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict def build_kwargs(registry_id): @@ -457,17 +456,11 @@ def run(ecr, params): elif lifecycle_policy_text is not None: try: - lifecycle_policy = sort_json_policy_dict(lifecycle_policy) result['lifecycle_policy'] = lifecycle_policy - original_lifecycle_policy = ecr.get_lifecycle_policy( registry_id, name) - if original_lifecycle_policy: - original_lifecycle_policy = sort_json_policy_dict( - original_lifecycle_policy) - - if original_lifecycle_policy != lifecycle_policy: + if compare_policies(original_lifecycle_policy, lifecycle_policy): ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text) result['changed'] = True From 1f1a946d07c5f7f8ba44fbcdddb3496f59170f64 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Sat, 8 Oct 2022 20:51:16 +0200 Subject: [PATCH 596/683] wafv2_rule_group_info: remove state parameter (#1555) wafv2_rule_group_info: remove state parameter SUMMARY closes #1547 ISSUE TYPE COMPONENT NAME wafv2_rule_group_info ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- wafv2_rule_group_info.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index a42bea0c2e6..46c44801a63 100644 --- 
a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -15,11 +15,6 @@ description: - Get informations about existing wafv2 rule groups. options: - state: - description: - - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01. - required: false - type: str name: description: - The name of the rule group. @@ -43,7 +38,6 @@ - name: rule group info community.aws.wafv2_rule_group_info: name: test02 - state: present scope: REGIONAL ''' @@ -119,7 +113,6 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - state=dict(type='str', required=False), name=dict(type='str', required=True), scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) ) @@ -129,17 +122,11 @@ def main(): supports_check_mode=True ) - state = module.params.get("state") name = module.params.get("name") scope = module.params.get("scope") wafv2 = module.client('wafv2') - if state: - module.deprecate( - 'The state parameter does nothing, has been deprecated, and will be removed in a future release.', - version='6.0.0', collection_name='community.aws') - # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None From eee4df93ab02e61461a67c14625c0c13b959b5c9 Mon Sep 17 00:00:00 2001 From: Gabriel PREDA Date: Wed, 26 Oct 2022 18:29:13 +0300 Subject: [PATCH 597/683] cloudfront_distribution: add connection_attempts and connection_timeout parameters (#1435) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cloudfront_distribution: add connection_attempts and connection_timeout parameters Depends-On: #1569 SUMMARY Missing connection_attempts & connection_timeout. Closes #1434. ISSUE TYPE Docs Pull Request COMPONENT NAME cloudfront_distribution Reviewed-by: Brian Scholer Reviewed-by: Gonéri Le Bouder --- cloudfront_distribution.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 9735e9db3c6..eff7971847c 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -168,7 +168,16 @@ origin_keepalive_timeout: description: A keep-alive timeout (in seconds). type: int - + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + The minimum number is C(1), the maximum is C(3). + type: int + default: 3 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + The minimum timeout is C(1) second, the maximum is C(10) seconds. + type: int + default: 10 purge_origins: description: Whether to remove any origins that aren't listed in I(origins). default: false @@ -1277,6 +1286,16 @@ returned: always type: str sample: '' + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + returned: always + type: int + sample: 3 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + returned: always + type: int + sample: 10 s3_origin_config: description: Origin access identity configuration for S3 Origin. 
returned: when s3_origin_access_identity_enabled is true From f750ff9f2a868462e846a82ca288609ce3b02a91 Mon Sep 17 00:00:00 2001 From: Sharvari Khedkar Date: Tue, 1 Nov 2022 00:06:38 -0700 Subject: [PATCH 598/683] elb_target_group: Add support for protocol_version parameter (#1496) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit elb_target_group: Add support for protocol_version parameter SUMMARY Added support for protocol_version param in elb_target_group. Fixes 1422. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_target_group Reviewed-by: Gonéri Le Bouder Reviewed-by: Sharvari Khedkar Reviewed-by: Markus Bergholz --- elb_target_group.py | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index ad7e223c57f..f9849264200 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -89,6 +89,14 @@ required: false choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] type: str + protocol_version: + description: + - Specifies protocol version. + - The protocol_version parameter is immutable and cannot be changed when updating an elb_target_group. + required: false + choices: ['GRPC', 'HTTP1', 'HTTP2'] + type: str + version_added: 5.1.0 state: description: - Create or destroy the target group. @@ -217,6 +225,15 @@ vpc_id: vpc-01234567 state: present +- name: Create a target group with protocol_version 'GRPC' + community.aws.elb_target_group: + name: mytargetgroup + protocol: http + port: 80 + vpc_id: vpc-01234567 + protocol_version: GRPC + state: present + - name: Modify the target group with a custom health check community.aws.elb_target_group: name: mytargetgroup @@ -567,6 +584,8 @@ def create_or_update_target_group(connection, module): params['TargetType'] = target_type if target_type != "lambda": params['Protocol'] = module.params.get("protocol").upper() + if module.params.get('protocol_version') is not None: + params['ProtocolVersion'] = module.params.get('protocol_version') params['Port'] = module.params.get("port") params['VpcId'] = module.params.get("vpc_id") tags = module.params.get("tags") @@ -608,7 +627,11 @@ def create_or_update_target_group(connection, module): if module.params.get("successful_response_codes") is not None: params['Matcher'] = {} - params['Matcher']['HttpCode'] = module.params.get("successful_response_codes") + code_key = 'HttpCode' + protocol_version = module.params.get('protocol_version') + if protocol_version is not None and protocol_version.upper() == "GRPC": + code_key = 'GrpcCode' + params['Matcher'][code_key] = module.params.get("successful_response_codes") # Get target group target_group = get_target_group(connection, module) @@ -658,11 +681,14 @@ def create_or_update_target_group(connection, module): # Matcher (successful response codes) # TODO: required and here? 
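# Note (editorial sketch, not part of the upstream hunk): the update path below
# mirrors the creation path above. When the existing target group reports
# ProtocolVersion == 'GRPC', the matcher is compared under Matcher['GrpcCode']
# (gRPC status codes, e.g. '0' or '12') rather than Matcher['HttpCode']
# (HTTP status codes, e.g. '200,301').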
if 'Matcher' in params: - current_matcher_list = target_group['Matcher']['HttpCode'].split(',') - requested_matcher_list = params['Matcher']['HttpCode'].split(',') + code_key = 'HttpCode' + if target_group['ProtocolVersion'] == 'GRPC': + code_key = 'GrpcCode' + current_matcher_list = target_group['Matcher'][code_key].split(',') + requested_matcher_list = params['Matcher'][code_key].split(',') if set(current_matcher_list) != set(requested_matcher_list): health_check_params['Matcher'] = {} - health_check_params['Matcher']['HttpCode'] = ','.join(requested_matcher_list) + health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list) try: if health_check_params: @@ -913,6 +939,7 @@ def main(): name=dict(required=True), port=dict(type='int'), protocol=dict(choices=protocols_list), + protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']), purge_tags=dict(default=True, type='bool'), stickiness_enabled=dict(type='bool'), stickiness_type=dict(), From 97b2ef6545dd454d4c26ed1e5a0f72f82c6a6205 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 2 Nov 2022 11:49:57 +0100 Subject: [PATCH 599/683] Fix non-matching defaults in docs (#1576) Fix non-matching defaults in docs Depends-On: #1579 SUMMARY Fix various non-matching default values exposed by ansible/ansible#79267. ISSUE TYPE Docs Pull Request COMPONENT NAME various Reviewed-by: Markus Bergholz --- api_gateway.py | 2 ++ autoscaling_launch_config.py | 1 + batch_job_definition.py | 5 +++++ cloudformation_stack_set.py | 1 + cloudfront_distribution.py | 1 + codecommit_repository.py | 1 + data_pipeline.py | 4 ++++ ec2_ami_copy.py | 1 + ec2_customer_gateway_info.py | 2 ++ ec2_snapshot_copy.py | 1 + ec2_transit_gateway_info.py | 2 ++ ec2_vpc_nacl.py | 1 + ec2_vpc_peering_info.py | 1 + ec2_vpc_vgw_info.py | 1 + ec2_vpc_vpn.py | 3 +++ ec2_vpc_vpn_info.py | 2 ++ ecs_service.py | 9 +++++++++ ecs_taskdefinition.py | 2 ++ efs.py | 1 + efs_info.py | 2 ++ eks_fargate_profile.py | 1 + elasticache.py | 5 +++++ elasticache_parameter_group.py | 1 + elb_classic_lb_info.py | 1 + iam_group.py | 2 ++ lightsail.py | 1 + msk_config.py | 2 ++ s3_bucket_notification.py | 4 ++++ s3_metrics_configuration.py | 1 + s3_sync.py | 2 ++ secretsmanager_secret.py | 1 + 31 files changed, 64 insertions(+) diff --git a/api_gateway.py b/api_gateway.py index 6627c762bd9..a084bf93eff 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -76,6 +76,7 @@ description: - ENV variables for the stage. Define a dict of key values pairs for variables. type: dict + default: {} stage_canary_settings: description: - Canary settings for the deployment of the stage. @@ -86,6 +87,7 @@ - 'C(useStageCache): A Boolean flag to indicate whether the canary deployment uses the stage cache or not.' - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage) type: dict + default: {} tracing_enabled: description: - Specifies whether active tracing with X-ray is enabled for the API GW stage. diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index 59972a10e4b..1b13d1027d3 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -59,6 +59,7 @@ - You can specify either security group names or IDs or a mix. type: list elements: str + default: [] volumes: description: - A list dictionaries defining the volumes to create. 
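These documentation-only additions align each option's documented default with the default the module's argument spec already declares; a mismatch between the two is what the referenced ansible/ansible change surfaced. A hedged sketch of the pairing, using security_groups from the autoscaling_launch_config hunk above (the argument-spec line is paraphrased from the documented behaviour, not copied from the module source):

# In the module code, the option already defaults to an empty list...
argument_spec = dict(
    security_groups=dict(type='list', elements='str', default=[]),
)

# ...so the DOCUMENTATION block must now state the same default:
#   security_groups:
#     type: list
#     elements: str
#     default: []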
diff --git a/batch_job_definition.py b/batch_job_definition.py index 2af9093609e..0e5020da8a8 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -78,6 +78,7 @@ see U(https://docs.docker.com/engine/reference/builder/#cmd). type: list elements: str + default: [] job_role_arn: description: - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. @@ -104,6 +105,7 @@ allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. type: list elements: dict + default: [] environment: description: - The environment variables to pass to a container. This parameter maps to Env in the Create a container section @@ -117,6 +119,7 @@ - The value of the key value pair. For environment variables, this is the value of the environment variable. type: list elements: dict + default: [] mount_points: description: - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container @@ -134,6 +137,7 @@ - The name of the volume to mount. type: list elements: dict + default: [] readonly_root_filesystem: description: - When this parameter is true, the container is given read-only access to its root file system. This parameter @@ -162,6 +166,7 @@ - The soft limit for the ulimit type. type: list elements: dict + default: [] user: description: - The user name to use inside the container. This parameter maps to User in the Create a container section of diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 2bb0befc0a4..3abf9734a32 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -139,6 +139,7 @@ description: - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time. type: dict + default: {} suboptions: fail_count: description: diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index eff7971847c..12a6231f399 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -72,6 +72,7 @@ - Each alias must be unique across all distribution for the AWS account. type: list elements: str + default: [] purge_aliases: description: diff --git a/codecommit_repository.py b/codecommit_repository.py index 5e05de9a61f..fce4d15d6e9 100644 --- a/codecommit_repository.py +++ b/codecommit_repository.py @@ -31,6 +31,7 @@ aliases: - comment type: str + default: '' state: description: - Specifies the state of repository. diff --git a/data_pipeline.py b/data_pipeline.py index d354a3c4e22..fc441c10cc7 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -39,6 +39,7 @@ objects: type: list elements: dict + default: [] description: - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields). suboptions: @@ -77,6 +78,7 @@ - A list of parameter objects (dicts) in the pipeline definition. type: list elements: dict + default: [] suboptions: id: description: @@ -99,6 +101,7 @@ - A list of parameter values (dicts) in the pipeline definition. type: list elements: dict + default: [] suboptions: id: description: The ID of the parameter value @@ -121,6 +124,7 @@ description: - A dict of key:value pair(s) to add to the pipeline. type: dict + default: {} aliases: ['resource_tags'] ''' diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index d88983f8452..15a69163d30 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -34,6 +34,7 @@ description: - An optional human-readable string describing the contents and purpose of the new AMI. 
type: str + default: '' encrypted: description: - Whether or not the destination snapshots of the copied AMI should be encrypted. diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index f6d15464089..429ba20839b 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -20,11 +20,13 @@ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters. type: dict + default: {} customer_gateway_ids: description: - Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list. type: list elements: str + default: [] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 57de4e43bdc..455a7c6b85c 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -29,6 +29,7 @@ description: - An optional human-readable string describing purpose of the new Snapshot. type: str + default: '' encrypted: description: - Whether or not the destination Snapshot should be encrypted. diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 8ee0705f8f0..5ce3dc6a46a 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -21,11 +21,13 @@ aliases: [transit_gateway_id] type: list elements: str + default: [] filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters. type: dict + default: {} extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 85f8d8baa92..e11df3de532 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -40,6 +40,7 @@ required: false type: list elements: str + default: [] egress: description: - A list of rules for outgoing traffic. Each rule must be specified as a list. diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 3996596aec1..680fa3b68ba 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -24,6 +24,7 @@ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html) for possible filters. type: dict + default: {} author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: - amazon.aws.aws diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index 3f07a735390..fcb520cf054 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -19,6 +19,7 @@ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters. type: dict + default: {} vpn_gateway_ids: description: - Get details of a specific Virtual Gateway ID. diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 462c662e51a..77a994aaab1 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -61,6 +61,7 @@ required: false type: list elements: dict + default: [] suboptions: TunnelInsideCidr: type: str @@ -110,11 +111,13 @@ description: - The customer gateway id as a string or a list of those strings. type: dict + default: {} routes: description: - Routes to add to the connection. type: list elements: str + default: [] purge_routes: description: - Whether or not to delete VPN connections routes that are not specified in the task. 
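These defaults are not only cosmetic. An option declared with default={} (rather than left to fall back to None) also simplifies the module code, since no "was the option supplied?" branch is needed. A small hypothetical helper illustrating the effect (assumed names, not code from this patch):

def build_filter_params(module_params):
    # Because 'filters' defaults to {} instead of None, iterating over it is
    # always safe; an empty dict simply produces an empty filter list.
    return [
        {'Name': name, 'Values': [value]}
        for name, value in module_params['filters'].items()
    ]

# For example, build_filter_params({'filters': {}}) returns [].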
diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 94a6dcc9a91..c7a71f15451 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -21,12 +21,14 @@ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters. required: false type: dict + default: {} vpn_connection_ids: description: - Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list. required: false type: list elements: str + default: [] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/ecs_service.py b/ecs_service.py index 90785ee20c7..93abbe75a7b 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -57,6 +57,7 @@ required: false type: list elements: dict + default: [] desired_count: description: - The count of how many instances of the service. @@ -68,6 +69,7 @@ - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed. required: false type: str + default: '' role: description: - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer @@ -75,6 +77,7 @@ - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc). required: false type: str + default: '' delay: description: - The time to wait before checking that the service is available. @@ -99,6 +102,7 @@ required: false version_added: 4.1.0 type: dict + default: {} suboptions: type: type: str @@ -110,6 +114,7 @@ - Format is '{"maximum_percent":, "minimum_healthy_percent":} required: false type: dict + default: {} suboptions: maximum_percent: type: int @@ -134,6 +139,7 @@ required: false type: list elements: dict + default: [] suboptions: type: description: The type of constraint. @@ -148,6 +154,7 @@ required: false type: list elements: dict + default: [] suboptions: type: description: The type of placement strategy. @@ -193,6 +200,7 @@ required: false type: list elements: dict + default: [] suboptions: capacity_provider: description: @@ -223,6 +231,7 @@ - Describes service discovery registries this service will register with. type: list elements: dict + default: [] required: false suboptions: container_name: diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 6a994919855..246c9373933 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -508,11 +508,13 @@ the permissions that are specified in this role. required: false type: str + default: '' execution_role_arn: description: - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. required: false type: str + default: '' volumes: description: - A list of names of volumes to be attached. diff --git a/efs.py b/efs.py index 7f855c6db2f..de1d563fb04 100644 --- a/efs.py +++ b/efs.py @@ -59,6 +59,7 @@ This data may be modified for existing EFS using state 'present' and new list of mount targets." type: list elements: dict + default: [] suboptions: subnet_id: required: true diff --git a/efs_info.py b/efs_info.py index eb1d23224d5..5ef436f3c91 100644 --- a/efs_info.py +++ b/efs_info.py @@ -30,12 +30,14 @@ description: - List of tags of Amazon EFS. Should be defined as dictionary. type: dict + default: {} targets: description: - List of targets on which to filter the returned results. 
- Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address. type: list elements: str + default: [] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 0e6fabc0844..d78cbbe2d10 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -48,6 +48,7 @@ labels: description: A dictionary of labels used in fargate profile. type: dict + default: {} state: description: Create or delete the Fargate Profile. choices: diff --git a/elasticache.py b/elasticache.py index 75aedb9a301..454baafe3ec 100644 --- a/elasticache.py +++ b/elasticache.py @@ -39,6 +39,7 @@ description: - The version number of the cache engine. type: str + default: '' node_type: description: - The compute and memory capacity of the nodes in the cache cluster. @@ -61,22 +62,26 @@ for the specified engine will be used. aliases: [ 'parameter_group' ] type: str + default: '' cache_subnet_group: description: - The subnet group name to associate with. Only use if inside a VPC. - Required if inside a VPC. type: str + default: '' security_group_ids: description: - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC. type: list elements: str + default: [] cache_security_groups: description: - A list of cache security group names to associate with this cache cluster. - Don't use if your Cache is inside a VPC. In that case use I(security_group_ids) instead! type: list elements: str + default: [] zone: description: - The EC2 Availability Zone in which the cache cluster will be created. diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 24738eb1121..71ea159a14a 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -36,6 +36,7 @@ description: - A user-specified description for the cache parameter group. type: str + default: '' state: description: - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed. diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 4b28fafd388..4cbeb95890d 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -33,6 +33,7 @@ - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. type: list elements: str + default: [] extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/iam_group.py b/iam_group.py index 0bc19d002ee..31987ef1de4 100644 --- a/iam_group.py +++ b/iam_group.py @@ -41,6 +41,7 @@ required: false type: list elements: str + default: [] aliases: ['managed_policy'] users: description: @@ -48,6 +49,7 @@ required: false type: list elements: str + default: [] state: description: - Create or remove the IAM group. diff --git a/lightsail.py b/lightsail.py index c01954a06b9..5e403515412 100644 --- a/lightsail.py +++ b/lightsail.py @@ -49,6 +49,7 @@ description: - Launch script that can configure the instance with additional data. type: str + default: '' key_pair_name: description: - Name of the key pair to use with the instance. diff --git a/msk_config.py b/msk_config.py index 2a51414aa67..812eba16dee 100644 --- a/msk_config.py +++ b/msk_config.py @@ -31,9 +31,11 @@ description: description: The description of the configuration. type: str + default: '' config: description: Contents of the server.properties file. 
type: dict + default: {} aliases: ['configuration'] kafka_versions: description: diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 8df48fa56ed..645ca698974 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -74,6 +74,7 @@ - Version of the Lambda function. - Mutually exclusive with I(lambda_alias). type: int + default: 0 events: description: - Events that will be triggering a notification. You can select multiple events to send @@ -89,16 +90,19 @@ 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] type: list elements: str + default: [] prefix: description: - Optional prefix to limit the notifications to objects with keys that start with matching characters. type: str + default: '' suffix: description: - Optional suffix to limit the notifications to objects with keys that end with matching characters. type: str + default: '' extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index 97eee52daed..dff5668212c 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -40,6 +40,7 @@ required: false aliases: ['filter_tag'] type: dict + default: {} state: description: - "Create or delete metrics configuration" diff --git a/s3_sync.py b/s3_sync.py index 686f7cee488..80e3db0bd89 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -53,6 +53,7 @@ - In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary. required: false type: str + default: '' file_root: description: - File/directory path for synchronization. This is a local path. @@ -102,6 +103,7 @@ - Directives are separated by commas. required: false type: str + default: '' storage_class: description: - Storage class to be associated to each object added to the S3 bucket. diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index 453135e2412..c4c434e3252 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -40,6 +40,7 @@ description: - Specifies a user-provided description of the secret. type: str + default: '' kms_key_id: description: - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be From f26a05df23098a799f66310b84b16506b4e3b93c Mon Sep 17 00:00:00 2001 From: Julien Klaer Date: Tue, 15 Nov 2022 09:47:08 +0100 Subject: [PATCH 600/683] feat(elasticache): support for redis6.x group family for parameter groups (#1476) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit feat(elasticache): support for redis6.x group family for parameter groups SUMMARY AWS cache parameter groups can now be of type redis6.x. This PR adds support for the new group family to the elasticache_parameter_group module. ISSUE TYPE Feature Pull Request COMPONENT NAME Impacted component: elasticache_parameter_group module. Reviewed-by: Markus Bergholz Reviewed-by: Felix Fontein Reviewed-by: Gonéri Le Bouder Reviewed-by: Alina Buzachis --- elasticache_parameter_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 71ea159a14a..247dd0bab00 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -25,7 +25,7 @@ description: - The name of the cache parameter group family that the cache parameter group can be used with. Required when creating a cache parameter group.
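# Note: redis6.x is a single family name covering the Redis 6 engine releases (6.0 and 6.2).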
- choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0'] + choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x'] type: str name: description: @@ -274,7 +274,7 @@ def get_info(conn, name): def main(): argument_spec = dict( - group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']), + group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']), name=dict(required=True, type='str'), description=dict(default='', type='str'), state=dict(required=True, choices=['present', 'absent', 'reset']), From e8da9b4629a8254095c13f10e88abf88016ac1ca Mon Sep 17 00:00:00 2001 From: bplaxco Date: Fri, 25 Nov 2022 06:50:57 -0500 Subject: [PATCH 601/683] Add ResourceType on ec2 snapshot copy (#1419) Add ResourceType on ec2 snapshot copy SUMMARY Fixes this issue that shows up when tags are specified: An error occurred waiting for the snapshot to become available.: An error occurred (InvalidParameterValue) when calling the CopySnapshot operation: Tag specification resource type must have a value ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_snapshot_copy ADDITIONAL INFORMATION # before botocore.exceptions.ClientError: An error occurred (InvalidParameterValue) when calling the CopySnapshot operation: Tag specification resource type must have a value fatal: [localhost]: FAILED! => { "boto3_version": "1.24.57", "botocore_version": "1.27.58", "changed": false, "error": { "code": "InvalidParameterValue", "message": "Tag specification resource type must have a value" }, .... # after (no error message) changed: [localhost] => { "changed": true, "invocation": { "module_args": { ..... Reviewed-by: Mark Chappell Reviewed-by: Colby Shores Reviewed-by: None --- ec2_snapshot_copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 455a7c6b85c..f45be44178d 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -140,7 +140,7 @@ def copy_snapshot(module, ec2): params['KmsKeyId'] = module.params.get('kms_key_id') if module.params.get('tags'): - params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags')) + params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot']) try: snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] From 8f524f8da1ef83fc5787dcb4e89cc28ebe0501c3 Mon Sep 17 00:00:00 2001 From: Vincent Tan Date: Tue, 6 Dec 2022 02:48:14 -0800 Subject: [PATCH 602/683] ecs_taskdefinition - add health check documentation (#1610) ecs_taskdefinition - add health check documentation SUMMARY Adds documentation for the healthCheck dict for ecs_taskdefinition. Fixes #1565 ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/ecs_taskdefinition.py ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ecs_taskdefinition.py | 66 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 246c9373933..0ebffdd54f2 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -466,6 +466,49 @@ description: The health check command and associated configuration parameters for the container. 
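# These settings correspond to the HealthCheck parameters of the Docker Remote API
# and the --health-* options of docker run.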
required: False type: dict + suboptions: + command: + description: + - A string array representing the command that the container runs to determine if it is healthy. + - > + The string array must start with CMD to run the command arguments directly, + or CMD-SHELL to run the command with the container's default shell. + - An exit code of 0 indicates success, and non-zero exit code indicates failure. + required: False + type: list + elements: str + interval: + description: + - The time period in seconds between each health check execution. + - You may specify between 5 and 300 seconds. The default value is 30 seconds. + required: False + type: int + default: 30 + retries: + description: + - The number of times to retry a failed health check before the container is considered unhealthy. + - You may specify between 1 and 10 retries. The default value is 3. + required: False + type: int + default: 3 + startPeriod: + description: + - > + The optional grace period to provide containers time to bootstrap + before failed health checks count towards the maximum number of retries. + - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled. + - > + Note: If a health check succeeds within the startPeriod, + then the container is considered healthy and any subsequent failures count toward the maximum number of retries. + required: False + type: int + timeout: + description: + - The time period in seconds to wait for a health check to succeed before it is considered a failure. + - You may specify between 2 and 60 seconds. The default value is 5. + required: False + type: int + default: 5 systemControls: description: A list of namespaced kernel parameters to set in the container. required: False @@ -677,6 +720,29 @@ memory: 1GB state: present network_mode: awsvpc + +# Create Task Definition with health check +- name: Create task definition + community.aws.ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + healthCheck: + command: + - CMD-SHELL + - /app/healthcheck.py + interval: 60 + retries: 3 + startPeriod: 15 + timeout: 15 + state: present ''' RETURN = r''' taskdefinition: From 9f209738e4cac59074fc4deb891eb8bf40660f60 Mon Sep 17 00:00:00 2001 From: Giovanni Toraldo Date: Wed, 7 Dec 2022 11:42:27 +0100 Subject: [PATCH 603/683] Fixup opensearch when using advanced security options (#1613) Fixup opensearch when using advanced security options Fix #1560 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- opensearch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opensearch.py b/opensearch.py index 95fc45a2f68..7ed8c0722a2 100644 --- a/opensearch.py +++ b/opensearch.py @@ -948,6 +948,7 @@ def set_advanced_security_options( ] = advanced_security_opts.get("internal_user_database_enabled") master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ "MasterUserARN" From 1212e9474884525dccd593a7c8139ea183a89740 Mon Sep 17 00:00:00 2001 From: Nicolas Boutet Date: Wed, 7 Dec 2022 14:02:01 +0200 Subject: [PATCH 604/683] Add origin shield in cloudfront_distribution module (#1557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add origin shield in cloudfront_distribution module SUMMARY Add Origin 
Shield option to cloudfront_distribution module. ISSUE TYPE Feature Pull Request COMPONENT NAME cloudfront_distribution.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz Reviewed-by: Gonéri Le Bouder --- cloudfront_distribution.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 12a6231f399..c9ae1c25c42 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -119,6 +119,17 @@ origin_path: description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. type: str + origin_shield: + description: Specify origin shield options for the origin. + type: dict + suboptions: + enabled: + description: Indicate whether you want the origin to have Origin Shield enabled or not. + type: bool + origin_shield_region: + description: Specify which AWS region will be used for Origin Shield. Required if Origin Shield is enabled. + type: str + version_added: 5.1.0 custom_headers: description: - Custom headers you wish to add to the request before passing it to the origin. @@ -1297,6 +1308,22 @@ returned: always type: int sample: 10 + origin_shield: + description: Configuration of the origin Origin Shield. + returned: always + type: complex + contains: + enabled: + description: Whether Origin Shield is enabled or not. + returned: always + type: bool + sample: false + origin_shield_region: + description: Which region is used by Origin Shield. + returned: when enabled is true + type: str + sample: us-east-1 + version_added: 5.1.0 s3_origin_config: description: Origin access identity configuration for S3 Origin. returned: when s3_origin_access_identity_enabled is true @@ -1731,6 +1758,15 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers')) else: origin['custom_headers'] = ansible_list_to_cloudfront_list() + if 'origin_shield' in origin: + origin_shield = origin.get('origin_shield') + if origin_shield.get('enabled'): + origin_shield_region = origin_shield.get('origin_shield_region') + if origin_shield_region is None: + self.module.fail_json(msg="origins[].origin_shield.origin_shield_region must be specified" + " when origins[].origin_shield.enabled is true.") + else: + origin_shield_region = origin_shield_region.lower() if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): if origin.get("s3_origin_access_identity_enabled") is not None: if origin['s3_origin_access_identity_enabled']: From e50b0af0094e48e3e9ba346b8693711b4f733abc Mon Sep 17 00:00:00 2001 From: rwha Date: Thu, 22 Dec 2022 03:51:12 -0500 Subject: [PATCH 605/683] Fix KeyError when Description is not present in ssm_parameter (#1627) Fix KeyError when Description is not present in ssm_parameter SUMMARY Fixes #1471 ISSUE TYPE Bugfix Pull Request COMPONENT NAME ssm_parameter ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell --- ssm_parameter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ssm_parameter.py b/ssm_parameter.py index af4de1c15fb..5381c0d67bf 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -383,7 +383,7 @@ def create_update_parameter(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter['Description'] != 
args['Description']: + if describe_existing_parameter.get('Description') != args['Description']: (changed, response) = update_parameter(client, module, **args) if changed: _wait_updated(client, module, module.params.get('name'), original_version) From c04f813fb06828061a60e1cd703cb4a76857f1c8 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 4 Jan 2023 09:44:49 +0100 Subject: [PATCH 606/683] iam_role: drop deprecation (#1636) iam_role: drop deprecation SUMMARY The change had been announced since community.aws 1.0.0, originally targeting ansible 2.14. With community.aws 2.1.0, the target was changed to the date 2022-06-01. However, in the meantime true has become the standard value for purge parameters, so we just drop the deprecation warning. COMPONENT NAME iam_role Reviewed-by: Mark Chappell --- iam_role.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/iam_role.py b/iam_role.py index cccc062a494..09a86a54e82 100644 --- a/iam_role.py +++ b/iam_role.py @@ -58,9 +58,9 @@ purge_policies: description: - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detatched. - - By default I(purge_policies=true). In a release after 2022-06-01 this will be changed to I(purge_policies=false). type: bool aliases: ['purge_policy', 'purge_managed_policies'] + default: true state: description: - Create or remove the IAM role. @@ -448,8 +448,6 @@ def create_or_update_role(module, client): purge_tags = module.params.get('purge_tags') tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None purge_policies = module.params.get('purge_policies') - if purge_policies is None: - purge_policies = True managed_policies = module.params.get('managed_policies') if managed_policies: # Attempt to list the policies early so we don't leave things behind if we can't find them. @@ -665,7 +663,7 @@ def main(): boundary=dict(type='str', aliases=['boundary_policy_arn']), create_instance_profile=dict(type='bool', default=True), delete_instance_profile=dict(type='bool', default=False), - purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']), + purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']), tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), @@ -676,10 +674,6 @@ def main(): required_if=[('state', 'present', ['assume_role_policy_document'])], supports_check_mode=True) - if module.params.get('purge_policies') is None: - module.deprecate('After 2022-06-01 the default value of purge_policies will change from true to false.'
- ' To maintain the existing behaviour explicitly set purge_policies=true', date='2022-06-01', collection_name='community.aws') - if module.params.get('boundary'): if module.params.get('create_instance_profile'): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") From 5b645930b6d34101ef04688c8c9598bf4e131e69 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 10 Jan 2023 16:20:42 +0100 Subject: [PATCH 607/683] ssm_parameter - fix typo in docs (#1644) ssm_parameter - fix typo in docs SUMMARY ssm_parameter - fix typo in docs: 'paramater' instead of 'parameter'. ISSUE TYPE Docs Pull Request COMPONENT NAME ssm_parameter ADDITIONAL INFORMATION Fixes: #1642 Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ssm_parameter.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ssm_parameter.py b/ssm_parameter.py index 5381c0d67bf..b7544d4a569 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -94,25 +94,25 @@ EXAMPLES = ''' - name: Create or update key/value pair in AWS SSM parameter store - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "Hello" description: "This is your first key" value: "World" - name: Delete the key - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "Hello" state: absent - name: Create or update secure key/value pair with default KMS key (aws/ssm) - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "Hello" description: "This is your first key" string_type: "SecureString" value: "World" - name: Create or update secure key/value pair with nominated KMS key - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "Hello" description: "This is your first key" string_type: "SecureString" key_id: "alias/demo" value: "World" - name: Always update a parameter store value and create a new version - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "overwrite_example" description: "This example will always overwrite the value" string_type: "String" value: "Test1234" overwrite_value: "always" - name: Create or update key/value pair in AWS SSM parameter store with tier - community.aws.ssm_paramater: + community.aws.ssm_parameter: name: "Hello" description: "This is your first key" value: "World" From b5a5deadda2d392912469225929a7d44db927c7c Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Tue, 10 Jan 2023 19:22:13 +0100 Subject: [PATCH 608/683] Ansible User-Agent identification for community.aws (#1632) Ansible User-Agent identification for community.aws SUMMARY The value will be similar to this: APN/1.0 Ansible/2.14.1 community.aws/6.0.0-dev0 ISSUE TYPE Feature Pull Request Reviewed-by: Mark Chappell Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis --- accessanalyzer_validate_policy_info.py | 2 +- acm_certificate.py | 2 +- acm_certificate_info.py | 2 +- api_gateway.py | 2 +- api_gateway_domain.py | 3 ++- application_autoscaling_policy.py | 2 +- autoscaling_complete_lifecycle_action.py | 2 +- autoscaling_instance_refresh.py | 2 +- autoscaling_instance_refresh_info.py | 2 +- autoscaling_launch_config.py | 2 +- autoscaling_launch_config_find.py | 2 +- autoscaling_launch_config_info.py | 2 +- autoscaling_lifecycle_hook.py | 2 +- autoscaling_policy.py | 2 +- autoscaling_scheduled_action.py | 2 +- aws_region_info.py | 2 +- batch_compute_environment.py | 2 +- batch_job_definition.py | 2 +- batch_job_queue.py | 2 +- cloudformation_exports_info.py | 2 +-
cloudformation_stack_set.py | 2 +- cloudfront_distribution.py | 2 +- cloudfront_distribution_info.py | 2 +- cloudfront_invalidation.py | 2 +- cloudfront_origin_access_identity.py | 2 +- cloudfront_response_headers_policy.py | 2 +- codebuild_project.py | 2 +- codecommit_repository.py | 2 +- codepipeline.py | 2 +- config_aggregation_authorization.py | 2 +- config_aggregator.py | 3 ++- config_delivery_channel.py | 2 +- config_recorder.py | 2 +- config_rule.py | 2 +- data_pipeline.py | 2 +- directconnect_confirm_connection.py | 2 +- directconnect_connection.py | 2 +- directconnect_gateway.py | 2 +- directconnect_link_aggregation_group.py | 2 +- directconnect_virtual_interface.py | 2 +- dms_endpoint.py | 2 +- dms_replication_subnet_group.py | 2 +- dynamodb_table.py | 2 +- dynamodb_ttl.py | 2 +- ec2_ami_copy.py | 2 +- ec2_customer_gateway.py | 2 +- ec2_customer_gateway_info.py | 2 +- ec2_launch_template.py | 2 +- ec2_placement_group.py | 2 +- ec2_placement_group_info.py | 2 +- ec2_snapshot_copy.py | 2 +- ec2_transit_gateway.py | 2 +- ec2_transit_gateway_info.py | 2 +- ec2_transit_gateway_vpc_attachment.py | 2 +- ec2_transit_gateway_vpc_attachment_info.py | 2 +- ec2_vpc_egress_igw.py | 2 +- ec2_vpc_nacl.py | 2 +- ec2_vpc_nacl_info.py | 2 +- ec2_vpc_peer.py | 2 +- ec2_vpc_peering_info.py | 2 +- ec2_vpc_vgw.py | 2 +- ec2_vpc_vgw_info.py | 2 +- ec2_vpc_vpn.py | 2 +- ec2_vpc_vpn_info.py | 2 +- ec2_win_password.py | 2 +- ecs_attribute.py | 2 +- ecs_cluster.py | 2 +- ecs_ecr.py | 2 +- ecs_service.py | 2 +- ecs_service_info.py | 2 +- ecs_tag.py | 2 +- ecs_task.py | 2 +- ecs_taskdefinition.py | 2 +- ecs_taskdefinition_info.py | 2 +- efs.py | 2 +- efs_info.py | 2 +- efs_tag.py | 2 +- eks_cluster.py | 3 ++- eks_fargate_profile.py | 3 ++- elasticache.py | 2 +- elasticache_info.py | 2 +- elasticache_parameter_group.py | 2 +- elasticache_snapshot.py | 2 +- elasticache_subnet_group.py | 2 +- elasticbeanstalk_app.py | 2 +- elb_classic_lb_info.py | 3 ++- elb_instance.py | 2 +- elb_network_lb.py | 2 +- elb_target.py | 2 +- elb_target_group.py | 2 +- elb_target_group_info.py | 2 +- elb_target_info.py | 2 +- glue_connection.py | 2 +- glue_crawler.py | 2 +- glue_job.py | 2 +- iam_access_key.py | 2 +- iam_access_key_info.py | 2 +- iam_group.py | 2 +- iam_managed_policy.py | 2 +- iam_mfa_device_info.py | 2 +- iam_password_policy.py | 2 +- iam_role.py | 2 +- iam_role_info.py | 2 +- iam_saml_federation.py | 2 +- iam_server_certificate.py | 2 +- iam_server_certificate_info.py | 2 +- inspector_target.py | 2 +- kinesis_stream.py | 2 +- lightsail.py | 2 +- lightsail_static_ip.py | 2 +- msk_cluster.py | 2 +- msk_config.py | 2 +- opensearch_info.py | 2 +- redshift.py | 2 +- redshift_cross_region_snapshots.py | 2 +- redshift_info.py | 2 +- redshift_subnet_group.py | 2 +- s3_bucket_info.py | 2 +- s3_bucket_notification.py | 2 +- s3_cors.py | 2 +- s3_lifecycle.py | 2 +- s3_logging.py | 2 +- s3_metrics_configuration.py | 2 +- s3_sync.py | 2 +- s3_website.py | 2 +- secretsmanager_secret.py | 2 +- ses_identity.py | 2 +- ses_identity_policy.py | 2 +- ses_rule_set.py | 2 +- sns.py | 2 +- sns_topic.py | 2 +- sns_topic_info.py | 2 +- sqs_queue.py | 2 +- ssm_parameter.py | 2 +- stepfunctions_state_machine.py | 2 +- stepfunctions_state_machine_execution.py | 2 +- storagegateway_info.py | 2 +- sts_assume_role.py | 2 +- sts_session_token.py | 2 +- waf_condition.py | 2 +- waf_info.py | 2 +- waf_rule.py | 2 +- waf_web_acl.py | 2 +- wafv2_ip_set.py | 2 +- wafv2_ip_set_info.py | 2 +- wafv2_resources.py | 2 +- wafv2_resources_info.py | 2 +- 
wafv2_rule_group.py | 2 +- wafv2_rule_group_info.py | 2 +- wafv2_web_acl.py | 2 +- wafv2_web_acl_info.py | 2 +- 151 files changed, 156 insertions(+), 151 deletions(-) diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py index e589d0cb011..790486e13c2 100644 --- a/accessanalyzer_validate_policy_info.py +++ b/accessanalyzer_validate_policy_info.py @@ -169,7 +169,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/acm_certificate.py b/acm_certificate.py index abdecadcc78..313bdc424a3 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -267,7 +267,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( diff --git a/acm_certificate_info.py b/acm_certificate_info.py index a84d7c0b065..7395ec65ddc 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -259,7 +259,7 @@ type: str ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager diff --git a/api_gateway.py b/api_gateway.py index a084bf93eff..e4085deced8 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -180,7 +180,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/api_gateway_domain.py b/api_gateway_domain.py index 9b4ec85724a..ab0486d4f75 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -119,7 +119,8 @@ import copy -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index d20c107de9c..08264400341 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -285,7 +285,7 @@ sample: '2017-09-28T08:22:51.881000-03:00' ''' # NOQA -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from 
ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict try: diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py index 8f585a10288..62d5e64b3bc 100644 --- a/autoscaling_complete_lifecycle_action.py +++ b/autoscaling_complete_lifecycle_action.py @@ -66,7 +66,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def main(): diff --git a/autoscaling_instance_refresh.py b/autoscaling_instance_refresh.py index 94c2bb38c37..73e223af3de 100644 --- a/autoscaling_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -145,7 +145,7 @@ pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters diff --git a/autoscaling_instance_refresh_info.py b/autoscaling_instance_refresh_info.py index 3037d0b5295..064e92789b6 100644 --- a/autoscaling_instance_refresh_info.py +++ b/autoscaling_instance_refresh_info.py @@ -127,7 +127,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index 1b13d1027d3..0e5cf844673 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -454,7 +454,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names diff --git a/autoscaling_launch_config_find.py b/autoscaling_launch_config_find.py index ae8f187c05f..699859af7e1 100644 --- a/autoscaling_launch_config_find.py +++ b/autoscaling_launch_config_find.py @@ -140,7 +140,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def find_launch_configs(client, module): diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py index 1c98d7588d0..44359bc64da 100644 --- a/autoscaling_launch_config_info.py +++ b/autoscaling_launch_config_info.py @@ 
-159,7 +159,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def list_launch_configs(connection, module): diff --git a/autoscaling_lifecycle_hook.py b/autoscaling_lifecycle_hook.py index cf07b7681ac..72d3c6dfda2 100644 --- a/autoscaling_lifecycle_hook.py +++ b/autoscaling_lifecycle_hook.py @@ -138,7 +138,7 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict diff --git a/autoscaling_policy.py b/autoscaling_policy.py index a29389b0e06..19c7e46b481 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -357,7 +357,7 @@ pass # caught by imported AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict diff --git a/autoscaling_scheduled_action.py b/autoscaling_scheduled_action.py index f1433c522bc..cbccf7f1fe3 100644 --- a/autoscaling_scheduled_action.py +++ b/autoscaling_scheduled_action.py @@ -164,7 +164,7 @@ except ImportError: HAS_DATEUTIL = False -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry diff --git a/aws_region_info.py b/aws_region_info.py index 126455a8cff..ad9368ef14c 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -56,7 +56,7 @@ }]" ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict diff --git a/batch_compute_environment.py b/batch_compute_environment.py index 555cfccbe55..c6c752a3c40 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -225,7 +225,7 @@ ''' import re -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict try: diff --git a/batch_job_definition.py b/batch_job_definition.py index 0e5020da8a8..7e4ea05f5b4 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -226,7 +226,7 @@ ''' from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params -from 
ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/batch_job_queue.py b/batch_job_queue.py index ef48896a473..e20c430ba6a 100644 --- a/batch_job_queue.py +++ b/batch_job_queue.py @@ -107,7 +107,7 @@ ''' from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict try: diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index f7e71e2f8d1..7030ca8ba5c 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -37,7 +37,7 @@ type: dict ''' -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry try: diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 3abf9734a32..2d5bd83d455 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -310,7 +310,7 @@ from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index c9ae1c25c42..d2e00f0221c 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1417,7 +1417,7 @@ ''' from ansible.module_utils._text import to_text, to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager from ansible.module_utils.common.dict_transformations import recursive_diff from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py index cb97664fab2..8898e895029 100644 --- a/cloudfront_distribution_info.py +++ b/cloudfront_distribution_info.py @@ -251,7 +251,7 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import 
 boto3_tag_list_to_ansible_dict
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py
index 767a1d18182..b99a56c530e 100644
--- a/cloudfront_invalidation.py
+++ b/cloudfront_invalidation.py
@@ -142,7 +142,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
 from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py
index c6879d0c5a7..2d9009a9b9b 100644
--- a/cloudfront_origin_access_identity.py
+++ b/cloudfront_origin_access_identity.py
@@ -126,7 +126,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 class CloudFrontOriginAccessIdentityServiceManager(object):
diff --git a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py
index 01b38a3bdad..f009fe89b3c 100644
--- a/cloudfront_response_headers_policy.py
+++ b/cloudfront_response_headers_policy.py
@@ -149,7 +149,7 @@
     pass # caught by imported AnsibleAWSModule
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 import datetime
diff --git a/codebuild_project.py b/codebuild_project.py
index 873b7401005..a1bd310eba5 100644
--- a/codebuild_project.py
+++ b/codebuild_project.py
@@ -329,7 +329,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
 from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
diff --git a/codecommit_repository.py b/codecommit_repository.py
index fce4d15d6e9..b9282183d58 100644
--- a/codecommit_repository.py
+++ b/codecommit_repository.py
@@ -139,7 +139,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
diff --git a/codepipeline.py b/codepipeline.py
index 5c5935cb9ac..774231d5bdd 100644
--- a/codepipeline.py
+++ b/codepipeline.py
@@ -205,7 +205,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py
index 7b92abb7f4a..9060fcd97d8 100644
--- a/config_aggregation_authorization.py
+++ b/config_aggregation_authorization.py
@@ -59,7 +59,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/config_aggregator.py b/config_aggregator.py
index 3dc4c6faaf7..c54357700a3 100644
--- a/config_aggregator.py
+++ b/config_aggregator.py
@@ -97,7 +97,8 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
diff --git a/config_delivery_channel.py b/config_delivery_channel.py
index 371bd6685c1..d97467502a2 100644
--- a/config_delivery_channel.py
+++ b/config_delivery_channel.py
@@ -74,7 +74,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/config_recorder.py b/config_recorder.py
index d90ce46cd22..7d6ebae8ef1 100644
--- a/config_recorder.py
+++ b/config_recorder.py
@@ -88,7 +88,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/config_rule.py b/config_rule.py
index d14f4d16ca9..aa1ff626a92 100644
--- a/config_rule.py
+++ b/config_rule.py
@@ -116,7 +116,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/data_pipeline.py b/data_pipeline.py
index fc441c10cc7..a5b0e627a40 100644
--- a/data_pipeline.py
+++ b/data_pipeline.py
@@ -209,7 +209,7 @@
 from ansible.module_utils._text import to_text
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py
index 45180ac6c16..2705c50a6b5 100644
--- a/directconnect_confirm_connection.py
+++ b/directconnect_confirm_connection.py
@@ -69,7 +69,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/directconnect_connection.py b/directconnect_connection.py
index 28d86717d7a..11ac14dfa22 100644
--- a/directconnect_connection.py
+++ b/directconnect_connection.py
@@ -164,7 +164,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
diff --git a/directconnect_gateway.py b/directconnect_gateway.py
index 1433b387b4d..8ad4f9bf161 100644
--- a/directconnect_gateway.py
+++ b/directconnect_gateway.py
@@ -106,7 +106,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def dx_gateway_info(client, gateway_id, module):
diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py
index cc7122712e6..6b7ec8bdbe8 100644
--- a/directconnect_link_aggregation_group.py
+++ b/directconnect_link_aggregation_group.py
@@ -173,7 +173,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py
index 059cd74250c..88a8f5622a8 100644
--- a/directconnect_virtual_interface.py
+++ b/directconnect_virtual_interface.py
@@ -257,7 +257,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
 from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/dms_endpoint.py b/dms_endpoint.py
index fb899d6690a..692fb25bd88 100644
--- a/dms_endpoint.py
+++ b/dms_endpoint.py
@@ -334,7 +334,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py
index fb5d596134b..3fdbdc0a429 100644
--- a/dms_replication_subnet_group.py
+++ b/dms_replication_subnet_group.py
@@ -64,7 +64,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 backoff_params = dict(retries=5, delay=1, backoff=1.5)
diff --git a/dynamodb_table.py b/dynamodb_table.py
index 28d334fc9c4..71b0e4ccc26 100644
--- a/dynamodb_table.py
+++ b/dynamodb_table.py
@@ -252,7 +252,7 @@
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py
index 9cbbb3e5e77..ec7d6ed2f65 100644
--- a/dynamodb_ttl.py
+++ b/dynamodb_ttl.py
@@ -68,7 +68,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def get_current_ttl_state(c, table_name):
diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py
index 15a69163d30..665aeab8a25 100644
--- a/ec2_ami_copy.py
+++ b/ec2_ami_copy.py
@@ -143,7 +143,7 @@
 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py
index 3b176b5ee63..a4637f38659 100644
--- a/ec2_customer_gateway.py
+++ b/ec2_customer_gateway.py
@@ -117,7 +117,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py
index 429ba20839b..ce576b7ae81 100644
--- a/ec2_customer_gateway_info.py
+++ b/ec2_customer_gateway_info.py
@@ -86,7 +86,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
                                                                      boto3_tag_list_to_ansible_dict,
                                                                      camel_dict_to_snake_dict,
diff --git a/ec2_launch_template.py b/ec2_launch_template.py
index 7907be4b5d9..094a6afdfa0 100644
--- a/ec2_launch_template.py
+++ b/ec2_launch_template.py
@@ -423,7 +423,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ec2_placement_group.py b/ec2_placement_group.py
index c27917df9f3..4b4adc964e5 100644
--- a/ec2_placement_group.py
+++ b/ec2_placement_group.py
@@ -106,7 +106,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py
index d22f133ae6e..4bcc9cfb4ae 100644
--- a/ec2_placement_group_info.py
+++ b/ec2_placement_group_info.py
@@ -73,7 +73,7 @@
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 try:
     from botocore.exceptions import (BotoCoreError, ClientError)
 except ImportError:
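Every hunk in this patch makes the same one-line substitution: the import of AnsibleAWSModule from amazon.aws's core module_utils is replaced by community.aws's own AnsibleCommunityAWSModule, aliased back to the name AnsibleAWSModule so that nothing else in the module body has to change. A minimal sketch of what a migrated module looks like at the call site; the argument_spec contents and the service name are illustrative, not taken from any file in this patch:

from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule


def main():
    # The alias keeps the familiar class name, so this body is identical
    # before and after the migration.
    module = AnsibleAWSModule(
        argument_spec=dict(
            name=dict(type='str', required=True),  # illustrative option
        ),
        supports_check_mode=True,
    )
    client = module.client('ec2')  # illustrative service name
    module.exit_json(changed=False, name=module.params['name'])


if __name__ == '__main__':
    main()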
diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py
index f45be44178d..7b38b1ea29a 100644
--- a/ec2_snapshot_copy.py
+++ b/ec2_snapshot_copy.py
@@ -115,7 +115,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py
index 298646cf819..158fcf929a8 100644
--- a/ec2_transit_gateway.py
+++ b/ec2_transit_gateway.py
@@ -217,7 +217,7 @@
 except ImportError:
     pass # handled by imported AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from time import sleep, time
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py
index 5ce3dc6a46a..cae82e570cd 100644
--- a/ec2_transit_gateway_info.py
+++ b/ec2_transit_gateway_info.py
@@ -171,7 +171,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py
index 5540590219b..55267bc9185 100644
--- a/ec2_transit_gateway_vpc_attachment.py
+++ b/ec2_transit_gateway_vpc_attachment.py
@@ -219,7 +219,7 @@
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py
index ea9aec0419c..3a8d4dfd4d1 100644
--- a/ec2_transit_gateway_vpc_attachment_info.py
+++ b/ec2_transit_gateway_vpc_attachment_info.py
@@ -147,7 +147,7 @@
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py
index dbcf15b12b5..09531892548 100644
--- a/ec2_vpc_egress_igw.py
+++ b/ec2_vpc_egress_igw.py
@@ -65,7 +65,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py
index e11df3de532..3b10a0a66f9 100644
--- a/ec2_vpc_nacl.py
+++ b/ec2_vpc_nacl.py
@@ -159,7 +159,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
 from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py
index b85c9423663..3684d0d397b 100644
--- a/ec2_vpc_nacl_info.py
+++ b/ec2_vpc_nacl_info.py
@@ -109,7 +109,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py
index f23ffae1952..3c39f11dede 100644
--- a/ec2_vpc_peer.py
+++ b/ec2_vpc_peer.py
@@ -359,7 +359,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py
index 680fa3b68ba..2e257a31ffe 100644
--- a/ec2_vpc_peering_info.py
+++ b/ec2_vpc_peering_info.py
@@ -206,7 +206,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py
index 8332e10063d..990ad908acc 100644
--- a/ec2_vpc_vgw.py
+++ b/ec2_vpc_vgw.py
@@ -142,7 +142,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py
index fcb520cf054..dcddd69bc31 100644
--- a/ec2_vpc_vgw_info.py
+++ b/ec2_vpc_vgw_info.py
@@ -130,7 +130,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py
index 77a994aaab1..d543cde00e7 100644
--- a/ec2_vpc_vpn.py
+++ b/ec2_vpc_vpn.py
@@ -294,7 +294,7 @@
 """
 from ansible.module_utils._text import to_text
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py
index c7a71f15451..ac9be556e23 100644
--- a/ec2_vpc_vpn_info.py
+++ b/ec2_vpc_vpn_info.py
@@ -166,7 +166,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
                                                                      boto3_tag_list_to_ansible_dict,
                                                                      camel_dict_to_snake_dict,
diff --git a/ec2_win_password.py b/ec2_win_password.py
index 9b92c3e4f92..10d33658f88 100644
--- a/ec2_win_password.py
+++ b/ec2_win_password.py
@@ -113,7 +113,7 @@
 from ansible.module_utils._text import to_bytes
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ecs_attribute.py b/ecs_attribute.py
index 6efe701d1e3..a942228e305 100644
--- a/ecs_attribute.py
+++ b/ecs_attribute.py
@@ -116,7 +116,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 class EcsAttributes(object):
diff --git a/ecs_cluster.py b/ecs_cluster.py
index 3866c1f0fae..ebee2760747 100644
--- a/ecs_cluster.py
+++ b/ecs_cluster.py
@@ -111,7 +111,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 class EcsClusterManager:
diff --git a/ecs_ecr.py b/ecs_ecr.py
index d472af2756c..2a70a2aa520 100644
--- a/ecs_ecr.py
+++ b/ecs_ecr.py
@@ -203,7 +203,7 @@
 from ansible.module_utils.six import string_types
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
diff --git a/ecs_service.py b/ecs_service.py
index 93abbe75a7b..e69ad4676f8 100644
--- a/ecs_service.py
+++ b/ecs_service.py
@@ -668,7 +668,7 @@
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/ecs_service_info.py b/ecs_service_info.py
index f174a31cddf..fe651444cea 100644
--- a/ecs_service_info.py
+++ b/ecs_service_info.py
@@ -139,7 +139,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/ecs_tag.py b/ecs_tag.py
index 8698a7bbd43..9f25881d207 100644
--- a/ecs_tag.py
+++ b/ecs_tag.py
@@ -110,7 +110,7 @@
     type: dict
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
 try:
diff --git a/ecs_task.py b/ecs_task.py
index 54948ce213a..ebc872ba959 100644
--- a/ecs_task.py
+++ b/ecs_task.py
@@ -244,7 +244,7 @@
     type: str
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
 try:
diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index 0ebffdd54f2..4488b5ac924 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -757,7 +757,7 @@
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
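A convention that recurs in the context lines of these hunks (ecs_attribute, ecs_cluster, ecs_taskdefinition, and many others): the boto3/botocore import failure is deliberately swallowed, because AnsibleAWSModule performs its own dependency check and fails the task with a proper error message when the SDK is missing. A minimal sketch of that convention; the imported name is illustrative:

try:
    import botocore  # noqa: F401
except ImportError:
    pass  # handled by AnsibleAWSModule, which reports the missing SDK cleanly

Swallowing the ImportError here keeps the module importable for documentation and argument-spec introspection even on hosts without the AWS SDK installed.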
diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py
index 77f5a476ea4..6f5c145aaa8 100644
--- a/ecs_taskdefinition_info.py
+++ b/ecs_taskdefinition_info.py
@@ -298,7 +298,7 @@
     type: str
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 try:
diff --git a/efs.py b/efs.py
index de1d563fb04..7223c99f3a4 100644
--- a/efs.py
+++ b/efs.py
@@ -257,7 +257,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/efs_info.py b/efs_info.py
index 5ef436f3c91..634ff2a4273 100644
--- a/efs_info.py
+++ b/efs_info.py
@@ -180,7 +180,7 @@
 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/efs_tag.py b/efs_tag.py
index 1529fa94489..bc99d110635 100644
--- a/efs_tag.py
+++ b/efs_tag.py
@@ -104,7 +104,7 @@
     pass
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing
 WAIT_RETRY = 5 # how many seconds to wait between propagation status polls
diff --git a/eks_cluster.py b/eks_cluster.py
index 1b8e7a866fe..425ff9db264 100644
--- a/eks_cluster.py
+++ b/eks_cluster.py
@@ -161,7 +161,8 @@
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
 from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py
index d78cbbe2d10..c54d67aec02 100644
--- a/eks_fargate_profile.py
+++ b/eks_fargate_profile.py
@@ -166,7 +166,8 @@
     - ACTIVE
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
diff --git a/elasticache.py b/elasticache.py
index 454baafe3ec..bd976aa841f 100644
--- a/elasticache.py
+++ b/elasticache.py
@@ -139,7 +139,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
diff --git a/elasticache_info.py b/elasticache_info.py
index 1f8af9a330c..3aa7a4317c7 100644
--- a/elasticache_info.py
+++ b/elasticache_info.py
@@ -405,7 +405,7 @@
 '''
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py
index 247dd0bab00..fe8cc08fc00 100644
--- a/elasticache_parameter_group.py
+++ b/elasticache_parameter_group.py
@@ -115,7 +115,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.six import string_types
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def create(module, conn, name, group_family, description):
diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py
index fa18b80c0d3..9ae5be427ca 100644
--- a/elasticache_snapshot.py
+++ b/elasticache_snapshot.py
@@ -117,7 +117,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py
index 0f5f5e75e13..802d8c0949e 100644
--- a/elasticache_subnet_group.py
+++ b/elasticache_subnet_group.py
@@ -104,7 +104,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py
index b5b32c178b2..46529276997 100644
--- a/elasticbeanstalk_app.py
+++ b/elasticbeanstalk_app.py
@@ -90,7 +90,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py
index 4cbeb95890d..9298085e28f 100644
--- a/elb_classic_lb_info.py
+++ b/elb_classic_lb_info.py
@@ -139,7 +139,8 @@
     vpc_id: vpc-c248fda4
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
     AWSRetry,
     camel_dict_to_snake_dict,
diff --git a/elb_instance.py b/elb_instance.py
index ecea32a6309..c09ae0429bb 100644
--- a/elb_instance.py
+++ b/elb_instance.py
@@ -96,7 +96,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/elb_network_lb.py b/elb_network_lb.py
index 6dcdfd209c3..44025cccb94 100644
--- a/elb_network_lb.py
+++ b/elb_network_lb.py
@@ -330,7 +330,7 @@
     sample: vpc-0011223344
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
 from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener
diff --git a/elb_target.py b/elb_target.py
index cff46a62ace..133419a1757 100644
--- a/elb_target.py
+++ b/elb_target.py
@@ -121,7 +121,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/elb_target_group.py b/elb_target_group.py
index f9849264200..18f9ca5e46b 100644
--- a/elb_target_group.py
+++ b/elb_target_group.py
@@ -448,7 +448,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
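Where the old import pulled in AnsibleAWSModule together with a helper on one line, the hunks above (config_aggregator, eks_cluster, eks_fargate_profile, elb_classic_lb_info) split it in two: only the module base class moves to community.aws, while the helper keeps coming from amazon.aws. That is why those hunk headers show the new side growing by one line (e.g. @@ -97,7 +97,8 @@) where the single-import hunks stay at seven lines. The shape of the change:

# Before: module class and helper imported together from amazon.aws
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code

# After: the module class comes from community.aws (aliased), the helper stays put
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code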
diff --git a/elb_target_group_info.py b/elb_target_group_info.py
index 86cc03782f8..d6a73d3307f 100644
--- a/elb_target_group_info.py
+++ b/elb_target_group_info.py
@@ -213,7 +213,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict
diff --git a/elb_target_info.py b/elb_target_info.py
index 4f91ac7f3f5..92ab33ba945 100644
--- a/elb_target_info.py
+++ b/elb_target_info.py
@@ -212,7 +212,7 @@
     # we can handle the lack of boto3 based on the ec2 module
     pass
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
diff --git a/glue_connection.py b/glue_connection.py
index bcfacb1712a..2e01b6fed32 100644
--- a/glue_connection.py
+++ b/glue_connection.py
@@ -169,7 +169,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
diff --git a/glue_crawler.py b/glue_crawler.py
index a47b8eb3f93..d5cdc04d6d5 100644
--- a/glue_crawler.py
+++ b/glue_crawler.py
@@ -208,7 +208,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
diff --git a/glue_job.py b/glue_job.py
index 47d6156d764..ea6e79180fc 100644
--- a/glue_job.py
+++ b/glue_job.py
@@ -234,7 +234,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
diff --git a/iam_access_key.py b/iam_access_key.py
index 3207741ab94..ab3e9110604 100644
--- a/iam_access_key.py
+++ b/iam_access_key.py
@@ -126,7 +126,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
 from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
diff --git a/iam_access_key_info.py b/iam_access_key_info.py
index 91429eff940..9d7363b420a 100644
--- a/iam_access_key_info.py
+++ b/iam_access_key_info.py
@@ -76,7 +76,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/iam_group.py b/iam_group.py
index 31987ef1de4..cedf41613eb 100644
--- a/iam_group.py
+++ b/iam_group.py
@@ -185,7 +185,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/iam_managed_policy.py b/iam_managed_policy.py
index f86f019d536..eabf03b23d7 100644
--- a/iam_managed_policy.py
+++ b/iam_managed_policy.py
@@ -133,7 +133,7 @@
 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py
index 16abae17087..8b78eee02f9 100644
--- a/iam_mfa_device_info.py
+++ b/iam_mfa_device_info.py
@@ -64,7 +64,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def list_mfa_devices(connection, module):
diff --git a/iam_password_policy.py b/iam_password_policy.py
index 19614d26da6..00b4f8872c0 100644
--- a/iam_password_policy.py
+++ b/iam_password_policy.py
@@ -105,7 +105,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/iam_role.py b/iam_role.py
index 09a86a54e82..a1aea8a5848 100644
--- a/iam_role.py
+++ b/iam_role.py
@@ -202,7 +202,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/iam_role_info.py b/iam_role_info.py
index 84e9a31718e..3d6e6bdc597 100644
--- a/iam_role_info.py
+++ b/iam_role_info.py
@@ -153,7 +153,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/iam_saml_federation.py b/iam_saml_federation.py
index f79e4c2c64c..b20f44d3690 100644
--- a/iam_saml_federation.py
+++ b/iam_saml_federation.py
@@ -108,7 +108,7 @@
 except ImportError:
     pass
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/iam_server_certificate.py b/iam_server_certificate.py
index f3d5c5808df..4b8ee782ddb 100644
--- a/iam_server_certificate.py
+++ b/iam_server_certificate.py
@@ -113,7 +113,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py
index ee0dc590dc5..ac33a36f1a2 100644
--- a/iam_server_certificate_info.py
+++ b/iam_server_certificate_info.py
@@ -84,7 +84,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def get_server_certs(iam, name=None):
diff --git a/inspector_target.py b/inspector_target.py
index a664f2f2dd5..9ebdf764002 100644
--- a/inspector_target.py
+++ b/inspector_target.py
@@ -99,7 +99,7 @@
     sample: "2018-01-29T13:48:51.958000+00:00"
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
     ansible_dict_to_boto3_tag_list,
diff --git a/kinesis_stream.py b/kinesis_stream.py
index e4c5d76df80..001fad26546 100644
--- a/kinesis_stream.py
+++ b/kinesis_stream.py
@@ -191,7 +191,7 @@
 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
diff --git a/lightsail.py b/lightsail.py
index 5e403515412..3b29fa0ba13 100644
--- a/lightsail.py
+++ b/lightsail.py
@@ -161,7 +161,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py
index 799ff629df8..dc956fd5337 100644
--- a/lightsail_static_ip.py
+++ b/lightsail_static_ip.py
@@ -74,7 +74,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/msk_cluster.py b/msk_cluster.py
index 651cd94e6af..8bd8f9bba13 100644
--- a/msk_cluster.py
+++ b/msk_cluster.py
@@ -266,7 +266,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
     camel_dict_to_snake_dict,
     compare_aws_tags,
diff --git a/msk_config.py b/msk_config.py
index 812eba16dee..7f7874b74d4 100644
--- a/msk_config.py
+++ b/msk_config.py
@@ -99,7 +99,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
     camel_dict_to_snake_dict,
     AWSRetry,
diff --git a/opensearch_info.py b/opensearch_info.py
index 700ad26fd75..9ef4a1eac37 100644
--- a/opensearch_info.py
+++ b/opensearch_info.py
@@ -449,7 +449,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
     AWSRetry,
     boto3_tag_list_to_ansible_dict,
diff --git a/redshift.py b/redshift.py
index 27e95989347..f218e0a70c9 100644
--- a/redshift.py
+++ b/redshift.py
@@ -264,7 +264,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py
index 1c42ea80208..be4a5cbb46d 100644
--- a/redshift_cross_region_snapshots.py
+++ b/redshift_cross_region_snapshots.py
@@ -88,7 +88,7 @@
 RETURN = ''' # '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 class SnapshotController(object):
diff --git a/redshift_info.py b/redshift_info.py
index ff4da774ea1..2b94e313640 100644
--- a/redshift_info.py
+++ b/redshift_info.py
@@ -282,7 +282,7 @@
 except ImportError:
     pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py
index 3c7ca31f500..902cee75282 100644
--- a/redshift_subnet_group.py
+++ b/redshift_subnet_group.py
@@ -101,7 +101,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
diff --git a/s3_bucket_info.py b/s3_bucket_info.py
index 541a02b0f93..b4f72dd55a0 100644
--- a/s3_bucket_info.py
+++ b/s3_bucket_info.py
@@ -406,7 +406,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py
index 645ca698974..fa0424b40c2 100644
--- a/s3_bucket_notification.py
+++ b/s3_bucket_notification.py
@@ -160,7 +160,7 @@
     type: list
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 try:
diff --git a/s3_cors.py b/s3_cors.py
index 753e395f9eb..e7dc16cbaa7 100644
--- a/s3_cors.py
+++ b/s3_cors.py
@@ -103,7 +103,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
diff --git a/s3_lifecycle.py b/s3_lifecycle.py
index b434c09b051..19e62093174 100644
--- a/s3_lifecycle.py
+++ b/s3_lifecycle.py
@@ -227,7 +227,7 @@
 except ImportError:
     pass # handled by AnsibleAwsModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
 from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
diff --git a/s3_logging.py b/s3_logging.py
index 011baa951da..5e600582d9c 100644
--- a/s3_logging.py
+++ b/s3_logging.py
@@ -66,7 +66,7 @@
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py
index dff5668212c..d4c73e55267 100644
--- a/s3_metrics_configuration.py
+++ b/s3_metrics_configuration.py
@@ -101,7 +101,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
diff --git a/s3_sync.py b/s3_sync.py
index 80e3db0bd89..19466f21f26 100644
--- a/s3_sync.py
+++ b/s3_sync.py
@@ -263,7 +263,7 @@
 from ansible.module_utils._text import to_text
 # import module snippets
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag
diff --git a/s3_website.py b/s3_website.py
index 81d3169cdb1..f5ba78bf746 100644
--- a/s3_website.py
+++ b/s3_website.py
@@ -168,7 +168,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py
index c4c434e3252..d46267b278d 100644
--- a/secretsmanager_secret.py
+++ b/secretsmanager_secret.py
@@ -181,7 +181,7 @@
 '''
 from ansible.module_utils._text import to_bytes
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
diff --git a/ses_identity.py b/ses_identity.py
index 997692df6a0..c68f3984c65 100644
--- a/ses_identity.py
+++ b/ses_identity.py
@@ -220,7 +220,7 @@
 '''
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 import time
diff --git a/ses_identity_policy.py b/ses_identity_policy.py
index 16d9f1deda2..4aae1e933a9 100644
--- a/ses_identity_policy.py
+++ b/ses_identity_policy.py
@@ -85,7 +85,7 @@
     sample: [ExamplePolicy]
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry
 import json
diff --git a/ses_rule_set.py b/ses_rule_set.py
index b42ac8088f3..8dd85dfe35b 100644
--- a/ses_rule_set.py
+++ b/ses_rule_set.py
@@ -100,7 +100,7 @@
     }]
 """
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
 try:
diff --git a/sns.py b/sns.py
index 44160e53a52..798bf555318 100644
--- a/sns.py
+++ b/sns.py
@@ -134,7 +134,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
diff --git a/sns_topic.py b/sns_topic.py
index 8ef63690fea..166fb68a66f 100644
--- a/sns_topic.py
+++ b/sns_topic.py
@@ -325,7 +325,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
 from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
diff --git a/sns_topic_info.py b/sns_topic_info.py
index d3180ed6584..0244b2ff74e 100644
--- a/sns_topic_info.py
+++ b/sns_topic_info.py
@@ -135,7 +135,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
 from ansible_collections.community.aws.plugins.module_utils.sns import get_info
diff --git a/sqs_queue.py b/sqs_queue.py
index 371b1f514a5..ee06746e7ef 100644
--- a/sqs_queue.py
+++ b/sqs_queue.py
@@ -244,7 +244,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
diff --git a/ssm_parameter.py b/ssm_parameter.py
index b7544d4a569..8647d9886be 100644
--- a/ssm_parameter.py
+++ b/ssm_parameter.py
@@ -219,7 +219,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py
index c141610bbe0..8bab4f7fecd 100644
--- a/stepfunctions_state_machine.py
+++ b/stepfunctions_state_machine.py
@@ -86,7 +86,7 @@
     returned: always
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
                                                                      AWSRetry,
                                                                      compare_aws_tags,
diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py
index aacfa987f4a..17273f8146c 100644
--- a/stepfunctions_state_machine_execution.py
+++ b/stepfunctions_state_machine_execution.py
@@ -97,7 +97,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
diff --git a/storagegateway_info.py b/storagegateway_info.py
index 3f3c3ae2f94..252c13f87ca 100644
--- a/storagegateway_info.py
+++ b/storagegateway_info.py
@@ -174,7 +174,7 @@
     region: eu-west-3
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 try:
diff --git a/sts_assume_role.py b/sts_assume_role.py
index 8e5a3b4fed2..fe29cd3c62a 100644
--- a/sts_assume_role.py
+++ b/sts_assume_role.py
@@ -102,7 +102,7 @@
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 try:
diff --git a/sts_session_token.py b/sts_session_token.py
index 03df560e9ce..77e89f79687 100644
--- a/sts_session_token.py
+++ b/sts_session_token.py
@@ -81,7 +81,7 @@
 except ImportError:
     pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 def normalize_credentials(credentials):
diff --git a/waf_condition.py b/waf_condition.py
index 63585d50cbb..2f9f16d116a 100644
--- a/waf_condition.py
+++ b/waf_condition.py
@@ -406,7 +406,7 @@
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
diff --git a/waf_info.py b/waf_info.py
index 6a49a886e9c..a69d9793348 100644
--- a/waf_info.py
+++ b/waf_info.py
@@ -116,7 +116,7 @@
     ]
 '''
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
diff --git a/waf_rule.py b/waf_rule.py
index a994b183149..116ba87ceb9 100644
--- a/waf_rule.py
+++ b/waf_rule.py
@@ -144,7 +144,7 @@
 except ImportError:
     pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 from ansible_collections.amazon.aws.plugins.module_utils.waf import (
     MATCH_LOOKUP,
diff --git a/waf_web_acl.py b/waf_web_acl.py
index 9d5ad59e46f..a6f84aa23db 100644
--- a/waf_web_acl.py
+++ b/waf_web_acl.py
@@ -167,7 +167,7 @@
 import re
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import
camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.waf import ( diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index 7a9011e9b3c..83375c89002 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -121,7 +121,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index b92c9a816d9..e800ed0b499 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -76,7 +76,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags diff --git a/wafv2_resources.py b/wafv2_resources.py index 527ee108732..db59b91197b 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -66,7 +66,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 3a2a7b5dd32..4833d7657f1 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -55,7 +55,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 8e46853c8d8..e2751b9b438 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -204,7 +204,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index 46c44801a63..b59d4d613f4 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -93,7 +93,7 @@ except 
ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index f91fe64e608..e5770cd7439 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -319,7 +319,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 13be05db5c3..649cf10b884 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -95,7 +95,7 @@ except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls From 5aa9b17271e2ca770e98370abe1acf83c825f851 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 11 Jan 2023 14:00:15 +0100 Subject: [PATCH 609/683] [6.0.0] codebuild_project - update tags param to use dict rather than list of dicts (#1643) [6.0.0] codebuild_project - update tags param to use dict rather than list of dicts SUMMARY Passing list of dicts was deprecated, move it to dict fixes: #1546 Originally deprecated in #1221 as part of the tagging cleanup. ISSUE TYPE Feature Pull Request COMPONENT NAME codebuild_project ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- codebuild_project.py | 248 ++++++++++++++++++++++--------------------- 1 file changed, 128 insertions(+), 120 deletions(-) diff --git a/codebuild_project.py b/codebuild_project.py index a1bd310eba5..cd372258d67 100644 --- a/codebuild_project.py +++ b/codebuild_project.py @@ -14,6 +14,7 @@ short_description: Create or delete an AWS CodeBuild project notes: - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html). + - I(tags) changed from boto3 format to standard dict format in release 6.0.0. description: - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code. - Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild). @@ -137,23 +138,6 @@ description: - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts. type: str - tags: - description: - - A set of tags for the build project. 
- - Mutually exclusive with the I(resource_tags) parameter. - - In release 6.0.0 this parameter will accept a simple dictionary - instead of the list of dictionaries format. To use the simple - dictionary format prior to release 6.0.0 the I(resource_tags) can - be used instead of I(tags). - type: list - elements: dict - suboptions: - key: - description: The name of the Tag. - type: str - value: - description: The value of the Tag. - type: str vpc_config: description: - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC. @@ -164,32 +148,12 @@ default: 'present' choices: ['present', 'absent'] type: str - resource_tags: - description: - - A dictionary representing the tags to be applied to the build project. - - If the I(resource_tags) parameter is not set then tags will not be modified. - - Mutually exclusive with the I(tags) parameter. - type: dict - required: false - purge_tags: - description: - - If I(purge_tags=true) and I(tags) is set, existing tags will be purged - from the resource to match exactly what is defined by I(tags) parameter. - - If the I(resource_tags) parameter is not set then tags will not be modified, even - if I(purge_tags=True). - - Tag keys beginning with C(aws:) are reserved by Amazon and can not be - modified. As such they will be ignored for the purposes of the - I(purge_tags) parameter. See the Amazon documentation for more information - U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions). - type: bool - default: true - required: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - + - amazon.aws.boto3.modules + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules ''' EXAMPLES = r''' @@ -326,99 +290,142 @@ sample: "2018-04-17T16:56:03.245000+02:00" ''' +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule +class CodeBuildAnsibleAWSError(AnsibleAWSError): + pass -def create_or_update_project(client, params, module): - resp = {} - name = params['name'] - # clean up params - formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) - permitted_create_params = get_boto3_client_method_parameters(client, 'create_project') - permitted_update_params = get_boto3_client_method_parameters(client, 'update_project') +def do_create_project(client, params, formatted_params): - formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) - 
formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) + if params["source"] is None or params["artifacts"] is None: + raise CodeBuildAnsibleAWSError( + message="The source and artifacts parameters must be provided " + "when creating a new project. No existing project was found.") - # Check if project with that name already exists and if so update existing: - found = describe_project(client=client, name=name, module=module) - changed = False + if params["tags"] is not None: + formatted_params["tags"] = ansible_dict_to_boto3_tag_list( + params["tags"], + tag_name_key_name="key", + tag_value_key_name="value" + ) + + permitted_create_params = get_boto3_client_method_parameters(client, "create_project") + formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) - if 'name' in found: - found_project = found - found_tags = found_project.pop('tags', []) - # Support tagging using a dict instead of the list of dicts - if params['resource_tags'] is not None: - if params['purge_tags']: - tags = dict() - else: - tags = boto3_tag_list_to_ansible_dict(found_tags) - tags.update(params['resource_tags']) - formatted_update_params['tags'] = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') - - resp = update_project(client=client, params=formatted_update_params, module=module) - updated_project = resp['project'] - - # Prep both dicts for sensible change comparison: - found_project.pop('lastModified') - updated_project.pop('lastModified') - updated_tags = updated_project.pop('tags', []) - found_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(found_tags) - updated_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(updated_tags) - - if updated_project != found_project: - changed = True - updated_project['tags'] = updated_tags - return resp, changed # Or create new project: try: - if params['source'] is None or params['artifacts'] is None: - module.fail_json( - "The source and artifacts parameters must be provided when " - "creating a new project. 
No existing project was found.") resp = client.create_project(**formatted_create_params) changed = True return resp, changed except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create CodeBuild project") + raise CodeBuildAnsibleAWSError( + message="Unable to create CodeBuild project", + exception=e, + ) + + +def merge_tags(found_tags, tags, purge_tags): + if purge_tags: + return tags + + merged_tags = boto3_tag_list_to_ansible_dict(found_tags) + merged_tags.update(tags) + return merged_tags + + +def format_tags(tags): + return ansible_dict_to_boto3_tag_list( + tags, + tag_name_key_name="key", + tag_value_key_name="value", + ) + + +def do_update_project(client, params, formatted_params, found_project): + permitted_update_params = get_boto3_client_method_parameters(client, "update_project") + formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) + + found_tags = found_project.pop('tags', []) + if params["tags"] is not None: + formatted_update_params["tags"] = format_tags( + merge_tags(found_tags, params["tags"], params["purge_tags"]), + ) + + resp = update_project(client=client, params=formatted_update_params) + updated_project = resp["project"] + + # Prep both dicts for sensible change comparison: + found_project.pop("lastModified") + updated_project.pop("lastModified") + updated_tags = updated_project.pop("tags", []) + found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags) + updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags) + + changed = (updated_project != found_project) + + updated_project["tags"] = updated_tags + return resp, changed -def update_project(client, params, module): +def create_or_update_project(client, params): + resp = {} name = params['name'] + # clean up params + formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) + + # Check if project with that name already exists and if so update existing: + found = describe_project(client=client, name=name) + changed = False + + if "name" not in found: + return do_create_project(client, params, formatted_params) + + return do_update_project(client, params, formatted_params, found) + + +def update_project(client, params): + name = params["name"] try: resp = client.update_project(**params) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update CodeBuild project") + raise CodeBuildAnsibleAWSError( + message="Unable to update CodeBuild project", + exception=e, + ) -def delete_project(client, name, module): - found = describe_project(client=client, name=name, module=module) - changed = False - if 'name' in found: - # Mark as changed when a project with that name existed before calling delete - changed = True +def delete_project(client, name): + found = describe_project(client=client, name=name) + if "name" not in found: + return {}, False + try: resp = client.delete_project(name=name) - return resp, changed + return resp, True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete CodeBuild project") + raise CodeBuildAnsibleAWSError( + message="Unable to delete CodeBuild project", + exception=e, + ) -def describe_project(client, name, module): +def describe_project(client, name): project = {} try: projects = client.batch_get_projects(names=[name])['projects']
@@ -426,7 +433,10 @@ def describe_project(client, name, module): project = projects[0] return project except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe CodeBuild projects") + raise CodeBuildAnsibleAWSError( + message="Unable to describe CodeBuild projects", + exception=e, + ) def format_project_result(project_result): @@ -450,8 +460,7 @@ def main(): service_role=dict(), timeout_in_minutes=dict(type='int', default=60), encryption_key=dict(no_log=False), - tags=dict(type='list', elements='dict'), - resource_tags=dict(type='dict'), + tags=dict(type='dict', aliases=["resource_tags"]), purge_tags=dict(type='bool', default=True), vpc_config=dict(type='dict'), state=dict(choices=['present', 'absent'], default='present') @@ -463,22 +472,21 @@ def main(): state = module.params.get('state') changed = False - if module.params['tags']: - module.deprecate( - 'The tags parameter currently uses a non-standard format and has ' - 'been deprecated. In release 6.0.0 this paramater will accept ' - 'a simple key/value pair dictionary instead of the current list ' - 'of dictionaries. It is recommended to migrate to using the ' - 'resource_tags parameter which already accepts the simple dictionary ' - 'format.', version='6.0.0', collection_name='community.aws') - - if state == 'present': - project_result, changed = create_or_update_project( - client=client_conn, - params=module.params, - module=module) - elif state == 'absent': - project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module) + try: + if state == 'present': + project_result, changed = create_or_update_project( + client=client_conn, + params=module.params, + ) + elif state == 'absent': + project_result, changed = delete_project( + client=client_conn, + name=module.params['name'], + ) + except CodeBuildAnsibleAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) formatted_result = format_project_result(project_result) module.exit_json(changed=changed, **formatted_result) From d4528418fd3ae3f0446912d9bd0e006489c1f08c Mon Sep 17 00:00:00 2001 From: Matthew Davis <7035647+mdavis-xyz@users.noreply.github.com> Date: Thu, 12 Jan 2023 00:00:20 +1100 Subject: [PATCH 610/683] make s3_lifecycle not call put_lifecycle_configuration if there is no change to put (#1629) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit make s3_lifecycle not call put_lifecycle_configuration if there is no… SUMMARY Fixes #1624 ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_lifecycle ADDITIONAL INFORMATION I have not written integration tests for this. My MWE in #1624 used multiple hosts (all localhost). I don't know how to add that here. 
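In outline, the fix gates both the put_bucket_lifecycle_configuration call and the eventual-consistency wait loop on the result of the comparison. A simplified sketch of the resulting control flow, reusing the module's own helper names (client, module, name and the rule variables are assumed to be set up as in the module):

(changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule)
if changed:
    # Only touch the API when the computed configuration differs from what the bucket reports.
    client.put_bucket_lifecycle_configuration(aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration)
    # ...then re-read the rules several times to ride out eventual consistency...
else:
    _retries = 0  # nothing was written, so there is nothing to wait for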
Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- s3_lifecycle.py | 129 +++++++++++++++++++++++++----------------------- 1 file changed, 67 insertions(+), 62 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 19e62093174..e0f8caa91c5 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -467,38 +467,40 @@ def create_lifecycle_rule(client, module): (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) - - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpretted this as not changing anything - changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_configuration) + except is_boto3_error_message('At least one action needs to be specified in a rule'): + # Amazon interpreted this as not changing anything + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again a couple of times. + # Thus try to read the rule a few times in a row to check if it has changed.
+ time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + new_rules, + new_rule) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name) @@ -521,36 +523,39 @@ def destroy_lifecycle_rule(client, module): current_lifecycle_rules = fetch_rules(client, module, name) changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) - # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration - try: - if lifecycle_obj['Rules']: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) - elif current_lifecycle_rules: - changed = True - client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket or, if there are no rules left, delete lifecycle configuration + try: + if lifecycle_obj['Rules']: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_obj) + elif current_lifecycle_rules: + changed = True + client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again a couple of times. + # Thus try to read the rule a few times in a row to check if it has changed.
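+ # (Poll at most 10 times at 5-second intervals; the loop only settles once the
+ # rules have read back unchanged 6 times in a row.)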
+ time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name) From 0ed98b45d4db7088d1371329cc0db79a430f1bf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E4=B8=B0?= Date: Wed, 11 Jan 2023 21:00:25 +0800 Subject: [PATCH 611/683] ecs_service supports constraints and strategy update (#1601) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ecs_service supports constraints and strategy update SUMMARY ISSUE TYPE Bugfix Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION Related issue: #1414. Reviewed-by: Markus Bergholz Reviewed-by: 周丰 Reviewed-by: Alina Buzachis --- ecs_service.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index e69ad4676f8..c6980c6f2d6 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -819,14 +819,24 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan response = self.ecs.create_service(**params) return self.jsonize(response['service']) - def update_service(self, service_name, cluster_name, task_definition, - desired_count, deployment_configuration, network_configuration, - health_check_grace_period_seconds, force_new_deployment, capacity_provider_strategy): + def update_service(self, service_name, cluster_name, task_definition, desired_count, + deployment_configuration, placement_constraints, placement_strategy, + network_configuration, health_check_grace_period_seconds, + force_new_deployment, capacity_provider_strategy): params = dict( cluster=cluster_name, service=service_name, taskDefinition=task_definition, deploymentConfiguration=deployment_configuration) + # filter placement_constraints and keep only those where value is not None + # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation + if placement_constraints: + params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} + for constraint in placement_constraints] + + if placement_strategy: + params['placementStrategy'] = placement_strategy + if network_configuration: params['networkConfiguration'] = network_configuration if force_new_deployment: @@ -1038,6 +1048,8 @@ def main(): task_definition, module.params['desired_count'], deploymentConfiguration, + module.params['placement_constraints'], + module.params['placement_strategy'], network_configuration, module.params['health_check_grace_period_seconds'], module.params['force_new_deployment'], From 636a46c0b63300d9a1f619291f4b39ff3b737790 Mon Sep 17 00:00:00 2001 From: rwha Date: Wed, 18 Jan 2023 14:49:39 -0500 Subject: [PATCH 612/683] ecs_ecr - Add encryption_configuration option (#1623) ecs_ecr - Add encryption_configuration option SUMMARY Adds an encryption_configuration option for new repositories to allow specifying a KMS key. Fixes #1203.
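At the API level the option maps onto ECR's encryptionConfiguration argument, which can only be set at repository creation time. A minimal boto3 sketch of the call the module ends up making; the repository and alias names are taken from the EXAMPLES below and are placeholders, not anything special:

import boto3

ecr = boto3.client("ecr")
# snake_dict_to_camel_dict turns the module's encryption_type/kms_key options into this shape.
ecr.create_repository(
    repositoryName="uses-custom-kms-key",
    imageTagMutability="MUTABLE",
    encryptionConfiguration={"encryptionType": "KMS", "kmsKey": "custom-kms-key-alias"},
)

Because the setting is creation-only, the change also makes run() bail out with 'Cannot modify repository encryption type' when the requested configuration differs on an existing repository.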
ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_ecr ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- ecs_ecr.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/ecs_ecr.py b/ecs_ecr.py index 2a70a2aa520..fd335928e0a 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -85,6 +85,24 @@ default: false type: bool version_added: 1.3.0 + encryption_configuration: + description: + - The encryption configuration for the repository. + required: false + suboptions: + encryption_type: + description: + - The encryption type to use. + choices: [AES256, KMS] + default: 'AES256' + type: str + kms_key: + description: + - If I(encryption_type=KMS), specify the KMS key to use for encryption. + - The alias, key ID, or full ARN of the KMS key can be specified. + type: str + type: dict + version_added: 5.2.0 author: - David M. Lee (@leedm777) extends_documentation_fragment: @@ -161,6 +179,13 @@ community.aws.ecs_ecr: name: needs-no-lifecycle-policy purge_lifecycle_policy: true + +- name: set-encryption-configuration + community.aws.ecs_ecr: + name: uses-custom-kms-key + encryption_configuration: + encryption_type: KMS + kms_key: custom-kms-key-alias ''' RETURN = ''' @@ -201,6 +226,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @@ -248,17 +274,21 @@ def get_repository_policy(self, registry_id, name): except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): return None - def create_repository(self, registry_id, name, image_tag_mutability): + def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: default_registry_id = self.sts.get_caller_identity().get('Account') if registry_id != default_registry_id: raise Exception('Cannot create repository in registry {0}.' 
'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + if encryption_configuration is None: + encryption_configuration = dict(encryptionType='AES256') + if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, - imageTagMutability=image_tag_mutability).get('repository') + imageTagMutability=image_tag_mutability, + encryptionConfiguration=encryption_configuration).get('repository') self.changed = True return repo else: @@ -411,6 +441,7 @@ def run(ecr, params): lifecycle_policy_text = params['lifecycle_policy'] purge_lifecycle_policy = params['purge_lifecycle_policy'] scan_on_push = params['scan_on_push'] + encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) # Parse policies, if they are given try: @@ -437,10 +468,16 @@ result['created'] = False if not repo: - repo = ecr.create_repository(registry_id, name, image_tag_mutability) + repo = ecr.create_repository( + registry_id, name, image_tag_mutability, encryption_configuration) result['changed'] = True result['created'] = True else: + if encryption_configuration is not None: + if repo.get('encryptionConfiguration') != encryption_configuration: + result['msg'] = 'Cannot modify repository encryption type' + return False, result + repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) result['repository'] = repo @@ -550,7 +587,18 @@ def main(): purge_policy=dict(required=False, type='bool'), lifecycle_policy=dict(required=False, type='json'), purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)) + scan_on_push=(dict(required=False, type='bool', default=False)), + encryption_configuration=dict( + required=False, + type='dict', + options=dict( + encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']), + kms_key=dict(required=False, type='str', no_log=False), + ), + required_if=[ + ['encryption_type', 'KMS', ['kms_key']], + ], + ), ) mutually_exclusive = [ ['policy', 'purge_policy'], From c4f731de0c043fc70a9d887787f89e4cad2ba277 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E4=B8=B0?= Date: Mon, 23 Jan 2023 19:33:38 +0800 Subject: [PATCH 613/683] ecs_service supports load balancer update (#1625) ecs_service supports load balancer update SUMMARY ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION As the AWS docs say, an ECS service now supports updating its load balancers through the UpdateService API when the deploymentController type is ECS. This pull request adds that support.
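Condensed to its core, the gating added below in main(), with the names used by ecs_service:

deployment_type = existing['deploymentController']['type']
if (existing['loadBalancers'] or []) != loadBalancers:
    # Only deployment controllers that can take new targets may change load balancers.
    if deployment_type not in ['CODE_DEPLOY', 'ECS']:
        module.fail_json(msg="It is not possible to update the load balancers of an existing service")

# Only the ECS controller passes the new targets through UpdateService;
# CODE_DEPLOY continues to manage its target groups through its own deployment flow.
updatedLoadBalancers = loadBalancers if deployment_type == 'ECS' else []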
Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- ecs_service.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index c6980c6f2d6..4907187f3ab 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -822,7 +822,7 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_configuration, placement_constraints, placement_strategy, network_configuration, health_check_grace_period_seconds, - force_new_deployment, capacity_provider_strategy): + force_new_deployment, capacity_provider_strategy, load_balancers): params = dict( cluster=cluster_name, service=service_name, taskDefinition=task_definition, deploymentConfiguration=deployment_configuration) @@ -849,6 +849,9 @@ def update_service(self, service_name, cluster_name, task_definition, desired_co if desired_count is not None: params['desiredCount'] = desired_count + if load_balancers: + params['loadBalancers'] = load_balancers + response = self.ecs.update_service(**params) return self.jsonize(response['service']) @@ -1027,7 +1030,8 @@ def main(): if 'capacityProviderStrategy' in existing.keys(): module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") if (existing['loadBalancers'] or []) != loadBalancers: - if existing['deploymentController']['type'] != 'CODE_DEPLOY': + # fails if deployment type is not CODE_DEPLOY or ECS + if existing['deploymentController']['type'] not in ['CODE_DEPLOY', 'ECS']: module.fail_json(msg="It is not possible to update the load balancers of an existing service") if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY': @@ -1042,6 +1046,8 @@ if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']: module.fail_json(msg="It is not currently supported to change tags of an existing service") + updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'], @@ -1054,6 +1060,7 @@ module.params['health_check_grace_period_seconds'], module.params['force_new_deployment'], capacityProviders, + updatedLoadBalancers, ) else: From bf18f3e099de7b877f1f8f8d803c42f214b932b2 Mon Sep 17 00:00:00 2001 From: Justin McCormick Date: Mon, 23 Jan 2023 05:59:19 -0600 Subject: [PATCH 614/683] ecs_cluster capacity provider strategy (#1640) ecs_cluster capacity provider strategy SUMMARY Fixes #770 - Add AWS ECS_Cluster Capacity Provider Strategy Support ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_cluster ADDITIONAL INFORMATION When creating or updating an ECS Cluster, configure the capacity providers and capacity provider strategy as provided by the user. Given playbook task: - name: Create an ECS Cluster with Capacity Providers ecs_cluster: name: default state: present capacity_providers: - FARGATE - FARGATE_SPOT capacity_provider_strategy: - capacity_provider: FARGATE base: 1 weight: 1 - capacity_provider: FARGATE_SPOT weight: 100 Previously this would throw "Unsupported parameter", and no other parameter existed to expose these features. Now you should see changed: [localhost], with the resulting ECS Cluster having the same providers and provider_strategy fields as provided by the user.
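The interesting part of the implementation is the change detection between the module's snake_case parameters and the camelCase strategy the ECS API reports. A simplified sketch of the comparison used below; the real code also handles None values and the purge flag:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

def strategy_update_needed(requested_cps, existing_cps):
    # Treat the strategy as an unordered set: any entry present on one side
    # but missing on the other means put_cluster_capacity_providers must run.
    for strategy in requested_cps:
        if snake_dict_to_camel_dict(strategy) not in existing_cps:
            return True
    for strategy in existing_cps:
        if camel_dict_to_snake_dict(strategy) not in requested_cps:
            return True
    return False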
Reviewed-by: Markus Bergholz Reviewed-by: Justin McCormick Reviewed-by: Alina Buzachis --- ecs_cluster.py | 141 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 136 insertions(+), 5 deletions(-) diff --git a/ecs_cluster.py b/ecs_cluster.py index ebee2760747..aaa94729153 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -41,6 +41,41 @@ required: false type: int default: 10 + capacity_providers: + version_added: 5.2.0 + description: + - List of capacity providers to use for the cluster. + required: false + type: list + elements: str + capacity_provider_strategy: + version_added: 5.2.0 + description: + - List of capacity provider strategies to use for the cluster. + required: false + type: list + elements: dict + suboptions: + capacity_provider: + description: + - Name of capacity provider. + type: str + weight: + description: + - The relative percentage of the total number of launched tasks that should use the specified provider. + type: int + base: + description: + - How many tasks, at a minimum, should use the specified provider. + type: int + purge_capacity_providers: + version_added: 5.2.0 + description: + - Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility. + - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true). + required: false + type: bool + default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -56,6 +91,21 @@ name: default state: present +- name: Cluster creation with capacity providers and strategies. + community.aws.ecs_cluster: + name: default + state: present + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + purge_capacity_providers: True + - name: Cluster deletion community.aws.ecs_cluster: name: default @@ -75,6 +125,16 @@ description: how many services are active in this cluster returned: 0 if a new cluster type: int +capacityProviders: + version_added: 5.2.0 + description: list of capacity providers used in this cluster + returned: always + type: list +defaultCapacityProviderStrategy: + version_added: 5.2.0 + description: list of capacity provider strategies used in this cluster + returned: always + type: list clusterArn: description: the ARN of the cluster just created type: str @@ -112,6 +172,8 @@ pass # Handled by AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict class EcsClusterManager: @@ -145,8 +207,26 @@ def describe_cluster(self, cluster_name): return c raise Exception("Unknown problem describing cluster %s." 
% cluster_name) - def create_cluster(self, clusterName='default'): - response = self.ecs.create_cluster(clusterName=clusterName) + def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(clusterName=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + response = self.ecs.create_cluster(**params) + return response['cluster'] + + def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(cluster=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + else: + params['capacityProviders'] = [] + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + else: + params['defaultCapacityProviderStrategy'] = [] + response = self.ecs.put_cluster_capacity_providers(**params) return response['cluster'] def delete_cluster(self, clusterName): @@ -159,7 +239,17 @@ def main(): state=dict(required=True, choices=['present', 'absent', 'has_instances']), name=dict(required=True, type='str'), delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10) + repeat=dict(required=False, type='int', default=10), + purge_capacity_providers=dict(required=False, type='bool', default=False), + capacity_providers=dict(required=False, type='list', elements='str'), + capacity_provider_strategy=dict(required=False, + type='list', + elements='dict', + options=dict(capacity_provider=dict(type='str'), + weight=dict(type='int'), + base=dict(type='int', default=0) + ) + ), ) required_together = [['state', 'name']] @@ -177,12 +267,53 @@ def main(): results = dict(changed=False) if module.params['state'] == 'present': + # Pull requested and existing capacity providers and strategies. + purge_capacity_providers = module.params['purge_capacity_providers'] + requested_cp = module.params['capacity_providers'] + requested_cps = module.params['capacity_provider_strategy'] if existing and 'status' in existing and existing['status'] == "ACTIVE": - results['cluster'] = existing + existing_cp = existing['capacityProviders'] + existing_cps = existing['defaultCapacityProviderStrategy'] + + if requested_cp is None: + requested_cp = [] + + # Check if capacity provider strategy needs to trigger an update. + cps_update_needed = False + if requested_cps is not None: + for strategy in requested_cps: + if snake_dict_to_camel_dict(strategy) not in existing_cps: + cps_update_needed = True + for strategy in existing_cps: + if camel_dict_to_snake_dict(strategy) not in requested_cps: + cps_update_needed = True + elif requested_cps is None and existing_cps != []: + cps_update_needed = True + + # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. + if not purge_capacity_providers: + module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' + ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', + date='2024-06-01', collection_name='community.aws') + cps_update_needed = False + requested_cp = existing_cp + requested_cps = existing_cps + + # If either the providers or strategy differ, update the cluster. 
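+ # (put_cluster_capacity_providers replaces the cluster's provider list and default
+ # strategy wholesale, so the module always sends the full desired state, never a delta.)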
+ if requested_cp != existing_cp or cps_update_needed: + if not module.check_mode: + results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) + results['changed'] = True + else: + results['cluster'] = existing else: if not module.check_mode: # doesn't exist. create it. - results['cluster'] = cluster_mgr.create_cluster(module.params['name']) + results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) results['changed'] = True # delete the cluster From ce725362fed193b5e8c6af66a2dc7d10ec7da2b8 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Mon, 23 Jan 2023 19:18:39 +0100 Subject: [PATCH 615/683] ecs_cluster: add default value to doc (#1676) ecs_cluster: add default value to doc SUMMARY missing default value in the docs section of #1640 not released yet. ISSUE TYPE Docs Pull Request COMPONENT NAME ecs_cluster Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- ecs_cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ecs_cluster.py b/ecs_cluster.py index aaa94729153..8b64a14abbd 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -68,6 +68,7 @@ description: - How many tasks, at a minimum, should use the specified provider. type: int + default: 0 purge_capacity_providers: version_added: 5.2.0 description: From 648d4fe209f446c362ecd12cdbc75640208b332a Mon Sep 17 00:00:00 2001 From: Eric Millbrandt Date: Tue, 31 Jan 2023 10:01:32 -0500 Subject: [PATCH 616/683] Add secret manager replication support (#827) Add secret manager replication support Signed-off-by: Eric Millbrandt eric.millbrandt@numerated.com SUMMARY Add support for regional secret replication. The component now supports: Creating a secret with a regional replica Adding a region replica to a secret Removing a region replica from a secret ISSUE TYPE Feature Pull Request COMPONENT NAME aws_secret ADDITIONAL INFORMATION https://aws.amazon.com/about-aws/whats-new/2021/03/aws-secrets-manager-provides-support-to-replicate-secrets-in-aws-secrets-manager-to-multiple-aws-regions/ https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/secretsmanager.html Reviewed-by: Eric Millbrandt Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley --- secretsmanager_secret.py | 112 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 2 deletions(-) diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index d46267b278d..337b28669bc 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -41,6 +41,24 @@ - Specifies a user-provided description of the secret. type: str default: '' + replica: + description: + - Specifies a list of regions and kms_key_ids (optional) to replicate the secret to + type: list + elements: dict + version_added: 5.3.0 + suboptions: + region: + description: + - Region to replicate secret to. 
+ type: str + required: true + kms_key_id: + description: + - Specifies the ARN or alias of the AWS KMS customer master key (CMK) in the + destination region to be used (alias/aws/secretsmanager is assumed if not specified). + type: str + required: false kms_key_id: description: - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be @@ -196,10 +214,13 @@ class Secret(object): """An object representation of the Secret described by the self.module args""" - def __init__(self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, - tags=None, lambda_arn=None, rotation_interval=None): + def __init__( + self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, + tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None, + ): self.name = name self.description = description + self.replica_regions = replica_regions self.kms_key_id = kms_key_id if secret_type == "binary": self.secret_type = "SecretBinary" @@ -223,6 +244,15 @@ def create_args(self): args["Description"] = self.description if self.kms_key_id: args["KmsKeyId"] = self.kms_key_id + if self.replica_regions: + add_replica_regions = [] + for replica in self.replica_regions: + if replica["kms_key_id"]: + add_replica_regions.append({'Region': replica["region"], + 'KmsKeyId': replica["kms_key_id"]}) + else: + add_replica_regions.append({'Region': replica["region"]}) + args["AddReplicaRegions"] = add_replica_regions if self.tags: args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) args[self.secret_type] = self.secret @@ -320,6 +350,35 @@ def put_resource_policy(self, secret): self.module.fail_json_aws(e, msg="Failed to update secret resource policy") return response + def remove_replication(self, name, regions): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + response = self.client.remove_regions_from_replication( + SecretId=name, + RemoveReplicaRegions=regions) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to remove replica regions") + return response + + def replicate_secret(self, name, regions): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + replica_regions = [] + for replica in regions: + if replica["kms_key_id"]: + replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]}) + else: + replica_regions.append({'Region': replica["region"]}) + response = self.client.replicate_secret_to_regions( + SecretId=name, + AddReplicaRegions=replica_regions) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to replicate secret") + return response + def restore_secret(self, name): if self.module.check_mode: self.module.exit_json(changed=True) try: @@ -424,12 +483,49 @@ def rotation_match(desired_secret, current_secret): return True +def compare_regions(desired_secret, current_secret): + """Compare secrets replication configuration + + Args: + desired_secret: the desired secret state, as a Secret object. + current_secret: secret reference as returned by the secretsmanager api.
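+
+    Example (illustrative values): a desired replica list of
+    [{"region": "eu-west-1", "kms_key_id": None}] checked against a current secret
+    whose ReplicationStatus lists only us-west-2 returns
+    ([{"region": "eu-west-1", "kms_key_id": None}], ["us-west-2"]).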
+ + Returns: bool + """ + regions_to_set_replication = [] + regions_to_remove_replication = [] + + if desired_secret.replica_regions is None: + return regions_to_set_replication, regions_to_remove_replication + + if desired_secret.replica_regions: + regions_to_set_replication = desired_secret.replica_regions + + for current_secret_region in current_secret.get("ReplicationStatus", []): + if regions_to_set_replication: + for desired_secret_region in regions_to_set_replication: + if current_secret_region["Region"] == desired_secret_region["region"]: + regions_to_set_replication.remove(desired_secret_region) + else: + regions_to_remove_replication.append(current_secret_region["Region"]) + else: + regions_to_remove_replication.append(current_secret_region["Region"]) + + return regions_to_set_replication, regions_to_remove_replication + + def main(): + replica_args = dict( + region=dict(type='str', required=True), + kms_key_id=dict(type='str', required=False), + ) + module = AnsibleAWSModule( argument_spec={ 'name': dict(required=True), 'state': dict(choices=['present', 'absent'], default='present'), 'description': dict(default=""), + 'replica': dict(type='list', elements='dict', options=replica_args), 'kms_key_id': dict(), 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), @@ -454,6 +550,7 @@ def main(): module.params.get('secret_type'), module.params.get('secret') or module.params.get('json_secret'), description=module.params.get('description'), + replica_regions=module.params.get('replica'), kms_key_id=module.params.get('kms_key_id'), resource_policy=module.params.get('resource_policy'), tags=module.params.get('tags'), @@ -492,6 +589,7 @@ def main(): if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True + current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name) current_resource_policy = current_resource_policy_response.get("ResourcePolicy") if compare_policies(secret.resource_policy, current_resource_policy): @@ -500,6 +598,7 @@ def main(): else: result = secrets_mgr.put_resource_policy(secret) changed = True + if module.params.get('tags') is not None: current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) @@ -509,6 +608,15 @@ def main(): if tags_to_remove: secrets_mgr.untag_secret(secret.name, tags_to_remove) changed = True + + regions_to_set_replication, regions_to_remove_replication = compare_regions(secret, current_secret) + if regions_to_set_replication: + secrets_mgr.replicate_secret(secret.name, regions_to_set_replication) + changed = True + if regions_to_remove_replication: + secrets_mgr.remove_replication(secret.name, regions_to_remove_replication) + changed = True + result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) if result.get('tags', None) is not None: result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', [])) From b7515932ae4ed34c75063c5cce89697e1700d8d9 Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Wed, 1 Feb 2023 13:05:37 +0100 Subject: [PATCH 617/683] update module using module_utils/cloudfront_facts.py (#1596) update module using module_utils/cloudfront_facts.py Depends-On: ansible-collections/amazon.aws#1265 SUMMARY update cloudfront_* modules, fix some bugs and add integration tests ISSUE TYPE Feature Pull Request Reviewed-by: Alina Buzachis 
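The mechanical part of the change is that the shared cloudfront_facts helper in module_utils now takes keyword arguments, so call sites move from positional flags to explicit names. A representative before/after from cloudfront_distribution.py as updated below:

# before
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)

# after
distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False)
distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id)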
Reviewed-by: Bikouo Aubin --- cloudfront_distribution.py | 12 +- cloudfront_distribution_info.py | 306 +++------------------------ cloudfront_invalidation.py | 38 ++-- cloudfront_origin_access_identity.py | 68 +++--- 4 files changed, 88 insertions(+), 336 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index d2e00f0221c..f7ff3b51553 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -2105,12 +2105,12 @@ def validate_attribute_with_allowed_values(self, attribute, attribute_name, allo def validate_distribution_from_caller_reference(self, caller_reference): try: - distributions = self.__cloudfront_facts_mgr.list_distributions(False) + distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) distribution_name = 'Distribution' distribution_config_name = 'DistributionConfig' distribution_ids = [dist.get('Id') for dist in distributions] for distribution_id in distribution_ids: - distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id) + distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) if distribution is not None: distribution_config = distribution[distribution_name].get(distribution_config_name) if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference: @@ -2128,13 +2128,13 @@ def validate_distribution_from_aliases_caller_reference(self, distribution_id, a if aliases and distribution_id is None: distribution_id = self.validate_distribution_id_from_alias(aliases) if distribution_id: - return self.__cloudfront_facts_mgr.get_distribution(distribution_id) + return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) return None except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference") def validate_distribution_id_from_alias(self, aliases): - distributions = self.__cloudfront_facts_mgr.list_distributions(False) + distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) if distributions: for distribution in distributions: distribution_aliases = distribution.get('Aliases', {}).get('Items', []) @@ -2253,12 +2253,12 @@ def main(): if not (update or create or delete): module.exit_json(changed=False) + config = {} if update or delete: config = distribution['Distribution']['DistributionConfig'] e_tag = distribution['ETag'] distribution_id = distribution['Distribution']['Id'] - else: - config = dict() + if update: config = camel_dict_to_snake_dict(config, reversible=True) diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py index 8898e895029..179e572e0c9 100644 --- a/cloudfront_distribution_info.py +++ b/cloudfront_distribution_info.py @@ -3,6 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) + __metaclass__ = type @@ -244,265 +245,18 @@ type: dict ''' -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - - -class CloudFrontServiceManager: - """Handles CloudFront Services""" - - def __init__(self, module): - self.module = module - - try: - 
self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - def get_distribution(self, distribution_id): - try: - distribution = self.client.get_distribution(aws_retry=True, Id=distribution_id) - return distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing distribution") - - def get_distribution_config(self, distribution_id): - try: - distribution = self.client.get_distribution_config(aws_retry=True, Id=distribution_id) - return distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing distribution configuration") - - def get_origin_access_identity(self, origin_access_identity_id): - try: - origin_access_identity = self.client.get_cloud_front_origin_access_identity(aws_retry=True, Id=origin_access_identity_id) - return origin_access_identity - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity") - - def get_origin_access_identity_config(self, origin_access_identity_id): - try: - origin_access_identity = self.client.get_cloud_front_origin_access_identity_config(aws_retry=True, Id=origin_access_identity_id) - return origin_access_identity - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") - - def get_invalidation(self, distribution_id, invalidation_id): - try: - invalidation = self.client.get_invalidation(aws_retry=True, DistributionId=distribution_id, Id=invalidation_id) - return invalidation - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing invalidation") - - def get_streaming_distribution(self, distribution_id): - try: - streaming_distribution = self.client.get_streaming_distribution(aws_retry=True, Id=distribution_id) - return streaming_distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - def get_streaming_distribution_config(self, distribution_id): - try: - streaming_distribution = self.client.get_streaming_distribution_config(aws_retry=True, Id=distribution_id) - return streaming_distribution - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - # Split out paginator to allow for the backoff decorator to function - @AWSRetry.jittered_backoff() - def _paginated_result(self, paginator_name, **params): - paginator = self.client.get_paginator(paginator_name) - results = paginator.paginate(**params).build_full_result() - return results - - def list_origin_access_identities(self): - try: - results = self._paginated_result('list_cloud_front_origin_access_identities') - origin_access_identity_list = results.get('CloudFrontOriginAccessIdentityList', {'Items': []}) - - if len(origin_access_identity_list['Items']) > 0: - return origin_access_identity_list['Items'] - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 
msg="Error listing cloud front origin access identities") - - def list_distributions(self, keyed=True): - try: - results = self._paginated_result('list_distributions') - distribution_list = results.get('DistributionList', {'Items': []}) - - if len(distribution_list['Items']) > 0: - distribution_list = distribution_list['Items'] - else: - return {} - - if not keyed: - return distribution_list - return self.keyed_list_helper(distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing distributions") - - def list_distributions_by_web_acl_id(self, web_acl_id): - try: - results = self._paginated_result('list_cloud_front_origin_access_identities', WebAclId=web_acl_id) - distribution_list = results.get('DistributionList', {'Items': []}) - - if len(distribution_list['Items']) > 0: - distribution_list = distribution_list['Items'] - else: - return {} - return self.keyed_list_helper(distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") - - def list_invalidations(self, distribution_id): - try: - results = self._paginated_result('list_invalidations', DistributionId=distribution_id) - invalidation_list = results.get('InvalidationList', {'Items': []}) - - if len(invalidation_list['Items']) > 0: - return invalidation_list['Items'] - return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing invalidations") - - def list_streaming_distributions(self, keyed=True): - try: - results = self._paginated_result('list_streaming_distributions') - streaming_distribution_list = results.get('StreamingDistributionList', {'Items': []}) - - if len(streaming_distribution_list['Items']) > 0: - streaming_distribution_list = streaming_distribution_list['Items'] - else: - return {} - - if not keyed: - return streaming_distribution_list - return self.keyed_list_helper(streaming_distribution_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing streaming distributions") - - def summary(self): - summary_dict = {} - summary_dict.update(self.summary_get_distribution_list(False)) - summary_dict.update(self.summary_get_distribution_list(True)) - summary_dict.update(self.summary_get_origin_access_identity_list()) - return summary_dict - - def summary_get_origin_access_identity_list(self): - try: - origin_access_identity_list = {'origin_access_identities': []} - origin_access_identities = self.list_origin_access_identities() - for origin_access_identity in origin_access_identities: - oai_id = origin_access_identity['Id'] - oai_full_response = self.get_origin_access_identity(oai_id) - oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} - origin_access_identity_list['origin_access_identities'].append(oai_summary) - return origin_access_identity_list - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") - - def summary_get_distribution_list(self, streaming=False): - try: - list_name = 'streaming_distributions' if streaming else 'distributions' - key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] - distribution_list = {list_name: []} - distributions = 
self.list_streaming_distributions(False) if streaming else self.list_distributions(False) - for dist in distributions: - temp_distribution = {} - for key_name in key_list: - temp_distribution[key_name] = dist[key_name] - temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])] - temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) - if not streaming: - temp_distribution['WebACLId'] = dist['WebACLId'] - invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) - if invalidation_ids: - temp_distribution['Invalidations'] = invalidation_ids - resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN']) - temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) - distribution_list[list_name].append(temp_distribution) - return distribution_list - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error generating summary of distributions") - except Exception as e: - self.module.fail_json(msg="Error generating summary of distributions - " + str(e), - exception=traceback.format_exc()) - - def get_etag_from_distribution_id(self, distribution_id, streaming): - distribution = {} - if not streaming: - distribution = self.get_distribution(distribution_id) - else: - distribution = self.get_streaming_distribution(distribution_id) - return distribution['ETag'] - - def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): - try: - invalidation_ids = [] - invalidations = self.list_invalidations(distribution_id) - for invalidation in invalidations: - invalidation_ids.append(invalidation['Id']) - return invalidation_ids - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") - - def get_distribution_id_from_domain_name(self, domain_name): - try: - distribution_id = "" - distributions = self.list_distributions(False) - distributions += self.list_streaming_distributions(False) - for dist in distributions: - if 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - if str(alias).lower() == domain_name.lower(): - distribution_id = dist['Id'] - break - return distribution_id - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") - - def get_aliases_from_distribution_id(self, distribution_id): - aliases = [] - try: - distributions = self.list_distributions(False) - for dist in distributions: - if dist['Id'] == distribution_id and 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - aliases.append(alias) - break - return aliases - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") - - def keyed_list_helper(self, list_to_key): - keyed_list = dict() - for item in list_to_key: - distribution_id = item['Id'] - if 'Items' in item['Aliases']: - aliases = item['Aliases']['Items'] - for alias in aliases: - keyed_list.update({alias: item}) - keyed_list.update({distribution_id: item}) - return keyed_list +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases): - 
facts[distribution_id].update(details) + facts[distribution_id] = details # also have a fixed key for accessing results/details returned facts['result'] = details facts['result']['DistributionId'] = distribution_id for alias in aliases: - facts[alias].update(details) + facts[alias] = details return facts @@ -530,7 +284,7 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - service_mgr = CloudFrontServiceManager(module) + service_mgr = CloudFrontFactsServiceManager(module) distribution_id = module.params.get('distribution_id') invalidation_id = module.params.get('invalidation_id') @@ -582,55 +336,47 @@ def main(): module.fail_json(msg='Error unable to source a distribution id from domain_name_alias') # set appropriate cloudfront id - if distribution_id and not list_invalidations: - facts = {distribution_id: {}} - aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) - for alias in aliases: - facts.update({alias: {}}) - if invalidation_id: - facts.update({invalidation_id: {}}) - elif distribution_id and list_invalidations: - facts = {distribution_id: {}} - aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) - for alias in aliases: - facts.update({alias: {}}) - elif origin_access_identity_id: - facts = {origin_access_identity_id: {}} - elif web_acl_id: - facts = {web_acl_id: {}} + if invalidation_id is not None and invalidation: + facts.update({invalidation_id: {}}) + if origin_access_identity_id and (origin_access_identity or origin_access_identity_config): + facts.update({origin_access_identity_id: {}}) + if web_acl_id: + facts.update({web_acl_id: {}}) # get details based on options if distribution: - facts_to_set = service_mgr.get_distribution(distribution_id) + facts_to_set = service_mgr.get_distribution(id=distribution_id) if distribution_config: - facts_to_set = service_mgr.get_distribution_config(distribution_id) + facts_to_set = service_mgr.get_distribution_config(id=distribution_id) if origin_access_identity: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id)) + facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id)) if origin_access_identity_config: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id)) + facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) if invalidation: - facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id) + facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id) facts[invalidation_id].update(facts_to_set) if streaming_distribution: - facts_to_set = service_mgr.get_streaming_distribution(distribution_id) + facts_to_set = service_mgr.get_streaming_distribution(id=distribution_id) if streaming_distribution_config: - facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id) + facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id) if list_invalidations: - facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)} + invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {} + facts_to_set = {'invalidations': invalidations} if 'facts_to_set' in vars(): + aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, 
distribution_id, aliases) # get list based on options if all_lists or list_origin_access_identities: - facts['origin_access_identities'] = service_mgr.list_origin_access_identities() + facts['origin_access_identities'] = service_mgr.list_origin_access_identities() or {} if all_lists or list_distributions: - facts['distributions'] = service_mgr.list_distributions() + facts['distributions'] = service_mgr.list_distributions() or {} if all_lists or list_streaming_distributions: - facts['streaming_distributions'] = service_mgr.list_streaming_distributions() + facts['streaming_distributions'] = service_mgr.list_streaming_distributions() or {} if list_distributions_by_web_acl_id: - facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id) + facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} if list_invalidations: - facts['invalidations'] = service_mgr.list_invalidations(distribution_id) + facts['invalidations'] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} # default summary option if summary: diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index b99a56c530e..dbf478e4408 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -152,9 +152,10 @@ class CloudFrontInvalidationServiceManager(object): Handles CloudFront service calls to AWS for invalidations """ - def __init__(self, module): + def __init__(self, module, cloudfront_facts_mgr): self.module = module self.client = module.client('cloudfront') + self.__cloudfront_facts_mgr = cloudfront_facts_mgr def create_invalidation(self, distribution_id, invalidation_batch): current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) @@ -174,28 +175,16 @@ def create_invalidation(self, distribution_id, invalidation_batch): self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): - current_invalidation = {} # find all invalidations for the distribution - try: - paginator = self.client.get_paginator('list_invalidations') - invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', []) - invalidation_ids = [inv['Id'] for inv in invalidations] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.") + invalidations = self.__cloudfront_facts_mgr.list_invalidations(distribution_id=distribution_id) # check if there is an invalidation with the same caller reference - for inv_id in invalidation_ids: - try: - invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation'] - caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id)) - if caller_ref == caller_reference: - current_invalidation = invalidation - break - - current_invalidation.pop('ResponseMetadata', None) - return current_invalidation + for invalidation in invalidations: + invalidation_info = self.__cloudfront_facts_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation['Id']) + if invalidation_info.get('InvalidationBatch', {}).get('CallerReference') == caller_reference: + 
invalidation_info.pop('ResponseMetadata', None) + return invalidation_info + return {} class CloudFrontInvalidationValidationManager(object): @@ -203,9 +192,9 @@ class CloudFrontInvalidationValidationManager(object): Manages CloudFront validations for invalidation batches """ - def __init__(self, module): + def __init__(self, module, cloudfront_facts_mgr): self.module = module - self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + self.__cloudfront_facts_mgr = cloudfront_facts_mgr def validate_distribution_id(self, distribution_id, alias): try: @@ -248,8 +237,9 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) - validation_mgr = CloudFrontInvalidationValidationManager(module) - service_mgr = CloudFrontInvalidationServiceManager(module) + cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) + validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr) + service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr) caller_reference = module.params.get('caller_reference') distribution_id = module.params.get('distribution_id') diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 2d9009a9b9b..e59c9439701 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -127,6 +127,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code class CloudFrontOriginAccessIdentityServiceManager(object): @@ -151,9 +152,10 @@ def create_origin_access_identity(self, caller_reference, comment): def delete_origin_access_identity(self, origin_access_identity_id, e_tag): try: - return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) + result = self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) + return result, True except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.") + self.module.fail_json_aws(e, msg="Error deleting Origin Access Identity.") def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): changed = False @@ -194,34 +196,45 @@ def __init__(self, module): self.module = module self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) - def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id): + def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True): try: - if origin_access_identity_id is None: - return - oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id) - if oai is not None: - return oai.get('ETag') - except (ClientError, BotoCoreError) as e: + return self.__cloudfront_facts_mgr.get_origin_access_identity(id=origin_access_identity_id, fail_if_error=False) + except is_boto3_error_code('NoSuchCloudFrontOriginAccessIdentity') as e: # pylint: disable=duplicate-except + if fail_if_missing: + self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") + return {} + except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, 
msg="Error getting etag from origin_access_identity.") + def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id, fail_if_missing): + oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing) + if oai is not None: + return oai.get('ETag') + def validate_origin_access_identity_id_from_caller_reference( self, caller_reference): - try: - origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() - origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] - for origin_access_identity_id in origin_origin_access_identity_ids: - oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id)) - temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference') - if temp_caller_reference == caller_reference: - return origin_access_identity_id - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.") + origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() + origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] + for origin_access_identity_id in origin_origin_access_identity_ids: + oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) + temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference') + if temp_caller_reference == caller_reference: + return origin_access_identity_id def validate_comment(self, comment): if comment is None: return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') return comment + def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference): + if caller_reference is None: + if origin_access_identity_id is None: + return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True) + origin_access_config = oai.get('CloudFrontOriginAccessIdentity', {}).get('CloudFrontOriginAccessIdentityConfig', {}) + return origin_access_config.get('CallerReference') + return caller_reference + def main(): argument_spec = dict( @@ -248,18 +261,21 @@ def main(): if origin_access_identity_id is None and caller_reference is not None: origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference) - e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id) - comment = validation_mgr.validate_comment(comment) - if state == 'present': - if origin_access_identity_id is not None and e_tag is not None: + comment = validation_mgr.validate_comment(comment) + caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id(origin_access_identity_id, caller_reference) + if origin_access_identity_id is not None: + e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True) + # update cloudfront origin access identity result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag) else: + # create cloudfront origin access identity result = service_mgr.create_origin_access_identity(caller_reference, comment) changed = True - elif state == 'absent' and 
origin_access_identity_id is not None and e_tag is not None: - result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - changed = True + else: + e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, False) + if e_tag: + result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) result.pop('ResponseMetadata', None) From 46d282668a24ed33e63ba2d412a7207e0f87aa0e Mon Sep 17 00:00:00 2001 From: David James Date: Thu, 2 Feb 2023 02:11:25 +1100 Subject: [PATCH 618/683] s3_lifecycle - ability to set the number of newest noncurrent versions to retain (#1606) s3_lifecycle - ability to set the number of newest noncurrent versions to retain SUMMARY Adds the ability to set "Number of newer versions to retain" ISSUE TYPE Feature Pull Request COMPONENT NAME s3_lifecycle ADDITIONAL INFORMATION See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_NoncurrentVersionExpiration.html Previously only the NoncurrentDays parameter was supported, this PR adds support for NewerNoncurrentVersions Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell --- s3_lifecycle.py | 51 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index e0f8caa91c5..1bad5dbecf0 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -63,8 +63,17 @@ noncurrent_version_expiration_days: description: - The number of days after which non-current versions should be deleted. + - Must be set if I(noncurrent_version_keep_newer) is set. required: false type: int + noncurrent_version_keep_newer: + description: + - The minimum number of non-current versions to retain. + - Requires C(botocore >= 1.23.12). + - Requires I(noncurrent_version_expiration_days). + required: false + type: int + version_added: 5.3.0 noncurrent_version_storage_class: description: - The storage class to which non-current versions are transitioned.
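For reference, a minimal playbook sketch of how the two options combine; the bucket name and rule ID below are placeholders, not values taken from this PR:

- name: Keep only the five newest noncurrent versions
  community.aws.s3_lifecycle:
    name: example-versioned-bucket        # hypothetical bucket name
    rule_id: trim-noncurrent-versions     # hypothetical rule id
    noncurrent_version_expiration_days: 30  # expire older noncurrent versions after 30 days
    noncurrent_version_keep_newer: 5        # but always retain the five newest of them
    status: enabled
    state: present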
@@ -269,6 +278,7 @@ def build_rule(client, module): noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days") noncurrent_version_transitions = module.params.get("noncurrent_version_transitions") noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class") + noncurrent_version_keep_newer = module.params.get("noncurrent_version_keep_newer") prefix = module.params.get("prefix") or "" rule_id = module.params.get("rule_id") status = module.params.get("status") @@ -294,10 +304,12 @@ def build_rule(client, module): rule['Expiration'] = dict(Date=expiration_date.isoformat()) elif expire_object_delete_marker is not None: rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) - + if noncurrent_version_expiration_days or noncurrent_version_keep_newer: + rule['NoncurrentVersionExpiration'] = dict() if noncurrent_version_expiration_days is not None: - rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days) - + rule['NoncurrentVersionExpiration']['NoncurrentDays'] = noncurrent_version_expiration_days + if noncurrent_version_keep_newer is not None: + rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer if transition_days is not None: rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] @@ -572,6 +584,7 @@ def main(): expiration_date=dict(), expire_object_delete_marker=dict(type='bool'), noncurrent_version_expiration_days=dict(type='int'), + noncurrent_version_keep_newer=dict(type='int'), noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), noncurrent_version_transition_days=dict(type='int'), noncurrent_version_transitions=dict(type='list', elements='dict'), @@ -587,16 +600,21 @@ def main(): wait=dict(type='bool', default=False) ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[ - ['expiration_days', 'expiration_date', 'expire_object_delete_marker'], - ['expiration_days', 'transition_date'], - ['transition_days', 'transition_date'], - ['transition_days', 'expiration_date'], - ['transition_days', 'transitions'], - ['transition_date', 'transitions'], - ['noncurrent_version_transition_days', 'noncurrent_version_transitions'], - ],) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["expiration_days", "expiration_date", "expire_object_delete_marker"], + ["expiration_days", "transition_date"], + ["transition_days", "transition_date"], + ["transition_days", "expiration_date"], + ["transition_days", "transitions"], + ["transition_date", "transitions"], + ["noncurrent_version_transition_days", "noncurrent_version_transitions"], + ], + required_by={ + "noncurrent_version_keep_newer": ["noncurrent_version_expiration_days"], + }, + ) client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) @@ -604,12 +622,19 @@ def main(): transition_date = module.params.get("transition_date") state = module.params.get("state") + if module.params.get("noncurrent_version_keep_newer"): + module.require_botocore_at_least( + "1.23.12", + reason="to set number of versions to keep with noncurrent_version_keep_newer" + ) + if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix required_when_present = ('abort_incomplete_multipart_upload_days', 'expiration_date', 'expiration_days', 'expire_object_delete_marker', 'transition_date', 'transition_days', 
'transitions', 'noncurrent_version_expiration_days', + 'noncurrent_version_keep_newer', 'noncurrent_version_transition_days', 'noncurrent_version_transitions') for param in required_when_present: From 13cde6a82f4c2cf5d089d2e7730e14d080d4f94e Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 1 Feb 2023 16:50:22 +0100 Subject: [PATCH 619/683] sns_topic - Add tags and purge_tags options (#972) sns_topic - Add tags and purge_tags options SUMMARY sns_topic - Add tags and purge_tags options Closes #964 ISSUE TYPE Feature Pull Request COMPONENT NAME sns_topic Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- sns_topic.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/sns_topic.py b/sns_topic.py index 166fb68a66f..ac7a351f6f0 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -12,8 +12,7 @@ short_description: Manages AWS SNS topics and subscriptions version_added: 1.0.0 description: - - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. - - As of 2.6, this module can be use to subscribe and unsubscribe to topics outside of your AWS account. + - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. author: - "Joel Thompson (@joelthompson)" - "Fernando Jose Pando (@nand0p)" @@ -149,10 +148,13 @@ Blame Amazon." default: true type: bool +notes: + - Support for I(tags) and I(purge_tags) was added in release 5.3.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 ''' EXAMPLES = r""" @@ -328,12 +330,14 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.community.aws.plugins.module_utils.sns import list_topics from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint from ansible_collections.community.aws.plugins.module_utils.sns import get_info +from ansible_collections.community.aws.plugins.module_utils.sns import update_tags class SnsTopicManager(object): @@ -349,6 +353,8 @@ def __init__(self, delivery_policy, subscriptions, purge_subscriptions, + tags, + purge_tags, check_mode): self.connection = module.client('sns') @@ -371,6 +377,8 @@ def __init__(self, self.topic_deleted = False self.topic_arn = None self.attributes_set = [] + self.tags = tags + self.purge_tags = purge_tags def _create_topic(self): attributes = {} @@ -383,6 +391,9 @@ def _create_topic(self): if not self.name.endswith('.fifo'): self.name = self.name + '.fifo' + if self.tags: + tags = ansible_dict_to_boto3_tag_list(self.tags) + if not self.check_mode: try: response = self.connection.create_topic(Name=self.name, @@ -542,12 +553,13 @@ def ensure_ok(self): elif self.display_name or self.policy or 
self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") changed |= self._set_topic_subs() - self._init_desired_subscription_attributes() if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_subs_attributes() elif any(self.desired_subscription_attributes.values()): self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account") + # Check tagging + changed |= update_tags(self.connection, self.module, self.topic_arn) return changed @@ -600,6 +612,8 @@ def main(): delivery_policy=dict(type='dict', options=delivery_args), subscriptions=dict(default=[], type='list', elements='dict'), purge_subscriptions=dict(type='bool', default=True), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -614,6 +628,8 @@ def main(): subscriptions = module.params.get('subscriptions') purge_subscriptions = module.params.get('purge_subscriptions') check_mode = module.check_mode + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') sns_topic = SnsTopicManager(module, name, @@ -624,6 +640,8 @@ def main(): delivery_policy, subscriptions, purge_subscriptions, + tags, + purge_tags, check_mode) if state == 'present': From fb79c9a67094719d5a5556b8c7cfae75e4d827ae Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 1 Feb 2023 18:28:14 +0100 Subject: [PATCH 620/683] Update docs (#1426) Update docs SUMMARY Update ecs_taskdefinition documentation with firelensConfiguration option (see also #1425) ISSUE TYPE Docs Pull Request COMPONENT NAME ecs_taskdefinition ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- ecs_taskdefinition.py | 26 ++++++++++++++++++ ecs_taskdefinition_info.py | 54 +++++++++++++++++++++++++++++++++++++- 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 4488b5ac924..3eb7716f503 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -536,6 +536,32 @@ description: The type of resource to assign to a container. type: str choices: ['GPU', 'InferenceAccelerator'] + firelensConfiguration: + description: + - The FireLens configuration for the container. + - This is used to specify and configure a log router for container logs. + required: False + type: dict + suboptions: + type: + description: + - The log router to use. The valid values are C(fluentd) or C(fluentbit). + required: False + type: str + choices: + - fluentd + - fluentbit + options: + description: + - The options to use when configuring the log router. + - This field is optional and can be used to specify a custom configuration + file or to add additional metadata, such as the task, task definition, cluster, + and container instance details to the log event. + - If specified, the syntax to use is + C({"enable-ecs-log-metadata":"true|false","config-file-type":"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}). + - For more information, see U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html#firelens-taskdef). + required: False + type: dict network_mode: description: - The Docker networking mode to use for the containers in the task.
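As a usage illustration (not part of the patch itself), a container definition using the newly documented option might look like the sketch below; the family name, image tag, and option values are assumptions rather than values from the PR:

- name: Register a task definition with a FireLens log router
  community.aws.ecs_taskdefinition:
    family: example-firelens-task   # hypothetical family name
    state: present
    containers:
      - name: log_router
        # assumed image; any Fluent Bit-compatible image would work here
        image: public.ecr.aws/aws-observability/aws-for-fluent-bit:stable
        essential: true
        firelensConfiguration:
          type: fluentbit
          options:
            enable-ecs-log-metadata: "true"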
diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index 6f5c145aaa8..d57214cf419 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -219,7 +219,59 @@ description: The configuration options to send to the log driver. returned: when present type: str - + healthCheck: + description: The container health check command and associated configuration parameters for the container. + returned: when present + type: dict + contains: + command: + description: A string array representing the command that the container runs to determine if it is healthy. + type: list + interval: + description: The time period in seconds between each health check execution. + type: int + timeout: + description: The time period in seconds to wait for a health check to succeed before it is considered a failure. + type: int + retries: + description: The number of times to retry a failed health check before the container is considered unhealthy. + type: int + startPeriod: + description: The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. + type: int + resourceRequirements: + description: The type and amount of a resource to assign to a container. + returned: when present + type: dict + contains: + value: + description: The value for the specified resource type. + type: str + type: + description: The type of resource to assign to a container. + type: str + systemControls: + description: A list of namespaced kernel parameters to set in the container. + returned: when present + type: dict + contains: + namespace: + description: The namespaced kernel parameter to set. + type: str + value: + description: The value for the namespaced kernel parameter. + type: str + firelensConfiguration: + description: The FireLens configuration for the container. + returned: when present + type: dict + contains: + type: + description: The log router. + type: str + options: + description: The options to use when configuring the log router. + type: dict family: description: The family of your task definition, used as the definition name returned: always From bc839afa61f8c26a93a0287bbe35929dc39a2fd8 Mon Sep 17 00:00:00 2001 From: "Michael Haskell (mikehas)" Date: Thu, 2 Feb 2023 01:29:58 -0800 Subject: [PATCH 621/683] ssm_parameter: add support for tags (#1573) (#1575) ssm_parameter: add support for tags (#1573) SUMMARY Adding support for tags following community guidelines and other practices from other modules. secretsmanager_secret was used along with helper functions from ec2 code. Addresses open issue for feature request #1573. ISSUE TYPE Feature Pull Request COMPONENT NAME ssm_parameter ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Michael Haskell (mikehas) Reviewed-by: Dennis Qian Reviewed-by: Markus Bergholz Reviewed-by: Mark Chappell --- ssm_parameter.py | 120 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 117 insertions(+), 3 deletions(-) diff --git a/ssm_parameter.py b/ssm_parameter.py index 8647d9886be..d654d45ecf5 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -90,6 +90,11 @@ - amazon.aws.aws - amazon.aws.ec2 - amazon.aws.boto3 + - amazon.aws.tags + +notes: + - Support for I(tags) and I(purge_tags) was added in release 5.3.0.
+ ''' EXAMPLES = ''' @@ -137,6 +142,29 @@ - name: recommend to use with aws_ssm lookup plugin ansible.builtin.debug: msg: "{{ lookup('amazon.aws.aws_ssm', 'Hello') }}" + +- name: Create or update key/value pair in AWS SSM parameter store w/ tags + community.aws.ssm_parameter: + name: "Hello" + description: "This is your first key" + value: "World" + tags: + Environment: "dev" + Version: "1.0" + Confidentiality: "low" + Tag With Space: "foo bar" + +- name: Add or update a tag on an existing parameter w/o removing existing tags + community.aws.ssm_parameter: + name: "Hello" + purge_tags: false + tags: + Contact: "person1" + +- name: Delete all tags on an existing parameter + community.aws.ssm_parameter: + name: "Hello" + tags: {} ''' RETURN = ''' @@ -208,12 +236,19 @@ description: Parameter version number example: 3 returned: success + tags: + description: A dictionary representing the tags associated with the parameter. + type: dict + returned: when the parameter has tags + example: {'MyTagName': 'Some Value'} + version_added: 5.3.0 ''' import time try: import botocore + from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -223,6 +258,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags class ParameterWaiterFactory(BaseWaiterFactory): @@ -301,6 +339,58 @@ def _wait_deleted(client, module, name): module.fail_json_aws(e, msg="Failed to describe parameter while waiting for deletion") +def tag_parameter(client, module, parameter_name, tags): + try: + return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter', + ResourceId=parameter_name, Tags=tags) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to add tag(s) to parameter") + + +def untag_parameter(client, module, parameter_name, tag_keys): + try: + return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter', + ResourceId=parameter_name, TagKeys=tag_keys) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter") + + +def get_parameter_tags(client, module, parameter_name): + try: + tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter', + ResourceId=parameter_name)['TagList'] + tags_dict = boto3_tag_list_to_ansible_dict(tags) + return tags_dict + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to retrieve parameter tags") + + +def update_parameter_tags(client, module, parameter_name, supplied_tags): + changed = False + response = {} + + if supplied_tags is None: + return False, response + + current_tags = get_parameter_tags(client, module, parameter_name) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, + module.params.get('purge_tags')) + + if tags_to_add: + if module.check_mode: + return True, response + response = tag_parameter(client, module, parameter_name, + ansible_dict_to_boto3_tag_list(tags_to_add)) + changed = True + if tags_to_remove: + if module.check_mode: + 
return True, response + response = untag_parameter(client, module, parameter_name, tags_to_remove) + changed = True + + return changed, response + + def update_parameter(client, module, **args): changed = False response = {} @@ -310,8 +400,8 @@ def update_parameter(client, module, **args): try: response = client.put_parameter(aws_retry=True, **args) changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="setting parameter") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as exc: + module.fail_json_aws(exc, msg="setting parameter") return changed, response @@ -324,6 +414,9 @@ def describe_parameter(client, module, **args): if not existing_parameter['Parameters']: return None + tags_dict = get_parameter_tags(client, module, module.params.get('name')) + existing_parameter['Parameters'][0]['tags'] = tags_dict + return existing_parameter['Parameters'][0] @@ -387,7 +480,25 @@ def create_update_parameter(client, module): (changed, response) = update_parameter(client, module, **args) if changed: _wait_updated(client, module, module.params.get('name'), original_version) + + # Handle tag updates for existing parameters + if module.params.get('overwrite_value') != 'never': + tags_changed, tags_response = update_parameter_tags( + client, module, existing_parameter['Parameter']['Name'], + module.params.get('tags')) + + changed = changed or tags_changed + + if tags_response: + response['tag_updates'] = tags_response + else: + # Add tags in initial creation request + if module.params.get('tags'): + args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags'))) + # Overwrite=True conflicts with tags and is not needed for new param + args.update(Overwrite=False) + (changed, response) = update_parameter(client, module, **args) _wait_exists(client, module, module.params.get('name')) @@ -444,6 +555,8 @@ def setup_module_object(): key_id=dict(default="alias/aws/ssm"), overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), ) return AnsibleAWSModule( @@ -474,7 +587,8 @@ def main(): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="to describe parameter") if parameter_metadata: - result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata) + result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata, + ignore_list=['tags']) module.exit_json(changed=changed, **result) From 2bd14fbbfc28e3dc170f06be522f1bb02e6aa72e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Feb 2023 15:59:36 +0100 Subject: [PATCH 622/683] iam_role - add assume_role_policy_document_raw (#1692) iam_role - add assume_role_policy_document_raw SUMMARY fixes: #551 assume_role_policy_document is an IAM policy document, and as such we shouldn't be modifying it. Running camel / snake conversion against the document breaks it. Adds assume_role_policy_document_raw and deprecates the current snake_case behaviour. 
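To make the difference concrete, here is an illustrative, abbreviated comparison of the two return values for the same trust policy (based on the sample blocks in the diff below, not output from a real run):

assume_role_policy_document (legacy, snake_cased; the keys no longer match what IAM accepts):

  version: '2012-10-17'
  statement:
    - action: sts:AssumeRole
      effect: Allow
      principal:
        service: ec2.amazonaws.com

assume_role_policy_document_raw (new; the document exactly as IAM returns it):

  Version: '2012-10-17'
  Statement:
    - Action: sts:AssumeRole
      Effect: Allow
      Principal:
        Service: ec2.amazonaws.com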
ISSUE TYPE Feature Pull Request COMPONENT NAME iam_role iam_role_info ADDITIONAL INFORMATION Follows up on #1054 / #1068 Reviewed-by: Alina Buzachis --- iam_role.py | 38 ++++++++++++++++++++++++++++++++++++-- iam_role_info.py | 36 +++++++++++++++++++++++++++++++++--- 2 files changed, 69 insertions(+), 5 deletions(-) diff --git a/iam_role.py b/iam_role.py index a1aea8a5848..255b4cb7964 100644 --- a/iam_role.py +++ b/iam_role.py @@ -160,8 +160,12 @@ returned: always sample: "2016-08-14T04:36:28+00:00" assume_role_policy_document: - description: the policy that grants an entity permission to assume the role - type: str + description: + - the policy that grants an entity permission to assume the role + - | + note: the case of keys in this dictionary is currently converted from CamelCase to + snake_case. In a release after 2023-12-01 this behaviour will change. + type: dict returned: always sample: { 'statement': [ { 'action': 'sts:AssumeRole', 'effect': 'Allow', 'principal': { 'service': 'ec2.amazonaws.com' }, 'sid': '' } ], 'version': '2012-10-17' } + assume_role_policy_document_raw: + description: the policy that grants an entity permission to assume the role + type: dict + returned: always + version_added: 5.3.0 + sample: { 'Statement': [ { 'Action': 'sts:AssumeRole', 'Effect': 'Allow', 'Principal': { 'Service': 'ec2.amazonaws.com' }, 'Sid': '' } ], 'Version': '2012-10-17' } + attached_policies: description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role type: list @@ -498,6 +521,7 @@ def create_or_update_role(module, client): role['tags'] = get_role_tags(module, client) camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {}) module.exit_json(changed=changed, iam_role=camel_role, **camel_role) @@ -674,6 +698,16 @@ def main(): required_if=[('state', 'present', ['assume_role_policy_document'])], supports_check_mode=True) + module.deprecate("All return values other than iam_role and changed have been deprecated and " + "will be removed in a release after 2023-12-01.", + date="2023-12-01", collection_name="community.aws") + + module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + "iam_role.assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", collection_name="community.aws") + if module.params.get('boundary'): if module.params.get('create_instance_profile'): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") diff --git a/iam_role_info.py b/iam_role_info.py index 3d6e6bdc597..23da3e04097 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -61,9 +61,18 @@ type: str sample: arn:aws:iam::123456789012:role/AnsibleTestRole assume_role_policy_document: - description: Policy Document describing what can assume the role. + description: + - The policy that grants an entity permission to assume the role + - | + Note: the case of keys in this dictionary is currently converted from CamelCase to + snake_case. In a release after 2023-12-01 this behaviour will change. returned: always - type: str + type: dict + assume_role_policy_document_raw: + description: The policy document describing what can assume the role. + returned: always + type: dict + version_added: 5.3.0 create_date: description: Date IAM role was created.
returned: always @@ -227,7 +236,22 @@ def describe_iam_roles(module, client): roles = list_iam_roles_with_backoff(client, **params)['Roles'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list IAM roles") - return [camel_dict_to_snake_dict(describe_iam_role(module, client, role), ignore_list=['tags']) for role in roles] + return [normalize_role(describe_iam_role(module, client, role)) for role in roles] + + +def normalize_profile(profile): + new_profile = camel_dict_to_snake_dict(profile) + if profile.get("Roles"): + new_profile["roles"] = [normalize_role(role) for role in profile.get("Roles")] + return new_profile + + +def normalize_role(role): + new_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") + if role.get("InstanceProfiles"): + new_role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] + return new_role def main(): @@ -245,6 +269,12 @@ def main(): client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + ".assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", collection_name="community.aws") + module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) From e45349e6cc23c65b332f43ec55fc1ba2ed3a819b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 2 Feb 2023 16:02:50 +0100 Subject: [PATCH 623/683] SNS: content-based deduplication (#1693) SNS: content-based deduplication SUMMARY Original Author: redradrat Original PR: #1602 Adds the missing option of "content-based deduplication" for fifo topics. Also fixes looking up fifo topic ARNs, which resulted in changed always being True. ISSUE TYPE Feature Pull Request COMPONENT NAME sns_topic ADDITIONAL INFORMATION #1602 is missing tests and has merge conflicts. Fixes the conflicts and adds integration tests Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- sns_topic.py | 54 +++++++++++++++++++++++++++++++++++++++-------- sns_topic_info.py | 5 +++++ 2 files changed, 50 insertions(+), 9 deletions(-) diff --git a/sns_topic.py b/sns_topic.py index ac7a351f6f0..7bf643cb96e 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -148,6 +148,14 @@
extends_documentation_fragment: @@ -229,6 +237,12 @@ returned: always type: bool sample: false + content_based_deduplication: + description: Whether or not content_based_deduplication was set + returned: always + type: str + sample: disabled + version_added: 5.3.0 delivery_policy: description: Delivery policy for the SNS topic returned: when topic is owned by this AWS account @@ -355,6 +369,7 @@ def __init__(self, purge_subscriptions, tags, purge_tags, + content_based_deduplication, check_mode): self.connection = module.client('sns') @@ -372,6 +387,7 @@ def __init__(self, self.subscriptions_attributes_set = [] self.desired_subscription_attributes = dict() self.purge_subscriptions = purge_subscriptions + self.content_based_deduplication = content_based_deduplication self.check_mode = check_mode self.topic_created = False self.topic_deleted = False @@ -431,6 +447,20 @@ def _set_topic_attrs(self): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic policy") + # Set content-based deduplication attribute. Ignore if topic_type is not fifo. + if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \ + self.content_based_deduplication: + enabled = "true" if self.content_based_deduplication in 'enabled' else "false" + if enabled != topic_attributes['ContentBasedDeduplication']: + changed = True + self.attributes_set.append('content_based_deduplication') + if not self.check_mode: + try: + self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication', + AttributeValue=enabled) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication") + if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): changed = True @@ -542,10 +572,7 @@ def _name_is_arn(self): def ensure_ok(self): changed = False - if self._name_is_arn(): - self.topic_arn = self.name - else: - self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) + self.populate_topic_arn() if not self.topic_arn: changed = self._create_topic() if self.topic_arn in list_topics(self.connection, self.module): @@ -565,10 +592,7 @@ def ensure_ok(self): def ensure_gone(self): changed = False - if self._name_is_arn(): - self.topic_arn = self.name - else: - self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) + self.populate_topic_arn() if self.topic_arn: if self.topic_arn not in list_topics(self.connection, self.module): self.module.fail_json(msg="Cannot use state=absent with third party ARN. 
Use subscribers=[] to unsubscribe") @@ -576,6 +600,16 @@ changed |= self._delete_topic() return changed + def populate_topic_arn(self): + if self._name_is_arn(): + self.topic_arn = self.name + return + + name = self.name + if self.topic_type == 'fifo' and not name.endswith('.fifo'): + name += ".fifo" + self.topic_arn = topic_arn_lookup(self.connection, self.module, name) + def main(): # We're kinda stuck with CamelCase here, it would be nice to switch to @@ -614,6 +648,7 @@ def main(): purge_subscriptions=dict(type='bool', default=True), tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool', default=True), + content_based_deduplication=dict(choices=['enabled', 'disabled']) ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -627,6 +662,7 @@ def main(): delivery_policy = module.params.get('delivery_policy') subscriptions = module.params.get('subscriptions') purge_subscriptions = module.params.get('purge_subscriptions') + content_based_deduplication = module.params.get('content_based_deduplication') check_mode = module.check_mode tags = module.params.get('tags') purge_tags = module.params.get('purge_tags') @@ -642,11 +678,11 @@ def main(): purge_subscriptions, tags, purge_tags, + content_based_deduplication, check_mode) if state == 'present': changed = sns_topic.ensure_ok() - elif state == 'absent': changed = sns_topic.ensure_gone()

diff --git a/sns_topic_info.py b/sns_topic_info.py index 0244b2ff74e..6ff85ddd247 100644 --- a/sns_topic_info.py +++ b/sns_topic_info.py @@ -54,6 +54,11 @@ type: complex returned: always contains: + content_based_deduplication: + description: Whether or not content_based_deduplication was set + returned: always + type: str + sample: "true" delivery_policy: description: Delivery policy for the SNS topic. returned: when topic is owned by this AWS account

From d19bfc23364aaa57c71d1e3c3dec895b9eede421 Mon Sep 17 00:00:00 2001 From: Ralph Kühnert Date: Thu, 2 Feb 2023 18:17:50 +0100 Subject: [PATCH 624/683] Add SQS FIFO high throughput options (#1603) Add SQS FIFO high throughput options SUMMARY Adds high throughput options for SQS fifo queues ISSUE TYPE Feature Pull Request COMPONENT NAME sqs_queue ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- sqs_queue.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+)

diff --git a/sqs_queue.py b/sqs_queue.py index ee06746e7ef..b9cb0fa0f80 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -39,6 +39,22 @@ choices: ['standard', 'fifo'] default: 'standard' type: str + deduplication_scope: + description: + - Deduplication scope for FIFO queues. + - C(messageGroup) is required for high throughput FIFO. + - Defaults to C(queue) on creation. + choices: ['queue', 'messageGroup'] + type: str + version_added: 5.3.0 + fifo_throughput_limit: + description: + - Throughput limit for FIFO queues. + - C(perMessageGroupId) is required for high throughput FIFO. + - Defaults to C(perQueue) on creation. + choices: ['perQueue', 'perMessageGroupId'] + type: str + version_added: 5.3.0 visibility_timeout: description: - The default visibility timeout in seconds. @@ -100,6 +116,16 @@ type: bool returned: always sample: True +fifo_throughput_limit: + description: Which throughput limit strategy is applied. + type: str + returned: always + sample: perQueue +deduplication_scope: + description: The deduplication setting.
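# Taken together, a hypothetical high-throughput FIFO queue would combine
# both new options (the queue name is illustrative):
#
# - community.aws.sqs_queue:
#     name: orders.fifo
#     queue_type: fifo
#     deduplication_scope: messageGroup
#     fifo_throughput_limit: perMessageGroupId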
+ type: str + returned: always + sample: messageGroup visibility_timeout: description: The default visibility timeout in seconds. type: int @@ -472,6 +498,8 @@ def main(): redrive_policy=dict(type='dict'), visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']), kms_master_key_id=dict(type='str'), + fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]), + deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']), kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), content_based_deduplication=dict(type='bool'), tags=dict(type='dict', aliases=['resource_tags']),

From db520d158e329fc6fb2417529f04bd090bc9bfe3 Mon Sep 17 00:00:00 2001 From: Amir <107848552+mohgho@users.noreply.github.com> Date: Fri, 3 Feb 2023 13:17:59 +0100 Subject: [PATCH 625/683] eks_cluster: adding tags to eks cluster (#1591) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit eks_cluster: adding tags to eks cluster SUMMARY As described by issue #1513 All the other community aws modules that I use support applying tags to the created resources (where such a feature is supported by aws). Using aws_eks_cluster it is not possible to add tags, although this appears to be a supported feature when viewing a cluster in the aws console. ISSUE TYPE Feature Pull Request COMPONENT NAME aws_eks_cluster ADDITIONAL INFORMATION Reviewed-by: Gonéri Le Bouder Reviewed-by: Amir Reviewed-by: Mark Chappell --- eks_cluster.py | 8 ++++++++ 1 file changed, 8 insertions(+)

diff --git a/eks_cluster.py b/eks_cluster.py index 425ff9db264..73467d7322b 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -45,6 +45,11 @@ - present default: present type: str + tags: + description: + - A dictionary of tags to add to the EKS cluster. + type: dict + version_added: 5.3.0 wait: description: >- Specifies whether the module waits until the cluster is active or deleted @@ -212,6 +217,8 @@ def ensure_present(client, module): ) if module.params['version']: params['version'] = module.params['version'] + if module.params['tags']: + params['tags'] = module.params['tags'] cluster = client.create_cluster(**params)['cluster'] except botocore.exceptions.EndpointConnectionError as e: module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) @@ -276,6 +283,7 @@ def main(): subnets=dict(type='list', elements='str'), security_groups=dict(type='list', elements='str'), state=dict(choices=['absent', 'present'], default='present'), + tags=dict(type='dict', required=False), wait=dict(default=False, type='bool'), wait_timeout=dict(default=1200, type='int') )

From 11d85faa55a90a38260c98ca70ae890946df57b0 Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Fri, 3 Feb 2023 08:36:28 -0500 Subject: [PATCH 626/683] sns_topic - Fix Permission Issue for Cross Account Subscriptions (#1418) sns_topic - Fix Permission Issue for Cross Account Subscriptions SUMMARY sns_topic currently fails with the following error if it has any cross account subscriptions: Couldn't get subscription attributes for subscription arn:aws:sns:us-east-1:123412341234:my-sns-topic-name:555950dc-7c5f-416c-8f8e-e8f38eabfa54: An error occurred (AuthorizationError) when calling the GetSubscriptionAttributes operation: Not authorized to access this subscription This happens, for example, when a Lambda function in account A is subscribed to an SNS topic in account B, as described here.
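In that setup the subscription object is owned by the subscriber's account, so the account that owns the topic is not authorized to call GetSubscriptionAttributes on it.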
I believe this was caused by #640. I am not sure how to write a test for this specific situation as it would require multiple AWS accounts. ISSUE TYPE Bugfix Pull Request COMPONENT NAME sns_topic ADDITIONAL INFORMATION - community.aws.sns_topic: name: my-sns-topic-in-account-123412341234 subscriptions: - endpoint: "arn:aws:lambda:us-east-1:567856785678:function:my-lambda-function-in-account-567856785678" protocol: lambda state: present Reviewed-by: Mark Chappell --- sns_topic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sns_topic.py b/sns_topic.py index 7bf643cb96e..bcaf44a8840 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -519,8 +519,8 @@ def _set_topic_subs_attributes(self): for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): sub_key = (sub['Protocol'], sub['Endpoint']) sub_arn = sub['SubscriptionArn'] - if sub_key not in self.desired_subscription_attributes: - # subscription isn't defined in desired, skipping + if not self.desired_subscription_attributes.get(sub_key): + # subscription attributes aren't defined in desired, skipping continue try: From 4bbd811e64c2bc8cd88bad288d2b00e6d3e1ace6 Mon Sep 17 00:00:00 2001 From: tjarra Date: Fri, 3 Feb 2023 12:25:44 -0300 Subject: [PATCH 627/683] New Module - eks_nodegroup (#1415) SUMMARY Add a new module to manage nodegroups in EKS clusters. ISSUE TYPE New Module Pull Request COMPONENT NAME eks_nodegroup ADDITIONAL INFORMATION --- eks_nodegroup.py | 707 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 707 insertions(+) create mode 100644 eks_nodegroup.py diff --git a/eks_nodegroup.py b/eks_nodegroup.py new file mode 100644 index 00000000000..bdf5817b133 --- /dev/null +++ b/eks_nodegroup.py @@ -0,0 +1,707 @@ +#!/usr/bin/python +# Copyright (c) 2022 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: eks_nodegroup +version_added: 5.1.0 +short_description: Manage EKS Nodegroup module +description: + - Manage EKS Nodegroup. +author: + - Tiago Jarra (@tjarra) +options: + name: + description: Name of EKS Nodegroup. + required: True + type: str + cluster_name: + description: Name of EKS Cluster. + required: True + type: str + node_role: + description: ARN of IAM role used by the EKS cluster Nodegroup. + type: str + subnets: + description: list of subnet IDs for the Kubernetes cluster. + type: list + elements: str + scaling_config: + description: The scaling configuration details for the Auto Scaling group that is created for your node group. + type: dict + default: + min_size: 1 + max_size: 2 + desired_size: 1 + suboptions: + min_size: + description: The minimum number of nodes that the managed node group can scale in to. + type: int + max_size: + description: The maximum number of nodes that the managed node group can scale out to. + type: int + desired_size: + description: The current number of nodes that the managed node group should maintain. + type: int + disk_size: + description: + - Size of disk in nodegroup nodes. + If you specify I(launch_template), then don't specify I(disk_size), or the node group deployment will fail. + type: int + instance_types: + description: + - Specify the instance types for a node group. + If you specify I(launch_template), then don't specify I(instance_types), or the node group deployment will fail. 
+ type: list + elements: str + ami_type: + description: The AMI type for your node group. + type: str + choices: + - AL2_x86_64 + - AL2_x86_64_GPU + - AL2_ARM_64 + - CUSTOM + - BOTTLEROCKET_ARM_64 + - BOTTLEROCKET_x86_64 + remote_access: + description: + - The remote access (SSH) configuration to use with your node group. + If you specify I(launch_template), then don't specify I(remote_access), or the node group deployment will fail. + type: dict + suboptions: + ec2_ssh_key: + description: The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the managed node group. + type: str + source_sg: + description: The security groups that are allowed SSH access (port 22) to the nodes. + type: list + elements: str + update_config: + description: The node group update configuration. + type: dict + default: + max_unavailable: 1 + suboptions: + max_unavailable: + description: The maximum number of nodes unavailable at once during a version update. + type: int + max_unavailable_percentage: + description: The maximum percentage of nodes unavailable during a version update. + type: int + labels: + description: The Kubernetes labels to be applied to the nodes in the node group when they are created. + type: dict + taints: + description: The Kubernetes taints to be applied to the nodes in the node group. + type: list + elements: dict + suboptions: + key: + description: The key of the taint. + type: str + value: + description: The value of the taint. + type: str + effect: + description: The effect of the taint. + type: str + choices: + - NO_SCHEDULE + - NO_EXECUTE + - PREFER_NO_SCHEDULE + launch_template: + description: + - An object representing a node group's launch template specification. + - If specified, then do not specify I(instanceTypes), I(diskSize), or I(remoteAccess). + type: dict + suboptions: + name: + description: The name of the launch template. + type: str + version: + description: + - The version of the launch template to use. + - If no version is specified, then the template's default version is used. + type: str + id: + description: The ID of the launch template. + type: str + capacity_type: + description: The capacity type for your node group. + default: ON_DEMAND + type: str + choices: + - ON_DEMAND + - SPOT + release_version: + description: The AMI version of the Amazon EKS optimized AMI to use with your node group. + type: str + state: + description: Create or delete the Nodegroup. + choices: + - absent + - present + default: present + type: str + tags: + description: A dictionary of resource tags. + type: dict + aliases: ['resource_tags'] + purge_tags: + description: + - Purge existing tags that are not found in the nodegroup. + type: bool + default: true + wait: + description: Specifies whether the module waits until the profile is created or deleted before moving on. + type: bool + default: false + wait_timeout: + description: The duration in seconds to wait for the nodegroup to become active. Defaults to C(1200) seconds. + default: 1200 + type: int +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
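# (Illustrative values only.) When a launch template supplies the instance
# configuration, disk_size, instance_types, ami_type and remote_access must
# be omitted, for example:
#
# - community.aws.eks_nodegroup:
#     name: lt_nodegroup
#     cluster_name: test_cluster
#     node_role: arn:aws:iam::1231231123:role/asdf
#     subnets:
#       - subnet-qwerty123
#     launch_template:
#       name: my-nodegroup-template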
+ +- name: create nodegroup + community.aws.eks_nodegroup: + name: test_nodegroup + state: present + cluster_name: test_cluster + node_role: arn:aws:eks:us-east-1:1231231123:role/asdf + subnets: + - subnet-qwerty123 + - subnet-asdfg456 + scaling_config: + min_size: 1 + max_size: 2 + desired_size: 1 + disk_size: 20 + instance_types: + - 't3.micro' + ami_type: 'AL2_x86_64' + labels: + 'test': 'test' + taints: + - key: 'test' + value: 'test' + effect: 'NO_SCHEDULE' + capacity_type: 'ON_DEMAND' + +- name: Remove an EKS Nodegroup + community.aws.eks_nodegroup: + name: test_nodegroup + cluster_name: test_cluster + wait: yes + state: absent +''' + +RETURN = r''' +nodegroup_name: + description: The name associated with an Amazon EKS managed node group. + returned: when state is present + type: str + sample: test_nodegroup +nodegroup_arn: + description: The Amazon Resource Name (ARN) associated with the managed node group. + returned: when state is present + type: str + sample: arn:aws:eks:us-east-1:1231231123:safd +cluster_name: + description: Name of EKS Cluster + returned: when state is present + type: str + sample: test_cluster +version: + description: The Kubernetes version of the managed node group. + returned: when state is present + type: str + sample: need_validate +release_version: + description: This is the version of the Amazon EKS optimized AMI that the node group was deployed with. + returned: when state is present + type: str + sample: need_validate +created_at: + description: Nodegroup creation date and time. + returned: when state is present + type: str + sample: '2022-01-18T20:00:00.111000+00:00' +modified_at: + description: Nodegroup modified date and time. + returned: when state is present + type: str + sample: '2022-01-18T20:00:00.111000+00:00' +status: + description: status of the EKS Nodegroup. + returned: when state is present + type: str + sample: + - CREATING + - ACTIVE +capacity_type: + description: The capacity type of your managed node group. + returned: when state is present + type: str + sample: need_validate +scaling_config: + description: The scaling configuration details for the Auto Scaling group that is associated with your node group. + returned: when state is present + type: dict + sample: need_validate +instance_types: + description: This is the instance type that is associated with the node group. + returned: when state is present + type: list + sample: need_validate +subnets: + description: List of subnets used by the node group. + returned: when state is present + type: list + sample: + - subnet-qwerty123 + - subnet-asdfg456 +remote_access: + description: This is the remote access configuration that is associated with the node group. + returned: when state is present + type: dict + sample: need_validate +ami_type: + description: This is the AMI type that was specified in the node group configuration. + returned: when state is present + type: str + sample: need_validate +node_role: + description: ARN of the IAM Role used by Nodegroup. + returned: when state is present + type: str + sample: arn:aws:eks:us-east-1:1231231123:role/asdf +labels: + description: The Kubernetes labels applied to the nodes in the node group. + returned: when state is present + type: dict + sample: need_validate +taints: + description: The Kubernetes taints to be applied to the nodes in the node group when they are created. + returned: when state is present + type: list + sample: need_validate +resources: + description: The resources associated with the node group.
+ returned: when state is present + type: complex + contains: + autoScalingGroups: + description: The Auto Scaling groups associated with the node group. + returned: when state is present + type: list + elements: dict + remoteAccessSecurityGroup: + description: The remote access security group associated with the node group. + returned: when state is present + type: str +diskSize: + description: This is the disk size in the node group configuration. + returned: when state is present + type: int + sample: 20 +health: + description: The health status of the node group. + returned: when state is present + type: dict + sample: need_validate +update_config: + description: The node group update configuration. + returned: when state is present + type: dict + contains: + maxUnavailable: + description: The maximum number of nodes unavailable at once during a version update. + type: int + maxUnavailablePercentage: + description: The maximum percentage of nodes unavailable during a version update. + type: int +launch_template: + description: If a launch template was used to create the node group, then this is the launch template that was used. + returned: when state is present + type: dict + sample: need_validate +tags: + description: Nodegroup tags. + returned: when state is present + type: dict + sample: + foo: bar +''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +try: + import botocore.exceptions +except ImportError: + pass + + +def validate_tags(client, module, nodegroup): + changed = False + + desired_tags = module.params.get('tags') + if desired_tags is None: + return False + + try: + existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags'] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name')) + if tags_to_remove: + if not module.check_mode: + changed = True + try: + client.untag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name')) + if tags_to_add: + if not module.check_mode: + changed = True + try: + client.tag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tags=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' 
% module.params.get('name')) + + return changed + + +def compare_taints(nodegroup_taints, param_taints): + taints_to_unset = [] + taints_to_add_or_update = [] + for taint in nodegroup_taints: + if taint not in param_taints: + taints_to_unset.append(taint) + for taint in param_taints: + if taint not in nodegroup_taints: + taints_to_add_or_update.append(taint) + + return taints_to_add_or_update, taints_to_unset + + +def validate_taints(client, module, nodegroup, param_taints): + changed = False + params = dict() + params['clusterName'] = nodegroup['clusterName'] + params['nodegroupName'] = nodegroup['nodegroupName'] + params['taints'] = {} + if 'taints' not in nodegroup: + nodegroup['taints'] = [] + taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints) + + if taints_to_add_or_update: + params['taints']['addOrUpdateTaints'] = taints_to_add_or_update + if taints_to_unset: + params['taints']['removeTaints'] = taints_to_unset + if params['taints']: + if not module.check_mode: + changed = True + try: + client.update_nodegroup_config(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName']) + + return changed + + +def compare_labels(nodegroup_labels, param_labels): + labels_to_unset = [] + labels_to_add_or_update = {} + for label in nodegroup_labels.keys(): + if label not in param_labels: + labels_to_unset.append(label) + for key, value in param_labels.items(): + if key not in nodegroup_labels.keys(): + labels_to_add_or_update[key] = value + + return labels_to_add_or_update, labels_to_unset + + +def validate_labels(client, module, nodegroup, param_labels): + changed = False + params = dict() + params['clusterName'] = nodegroup['clusterName'] + params['nodegroupName'] = nodegroup['nodegroupName'] + params['labels'] = {} + labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels) + + if labels_to_add_or_update: + params['labels']['addOrUpdateLabels'] = labels_to_add_or_update + if labels_to_unset: + params['labels']['removeLabels'] = labels_to_unset + if params['labels']: + if not module.check_mode: + changed = True + try: + client.update_nodegroup_config(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName']) + + return changed + + +def compare_params(module, params, nodegroup): + for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiType', 'remoteAccess', 'capacityType']: + if (param in nodegroup) and (param in params): + if (nodegroup[param] != params[param]): + module.fail_json(msg="Cannot modify parameter %s." % param) + if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params): + module.fail_json(msg="Cannot add Launch Template in this Nodegroup.") + if nodegroup['updateConfig'] != params['updateConfig']: + return True + if nodegroup['scalingConfig'] != params['scalingConfig']: + return True + return False + + +def compare_params_launch_template(module, params, nodegroup): + if 'launchTemplate' not in params: + module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.") + else: + for key in ['name', 'id']: + if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]): + module.fail_json(msg="Cannot modify Launch Template %s."
% key) + if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']): + return True + return False + + +def create_or_update_nodegroups(client, module): + + changed = False + params = dict() + params['nodegroupName'] = module.params['name'] + params['clusterName'] = module.params['cluster_name'] + params['nodeRole'] = module.params['node_role'] + params['subnets'] = module.params['subnets'] + params['tags'] = module.params['tags'] or {} + if module.params['ami_type'] is not None: + params['amiType'] = module.params['ami_type'] + if module.params['disk_size'] is not None: + params['diskSize'] = module.params['disk_size'] + if module.params['instance_types'] is not None: + params['instanceTypes'] = module.params['instance_types'] + if module.params['launch_template'] is not None: + params['launchTemplate'] = dict() + if module.params['launch_template']['id'] is not None: + params['launchTemplate']['id'] = module.params['launch_template']['id'] + if module.params['launch_template']['version'] is not None: + params['launchTemplate']['version'] = module.params['launch_template']['version'] + if module.params['launch_template']['name'] is not None: + params['launchTemplate']['name'] = module.params['launch_template']['name'] + if module.params['release_version'] is not None: + params['releaseVersion'] = module.params['release_version'] + if module.params['remote_access'] is not None: + params['remoteAccess'] = module.params['remote_access'] + if module.params['capacity_type'] is not None: + params['capacityType'] = module.params['capacity_type'].upper() + if module.params['labels'] is not None: + params['labels'] = module.params['labels'] + if module.params['taints'] is not None: + params['taints'] = module.params['taints'] + if module.params['update_config'] is not None: + params['updateConfig'] = dict() + if module.params['update_config']['max_unavailable'] is not None: + params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable'] + if module.params['update_config']['max_unavailable_percentage'] is not None: + params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage'] + if module.params['scaling_config'] is not None: + params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config']) + + wait = module.params.get('wait') + nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + + if nodegroup: + update_params = dict() + update_params['clusterName'] = params['clusterName'] + update_params['nodegroupName'] = params['nodegroupName'] + + if 'launchTemplate' in nodegroup: + if compare_params_launch_template(module, params, nodegroup): + update_params['launchTemplate'] = params['launchTemplate'] + if not module.check_mode: + try: + client.update_nodegroup_version(**update_params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't update nodegroup.") + changed |= True + + if compare_params(module, params, nodegroup): + try: + if 'launchTemplate' in update_params: + update_params.pop('launchTemplate') + update_params['scalingConfig'] = params['scalingConfig'] + update_params['updateConfig'] = params['updateConfig'] + + if not module.check_mode: + client.update_nodegroup_config(**update_params) + + changed |= True + + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 
msg="Couldn't update nodegroup.") + + changed |= validate_tags(client, module, nodegroup) + + changed |= validate_labels(client, module, nodegroup, params['labels']) + + if 'taints' in nodegroup: + changed |= validate_taints(client, module, nodegroup, params['taints']) + + if wait: + wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) + + nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup)) + + if module.check_mode: + module.exit_json(changed=True) + + try: + nodegroup = client.create_nodegroup(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName']) + + if wait: + wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) + nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + + module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup)) + + +def delete_nodegroups(client, module): + name = module.params.get('name') + clusterName = module.params['cluster_name'] + existing = get_nodegroup(client, module, name, clusterName) + wait = module.params.get('wait') + if not existing or existing['status'] == 'DELETING': + module.exit_json(changed=False, msg='Nodegroup not exists or in DELETING status.') + if not module.check_mode: + try: + client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name) + + if wait: + wait_until(client, module, 'nodegroup_deleted', name, clusterName) + + module.exit_json(changed=True) + + +def get_nodegroup(client, module, nodegroup_name, cluster_name): + try: + return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup'] + except is_boto3_error_code('ResourceNotFoundException'): + return None + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." 
% nodegroup_name) + + +def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): + wait_timeout = module.params.get('wait_timeout') + waiter = get_waiter(client, waiter_name) + attempts = 1 + int(wait_timeout / waiter.config.delay) + try: + waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts}) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg="An error occurred waiting") + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + cluster_name=dict(type='str', required=True), + node_role=dict(), + subnets=dict(type='list', elements='str'), + scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict( + min_size=dict(type='int'), + max_size=dict(type='int'), + desired_size=dict(type='int') + )), + disk_size=dict(type='int'), + instance_types=dict(type='list', elements='str'), + ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']), + remote_access=dict(type='dict', options=dict( + ec2_ssh_key=dict(no_log=True), + source_sg=dict(type='list', elements='str') + )), + update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict( + max_unavailable=dict(type='int'), + max_unavailable_percentage=dict(type='int') + )), + labels=dict(type='dict', default={}), + taints=dict(type='list', elements='dict', default=[], options=dict( + key=dict(type='str', no_log=False,), + value=dict(type='str'), + effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE']) + )), + launch_template=dict(type='dict', options=dict( + name=dict(type='str'), + version=dict(type='str'), + id=dict(type='str') + )), + capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'), + release_version=dict(), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + state=dict(choices=['absent', 'present'], default='present'), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=1200, type='int') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ['node_role', 'subnets']]], + mutually_exclusive=[ + ('launch_template', 'instance_types'), + ('launch_template', 'disk_size'), + ('launch_template', 'remote_access'), + ('launch_template', 'ami_type') + ], + supports_check_mode=True, + ) + + if module.params['launch_template'] is None: + if module.params['disk_size'] is None: + module.params['disk_size'] = 20 + if module.params['ami_type'] is None: + module.params['ami_type'] = "AL2_x86_64" + if module.params['instance_types'] is None: + module.params['instance_types'] = ["t3.medium"] + else: + if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None): + module.exit_json(changed=False, msg='To use launch_template, it is necessary to inform the id or name.') + try: + client = module.client('eks') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't connect to AWS.") + + if module.params.get('state') == 'present': + create_or_update_nodegroups(client, module) + else: + delete_nodegroups(client, module) + + +if __name__ == '__main__': + main() From 2e4733de974b5ffa1491673baa07ee787413a117 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 3 Feb 2023 18:09:53 +0100 Subject: [PATCH 628/683] Bump release_version for eks_nodegroup 
(#1703) Bump release_version for eks_nodegroup SUMMARY Couldn't bump the version in #1415 - bumping it here ISSUE TYPE Docs Pull Request COMPONENT NAME eks_nodegroup ADDITIONAL INFORMATION --- eks_nodegroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eks_nodegroup.py b/eks_nodegroup.py index bdf5817b133..0ff11b4d407 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -9,7 +9,7 @@ DOCUMENTATION = r''' --- module: eks_nodegroup -version_added: 5.1.0 +version_added: 5.3.0 short_description: Manage EKS Nodegroup module description: - Manage EKS Nodegroup. From 0501f63c9356446a1f94f708d8fbc8f52f8d963f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 3 Feb 2023 21:23:06 +0100 Subject: [PATCH 629/683] eks_nodegroup - Update documented defaults (#1705) eks_nodegroup - Update documented defaults SUMMARY Defaults are missing for labels and taints in the docs. ISSUE TYPE Bugfix Pull Request COMPONENT NAME eks_nodegroup ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- eks_nodegroup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eks_nodegroup.py b/eks_nodegroup.py index 0ff11b4d407..5ba33128a66 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -97,10 +97,12 @@ labels: description: The Kubernetes labels to be applied to the nodes in the node group when they are created. type: dict + default: {} taints: description: The Kubernetes taints to be applied to the nodes in the node group. type: list elements: dict + default: [] suboptions: key: description: The key of the taint. From 9f7c9b608744716b7162126b6045afe966611d94 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sat, 4 Feb 2023 18:32:48 +0100 Subject: [PATCH 630/683] Various lint fixups (#1707) Various lint fixups SUMMARY minor linting fixups ISSUE TYPE Feature Pull Request COMPONENT NAME aws_ssm batch_job_definition inspector_target ADDITIONAL INFORMATION See Also: ansible-collections/news-for-maintainers#34 Reviewed-by: Felix Fontein --- batch_job_definition.py | 2 +- inspector_target.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/batch_job_definition.py b/batch_job_definition.py index 7e4ea05f5b4..021d833b464 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -308,7 +308,7 @@ def create_job_definition(module, batch_client): def get_retry_strategy_params(): - return 'attempts', + return ('attempts',) def get_container_property_params(): diff --git a/inspector_target.py b/inspector_target.py index 9ebdf764002..4bfe5b502d6 100644 --- a/inspector_target.py +++ b/inspector_target.py @@ -194,7 +194,7 @@ def main(): ) updated_target.update({'tags': ansible_dict_tags}) - module.exit_json(changed=True, **updated_target), + module.exit_json(changed=True, **updated_target) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, From 4f7a79ee515384c38d7ae2f26df429a8ffb405a3 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 8 Feb 2023 07:34:38 +0100 Subject: [PATCH 631/683] fix (#1711) iam_access_key - fix example docs SUMMARY Closes #1710 ISSUE TYPE Docs Pull Request COMPONENT NAME iam_access_key Reviewed-by: Mark Chappell --- iam_access_key.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iam_access_key.py b/iam_access_key.py index ab3e9110604..32220a216e3 100644 --- a/iam_access_key.py +++ b/iam_access_key.py @@ -69,8 +69,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: 
absent '''

From 583574a9a2a02d1888ec0558a955f8fa75aaad2a Mon Sep 17 00:00:00 2001 From: Brad Solomon <81818815+brsolomon-deloitte@users.noreply.github.com> Date: Wed, 8 Feb 2023 08:13:48 -0500 Subject: [PATCH 632/683] secretsmanager_secret: add 'overwrite' parameter (#1628) secretsmanager_secret: add 'overwrite' parameter SUMMARY Adds an 'overwrite' parameter to secretsmanager_secret - If set to True, an existing secret with the same name will be overwritten. - If set to False, a secret with the given name will only be created if none exists. Fixes #1626 ISSUE TYPE Feature Pull Request COMPONENT NAME secretsmanager_secret ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- secretsmanager_secret.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index 337b28669bc..870ed89059e 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -29,6 +29,14 @@ default: 'present' choices: ['present', 'absent'] type: str + overwrite: + description: + - Whether to overwrite an existing secret with the same name. + - If set to C(True), an existing secret with the same I(name) will be overwritten. + - If set to C(False), a secret with the given I(name) will only be created if none exists. + type: bool + default: True + version_added: 5.3.0 recovery_window: description: - Only used if state is absent. @@ -130,6 +138,14 @@ state: absent secret_type: 'string' secret: "{{ super_secret_string }}" + +- name: Only create a new secret, but do not update if it already exists by name + community.aws.secretsmanager_secret: + name: 'random_string' + state: present + secret_type: 'string' + secret: "{{ lookup('community.general.random_string', length=16, special=false) }}" + overwrite: false ''' RETURN = r''' @@ -524,6 +540,7 @@ def main(): argument_spec={ 'name': dict(required=True), 'state': dict(choices=['present', 'absent'], default='present'), + 'overwrite': dict(type='bool', default=True), 'description': dict(default=""), 'replica': dict(type='list', elements='dict', options=replica_args), 'kms_key_id': dict(), @@ -580,12 +597,15 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True else: + # current_secret exists; decide what to do with it if current_secret.get("DeletedDate"): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - result = secrets_mgr.update_secret(secret) - changed = True + overwrite = module.params.get('overwrite') + if overwrite: + result = secrets_mgr.update_secret(secret) + changed = True if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True

From c36c2fb9c19ccb9b7838fc8c568c8ba1424091c3 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Wed, 22 Feb 2023 05:49:35 -0500 Subject: [PATCH 633/683] elasticache: use an up to date node type in example (#1719) elasticache: use an up to date node type in example The cache.m1.small node type is deprecated. It's now replaced by cache.t3.small.
See: https://aws.amazon.com/elasticache/previous-generation/ Reviewed-by: Mark Chappell --- elasticache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/elasticache.py b/elasticache.py index bd976aa841f..eeabcfe76cf 100644 --- a/elasticache.py +++ b/elasticache.py @@ -113,7 +113,7 @@ state: present engine: memcached cache_engine_version: 1.4.14 - node_type: cache.m1.small + node_type: cache.m3.small num_nodes: 1 cache_port: 11211 cache_security_groups: From 41a942510504ef3702deae846aba8d2c56be3646 Mon Sep 17 00:00:00 2001 From: mihai-satmarean <4729542+mihai-satmarean@users.noreply.github.com> Date: Tue, 28 Feb 2023 12:52:07 +0100 Subject: [PATCH 634/683] fixed unneeded `state` in module docs. (#1728) fixed unneeded `state` in module docs. SUMMARY removed state as it does not work in the info part ISSUE TYPE Docs Pull Request COMPONENT NAME ADDITIONAL INFORMATION "Unsupported parameters for (community.aws.ec2_transit_gateway_vpc_attachment_info) module: state. Supported parameters include: access_key, aws_ca_bundle, aws_config, debug_botocore_endpoint_logs, endpoint_url, filters, id, include_deleted, name, profile, region, secret_key, session_token, validate_certs (access_token, attachment_id, aws_access_key, aws_access_key_id, aws_endpoint_url, aws_profile, aws_region, aws_secret_access_key, aws_secret_key, aws_security_token, aws_session_token, ec2_access_key, ec2_region, ec2_secret_key, ec2_url, s3_url, security_token).", Reviewed-by: Mark Chappell --- ec2_transit_gateway_vpc_attachment_info.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py index 3a8d4dfd4d1..88f57fefa1b 100644 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ b/ec2_transit_gateway_vpc_attachment_info.py @@ -49,18 +49,15 @@ EXAMPLES = ''' # Describe a specific Transit Gateway attachment. - community.aws.ec2_transit_gateway_vpc_attachment_info: - state: present id: 'tgw-attach-0123456789abcdef0' # Describe all attachments attached to a transit gateway. - community.aws.ec2_transit_gateway_vpc_attachment_info: - state: present filters: transit-gateway-id: tgw-0fedcba9876543210' # Describe all attachments in an account. - community.aws.ec2_transit_gateway_vpc_attachment_info: - state: present filters: transit-gateway-id: tgw-0fedcba9876543210' ''' From 3d9efb03b20e4b7a4781a27ac70fdcd16236db6a Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Wed, 1 Mar 2023 11:25:46 +0100 Subject: [PATCH 635/683] ecs: integration test and new purge parameters (#1716) ecs: integration test and new purge parameters SUMMARY Make the ecs_cluster integration test work again ecs_service - new parameter purge_placement_constraints and purge_placement_strategy. Otherwise it is impossible to remove those placements without breaking backwards compatibility. purge_placement_constraints in the integration test purge_placement_strategy in the integration test required by mattclay/aws-terminator#210 (comment) ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION works for me again ansible-test integration --python 3.10 ecs_cluster --docker --allow-unsupported ... 
PLAY RECAP ********************************************************************* testhost : ok=143 changed=69 unreachable=0 failed=0 skipped=1 rescued=0 ignored=6 Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis Reviewed-by: Mike Graves --- ecs_service.py | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 4907187f3ab..928d03b4386 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -148,6 +148,14 @@ description: A cluster query language expression to apply to the constraint. required: false type: str + purge_placement_constraints: + version_added: 5.3.0 + description: + - Toggle overwriting of existing placement constraints. This is needed for backwards compatibility. + - By default I(purge_placement_constraints=false). In a release after 2024-06-01 this will be changed to I(purge_placement_constraints=true). + required: false + type: bool + default: false placement_strategy: description: - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service. @@ -162,6 +170,14 @@ field: description: The field to apply the placement strategy against. type: str + purge_placement_strategy: + version_added: 5.3.0 + description: + - Toggle overwriting of existing placement strategy. This is needed for backwards compatibility. + - By default I(purge_placement_strategy=false). In a release after 2024-06-01 this will be changed to I(purge_placement_strategy=true). + required: false + type: bool + default: false force_deletion: description: - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group. @@ -396,7 +412,9 @@ returned: always type: int loadBalancers: - description: A list of load balancer objects + description: + - A list of load balancer objects + - Updating the loadbalancer configuration of an existing service requires botocore>=1.24.14. 
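# A sketch of the new purge behaviour (the names are illustrative): because
# placement_constraints defaults to an empty list, setting the purge flag on
# its own clears any constraints previously configured on the service:
#
# - community.aws.ecs_service:
#     name: test-service
#     cluster: test-cluster
#     task_definition: test-taskdef
#     desired_count: 1
#     purge_placement_constraints: true
#     state: present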
returned: always type: complex contains: @@ -822,7 +840,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_configuration, placement_constraints, placement_strategy, network_configuration, health_check_grace_period_seconds, - force_new_deployment, capacity_provider_strategy, load_balancers): + force_new_deployment, capacity_provider_strategy, load_balancers, + purge_placement_constraints, purge_placement_strategy): params = dict( cluster=cluster_name, service=service_name, @@ -834,9 +853,15 @@ def update_service(self, service_name, cluster_name, task_definition, desired_co params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} for constraint in placement_constraints] + if purge_placement_constraints and not placement_constraints: + params['placementConstraints'] = [] + if placement_strategy: params['placementStrategy'] = placement_strategy + if purge_placement_strategy and not placement_strategy: + params['placementStrategy'] = [] + if network_configuration: params['networkConfiguration'] = network_configuration if force_new_deployment: @@ -907,6 +932,7 @@ def main(): expression=dict(required=False, type='str') ) ), + purge_placement_constraints=dict(required=False, default=False, type='bool'), placement_strategy=dict( required=False, default=[], @@ -917,6 +943,7 @@ def main(): field=dict(type='str'), ) ), + purge_placement_strategy=dict(required=False, default=False, type='bool'), health_check_grace_period_seconds=dict(required=False, type='int'), network_configuration=dict(required=False, type='dict', options=dict( subnets=dict(type='list', elements='str'), @@ -1061,6 +1088,8 @@ def main(): module.params['force_new_deployment'], capacityProviders, updatedLoadBalancers, + module.params['purge_placement_constraints'], + module.params['purge_placement_strategy'], ) else: From 7a343a50cb6e413deb33aeca20eee6a356e008ea Mon Sep 17 00:00:00 2001 From: Justin McCormick Date: Thu, 2 Mar 2023 03:58:02 -0600 Subject: [PATCH 636/683] ecs_service -- with force_new_deployment user can specify taskdef or not (#1680) ecs_service -- with force_new_deployment user can specify taskdef or not SUMMARY Fixes #1106 Support force_new_deployment without having to specify a task definition. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION Previously task_definition was required when state was present; regardless of whether force_new_deployment was set or not. Previous error was along the lines of "state is present but all of the following are missing: task_definition". New behavior enforces either task_definition or force_new_deployment is set. If both are provided, the user's task_definition will be sent through to boto. If only task_definition is defined, original behavior resumes. If only force_new_deployment is set, pull the taskDefinition from existing and pass it through to boto. Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- ecs_service.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 928d03b4386..15c74b92c50 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -44,7 +44,7 @@ task_definition: description: - The task definition the service will run. - - This parameter is required when I(state=present). 
+ - This parameter is required when I(state=present) unless I(force_new_deployment=True). - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case the task definition is managed by Code Pipeline and cannot be updated. required: false @@ -971,14 +971,15 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['task_definition']), - ('launch_type', 'FARGATE', ['network_configuration'])], + required_if=[('launch_type', 'FARGATE', ['network_configuration'])], required_together=[['load_balancers', 'role']], mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) - if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': - if module.params['desired_count'] is None: + if module.params['state'] == 'present': + if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None: module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + if module.params['task_definition'] is None and not module.params['force_new_deployment']: + module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.') if len(module.params['capacity_provider_strategy']) > 6: module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') @@ -1075,6 +1076,9 @@ def main(): updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + if task_definition is None and module.params['force_new_deployment']: + task_definition = existing['taskDefinition'] + # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'], From a1d0dc15c28632ee76f9db0899d049dce0d9763f Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Tue, 7 Mar 2023 18:25:39 +0100 Subject: [PATCH 637/683] ssm_inventory_info - new module to retrieve ssm inventory for configured ec2 instances (#1745) ssm_inventory_info module SUMMARY new module to retrieve ssm inventory info for EC2 configured instances ISSUE TYPE New Module Pull Request COMPONENT NAME ssm_inventory_info Reviewed-by: Mark Chappell --- ssm_inventory_info.py | 120 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 ssm_inventory_info.py diff --git a/ssm_inventory_info.py b/ssm_inventory_info.py new file mode 100644 index 00000000000..4242596f128 --- /dev/null +++ b/ssm_inventory_info.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = """ +module: ssm_inventory_info +version_added: 6.0.0 +short_description: Get SSM inventory information for EC2 instance + +description: + - Gather SSM inventory for EC2 instance configured with SSM. + +author: 'Aubin Bikouo (@abikouo)' + +options: + instance_id: + description: + - EC2 instance id. + required: true + type: str + +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" + +EXAMPLES = """ +- name: Retrieve SSM inventory info for instance id 'i-012345678902' + community.aws.ssm_inventory_info: + instance_id: 'i-012345678902' +""" + + +RETURN = """ +ssm_inventory: + returned: on success + description: > + SSM inventory information. 
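# For example, registering the result and reading a single fact (the
# instance id is illustrative):
#
# - community.aws.ssm_inventory_info:
#     instance_id: i-012345678902
#   register: result
#
# - ansible.builtin.debug:
#     var: result.ssm_inventory.platform_name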
+ type: dict + sample: { + 'agent_type': 'amazon-ssm-agent', + 'agent_version': '3.2.582.0', + 'computer_name': 'ip-172-31-44-166.ec2.internal', + 'instance_id': 'i-039eb9b1f55934ab6', + 'instance_status': 'Active', + 'ip_address': '172.31.44.166', + 'platform_name': 'Fedora Linux', + 'platform_type': 'Linux', + 'platform_version': '37', + 'resource_type': 'EC2Instance' + } +""" + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +class SsmInventoryInfoFailure(Exception): + def __init__(self, exc, msg): + self.exc = exc + self.msg = msg + super().__init__(self) + + +def get_ssm_inventory(connection, filters): + try: + return connection.get_inventory(Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise SsmInventoryInfoFailure(exc=e, msg="get_ssm_inventory() failed.") + + +def execute_module(module, connection): + + instance_id = module.params.get("instance_id") + try: + filters = [ + { + "Key": "AWS:InstanceInformation.InstanceId", + "Values": [instance_id] + } + ] + + response = get_ssm_inventory(connection, filters) + entities = response.get("Entities", []) + ssm_inventory = {} + if entities: + content = entities[0].get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", []) + if content: + ssm_inventory = camel_dict_to_snake_dict(content[0]) + module.exit_json(changed=False, ssm_inventory=ssm_inventory) + except SsmInventoryInfoFailure as e: + module.fail_json_aws(exception=e.exc, msg=e.msg) + + +def main(): + argument_spec = dict( + instance_id=dict(required=True, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + connection = module.client("ssm") + execute_module(module, connection) + + +if __name__ == "__main__": + main() From 1da55da40eec6251aa508a0b9293eb77ed8ea854 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 8 Mar 2023 12:07:26 +0100 Subject: [PATCH 638/683] Cleanup headers and imports (#1738) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cleanup headers and imports SUMMARY Mass update of imports, docs fragments and file headers Many of the amazon.aws module_utils and docs fragments got moved about, update community.aws to reflect this. Consistently apply the comment headers as documented at https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#python-shebang-utf-8-coding ISSUE TYPE Docs Pull Request Feature Pull Request COMPONENT NAME ADDITIONAL INFORMATION Header cleanup based upon: https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#python-shebang-utf-8-coding Begin your Ansible module with #!/usr/bin/python - this “shebang” allows ansible_python_interpreter to work. Follow the shebang immediately with # -*- coding: utf-8 -*- to clarify that the file is UTF-8 encoded. and https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright-and-license After the shebang and UTF-8 coding, add a copyright line with the original copyright holder and a license declaration. The license declaration should be ONLY one line, not the full GPL prefix. ... 
Additions to the module (for instance, rewrites) are not permitted to add additional copyright lines other than the default copyright statement if missing: Reviewed-by: Alina Buzachis --- accessanalyzer_validate_policy_info.py | 25 +++++---- acm_certificate.py | 53 +++++++------------ acm_certificate_info.py | 25 +++++---- api_gateway.py | 28 +++++----- api_gateway_domain.py | 41 ++++++++------- application_autoscaling_policy.py | 30 +++++------ autoscaling_complete_lifecycle_action.py | 21 ++++---- autoscaling_instance_refresh.py | 34 ++++++------ autoscaling_instance_refresh_info.py | 36 ++++++------- autoscaling_launch_config.py | 28 +++++----- autoscaling_launch_config_find.py | 23 ++++----- autoscaling_launch_config_info.py | 24 ++++----- autoscaling_lifecycle_hook.py | 24 ++++----- autoscaling_policy.py | 29 ++++++----- autoscaling_scheduled_action.py | 29 ++++++----- aws_region_info.py | 37 ++++++------- batch_compute_environment.py | 32 ++++++------ batch_job_definition.py | 36 +++++++------ batch_job_queue.py | 35 +++++++------ cloudformation_exports_info.py | 31 ++++++----- cloudformation_stack_set.py | 43 ++++++++-------- cloudfront_distribution.py | 58 ++++++++++----------- cloudfront_distribution_info.py | 26 +++++----- cloudfront_invalidation.py | 38 +++++++------- cloudfront_origin_access_identity.py | 41 ++++++++------- cloudfront_response_headers_policy.py | 53 +++++++++---------- codebuild_project.py | 20 ++++---- codecommit_repository.py | 27 +++++----- codepipeline.py | 28 +++++----- config_aggregation_authorization.py | 23 ++++----- config_aggregator.py | 26 +++++----- config_delivery_channel.py | 24 ++++----- config_recorder.py | 24 ++++----- config_rule.py | 24 ++++----- data_pipeline.py | 30 +++++------ directconnect_confirm_connection.py | 38 +++++++------- directconnect_connection.py | 31 +++++------ directconnect_gateway.py | 26 +++++----- directconnect_link_aggregation_group.py | 28 +++++----- directconnect_virtual_interface.py | 30 +++++------ dms_endpoint.py | 32 ++++++------ dms_replication_subnet_group.py | 27 +++++----- dynamodb_table.py | 37 +++++++------ dynamodb_ttl.py | 25 +++++---- ec2_ami_copy.py | 28 +++++----- ec2_customer_gateway.py | 45 ++++++++-------- ec2_customer_gateway_info.py | 43 ++++++++-------- ec2_launch_template.py | 49 +++++++++--------- ec2_placement_group.py | 40 +++++++-------- ec2_placement_group_info.py | 36 ++++++------- ec2_snapshot_copy.py | 24 ++++----- ec2_transit_gateway.py | 41 ++++++++------- ec2_transit_gateway_info.py | 40 +++++++-------- ec2_transit_gateway_vpc_attachment.py | 26 ++++------ ec2_transit_gateway_vpc_attachment_info.py | 27 +++++----- ec2_vpc_egress_igw.py | 35 ++++++------- ec2_vpc_nacl.py | 35 +++++++------ ec2_vpc_nacl_info.py | 39 +++++++------- ec2_vpc_peer.py | 38 +++++++------- ec2_vpc_peering_info.py | 40 +++++++-------- ec2_vpc_vgw.py | 33 ++++++------ ec2_vpc_vgw_info.py | 27 +++++----- ec2_vpc_vpn.py | 42 +++++++-------- ec2_vpc_vpn_info.py | 43 ++++++++-------- ec2_win_password.py | 35 +++++++------ ecs_attribute.py | 33 ++++++------ ecs_cluster.py | 37 +++++++------ ecs_ecr.py | 35 ++++++------- ecs_service.py | 52 ++++++++++--------- ecs_service_info.py | 30 +++++------ ecs_tag.py | 43 ++++++++-------- ecs_task.py | 32 ++++++------ ecs_taskdefinition.py | 29 +++++------ ecs_taskdefinition_info.py | 32 ++++++------ efs.py | 32 ++++++------ efs_info.py | 35 ++++++------- efs_tag.py | 43 ++++++++-------- eks_cluster.py | 39 +++++++------- eks_fargate_profile.py | 41 ++++++++------- 
eks_nodegroup.py | 39 +++++++------- elasticache.py | 38 ++++++-------- elasticache_info.py | 37 +++++++------ elasticache_parameter_group.py | 28 +++++----- elasticache_snapshot.py | 28 +++++----- elasticache_subnet_group.py | 29 +++++------ elasticbeanstalk_app.py | 25 +++++---- elb_classic_lb_info.py | 60 +++++++++------------- elb_instance.py | 32 ++++++------ elb_network_lb.py | 37 +++++++------ elb_target.py | 38 +++++++------- elb_target_group.py | 40 +++++++-------- elb_target_group_info.py | 34 ++++++------ elb_target_info.py | 34 ++++++------ glue_connection.py | 31 ++++++----- glue_crawler.py | 33 ++++++------ glue_job.py | 35 ++++++------- iam_access_key.py | 36 ++++++------- iam_access_key_info.py | 32 ++++++------ iam_group.py | 51 +++++++----------- iam_managed_policy.py | 36 ++++++------- iam_mfa_device_info.py | 27 +++++----- iam_password_policy.py | 29 +++++------ iam_role.py | 38 +++++++------- iam_role_info.py | 32 ++++++------ iam_saml_federation.py | 49 +++++++----------- iam_server_certificate.py | 43 ++++++---------- iam_server_certificate_info.py | 29 +++++------ inspector_target.py | 40 +++++++-------- kinesis_stream.py | 41 ++++++++------- lightsail.py | 36 ++++++------- lightsail_static_ip.py | 25 ++++----- msk_cluster.py | 27 +++++----- msk_config.py | 19 +++---- networkfirewall.py | 27 +++++----- networkfirewall_info.py | 28 +++++----- networkfirewall_policy.py | 26 ++++------ networkfirewall_policy_info.py | 28 +++++----- networkfirewall_rule_group.py | 28 +++++----- networkfirewall_rule_group_info.py | 28 +++++----- opensearch.py | 54 +++++++++---------- opensearch_info.py | 38 ++++++-------- redshift.py | 40 +++++++-------- redshift_cross_region_snapshots.py | 25 ++++----- redshift_info.py | 33 ++++++------ redshift_subnet_group.py | 30 +++++------ s3_bucket_info.py | 34 ++++++------ s3_bucket_notification.py | 32 ++++++------ s3_cors.py | 31 +++++------ s3_lifecycle.py | 36 +++++++------ s3_logging.py | 33 ++++++------ s3_metrics_configuration.py | 42 +++++++-------- s3_sync.py | 54 ++++++++----------- s3_website.py | 33 ++++++------ secretsmanager_secret.py | 45 ++++++++-------- ses_identity.py | 35 +++++++------ ses_identity_policy.py | 33 ++++++------ ses_rule_set.py | 30 ++++++----- sns.py | 24 ++++----- sns_topic.py | 20 ++++---- sns_topic_info.py | 26 +++++----- sqs_queue.py | 33 ++++++------ ssm_parameter.py | 41 ++++++++------- stepfunctions_state_machine.py | 47 +++++++++-------- stepfunctions_state_machine_execution.py | 32 ++++++------ storagegateway_info.py | 32 ++++++------ sts_assume_role.py | 40 +++++++-------- sts_session_token.py | 27 +++++----- waf_condition.py | 36 ++++++------- waf_info.py | 26 +++++----- waf_rule.py | 47 +++++++++-------- waf_web_acl.py | 47 +++++++++-------- wafv2_ip_set.py | 31 +++++------ wafv2_ip_set_info.py | 28 +++++----- wafv2_resources.py | 28 +++++----- wafv2_resources_info.py | 28 +++++----- wafv2_rule_group.py | 39 +++++++------- wafv2_rule_group_info.py | 28 +++++----- wafv2_web_acl.py | 31 +++++------ wafv2_web_acl_info.py | 26 +++++----- 159 files changed, 2595 insertions(+), 2792 deletions(-) diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py index 790486e13c2..817f414671b 100644 --- a/accessanalyzer_validate_policy_info.py +++ b/accessanalyzer_validate_policy_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: accessanalyzer_validate_policy_info version_added: 5.0.0 @@ -63,19 +61,19 @@ author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Validate a policy - name: Validate a simple IAM policy community.aws.accessanalyzer_validate_policy_info: policy: "{{ lookup('template', 'managed_policy.json.j2') }}" -''' +""" -RETURN = r''' +RETURN = r""" findings: description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks. returned: success @@ -160,7 +158,7 @@ description: The offset within the policy that corresponds to the position, starting from C(0). type: int returned: success -''' +""" try: import botocore @@ -169,8 +167,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def filter_findings(findings, type_filter): diff --git a/acm_certificate.py b/acm_certificate.py index 313bdc424a3..e7ea9c6d87a 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -1,31 +1,14 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# + # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <https://www.gnu.org/licenses/>.
-# + # Author: # - Matthew Davis # on behalf of Telstra Corporation Limited -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: acm_certificate short_description: Upload and delete certificates in the AWS Certificate Manager service @@ -175,13 +158,13 @@ author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: upload a self-signed certificate community.aws.aws_acm: @@ -230,9 +213,9 @@ Application: search Environment: development purge_tags: true -''' +""" -RETURN = ''' +RETURN = r""" certificate: description: Information about the certificate which was uploaded type: complex @@ -255,7 +238,7 @@ returned: when I(state=absent) sample: - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" -''' +""" import base64 @@ -267,15 +250,15 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - boto3_tag_list_to_ansible_dict, - ansible_dict_to_boto3_tag_list, -) from ansible.module_utils._text import to_text +from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): if tags is None: diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 7395ec65ddc..2364751f519 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: acm_certificate_info short_description: Retrieve certificate information from AWS Certificate Manager service version_added: 1.0.0 @@ -43,12 +41,12 @@ author: - Will Thames (@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: obtain all ACM certificates community.aws.aws_acm_info: @@ -73,9 +71,9 @@ community.aws.aws_acm_info: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" -''' +""" -RETURN = r''' +RETURN = r""" certificates: description: A list of certificates returned: always @@ -257,11 +255,12 @@ returned: always sample: 
AMAZON_ISSUED type: str -''' +""" -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( diff --git a/api_gateway.py b/api_gateway.py index e4085deced8..615c3d89aa3 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -4,11 +4,7 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: api_gateway version_added: 1.0.0 @@ -104,19 +100,18 @@ default: EDGE author: - 'Michael De La Rue (@mikedlr)' -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - notes: - A future version of this module will probably use tags or another ID so that an API can be created only once. - As an early workaround, an intermediate version will probably do the same using a tag embedded in the API name. -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set up AWS API Gateway and deploy an API definition community.aws.api_gateway: swagger_file: my_api.yml state: present @@ -145,9 +140,9 @@ cache_size: '6.1' canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True } state: present -''' +""" -RETURN = ''' +RETURN = r""" api_id: description: API id of the API endpoint created returned: success type: str sample: '0ln4zq7p86' @@ -168,7 +163,7 @@ returned: always type: list sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"] -''' +""" import json import traceback @@ -180,8 +175,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def main(): diff --git a/api_gateway_domain.py b/api_gateway_domain.py index ab0486d4f75..a1afeaec95f 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: api_gateway_domain short_description: Manage AWS API Gateway custom domains @@ -57,17 +55,17 @@ default: present choices: [ 'present', 'absent' ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 notes: - Does not create a DNS entry on Route53, for that use the M(community.aws.route53) module. - Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) options to add own Certificates.
-''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Setup endpoint for a custom domain for your API Gateway HTTP API community.aws.api_gateway_domain: domain_name: myapi.foobar.com @@ -88,9 +86,9 @@ zone: foobar.com alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}" command: create -''' +""" -RETURN = ''' +RETURN = r""" response: description: The data returned by create_domain_name (or update and delete) and create_base_path_mapping methods by boto3. returned: success @@ -110,19 +108,24 @@ path_mappings: [ { base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' } ] -''' +""" + +import copy try: - from botocore.exceptions import ClientError, BotoCoreError, EndpointConnectionError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import EndpointConnectionError except ImportError: pass # caught by imported AnsibleAWSModule -import copy +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict def get_domain(module, client): diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index 08264400341..1b8669d84c8 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: application_autoscaling_policy version_added: 1.0.0 @@ -104,12 +102,12 @@ required: false type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Create step scaling policy for ECS Service @@ -160,9 +158,9 @@ service_namespace: ecs resource_id: service/cluster-name/service-name scalable_dimension: ecs:service:DesiredCount -''' +""" -RETURN = ''' +RETURN = r""" alarms: description: List of the CloudWatch alarms associated with the scaling policy returned: when state present @@ -283,16 +281,18 @@ returned: when state present type: str sample: '2017-09-28T08:22:51.881000-03:00' -''' # NOQA - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict +""" try: import botocore except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # Merge the results of the scalable target creation and policy deletion/creation # There's no risk in overriding values since mutual keys have the same values in our case diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py index 62d5e64b3bc..2b752c9a4b8 100644 --- a/autoscaling_complete_lifecycle_action.py +++ b/autoscaling_complete_lifecycle_action.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_complete_lifecycle_action short_description: Completes the lifecycle action of an instance @@ -37,12 +36,12 @@ type: str required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Complete the lifecycle action - aws_asg_complete_lifecycle_action: @@ -50,16 +49,16 @@ lifecycle_hook_name: my-lifecycle-hook lifecycle_action_result: CONTINUE instance_id: i-123knm1l2312 -''' +""" -RETURN = ''' +RETURN = r""" --- status: description: How things went returned: success type: str sample: ["OK"] -''' +""" try: import botocore diff --git a/autoscaling_instance_refresh.py b/autoscaling_instance_refresh.py index 73e223af3de..7cf82132e64 100644 --- a/autoscaling_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_instance_refresh version_added: 3.2.0 @@ -61,12 +59,12 @@ type: int type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Start a refresh @@ -87,9 +85,9 @@ min_healthy_percentage: 91 instance_warmup: 60 -''' +""" -RETURN = ''' +RETURN = r""" --- instance_refresh_id: description: instance refresh id @@ -137,19 +135,21 @@ returned: success type: int sample: 5 -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict def start_or_cancel_instance_refresh(conn, module): diff --git a/autoscaling_instance_refresh_info.py b/autoscaling_instance_refresh_info.py index 064e92789b6..64581b46829 100644 --- a/autoscaling_instance_refresh_info.py +++ b/autoscaling_instance_refresh_info.py @@ -1,14 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_instance_refresh_info version_added: 3.2.0 @@ -18,7 +14,8 @@ - You can determine the status of a request by looking at the I(status) parameter. - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info). The usage did not change. -author: "Dan Khersonsky (@danquixote)" +author: + - "Dan Khersonsky (@danquixote)" options: name: description: @@ -41,12 +38,12 @@ type: int required: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Find a refresh by ASG name @@ -70,9 +67,9 @@ name: somename-asg next_token: 'some-token-123' register: asgs -''' +""" -RETURN = ''' +RETURN = r""" --- instance_refresh_id: description: instance refresh id @@ -120,16 +117,19 @@ returned: success type: int sample: 5 -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def find_asg_instance_refreshes(conn, module): diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index 0e5cf844673..b21f296ce0e 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config version_added: 1.0.0 @@ -183,12 +180,12 @@ type: str choices: ['default', 'dedicated'] extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a launch configuration with an encrypted volume community.aws.autoscaling_launch_config: @@ -254,9 +251,9 @@ security_groups: "['sg-xxxx']" volumes: "{{ volumes }}" register: lc_info -''' +""" -RETURN = r''' +RETURN = r""" arn: description: The Amazon Resource Name of the launch configuration.
returned: when I(state=present) @@ -440,7 +437,7 @@ type: list sample: - sg-5e27db2f -''' +""" import traceback @@ -454,9 +451,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def create_block_device_meta(module, volume): if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume: diff --git a/autoscaling_launch_config_find.py b/autoscaling_launch_config_find.py index 699859af7e1..8f3ca14bec3 100644 --- a/autoscaling_launch_config_find.py +++ b/autoscaling_launch_config_find.py @@ -1,14 +1,10 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2015, Jose Armesto # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config_find version_added: 1.0.0 @@ -40,12 +36,12 @@ - Corresponds to Python slice notation like list[:limit]. type: int extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Search for the Launch Configurations that start with "app" @@ -53,9 +49,9 @@ name_regex: app.* sort_order: descending limit: 2 -''' +""" -RETURN = ''' +RETURN = r""" image_id: description: AMI id returned: when Launch Configuration was found @@ -132,7 +128,8 @@ type: bool sample: True ... -''' +""" + import re try: diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py index 44359bc64da..73e8fbdd8da 100644 --- a/autoscaling_launch_config_info.py +++ b/autoscaling_launch_config_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_launch_config_info version_added: 1.0.0 @@ -48,12 +45,12 @@ - Corresponds to Python slice notation. type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all launch configurations @@ -67,9 +64,9 @@ community.aws.autoscaling_launch_config_info: sort: created_time sort_order: descending -''' +""" -RETURN = r''' +RETURN = r""" block_device_mapping: description: Block device mapping for the instances of launch configuration. type: list @@ -149,11 +146,10 @@ description: User data available. 
type: str returned: always -''' +""" try: import botocore - from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -173,7 +169,7 @@ def list_launch_configs(connection, module): try: pg = connection.get_paginator('describe_launch_configurations') launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() - except ClientError as e: + except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to list launch configs") snaked_launch_configs = [] diff --git a/autoscaling_lifecycle_hook.py b/autoscaling_lifecycle_hook.py index 72d3c6dfda2..a3b8edb499b 100644 --- a/autoscaling_lifecycle_hook.py +++ b/autoscaling_lifecycle_hook.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_lifecycle_hook version_added: 1.0.0 @@ -74,12 +71,12 @@ default: ABANDON type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create / Update lifecycle hook community.aws.autoscaling_lifecycle_hook: region: eu-central-1 @@ -96,9 +93,9 @@ state: absent autoscaling_group_name: example lifecycle_hook_name: example -''' +""" -RETURN = ''' +RETURN = r""" --- auto_scaling_group_name: description: The unique name of the auto scaling group. @@ -130,7 +127,7 @@ returned: success type: str sample: "autoscaling:EC2_INSTANCE_LAUNCHING" -''' +""" try: @@ -138,9 +135,10 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def create_lifecycle_hook(connection, module): diff --git a/autoscaling_policy.py b/autoscaling_policy.py index 19c7e46b481..b628fe7b58f 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: autoscaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups version_added: 1.0.0 @@ -189,11 +187,12 @@ description: - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' -EXAMPLES = ''' +""" + +EXAMPLES = r""" - name: Simple Scale Down policy community.aws.autoscaling_policy: state: present @@ -261,9 +260,9 @@ target_value: 98.0 asg_name: asg-test-1 register: result -''' +""" -RETURN = ''' +RETURN = r""" adjustment_type: description: Scaling policy adjustment type. 
returned: always @@ -349,17 +348,19 @@ returned: always type: int sample: 50 -''' +""" try: import botocore except ImportError: pass # caught by imported AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def build_target_specification(target_tracking_config): diff --git a/autoscaling_scheduled_action.py b/autoscaling_scheduled_action.py index cbccf7f1fe3..bf0d4bcc44f 100644 --- a/autoscaling_scheduled_action.py +++ b/autoscaling_scheduled_action.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -6,10 +7,7 @@ # Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py # (c) 2016, Mike Mochan <@mmochan> -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_scheduled_action version_added: 2.2.0 @@ -67,14 +65,15 @@ required: false default: present choices: ['present', 'absent'] -author: Mark Woolley(@marknet15) +author: + - Mark Woolley (@marknet15) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create a scheduled action for an autoscaling group. - name: Create a minimal scheduled action for an autoscaling group community.aws.autoscaling_scheduled_action: @@ -108,9 +107,9 @@ autoscaling_group_name: test_asg scheduled_action_name: test_scheduled_action state: absent -''' +""" -RETURN = r''' +RETURN = r""" scheduled_action_name: description: The name of the scheduled action.
returned: when I(state=present) @@ -151,7 +150,7 @@ returned: when I(state=present) type: int sample: 1 -''' +""" try: import botocore @@ -160,12 +159,14 @@ try: from dateutil.parser import parse as timedate_parse + HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def format_request(): diff --git a/aws_region_info.py b/aws_region_info.py index ad9368ef14c..837e9326552 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: aws_region_info short_description: Gather information about AWS regions version_added: 1.0.0 @@ -26,12 +24,12 @@ default: {} type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all regions @@ -41,9 +39,9 @@ - community.aws.aws_region_info: filters: region-name: eu-west-1 -''' +""" -RETURN = ''' +RETURN = r""" regions: returned: on success description: > @@ -54,18 +52,21 @@ 'endpoint': 'ec2.us-west-1.amazonaws.com', 'region_name': 'us-west-1' }]" -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( diff --git a/batch_compute_environment.py b/batch_compute_environment.py index c6c752a3c40..79123501992 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_compute_environment version_added: 1.0.0 @@ -120,12 +118,12 @@ - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. 
type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: My Batch Compute Environment community.aws.batch_compute_environment: compute_environment_name: computeEnvironmentName @@ -155,9 +153,9 @@ - name: show results ansible.builtin.debug: var: aws_batch_compute_environment_action -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -222,17 +220,21 @@ statusReason: "ComputeEnvironment Healthy" type: MANAGED type: dict -''' +""" import re -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # --------------------------------------------------------------------------------------------------- # diff --git a/batch_job_definition.py b/batch_job_definition.py index 021d833b464..5eac0cacfe1 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_job_definition version_added: 1.0.0 @@ -179,12 +177,12 @@ many times. 
type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: My Batch Job Definition community.aws.batch_job_definition: @@ -207,9 +205,9 @@ - name: show results ansible.builtin.debug: var=job_definition_create_result -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -223,17 +221,21 @@ status: INACTIVE type: container type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.batch import cc +from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # --------------------------------------------------------------------------------------------------- # diff --git a/batch_job_queue.py b/batch_job_queue.py index e20c430ba6a..f71848bb04a 100644 --- a/batch_job_queue.py +++ b/batch_job_queue.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Jon Meran # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: batch_job_queue version_added: 1.0.0 @@ -63,12 +61,12 @@ type: str description: The name of the compute environment. 
extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: My Batch Job Queue community.aws.batch_job_queue: job_queue_name: jobQueueName @@ -86,9 +84,9 @@ - name: show results ansible.builtin.debug: var: batch_job_queue_action -''' +""" -RETURN = r''' +RETURN = r""" --- output: description: "returns what action was taken, whether something was changed, invocation and response" @@ -104,17 +102,20 @@ status: UPDATING status_reason: "JobQueue Healthy" type: dict -''' - -from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + # --------------------------------------------------------------------------------------------------- # # Helper Functions & classes diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 7030ca8ba5c..604abfd1436 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: cloudformation_exports_info short_description: Read a value from CloudFormation Exports version_added: 1.0.0 @@ -15,12 +13,12 @@ author: - "Michael Moyle (@mmoyle)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get Exports community.aws.cloudformation_exports_info: profile: 'my_aws_profile' @@ -28,17 +26,14 @@ register: cf_exports - ansible.builtin.debug: msg: "{{ cf_exports }}" -''' +""" -RETURN = ''' +RETURN = r""" export_items: description: A dictionary of Exports items names and values. 
returned: Always type: dict -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +""" try: from botocore.exceptions import ClientError @@ -46,6 +41,10 @@ except ImportError: pass # handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + @AWSRetry.exponential_backoff() def list_exports(cloudformation_client): diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 2d5bd83d455..e15f1c95229 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -1,20 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: cloudformation_stack_set version_added: 1.0.0 short_description: Manage groups of CloudFormation stacks description: - - Launches/updates/deletes AWS CloudFormation Stack Sets. + - Launches/updates/deletes AWS CloudFormation Stack Sets. notes: - - To make an individual stack, you want the M(amazon.aws.cloudformation) module. + - To make an individual stack, you want the M(amazon.aws.cloudformation) module. options: name: description: @@ -169,14 +167,15 @@ - Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual count may be lower. -author: "Ryan Scott Brown (@ryansb)" +author: + - "Ryan Scott Brown (@ryansb)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a stack set with instances in two accounts community.aws.cloudformation_stack_set: name: my-stack @@ -213,9 +212,9 @@ accounts: [1234567890, 2345678901] regions: - us-east-1 -''' +""" -RETURN = r''' +RETURN = r""" operations_log: type: list description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. 
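The import rewrites in this file repeat the relocation pattern that runs through every other hunk in this patch. Condensed into one sketch for reference (a summary of the moves visible in these diffs, not an exhaustive map of the amazon.aws collection):

# Docs fragments referenced from DOCUMENTATION blocks:
#   amazon.aws.aws -> amazon.aws.common.modules
#   amazon.aws.ec2 -> amazon.aws.region.modules (amazon.aws.boto3 is unchanged)

# module_utils helpers, each old import commented above its new home:

# from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

# from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code

# from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters

# from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

# from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict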
@@ -294,7 +293,7 @@ Type: "AWS::SNS::Topic" Properties: {} -''' # NOQA +""" import datetime import itertools @@ -302,7 +301,8 @@ import uuid try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # handled by AnsibleAWSModule pass @@ -310,11 +310,12 @@ from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def create_stack_set(module, stack_params, cfn): diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index f7ff3b51553..ec6e74daf36 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -21,12 +19,6 @@ - Willem van Ketwich (@wilvk) - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags - options: state: @@ -597,9 +589,14 @@ default: 1800 type: int -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a basic distribution with defaults and tags community.aws.cloudfront_distribution: state: present @@ -689,9 +686,9 @@ community.aws.cloudfront_distribution: state: absent caller_reference: replaceable distribution -''' +""" -RETURN = r''' +RETURN = r""" active_trusted_signers: description: Key pair IDs that CloudFront is aware of for each trusted signer. 
returned: always @@ -1414,29 +1411,30 @@ returned: always type: str sample: abcd1234-1234-abcd-abcd-abcd12345678 -''' +""" -from ansible.module_utils._text import to_text, to_native -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager -from ansible.module_utils.common.dict_transformations import recursive_diff -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from collections import OrderedDict import datetime -try: - from collections import OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict - except ImportError: - pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed) - try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import recursive_diff +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def change_dict_key_name(dictionary, old_key, new_key): if old_key in dictionary: diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py index 179e572e0c9..bc6bd8073c9 100644 --- a/cloudfront_distribution_info.py +++ b/cloudfront_distribution_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudfront_distribution_info version_added: 1.0.0 @@ -144,12 +141,12 @@ type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get a summary of distributions @@ -192,9 +189,9 @@ - name: Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) community.aws.cloudfront_distribution_info: all_lists: true -''' +""" -RETURN = ''' +RETURN = r""" origin_access_identity: description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set. 
returned: only if I(origin_access_identity) is true @@ -243,11 +240,12 @@ as figuring out the DistributionId is usually the reason one uses this module in the first place. returned: always type: dict -''' +""" -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases): facts[distribution_id] = details diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index dbf478e4408..adee5058b17 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -14,15 +12,10 @@ short_description: create invalidations for AWS CloudFront distributions description: - - Allows for invalidation of a batch of paths for a CloudFront distribution. - -author: Willem van Ketwich (@wilvk) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - Allows for invalidation of a batch of paths for a CloudFront distribution. +author: + - Willem van Ketwich (@wilvk) options: distribution_id: @@ -52,9 +45,13 @@ notes: - does not support check mode -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a batch of invalidations using a distribution_id for a reference community.aws.cloudfront_invalidation: @@ -74,9 +71,9 @@ - /testpathtwo/test5.js - /testpaththree/* -''' +""" -RETURN = r''' +RETURN = r""" invalidation: description: The invalidation's information. 
returned: always @@ -130,7 +127,7 @@ returned: always type: str sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 -''' +""" import datetime @@ -142,10 +139,11 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class CloudFrontInvalidationServiceManager(object): """ diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index e59c9439701..1da411f8677 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- version_added: 1.0.0 @@ -16,16 +14,11 @@ CloudFront distribution description: - - Allows for easy creation, updating and deletion of origin access - identities. - -author: Willem van Ketwich (@wilvk) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - Allows for easy creation, updating and deletion of origin access + identities. +author: + - Willem van Ketwich (@wilvk) options: state: @@ -54,9 +47,13 @@ notes: - Does not support check mode. -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: create an origin access identity community.aws.cloudfront_origin_access_identity: @@ -76,9 +73,9 @@ caller_reference: this is an example reference comment: this is a new comment -''' +""" -RETURN = ''' +RETURN = r""" cloud_front_origin_access_identity: description: The origin access identity's information. 
returned: always @@ -114,20 +111,22 @@ returned: when initially created type: str -''' +""" import datetime try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code class CloudFrontOriginAccessIdentityServiceManager(object): diff --git a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py index f009fe89b3c..c84346c387a 100644 --- a/cloudfront_response_headers_policy.py +++ b/cloudfront_response_headers_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- version_added: 3.2.0 module: cloudfront_response_headers_policy @@ -14,16 +12,11 @@ short_description: Create, update and delete response headers policies to be used in a Cloudfront distribution description: - - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers - - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) - -author: Stefan Horning (@stefanhorning) - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers + - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) +author: + - Stefan Horning (@stefanhorning) options: state: @@ -57,9 +50,13 @@ default: {} type: dict -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Creating a Cloudfront header policy using all predefined header features and a custom header for demonstration community.aws.cloudfront_response_headers_policy: name: my-header-policy @@ -113,9 +110,9 @@ community.aws.cloudfront_response_headers_policy: name: my-header-policy state: absent -''' +""" -RETURN = ''' +RETURN = r""" response_headers_policy: description: The policy's information returned: success @@ -141,16 +138,20 @@ type: str returned: always sample: my-header-policy -''' +""" + +import datetime try: - from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations
import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -import datetime class CloudfrontResponseHeadersPolicyService(object): @@ -174,7 +175,7 @@ def find_response_headers_policy(self, name): matching_policy = None return matching_policy - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error fetching policy information") def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config): @@ -208,7 +209,7 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea try: result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config) changed = True - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating policy") else: policy_id = matching_policy['ResponseHeadersPolicy']['Id'] @@ -223,7 +224,7 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea # consider change made by this execution of the module if returned timestamp was very recent if changed_time > seconds_ago: changed = True - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error updating policy") self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) @@ -241,7 +242,7 @@ def delete_response_header_policy(self, name): else: try: result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag) - except (ParamValidationError, ClientError, BotoCoreError) as e: + except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error deleting policy") self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result)) diff --git a/codebuild_project.py b/codebuild_project.py index cd372258d67..71f05bf7233 100644 --- a/codebuild_project.py +++ b/codebuild_project.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: codebuild_project version_added: 1.0.0 @@ -154,9 +151,9 @@ - amazon.aws.common.modules - amazon.aws.region.modules - amazon.aws.tags.modules -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - community.aws.codebuild_project: @@ -182,9 +179,9 @@ encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3" region: us-east-1 state: present -''' +""" -RETURN = r''' +RETURN = r""" project: description: Returns the dictionary describing the code project configuration.
returned: success @@ -288,7 +285,7 @@ returned: always type: str sample: "2018-04-17T16:56:03.245000+02:00" -''' +""" try: import botocore @@ -302,6 +299,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule diff --git a/codecommit_repository.py b/codecommit_repository.py index b9282183d58..1552738bea5 100644 --- a/codecommit_repository.py +++ b/codecommit_repository.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: (c) 2018, Shuang Wang # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: codecommit_repository version_added: 1.0.0 @@ -17,7 +14,8 @@ - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. - Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit). The usage did not change. -author: Shuang Wang (@ptux) +author: + - Shuang Wang (@ptux) options: name: description: @@ -39,12 +37,12 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = ''' +RETURN = r""" repository_metadata: description: "Information about the repository." returned: always @@ -120,9 +118,9 @@ returned: always type: str sample: "0" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create a new repository - community.aws.codecommit_repository: name: repo @@ -132,15 +130,16 @@ - community.aws.codecommit_repository: name: repo state: absent -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict class CodeCommit(object): diff --git a/codepipeline.py b/codepipeline.py index 774231d5bdd..a2ec7713b4a 100644 --- a/codepipeline.py +++ b/codepipeline.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: codepipeline version_added: 1.0.0 @@ -75,12 +72,12 @@ choices: ['present', 'absent'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) @@ -147,9 +144,9 @@ FileName: imagedefinitions.json region: us-east-1 state: present -''' +""" -RETURN = r''' +RETURN = r""" pipeline: description: Returns the dictionary describing the CodePipeline configuration. returned: success @@ -194,7 +191,7 @@ - This number is auto incremented when CodePipeline params are changed. returned: always type: int -''' +""" import copy @@ -205,9 +202,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies def create_pipeline(client, name, role_arn, artifact_store, stages, version, module): diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py index 9060fcd97d8..96f1eb1d9cd 100644 --- a/config_aggregation_authorization.py +++ b/config_aggregation_authorization.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_aggregation_authorization version_added: 1.0.0 @@ -36,12 +33,12 @@ type: str required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get current account ID community.aws.aws_caller_info: register: whoami @@ -49,18 +46,18 @@ state: present authorized_account_id: '{{ whoami.account }}' authorized_aws_region: us-east-1 -''' - -RETURN = '''#''' +""" +RETURN = r"""#""" try: import botocore except ImportError: pass # handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def resource_exists(client, module, params): diff --git a/config_aggregator.py b/config_aggregator.py index c54357700a3..38271fc4542 100644 --- a/config_aggregator.py +++ b/config_aggregator.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: config_aggregator version_added: 1.0.0 @@ -71,12 +68,12 @@ type: dict required: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create cross-account aggregator community.aws.config_aggregator: name: test_config_rule @@ -87,9 +84,9 @@ - 0123456789 - 9012345678 all_aws_regions: 
true -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" try: @@ -97,9 +94,12 @@ except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict def resource_exists(client, module, params): diff --git a/config_delivery_channel.py b/config_delivery_channel.py index d97467502a2..2dd5fbc68d2 100644 --- a/config_delivery_channel.py +++ b/config_delivery_channel.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_delivery_channel version_added: 1.0.0 @@ -49,12 +46,12 @@ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Delivery Channel for AWS Config community.aws.config_delivery_channel: name: test_delivery_channel @@ -62,9 +59,9 @@ s3_bucket: 'test_aws_config_bucket' sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' delivery_frequency: 'Twelve_Hours' -''' +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -74,9 +71,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry # this waits for an IAM role to become fully available, at the cost of diff --git a/config_recorder.py b/config_recorder.py index 7d6ebae8ef1..b310787b72d 100644 --- a/config_recorder.py +++ b/config_recorder.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_recorder version_added: 1.0.0 @@ -62,12 +59,12 @@ - Before you can set this option, you must set I(all_supported=false). 
type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Configuration Recorder for AWS Config community.aws.config_recorder: name: test_configuration_recorder @@ -76,9 +73,9 @@ recording_group: all_supported: true include_global_types: true -''' +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -88,9 +85,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def resource_exists(client, module, params): diff --git a/config_rule.py b/config_rule.py index aa1ff626a92..4fd3103931f 100644 --- a/config_rule.py +++ b/config_rule.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Aaron Smith # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: config_rule version_added: 1.0.0 @@ -86,12 +83,12 @@ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Config Rule for AWS Config community.aws.config_rule: name: test_config_rule @@ -104,9 +101,9 @@ owner: AWS identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' -''' +""" -RETURN = '''#''' +RETURN = r"""#""" try: @@ -116,9 +113,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def rule_exists(client, module, params): diff --git a/data_pipeline.py b/data_pipeline.py index a5b0e627a40..51068159507 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: data_pipeline version_added: 1.0.0 @@ -15,10 +12,6 @@ - Raghu Udiyar (@raags) - Sloane Hertel (@s-hertel) short_description: Create and manage AWS Datapipelines -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 description: - Create and manage AWS Datapipelines. 
Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) given to the datapipeline. @@ -126,9 +119,13 @@ type: dict default: {} aliases: ['resource_tags'] -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create pipeline @@ -178,9 +175,9 @@ region: us-west-2 state: absent -''' +""" -RETURN = r''' +RETURN = r""" changed: description: whether the data pipeline has been modified type: bool @@ -195,7 +192,7 @@ data_pipeline will be an empty dict. The msg describes the status of the operation. returned: always type: dict -''' +""" import hashlib import json @@ -209,8 +206,9 @@ from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py index 2705c50a6b5..25aeebb244c 100644 --- a/directconnect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -1,15 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_confirm_connection short_description: Confirms the creation of a hosted DirectConnect connection @@ -21,10 +16,6 @@ The usage did not change. author: - "Matt Traynham (@mtraynham)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: description: @@ -36,9 +27,13 @@ - The ID of the Direct Connect connection. - One of I(connection_id) or I(name) must be specified. type: str -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # confirm a Direct Connect by name - name: confirm the connection id @@ -49,29 +44,32 @@ - name: confirm the connection id community.aws.directconnect_confirm_connection: connection_id: dxcon-xxxxxxxx -''' +""" -RETURN = ''' +RETURN = r""" connection_state: description: The state of the connection. 
returned: always type: str sample: pending -''' +""" import traceback try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/directconnect_connection.py b/directconnect_connection.py index 11ac14dfa22..709fef7a79f 100644 --- a/directconnect_connection.py +++ b/directconnect_connection.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_connection version_added: 1.0.0 @@ -19,10 +17,6 @@ The usage did not change. author: - "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -68,9 +62,13 @@ - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location). type: bool default: false -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # create a Direct Connect connection - community.aws.directconnect_connection: @@ -102,7 +100,7 @@ name: ansible-test-connection """ -RETURN = """ +RETURN = r""" connection: description: The attributes of the direct connect connection.
type: complex @@ -158,18 +156,21 @@ import traceback try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} diff --git a/directconnect_gateway.py b/directconnect_gateway.py index 8ad4f9bf161..53fb47c9603 100644 --- a/directconnect_gateway.py +++ b/directconnect_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: directconnect_gateway author: - Gobin Sougrakpam (@gobins) @@ -19,10 +17,6 @@ - Detaches Virtual Gateways from Direct Connect Gateway. - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway). The usage did not change. -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -54,9 +48,13 @@ - How long to wait for the association to be deleted. type: int default: 320 -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new direct connect gateway attached to virtual private gateway community.aws.directconnect_gateway: state: present @@ -71,9 +69,9 @@ name: my-dx-gateway amazon_asn: 7224 register: created_dxgw -''' +""" -RETURN = ''' +RETURN = r""" result: description: - The attributes of the Direct Connect Gateway @@ -95,7 +93,7 @@ owner_account: description: The AWS account ID of the owner of the direct connect gateway. type: str -''' +""" import time diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index 6b7ec8bdbe8..8a50e3c7e89 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: directconnect_link_aggregation_group version_added: 1.0.0 @@ -17,10 +15,6 @@ The usage did not change.
author: - "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: state: description: @@ -81,9 +75,13 @@ - The duration in seconds to wait if I(wait=true). default: 120 type: int -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # create a Direct Connect connection - community.aws.directconnect_link_aggregation_group: @@ -93,7 +91,7 @@ bandwidth: 1Gbps """ -RETURN = """ +RETURN = r""" changed: type: str description: Whether or not the LAG has changed. @@ -173,13 +171,13 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry - from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def lag_status(client, lag_id): diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py index 88a8f5622a8..bded2ab57ab 100644 --- a/directconnect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: directconnect_virtual_interface version_added: 1.0.0 @@ -86,12 +84,12 @@ - The virtual interface ID. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = r''' +RETURN = r""" address_family: description: The address family for the BGP peer. 
returned: always @@ -228,9 +226,9 @@ returned: always type: int sample: 100 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: create an association between a LAG and connection community.aws.directconnect_virtual_interface: @@ -245,22 +243,24 @@ connection_id: dxcon-XXXXXXXX virtual_interface_id: dxv-XXXXXXXX -''' +""" import traceback try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # handled by AnsibleAWSModule pass from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def try_except_ClientError(failure_msg): diff --git a/dms_endpoint.py b/dms_endpoint.py index 692fb25bd88..66b5dd9b2c3 100644 --- a/dms_endpoint.py +++ b/dms_endpoint.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dms_endpoint version_added: 1.0.0 @@ -143,13 +141,13 @@ author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details - name: Endpoint Creation community.aws.dms_endpoint: @@ -164,9 +162,9 @@ databasename: 'testdb' sslmode: none wait: false -''' +""" -RETURN = ''' +RETURN = r""" endpoint: description: - A description of the DMS endpoint. @@ -325,7 +323,7 @@ - Additional settings for Redis endpoints. 
type: dict returned: when the I(endpoint_type) is C(redis) -''' +""" try: import botocore @@ -334,13 +332,15 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + backoff_params = dict(retries=5, delay=1, backoff=1.5) diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index 3fdbdc0a429..bd75df6d67b 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dms_replication_subnet_group version_added: 1.0.0 @@ -43,29 +41,30 @@ author: - "Rui Moreira (@ruimoreira)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - community.aws.dms_replication_subnet_group: state: present identifier: "dev-sngroup" description: "Development Subnet Group" subnet_ids: ['subnet-id1','subnet-id2'] -''' +""" -RETURN = ''' # ''' +RETURN = r""" # """ try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + backoff_params = dict(retries=5, delay=1, backoff=1.5) diff --git a/dynamodb_table.py b/dynamodb_table.py index 71b0e4ccc26..a059198d858 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: dynamodb_table version_added: 1.0.0 @@ -142,13 +140,13 @@ default: True type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create dynamo table with hash and range primary key community.aws.dynamodb_table: name: my-table @@ -197,9 +195,9 @@ name:
my-table region: us-east-1 state: absent -''' +""" -RETURN = r''' +RETURN = r""" table: description: The returned table params from the describe API call. returned: success @@ -243,21 +241,22 @@ returned: success type: str sample: ACTIVE -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags DYNAMO_TYPE_DEFAULT = 'STRING' diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index ec7d6ed2f65..bd1c7789903 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: dynamodb_ttl version_added: 1.0.0 @@ -32,14 +30,15 @@ required: true type: str -author: Ted Timmons (@tedder) +author: +- Ted Timmons (@tedder) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: enable TTL on my cowfacts table community.aws.dynamodb_ttl: state: enable @@ -51,9 +50,9 @@ state: disable table_name: cowfacts attribute_name: cow_deleted_date -''' +""" -RETURN = ''' +RETURN = r""" current_status: description: current or new TTL specification. 
type: dict @@ -61,7 +60,7 @@ sample: - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" } - { "AttributeName": "deploy_timestamp", "Enabled": true } -''' +""" try: import botocore diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 665aeab8a25..5af897cdb80 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami_copy version_added: 1.0.0 @@ -72,12 +69,12 @@ - Amir Moulavi (@amir343) - Tim C (@defunctio) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic AMI Copy community.aws.ec2_ami_copy: source_region: us-east-1 @@ -125,26 +122,29 @@ source_image_id: ami-xxxxxxx encrypted: true kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -''' +""" -RETURN = ''' +RETURN = r""" image_id: description: AMI ID of the copied AMI returned: always type: str sample: ami-e689729e -''' +""" try: - from botocore.exceptions import ClientError, WaiterError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list def copy_image(module, ec2): diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index a4637f38659..b858f9c4ead 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -1,25 +1,24 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_customer_gateway version_added: 1.0.0 short_description: Manage an AWS customer gateway description: - - Manage an AWS customer gateway. -author: Michael Baydoun (@MichaelBaydoun) + - Manage an AWS customer gateway. +author: + - Michael Baydoun (@MichaelBaydoun) notes: - - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the - first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent - requests do not create new customer gateway resources. - - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use - customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. 
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the + first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent + requests do not create new customer gateway resources. + - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use + customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. options: bgp_asn: description: @@ -49,13 +48,12 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Customer Gateway community.aws.ec2_customer_gateway: bgp_asn: 12345 @@ -71,9 +69,9 @@ state: absent region: us-east-1 register: cgw -''' +""" -RETURN = ''' +RETURN = r""" gateway.customer_gateways: description: details about the gateway that was created. returned: success @@ -108,7 +106,7 @@ returned: when gateway exists and is available. sample: ipsec.1 type: str -''' +""" try: import botocore @@ -117,8 +115,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class Ec2CustomerGatewayManager: diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index ce576b7ae81..d0674f52b19 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_customer_gateway_info version_added: 1.0.0 short_description: Gather information about customer gateways in AWS description: - - Gather information about customer gateways in AWS. -author: Madhura Naniwadekar (@Madhura-CSI) + - Gather information about customer gateways in AWS. +author: + - Madhura Naniwadekar (@Madhura-CSI) options: filters: description: @@ -28,13 +27,12 @@ elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all customer gateways @@ -55,9 +53,9 @@ - 'cgw-48841a09' - 'cgw-fec021ce' register: cust_gw_info -''' +""" -RETURN = r''' +RETURN = r""" customer_gateways: description: List of one or more customer gateways. 
returned: always @@ -78,19 +76,22 @@ "type": "ipsec.1" } ] -''' +""" import json + try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) def date_handler(obj): diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 094a6afdfa0..17f345a2f7e 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_launch_template version_added: 1.0.0 @@ -16,10 +15,6 @@ - The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all parameters on those tasks, be passed a Launch Template which contains settings like instance size, disk type, subnet, and more. -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 author: - Ryan Scott Brown (@ryansb) options: @@ -378,9 +373,13 @@ - Requires botocore >= 1.23.30 choices: [enabled, disabled] default: 'disabled' -''' +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an ec2 launch template community.aws.ec2_launch_template: name: "my_template" @@ -404,9 +403,9 @@ state: absent # This module does not yet allow deletion of specific versions of launch templates -''' +""" -RETURN = ''' +RETURN = r""" latest_version: description: Latest available version of the launch template returned: when state=present @@ -415,25 +414,29 @@ description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always. 
returned: when state=present type: int -''' +""" + import re from uuid import uuid4 +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters -try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError -except ImportError: - pass # caught by AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def determine_iam_role(module, name_or_arn): diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 4b4adc964e5..8687ded59d1 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -1,22 +1,21 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_placement_group version_added: 1.0.0 short_description: Create or delete an EC2 Placement Group description: - - Create an EC2 Placement Group; if the placement group already exists, - nothing is done. Or, delete an existing placement group. If the placement - group is absent, do nothing. See also - U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) -author: "Brad Macpherson (@iiibrad)" + - Create an EC2 Placement Group; if the placement group already exists, + nothing is done. Or, delete an existing placement group. If the placement + group is absent, do nothing. See also + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +author: + - "Brad Macpherson (@iiibrad)" options: name: description: @@ -45,12 +44,12 @@ choices: [ 'cluster', 'spread', 'partition' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide # for details. 
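# The options documented above (name, state, strategy) are enough for a simple
# group; a minimal creation sketch, assuming a hypothetical group name:
- name: Create a spread placement group
  community.aws.ec2_placement_group:
    name: my-spread-group
    state: present
    strategy: spread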
@@ -77,10 +76,10 @@ name: my-cluster state: absent -''' +""" -RETURN = ''' +RETURN = r""" placement_group: description: Placement group attributes returned: when state != absent @@ -99,16 +98,17 @@ type: str sample: "cluster" -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @AWSRetry.exponential_backoff() diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 4bcc9cfb4ae..bc9d717e49d 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_placement_group_info version_added: 1.0.0 short_description: List EC2 Placement Group(s) details description: - - List details of EC2 Placement Group(s). -author: "Brad Macpherson (@iiibrad)" + - List details of EC2 Placement Group(s). +author: + - "Brad Macpherson (@iiibrad)" options: names: description: @@ -24,13 +23,12 @@ required: false default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. 
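# A minimal info-gathering sketch, assuming a hypothetical register name;
# since names defaults to [], omitting it should list all placement groups:
- name: List details of all placement groups
  community.aws.ec2_placement_group_info:
  register: all_ec2_placement_groups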
@@ -49,10 +47,10 @@ msg: > {{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }} -''' +""" -RETURN = r''' +RETURN = r""" placement_groups: description: Placement group attributes returned: always @@ -71,14 +69,16 @@ type: str sample: "cluster" -''' +""" -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule try: - from botocore.exceptions import (BotoCoreError, ClientError) + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_placement_groups_details(connection, module): names = module.params.get("names") diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 7b38b1ea29a..59d0582c048 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_snapshot_copy version_added: 1.0.0 @@ -57,12 +54,12 @@ author: - Deepak Kothandan (@Deepakkothandan) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic Snapshot Copy community.aws.ec2_snapshot_copy: source_region: eu-central-1 @@ -100,24 +97,25 @@ source_snapshot_id: snap-xxxxxxx encrypted: true kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -''' +""" -RETURN = ''' +RETURN = r""" snapshot_id: description: snapshot id of the newly created snapshot returned: when snapshot copy is successful type: str sample: "snap-e9095e8c" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def copy_snapshot(module, ec2): """ diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 158fcf929a8..832d16defc9 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway short_description: Create and delete AWS Transit Gateways version_added: 1.0.0 @@ -74,13 +72,13 @@ author: - "Bob Boldin (@BobBoldin)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new transit gateway using defaults community.aws.ec2_transit_gateway: state: present @@ -114,9 +112,9 @@ region: ap-southeast-2 transit_gateway_id: tgw-3a9aa123 register: 
deleted_tgw -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway: description: The attributes of the transit gateway. type: complex @@ -210,20 +208,25 @@ returned: always type: str sample: tgw-3a9aa123 -''' +""" + +from time import sleep +from time import time try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by imported AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from time import sleep, time -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule class AnsibleEc2Tgw(object): diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index cae82e570cd..5053c8d65d8 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -1,19 +1,17 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_transit_gateway_info short_description: Gather information about ec2 transit gateways in AWS version_added: 1.0.0 description: - - Gather information about ec2 transit gateways in AWS -author: "Bob Boldin (@BobBoldin)" + - Gather information about ec2 transit gateways in AWS +author: + - "Bob Boldin (@BobBoldin)" options: transit_gateway_ids: description: @@ -29,13 +27,12 @@ type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather info about all transit gateways @@ -57,9 +54,9 @@ transit_gateway_ids: - tgw-02c42332e6b7da829 - tgw-03c53443d5a8cb716 -''' +""" -RETURN = r''' +RETURN = r""" transit_gateways: description: > Transit gateways that match the provided filters. 
Each element consists of a dict with all the information @@ -162,7 +159,7 @@ returned: always type: str sample: "tgw-02c42332e6b7da829" -''' +""" try: import botocore @@ -171,11 +168,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict class AnsibleEc2TgwInfo(object): diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py index 55267bc9185..2878fbf9129 100644 --- a/ec2_transit_gateway_vpc_attachment.py +++ b/ec2_transit_gateway_vpc_attachment.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway_vpc_attachment short_description: Create and delete AWS Transit Gateway VPC attachments version_added: 4.0.0 @@ -98,13 +96,13 @@ author: - "Mark Chappell (@tremble)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create a Transit Gateway attachment - community.aws.ec2_transit_gateway_vpc_attachment: state: present @@ -135,9 +133,9 @@ - community.aws.ec2_transit_gateway_vpc_attachment: state: absent id: 'tgw-attach-0c0c5fd0b0f01d1c9' -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway_attachments: description: The attributes of the Transit Gateway attachments. 
type: list @@ -216,11 +214,9 @@ type: str returned: success example: '123456789012' -''' - +""" from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py index 88f57fefa1b..49c03ff432c 100644 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ b/ec2_transit_gateway_vpc_attachment_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_transit_gateway_vpc_attachment_info short_description: describes AWS Transit Gateway VPC attachments version_added: 4.0.0 @@ -39,14 +37,15 @@ type: bool required: false default: false -author: "Mark Chappell (@tremble)" +author: + - "Mark Chappell (@tremble)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Describe a specific Transit Gateway attachment. - community.aws.ec2_transit_gateway_vpc_attachment_info: id: 'tgw-attach-0123456789abcdef0' @@ -60,9 +59,9 @@ - community.aws.ec2_transit_gateway_vpc_attachment_info: filters: transit-gateway-id: 'tgw-0fedcba9876543210' -''' +""" -RETURN = ''' +RETURN = r""" transit_gateway_attachments: description: The attributes of the Transit Gateway attachments. type: list @@ -141,11 +140,9 @@ type: str returned: success example: '123456789012' -''' - +""" from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index 09531892548..b6fb0b837f1 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_egress_igw version_added: 1.0.0 short_description: Manage an AWS VPC Egress Only Internet gateway description: - - Manage an AWS VPC Egress Only Internet gateway -author: Daniel Shepherd (@shepdelacreme) + - Manage an AWS VPC Egress Only Internet gateway +author: + - Daniel Shepherd (@shepdelacreme) options: vpc_id: description: @@ -27,13 +26,12 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. @@ -43,9 +41,9 @@ state: present register: eigw -''' +""" -RETURN = ''' +RETURN = r""" gateway_id: description: The ID of the Egress Only Internet Gateway or Null.
returned: always @@ -56,7 +54,7 @@ returned: always type: str sample: vpc-012345678 -''' +""" try: import botocore @@ -65,9 +63,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def delete_eigw(module, connection, eigw_id): diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index 3b10a0a66f9..fa34ccd8118 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_nacl short_description: create and delete Network ACLs version_added: 1.0.0 @@ -73,17 +71,18 @@ type: str choices: ['present', 'absent'] default: present -author: Mike Mochan (@mmochan) +author: + - Mike Mochan (@mmochan) +notes: + - Support for I(purge_tags) was added in release 4.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 - amazon.aws.tags -notes: - - Support for I(purge_tags) was added in release 4.0.0. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Complete example to create and delete a network ACL # that allows SSH, HTTP and ICMP in, and all traffic out. @@ -141,8 +140,8 @@ community.aws.ec2_vpc_nacl: nacl_id: acl-33b4ee5b state: absent -''' -RETURN = r''' +""" +RETURN = r""" task: description: The result of the create, or delete action. 
returned: success @@ -152,18 +151,20 @@ returned: success type: str sample: acl-123456789abcdef01 -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58} diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 3684d0d397b..3d37cf26524 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -1,18 +1,18 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_nacl_info version_added: 1.0.0 short_description: Gather information about Network ACLs in an AWS VPC description: - - Gather information about Network ACLs in an AWS VPC -author: "Brad Davidson (@brandond)" + - Gather information about Network ACLs in an AWS VPC +author: + - "Brad Davidson (@brandond)" options: nacl_ids: description: @@ -34,12 +34,12 @@ - By default, the module will return all Network ACLs. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all Network ACLs: @@ -55,9 +55,9 @@ filters: 'default': 'true' register: default_nacls -''' +""" -RETURN = r''' +RETURN = r""" nacls: description: Returns an array of complex objects as described below. 
returned: success @@ -100,7 +100,7 @@ type: list elements: list sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]] -''' +""" try: import botocore @@ -109,11 +109,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict # VPC-supported IANA protocol numbers diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index 3c39f11dede..a5af559cc9d 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_vpc_peer short_description: create, delete, accept, and reject VPC peering connections between two VPCs. version_added: 1.0.0 @@ -57,13 +55,13 @@ author: - Mike Mochan (@mmochan) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Complete example to create and accept a local peering connection. - name: Create local account VPC peering Connection community.aws.ec2_vpc_peer: @@ -212,8 +210,8 @@ profile: bot03_profile_for_cross_account state: reject -''' -RETURN = ''' +""" +RETURN = r""" peering_id: description: The id of the VPC peering connection created/deleted. 
returned: always @@ -352,21 +350,23 @@ returned: success type: str example: "pcx-0123456789abcdef0" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def wait_for_state(client, module, state, pcx_id): diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 2e257a31ffe..8faf64b8906 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_peering_info short_description: Retrieves AWS VPC Peering details using AWS methods. version_added: 1.0.0 @@ -25,15 +23,15 @@ for possible filters. type: dict default: {} -author: Karen Cheng (@Etherdaemon) +author: + - Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all VPC Peers - name: List all vpc peers community.aws.ec2_vpc_peering_info: @@ -58,9 +56,9 @@ filters: status-code: ['pending-acceptance'] register: pending_vpc_peers -''' +""" -RETURN = r''' +RETURN = r""" vpc_peering_connections: description: Details of the matching VPC peering connections. returned: success @@ -199,19 +197,21 @@ description: The result of the describe. 
returned: success type: list -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict def get_vpc_peers(client, module): diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 990ad908acc..74aab4a077c 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: ec2_vpc_vgw short_description: Create and delete AWS VPN Virtual Gateways version_added: 1.0.0 @@ -55,13 +53,13 @@ author: - Nick Aslanidis (@naslanidis) extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new VGW attached to a specific VPC community.aws.ec2_vpc_vgw: state: present @@ -100,9 +98,9 @@ profile: personal vpn_gateway_id: vgw-3a9aa123 register: deleted_vgw -''' +""" -RETURN = ''' +RETURN = r""" vgw: description: A description of the VGW returned: success @@ -133,7 +131,7 @@ type: str returned: success example: vpc-123456789abcdef01 -''' +""" import time @@ -142,13 +140,14 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as 
AnsibleAWSModule # AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes' diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index dcddd69bc31..c729b2fa5b8 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vgw_info version_added: 1.0.0 @@ -28,12 +26,12 @@ author: - "Nick Aslanidis (@naslanidis)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all virtual gateways for an account or profile @@ -56,9 +54,9 @@ profile: production vpn_gateway_ids: vgw-c432f6a7 register: vgw_info -''' +""" -RETURN = r''' +RETURN = r""" virtual_gateways: description: The virtual gateways for the account. returned: always @@ -121,7 +119,7 @@ type: dict returned: success example: {"MyKey": "MyValue"} -''' +""" try: import botocore @@ -130,9 +128,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict def get_virtual_gateway_info(virtual_gateway): diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index d543cde00e7..b7e997fade0 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vpn version_added: 1.0.0 @@ -14,11 +12,6 @@ description: - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters option or specifying the VPN connection identifier. -extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 - - amazon.aws.tags author: - "Sloane Hertel (@s-hertel)" options: @@ -135,7 +128,12 @@ required: false type: int default: 15 -''' +extends_documentation_fragment: + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. 
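Note: the import hunk that follows moves ec2_vpc_vpn's tag helpers from module_utils.ec2 to module_utils.tagging without changing their behaviour. A small sketch of the round-trip these helpers provide (tag names and values are illustrative):

from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

# boto3 returns tags as a list of {'Key': ..., 'Value': ...} dicts; the
# modules work with plain dicts internally.
current = boto3_tag_list_to_ansible_dict([{"Key": "Name", "Value": "vpn-conn"}])
desired = {"Name": "vpn-conn", "env": "prod"}

# compare_aws_tags returns the tags to set and the tag keys to unset.
to_set, to_unset = compare_aws_tags(current, desired, purge_tags=True)

# ...and back into the boto3 shape for the API call.
tag_list = ansible_dict_to_boto3_tag_list(to_set)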
@@ -293,19 +291,23 @@ vpn_connection_id: vpn-781e0e19 """ -from ansible.module_utils._text import to_text -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - try: - from botocore.exceptions import BotoCoreError, ClientError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class VPNConnectionException(Exception): def __init__(self, msg, exception=None): diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index ac9be556e23..5070af22266 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_vpn_info version_added: 1.0.0 short_description: Gather information about VPN Connections in AWS. description: - - Gather information about VPN Connections in AWS. -author: Madhura Naniwadekar (@Madhura-CSI) + - Gather information about VPN Connections in AWS. +author: + - Madhura Naniwadekar (@Madhura-CSI) options: filters: description: @@ -30,13 +29,12 @@ elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all vpn connections community.aws.ec2_vpc_vpn_info: @@ -52,9 +50,9 @@ filters: vpn-gateway-id: vgw-cbe66beb register: vpn_conn_info -''' +""" -RETURN = r''' +RETURN = r""" vpn_connections: description: List of one or more VPN Connections. 
returned: always @@ -158,19 +156,22 @@ returned: always type: str sample: vgw-cbe56bfb -''' +""" import json + try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - camel_dict_to_snake_dict, - ) def date_handler(obj): diff --git a/ec2_win_password.py b/ec2_win_password.py index 10d33658f88..aec9940cd30 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_win_password version_added: 1.0.0 short_description: Gets the default administrator password for EC2 Windows instances description: - - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). -author: "Rick Mendes (@rickmendes)" + - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)). 
+author: + - "Rick Mendes (@rickmendes)" options: instance_id: description: @@ -48,16 +47,18 @@ default: 120 type: int +requirements: + - cryptography + extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -requirements: -- cryptography -''' +RETURN = r""" # """ -EXAMPLES = ''' +EXAMPLES = r""" # Example of getting a password - name: get the Administrator password community.aws.ec2_win_password: @@ -92,7 +93,7 @@ key_file: "~/aws-creds/my_test_key.pem" wait: true wait_timeout: 45 -''' +""" import datetime import time @@ -102,6 +103,7 @@ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 from cryptography.hazmat.primitives.serialization import load_pem_private_key + HAS_CRYPTOGRAPHY = True except ImportError: HAS_CRYPTOGRAPHY = False @@ -113,8 +115,9 @@ from ansible.module_utils._text import to_bytes +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def setup_module_object(): diff --git a/ecs_attribute.py b/ecs_attribute.py index a942228e305..c6931fc4f53 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_attribute version_added: 1.0.0 short_description: manage ecs attributes description: - - Create, update or delete ECS container instance attributes. -author: Andrej Svenke (@anryko) + - Create, update or delete ECS container instance attributes. +author: + - Andrej Svenke (@anryko) options: cluster: description: @@ -54,13 +53,12 @@ required: true type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Set attributes @@ -82,9 +80,9 @@ - flavor: test - migrated delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" attributes: description: attributes type: complex @@ -108,11 +106,12 @@ description: value of the attribute returned: if present type: str -''' +""" try: import botocore - from botocore.exceptions import ClientError, EndpointConnectionError + from botocore.exceptions import ClientError + from botocore.exceptions import EndpointConnectionError except ImportError: pass # Handled by AnsibleAWSModule diff --git a/ecs_cluster.py b/ecs_cluster.py index 8b64a14abbd..7520cd0abc9 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -1,22 +1,21 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_cluster version_added: 1.0.0 short_description: Create or terminate ECS clusters. 
notes: - - When deleting a cluster, the information returned is the state of the cluster prior to deletion. - - It will also wait for a cluster to have instances registered to it. + - When deleting a cluster, the information returned is the state of the cluster prior to deletion. + - It will also wait for a cluster to have instances registered to it. description: - - Creates or terminates ecs clusters. -author: Mark Chance (@Java1Guy) + - Creates or terminates ecs clusters. +author: + - Mark Chance (@Java1Guy) options: state: description: @@ -78,13 +77,12 @@ type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Cluster creation @@ -120,8 +118,8 @@ repeat: 10 register: task_output -''' -RETURN = ''' +""" +RETURN = r""" activeServicesCount: description: how many services are active in this cluster returned: 0 if a new cluster @@ -163,7 +161,7 @@ returned: always type: str sample: ACTIVE -''' +""" import time @@ -172,10 +170,11 @@ except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class EcsClusterManager: """Handles ECS Clusters""" diff --git a/ecs_ecr.py b/ecs_ecr.py index fd335928e0a..4b5ce1ebe75 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -1,15 +1,10 @@ #!/usr/bin/python -# -*- coding: utf-8 -* +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_ecr version_added: 1.0.0 @@ -104,15 +99,14 @@ type: dict version_added: 5.2.0 author: - - David M. Lee (@leedm777) + - David M. Lee (@leedm777) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # If the repository does not exist, it is created. If it does exist, it would not # affect any policies already on it.
- name: ecr-repo @@ -186,9 +180,9 @@ encryption_configuration: encryption_type: KMS kms_key: custom-kms-key-alias -''' +""" -RETURN = ''' +RETURN = r""" state: type: str description: The asserted state of the repository (present, absent) @@ -216,7 +210,7 @@ repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090 repositoryName: ecr-test-1484664090 repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 -''' +""" import json import traceback @@ -229,10 +223,11 @@ from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies def build_kwargs(registry_id): diff --git a/ecs_service.py b/ecs_service.py index 15c74b92c50..074dec4b176 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_service version_added: 1.0.0 @@ -290,12 +289,12 @@ required: false version_added: 4.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic provisioning example - community.aws.ecs_service: @@ -377,9 +376,9 @@ Firstname: jane lastName: doe propagate_tags: SERVICE -''' +""" -RETURN = r''' +RETURN = r""" service: description: Details of created service. 
returned: when creating a service @@ -671,9 +670,25 @@ returned: always type: str -''' +""" + import time +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + DEPLOYMENT_CONTROLLER_TYPE_MAP = { 'type': 'str', } @@ -684,19 +699,6 @@ 'deployment_circuit_breaker': 'dict', } -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - class EcsServiceManager: """Handles ECS Services""" diff --git a/ecs_service_info.py b/ecs_service_info.py index fe651444cea..41dd999c9c1 100644 --- a/ecs_service_info.py +++ b/ecs_service_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_service_info version_added: 1.0.0 @@ -42,13 +40,12 @@ elements: str aliases: ['name'] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic listing example @@ -62,9 +59,9 @@ - community.aws.ecs_service_info: cluster: test-cluster register: output -''' +""" -RETURN = r''' +RETURN = r""" services: description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below. 
returned: success @@ -132,16 +129,17 @@ returned: when events is true type: list elements: dict -''' # NOQA +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class EcsServiceManager: diff --git a/ecs_tag.py b/ecs_tag.py index 9f25881d207..7aac8dfb4a7 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -1,20 +1,17 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: (c) 2019, Michael Pechner # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_tag version_added: 1.0.0 short_description: create and remove tags on Amazon ECS resources -notes: - - none description: - - Creates and removes tags for Amazon ECS resources. - - Resources are referenced by their cluster name. + - Creates and removes tags for Amazon ECS resources. + - Resources are referenced by their cluster name. author: - Michael Pechner (@mpechner) options: @@ -53,13 +50,12 @@ type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure tags are present on a resource community.aws.ecs_tag: cluster_name: mycluster @@ -93,9 +89,9 @@ Name: foo state: absent purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -108,16 +104,19 @@ description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: - pass # Handled by AnsibleAWSModule -__metaclass__ = type + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def get_tags(ecs, module, resource): diff --git a/ecs_task.py b/ecs_task.py index ebc872ba959..6c693b317bc 100644 --- a/ecs_task.py +++ b/ecs_task.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = 
type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_task version_added: 1.0.0 @@ -99,13 +97,12 @@ default: false version_added: 4.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of run task - name: Run task community.aws.ecs_task: @@ -177,8 +174,9 @@ cluster: console-sample-app-static-cluster task_definition: console-sample-app-static-taskdef task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" -''' -RETURN = r''' +""" + +RETURN = r""" task: description: details about the task that was started returned: success @@ -242,16 +240,18 @@ description: The launch type on which to run your task. returned: always type: str -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class EcsExecManager: """Handles ECS Tasks""" diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 3eb7716f503..16fcab712c4 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ecs_taskdefinition version_added: 1.0.0 @@ -630,12 +628,12 @@ description: A cluster query language expression to apply to the constraint. 
type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create task definition community.aws.ecs_taskdefinition: containers: @@ -769,22 +767,23 @@ startPeriod: 15 timeout: 15 state: present -''' -RETURN = r''' +""" + +RETURN = r""" taskdefinition: description: a reflection of the input parameters type: dict returned: always -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class EcsTaskManager: diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index d57214cf419..b619cd4c4be 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ecs_taskdefinition_info version_added: 1.0.0 @@ -27,20 +25,19 @@ required: true type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - community.aws.ecs_taskdefinition_info: task_definition: test-td -''' +""" -RETURN = ''' +RETURN = r""" container_definitions: description: Returns a list of complex objects representing the containers returned: success @@ -348,16 +345,17 @@ description: A cluster query language expression to apply to the constraint. 
returned: when present type: str -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( diff --git a/efs.py b/efs.py index 7223c99f3a4..c3d1bc6830a 100644 --- a/efs.py +++ b/efs.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs version_added: 1.0.0 @@ -102,14 +100,13 @@ version_added: 2.1.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: EFS provisioning community.aws.efs: state: present @@ -153,9 +150,9 @@ community.aws.efs: state: absent name: myTestEFS -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: timestamp of creation date returned: always @@ -245,7 +242,7 @@ "key": "Value" } -''' +""" from time import sleep from time import time as timestamp @@ -257,11 +254,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags def _index_by_key(key, items): diff --git a/efs_info.py b/efs_info.py index 634ff2a4273..e73042555bb 100644 --- a/efs_info.py +++ b/efs_info.py @@ -1,21 +1,19 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs_info version_added: 1.0.0 short_description: Get information about Amazon EFS file systems description: - - This module can be used to search Amazon EFS file systems. - Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! +- This module can be used to search Amazon EFS file systems. + Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! 
author: - - "Ryan Sydnor (@ryansydnor)" +- "Ryan Sydnor (@ryansydnor)" options: name: description: @@ -39,13 +37,12 @@ elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Find all existing efs community.aws.efs_info: register: result @@ -66,9 +63,9 @@ - ansible.builtin.debug: msg: "{{ result['efs'] }}" -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: timestamp of creation date returned: always @@ -168,7 +165,7 @@ "key": "Value" } -''' +""" from collections import defaultdict @@ -180,9 +177,11 @@ from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict class EFSConnection(object): diff --git a/efs_tag.py b/efs_tag.py index bc99d110635..10978c5bf2f 100644 --- a/efs_tag.py +++ b/efs_tag.py @@ -1,21 +1,17 @@ #!/usr/bin/python -""" -Copyright: (c) 2021, Milan Zink -GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -""" - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Milan Zink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: efs_tag version_added: 2.0.0 short_description: create and remove tags on Amazon EFS resources description: - - Creates and removes tags for Amazon EFS resources. - - Resources are referenced by their ID (filesystem or filesystem access point). + - Creates and removes tags for Amazon EFS resources. + - Resources are referenced by their ID (filesystem or filesystem access point). 
author: - Milan Zink (@zeten30) options: @@ -44,13 +40,12 @@ type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure tags are present on a resource community.aws.efs_tag: resource: fs-123456ab @@ -80,9 +75,9 @@ state: absent tags: {} purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -95,17 +90,23 @@ description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # Handled by AnsibleAWSModule pass -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing WAIT_RETRY = 5 # how many seconds to wait between propagation status polls diff --git a/eks_cluster.py b/eks_cluster.py index 73467d7322b..699c74bdb5b 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_cluster version_added: 1.0.0 @@ -63,13 +61,12 @@ default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
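efs_tag.py above splits the combined botocore import into one import per line but keeps catching both exception families. A minimal sketch of the error-handling idiom these modules share; the EFS call and helper name are illustrative, not taken from the diff:

import botocore

def get_tags(module, client, resource_id):
    try:
        return client.list_tags_for_resource(ResourceId=resource_id)["Tags"]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        # fail_json_aws attaches the AWS error code and response to the failure.
        module.fail_json_aws(e, msg="Unable to fetch tags")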
- name: Create an EKS cluster @@ -89,9 +86,9 @@ name: my_cluster wait: true state: absent -''' +""" -RETURN = r''' +RETURN = r""" arn: description: ARN of the EKS cluster returned: when state is present @@ -163,19 +160,21 @@ returned: when state is present type: str sample: '1.10' -''' - - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def ensure_present(client, module): name = module.params.get('name') diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index c54d67aec02..abc4dd09f90 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_fargate_profile version_added: 4.0.0 @@ -68,14 +66,13 @@ default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags + - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create an EKS Fargate Profile @@ -98,9 +95,9 @@ cluster_name: test_cluster wait: true state: absent -''' +""" -RETURN = r''' +RETURN = r""" fargate_profile_name: description: Name of Fargate Profile. 
returned: when state is present @@ -164,19 +161,21 @@ sample: - CREATING - ACTIVE -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def validate_tags(client, module, fargate_profile): changed = False diff --git a/eks_nodegroup.py b/eks_nodegroup.py index 5ba33128a66..8528cc8a488 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: eks_nodegroup version_added: 5.3.0 @@ -169,12 +167,11 @@ default: 1200 type: int extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create nodegroup @@ -207,9 +204,9 @@ cluster_name: test_cluster wait: yes state: absent -''' +""" -RETURN = r''' +RETURN = r""" nodegroup_name: description: The name associated with an Amazon EKS managed node group. 
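eks_fargate_profile.py keeps get_waiter while its neighbouring imports move to their dedicated module_utils. A minimal sketch of the waiter usage, assuming a 'fargate_profile_active' waiter is registered in amazon.aws's waiters module (the waiter name is an assumption, not confirmed by this diff):

from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter

def wait_until_profile_active(client, cluster_name, profile_name):
    waiter = get_waiter(client, "fargate_profile_active")  # assumed waiter name
    waiter.wait(clusterName=cluster_name, fargateProfileName=profile_name)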
returned: when state is present @@ -345,18 +342,22 @@ type: dict sample: foo: bar -''' - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +""" try: - import botocore.exceptions + import botocore except ImportError: pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def validate_tags(client, module, nodegroup): changed = False diff --git a/elasticache.py b/elasticache.py index eeabcfe76cf..067134725d7 100644 --- a/elasticache.py +++ b/elasticache.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elasticache version_added: 1.0.0 @@ -15,7 +12,8 @@ description: - Manage cache clusters in Amazon ElastiCache. - Returns information about the specified cache cluster. -author: "Jim Dalton (@jsdalton)" +author: + - "Jim Dalton (@jsdalton)" options: state: description: @@ -97,11 +95,12 @@ - Defaults to C(false). type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' +RETURN = r""" # """ EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. 
@@ -130,8 +129,8 @@ community.aws.elasticache: name: "test-please-delete" state: rebooted - """ + from time import sleep try: @@ -139,12 +138,12 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -class ElastiCacheManager(object): +class ElastiCacheManager(): """Handles elasticache creation and destruction""" @@ -153,7 +152,7 @@ class ElastiCacheManager(object): def __init__(self, module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, cache_parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs): + hard_modify): self.module = module self.name = name self.engine = engine.lower() @@ -169,9 +168,6 @@ def __init__(self, module, name, engine, cache_engine_version, node_type, self.wait = wait self.hard_modify = hard_modify - self.region = region - self.aws_connect_kwargs = aws_connect_kwargs - self.changed = False self.data = None self.status = 'gone' @@ -500,8 +496,6 @@ def main(): argument_spec=argument_spec, ) - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) - name = module.params['name'] state = module.params['state'] engine = module.params['engine'] @@ -530,7 +524,7 @@ def main(): cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs) + hard_modify) if state == 'present': elasticache_manager.ensure_present() diff --git a/elasticache_info.py b/elasticache_info.py index 3aa7a4317c7..02b18ee4778 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: elasticache_info short_description: Retrieve information for AWS ElastiCache clusters version_added: 1.0.0 @@ -20,21 +18,21 @@ author: - Will Thames (@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: obtain all ElastiCache information community.aws.elasticache_info: - name: obtain all information for a single ElastiCache cluster community.aws.elasticache_info: name: test_elasticache -''' +""" -RETURN = ''' +RETURN = r""" elasticache_clusters: description: List of ElastiCache clusters. 
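The elasticache.py hunks above drop the region and **aws_connect_kwargs plumbing because AnsibleAWSModule resolves credentials, region and endpoint itself. A minimal sketch of the replacement pattern; the helper name here is hypothetical:

def _get_elasticache_connection(module):
    # The module object owns all connection details, so callers just request a client.
    return module.client("elasticache")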
returned: always @@ -402,20 +400,21 @@ sample: Application: web Environment: test -''' - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict - +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + @AWSRetry.exponential_backoff() def describe_cache_clusters_with_backoff(client, cluster_id=None): diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index fe8cc08fc00..04a789bd59f 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elasticache_parameter_group version_added: 1.0.0 @@ -14,11 +12,8 @@ description: - Manage cache security groups in Amazon ElastiCache. - Returns information about the specified cache cluster. -author: "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 +author: + - "Sloane Hertel (@s-hertel)" options: group_family: @@ -47,9 +42,14 @@ description: - A user-specified dictionary of parameters to reset or modify for the cache parameter group. type: dict -''' -EXAMPLES = """ +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. --- @@ -79,7 +79,7 @@ state: 'absent' """ -RETURN = """ +RETURN = r""" elasticache: description: cache parameter group information and response metadata returned: always diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 9ae5be427ca..e477fc86aa5 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elasticache_snapshot version_added: 1.0.0 @@ -14,11 +12,8 @@ description: - Manage cache snapshots in Amazon ElastiCache. 
- Returns information about the specified snapshot. -author: "Sloane Hertel (@s-hertel)" -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 +author: + - "Sloane Hertel (@s-hertel)" options: name: description: @@ -47,9 +42,13 @@ description: - The s3 bucket to which the snapshot is exported. type: str -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. @@ -61,7 +60,7 @@ replication_id: '{{ replication }}' """ -RETURN = """ +RETURN = r""" response_metadata: description: response metadata about the snapshot returned: always @@ -117,8 +116,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def create(module, connection, replication_id, cluster_id, name): diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 802d8c0949e..6353c72837b 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -1,18 +1,16 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elasticache_subnet_group version_added: 1.0.0 short_description: manage ElastiCache subnet groups description: - - Creates, modifies, and deletes ElastiCache subnet groups. + - Creates, modifies, and deletes ElastiCache subnet groups. options: state: description: @@ -40,12 +38,12 @@ author: - "Tim Mahoney (@timmahoney)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or change a subnet group community.aws.elasticache_subnet_group: state: present @@ -59,9 +57,9 @@ community.aws.elasticache_subnet_group: state: absent name: norwegian-blue -''' +""" -RETURN = r''' +RETURN = r""" cache_subnet_group: description: Description of the Elasticache Subnet Group. 
 returned: always
@@ -95,7 +93,7 @@
       sample:
         - subnet-aaaaaaaa
         - subnet-bbbbbbbb
-'''
+"""

 try:
     import botocore
@@ -104,9 +102,10 @@

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry


 def get_subnet_group(name):
diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py
index 46529276997..c6333379456 100644
--- a/elasticbeanstalk_app.py
+++ b/elasticbeanstalk_app.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elasticbeanstalk_app
version_added: 1.0.0
@@ -43,12 +41,12 @@
   - Harpreet Singh (@hsingh)
   - Stephen Granger (@viper233)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Create or update an application
 - community.aws.elasticbeanstalk_app:
     app_name: Sample_App
@@ -60,9 +58,9 @@
     app_name: Sample_App
     state: absent

-'''
+"""

-RETURN = '''
+RETURN = r"""
 app:
     description: Beanstalk application.
     returned: always
@@ -83,15 +81,16 @@
     returned: in check mode
     type: str
     sample: App is up-to-date
-'''
+"""

 try:
     import botocore
 except ImportError:
     pass  # handled by AnsibleAWSModule

+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message


 def describe_app(ebs, app_name, module):
diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py
index 9298085e28f..3d3d43d4e71 100644
--- a/elb_classic_lb_info.py
+++ b/elb_classic_lb_info.py
@@ -1,29 +1,16 @@
 #!/usr/bin/python
-#
-# This is a free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <https://www.gnu.org/licenses/>.
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: elb_classic_lb_info version_added: 1.0.0 short_description: Gather information about EC2 Elastic Load Balancers in AWS description: - - Gather information about EC2 Elastic Load Balancers in AWS + - Gather information about EC2 Elastic Load Balancers in AWS author: - "Michael Schultz (@mjschultz)" - "Fernando Jose Pando (@nand0p)" @@ -35,12 +22,12 @@ elements: str default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Output format tries to match amazon.aws.ec2_elb_lb module input parameters @@ -71,9 +58,9 @@ msg: "{{ item.dns_name }}" loop: "{{ elb_info.elbs }}" -''' +""" -RETURN = r''' +RETURN = r""" elbs: description: a list of load balancers returned: always @@ -137,21 +124,22 @@ - subnet-XXXXXXXX tags: {} vpc_id: vpc-c248fda4 -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ( - AWSRetry, - camel_dict_to_snake_dict, - boto3_tag_list_to_ansible_dict -) +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + MAX_AWS_RETRIES = 5 MAX_AWS_DELAY = 5 diff --git a/elb_instance.py b/elb_instance.py index c09ae0429bb..fe1128c9661 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_instance version_added: 1.0.0 @@ -15,7 +13,8 @@ - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - Will be marked changed when called only if there are ELBs found to operate on. -author: "John Jarvis (@jarv)" +author: + - "John Jarvis (@jarv)" options: state: description: @@ -55,13 +54,13 @@ default: 0 type: int notes: -- The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release - 4.0.0 is no longer set. + - The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release + 4.0.0 is no longer set. 
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" EXAMPLES = r""" # basic pre_task and post_task example @@ -83,22 +82,23 @@ delegate_to: localhost """ -RETURN = ''' +RETURN = r""" updated_elbs: description: A list of ELB names that the instance has been added to or removed from. returned: always type: list elements: str -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class ElbManager: diff --git a/elb_network_lb.py b/elb_network_lb.py index 44025cccb94..069882dc90c 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_network_lb version_added: 1.0.0 @@ -126,17 +123,17 @@ - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. choices: [ 'ipv4', 'dualstack' ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create an ELB and attach a listener @@ -187,9 +184,9 @@ name: myelb state: absent -''' +""" -RETURN = r''' +RETURN = r""" load_balancer: description: A representation of the Network Load Balancer returned: when state is present @@ -328,11 +325,17 @@ returned: when state is present type: str sample: vpc-0011223344 -''' +""" + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener def create_or_update_elb(elb_obj): diff --git a/elb_target.py b/elb_target.py index 133419a1757..fd05cd67a3a 100644 --- a/elb_target.py +++ b/elb_target.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_target version_added: 1.0.0 short_description: Manage a target in a target group description: - - Used to register or deregister a target in a target group. -author: "Rob White (@wimnat)" + - Used to register or deregister a target in a target group. +author: + - "Rob White (@wimnat)" options: deregister_unused: description: @@ -68,16 +67,17 @@ required: true choices: [ 'present', 'absent' ] type: str -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 notes: - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it. -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Register an IP address target to a target group @@ -106,13 +106,14 @@ target_port: 8080 state: present -''' +""" -RETURN = ''' +RETURN = r""" -''' +""" -from time import time, sleep +from time import sleep +from time import time try: import botocore @@ -121,8 +122,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry @AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) diff --git a/elb_target_group.py b/elb_target_group.py index 18f9ca5e46b..784fa143a4f 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_target_group version_added: 1.0.0 @@ -204,17 +202,18 @@ - The time to wait for the target group. default: 200 type: int -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags notes: - Once a target group has been created, only its health check can then be modified using subsequent calls -''' -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a target group with a default health check @@ -324,9 +323,9 @@ targets: - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function -''' +""" -RETURN = r''' +RETURN = r""" deregistration_delay_timeout_seconds: description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. 
returned: when state present @@ -437,7 +436,7 @@ returned: when state present type: str sample: vpc-0123456 -''' +""" import time @@ -448,12 +447,13 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags def get_tg_attributes(connection, module, tg_arn): diff --git a/elb_target_group_info.py b/elb_target_group_info.py index d6a73d3307f..d58c2f248f5 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_target_group_info version_added: 1.0.0 short_description: Gather information about ELB target groups in AWS description: - - Gather information about ELB target groups in AWS -author: Rob White (@wimnat) + - Gather information about ELB target groups in AWS +author: + - Rob White (@wimnat) options: load_balancer_arn: description: @@ -40,13 +39,12 @@ type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Gather information about all target groups @@ -62,9 +60,9 @@ - tg1 - tg2 -''' +""" -RETURN = r''' +RETURN = r""" target_groups: description: a list of target groups returned: always @@ -204,7 +202,7 @@ returned: always type: str sample: vpc-0123456 -''' +""" try: import botocore @@ -213,9 +211,11 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict @AWSRetry.jittered_backoff(retries=10) diff --git a/elb_target_info.py b/elb_target_info.py index 92ab33ba945..393e290e51b 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -1,10 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Yaakov Kuperman # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_target_info version_added: 1.0.0 @@ -12,8 +12,8 @@ description: - This module will search through every target group in a region to find which ones have registered a given instance ID or IP. - -author: "Yaakov Kuperman (@yaakov-github)" +author: + - "Yaakov Kuperman (@yaakov-github)" options: instance_id: description: @@ -25,14 +25,14 @@ - Whether or not to get target groups not used by any load balancers. 
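elb_target_info below, like the modules above, now imports camel_dict_to_snake_dict from ansible-core's dict_transformations rather than module_utils.ec2. A minimal sketch of what the function does to a boto3-style response:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

raw = {"TargetGroupName": "tg1", "HealthCheckPort": "traffic-port"}
camel_dict_to_snake_dict(raw)
# -> {'target_group_name': 'tg1', 'health_check_port': 'traffic-port'}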
type: bool default: true -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" # practical use case - dynamically de-registering and re-registering nodes - name: Get EC2 Metadata @@ -127,7 +127,7 @@ """ -RETURN = """ +RETURN = r""" instance_target_groups: description: a list of target groups to which the instance is registered to returned: always @@ -204,16 +204,18 @@ type: str """ -__metaclass__ = type - try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: # we can handle the lack of boto3 based on the ec2 module pass +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry class Target(object): diff --git a/glue_connection.py b/glue_connection.py index 2e01b6fed32..e9a6b306dac 100644 --- a/glue_connection.py +++ b/glue_connection.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_connection version_added: 1.0.0 @@ -72,12 +70,12 @@ - Required when I(connection_type=NETWORK). type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue connection @@ -106,9 +104,9 @@ - community.aws.glue_connection: name: my-glue-connection state: absent -''' +""" -RETURN = r''' +RETURN = r""" connection_properties: description: - (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection. 
@@ -157,11 +155,11 @@ returned: when state is present type: dict sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'} -''' +""" -# Non-ansible imports import copy import time + try: import botocore except ImportError: @@ -169,10 +167,11 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_connection(connection, module): diff --git a/glue_crawler.py b/glue_crawler.py index d5cdc04d6d5..04c6cd3eb52 100644 --- a/glue_crawler.py +++ b/glue_crawler.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_crawler version_added: 4.1.0 @@ -77,13 +75,13 @@ - Required when I(state=present). type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue crawler @@ -109,9 +107,9 @@ - community.aws.glue_crawler: name: my-glue-crawler state: absent -''' +""" -RETURN = r''' +RETURN = r""" creation_time: description: The time and date that this crawler definition was created. returned: when state is present @@ -198,7 +196,7 @@ description: List of catalog targets. 
returned: when state is present type: list -''' +""" try: import botocore @@ -208,11 +206,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_crawler(connection, module, glue_crawler_name): diff --git a/glue_job.py b/glue_job.py index ea6e79180fc..6e979f28a9d 100644 --- a/glue_job.py +++ b/glue_job.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Rob White (@wimnat) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: glue_job version_added: 1.0.0 @@ -103,13 +101,13 @@ notes: - Support for I(tags) and I(purge_tags) was added in release 2.2.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an AWS Glue job @@ -126,9 +124,9 @@ - community.aws.glue_job: name: my-glue-job state: absent -''' +""" -RETURN = r''' +RETURN = r""" allocated_capacity: description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to 100 DPUs can be allocated; the default is 10. 
A DPU is a relative measure of processing power @@ -223,10 +221,10 @@ returned: when state is present type: int sample: 300 -''' +""" -# Non-ansible imports import copy + try: import botocore except ImportError: @@ -234,11 +232,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def _get_glue_job(connection, module, glue_job_name): diff --git a/iam_access_key.py b/iam_access_key.py index 32220a216e3..af472fbe8c6 100644 --- a/iam_access_key.py +++ b/iam_access_key.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: iam_access_key version_added: 2.1.0 short_description: Manage AWS IAM User access keys description: - Manage AWS IAM user access keys. -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) options: user_name: description: @@ -54,12 +53,12 @@ default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a new access key @@ -72,9 +71,9 @@ user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent -''' +""" -RETURN = r''' +RETURN = r""" access_key: description: A dictionary containing all the access key information. returned: When the key exists. 
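Both Glue modules above keep get_aws_account_info from module_utils.iam, which returns the account ID and partition for the current credentials. A minimal sketch of the kind of ARN expansion this enables; build_role_arn is a hypothetical wrapper, not a function from the diff:

from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info

def build_role_arn(module, role_name):
    account_id, partition = get_aws_account_info(module)
    return f"arn:{partition}:iam::{account_id}:role/{role_name}"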
@@ -117,7 +116,7 @@ returned: When a key was deleted during the rotation of access keys type: str sample: AKIA1EXAMPLE1EXAMPLE -''' +""" try: import botocore @@ -126,11 +125,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def delete_access_key(access_keys, user, access_key_id): diff --git a/iam_access_key_info.py b/iam_access_key_info.py index 9d7363b420a..6573e657a18 100644 --- a/iam_access_key_info.py +++ b/iam_access_key_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: iam_access_key_info version_added: 2.1.0 @@ -14,7 +12,8 @@ description: - 'Fetches information AWS IAM user access keys.' - 'Note: It is not possible to fetch the secret access key.' -author: Mark Chappell (@tremble) +author: + - Mark Chappell (@tremble) options: user_name: description: @@ -24,20 +23,20 @@ aliases: ['username'] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Fetch Access keys for a user community.aws.iam_access_key_info: user_name: example_user -''' +""" -RETURN = r''' +RETURN = r""" access_key: description: A dictionary containing all the access key information. returned: When the key exists. 
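iam_access_key.py above now sources normalize_boto3_result from module_utils.botocore and scrub_none_parameters from module_utils.transformation. A minimal sketch of both helpers; the parameter values are made up:

import datetime

from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters

# Drops None-valued keys so optional parameters are omitted from the boto3 call.
scrub_none_parameters({"UserName": "example_user", "Marker": None})
# -> {'UserName': 'example_user'}

# Converts datetimes in a boto3 response into ISO 8601 strings, making the
# result JSON-safe module output (roughly '2021-01-01T00:00:00').
normalize_boto3_result({"CreateDate": datetime.datetime(2021, 1, 1)})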
@@ -67,7 +66,7 @@
       returned: success
       type: str
       sample: Inactive
-'''
+"""

 try:
     import botocore
@@ -76,9 +75,10 @@

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

+from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry


 def get_access_keys(user):
diff --git a/iam_group.py b/iam_group.py
index cedf41613eb..9dc43ec0a94 100644
--- a/iam_group.py
+++ b/iam_group.py
@@ -1,24 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
---
module: iam_group
version_added: 1.0.0
@@ -26,8 +12,8 @@
 description:
   - Manage AWS IAM groups.
 author:
-- Nick Aslanidis (@naslanidis)
-- Maksym Postument (@infectsoldier)
+  - Nick Aslanidis (@naslanidis)
+  - Maksym Postument (@infectsoldier)
 options:
   name:
     description:
@@ -70,12 +56,12 @@
     default: false
     type: bool
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create a group @@ -119,8 +105,8 @@ name: testgroup1 state: absent -''' -RETURN = r''' +""" +RETURN = r""" iam_group: description: dictionary containing all the group information including group membership returned: success @@ -176,7 +162,7 @@ description: the path to the user type: str sample: / -''' +""" try: import botocore @@ -185,9 +171,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def compare_attached_group_policies(current_attached_policies, new_attached_policies): diff --git a/iam_managed_policy.py b/iam_managed_policy.py index eabf03b23d7..f590fcf9d64 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -1,18 +1,16 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: iam_managed_policy version_added: 1.0.0 short_description: Manage User Managed IAM policies description: - - Allows creating and removing managed IAM policies + - Allows creating and removing managed IAM policies options: policy_name: description: @@ -45,14 +43,15 @@ choices: [ "present", "absent" ] type: str -author: "Dan Kozlowski (@dkhenry)" +author: + - "Dan Kozlowski (@dkhenry)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create a policy - name: Create IAM Managed Policy community.aws.iam_managed_policy: @@ -102,9 +101,9 @@ community.aws.iam_managed_policy: policy_name: "ManagedPolicy" state: absent -''' +""" -RETURN = r''' +RETURN = r""" policy: description: Returns the policy json structure, when state == absent this will return the value of the removed policy. 
returned: success @@ -121,7 +120,7 @@ "policy_name": "AdministratorAccess", "update_date": "2017-03-01T15:42:55.981000+00:00" }' -''' +""" import json @@ -133,10 +132,11 @@ from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index 8b78eee02f9..c0c434a9be9 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -1,31 +1,30 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_mfa_device_info version_added: 1.0.0 short_description: List the MFA (Multi-Factor Authentication) devices registered for a user description: - - List the MFA (Multi-Factor Authentication) devices registered for a user -author: Victor Costan (@pwnall) + - List the MFA (Multi-Factor Authentication) devices registered for a user +author: + - Victor Costan (@pwnall) options: user_name: description: - The name of the user whose MFA devices will be listed type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -RETURN = """ +RETURN = r""" mfa_devices: description: The MFA devices registered for the given user returned: always @@ -39,7 +38,7 @@ user_name: example """ -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
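iam_managed_policy.py above now takes compare_policies from module_utils.policy. A minimal sketch: the helper normalises both policy documents and returns True when they differ, which is how the module decides whether an update is needed:

from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

current = {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "logs:PutLogEvents", "Resource": "*"}]}
desired = {"Statement": [{"Action": "logs:PutLogEvents", "Effect": "Allow", "Resource": "*"}], "Version": "2012-10-17"}
compare_policies(current, desired)  # False: same policy, only key order differs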
diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py
index 8b78eee02f9..c0c434a9be9 100644
--- a/iam_mfa_device_info.py
+++ b/iam_mfa_device_info.py
@@ -1,31 +1,30 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: iam_mfa_device_info
 version_added: 1.0.0
 short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
 description:
-  - List the MFA (Multi-Factor Authentication) devices registered for a user
-author: Victor Costan (@pwnall)
+  - List the MFA (Multi-Factor Authentication) devices registered for a user
+author:
+  - Victor Costan (@pwnall)
 options:
   user_name:
     description:
       - The name of the user whose MFA devices will be listed
     type: str
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-RETURN = """
+RETURN = r"""
 mfa_devices:
     description: The MFA devices registered for the given user
     returned: always
@@ -39,7 +38,7 @@
       user_name: example
 """
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 # more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
@@ -54,7 +53,7 @@
     role_arn: "arn:aws:iam::123456789012:role/someRole"
     role_session_name: "someRoleSession"
   register: assumed_role
-'''
+"""
 
 try:
     import botocore
diff --git a/iam_password_policy.py b/iam_password_policy.py
index 00b4f8872c0..a980511c2fa 100644
--- a/iam_password_policy.py
+++ b/iam_password_policy.py
@@ -1,21 +1,18 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 
 # Copyright: (c) 2018, Aaron Smith
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: iam_password_policy
 version_added: 1.0.0
 short_description: Update an IAM Password Policy
 description:
-  - Module updates an IAM Password Policy on a given AWS account
+  - Module updates an IAM Password Policy on a given AWS account
 author:
-  - "Aaron Smith (@slapula)"
+  - "Aaron Smith (@slapula)"
 options:
   state:
     description:
@@ -75,13 +72,12 @@
     type: bool
     aliases: [password_expire, expire]
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Password policy for AWS account
   community.aws.iam_password_policy:
     state: present
@@ -94,9 +90,9 @@
     pw_max_age: 60
     pw_reuse_prevent: 5
     pw_expire: false
-'''
+"""
 
-RETURN = ''' # '''
+RETURN = r""" # """
 
 try:
     import botocore
@@ -105,8 +101,9 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 
 
 class IAMConnection(object):
diff --git a/iam_role.py b/iam_role.py
index 255b4cb7964..07463cd9736 100644
--- a/iam_role.py
+++ b/iam_role.py
@@ -1,11 +1,10 @@
 #!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-
 
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: iam_role
 version_added: 1.0.0
@@ -91,13 +90,13 @@
     default: True
     type: bool
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
-'''
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 - name: Create a role with description and tags
@@ -127,8 +126,8 @@
     assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
     state: absent
-'''
-RETURN = r'''
+"""
+RETURN = r"""
 iam_role:
     description: dictionary containing the IAM Role data
     returned: success
@@ -214,7 +213,7 @@
             type: dict
             returned: always
             sample: '{"Env": "Prod"}'
-'''
+"""
 
 import json
 
@@ -225,13 +224,14 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
 
 
 @AWSRetry.jittered_backoff()
diff --git a/iam_role_info.py b/iam_role_info.py
index 23da3e04097..e3bdb7695bf 100644
--- a/iam_role_info.py
+++ b/iam_role_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: iam_role_info
 version_added: 1.0.0
@@ -29,13 +27,12 @@
       - Mutually exclusive with I(name).
     type: str
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: find all existing IAM roles
   community.aws.iam_role_info:
   register: result
@@ -47,9 +44,9 @@
 - name: describe all roles matching a path prefix
   community.aws.iam_role_info:
     path_prefix: /application/path
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 iam_roles:
   description: List of IAM roles
   returned: always
@@ -153,7 +150,7 @@
       type: dict
       returned: always
      sample: '{"Env": "Prod"}'
-'''
+"""
 
 try:
     import botocore
@@ -162,10 +159,11 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 
 
 @AWSRetry.jittered_backoff()
diff --git a/iam_saml_federation.py b/iam_saml_federation.py
index b20f44d3690..e134588f7ef 100644
--- a/iam_saml_federation.py
+++ b/iam_saml_federation.py
@@ -1,25 +1,10 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
 ---
 module: iam_saml_federation
 version_added: 1.0.0
@@ -42,17 +27,18 @@
     default: present
     choices: [ "present", "absent" ]
     type: str
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
 author:
   - Tony (@axc450)
   - Aidan Rowe (@aidan-)
-'''
 
-EXAMPLES = '''
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 # It is assumed that their matching environment variables are set.
 # Creates a new iam saml identity provider if not present
@@ -74,9 +60,9 @@
   community.aws.iam_saml_federation:
     name: example3
     state: absent
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 saml_provider:
     description: Details of the SAML Identity Provider that was created/modified.
     type: complex
@@ -101,15 +87,16 @@
         type: str
         returned: present
         sample: "2017-02-08T04:36:28+00:00"
-'''
+"""
 
 try:
-    import botocore.exceptions
+    import botocore
 except ImportError:
     pass
 
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 
 
 class SAMLProviderManager:
diff --git a/iam_server_certificate.py b/iam_server_certificate.py
index 4b8ee782ddb..3ab35fb6864 100644
--- a/iam_server_certificate.py
+++ b/iam_server_certificate.py
@@ -1,24 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
 ---
 module: iam_server_certificate
 version_added: 1.0.0
@@ -76,12 +62,14 @@
 author:
   - Jonathan I. Davila (@defionscode)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+RETURN = r""" # """
+
+EXAMPLES = r"""
 - name: Basic server certificate upload from local file
   community.aws.iam_server_certificate:
     name: very_ssl
@@ -104,7 +92,7 @@
     name: very_ssl
     new_name: new_very_ssl
     state: present
-'''
+"""
 
 try:
     import botocore
@@ -113,9 +101,10 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 
 
 @AWSRetry.jittered_backoff()
diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py
index ac33a36f1a2..eb38a5f8b48 100644
--- a/iam_server_certificate_info.py
+++ b/iam_server_certificate_info.py
@@ -1,32 +1,30 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: iam_server_certificate_info
 version_added: 1.0.0
 short_description: Retrieve the information of a server certificate
 description:
   - Retrieve the attributes of a server certificate.
-author: "Allen Sanabria (@linuxdynasty)" +author: + - "Allen Sanabria (@linuxdynasty)" options: name: description: - The name of the server certificate you are retrieving attributes for. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Retrieve server certificate community.aws.iam_server_certificate_info: name: production-cert @@ -37,9 +35,9 @@ name: production-cert register: server_cert failed_when: "{{ server_cert.results | length == 0 }}" -''' +""" -RETURN = ''' +RETURN = r""" server_certificate_id: description: The 21 character certificate id returned: success @@ -75,12 +73,11 @@ returned: success type: str sample: "2015-04-25T00:36:40+00:00" -''' +""" try: import botocore - import botocore.exceptions except ImportError: pass # Handled by AnsibleAWSModule diff --git a/inspector_target.py b/inspector_target.py index 4bfe5b502d6..8891fa34a67 100644 --- a/inspector_target.py +++ b/inspector_target.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Dennis Conrad for Sainsbury's # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: inspector_target version_added: 1.0.0 @@ -39,12 +37,12 @@ - Required if I(state=present). type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create my_target Assessment Target community.aws.inspector_target: name: my_target @@ -62,9 +60,9 @@ community.aws.inspector_target: name: my_target state: absent -''' +""" -RETURN = ''' +RETURN = r""" arn: description: The ARN that specifies the Amazon Inspector assessment target. 
     returned: success
@@ -97,22 +95,22 @@
     returned: success
     type: str
     sample: "2018-01-29T13:48:51.958000+00:00"
-'''
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    ansible_dict_to_boto3_tag_list,
-    boto3_tag_list_to_ansible_dict,
-    camel_dict_to_snake_dict,
-    compare_aws_tags,
-)
+"""
 
 try:
     import botocore
 except ImportError:
     pass  # caught by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
 
 @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
 def main():
diff --git a/kinesis_stream.py b/kinesis_stream.py
index 001fad26546..2bcca6a4ad4 100644
--- a/kinesis_stream.py
+++ b/kinesis_stream.py
@@ -1,22 +1,21 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: kinesis_stream
 version_added: 1.0.0
 short_description: Manage a Kinesis Stream.
 description:
-  - Create or Delete a Kinesis Stream.
-  - Update the retention period of a Kinesis Stream.
-  - Update Tags on a Kinesis Stream.
-  - Enable/disable server side encryption on a Kinesis Stream.
-author: Allen Sanabria (@linuxdynasty)
+  - Create or Delete a Kinesis Stream.
+  - Update the retention period of a Kinesis Stream.
+  - Update Tags on a Kinesis Stream.
+  - Enable/disable server side encryption on a Kinesis Stream.
+author:
+  - Allen Sanabria (@linuxdynasty)
 options:
   name:
     description:
@@ -73,13 +72,12 @@
       - The GUID or alias for the KMS key.
     type: str
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 # Basic creation example:
@@ -148,9 +146,9 @@
     wait: true
     wait_timeout: 600
   register: test_stream
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 stream_name:
   description: The name of the Kinesis Stream.
   returned: when state == present.
@@ -179,7 +177,7 @@
     "Name": "Splunk",
     "Env": "development"
   }
-'''
+"""
 
 import time
 
@@ -191,9 +189,10 @@
 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
 
 
 def get_tags(client, stream_name):
diff --git a/lightsail.py b/lightsail.py
index 3b29fa0ba13..23ab0c76c86 100644
--- a/lightsail.py
+++ b/lightsail.py
@@ -1,23 +1,20 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: lightsail
 version_added: 1.0.0
 short_description: Manage instances in AWS Lightsail
 description:
-  - Manage instances in AWS Lightsail.
-  - Instance tagging is not yet supported in this module.
+  - Manage instances in AWS Lightsail.
+  - Instance tagging is not yet supported in this module.
 author:
-  - "Nick Ball (@nickball)"
-  - "Prasad Katti (@prasadkatti)"
+  - "Nick Ball (@nickball)"
+  - "Prasad Katti (@prasadkatti)"
 options:
   state:
     description:
@@ -69,14 +66,13 @@
     type: int
 
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Create a new Lightsail instance
   community.aws.lightsail:
     state: present
@@ -94,10 +90,9 @@
     state: absent
     region: us-east-1
     name: my_instance
+"""
 
-'''
-
-RETURN = '''
+RETURN = r"""
 changed:
   description: if a snapshot has been modified/created
   returned: always
@@ -149,7 +144,7 @@
       name: running
     support_code: "123456789012/i-0997c97831ee21e33"
     username: "ubuntu"
-'''
+"""
 
 import time
 
@@ -161,8 +156,9 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 
 
 def find_instance_info(module, client, instance_name, fail_if_not_found=False):
diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py
index dc956fd5337..961f451a4ee 100644
--- a/lightsail_static_ip.py
+++ b/lightsail_static_ip.py
@@ -1,14 +1,10 @@
 #!/usr/bin/python
-
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: lightsail_static_ip
 version_added: 4.1.0
@@ -29,13 +25,13 @@
     required: true
     type: str
 extends_documentation_fragment:
  - amazon.aws.aws
  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Provision a Lightsail static IP
   community.aws.lightsail_static_ip:
    state: present
    name: my_static_ip
@@ -46,9 +42,9 @@
  community.aws.lightsail_static_ip:
    state: absent
    name: my_static_ip
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 static_ip:
   description: static_ip instance data
  returned: always
@@ -64,7 +60,7 @@
    name: "static_ip"
    resource_type: StaticIp
    support_code: "123456789012/192.0.2.5"
-'''
+"""
 
 try:
     import botocore
@@ -74,8 +70,9 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
 
 
 def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False):
diff --git a/msk_cluster.py b/msk_cluster.py
index 8bd8f9bba13..65c9edea258 100644
--- a/msk_cluster.py
+++ b/msk_cluster.py
@@ -1,12 +1,9 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
 DOCUMENTATION = r"""
 ---
 module: msk_cluster
@@ -199,16 +196,16 @@
         description: How many seconds to wait. Cluster creation can take up to 20-30 minutes.
         type: int
         default: 3600
-extends_documentation_fragment:
-    - amazon.aws.aws
-    - amazon.aws.ec2
-    - amazon.aws.boto3
-    - amazon.aws.tags
 notes:
     - All operations are time consuming, for example create takes 20-30 minutes,
       update kafka version -- more than one hour, update configuration -- 10-15 minutes;
    - Cluster's brokers get evenly distributed over a number of availability zones
      that's equal to the number of subnets.
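
An aside on is_boto3_error_code, which the lightsail hunks above relocate to module_utils.botocore: it builds an exception type usable directly in an except clause, so a module can treat one specific AWS error code as a normal outcome while every other ClientError still propagates. A minimal usage sketch under the assumption that client is a boto3 Lightsail client created by module.client(); the instance name is hypothetical:

from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code


def find_instance(client, name):
    try:
        return client.get_instance(instanceName=name)['instance']
    except is_boto3_error_code('NotFoundException'):
        # Only this error code is swallowed; other ClientErrors still raise.
        return None
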
+extends_documentation_fragment:
+    - amazon.aws.common.modules
+    - amazon.aws.region.modules
+    - amazon.aws.boto3
+    - amazon.aws.tags
 """
 
 EXAMPLES = r"""
@@ -266,12 +263,12 @@
 except ImportError:
     pass  # handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    camel_dict_to_snake_dict,
-    compare_aws_tags,
-    AWSRetry,
-)
 
 
 @AWSRetry.jittered_backoff(retries=5, delay=5)
diff --git a/msk_config.py b/msk_config.py
index 7f7874b74d4..8dce485410f 100644
--- a/msk_config.py
+++ b/msk_config.py
@@ -1,12 +1,9 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
 DOCUMENTATION = r"""
 ---
 module: msk_config
@@ -44,8 +41,8 @@
     type: list
     elements: str
 extends_documentation_fragment:
-    - amazon.aws.aws
-    - amazon.aws.ec2
+    - amazon.aws.common.modules
+    - amazon.aws.region.modules
     - amazon.aws.boto3
 """
 
@@ -99,11 +96,11 @@
 except ImportError:
     pass  # handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    camel_dict_to_snake_dict,
-    AWSRetry,
-)
 
 
 def dict_to_prop(d):
diff --git a/networkfirewall.py b/networkfirewall.py
index 9bb6ebb753e..9460701cc9a 100644
--- a/networkfirewall.py
+++ b/networkfirewall.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall
 short_description: manage AWS Network Firewall firewalls
 version_added: 4.0.0
@@ -104,13 +102,13 @@
 author:
   - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
-'''
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Create an AWS Network Firewall
 - community.aws.networkfirewall:
     name: 'ExampleFirewall'
@@ -142,9 +140,9 @@
 - community.aws.networkfirewall:
     state: absent
     name: 'ExampleFirewall'
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 firewall:
   description: The full details of the firewall
   returned: success
@@ -269,10 +267,9 @@
         }
       }
     }
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
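
The networkfirewall hunks above reduce to a single import swap, which works because the community subclass keeps the parent's interface; aliasing it back to the old name leaves every call site untouched. A minimal sketch of the resulting module skeleton (the option name is hypothetical, and running it requires the amazon.aws and community.aws collections to be installed):

from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule


def main():
    # Same constructor and helpers as the amazon.aws base class.
    module = AnsibleAWSModule(
        argument_spec=dict(name=dict(type='str')),
        supports_check_mode=True,
    )
    module.exit_json(changed=False, name=module.params['name'])


if __name__ == '__main__':
    main()
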
diff --git a/networkfirewall_info.py b/networkfirewall_info.py
index 85df6b026ba..70395f75d9e 100644
--- a/networkfirewall_info.py
+++ b/networkfirewall_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall_info
 short_description: describe AWS Network Firewall firewalls
 version_added: 4.0.0
@@ -34,14 +32,15 @@
     elements: str
     aliases: ['vpcs', 'vpc_id']
 
-author: Mark Chappell (@tremble)
+author:
+  - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Describe all firewalls in an account
 - community.aws.networkfirewall_info: {}
 
@@ -53,9 +52,9 @@
 # Describe a firewall by name
 - community.aws.networkfirewall_info:
     name: ExampleFirewall
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 firewall_list:
   description: A list of ARNs of the matching firewalls.
  type: list
@@ -184,10 +183,9 @@
        }
      }
    }
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py
index 1026138a6b4..61affcbc9ab 100644
--- a/networkfirewall_policy.py
+++ b/networkfirewall_policy.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall_policy
 short_description: manage AWS Network Firewall policies
 version_added: 4.0.0
@@ -139,17 +137,16 @@
         type: int
         required: false
-
 author:
   - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
   - amazon.aws.tags
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Create an AWS Network Firewall Policy with default rule order
 - community.aws.networkfirewall_policy:
     stateful_rule_order: 'default'
@@ -178,9 +175,9 @@
 - community.aws.networkfirewall_policy:
     state: absent
     name: 'ExampleDropPolicy'
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 policy:
   description: The details of the policy
  type: dict
@@ -336,10 +333,9 @@
      type: dict
      returned: success
      example: {'tagName': 'Some Value'}
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
diff --git a/networkfirewall_policy_info.py b/networkfirewall_policy_info.py
index 1f170f5b304..9f0de62e119 100644
--- a/networkfirewall_policy_info.py
+++ b/networkfirewall_policy_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall_policy_info
 short_description: describe AWS Network Firewall policies
 version_added: 4.0.0
@@ -26,14 +24,15 @@
     required: false
     type: str
 
-author: Mark Chappell (@tremble)
+author:
+  - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Describe all Firewall policies in an account
 - community.aws.networkfirewall_policy_info: {}
 
@@ -45,9 +44,9 @@
 # Describe a Firewall policy by name
 - community.aws.networkfirewall_policy_info:
     name: ExamplePolicy
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 policy_list:
   description: A list of ARNs of the matching policies.
  type: list
@@ -212,10 +211,9 @@
      type: dict
      returned: success
      example: {'tagName': 'Some Value'}
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py
index c8e2ea38bd3..2a10b6f4e69 100644
--- a/networkfirewall_rule_group.py
+++ b/networkfirewall_rule_group.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall_rule_group
 short_description: create, delete and modify AWS Network Firewall rule groups
 version_added: 4.0.0
@@ -263,17 +261,16 @@
         type: int
         required: false
-
 author:
   - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
-'''
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Create a rule group
 - name: Create a minimal AWS Network Firewall Rule Group
   community.aws.networkfirewall_rule_group:
@@ -397,9 +394,9 @@
     type: 'stateful'
     state: absent
 
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 rule_group:
   description: Details of the rules in the rule group
  type: dict
@@ -708,10 +705,9 @@
      type: str
      returned: success
      example: 'STATEFUL'
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py
index a9cec3778bb..bcd83070b42 100644
--- a/networkfirewall_rule_group_info.py
+++ b/networkfirewall_rule_group_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 module: networkfirewall_rule_group_info
 short_description: describe AWS Network Firewall rule groups
 version_added: 4.0.0
@@ -43,14 +41,15 @@
     choices: ['managed', 'account']
     type: str
 
-author: Mark Chappell (@tremble)
+author:
+  - Mark Chappell (@tremble)
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Describe all Rule Groups in an account (excludes managed groups)
 - community.aws.networkfirewall_rule_group_info: {}
 
@@ -69,9 +68,9 @@
     name: ExampleRuleGroup
     type: stateful
 
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 rule_list:
   description: A list of ARNs of the matching rule groups.
  type: list
@@ -387,10 +386,9 @@
      type: str
      returned: success
      example: 'STATEFUL'
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
diff --git a/opensearch.py b/opensearch.py
index 7ed8c0722a2..bbe290890a0 100644
--- a/opensearch.py
+++ b/opensearch.py
@@ -1,20 +1,18 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = """
+DOCUMENTATION = r"""
 ---
 module: opensearch
 short_description: Creates OpenSearch or ElasticSearch domain
 description:
   - Creates or modifies an Amazon OpenSearch Service domain.
 version_added: 4.0.0
-author: "Sebastien Rosset (@sebastien-rosset)"
+author:
+  - "Sebastien Rosset (@sebastien-rosset)"
 options:
   state:
     description:
@@ -390,13 +388,15 @@
 requirements:
   - botocore >= 1.21.38
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
+  - amazon.aws.boto3
 """
 
+RETURN = r""" # """
+
-EXAMPLES = """
+EXAMPLES = r"""
 
 - name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters
   community.aws.opensearch:
@@ -494,26 +494,20 @@
 
 from ansible.module_utils.six import string_types
 
-# import module snippets
-from ansible_collections.amazon.aws.plugins.module_utils.core import (
-    AnsibleAWSModule,
-    is_boto3_error_code,
-)
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    AWSRetry,
-    boto3_tag_list_to_ansible_dict,
-    compare_policies,
-)
-from ansible_collections.community.aws.plugins.module_utils.opensearch import (
-    compare_domain_versions,
-    ensure_tags,
-    get_domain_status,
-    get_domain_config,
-    get_target_increment_version,
-    normalize_opensearch,
-    parse_version,
-    wait_for_domain_status,
-)
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.opensearch import compare_domain_versions
+from ansible_collections.community.aws.plugins.module_utils.opensearch import ensure_tags
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_target_increment_version
+from ansible_collections.community.aws.plugins.module_utils.opensearch import normalize_opensearch
+from ansible_collections.community.aws.plugins.module_utils.opensearch import parse_version
+from ansible_collections.community.aws.plugins.module_utils.opensearch import wait_for_domain_status
 
 
 def ensure_domain_absent(client, module):
diff --git a/opensearch_info.py b/opensearch_info.py
index 9ef4a1eac37..2859c4854b5 100644
--- a/opensearch_info.py
+++ b/opensearch_info.py
@@ -1,20 +1,18 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = """
+DOCUMENTATION = r"""
 ---
 module: opensearch_info
 short_description: obtain information about one or more OpenSearch or ElasticSearch domains
 description:
   - Obtain information about one Amazon OpenSearch Service domain.
 version_added: 4.0.0
-author: "Sebastien Rosset (@sebastien-rosset)"
+author:
+  - "Sebastien Rosset (@sebastien-rosset)"
 options:
   domain_name:
     description:
@@ -31,12 +29,12 @@
 requirements:
   - botocore >= 1.21.38
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
 """
 
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Get information about an OpenSearch domain instance
   community.aws.opensearch_info:
    domain_name: my-search-cluster
  register: new_cluster
@@ -50,9 +48,9 @@
    tags:
      Applications: search
      Environment: Development
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 instances:
   description: List of OpenSearch domain instances
  returned: always
@@ -441,7 +439,7 @@
          description: The name of the OpenSearch domain.
          returned: always
          type: str
-'''
+"""
 
 
 try:
     import botocore
 except ImportError:
     pass  # handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
-    AWSRetry,
-    boto3_tag_list_to_ansible_dict,
-    camel_dict_to_snake_dict,
-)
-from ansible_collections.community.aws.plugins.module_utils.opensearch import (
-    get_domain_config,
-    get_domain_status,
-)
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status
 
 
 def domain_info(client, module):
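
For the repeated module_utils.ec2 to module_utils.tagging moves in these hunks, the data shapes are worth spelling out once. Pure-Python equivalents of the two helpers, shown only to illustrate their contracts - these are not the collection's implementations:

def boto3_tag_list_to_ansible_dict(tag_list):
    # boto3 style [{'Key': 'Env', 'Value': 'dev'}] -> Ansible style {'Env': 'dev'}
    return {t['Key']: t['Value'] for t in tag_list}


def compare_aws_tags(current, desired, purge_tags=True):
    # Returns (tags_to_set, tag_keys_to_unset), mirroring the helper's contract.
    to_set = {k: v for k, v in desired.items() if current.get(k) != v}
    to_unset = [k for k in current if k not in desired] if purge_tags else []
    return to_set, to_unset


current = boto3_tag_list_to_ansible_dict([{'Key': 'Name', 'Value': 'Splunk'}, {'Key': 'Env', 'Value': 'legacy'}])
print(compare_aws_tags(current, {'Name': 'Splunk', 'Env': 'development'}))
# -> ({'Env': 'development'}, [])
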
diff --git a/redshift.py b/redshift.py
index f218e0a70c9..e0efbefa02a 100644
--- a/redshift.py
+++ b/redshift.py
@@ -1,14 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 
 # Copyright 2014 Jens Carl, Hothead Games Inc.
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 author:
   - "Jens Carl (@j-carl), Hothead Games Inc."
@@ -170,13 +166,13 @@
 notes:
   - Support for I(tags) and I(purge_tags) was added in release 1.3.0.
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
-  - amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
-'''
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Basic cluster provisioning example
   community.aws.redshift:
     command: create
@@ -191,9 +187,9 @@
     identifier: new_cluster
     skip_final_cluster_snapshot: true
     wait: true
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 cluster:
     description: dictionary containing all the cluster information
     returned: success
@@ -257,21 +253,23 @@
             description: aws tags for cluster.
             returned: success
             type: dict
-'''
+"""
 
 try:
     import botocore
 except ImportError:
     pass  # caught by AnsibleAWSModule
 
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
 from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
 
 
 def _ensure_tags(redshift, identifier, existing_tags, module):
diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py
index be4a5cbb46d..f8e0970f65c 100644
--- a/redshift_cross_region_snapshots.py
+++ b/redshift_cross_region_snapshots.py
@@ -1,13 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 
 # Copyright: (c) 2018, JR Kerkstra
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: redshift_cross_region_snapshots
 version_added: 1.0.0
@@ -15,7 +12,8 @@
 description:
   - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots.
   - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy)
-author: JR Kerkstra (@captainkerk)
+author:
+  - JR Kerkstra (@captainkerk)
 options:
   cluster_name:
     description:
@@ -54,13 +52,12 @@
     aliases: [ "retention_period" ]
     type: int
 extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
-
-'''
+  - amazon.aws.region.modules
+  - amazon.aws.common.modules
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: configure cross-region snapshot on cluster `johniscool`
   community.aws.redshift_cross_region_snapshots:
     cluster_name: johniscool
@@ -84,9 +81,9 @@
     state: absent
     region: us-east-1
     destination_region: us-west-2
-'''
+"""
 
-RETURN = ''' # '''
+RETURN = r""" # """
 
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
diff --git a/redshift_info.py b/redshift_info.py
index 2b94e313640..2093dd38ad5 100644
--- a/redshift_info.py
+++ b/redshift_info.py
@@ -1,17 +1,15 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: redshift_info
 version_added: 1.0.0
-author: "Jens Carl (@j-carl)"
+author:
+  - "Jens Carl (@j-carl)"
 short_description: Gather information about Redshift cluster(s)
 description:
   - Gather information about Redshift cluster(s).
@@ -30,13 +28,12 @@
     required: false
     type: dict
 extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
+  - amazon.aws.region.modules
+  - amazon.aws.common.modules
+  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 - name: Find all clusters
@@ -65,9 +62,9 @@
       stack: db
   register: redshift_user
   failed_when: "{{ redshift_user.results | length == 0 }}"
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 # For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
 ---
 cluster_identifier:
@@ -273,17 +270,19 @@
     returned: success
     type: list
     sample: []
-'''
+"""
 
 import re
 
 try:
-    from botocore.exceptions import BotoCoreError, ClientError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # caught by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 
 
 def match_tags(tags_to_match, cluster):
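
camel_dict_to_snake_dict, which these hunks now take from ansible.module_utils.common.dict_transformations instead of the AWS-specific ec2 module_utils, is what turns boto3's CamelCase responses into the snake_case values the modules return. A flat-dict approximation to show the transformation (the real helper also recurses into nested dicts and lists and treats acronyms more carefully):

import re


def snakify(d):
    # Insert an underscore before each capital that follows a lowercase letter or digit.
    return {re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', k).lower(): v for k, v in d.items()}


print(snakify({'ClusterIdentifier': 'new-cluster', 'NodeType': 'dc2.large'}))
# -> {'cluster_identifier': 'new-cluster', 'node_type': 'dc2.large'}
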
diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py
index 902cee75282..724c064cbe9 100644
--- a/redshift_subnet_group.py
+++ b/redshift_subnet_group.py
@@ -1,13 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 
 # Copyright 2014 Jens Carl, Hothead Games Inc.
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: redshift_subnet_group
 version_added: 1.0.0
@@ -40,14 +37,14 @@
     type: list
     elements: str
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
 author:
   - "Jens Carl (@j-carl), Hothead Games Inc."
-'''
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Create a Redshift subnet group
   community.aws.redshift_subnet_group:
     state: present
@@ -61,9 +58,9 @@
   community.aws.redshift_subnet_group:
     state: absent
     group_name: redshift-subnet
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 cluster_subnet_group:
     description: A dictionary containing information about the Redshift subnet group.
     returned: success
@@ -92,7 +89,7 @@
             sample:
               - subnet-aaaaaaaa
              - subnet-bbbbbbbb
-'''
+"""
 
 try:
     import botocore
@@ -101,10 +98,11 @@
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 
 
 def get_subnet_group(name):
diff --git a/s3_bucket_info.py b/s3_bucket_info.py
index b4f72dd55a0..20995539c45 100644
--- a/s3_bucket_info.py
+++ b/s3_bucket_info.py
@@ -1,14 +1,10 @@
 #!/usr/bin/python
-"""
-Copyright (c) 2017 Ansible Project
-GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
+# -*- coding: utf-8 -*-
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: s3_bucket_info
 version_added: 1.0.0
@@ -114,12 +110,12 @@
     default: False
     version_added: 1.4.0
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 # Note: Only AWS S3 is currently supported
 
@@ -157,9 +153,9 @@
 - name: List buckets
   ansible.builtin.debug:
     msg: "{{ result['buckets'] }}"
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 bucket_list:
   description: "List of buckets"
  returned: always
@@ -399,17 +395,19 @@
      returned: always
      type: str
      sample: https
-'''
+"""
 
 try:
     import botocore
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
 
 
 def get_bucket_list(module, connection, name="", name_filter=""):
diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py
index fa0424b40c2..0a8109b2adb 100644
--- a/s3_bucket_notification.py
+++ b/s3_bucket_notification.py
@@ -1,15 +1,11 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 
 # Copyright: (c) 2021, Ansible Project
 # (c) 2019, XLAB d.o.o
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: s3_bucket_notification
 version_added: 1.0.0
@@ -104,12 +100,12 @@
     type: str
     default: ''
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 ---
 # Examples adding notification target configs to a S3 bucket
 - name: Setup bucket event notification to a Lambda function
@@ -138,9 +134,9 @@
     state: absent
     event_name: on_file_add_or_remove
     bucket_name: test-bucket
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 notification_configuration:
   description: dictionary of currently applied notifications
   returned: success
@@ -158,16 +154,18 @@
       description:
         - List of current SNS notification configurations applied to the bucket.
       type: list
-'''
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
 
 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # will be protected by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
 
 class AmazonBucket:
     def __init__(self, module, client):
diff --git a/s3_cors.py b/s3_cors.py
index e7dc16cbaa7..797c8cc5050 100644
--- a/s3_cors.py
+++ b/s3_cors.py
@@ -1,13 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
 
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: s3_cors
 version_added: 1.0.0
@@ -36,12 +33,12 @@
     choices: [ 'present', 'absent' ]
     type: str
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 # Create a simple cors for s3 bucket
@@ -65,9 +62,9 @@
 - community.aws.s3_cors:
     name: mys3bucket
     state: absent
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 changed:
   description: check to see if a change was made to the rules
   returned: always
@@ -96,15 +93,19 @@
             "max_age_seconds": 30000
         }
     ]
-'''
+"""
 
 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
 
 
 def create_or_update_bucket_cors(connection, module):
diff --git a/s3_lifecycle.py b/s3_lifecycle.py
index 1bad5dbecf0..b01402ebdb5 100644
--- a/s3_lifecycle.py
+++ b/s3_lifecycle.py
@@ -1,19 +1,18 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: s3_lifecycle
 version_added: 1.0.0
 short_description: Manage S3 bucket lifecycle rules in AWS
 description:
-  - Manage S3 bucket lifecycle rules in AWS.
-author: "Rob White (@wimnat)"
+  - Manage S3 bucket lifecycle rules in AWS.
+author:
+  - "Rob White (@wimnat)"
 notes:
   - If specifying expiration time as days then transition time must also be specified in days.
   - If specifying expiration time as a date then transition time must also be specified as a date.
@@ -149,13 +148,14 @@
     type: bool
     default: false
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
+RETURN = r""" # """
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 - name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
@@ -219,7 +219,7 @@
         storage_class: standard_ia
       - transition_days: 90
         storage_class: glacier
-'''
+"""
 
 from copy import deepcopy
 import datetime
@@ -227,6 +227,7 @@
 
 try:
     from dateutil import parser as date_parser
+
     HAS_DATEUTIL = True
 except ImportError:
     HAS_DATEUTIL = False
@@ -236,11 +237,12 @@
 except ImportError:
     pass  # handled by AnsibleAwsModule
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
 from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
 
 
 def parse_date(date):
diff --git a/s3_logging.py b/s3_logging.py
index 5e600582d9c..3db5fbf61e7 100644
--- a/s3_logging.py
+++ b/s3_logging.py
@@ -1,19 +1,18 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: s3_logging
 version_added: 1.0.0
 short_description: Manage logging facility of an s3 bucket in AWS
 description:
-  - Manage logging facility of an s3 bucket in AWS
-author: Rob White (@wimnat)
+  - Manage logging facility of an s3 bucket in AWS
+author:
+  - Rob White (@wimnat)
 options:
   name:
     description:
@@ -36,13 +35,14 @@
     default: ""
     type: str
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-'''
+RETURN = r""" # """
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs @@ -57,18 +57,19 @@ name: mywebsite.com state: absent -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index d4c73e55267..333bb98cb67 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -1,23 +1,22 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_metrics_configuration version_added: 1.3.0 short_description: Manage s3 bucket metrics configuration in AWS description: - - Manage s3 bucket metrics configuration in AWS which allows to get the CloudWatch request metrics for the objects in a bucket -author: Dmytro Vorotyntsev (@vorotech) + - Manage s3 bucket metrics configuration in AWS, which allows you to get the CloudWatch request metrics for the objects in a bucket +author: + - Dmytro Vorotyntsev (@vorotech) notes: - - This modules manages single metrics configuration, the s3 bucket might have up to 1,000 metrics configurations - - To request metrics for the entire bucket, create a metrics configuration without a filter - - Metrics configurations are necessary only to enable request metric, bucket-level daily storage metrics are always turned on + - This module manages a single metrics configuration; an s3 bucket might have up to 1,000 metrics configurations + - To request metrics for the entire bucket, create a metrics configuration without a filter + - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on options: bucket_name: description: @@ -48,13 +47,14 @@ choices: ['present', 'absent'] type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' +RETURN = r""" # """ -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details.
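# A minimal sketch of the AWSRetry decorator now imported from module_utils.retries
# rather than module_utils.ec2: jittered_backoff re-runs the wrapped call with
# randomised exponential backoff when AWS returns throttling-style error codes.
# Assumes boto3 and the amazon.aws collection are installed; names are invented.
import boto3

from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

client = boto3.client("s3")

@AWSRetry.jittered_backoff(retries=5, delay=2)
def list_metrics_configurations(bucket):
    # throttling errors are retried automatically; other ClientErrors propagate
    return client.list_bucket_metrics_configurations(Bucket=bucket)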
- name: Create a metrics configuration that enables metrics for an entire bucket @@ -94,17 +94,19 @@ id: EntireBucket state: absent -''' +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): diff --git a/s3_sync.py b/s3_sync.py index 19466f21f26..30a2e675f33 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -1,31 +1,17 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: s3_sync version_added: 1.0.0 short_description: Efficiently upload multiple files to S3 description: - - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, - inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping. +- The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing, + inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
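# A minimal sketch of the tag helpers now sourced from module_utils.tagging (see the
# s3_metrics_configuration hunk above): Ansible modules take tags as a plain dict,
# while most boto3 APIs want a list of {'Key': ..., 'Value': ...} pairs. Assumes the
# amazon.aws collection is installed; the sample tags are invented.
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

tags = {"Env": "prod", "Team": "data"}
tag_list = ansible_dict_to_boto3_tag_list(tags)
# -> [{'Key': 'Env', 'Value': 'prod'}, {'Key': 'Team', 'Value': 'data'}]
assert boto3_tag_list_to_ansible_dict(tag_list) == tags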
options: mode: description: @@ -127,15 +113,15 @@ default: false type: bool -author: Ted Timmons (@tedder) +author: +- Ted Timmons (@tedder) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: basic upload community.aws.s3_sync: bucket: tedder @@ -166,9 +152,9 @@ storage_class: "GLACIER" include: "*" exclude: "*.txt,.*" -''' +""" -RETURN = ''' +RETURN = r""" filelist_initial: description: file listing (dicts) from initial globbing returned: always @@ -241,7 +227,7 @@ "whytime": "1477931637 / 1477931489" }] -''' +""" import datetime import fnmatch @@ -251,6 +237,7 @@ try: from dateutil import tz + HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False @@ -262,11 +249,10 @@ from ansible.module_utils._text import to_text -# import module snippets -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule def gather_files(fileroot, include=None, exclude=None): diff --git a/s3_website.py b/s3_website.py index f5ba78bf746..b73da51a68c 100644 --- a/s3_website.py +++ b/s3_website.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: s3_website version_added: 1.0.0 short_description: Configure an s3 bucket as a website description: - - Configure an s3 bucket as a website -author: Rob White (@wimnat) + - Configure an s3 bucket as a website +author: + - Rob White (@wimnat) options: name: description: @@ -44,13 +43,12 @@ type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
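# A minimal sketch of the inverse transformation used when building boto3 request
# parameters from module arguments (s3_cors above and the wafv2_* hunks below import
# it): with capitalize_first=True the keys take the UpperCamelCase form boto3 expects.
# Assumes ansible-core is installed; the parameters are invented.
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

params = {"index_document": "index.html", "error_key": "errors/404.htm"}
print(snake_dict_to_camel_dict(params, capitalize_first=True))
# -> {'IndexDocument': 'index.html', 'ErrorKey': 'errors/404.htm'}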
- name: Configure an s3 bucket to redirect all requests to example.com @@ -71,9 +69,9 @@ error_key: errors/404.htm state: present -''' +""" -RETURN = ''' +RETURN = r""" index_document: description: index document type: complex @@ -157,7 +155,7 @@ returned: when routing rule present type: str sample: documents/ -''' +""" import time @@ -168,8 +166,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def _create_redirect_dict(url): diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index 870ed89059e..4aea26ebfc2 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, REY Remi # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: secretsmanager_secret version_added: 1.0.0 @@ -107,16 +105,16 @@ - Specifies the number of days between automatic scheduled rotations of the secret. default: 30 type: int -extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws - - amazon.aws.boto3 - - amazon.aws.tags notes: - Support for I(purge_tags) was added in release 4.0.0. -''' +extends_documentation_fragment: + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add string to AWS Secrets Manager community.aws.secretsmanager_secret: name: 'test_secret_string' @@ -146,9 +144,9 @@ secret_type: 'string' secret: "{{ lookup('community.general.random_string', length=16, special=false) }}" overwrite: false -''' +""" -RETURN = r''' +RETURN = r""" secret: description: The secret information returned: always @@ -212,21 +210,28 @@ returned: when the secret has tags example: {'MyTagName': 'Some Value'} version_added: 4.0.0 -''' +""" -from ansible.module_utils._text import to_bytes -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies from traceback import format_exc import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from 
ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class Secret(object): """An object representation of the Secret described by the self.module args""" diff --git a/ses_identity.py b/ses_identity.py index c68f3984c65..40ac0fc94a3 100644 --- a/ses_identity.py +++ b/ses_identity.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_identity version_added: 1.0.0 @@ -88,12 +86,12 @@ type: 'bool' default: True extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Ensure example@example.com email identity exists @@ -147,9 +145,9 @@ state: present delivery_notifications: topic: "{{ topic_info.sns_arn }}" -''' +""" -RETURN = ''' +RETURN = r""" identity: description: The identity being modified. returned: success @@ -217,19 +215,22 @@ headers_in_delivery_notifications_enabled: description: Whether or not headers are included in messages delivered to the delivery topic. type: bool -''' - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +""" import time try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10): # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've diff --git a/ses_identity_policy.py b/ses_identity_policy.py index 4aae1e933a9..ed558307df5 100644 --- a/ses_identity_policy.py +++ b/ses_identity_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_identity_policy version_added: 1.0.0 @@ -41,12 +39,12 @@ choices: [ 'present', 'absent' ] type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
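# A minimal sketch of compare_aws_tags, which secretsmanager_secret above now imports
# from module_utils.tagging: it diffs current tags against desired tags and returns
# what to set and what to remove, so the module only calls the tagging APIs when
# something actually changed. Assumes the amazon.aws collection is installed; the
# sample tags are invented.
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

current_tags = {"Env": "prod", "Owner": "alice"}
desired_tags = {"Env": "prod", "Team": "data"}
tags_to_set, tags_to_unset = compare_aws_tags(current_tags, desired_tags, purge_tags=True)
# -> tags_to_set == {'Team': 'data'}, tags_to_unset == ['Owner']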
- name: add sending authorization policy to domain identity @@ -75,26 +73,29 @@ identity: example.com policy_name: ExamplePolicy state: absent -''' +""" -RETURN = ''' +RETURN = r""" policies: description: A list of all policies present on the identity after the operation. returned: success type: list sample: [ExamplePolicy] -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry +""" import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def get_identity_policy(connection, module, identity, policy_name): try: diff --git a/ses_rule_set.py b/ses_rule_set.py index 8dd85dfe35b..72730b1b28f 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017, Ben Tomasik # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ses_rule_set version_added: 1.0.0 @@ -46,12 +44,12 @@ required: False default: False extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. --- @@ -84,7 +82,7 @@ force: true """ -RETURN = """ +RETURN = r""" active: description: if the SES rule set is active returned: success if I(state) is C(present) @@ -100,14 +98,18 @@ }] """ -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry - try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def list_rule_sets(client, module): try: diff --git a/sns.py b/sns.py index 798bf555318..6f4338954f8 100644 --- a/sns.py +++ b/sns.py @@ -4,11 +4,7 @@ # Copyright: (c) 2014, Michael J. 
Schultz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: sns short_description: Send Amazon Simple Notification Service messages version_added: 1.0.0 @@ -79,12 +75,12 @@ choices: ['json', 'string'] type: str extends_documentation_fragment: -- amazon.aws.ec2 -- amazon.aws.aws -- amazon.aws.boto3 -''' + - amazon.aws.region.modules + - amazon.aws.common.modules + - amazon.aws.boto3 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send default notification message via SNS community.aws.sns: msg: '{{ inventory_hostname }} has completed the play.' @@ -114,7 +110,7 @@ delegate_to: localhost """ -RETURN = """ +RETURN = r""" msg: description: Human-readable diagnostic information returned: always @@ -130,13 +126,15 @@ import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): protocols = [ diff --git a/sns_topic.py b/sns_topic.py index bcaf44a8840..03b3338350c 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: sns_topic short_description: Manages AWS SNS topics and subscriptions version_added: 1.0.0 @@ -163,7 +160,7 @@ - amazon.aws.region.modules - amazon.aws.tags.modules - amazon.aws.boto3 -''' +""" EXAMPLES = r""" @@ -216,7 +213,7 @@ state: absent """ -RETURN = r''' +RETURN = r""" sns_arn: description: The ARN of the topic you are modifying type: str @@ -332,7 +329,7 @@ returned: always type: bool sample: false -''' +""" import json @@ -341,10 +338,11 @@ except ImportError: pass # handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.community.aws.plugins.module_utils.sns import list_topics from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies diff --git a/sns_topic_info.py b/sns_topic_info.py index 6ff85ddd247..2fcde33e94c 100644 --- a/sns_topic_info.py +++ b/sns_topic_info.py @@ -1,13 
+1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: sns_topic_info short_description: sns_topic_info module version_added: 3.2.0 @@ -21,12 +18,12 @@ required: false type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: list all the topics community.aws.sns_topic_info: register: sns_topic_list @@ -35,9 +32,9 @@ community.aws.sns_topic_info: topic_arn: "{{ sns_arn }}" register: sns_topic_info -''' +""" -RETURN = r''' +RETURN = r""" result: description: - The result containing the details of one or all AWS SNS topics. @@ -132,7 +129,7 @@ description: The type of topic. type: str sample: "standard" -''' +""" try: @@ -140,11 +137,12 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.sns import list_topics from ansible_collections.community.aws.plugins.module_utils.sns import get_info +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def main(): argument_spec = dict( diff --git a/sqs_queue.py b/sqs_queue.py index b9cb0fa0f80..4a23f18871b 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: sqs_queue version_added: 1.0.0 @@ -104,13 +102,13 @@ - Enables content-based deduplication. Used for FIFOs only. - Defaults to C(false). extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags -''' + - amazon.aws.boto3 +""" -RETURN = r''' +RETURN = r""" content_based_deduplication: description: Enables content-based deduplication. Used for FIFOs only.
type: bool @@ -186,9 +184,9 @@ type: dict returned: always sample: '{"Env": "prod"}' -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create SQS queue with redrive policy community.aws.sqs_queue: name: my-queue @@ -258,7 +256,7 @@ name: my-queue region: ap-southeast-2 state: absent -''' +""" import json @@ -270,11 +268,12 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies def get_queue_name(module, is_fifo=False): diff --git a/ssm_parameter.py b/ssm_parameter.py index d654d45ecf5..493d2b294c4 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ssm_parameter version_added: 1.0.0 @@ -86,18 +84,17 @@ - "Bill Wang (@ozbillwang) " - "Michael De La Rue (@mikedlr)" -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags - notes: - Support for I(tags) and I(purge_tags) was added in release 5.3.0. 
-''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create or update key/value pair in AWS SSM parameter store community.aws.ssm_parameter: name: "Hello" @@ -165,9 +162,9 @@ community.aws.ssm_parameter: name: "Hello" tags: {} -''' +""" -RETURN = ''' +RETURN = r""" parameter_metadata: type: dict description: @@ -242,26 +239,28 @@ returned: when the parameter has tags example: {'MyTagName': 'Some Value'} version_added: 5.3.0 -''' +""" import time try: import botocore - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class ParameterWaiterFactory(BaseWaiterFactory): def __init__(self, module): diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py index 8bab4f7fecd..0f4b3ec1397 100644 --- a/stepfunctions_state_machine.py +++ b/stepfunctions_state_machine.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2019, Tom De Keyser (@tdekeyser) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: stepfunctions_state_machine version_added: 1.0.0 @@ -44,16 +41,17 @@ choices: [ present, absent ] type: str -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - - amazon.aws.tags author: - Tom De Keyser (@tdekeyser) -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" # Create a new AWS Step Functions state machine - name: Setup HelloWorld state machine community.aws.stepfunctions_state_machine: @@ -77,27 +75,28 @@ community.aws.stepfunctions_state_machine: name: HelloWorldStateMachine state: absent -''' +""" -RETURN = ''' +RETURN = r""" state_machine_arn: description: ARN of the AWS Step Functions state machine type: str returned: always -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list, - AWSRetry, - compare_aws_tags, - boto3_tag_list_to_ansible_dict, - ) +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def manage_state_machine(state, sfn_client, module): state_machine_arn = get_state_machine_arn(sfn_client, module) diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py index 17273f8146c..0b6858fbf42 100644 --- a/stepfunctions_state_machine_execution.py +++ b/stepfunctions_state_machine_execution.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2019, Prasad Katti (@prasadkatti) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: stepfunctions_state_machine_execution version_added: 1.0.0 @@ -47,16 +44,16 @@ type: str default: '' -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 - author: - Prasad Katti (@prasadkatti) -''' -EXAMPLES = ''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" - name: Start an execution of a state machine community.aws.stepfunctions_state_machine_execution: name: an_execution_name @@ -69,9 +66,9 @@ execution_arn: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" cause: "cause of task failure" error: "error code of the failure" -''' +""" -RETURN = ''' +RETURN = r""" execution_arn: description: ARN of the AWS Step Functions state machine execution. type: str @@ -87,7 +84,7 @@ type: str returned: if action == stop sample: "2019-11-02T22:39:49.071000-07:00" -''' +""" try: @@ -97,8 +94,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code def start_execution(module, sfn_client): diff --git a/storagegateway_info.py b/storagegateway_info.py index 252c13f87ca..854d1cbb0d8 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Loic BLOT (@nerzhul) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # This module is sponsored by E.T.A.I. 
(www.etai.fr) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: storagegateway_info version_added: 1.0.0 @@ -45,12 +43,12 @@ required: false default: true extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -RETURN = ''' +RETURN = r""" gateways: description: list of gateway objects returned: always @@ -161,9 +159,9 @@ returned: always type: str sample: "present" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: "Get AWS storage gateway information" @@ -172,16 +170,18 @@ - name: "Get AWS storage gateway information for region eu-west-3" community.aws.aws_sgw_info: region: eu-west-3 -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class SGWInformationManager(object): def __init__(self, client, module): diff --git a/sts_assume_role.py b/sts_assume_role.py index fe29cd3c62a..c53bfa9c978 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -1,21 +1,19 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: sts_assume_role version_added: 1.0.0 short_description: Assume a role using AWS Security Token Service and obtain temporary credentials description: - - Assume a role using AWS Security Token Service and obtain temporary credentials. + - Assume a role using AWS Security Token Service and obtain temporary credentials. author: - - Boris Ekelchik (@bekelchik) - - Marek Piatek (@piontas) + - Boris Ekelchik (@bekelchik) + - Marek Piatek (@piontas) options: role_arn: description: @@ -53,12 +51,12 @@ notes: - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -RETURN = ''' +RETURN = r""" sts_creds: description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token returned: always @@ -79,9 +77,9 @@ description: True if obtaining the credentials succeeds type: bool returned: always -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
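# A minimal sketch of compare_policies, which several modules in this patch
# (secretsmanager_secret, ses_identity_policy, sqs_queue, waf_condition) now import
# from module_utils.policy instead of module_utils.ec2: it normalises two IAM-style
# policy documents and returns True when they differ, so update APIs are only called
# on real changes. Assumes the amazon.aws collection is installed; policies invented.
from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

current = {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "ses:SendEmail", "Resource": "*"}]}
desired = {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["ses:SendEmail", "ses:SendRawEmail"], "Resource": "*"}]}
print(compare_policies(current, desired))
# -> True: the desired policy grants an extra action, so an update is needed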
# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) @@ -100,16 +98,18 @@ tags: MyNewTag: value -''' - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +""" try: - from botocore.exceptions import ClientError, ParamValidationError + from botocore.exceptions import ClientError + from botocore.exceptions import ParamValidationError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + def _parse_response(response): credentials = response.get('Credentials', {}) diff --git a/sts_session_token.py b/sts_session_token.py index 77e89f79687..c780097be61 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -1,19 +1,18 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: sts_session_token version_added: 1.0.0 short_description: Obtain a session token from the AWS Security Token Service description: - - Obtain a session token from the AWS Security Token Service. -author: Victor Costan (@pwnall) + - Obtain a session token from the AWS Security Token Service. +author: + - Victor Costan (@pwnall) options: duration_seconds: description: @@ -32,12 +31,12 @@ notes: - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token). extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -RETURN = """ +RETURN = r""" sts_creds: description: The Credentials object returned by the AWS Security Token Service returned: always @@ -54,7 +53,7 @@ """ -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html) @@ -73,7 +72,7 @@ tags: MyNewTag: value -''' +""" try: import botocore diff --git a/waf_condition.py b/waf_condition.py index 2f9f16d116a..6e1911323c9 100644 --- a/waf_condition.py +++ b/waf_condition.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Will Thames # Copyright (c) 2015 Mike Mochan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: waf_condition short_description: Create and delete WAF Conditions version_added: 1.0.0 @@ -20,10 +18,6 @@ author: - Will Thames (@willthames) - Mike Mochan (@mmochan) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: @@ -137,9 +131,14 @@ - absent default: present type: str -''' -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" - name: create WAF byte condition community.aws.waf_condition: name: my_byte_condition @@ -205,9 +204,9 @@ transformation: url_decode type: xss -''' +""" -RETURN = r''' +RETURN = r""" condition: description: Condition returned by operation. returned: always @@ -397,7 +396,7 @@ description: transformation applied to the text before matching. type: str sample: URL_DECODE -''' +""" try: import botocore @@ -406,16 +405,17 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + class Condition(object): diff --git a/waf_info.py b/waf_info.py index a69d9793348..37a8c2bd025 100644 --- a/waf_info.py +++ b/waf_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: waf_info short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters version_added: 1.0.0 @@ -29,12 +27,12 @@ - Mike Mochan (@mmochan) - Will Thames 
(@willthames) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: obtain all WAF information community.aws.waf_info: @@ -46,9 +44,9 @@ community.aws.waf_info: name: test_waf waf_regional: true -''' +""" -RETURN = ''' +RETURN = r""" wafs: description: The WAFs that match the passed arguments. returned: success @@ -114,10 +112,12 @@ "type": "ByteMatch" } ] -''' +""" + +from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl def main(): diff --git a/waf_rule.py b/waf_rule.py index 116ba87ceb9..54129dafc12 100644 --- a/waf_rule.py +++ b/waf_rule.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Will Thames # Copyright (c) 2015 Mike Mochan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: waf_rule short_description: Create and delete WAF Rules version_added: 1.0.0 @@ -20,10 +18,6 @@ author: - Mike Mochan (@mmochan) - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: @@ -71,9 +65,14 @@ default: false required: false type: bool -''' -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" - name: create WAF rule community.aws.waf_rule: name: my_waf_rule @@ -92,9 +91,9 @@ community.aws.waf_rule: name: "my_waf_rule" state: absent -''' +""" -RETURN = r''' +RETURN = r""" rule: description: WAF rule contents returned: always @@ -135,7 +134,7 @@ returned: always type: str sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261 -''' +""" import re @@ -144,17 +143,17 @@ except ImportError: pass # handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waf import ( - MATCH_LOOKUP, - list_regional_rules_with_backoff, - list_rules_with_backoff, - run_func_with_change_token_backoff, - get_web_acl_with_backoff, - list_web_acls_with_backoff, - 
list_regional_web_acls_with_backoff, -) def get_rule_by_name(client, module, name): diff --git a/waf_web_acl.py b/waf_web_acl.py index a6f84aa23db..dc35308e833 100644 --- a/waf_web_acl.py +++ b/waf_web_acl.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: waf_web_acl short_description: Create and delete WAF Web ACLs version_added: 1.0.0 @@ -19,10 +17,6 @@ author: - Mike Mochan (@mmochan) - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 options: name: @@ -85,9 +79,14 @@ default: false required: false type: bool -''' -EXAMPLES = r''' +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" - name: create web ACL community.aws.waf_web_acl: name: my_web_acl @@ -103,9 +102,9 @@ community.aws.waf_web_acl: name: my_web_acl state: absent -''' +""" -RETURN = r''' +RETURN = r""" web_acl: description: contents of the Web ACL. returned: always @@ -158,25 +157,25 @@ returned: always type: str sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c -''' +""" + +import re try: import botocore except ImportError: pass # handled by AnsibleAWSModule -import re +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.waf import ( - list_regional_rules_with_backoff, - list_regional_web_acls_with_backoff, - list_rules_with_backoff, - list_web_acls_with_backoff, - run_func_with_change_token_backoff, -) + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff +from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff def get_web_acl_by_name(client, module, name): diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py index 83375c89002..961c9325b31 100644 --- a/wafv2_ip_set.py +++ b/wafv2_ip_set.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_ip_set version_added: 1.5.0 @@ -63,14 +62,13 @@ - Support for I(purge_tags) was added in release 4.0.0. 
extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: test ip set wafv2_ip_set: name: test02 @@ -84,9 +82,9 @@ tags: A: B C: D -''' +""" -RETURN = """ +RETURN = r""" addresses: description: Current addresses of the ip set sample: @@ -117,13 +115,16 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py index e800ed0b499..4e0d4feb538 100644 --- a/wafv2_ip_set_info.py +++ b/wafv2_ip_set_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_ip_set_info version_added: 1.5.0 @@ -28,20 +27,19 @@ type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: test ip set wafv2_ip_set_info: name: test02 scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" addresses: description: Current addresses of the ip set sample: @@ -72,12 +70,14 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags diff --git a/wafv2_resources.py b/wafv2_resources.py index db59b91197b..552a2de03bd 100644 --- a/wafv2_resources.py +++ b/wafv2_resources.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_resources version_added: 1.5.0 @@ -37,22 +36,21 @@ required: true extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - 
-''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: add test alb to waf string03 community.aws.wafv2_resources: name: string03 scope: REGIONAL state: present arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" -''' +""" -RETURN = """ +RETURN = r""" resource_arns: description: Current resources where the wafv2 is applied on sample: @@ -62,12 +60,14 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py index 4833d7657f1..f9c5c3c08c2 100644 --- a/wafv2_resources_info.py +++ b/wafv2_resources_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_resources_info version_added: 1.5.0 @@ -28,20 +27,19 @@ type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: get web acl community.aws.wafv2_resources_info: name: string03 scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" resource_arns: description: Current resources where the wafv2 is applied on sample: @@ -51,12 +49,14 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index e2751b9b438..60cc60a131f 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_rule_group version_added: 1.5.0 @@ -67,14 +66,13 @@ type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: change description 
community.aws.wafv2_rule_group: name: test02 @@ -150,9 +148,9 @@ A: B C: D register: out -''' +""" -RETURN = """ +RETURN = r""" arn: description: Rule group arn sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 @@ -200,19 +198,22 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict class RuleGroup: diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index b59d4d613f4..c95b74f81c7 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_rule_group_info version_added: 1.5.0 @@ -28,20 +27,19 @@ type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: rule group info community.aws.wafv2_rule_group_info: name: test02 scope: REGIONAL -''' +""" -RETURN = """ +RETURN = r""" arn: description: Rule group arn sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 @@ -89,14 +87,16 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.community.aws.plugins.module_utils.wafv2 import 
wafv2_list_rule_groups from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags +from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups def get_rule_group(wafv2, name, scope, id, fail_json_aws): diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index e5770cd7439..3b7fd8daa16 100644 --- a/wafv2_web_acl.py +++ b/wafv2_web_acl.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_web_acl version_added: 1.5.0 @@ -102,14 +101,13 @@ - Support for the I(purge_tags) parameter was added in release 4.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create test web acl community.aws.wafv2_web_acl: name: test05 @@ -250,9 +248,9 @@ region: us-east-1 state: present -''' +""" -RETURN = """ +RETURN = r""" arn: description: web acl arn sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 @@ -315,14 +313,17 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py index 649cf10b884..8fe00f66a1a 100644 --- a/wafv2_web_acl_info.py +++ b/wafv2_web_acl_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: wafv2_web_acl_info version_added: 1.5.0 @@ -28,21 +27,20 @@ type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: get web acl community.aws.wafv2_web_acl_info: name: test05 scope: REGIONAL register: out -''' +""" -RETURN = """ +RETURN = r""" arn: description: web acl arn sample: 
arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 @@ -91,12 +89,14 @@ """ try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls From b611e17627ca8416e1521b2a0db7d0aa4af492a7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 13 Mar 2023 16:37:51 +0100 Subject: [PATCH 639/683] [6.0.0] Bump botocore requirements (#1744) [6.0.0] Bump botocore requirements SUMMARY In line with our botocore version policy, bump the minimum requirements. Starting with the 2.0.0 releases of amazon.aws and community.aws, it is generally the collection's policy to support the versions of botocore and boto3 that were released 12 months prior to the most recent major collection release, following semantic versioning (for example, 2.0.0, 3.0.0). ISSUE TYPE Feature Pull Request COMPONENT NAME requirements.txt ADDITIONAL INFORMATION boto3 - 1.22.0 - "Mon Apr 25 18:07:20 2022 +0000" botocore - 1.25.0 - "Mon Apr 25 18:07:02 2022 +0000" See also ansible-collections/amazon.aws#1342 Reviewed-by: Alina Buzachis --- opensearch.py | 2 -- opensearch_info.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/opensearch.py b/opensearch.py index bbe290890a0..e6635da499b 100644 --- a/opensearch.py +++ b/opensearch.py @@ -385,8 +385,6 @@ - how long before wait gives up, in seconds. default: 300 type: int -requirements: - - botocore >= 1.21.38 extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules diff --git a/opensearch_info.py b/opensearch_info.py index 2859c4854b5..7d6d8bb94ac 100644 --- a/opensearch_info.py +++ b/opensearch_info.py @@ -26,8 +26,6 @@ all tag key, value pairs. required: false type: dict -requirements: - - botocore >= 1.21.38 extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules From e6bc42e980499b967b9603a404b5d26c1998de8e Mon Sep 17 00:00:00 2001 From: Nuno Saavedra <32967989+Nfsaavedra@users.noreply.github.com> Date: Tue, 14 Mar 2023 10:48:17 +0000 Subject: [PATCH 640/683] lightsail: add support for firewall ports and snapshot management (#1724) lightsail: add support for firewall ports and snapshot management SUMMARY Created a new module called lightsail_snapshot that allows creating and deleting snapshots of already-created instances. Adds functionality to the lightsail module to define the firewall ports. Fixes #174 ISSUE TYPE Feature Pull Request New Module Pull Request COMPONENT NAME lightsail lightsail_snapshot Reviewed-by: Mark Chappell --- lightsail.py | 113 ++++++++++++++++++----- lightsail_snapshot.py | 205 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 296 insertions(+), 22 deletions(-) create mode 100644 lightsail_snapshot.py diff --git a/lightsail.py b/lightsail.py index 23ab0c76c86..0739a042316 100644 --- a/lightsail.py +++ b/lightsail.py @@ -47,6 +47,38 @@ - Launch script that can configure the instance with additional data.
type: str default: '' + public_ports: + description: + - A list of dictionaries to describe the ports to open for the specified instance. + type: list + elements: dict + suboptions: + from_port: + description: The first port in a range of open ports on the instance. + type: int + required: true + to_port: + description: The last port in a range of open ports on the instance. + type: int + required: true + protocol: + description: The IP protocol name accepted for the defined range of open ports. + type: str + choices: ['tcp', 'all', 'udp', 'icmp'] + required: true + cidrs: + description: + - The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + ipv6_cidrs: + description: + - The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + version_added: 6.0.0 key_pair_name: description: - Name of the key pair to use with the instance. @@ -83,6 +115,12 @@ bundle_id: nano_1_0 key_pair_name: id_rsa user_data: " echo 'hello world' > /home/ubuntu/test.txt" + public_ports: + - from_port: 22 + to_port: 22 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] register: my_instance - name: Delete an instance @@ -155,6 +193,7 @@ pass from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code @@ -194,17 +233,28 @@ def wait_for_instance_state(module, client, instance_name, states): ' {1}'.format(instance_name, states)) -def create_instance(module, client, instance_name): +def update_public_ports(module, client, instance_name): + try: + client.put_instance_public_ports( + portInfos=snake_dict_to_camel_dict(module.params.get("public_ports")), + instanceName=instance_name, + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +def create_or_update_instance(module, client, instance_name): inst = find_instance_info(module, client, instance_name) - if inst: - module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst)) - else: - create_params = {'instanceNames': [instance_name], - 'availabilityZone': module.params.get('zone'), - 'blueprintId': module.params.get('blueprint_id'), - 'bundleId': module.params.get('bundle_id'), - 'userData': module.params.get('user_data')} + + if not inst: + create_params = { + "instanceNames": [instance_name], + "availabilityZone": module.params.get("zone"), + "blueprintId": module.params.get("blueprint_id"), + "bundleId": module.params.get("bundle_id"), + "userData": module.params.get("user_data"), + } key_pair_name = module.params.get('key_pair_name') if key_pair_name: @@ -219,9 +269,15 @@ def create_instance(module, client, instance_name): if wait: desired_states = ['running'] wait_for_instance_state(module, client, instance_name, desired_states) - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst)) + if module.params.get("public_ports") is not None: + update_public_ports(module, client, instance_name) + after_update_inst = find_instance_info(module, client, instance_name, 
fail_if_not_found=True) + + module.exit_json( + changed=after_update_inst != inst, + instance=camel_dict_to_snake_dict(after_update_inst), + ) def delete_instance(module, client, instance_name): @@ -302,16 +358,29 @@ def start_or_stop_instance(module, client, instance_name, state): def main(): argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted', - 'rebooted']), - zone=dict(type='str'), - blueprint_id=dict(type='str'), - bundle_id=dict(type='str'), - key_pair_name=dict(type='str'), - user_data=dict(type='str', default=''), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int'), + name=dict(type="str", required=True), + state=dict( + type="str", default="present", choices=["present", "absent", "stopped", "running", "restarted", "rebooted"] + ), + zone=dict(type="str"), + blueprint_id=dict(type="str"), + bundle_id=dict(type="str"), + key_pair_name=dict(type="str"), + user_data=dict(type="str", default=""), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + public_ports=dict( + type="list", + elements="dict", + options=dict( + from_port=dict(type="int", required=True), + to_port=dict(type="int", required=True), + protocol=dict(type="str", choices=["tcp", "all", "udp", "icmp"], required=True), + cidrs=dict(type="list", elements="str"), + ipv6_cidrs=dict(type="list", elements="str"), + ), + required_one_of=[("cidrs", "ipv6_cidrs")], + ), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -323,7 +392,7 @@ def main(): state = module.params.get('state') if state == 'present': - create_instance(module, client, name) + create_or_update_instance(module, client, name) elif state == 'absent': delete_instance(module, client, name) elif state in ('running', 'stopped'): diff --git a/lightsail_snapshot.py b/lightsail_snapshot.py new file mode 100644 index 00000000000..1d0d178aa49 --- /dev/null +++ b/lightsail_snapshot.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: lightsail_snapshot +version_added: "6.0.0" +short_description: Creates snapshots of AWS Lightsail instances +description: + - Creates snapshots of AWS Lightsail instances. +author: + - "Nuno Saavedra (@Nfsaavedra)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + snapshot_name: + description: Name of the new instance snapshot. + required: true + type: str + instance_name: + description: + - Name of the instance to create the snapshot. + - Required when I(state=present). + type: str + wait: + description: + - Wait for the instance snapshot to be created before returning. + type: bool + default: true + wait_timeout: + description: + - How long before I(wait) gives up, in seconds. 
+ default: 300 + type: int + +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Create AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + instance_name: "my_instance" + +- name: Delete AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + state: absent +""" + +RETURN = r""" +changed: + description: if a snapshot has been modified/created + returned: always + type: bool + sample: + changed: true +snapshot: + description: instance snapshot data + type: dict + returned: always + sample: + arn: "arn:aws:lightsail:us-east-1:070807442430:InstanceSnapshot/54b0f785-7132-443d-9e32-95a6825636a4" + created_at: "2023-02-23T18:46:11.183000+00:00" + from_attached_disks: [] + from_blueprint_id: "amazon_linux_2" + from_bundle_id: "nano_2_0" + from_instance_arn: "arn:aws:lightsail:us-east-1:070807442430:Instance/5ca1e7ca-a994-4e19-bb82-deb9d79e9ca3" + from_instance_name: "my_instance" + is_from_auto_snapshot: false + location: + availability_zone: "all" + region_name: "us-east-1" + name: "my_instance_snapshot" + resource_type: "InstanceSnapshot" + size_in_gb: 20 + state: "available" + support_code: "351201681302/ami-06b48e5589f1e248b" + tags: [] +""" + +import time + +try: + import botocore +except ImportError: + # will be caught by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +def find_instance_snapshot_info(module, client, instance_snapshot_name, fail_if_not_found=False): + try: + res = client.get_instance_snapshot(instanceSnapshotName=instance_snapshot_name) + except is_boto3_error_code("NotFoundException") as e: + if fail_if_not_found: + module.fail_json_aws(e) + return None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + return res["instanceSnapshot"] + + +def wait_for_instance_snapshot(module, client, instance_snapshot_name): + wait_timeout = module.params.get("wait_timeout") + wait_max = time.time() + wait_timeout + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + + while wait_max > time.time(): + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + current_state = snapshot["state"] + if current_state != "pending": + break + time.sleep(5) + else: + module.fail_json(msg=f'Timed out waiting for instance snapshot "{instance_snapshot_name}" to be created.') + + return snapshot + + +def create_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + new_instance = snapshot is None + + if module.check_mode or not new_instance: + snapshot = snapshot if snapshot is not None else {} + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + try: + snapshot = client.create_instance_snapshot( + instanceSnapshotName=module.params.get("snapshot_name"), + instanceName=module.params.get("instance_name"), + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + if module.params.get("wait"): + snapshot = wait_for_instance_snapshot(module, client, module.params.get("snapshot_name")) 
+ + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + +def delete_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + if module.check_mode or snapshot is None: + changed = not (snapshot is None) + instance = snapshot if changed else {} + module.exit_json(changed=changed, instance=instance) + + try: + client.delete_instance_snapshot(instanceSnapshotName=module.params.get("snapshot_name")) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=True, instance=camel_dict_to_snake_dict(snapshot)) + + +def main(): + argument_spec = dict( + state=dict(type="str", default="present", choices=["present", "absent"]), + snapshot_name=dict(type="str", required=True), + instance_name=dict(type="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + ) + required_if = [ + ["state", "present", ("instance_name",)], + ] + + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + client = module.client("lightsail") + + state = module.params.get("state") + + if state == "present": + create_snapshot(module, client) + elif state == "absent": + delete_snapshot(module, client) + + +if __name__ == "__main__": + main() From 79d7e13a1699c4ea54708226b2119dfc064538ff Mon Sep 17 00:00:00 2001 From: Kamil Turek Date: Tue, 14 Mar 2023 12:23:57 +0100 Subject: [PATCH 641/683] ses: fix clearing notification topic (#1730) ses: fix clearing notification topic SUMMARY Fixes #150. As per the docs, the SnsTopic parameter has to be omitted from the request in order to clear the notification setting. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ses_identity Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: Kamil Turek --- ses_identity.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ses_identity.py b/ses_identity.py index 40ac0fc94a3..df80b736b91 100644 --- a/ses_identity.py +++ b/ses_identity.py @@ -300,26 +300,40 @@ def desired_topic(module, notification_type): def update_notification_topic(connection, module, identity, identity_notifications, notification_type): + # Not passing the parameter should not cause any changes. + if module.params.get(f"{notification_type.lower()}_notifications") is None: + return False + topic_key = notification_type + 'Topic' if identity_notifications is None: # If there is no configuration for notifications cannot be being sent to topics # hence assume None as the current state. - current = None + current_topic = None elif topic_key in identity_notifications: - current = identity_notifications[topic_key] + current_topic = identity_notifications[topic_key] else: # If there is information on the notifications setup but no information on the # particular notification topic it's pretty safe to assume there's no topic for # this notification. 
AWS API docs suggest this information will always be # included but best to be defensive - current = None + current_topic = None - required = desired_topic(module, notification_type) + required_topic = desired_topic(module, notification_type) - if current != required: + if current_topic != required_topic: try: if not module.check_mode: - connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True) + request_kwargs = { + "Identity": identity, + "NotificationType": notification_type, + "aws_retry": True, + } + + # The topic has to be omitted from the request to disable the notification. + if required_topic is not None: + request_kwargs["SnsTopic"] = required_topic + + connection.set_identity_notification_topic(**request_kwargs) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format( identity=identity, From d3abfa08d8c8299cdf82e04256b4484194e1c551 Mon Sep 17 00:00:00 2001 From: Pascal Walter <79926629+Pascal-Architrave@users.noreply.github.com> Date: Tue, 14 Mar 2023 13:06:39 +0100 Subject: [PATCH 642/683] sns: Add parameters for fifo topics (#1733) sns: Add parameters for fifo topics SUMMARY Implements #1718. Adds message_group_id and message_deduplication_id to support publishing messages to FIFO topics. ISSUE TYPE Feature Pull Request COMPONENT NAME sns Reviewed-by: Mark Chappell --- sns.py | 47 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/sns.py b/sns.py index 6f4338954f8..96f5b72e70e 100644 --- a/sns.py +++ b/sns.py @@ -69,11 +69,28 @@ message_structure: description: - The payload format to use for the message. - - This must be 'json' to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)). - - It must be 'string' to support I(message_attributes). + - This must be C(json) to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)). + - It must be C(string) to support I(message_attributes). default: json choices: ['json', 'string'] type: str + message_group_id: + description: + - A tag that is used to process messages that belong to the same group in a FIFO manner. + - Must be included when publishing a message to a FIFO topic. + - Can contain up to 128 alphanumeric characters and punctuation. + type: str + version_added: 5.4.0 + message_deduplication_id: + description: + - Only valid in connection with I(message_group_id). + - Overwrites the auto-generated MessageDeduplicationId. + - Can contain up to 128 alphanumeric characters and punctuation. + - Messages with the same deduplication ID are recognized as the same message. + - Gets overwritten by an auto-generated token if the topic has ContentBasedDeduplication set. + type: str + version_added: 5.4.0 + extends_documentation_fragment: - amazon.aws.region.modules - amazon.aws.common.modules @@ -108,6 +125,14 @@ data_type: String string_value: "green" delegate_to: localhost + +- name: Send message to a FIFO topic + community.aws.sns: + topic: "deploy" + msg: "Message with message group id" + subject: Deploy complete!
+ message_group_id: "deploy-1" + delegate_to: localhost """ RETURN = r""" @@ -121,6 +146,10 @@ returned: when success type: str sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b +sequence_number: + description: A 128-bit sequence number that is assigned to the message when publishing to FIFO topics + returned: when success + type: str """ import json @@ -154,6 +183,8 @@ def main(): topic=dict(required=True), message_attributes=dict(type='dict'), message_structure=dict(choices=['json', 'string'], default='json'), + message_group_id=dict(), + message_deduplication_id=dict(), ) for p in protocols: @@ -172,6 +203,11 @@ def main(): module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') sns_kwargs['MessageAttributes'] = module.params['message_attributes'] + if module.params["message_group_id"]: + sns_kwargs["MessageGroupId"] = module.params["message_group_id"] + if module.params["message_deduplication_id"]: + sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"] + dict_msg = { 'default': sns_kwargs['Message'] } @@ -202,7 +238,12 @@ def main(): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg='Failed to publish message') - module.exit_json(msg='OK', message_id=result['MessageId']) + sns_result = dict(msg="OK", message_id=result["MessageId"]) + + if module.params["message_group_id"]: + sns_result["sequence_number"] = result["SequenceNumber"] + + module.exit_json(**sns_result) if __name__ == '__main__': From 8a3c37d46ef54592a81e8398b08064facff07266 Mon Sep 17 00:00:00 2001 From: Kosei Kitahara Date: Mon, 20 Mar 2023 19:16:41 +0900 Subject: [PATCH 643/683] Support new enableExecuteCommand options for ECS service (#488) Support new enableExecuteCommand options for ECS service SUMMARY Support the new ECS exec feature for the ECS service. ISSUE TYPE Feature Pull Request COMPONENT NAME ecs_service ADDITIONAL INFORMATION Create an ECS service with the enable_execute_command option, - name: create exec service ecs_service: state: present ... enable_execute_command: true and we can then exec into the ECS task: $ aws ecs execute-command --cluster xxxxx --task arn:aws:ecs:us-east-1:*****:task/webapp/***** --container xxxxx --interactive --command /bin/bash The Session Manager plugin was installed successfully. Use the AWS CLI to start a session. Starting session with SessionId: ecs-execute-command-0c17f94b36227381f root@ip-10-0-66-68:/# Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- ecs_service.py | 156 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 106 insertions(+), 50 deletions(-) diff --git a/ecs_service.py b/ecs_service.py index 074dec4b176..2009dc3b54a 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -131,6 +131,13 @@ rollback: type: bool description: If enabled, ECS will roll back your service to the last completed deployment after a failure. + enable_execute_command: + description: + - Whether or not to enable the execute command functionality for the containers in the ECS task. + - If I(enable_execute_command=true), execute command functionality is enabled on all containers in the ECS task. + required: false + type: bool + version_added: 5.4.0 placement_constraints: description: - The placement constraints for the tasks in the service.
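For context, a minimal boto3 sketch of the update path the module drives when I(enable_execute_command) changes; the cluster and service names are placeholders, and forceNewDeployment is included because already-running tasks only pick up the setting once they are replaced:

import boto3

ecs = boto3.client("ecs", region_name="us-east-1")

# Flip the flag on an existing service; only tasks started afterwards
# can be targeted by `aws ecs execute-command`.
ecs.update_service(
    cluster="webapp",           # placeholder cluster name
    service="webapp-service",   # placeholder service name
    enableExecuteCommand=True,
    forceNewDeployment=True,
)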
@@ -778,6 +785,9 @@ def is_matching_service(self, expected, existing): if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}): return False + if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False): + return False + # expected is params. DAEMON scheduling strategy returns desired count equal to # number of instances running; don't check desired count if scheduling strat is daemon if (expected['scheduling_strategy'] != 'DAEMON'): @@ -786,11 +796,30 @@ def is_matching_service(self, expected, existing): return True - def create_service(self, service_name, cluster_name, task_definition, load_balancers, - desired_count, client_token, role, deployment_controller, deployment_configuration, - placement_constraints, placement_strategy, health_check_grace_period_seconds, - network_configuration, service_registries, launch_type, platform_version, - scheduling_strategy, capacity_provider_strategy, tags, propagate_tags): + def create_service( + self, + service_name, + cluster_name, + task_definition, + load_balancers, + desired_count, + client_token, + role, + deployment_controller, + deployment_configuration, + placement_constraints, + placement_strategy, + health_check_grace_period_seconds, + network_configuration, + service_registries, + launch_type, + platform_version, + scheduling_strategy, + capacity_provider_strategy, + tags, + propagate_tags, + enable_execute_command, + ): params = dict( cluster=cluster_name, @@ -836,14 +865,30 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan if scheduling_strategy: params['schedulingStrategy'] = scheduling_strategy + if enable_execute_command: + params["enableExecuteCommand"] = enable_execute_command + response = self.ecs.create_service(**params) return self.jsonize(response['service']) - def update_service(self, service_name, cluster_name, task_definition, desired_count, - deployment_configuration, placement_constraints, placement_strategy, - network_configuration, health_check_grace_period_seconds, - force_new_deployment, capacity_provider_strategy, load_balancers, - purge_placement_constraints, purge_placement_strategy): + def update_service( + self, + service_name, + cluster_name, + task_definition, + desired_count, + deployment_configuration, + placement_constraints, + placement_strategy, + network_configuration, + health_check_grace_period_seconds, + force_new_deployment, + capacity_provider_strategy, + load_balancers, + purge_placement_constraints, + purge_placement_strategy, + enable_execute_command, + ): params = dict( cluster=cluster_name, service=service_name, @@ -875,11 +920,14 @@ def update_service(self, service_name, cluster_name, task_definition, desired_co # desired count is not required if scheduling strategy is daemon if desired_count is not None: params['desiredCount'] = desired_count + if enable_execute_command is not None: + params["enableExecuteCommand"] = enable_execute_command if load_balancers: params['loadBalancers'] = load_balancers response = self.ecs.update_service(**params) + return self.jsonize(response['service']) def jsonize(self, service): @@ -967,8 +1015,9 @@ def main(): base=dict(type='int') ) ), - propagate_tags=dict(required=False, choices=['TASK_DEFINITION', 'SERVICE']), - tags=dict(required=False, type='dict'), + propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]), + tags=dict(required=False, type="dict"), + enable_execute_command=dict(required=False, type="bool"), ) module = 
AnsibleAWSModule(argument_spec=argument_spec, @@ -1081,47 +1130,54 @@ if task_definition is None and module.params['force_new_deployment']: task_definition = existing['taskDefinition'] - # update required - response = service_mgr.update_service(module.params['name'], - module.params['cluster'], - task_definition, - module.params['desired_count'], - deploymentConfiguration, - module.params['placement_constraints'], - module.params['placement_strategy'], - network_configuration, - module.params['health_check_grace_period_seconds'], - module.params['force_new_deployment'], - capacityProviders, - updatedLoadBalancers, - module.params['purge_placement_constraints'], - module.params['purge_placement_strategy'], - ) + try: + # update required + response = service_mgr.update_service( + module.params["name"], + module.params["cluster"], + task_definition, + module.params["desired_count"], + deploymentConfiguration, + module.params["placement_constraints"], + module.params["placement_strategy"], + network_configuration, + module.params["health_check_grace_period_seconds"], + module.params["force_new_deployment"], + capacityProviders, + updatedLoadBalancers, + module.params["purge_placement_constraints"], + module.params["purge_placement_strategy"], + module.params["enable_execute_command"], + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't update service") else: try: - response = service_mgr.create_service(module.params['name'], - module.params['cluster'], - module.params['task_definition'], - loadBalancers, - module.params['desired_count'], - clientToken, - role, - deploymentController, - deploymentConfiguration, - module.params['placement_constraints'], - module.params['placement_strategy'], - module.params['health_check_grace_period_seconds'], - network_configuration, - serviceRegistries, - module.params['launch_type'], - module.params['platform_version'], - module.params['scheduling_strategy'], - capacityProviders, - module.params['tags'], - module.params['propagate_tags'], - ) - except botocore.exceptions.ClientError as e: + response = service_mgr.create_service( + module.params["name"], + module.params["cluster"], + module.params["task_definition"], + loadBalancers, + module.params["desired_count"], + clientToken, + role, + deploymentController, + deploymentConfiguration, + module.params["placement_constraints"], + module.params["placement_strategy"], + module.params["health_check_grace_period_seconds"], + network_configuration, + serviceRegistries, + module.params["launch_type"], + module.params["platform_version"], + module.params["scheduling_strategy"], + capacityProviders, + module.params["tags"], + module.params["propagate_tags"], + module.params["enable_execute_command"], + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't create service") if response.get('tags', None): From fd9663524229ae2e3976c959c4c62983bc93672f Mon Sep 17 00:00:00 2001 From: FCO Date: Mon, 20 Mar 2023 14:42:43 +0100 Subject: [PATCH 644/683] New Module: Amazon MQ (#432) New Module: Amazon MQ SUMMARY Replicates ansible-collections/amazon.aws#266 as suggested. This PR contains some basic support for Amazon MQ. It covers managing Amazon MQ brokers, managing Amazon MQ configurations, and managing Amazon MQ (local) users. ISSUE TYPE New Module Pull Request COMPONENT NAME Amazon MQ ADDITIONAL INFORMATION We use those modules to manage our MQ Brokers.
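As a rough, stand-alone sketch (assuming default AWS credentials in the environment; region and broker name are placeholders) of the list/describe lookup the new mq_broker* modules build on; the same calls appear in get_broker_id() and get_broker_info() further down:

import boto3

mq = boto3.client("mq", region_name="us-east-1")

# Resolve a broker name to its ID, as get_broker_id() does.
broker_id = None
for summary in mq.list_brokers(MaxResults=100)["BrokerSummaries"]:
    if summary["BrokerName"] == "my_broker_2":
        broker_id = summary["BrokerId"]
        break

if broker_id:
    # describe_broker() is the basis of most state checks in these modules.
    print(mq.describe_broker(BrokerId=broker_id)["BrokerState"])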
The context of usage is brokers are created by different means (e.g. terraform) - but the module is capable of doing that as well we've developed a custom role around those modules to manage MQ user credentials we use another local extension of amazon.aws collection (will come as separate PR) that interfaces with AWS SecretsManager that role uses the reboot broker feature (part of this PR) to implement a custom handler that reboots a broker whenever a configuration run sees any changes in configuration and/or users The functionality of the added modules is illustrated in the added test suite (tests/integration/targets/mq) some of them still require a running MQ broker to be usable. Missing functionality/limitations: requires a recent version of boto3 library (older versions don't support all Amazon MQ features used here) API results are returned "as is", i.e. there's no conversion from camel case yaml to snake yaml mq_broker.py no support for LDAP connection (external user management) update configuration only supported through mq_broker_config.py known to work only with EngineType=ACTIVEMQ (proper support for RABBITMQ still missing) mq_broker_config.py needs proper XML comparison between current and desired configuration (current one is too simplistic) only tested with ActiveMQ configurations Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- mq_broker.py | 542 ++++++++++++++++++++++++++++++++++++++++++++ mq_broker_config.py | 222 ++++++++++++++++++ mq_broker_info.py | 116 ++++++++++ mq_user.py | 269 ++++++++++++++++++++++ mq_user_info.py | 150 ++++++++++++ 5 files changed, 1299 insertions(+) create mode 100644 mq_broker.py create mode 100644 mq_broker_config.py create mode 100644 mq_broker_info.py create mode 100644 mq_user.py create mode 100644 mq_user_info.py diff --git a/mq_broker.py b/mq_broker.py new file mode 100644 index 00000000000..2cc5b8e375a --- /dev/null +++ b/mq_broker.py @@ -0,0 +1,542 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker +version_added: 6.0.0 +short_description: MQ broker management +description: + - Create/update/delete a broker. + - Reboot a broker. +author: + - FCO (@fotto) +options: + broker_name: + description: + - The Name of the MQ broker to work on. + type: str + required: true + state: + description: + - "C(present): Create/update broker." + - "C(absent): Delete broker." + - "C(restarted): Reboot broker." + choices: [ 'present', 'absent', 'restarted' ] + default: present + type: str + deployment_mode: + description: + - Set broker deployment type. + - Can be used only during creation. + - Defaults to C(SINGLE_INSTANCE). + choices: [ 'SINGLE_INSTANCE', 'ACTIVE_STANDBY_MULTI_AZ', 'CLUSTER_MULTI_AZ' ] + type: str + use_aws_owned_key: + description: + - Must be set to C(false) if I(kms_key_id) is provided as well. + - Can be used only during creation. + - Defaults to C(true). + type: bool + kms_key_id: + description: + - Use referenced key to encrypt broker data at rest. + - Can be used only during creation. + type: str + engine_type: + description: + - Set broker engine type. + - Can be used only during creation. + - Defaults to C(ACTIVEMQ). + choices: [ 'ACTIVEMQ', 'RABBITMQ' ] + type: str + maintenance_window_start_time: + description: + - Set maintenance window for automatic minor upgrades. + - Can be used only during creation. 
+ - Not providing any value means "no maintenance window". + type: dict + publicly_accessible: + description: + - Allow/disallow public access. + - Can be used only during creation. + - Defaults to C(false). + type: bool + storage_type: + description: + - Set underlying storage type. + - Can be used only during creation. + - Defaults to C(EFS). + choices: [ 'EBS', 'EFS' ] + type: str + subnet_ids: + description: + - Defines where deploy broker instances to. + - Minimum required number depends on deployment type. + - Can be used only during creation. + type: list + elements: str + users: + description: + - This parameter allows to use a custom set of initial user(s). + - M(community.aws.mq_user) is the preferred way to manage (local) users + however a broker cannot be created without any user. + - If nothing is specified a default C(admin) user will be created along with brokers. + - Can be used only during creation. Use M(community.aws.mq_user) module for updates. + type: list + elements: dict + tags: + description: + - Tag newly created brokers. + - Can be used only during creation. + type: dict + authentication_strategy: + description: Choose between locally and remotely managed users. + choices: [ 'SIMPLE', 'LDAP' ] + type: str + auto_minor_version_upgrade: + description: Allow/disallow automatic minor version upgrades. + type: bool + default: true + engine_version: + description: + - Set engine version of broker. + - The special value C(latest) will pick the latest available version. + - The special value C(latest) is ignored on update. + type: str + host_instance_type: + description: Instance type of broker instances. + type: str + enable_audit_log: + description: Enable/disable to push audit logs to AWS CloudWatch. + type: bool + default: false + enable_general_log: + description: Enable/disable to push general logs to AWS CloudWatch. + type: bool + default: false + security_groups: + description: + - Associate security groups with broker. + - At least one must be provided during creation. 
+ type: list + elements: str + +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + + +EXAMPLES = r""" +- name: create broker (if missing) with minimal required parameters + community.aws.mq_broker: + broker_name: "{{ broker_name }}" + security_groups: + - sg_xxxxxxx + subnet_ids: + - subnet_xxx + - subnet_yyy + register: result +- set_fact: + broker_id: "{{ result.broker['BrokerId'] }}" +- name: use mq_broker_info to wait until broker is ready + community.aws.mq_broker_info: + broker_id: "{{ broker_id }}" + register: result + until: "result.broker['BrokerState'] == 'RUNNING'" + retries: 15 + delay: 60 +- name: create or update broker with almost all parameters set, including credentials + community.aws.mq_broker: + broker_name: "my_broker_2" + state: present + deployment_mode: 'ACTIVE_STANDBY_MULTI_AZ' + use_aws_owned_key: false + kms_key_id: 'my-precreated-key-id' + engine_type: 'ACTIVEMQ' + maintenance_window_start_time: + DayOfWeek: 'MONDAY' + TimeOfDay: '03:15' + TimeZone: 'Europe/Berlin' + publicly_accessible: true + storage_type: 'EFS' + security_groups: + - sg_xxxxxxx + subnet_ids: + - subnet_xxx + - subnet_yyy + users: + - Username: 'initial-user' + Password: 'plain-text-password' + ConsoleAccess: true + tags: + env: Test + creator: ansible + authentication_strategy: 'SIMPLE' + auto_minor_version_upgrade: true + engine_version: "5.15.13" + host_instance_type: 'mq.t3.micro' + enable_audit_log: true + enable_general_log: true +- name: reboot a broker + community.aws.mq_broker: + broker_name: "my_broker_2" + state: restarted +- name: delete a broker + community.aws.mq_broker: + broker_name: "my_broker_2" + state: absent +""" + +RETURN = r""" +broker: + description: + - "All API responses are converted to snake case except 'Tags'" + - "'state=present': API response of create_broker() or update_broker() call" + - "'state=absent': result of describe_broker() call before delete_broker() is triggered" + - "'state=restarted': result of describe_broker() after reboot has been triggered" + type: dict + returned: success +""" + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +PARAMS_MAP = { + "authentication_strategy": "AuthenticationStrategy", + "auto_minor_version_upgrade": "AutoMinorVersionUpgrade", + "broker_name": "BrokerName", + "deployment_mode": "DeploymentMode", + "use_aws_owned_key": "EncryptionOptions/UseAwsOwnedKey", + "kms_key_id": "EncryptionOptions/KmsKeyId", + "engine_type": "EngineType", + "engine_version": "EngineVersion", + "host_instance_type": "HostInstanceType", + "enable_audit_log": "Logs/Audit", + "enable_general_log": "Logs/General", + "maintenance_window_start_time": "MaintenanceWindowStartTime", + "publicly_accessible": "PubliclyAccessible", + "security_groups": "SecurityGroups", + "storage_type": "StorageType", + "subnet_ids": "SubnetIds", + "users": "Users", +} + + +DEFAULTS = { + "authentication_strategy": "SIMPLE", + "auto_minor_version_upgrade": False, + "deployment_mode": "SINGLE_INSTANCE", + "use_aws_owned_key": True, + "engine_type": "ACTIVEMQ", + "engine_version": "latest", + "host_instance_type": "mq.t3.micro", + "enable_audit_log": False, + "enable_general_log": False, + "publicly_accessible": False, + "storage_type": "EFS", +} + +CREATE_ONLY_PARAMS = [ + "deployment_mode",
"use_aws_owned_key", + "kms_key_id", + "engine_type", + "maintenance_window_start_time", + "publicly_accessible", + "storage_type", + "subnet_ids", + "users", + "tags", +] + + +def _set_kwarg(kwargs, key, value): + mapped_key = PARAMS_MAP[key] + if "/" in mapped_key: + key_list = mapped_key.split("/") + key_list.reverse() + else: + key_list = [mapped_key] + data = kwargs + while len(key_list) > 1: + this_key = key_list.pop() + if this_key not in data: + data[this_key] = {} + # + data = data[this_key] + data[key_list[0]] = value + + +def _fill_kwargs(module, apply_defaults=True, ignore_create_params=False): + kwargs = {} + if apply_defaults: + for p_name, p_value in DEFAULTS.items(): + _set_kwarg(kwargs, p_name, p_value) + for p_name in module.params: + if ignore_create_params and p_name in CREATE_ONLY_PARAMS: + # silently ignore CREATE_ONLY_PARAMS on update to + # make playbooks idempotent + continue + if p_name in PARAMS_MAP and module.params[p_name] is not None: + _set_kwarg(kwargs, p_name, module.params[p_name]) + else: + # ignore + pass + return kwargs + + +def __list_needs_change(current, desired): + if len(current) != len(desired): + return True + # equal length: + c_sorted = sorted(current) + d_sorted = sorted(desired) + for index, value in enumerate(current): + if value != desired[index]: + return True + # + return False + + +def __dict_needs_change(current, desired): + # values contained in 'current' but not specified in 'desired' are ignored + # value contained in 'desired' but not in 'current' (unsupported attributes) are ignored + for key in desired: + if key in current: + if desired[key] != current[key]: + return True + # + return False + + +def _needs_change(current, desired): + needs_change = False + for key in desired: + current_value = current[key] + desired_value = desired[key] + if isinstance(current_value, (int, str, bool)): + if current_value != desired_value: + needs_change = True + break + elif isinstance(current_value, list): + # assumption: all 'list' type settings we allow changes for have scalar values + if __list_needs_change(current_value, desired_value): + needs_change = True + break + elif isinstance(current_value, dict): + # assumption: all 'dict' type settings we allow changes for have scalar values + if __dict_needs_change(current_value, desired_value): + needs_change = True + break + else: + # unexpected type + needs_change = True + break + # + return needs_change + + +def get_latest_engine_version(conn, module, engine_type): + try: + response = conn.describe_broker_engine_types(EngineType=engine_type) + return response["BrokerEngineTypes"][0]["EngineVersions"][0]["Name"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list engine versions") + + +def get_broker_id(conn, module): + try: + broker_name = module.params["broker_name"] + broker_id = None + response = conn.list_brokers(MaxResults=100) + for broker in response["BrokerSummaries"]: + if broker["BrokerName"] == broker_name: + broker_id = broker["BrokerId"] + break + return broker_id + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list broker brokers.") + + +def get_broker_info(conn, module, broker_id): + try: + return conn.describe_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def reboot_broker(conn, module, broker_id): 
+ try: + return conn.reboot_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't reboot broker.") + + +def delete_broker(conn, module, broker_id): + try: + return conn.delete_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete broker.") + + +def create_broker(conn, module): + kwargs = _fill_kwargs(module) + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": + kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"]) + if kwargs["AuthenticationStrategy"] == "LDAP": + module.fail_json(msg="'AuthenticationStrategy=LDAP' not supported, yet") + if "Users" not in kwargs: + # add some stupid default (cannot create broker without any users) + kwargs["Users"] = [{"Username": "admin", "Password": "adminPassword", "ConsoleAccess": True, "Groups": []}] + if "EncryptionOptions" in kwargs and "UseAwsOwnedKey" in kwargs["EncryptionOptions"]: + kwargs["EncryptionOptions"]["UseAwsOwnedKey"] = False + # + if "SecurityGroups" not in kwargs or len(kwargs["SecurityGroups"]) == 0: + module.fail_json(msg="At least one security group must be specified on broker creation") + # + changed = True + result = conn.create_broker(**kwargs) + # + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed} + + +def update_broker(conn, module, broker_id): + kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True) + # replace name with id + broker_name = kwargs["BrokerName"] + del kwargs["BrokerName"] + kwargs["BrokerId"] = broker_id + # get current state for comparison: + api_result = get_broker_info(conn, module, broker_id) + if api_result["BrokerState"] != "RUNNING": + module.fail_json( + msg=f"Cannot trigger update while broker ({broker_id}) is in state {api_result['BrokerState']}", + ) + # engine version of 'latest' is taken as "keep current one" + # i.e. 
do not request upgrade on playbook rerun + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": + kwargs["EngineVersion"] = api_result["EngineVersion"] + result = {"broker_id": broker_id, "broker_name": broker_name} + changed = False + if _needs_change(api_result, kwargs): + changed = True + if not module.check_mode: + api_result = conn.update_broker(**kwargs) + # + # + return {"broker": result, "changed": changed} + + +def ensure_absent(conn, module): + result = {"broker_name": module.params["broker_name"], "broker_id": None} + if module.check_mode: + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": True} + broker_id = get_broker_id(conn, module) + result["broker_id"] = broker_id + + if not broker_id: + # silently ignore delete of unknown broker (to make it idempotent) + return {"broker": result, "changed": False} + + try: + # check for pending delete (small race condition possible here) + api_result = get_broker_info(conn, module, broker_id) + if api_result["BrokerState"] == "DELETION_IN_PROGRESS": + return {"broker": result, "changed": False} + delete_broker(conn, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + return {"broker": result, "changed": True} + + +def ensure_present(conn, module): + if module.check_mode: + return {"broker": {"broker_arn": "fakeArn", "broker_id": "fakeId"}, "changed": True} + + broker_id = get_broker_id(conn, module) + if broker_id: + return update_broker(conn, module, broker_id) + + return create_broker(conn, module) + + +def main(): + argument_spec = dict( + broker_name=dict(required=True, type="str"), + state=dict(default="present", choices=["present", "absent", "restarted"]), + # parameters only allowed on create + deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]), + use_aws_owned_key=dict(type="bool"), + kms_key_id=dict(type="str"), + engine_type=dict(choices=["ACTIVEMQ", "RABBITMQ"], type="str"), + maintenance_window_start_time=dict(type="dict"), + publicly_accessible=dict(type="bool"), + storage_type=dict(choices=["EBS", "EFS"]), + subnet_ids=dict(type="list", elements="str"), + users=dict(type="list", elements="dict"), + tags=dict(type="dict"), + # parameters allowed on update as well + authentication_strategy=dict(choices=["SIMPLE", "LDAP"]), + auto_minor_version_upgrade=dict(default=True, type="bool"), + engine_version=dict(type="str"), + host_instance_type=dict(type="str"), + enable_audit_log=dict(default=False, type="bool"), + enable_general_log=dict(default=False, type="bool"), + security_groups=dict(type="list", elements="str"), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + if module.params["state"] == "present": + try: + compound_result = ensure_present(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(**compound_result) + + if module.params["state"] == "absent": + try: + compound_result = ensure_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(**compound_result) + + if module.params["state"] == "restarted": + broker_id = get_broker_id(connection, module) + if module.check_mode: + module.exit_json(broker={"broker_id": broker_id if broker_id else "fakeId"}, changed=True) + if not broker_id: + module.fail_json( + msg=f"Cannot find broker with name {module.params['broker_name']}.", + )
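The C(restarted) branch only triggers the reboot; as the EXAMPLES above show with mq_broker_info, callers typically poll until the broker reports C(RUNNING) again. An illustrative wait helper (not part of the module; timeout and delay values are assumptions):

import time

def wait_for_running(mq_client, broker_id, timeout=900, delay=60):
    # Poll describe_broker() until the broker leaves REBOOT_IN_PROGRESS.
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = mq_client.describe_broker(BrokerId=broker_id)["BrokerState"]
        if state == "RUNNING":
            return True
        time.sleep(delay)
    return False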
+ try: + changed = True + if not module.check_mode: + reboot_broker(connection, module, broker_id) + # + result = get_broker_info(connection, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + module.exit_json(broker=result, changed=changed) + + +if __name__ == "__main__": + main() diff --git a/mq_broker_config.py b/mq_broker_config.py new file mode 100644 index 00000000000..e530af47384 --- /dev/null +++ b/mq_broker_config.py @@ -0,0 +1,222 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker_config +version_added: 6.0.0 +short_description: Update Amazon MQ broker configuration +description: + - Update configuration for an MQ broker. + - If the new configuration differs from the current one, a new configuration + is created and the new version is assigned to the broker. + - Optionally allows broker reboot to make changes effective immediately. +author: + - FCO (@fotto) +options: + broker_id: + description: + - The ID of the MQ broker to work on. + type: str + required: true + config_xml: + description: + - The broker configuration to apply, in XML format. + type: str + required: true + config_description: + description: + - Description to set on new configuration revision. + type: str + reboot: + description: + - Reboot broker after new config has been applied. + type: bool + default: false +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + +EXAMPLES = r""" +- name: send new XML config to broker relying on credentials from environment + community.aws.mq_broker_config: + broker_id: "aws-mq-broker-id" + config_xml: "{{ lookup('file', 'activemq.xml' )}}" + region: "{{ aws_region }}" +- name: send new XML config to broker and reboot if necessary + community.aws.mq_broker_config: + broker_id: "aws-mq-broker-id" + config_xml: "{{ lookup('file', 'activemq2.xml' )}}" + reboot: true +- name: send new broker config and set all credentials explicitly + community.aws.mq_broker_config: + broker_id: "{{ broker_id }}" + config_xml: "{{ lookup('file', 'activemq3.xml')}}" + config_description: "custom description for configuration object" + register: result +""" + +RETURN = r""" +broker: + description: API response of describe_broker() converted to snake case after changes have been applied. + type: dict + returned: success +configuration: + description: Details about new configuration object. + returned: I(changed=true) + type: complex + contains: + id: + description: Configuration ID of broker configuration. + type: str + example: c-386541b8-3139-42c2-9c2c-a4c267c1714f + revision: + description: Revision of the configuration that will be active after next reboot.
+ type: int + example: 4 +""" + +import base64 +import re + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +DEFAULTS = {"reboot": False} +FULL_DEBUG = False + + +def is_same_config(old, new): + # we do a simple comparison here: strip whitespace and compare the rest + # TODO: use same XML normalizer on new as used by AWS before comparing strings + old_stripped = re.sub(r"\s+", " ", old, flags=re.S).rstrip() + new_stripped = re.sub(r"\s+", " ", new, flags=re.S).rstrip() + return old_stripped == new_stripped + + +def get_broker_info(conn, module): + try: + return conn.describe_broker(BrokerId=module.params["broker_id"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + return { + "broker_id": module.params["broker_id"], + } + module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def get_current_configuration(conn, module, cfg_id, cfg_revision): + try: + return conn.describe_configuration_revision(ConfigurationId=cfg_id, ConfigurationRevision=str(cfg_revision)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get configuration revision.") + + +def create_and_assign_config(conn, module, broker_id, cfg_id, cfg_xml_encoded): + kwargs = {"ConfigurationId": cfg_id, "Data": cfg_xml_encoded} + if "config_description" in module.params and module.params["config_description"]: + kwargs["Description"] = module.params["config_description"] + else: + kwargs["Description"] = "Updated through amazon.aws.mq_broker_config ansible module" + # + try: + c_response = conn.update_configuration(**kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create new configuration revision.") + # + new_config_revision = c_response["LatestRevision"]["Revision"] + try: + b_response = conn.update_broker( + BrokerId=broker_id, Configuration={"Id": cfg_id, "Revision": new_config_revision} + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't assign new configuration revision to broker.") + # + return (c_response, b_response) + + +def reboot_broker(conn, module, broker_id): + try: + return conn.reboot_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't reboot broker.")
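For illustration only, a minimal standalone sketch (made-up XML, not module code) of what the whitespace-only comparison in is_same_config() tolerates and what it does not:

import re

def normalize(xml):
    # mirrors is_same_config(): collapse whitespace runs, trim the tail
    return re.sub(r"\s+", " ", xml, flags=re.S).rstrip()

# re-indented but otherwise identical XML counts as "same" ...
assert normalize("<broker>\n  <plugins/>\n</broker>") == normalize("<broker> <plugins/> </broker>")
# ... while semantically equal XML with reordered attributes does not,
# which is what the TODO about reusing the AWS-side normalizer refers to
assert normalize('<a x="1" y="2"/>') != normalize('<a y="2" x="1"/>')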
+ + +def ensure_config(conn, module): + broker_id = module.params["broker_id"] + broker_info = get_broker_info(conn, module) + changed = False + if module.check_mode and "Configurations" not in broker_info: + # no result from get_broker_info(); use requested config + current_cfg_decoded = module.params["config_xml"] + else: + current_cfg = broker_info["Configurations"]["Current"] + if "Pending" in broker_info["Configurations"]: + current_cfg = broker_info["Configurations"]["Pending"] + current_cfg_encoded = get_current_configuration(conn, module, current_cfg["Id"], current_cfg["Revision"])[ + "Data" + ] + current_cfg_decoded = base64.b64decode(current_cfg_encoded.encode()).decode() + + if is_same_config(current_cfg_decoded, module.params["config_xml"]): + return {"changed": changed, "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"])} + + (c_response, _b_response) = (None, None) + if not module.check_mode: + new_cfg_encoded = base64.b64encode(module.params["config_xml"].encode()).decode() + (c_response, _b_response) = create_and_assign_config( + conn, module, broker_id, current_cfg["Id"], new_cfg_encoded + ) + # + changed = True + + if changed and module.params["reboot"] and not module.check_mode: + reboot_broker(conn, module, broker_id) + # + broker_info = get_broker_info(conn, module) + return_struct = { + "changed": changed, + "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"]), + } + if c_response: + return_struct["configuration"] = {"id": c_response["Id"], "revision": c_response["LatestRevision"]["Revision"]} + if FULL_DEBUG: + return_struct["old_config_xml"] = base64.b64decode(current_cfg_encoded) + return_struct["new_config_xml"] = module.params["config_xml"] + return_struct["old_config_revision"] = current_cfg["Revision"] + return return_struct + + +def main(): + argument_spec = dict( + broker_id=dict(required=True, type="str"), + config_xml=dict(required=True, type="str"), + config_description=dict(required=False, type="str"), + reboot=dict(required=False, type="bool", default=DEFAULTS["reboot"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + try: + result = ensure_config(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/mq_broker_info.py b/mq_broker_info.py new file mode 100644 index 00000000000..65a3524db41 --- /dev/null +++ b/mq_broker_info.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_broker_info +version_added: 6.0.0 +short_description: Retrieve MQ Broker details +description: + - Get details about a broker. +author: + - FCO (@fotto) +options: + broker_id: + description: Get details for broker with specified ID. + type: str + broker_name: + description: + - Get details for broker with specified Name. + - Is ignored if I(broker_id) is specified. + type: str +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + + +EXAMPLES = r""" +- name: get current broker settings by id + amazon.aws.mq_broker_info: + broker_id: "aws-mq-broker-id" + register: broker_info +- name: get current broker settings by name setting all credential parameters explicitly + amazon.aws.mq_broker_info: + broker_name: "aws-mq-broker-name" + register: broker_info +""" + +RETURN = r""" +broker: + description: API response of describe_broker() converted to snake case.
+ type: dict + returned: success +""" + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def get_broker_id(conn, module): + try: + broker_name = module.params["broker_name"] + broker_id = None + response = conn.list_brokers(MaxResults=100) + for broker in response["BrokerSummaries"]: + if broker["BrokerName"] == broker_name: + broker_id = broker["BrokerId"] + break + return broker_id + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list brokers.") + + +def get_broker_info(conn, module, broker_id): + try: + return conn.describe_broker(BrokerId=broker_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + module.exit_json(broker={"broker_id": broker_id, "broker_name": "fakeName"}) + else: + module.fail_json_aws(e, msg="Couldn't get broker details.") + + +def main(): + argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str")) + required_one_of = ( + ("broker_name", "broker_id",), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=required_one_of, + supports_check_mode=True, + ) + broker_id = module.params["broker_id"] + broker_name = module.params["broker_name"] + + connection = module.client("mq") + + try: + if not broker_id: + broker_id = get_broker_id(connection, module) + if not broker_id: + if module.check_mode: + module.exit_json( + broker={"broker_id": "fakeId", "broker_name": broker_name if broker_name else "fakeName"} + ) + result = get_broker_info(connection, module, broker_id) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + # + module.exit_json(broker=camel_dict_to_snake_dict(result, ignore_list=["Tags"])) + + +if __name__ == "__main__": + main() diff --git a/mq_user.py b/mq_user.py new file mode 100644 index 00000000000..00d8adfd58f --- /dev/null +++ b/mq_user.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_user +version_added: 6.0.0 +short_description: Manage users in existing Amazon MQ broker +description: + - Manage Amazon MQ users. + - Pending changes are taken into account for idempotency. +author: + - FCO (@fotto) +options: + broker_id: + description: + - The ID of the MQ broker to work on. + type: str + required: true + username: + description: + - The name of the user to create/update/delete. + type: str + required: true + state: + description: + - Create/Update vs Delete of user. + default: present + choices: [ 'present', 'absent' ] + type: str + console_access: + description: + - Whether the user can access the MQ Console. + - Defaults to C(false) on creation. + type: bool + groups: + description: + - Set group memberships for user. + - Defaults to C([]) on creation. + type: list + elements: str + password: + description: + - Set password for user. + - Defaults to a random password on creation. + - Ignored unless I(allow_pw_update=true). + type: str + allow_pw_update: + description: + - When I(allow_pw_update=true) and I(password) is set, the password + will always be updated for the user.
+ default: false + type: bool +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + +EXAMPLES = r""" +- name: create/update user - set provided password if user doesn't exist yet + amazon.aws.mq_user: + state: present + broker_id: "aws-mq-broker-id" + username: "sample_user1" + console_access: false + groups: [ "g1", "g2" ] + password: "plain-text-password" +- name: allow console access and update group list - relying on default state + amazon.aws.mq_user: + broker_id: "aws-mq-broker-id" + username: "sample_user1" + region: "{{ aws_region }}" + console_access: true + groups: [ "g1", "g2", "g3" ] +- name: remove user - setting all credentials explicitly + amazon.aws.mq_user: + state: absent + broker_id: "aws-mq-broker-id" + username: "other_user" +""" + +RETURN = r""" +user: + description: + - just echoes the username + - "only present when state=present" + type: str + returned: success +""" + +import secrets + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + +CREATE_DEFAULTS = { + "console_access": False, + "groups": [], +} + + +def _group_change_required(user_response, requested_groups): + current_groups = [] + if "Groups" in user_response: + current_groups = user_response["Groups"] + elif "Pending" in user_response: + # to support automatic testing without broker reboot + current_groups = user_response["Pending"]["Groups"] + if len(current_groups) != len(requested_groups): + return True + if len(current_groups) != len(set(current_groups) & set(requested_groups)): + return True + # + return False + + +def _console_access_change_required(user_response, requested_boolean): + current_boolean = CREATE_DEFAULTS["console_access"] + if "ConsoleAccess" in user_response: + current_boolean = user_response["ConsoleAccess"] + elif "Pending" in user_response: + # to support automatic testing without broker reboot + current_boolean = user_response["Pending"]["ConsoleAccess"] + # + return current_boolean != requested_boolean + + +def generate_password(): + return secrets.token_hex(20) + + +# returns API response object +def _create_user(conn, module): + kwargs = {"BrokerId": module.params["broker_id"], "Username": module.params["username"]} + if "groups" in module.params and module.params["groups"] is not None: + kwargs["Groups"] = module.params["groups"] + else: + kwargs["Groups"] = CREATE_DEFAULTS["groups"] + if "password" in module.params and module.params["password"]: + kwargs["Password"] = module.params["password"] + else: + kwargs["Password"] = generate_password() + if "console_access" in module.params and module.params["console_access"] is not None: + kwargs["ConsoleAccess"] = module.params["console_access"] + else: + kwargs["ConsoleAccess"] = CREATE_DEFAULTS["console_access"] + try: + response = conn.create_user(**kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create user") + return response + + +# returns API response object +def _update_user(conn, module, kwargs): + try: + response = conn.update_user(**kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't
update user") + return response + + +def get_matching_user(conn, module, broker_id, username): + try: + response = conn.describe_user(BrokerId=broker_id, Username=username) + except is_boto3_error_code("NotFoundException"): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get user details") + return response + + +def ensure_user_present(conn, module): + user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) + changed = False + + if user is None: + if not module.check_mode: + _response = _create_user(conn, module) + changed = True + else: + kwargs = {} + if "groups" in module.params and module.params["groups"] is not None: + if _group_change_required(user, module.params["groups"]): + kwargs["Groups"] = module.params["groups"] + if "console_access" in module.params and module.params["console_access"] is not None: + if _console_access_change_required(user, module.params["console_access"]): + kwargs["ConsoleAccess"] = module.params["console_access"] + if "password" in module.params and module.params["password"]: + if "allow_pw_update" in module.params and module.params["allow_pw_update"]: + kwargs["Password"] = module.params["password"] + if len(kwargs) == 0: + changed = False + else: + if not module.check_mode: + kwargs["BrokerId"] = module.params["broker_id"] + kwargs["Username"] = module.params["username"] + response = _update_user(conn, module, kwargs) + # + changed = True + # + user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) + + return {"changed": changed, "user": camel_dict_to_snake_dict(user, ignore_list=["Tags"])} + + +def ensure_user_absent(conn, module): + user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) + result = {"changed": False} + if user is None: + return result + # better support for testing + if "Pending" in user and "PendingChange" in user["Pending"] and user["Pending"]["PendingChange"] == "DELETE": + return result + + result = {"changed": True} + if module.check_mode: + return result + + try: + conn.delete_user(BrokerId=user["BrokerId"], Username=user["Username"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete user") + + return result + + +def main(): + argument_spec = dict( + broker_id=dict(required=True, type="str"), + username=dict(required=True, type="str"), + console_access=dict(required=False, type="bool"), + groups=dict(required=False, type="list", elements="str"), + password=dict(required=False, type="str", no_log=True), + allow_pw_update=dict(default=False, required=False, type="bool"), + state=dict(default="present", choices=["present", "absent"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + state = module.params.get("state") + + try: + if state == "present": + result = ensure_user_present(connection, module) + elif state == "absent": + result = ensure_user_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/mq_user_info.py b/mq_user_info.py new file mode 100644 index 00000000000..bd6b09c4eb0 --- /dev/null +++ b/mq_user_info.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: mq_user_info +version_added: 6.0.0 +short_description: List users of an Amazon MQ broker +description: + - List users for the specified broker ID. + - Pending creations and deletions can be skipped by options. +author: + - FCO (@fotto) +options: + broker_id: + description: + - The ID of the MQ broker to work on. + type: str + required: true + max_results: + description: + - The maximum number of results to return. + type: int + default: 100 + skip_pending_create: + description: + - Will skip pending creates from the result set. + type: bool + default: false + skip_pending_delete: + description: + - Will skip pending deletes from the result set. + type: bool + default: false + as_dict: + description: + - Convert result into lookup table by username. + type: bool + default: false + +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules +""" + + +EXAMPLES = r""" +- name: get all users as list - relying on environment for API credentials + amazon.aws.mq_user_info: + broker_id: "aws-mq-broker-id" + max_results: 50 + register: result +- name: get users as dict - explicitly specifying all credentials + amazon.aws.mq_user_info: + broker_id: "aws-mq-broker-id" + register: result +- name: get list of users to decide which may need to be deleted + amazon.aws.mq_user_info: + broker_id: "aws-mq-broker-id" + skip_pending_delete: true +- name: get list of users to decide which may need to be created + amazon.aws.mq_user_info: + broker_id: "aws-mq-broker-id" + skip_pending_create: true +""" + +RETURN = r""" +users: + type: dict + returned: success + description: + - dict key is username + - each entry is the record for a user as returned by API but converted to snake case +""" + +try: + import botocore +except ImportError: + # handled by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +DEFAULTS = {"max_results": 100, "skip_pending_create": False, "skip_pending_delete": False, "as_dict": False} + + +def get_user_info(conn, module): + try: + response = conn.list_users(BrokerId=module.params["broker_id"], MaxResults=module.params["max_results"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if module.check_mode: + # return empty set for unknown broker in check mode + if module.params["as_dict"]: + return {} + return [] + module.fail_json_aws(e, msg="Failed to describe users") + # + if not module.params["skip_pending_create"] and not module.params["skip_pending_delete"]: + # we can simply return the sub-object from the response + records = response["Users"] + else: + records = [] + for record in response["Users"]: + if "PendingChange" in record: + if record["PendingChange"] == "CREATE" and module.params["skip_pending_create"]: + continue + if record["PendingChange"] == "DELETE" and module.params["skip_pending_delete"]: + continue + # + records.append(record) + # + if module.params["as_dict"]: + user_records = {} + for record in records: + user_records[record["Username"]] = record + # + return camel_dict_to_snake_dict(user_records, ignore_list=["Tags"]) + + return camel_dict_to_snake_dict(records, ignore_list=["Tags"])
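For illustration only, a standalone sketch with hypothetical usernames of the lookup table get_user_info() builds when as_dict=true:

# Hypothetical list_users() records, trimmed to the fields relevant here.
records = [
    {"Username": "app_user", "PendingChange": "CREATE"},
    {"Username": "admin_user"},
]

user_records = {record["Username"]: record for record in records}
assert user_records["app_user"]["PendingChange"] == "CREATE"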
+ + +def main(): + argument_spec = dict( + broker_id=dict(required=True, type="str"), + max_results=dict(required=False, type="int", default=DEFAULTS["max_results"]), + skip_pending_create=dict(required=False, type="bool", default=DEFAULTS["skip_pending_create"]), + skip_pending_delete=dict(required=False, type="bool", default=DEFAULTS["skip_pending_delete"]), + as_dict=dict(required=False, type="bool", default=DEFAULTS["as_dict"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("mq") + + try: + user_records = get_user_info(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(users=user_records) + + +if __name__ == "__main__": + main() From 077b27e98a7a2a263db7885b92e52adc2ff22fcd Mon Sep 17 00:00:00 2001 From: Marco Braga Date: Thu, 30 Mar 2023 10:05:34 -0300 Subject: [PATCH 645/683] feat(module/vpc-cagw): Add Carrier Gateway modules (#1353) feat(module/vpc-cagw): Add Carrier Gateway modules SUMMARY New modules to manage VPC Carrier Gateways. ISSUE TYPE New Module Pull Request COMPONENT NAME modules (new): ec2_carrier_gateway ec2_carrier_gateway_info ADDITIONAL INFORMATION $ ansible localhost -m ec2_vpc_cagw_info localhost | SUCCESS => { "carrier_gateways": [ { "carrier_gateway_id": "cagw-037df45cae5362d59", "tags": { "Name": "test1-54dsl-vpc-cagw" }, "vpc_id": "vpc-069cabb60c7e7fc6d" } ], "changed": false } $ ansible localhost -m ec2_carrier_gateway -a "state=absent vpc_id=vpc-069cabb60c7e7fc6d carrier_gateway_id=cagw-037df45cae5362d59" localhost | CHANGED => { "changed": true } $ ansible localhost -m ec2_carrier_gateway_info localhost | SUCCESS => { "carrier_gateways": [], "changed": false } $ ansible localhost -m ec2_carrier_gateway -a "vpc_id=vpc-069cabb60c7e7fc6d" localhost | CHANGED => { "carrier_gateway_id": "cagw-095f998ebdcb5ef86", "changed": true, "tags": {}, "vpc_id": "vpc-069cabb60c7e7fc6d" } $ ansible localhost -m ec2_carrier_gateway_info localhost | SUCCESS => { "carrier_gateways": [ { "carrier_gateway_id": "cagw-095f998ebdcb5ef86", "tags": {}, "vpc_id": "vpc-069cabb60c7e7fc6d" } ], "changed": false } Reviewed-by: Mark Chappell Reviewed-by: Marco Braga Reviewed-by: Markus Bergholz --- ec2_carrier_gateway.py | 261 ++++++++++++++++++++++++++++++++++++ ec2_carrier_gateway_info.py | 159 ++++++++++++++++++++++ 2 files changed, 420 insertions(+) create mode 100644 ec2_carrier_gateway.py create mode 100644 ec2_carrier_gateway_info.py diff --git a/ec2_carrier_gateway.py b/ec2_carrier_gateway.py new file mode 100644 index 00000000000..3458170e393 --- /dev/null +++ b/ec2_carrier_gateway.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_carrier_gateway +version_added: 6.0.0 +short_description: Manage an AWS VPC Carrier gateway +description: + - Manage an AWS VPC Carrier gateway. +author: + - "Marco Braga (@mtulio)" +options: + vpc_id: + description: + - The VPC ID for the VPC in which to manage the Carrier Gateway. + required: true + type: str + carrier_gateway_id: + description: + - The ID of the Carrier Gateway to manage. + required: false + type: str + state: + description: + - Create or terminate the Carrier Gateway.
+ default: present + choices: [ 'present', 'absent' ] + type: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Ensure that the VPC has a Carrier Gateway. +# The Carrier Gateway ID can be accessed via {{ cagw.carrier_gateway_id }} for use in setting up Route tables etc. +- name: Create Carrier gateway + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + state: present + register: cagw + +- name: Create Carrier gateway with tags + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + state: present + tags: + Tag1: tag1 + Tag2: tag2 + register: cagw + +- name: Delete Carrier gateway + community.aws.ec2_carrier_gateway: + vpc_id: vpc-abcdefgh + carrier_gateway_id: "cagw-123" + state: absent + register: vpc_cagw_delete +""" + +RETURN = r""" +changed: + description: If any changes have been made to the Carrier Gateway. + type: bool + returned: always + sample: + changed: false +carrier_gateway_id: + description: The unique identifier for the Carrier Gateway. + type: str + returned: I(state=present) + sample: + carrier_gateway_id: "cagw-XXXXXXXX" +tags: + description: The tags associated with the Carrier Gateway. + type: dict + returned: I(state=present) + sample: + tags: + "Ansible": "Test" +vpc_id: + description: The VPC ID associated with the Carrier Gateway. + type: str + returned: I(state=present) + sample: + vpc_id: "vpc-XXXXXXXX" +""" + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +@AWSRetry.jittered_backoff(retries=10, delay=10) +def describe_cagws_with_backoff(connection, **params): + paginator = connection.get_paginator("describe_carrier_gateways") + return paginator.paginate(**params).build_full_result()["CarrierGateways"] + + +class AnsibleEc2Cagw: + def __init__(self, module, results): + self._module = module + self._results = results + self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + self._check_mode = self._module.check_mode + + def process(self): + vpc_id = self._module.params.get("vpc_id") + state = self._module.params.get("state", "present") + tags = self._module.params.get("tags") + purge_tags = self._module.params.get("purge_tags") + + if state == "present": + self.ensure_cagw_present(vpc_id, tags, purge_tags) + elif state == "absent": + self.ensure_cagw_absent(vpc_id) + + def get_matching_cagw(self, vpc_id, carrier_gateway_id=None): + """ + Returns the carrier gateway found. + Parameters: + vpc_id (str): VPC ID + carrier_gateway_id (str): Carrier Gateway ID, if specified + Returns: + cagw (dict): dict of cagw found, None if none found + """ + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) + try: + if not carrier_gateway_id: + cagws = describe_cagws_with_backoff( + self._connection, + Filters=filters, + ) + else: + cagws = describe_cagws_with_backoff( + self._connection, + CarrierGatewayIds=[carrier_gateway_id], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e) + + cagw = None + if len(cagws) > 1: + self._module.fail_json( + msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting" + ) + elif cagws: + cagw = camel_dict_to_snake_dict(cagws[0]) + + return cagw + + @staticmethod + def get_cagw_info(cagw, vpc_id): + return { + "carrier_gateway_id": cagw["carrier_gateway_id"], + "tags": boto3_tag_list_to_ansible_dict(cagw["tags"]), + "vpc_id": vpc_id, + } + + def ensure_cagw_absent(self, vpc_id): + cagw = self.get_matching_cagw(vpc_id) + if cagw is None: + return self._results + + if self._check_mode: + self._results["changed"] = True + return self._results + + try: + self._results["changed"] = True + self._connection.delete_carrier_gateway( + aws_retry=True, + CarrierGatewayId=cagw["carrier_gateway_id"], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to delete Carrier Gateway") + + return self._results + + def ensure_cagw_present(self, vpc_id, tags, purge_tags): + cagw = self.get_matching_cagw(vpc_id) + + if cagw is None: + if self._check_mode: + self._results["changed"] = True + self._results["carrier_gateway_id"] = None + return self._results + + try: + response = self._connection.create_carrier_gateway(VpcId=vpc_id, aws_retry=True) + cagw = camel_dict_to_snake_dict(response["CarrierGateway"]) + self._results["changed"] = True + except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway.") as e: + self._module.fail_json(msg="You must be opted into a wavelength zone to create a carrier gateway") + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="No Carrier Gateway exists.") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to create Carrier Gateway") + + # Modify tags + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + cagw["carrier_gateway_id"], + resource_type="carrier-gateway", + tags=tags, + purge_tags=purge_tags, + retry_codes="InvalidCarrierGatewayID.NotFound", + ) + + # Update cagw + cagw = self.get_matching_cagw(vpc_id, carrier_gateway_id=cagw["carrier_gateway_id"]) + cagw_info = self.get_cagw_info(cagw, vpc_id) + self._results.update(cagw_info) + + return self._results + + +def main(): + argument_spec = dict( + carrier_gateway_id=dict(required=False), + vpc_id=dict(required=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=[["vpc_id", "carrier_gateway_id"]], + supports_check_mode=True, + ) + results = dict( + changed=False, + ) + cagw_manager = AnsibleEc2Cagw(module=module, results=results) + cagw_manager.process() + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/ec2_carrier_gateway_info.py b/ec2_carrier_gateway_info.py new file mode 100644 index 00000000000..43d77d59aa6 --- /dev/null +++ b/ec2_carrier_gateway_info.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_carrier_gateway_info +version_added: 6.0.0 +short_description: Gather information about carrier gateways in AWS +description: + - Gather information about carrier gateways in AWS. +author: + - "Marco Braga (@mtulio)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCarrierGateways.html) for possible filters. + required: false + default: {} + type: dict + carrier_gateway_ids: + description: + - Get details of a specific Carrier Gateway by ID. + required: false + type: list + elements: str
+extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all Carrier Gateways for an account or profile + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + register: cagw_info + +- name: Gather information about a filtered list of Carrier Gateways + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + filters: + "tag:Name": "cagw-123" + register: cagw_info + +- name: Gather information about a specific carrier gateway by CarrierGatewayId + community.aws.ec2_carrier_gateway_info: + region: ap-southeast-2 + carrier_gateway_ids: cagw-c1231234 + register: cagw_info +""" + +RETURN = r""" +changed: + description: True if listing the carrier gateways succeeds. + type: bool + returned: always + sample: false +carrier_gateways: + description: The carrier gateways for the account. + returned: always + type: complex + contains: + vpc_id: + description: The ID of the VPC. + returned: I(state=present) + type: str + sample: vpc-02123b67 + carrier_gateway_id: + description: The ID of the carrier gateway. + returned: I(state=present) + type: str + sample: cagw-2123634d + tags: + description: Any tags assigned to the carrier gateway.
+ returned: I(state=present) + type: dict + sample: + tags: + "Ansible": "Test" +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +def get_carrier_gateway_info(carrier_gateway): + tags = boto3_tag_list_to_ansible_dict(carrier_gateway["Tags"]) + ignore_list = [] + carrier_gateway_info = { + "CarrierGatewayId": carrier_gateway["CarrierGatewayId"], + "VpcId": carrier_gateway["VpcId"], + "Tags": tags, + } + + carrier_gateway_info = camel_dict_to_snake_dict(carrier_gateway_info, ignore_list=ignore_list) + return carrier_gateway_info + + +def list_carrier_gateways(connection, module): + params = dict() + + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("carrier_gateway_ids"): + params["CarrierGatewayIds"] = module.params.get("carrier_gateway_ids") + + try: + all_carrier_gateways = connection.describe_carrier_gateways(aws_retry=True, **params) + except is_boto3_error_code("InvalidCarrierGatewayID.NotFound"): + module.fail_json(msg="CarrierGateway not found") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Unable to describe carrier gateways") + + return [get_carrier_gateway_info(cagw) for cagw in all_carrier_gateways["CarrierGateways"]] + + +def main(): + argument_spec = dict( + carrier_gateway_ids=dict(default=None, elements="str", type="list"), + filters=dict(default={}, type="dict"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Validate Requirements + try: + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + results = list_carrier_gateways(connection, module) + + module.exit_json(carrier_gateways=results) + + +if __name__ == "__main__": + main() From 6736960e4831ca03e9d45de6b36dd61d3f76bfd7 Mon Sep 17 00:00:00 2001 From: Ivan Chekaldin <39010411+ichekaldin@users.noreply.github.com> Date: Mon, 3 Apr 2023 04:17:23 -0400 Subject: [PATCH 646/683] config_rule - Fix Idempotency by Ignoring `EvaluationModes` Parameter (#1757) config_rule - Fix Idempotency by Ignoring `EvaluationModes` Parameter SUMMARY config_rule module currently always returns changed = True. I believe this is due to EvaluationModes parameter recently added to describe_config_rules method output. ISSUE TYPE Bugfix Pull Request COMPONENT NAME config_rule ADDITIONAL INFORMATION Example configuration: - community.aws.config_rule: name: cloudwatch-log-group-encrypted description: Checks if a log group in Amazon CloudWatch Logs is encrypted with a AWS Key Management Service (KMS) managed Customer Master Keys (CMK).
source: identifier: CLOUDWATCH_LOG_GROUP_ENCRYPTED owner: AWS Expected result: first execution returns changed = True, subsequent executions return changed = False. Current behavior: every execution returns changed = True. This is because update_resource method ends up comparing: {'ConfigRuleName': 'cloudwatch-log-group-encrypted', 'Description': 'Checks if a log group in Amazon CloudWatch Logs is encrypted with a AWS Key Management Service (KMS) managed Customer Master Keys (CMK).', 'Source': {'Owner': 'AWS', 'SourceIdentifier': 'CLOUDWATCH_LOG_GROUP_ENCRYPTED'}, 'ConfigRuleState': 'ACTIVE'} with: {'ConfigRuleName': 'cloudwatch-log-group-encrypted', 'Description': 'Checks if a log group in Amazon CloudWatch Logs is encrypted with a AWS Key Management Service (KMS) managed Customer Master Keys (CMK).', 'Source': {'Owner': 'AWS', 'SourceIdentifier': 'CLOUDWATCH_LOG_GROUP_ENCRYPTED'}, 'ConfigRuleState': 'ACTIVE', 'EvaluationModes': [{'Mode': 'DETECTIVE'}]} Reviewed-by: Markus Bergholz --- config_rule.py | 1 + 1 file changed, 1 insertion(+) diff --git a/config_rule.py b/config_rule.py index 4fd3103931f..cae18b2a0a4 100644 --- a/config_rule.py +++ b/config_rule.py @@ -151,6 +151,7 @@ def update_resource(client, module, params, result): del current_params['ConfigRules'][0]['ConfigRuleArn'] del current_params['ConfigRules'][0]['ConfigRuleId'] + del current_params['ConfigRules'][0]['EvaluationModes'] if params != current_params['ConfigRules'][0]: try: From ecfc0d6c1f4eaeae045aa1967b639bf70ea76a56 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 12 Apr 2023 15:45:13 +0800 Subject: [PATCH 647/683] Add version_description to ec2_launch_template (#1763) Add version_description to ec2_launch_template SUMMARY Add version_description to ec2_launch_template module, which allows users to update the VersionDescription of a launch template. Fix #1762 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_launch_template ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz --- ec2_launch_template.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 17f345a2f7e..b807d3aa09f 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -38,6 +38,12 @@ - Which version should be the default when users spin up new instances based on this template? By default, the latest version will be made the default. type: str default: latest + version_description: + version_added: 5.5.0 + description: + - The description of a launch template version. + default: "" + type: str state: description: - Whether the launch template should exist or not.
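For illustration only, a standalone sketch with hypothetical values of the unchanged-detection this patch introduces, including why most_recent.get("VersionDescription", "") needs a default:

# Hypothetical stand-ins for the module's state.
most_recent = {
    "VersionNumber": 3,
    "LaunchTemplateData": {"InstanceType": "t3.micro"},
    "VersionDescription": "tuned for staging",
}
lt_data = {"InstanceType": "t3.micro"}
version_description = "tuned for staging"  # hypothetical module parameter

unchanged = (
    lt_data == most_recent["LaunchTemplateData"]
    # .get() with a default: versions created before this feature existed
    # may carry no VersionDescription key at all
    and version_description == most_recent.get("VersionDescription", "")
)
assert unchanged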
@@ -576,8 +582,10 @@ def create_or_update(module, template_options): template, template_versions = existing_templates(module) out['changed'] = True elif template and template_versions: - most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1] - if lt_data == most_recent['LaunchTemplateData']: + most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] + if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( + "VersionDescription", "" + ): out['changed'] = False return out try: @@ -586,6 +594,7 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) elif module.params.get('source_version') == 'latest': @@ -593,7 +602,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(most_recent['VersionNumber']), + SourceVersion=str(most_recent["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) else: @@ -609,7 +619,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(source_version['VersionNumber']), + SourceVersion=str(source_version["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) @@ -782,11 +793,12 @@ def main(): ) arg_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - template_name=dict(aliases=['name']), - template_id=dict(aliases=['id']), - default_version=dict(default='latest'), - source_version=dict(default='latest') + state=dict(choices=["present", "absent"], default="present"), + template_name=dict(aliases=["name"]), + template_id=dict(aliases=["id"]), + default_version=dict(default="latest"), + source_version=dict(default="latest"), + version_description=dict(default=""), ) arg_spec.update(template_options) From 94c699bad02761f4c1d52711073e58faede06407 Mon Sep 17 00:00:00 2001 From: Nicolas Boutet Date: Mon, 17 Apr 2023 20:32:47 +0200 Subject: [PATCH 648/683] Add http3 support in cloudfront_distribution module (#1753) Add http3 support in cloudfront_distribution module SUMMARY Add http3 support to cloudfront_distribution module. ISSUE TYPE Feature Pull Request COMPONENT NAME cloudfront_distribution.py ADDITIONAL INFORMATION Test failures seem to be unrelated to this PR. Reviewed-by: Markus Bergholz Reviewed-by: Nicolas Boutet --- cloudfront_distribution.py | 41 ++++++++++---------------------------- 1 file changed, 11 insertions(+), 30 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index ec6e74daf36..82a00b283be 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -568,7 +568,7 @@ description: - The version of the http protocol to use for the distribution. - AWS defaults this to C(http2). - - Valid values are C(http1.1) and C(http2). + - Valid values are C(http1.1), C(http2), C(http3) and C(http2and3).
type: str ipv6_enabled: @@ -1617,35 +1617,16 @@ def __init__(self, module): self.__valid_methods_cached_methods[1], self.__valid_methods ] - self.__valid_lambda_function_association_event_types = set([ - 'viewer-request', - 'viewer-response', - 'origin-request', - 'origin-response' - ]) - self.__valid_viewer_certificate_ssl_support_methods = set([ - 'sni-only', - 'vip' - ]) - self.__valid_viewer_certificate_minimum_protocol_versions = set([ - 'SSLv3', - 'TLSv1', - 'TLSv1_2016', - 'TLSv1.1_2016', - 'TLSv1.2_2018', - 'TLSv1.2_2019', - 'TLSv1.2_2021' - ]) - self.__valid_viewer_certificate_certificate_sources = set([ - 'cloudfront', - 'iam', - 'acm' - ]) - self.__valid_http_versions = set([ - 'http1.1', - 'http2' - ]) - self.__s3_bucket_domain_identifier = '.s3.amazonaws.com' + self.__valid_lambda_function_association_event_types = set( + ["viewer-request", "viewer-response", "origin-request", "origin-response"] + ) + self.__valid_viewer_certificate_ssl_support_methods = set(["sni-only", "vip"]) + self.__valid_viewer_certificate_minimum_protocol_versions = set( + ["SSLv3", "TLSv1", "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018", "TLSv1.2_2019", "TLSv1.2_2021"] + ) + self.__valid_viewer_certificate_certificate_sources = set(["cloudfront", "iam", "acm"]) + self.__valid_http_versions = set(["http1.1", "http2", "http3", "http2and3"]) + self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: From aad84a11ad65b13e52ac1ff6debccf7c970ce6f0 Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Tue, 18 Apr 2023 13:28:26 +0200 Subject: [PATCH 649/683] elb_target_group: fix lost property AvailabilityZone (#1767) elb_target_group: fix lost property AvailabilityZone SUMMARY Closes #1736 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_target_group Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- elb_target_group.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/elb_target_group.py b/elb_target_group.py index 784fa143a4f..16cafc958e3 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -729,9 +729,12 @@ def create_or_update_target_group(connection, module): if add_instances: instances_to_add = [] - for target in params['Targets']: - if target['Id'] in add_instances: - instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + for target in params["Targets"]: + if target["Id"] in add_instances: + tmp_item = {"Id": target["Id"], "Port": target["Port"]} + if target.get("AvailabilityZone"): + tmp_item["AvailabilityZone"] = target["AvailabilityZone"] + instances_to_add.append(tmp_item) changed = True try: From b19cb6967bdf2a666df20deef0a27aa123a7dd98 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Wed, 19 Apr 2023 03:29:06 -0400 Subject: [PATCH 650/683] elasticache_info: ignore CacheClusterNotFound exception during tag collect (#1777) elasticache_info: ignore CacheClusterNotFound exception during tag collect If we call get_elasticache_tags_with_backoff() on a cluster with an invalid state (e.g. deleting), AWS will trigger a CacheClusterNotFound. With this change, elasticache_info will ignore the cluster and continue with the next one.
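For illustration only, a simplified standalone stand-in for the is_boto3_error_code() matching the fix below relies on; the real amazon.aws helper returns a dynamically built ClientError subclass usable directly in an except clause:

import botocore.exceptions

def matches_error_code(exc, code):
    # simplified stand-in for amazon.aws's is_boto3_error_code()
    return isinstance(exc, botocore.exceptions.ClientError) and exc.response["Error"]["Code"] == code

err = botocore.exceptions.ClientError(
    {"Error": {"Code": "CacheClusterNotFound", "Message": "cluster is deleting"}},
    "ListTagsForResource",
)
assert matches_error_code(err, "CacheClusterNotFound")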
Reviewed-by: Mark Chappell --- elasticache_info.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/elasticache_info.py b/elasticache_info.py index 02b18ee4778..31283cd18aa 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -471,6 +471,9 @@ def get_elasticache_clusters(client, module): arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id']) try: tags = get_elasticache_tags_with_backoff(client, arn) + except is_boto3_error_code("CacheClusterNotFound"): + # e.g.: Cluster was listed but is in deleting state + continue except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for cluster %s") From 01aaada586b1c1a31fca32b7caae141815ada414 Mon Sep 17 00:00:00 2001 From: Gabriel PREDA Date: Wed, 19 Apr 2023 14:42:32 +0300 Subject: [PATCH 651/683] Fix SASL/SCRAM + add option for SASL IAM & add option to disable unauthenticated clients (#1764) Fix SASL/SCRAM + add option for SASL IAM & add option to disable unauthenticated clients SUMMARY fix SASL/SCRAM - Fixes #1761 add IAM authentication add option to disable unauthenticated clients Many thanks to @markuman for pointing me to this issue. ISSUE TYPE Bugfix Pull Request COMPONENT NAME msk_cluster ADDITIONAL INFORMATION I will probably add more tests after working w/ this. Reviewed-by: Mark Chappell Reviewed-by: Gabriel PREDA Reviewed-by: Markus Bergholz --- msk_cluster.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/msk_cluster.py b/msk_cluster.py index 65c9edea258..6bf143509ae 100644 --- a/msk_cluster.py +++ b/msk_cluster.py @@ -122,7 +122,15 @@ sasl_scram: description: SASL/SCRAM authentication is enabled or not. type: bool - default: False + sasl_iam: + version_added: 5.5.0 + description: IAM authentication is enabled or not. + type: bool + unauthenticated: + version_added: 5.5.0 + description: Option to explicitly turn unauthenticated client access on or off. + type: bool + default: True enhanced_monitoring: description: Specifies the level of monitoring for the MSK cluster.
choices: @@ -382,13 +390,21 @@ def prepare_create_options(module): if module.params["authentication"]: c_params["ClientAuthentication"] = {} - if module.params["authentication"].get("sasl_scram"): - c_params["ClientAuthentication"]["Sasl"] = { - "Scram": module.params["authentication"]["sasl_scram"] - } + if module.params["authentication"].get("sasl_scram") or module.params["authentication"].get("sasl_iam"): + sasl = {} + if module.params["authentication"].get("sasl_scram"): + sasl["Scram"] = {"Enabled": True} + if module.params["authentication"].get("sasl_iam"): + sasl["Iam"] = {"Enabled": True} + c_params["ClientAuthentication"]["Sasl"] = sasl if module.params["authentication"].get("tls_ca_arn"): c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], + "Enabled": True, + } + if module.params["authentication"].get("unauthenticated"): + c_params["ClientAuthentication"]["Unauthenticated"] = { + "Enabled": True, } c_params.update(prepare_enhanced_monitoring_options(module)) @@ -713,7 +729,9 @@ def main(): authentication=dict( type="dict", options=dict( tls_ca_arn=dict(type="list", elements="str", required=False), - sasl_scram=dict(type="bool", default=False), + sasl_scram=dict(type="bool", required=False), + sasl_iam=dict(type="bool", required=False), + unauthenticated=dict(type="bool", default=True, required=False), ), ), enhanced_monitoring=dict( From df169d4db4c2efd2fcdb1f7a1d2b185b5a856027 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 20 Apr 2023 13:13:27 +0200 Subject: [PATCH 652/683] eks_nodegroup - fixing remote access and added to integration tests (#1773) (#1781) eks_nodegroup - fixing remote access and added to integration tests SUMMARY This was incorrectly merged directly into stable-5 rather than main. Fixes #1771 Handles the remote_access configuration the way boto3 expects it. Also added it to the integration tests.
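For illustration only, a standalone sketch with hypothetical values of the snake_case to camelCase mapping this fix performs before calling create_nodegroup():

# Hypothetical module input for the remote_access suboptions.
remote_access = {"ec2_ssh_key": "my-keypair", "source_sg": ["sg-0123456789abcdef0"]}

params = {"remoteAccess": {}}
if remote_access["ec2_ssh_key"] is not None:
    params["remoteAccess"]["ec2SshKey"] = remote_access["ec2_ssh_key"]
if remote_access["source_sg"] is not None:
    params["remoteAccess"]["sourceSecurityGroups"] = remote_access["source_sg"]

assert params == {
    "remoteAccess": {"ec2SshKey": "my-keypair", "sourceSecurityGroups": ["sg-0123456789abcdef0"]}
}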
ISSUE TYPE Bugfix Pull Request COMPONENT NAME eks_nodegroup ADDITIONAL INFORMATION This is pulling #1773 from stable-5 into main Reviewed-by: Markus Bergholz Reviewed-by: Thomas Bruckmann Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- eks_nodegroup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/eks_nodegroup.py b/eks_nodegroup.py index 8528cc8a488..f6ce192c96c 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -515,7 +515,11 @@ def create_or_update_nodegroups(client, module): if module.params['release_version'] is not None: params['releaseVersion'] = module.params['release_version'] if module.params['remote_access'] is not None: - params['remoteAccess'] = module.params['remote_access'] + params['remoteAccess'] = dict() + if module.params['remote_access']['ec2_ssh_key'] is not None: + params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] + if module.params['remote_access']['source_sg'] is not None: + params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] if module.params['capacity_type'] is not None: params['capacityType'] = module.params['capacity_type'].upper() if module.params['labels'] is not None: From 30383f1106102624d048918b5ce4aa426088bdce Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 26 Apr 2023 19:26:07 +0200 Subject: [PATCH 653/683] Big Black PR (#1784) * Black prep * Black * changelog * Fix pylint unused-import in tests * Split SSM connection plugin changes * disable glue tests - bucket's missing * Disable s3_logging and s3_sync tests --- accessanalyzer_validate_policy_info.py | 55 +- acm_certificate.py | 130 +-- acm_certificate_info.py | 41 +- api_gateway.py | 93 +- api_gateway_domain.py | 120 +-- application_autoscaling_policy.py | 232 ++--- autoscaling_complete_lifecycle_action.py | 22 +- autoscaling_instance_refresh.py | 93 +- autoscaling_instance_refresh_info.py | 55 +- autoscaling_launch_config.py | 256 ++--- autoscaling_launch_config_find.py | 66 +- autoscaling_launch_config_info.py | 49 +- autoscaling_lifecycle_hook.py | 121 +-- autoscaling_policy.py | 301 +++--- autoscaling_scheduled_action.py | 92 +- aws_region_info.py | 17 +- batch_compute_environment.py | 168 ++-- batch_job_definition.py | 116 +-- batch_job_queue.py | 93 +- cloudformation_exports_info.py | 19 +- cloudformation_stack_set.py | 378 ++++---- cloudfront_distribution.py | 1003 ++++++++++++-------- cloudfront_distribution_info.py | 146 +-- cloudfront_invalidation.py | 55 +- cloudfront_origin_access_identity.py | 74 +- cloudfront_response_headers_policy.py | 81 +- codebuild_project.py | 54 +- codecommit_repository.py | 46 +- codepipeline.py | 72 +- config_aggregation_authorization.py | 56 +- config_aggregator.py | 129 ++- config_delivery_channel.py | 136 +-- config_recorder.py | 106 +-- config_rule.py | 183 ++-- data_pipeline.py | 214 ++--- directconnect_confirm_connection.py | 57 +- directconnect_connection.py | 124 +-- directconnect_gateway.py | 144 +-- directconnect_link_aggregation_group.py | 213 +++-- directconnect_virtual_interface.py | 233 ++--- dms_endpoint.py | 247 +++-- dms_replication_subnet_group.py | 64 +- dynamodb_table.py | 509 +++++----- dynamodb_ttl.py | 52 +- ec2_ami_copy.py | 72 +- ec2_carrier_gateway.py | 4 +- ec2_customer_gateway.py | 107 +-- ec2_customer_gateway_info.py | 32 +- ec2_launch_template.py | 342 ++++--- ec2_placement_group.py | 118 +-- ec2_placement_group_info.py | 37 +- ec2_snapshot_copy.py | 49 +- ec2_transit_gateway.py | 165 ++-- 
ec2_transit_gateway_info.py | 33 +- ec2_transit_gateway_vpc_attachment.py | 98 +- ec2_transit_gateway_vpc_attachment_info.py | 40 +- ec2_vpc_egress_igw.py | 78 +- ec2_vpc_nacl.py | 220 +++-- ec2_vpc_nacl_info.py | 96 +- ec2_vpc_peer.py | 197 ++-- ec2_vpc_peering_info.py | 26 +- ec2_vpc_vgw.py | 252 ++--- ec2_vpc_vgw_info.py | 30 +- ec2_vpc_vpn.py | 417 ++++---- ec2_vpc_vpn_info.py | 27 +- ec2_win_password.py | 38 +- ecs_attribute.py | 94 +- ecs_cluster.py | 154 +-- ecs_ecr.py | 248 +++-- ecs_service.py | 456 ++++----- ecs_service_info.py | 69 +- ecs_tag.py | 79 +- ecs_task.py | 213 +++-- ecs_taskdefinition.py | 348 ++++--- ecs_taskdefinition_info.py | 8 +- efs.py | 352 +++---- efs_info.py | 112 +-- efs_tag.py | 54 +- eks_cluster.py | 90 +- eks_fargate_profile.py | 120 +-- eks_nodegroup.py | 359 +++---- elasticache.py | 266 +++--- elasticache_info.py | 35 +- elasticache_parameter_group.py | 111 ++- elasticache_snapshot.py | 64 +- elasticache_subnet_group.py | 54 +- elasticbeanstalk_app.py | 54 +- elb_classic_lb_info.py | 60 +- elb_instance.py | 156 +-- elb_network_lb.py | 112 +-- elb_target.py | 95 +- elb_target_group.py | 426 +++++---- elb_target_group_info.py | 56 +- elb_target_info.py | 107 +-- glue_connection.py | 174 ++-- glue_crawler.py | 192 ++-- glue_job.py | 168 ++-- iam_access_key.py | 88 +- iam_access_key_info.py | 22 +- iam_group.py | 116 ++- iam_managed_policy.py | 132 +-- iam_mfa_device_info.py | 10 +- iam_password_policy.py | 78 +- iam_role.py | 283 +++--- iam_role_info.py | 84 +- iam_saml_federation.py | 44 +- iam_server_certificate.py | 186 ++-- iam_server_certificate_info.py | 36 +- inspector_target.py | 77 +- kinesis_stream.py | 631 +++++------- lightsail.py | 57 +- lightsail_static_ip.py | 26 +- mq_broker_info.py | 5 +- msk_cluster.py | 124 +-- msk_config.py | 20 +- networkfirewall.py | 73 +- networkfirewall_info.py | 25 +- networkfirewall_policy.py | 96 +- networkfirewall_policy_info.py | 21 +- networkfirewall_rule_group.py | 137 +-- networkfirewall_rule_group_info.py | 43 +- opensearch.py | 336 ++----- opensearch_info.py | 35 +- redshift.py | 347 +++---- redshift_cross_region_snapshots.py | 71 +- redshift_info.py | 31 +- redshift_subnet_group.py | 70 +- s3_bucket_info.py | 97 +- s3_bucket_notification.py | 171 ++-- s3_cors.py | 21 +- s3_lifecycle.py | 272 +++--- s3_logging.py | 66 +- s3_metrics_configuration.py | 72 +- s3_sync.py | 224 +++-- s3_website.py | 85 +- secretsmanager_secret.py | 136 +-- ses_identity.py | 167 ++-- ses_identity_policy.py | 46 +- ses_rule_set.py | 50 +- sns.py | 66 +- sns_topic.py | 277 +++--- sns_topic_info.py | 13 +- sqs_queue.py | 148 ++- ssm_inventory_info.py | 8 +- ssm_parameter.py | 176 ++-- stepfunctions_state_machine.py | 83 +- stepfunctions_state_machine_execution.py | 85 +- storagegateway_info.py | 67 +- sts_assume_role.py | 35 +- sts_session_token.py | 38 +- waf_condition.py | 276 +++--- waf_info.py | 14 +- waf_rule.py | 150 +-- waf_web_acl.py | 148 +-- wafv2_ip_set.py | 119 +-- wafv2_ip_set_info.py | 39 +- wafv2_resources.py | 50 +- wafv2_resources_info.py | 27 +- wafv2_rule_group.py | 141 ++- wafv2_rule_group_info.py | 28 +- wafv2_web_acl.py | 166 ++-- wafv2_web_acl_info.py | 27 +- 162 files changed, 10737 insertions(+), 10034 deletions(-) diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py index 817f414671b..fab777175e7 100644 --- a/accessanalyzer_validate_policy_info.py +++ b/accessanalyzer_validate_policy_info.py @@ -177,11 +177,10 @@ def filter_findings(findings, type_filter): return 
findings # Convert type_filter to the findingType strings returned by the API - filter_map = dict(error='ERROR', security='SECURITY_WARNING', - suggestion='SUGGESTION', warning='WARNING') + filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING") allowed_types = [filter_map[t] for t in type_filter] - filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types] + filtered_results = [f for f in findings if f.get("findingType", None) in allowed_types] return filtered_results @@ -190,47 +189,47 @@ def main(): # values are likely to be expanded, let's avoid hard coding limits which might not hold true in # the long term... argument_spec = dict( - policy=dict(required=True, type='json', aliases=['policy_document']), - locale=dict(required=False, type='str', default='EN'), - policy_type=dict(required=False, type='str', default='identity', - choices=['identity', 'resource', 'service_control']), - resource_type=dict(required=False, type='str'), - results_filter=dict(required=False, type='list', elements='str', - choices=['error', 'security', 'suggestion', 'warning']), + policy=dict(required=True, type="json", aliases=["policy_document"]), + locale=dict(required=False, type="str", default="EN"), + policy_type=dict( + required=False, type="str", default="identity", choices=["identity", "resource", "service_control"] + ), + resource_type=dict(required=False, type="str"), + results_filter=dict( + required=False, type="list", elements="str", choices=["error", "security", "suggestion", "warning"] + ), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY', - service_control='SERVICE_CONTROL_POLICY') + policy_type_map = dict( + identity="IDENTITY_POLICY", resource="RESOURCE_POLICY", service_control="SERVICE_CONTROL_POLICY" + ) - policy = module.params.get('policy') - policy_type = policy_type_map[module.params.get('policy_type')] - locale = module.params.get('locale').upper() - resource_type = module.params.get('resource_type') - results_filter = module.params.get('results_filter') + policy = module.params.get("policy") + policy_type = policy_type_map[module.params.get("policy_type")] + locale = module.params.get("locale").upper() + resource_type = module.params.get("resource_type") + results_filter = module.params.get("results_filter") try: - client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("accessanalyzer", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") params = dict(locale=locale, policyDocument=policy, policyType=policy_type) - if policy_type == 'RESOURCE_POLICY' and resource_type: - params['policyType'] = resource_type + if policy_type == "RESOURCE_POLICY" and resource_type: + params["policyType"] = resource_type results = client.validate_policy(aws_retry=True, **params) - findings = filter_findings(results.get('findings', []), results_filter) - results['findings'] = findings + findings = filter_findings(results.get("findings", []), results_filter) + results["findings"] = findings results = camel_dict_to_snake_dict(results) module.exit_json(changed=False, **results) -if 
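
As a standalone sketch of the filter_findings() logic in the accessanalyzer_validate_policy_info hunk above: the module's lowercase filter names are mapped to the findingType strings the Access Analyzer API returns, and only matching findings are kept. The sample findings below are made up:

    filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING")

    def filter_findings(findings, type_filter):
        allowed_types = [filter_map[t] for t in type_filter]
        return [f for f in findings if f.get("findingType") in allowed_types]

    findings = [{"findingType": "ERROR"}, {"findingType": "SUGGESTION"}]
    print(filter_findings(findings, ["error", "warning"]))
    # [{'findingType': 'ERROR'}]
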
__name__ == '__main__': +if __name__ == "__main__": main() diff --git a/acm_certificate.py b/acm_certificate.py index e7ea9c6d87a..197124fb59e 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -276,12 +276,10 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't add tags to certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't add tags to certificate {0}".format(resource_arn)) if tags_to_remove and not module.check_mode: # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys. - tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove] + tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove] try: client.remove_tags_from_certificate( CertificateArn=resource_arn, @@ -291,9 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws( - e, "Couldn't remove tags from certificate {0}".format(resource_arn) - ) + module.fail_json_aws(e, "Couldn't remove tags from certificate {0}".format(resource_arn)) new_tags = deepcopy(existing_tags) for key, value in tags_to_add.items(): new_tags[key] = value @@ -308,7 +304,6 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): # May include some lines between each chain in the cert, e.g. "Subject: ..." # Returns True iff the chains/certs are functionally identical (including chain order) def chain_compare(module, a, b): - chain_a_pem = pem_chain_split(module, a) chain_b_pem = pem_chain_split(module, b) @@ -316,7 +311,7 @@ def chain_compare(module, a, b): return False # Chain length is the same - for (ca, cb) in zip(chain_a_pem, chain_b_pem): + for ca, cb in zip(chain_a_pem, chain_b_pem): der_a = PEM_body_to_DER(module, ca) der_b = PEM_body_to_DER(module, cb) if der_a != der_b: @@ -336,7 +331,9 @@ def PEM_body_to_DER(module, pem): # Store this globally to avoid repeated recompilation -pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?") +pem_chain_split_regex = re.compile( + r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?" +) # Use regex to split up a chain or single cert into an array of base64 encoded data @@ -344,7 +341,6 @@ def PEM_body_to_DER(module, pem): # Noting that some chains have non-pem data in between each cert # This function returns only what's between the headers, excluding the headers def pem_chain_split(module, pem): - pem_arr = re.findall(pem_chain_split_regex, to_text(pem)) if len(pem_arr) == 0: @@ -359,53 +355,55 @@ def update_imported_certificate(client, module, acm, old_cert, desired_tags): Update the existing certificate that was previously imported in ACM. 
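
A hedged sketch of the PEM-splitting approach acm_certificate uses above: a regex captures the base64 body between BEGIN/END CERTIFICATE markers so a chain can be compared certificate-by-certificate after decoding to DER, ignoring any non-PEM text between entries. The "certificate" below is a placeholder string, not real certificate data:

    import base64
    import re

    pem_chain_split_regex = re.compile(
        r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?"
    )

    def chain_to_der(pem_text):
        # Extract each base64 body and decode it to DER bytes for comparison.
        return [base64.b64decode(body) for body in pem_chain_split_regex.findall(pem_text)]

    fake_chain = "-----BEGIN CERTIFICATE-----\naGVsbG8=\n-----END CERTIFICATE-----\n"
    print(chain_to_der(fake_chain))  # [b'hello']
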
""" module.debug("Existing certificate found in ACM") - if ('tags' not in old_cert) or ('Name' not in old_cert['tags']): + if ("tags" not in old_cert) or ("Name" not in old_cert["tags"]): # shouldn't happen module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) - if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')): + if module.params.get("name_tag") is not None and (old_cert["tags"]["Name"] != module.params.get("name_tag")): # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name', # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'. module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert) - if 'certificate' not in old_cert: + if "certificate" not in old_cert: # shouldn't happen module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) cert_arn = None # Are the existing certificate in ACM and the local certificate the same? same = True - if module.params.get('certificate') is not None: - same &= chain_compare(module, old_cert['certificate'], module.params['certificate']) - if module.params['certificate_chain']: + if module.params.get("certificate") is not None: + same &= chain_compare(module, old_cert["certificate"], module.params["certificate"]) + if module.params["certificate_chain"]: # Need to test this # not sure if Amazon appends the cert itself to the chain when self-signed - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate_chain"]) else: # When there is no chain with a cert # it seems Amazon returns the cert itself as the chain - same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate']) + same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate"]) if same: module.debug("Existing certificate in ACM is the same") - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] changed = False else: - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("Existing certificate in ACM is different, overwriting") changed = True if module.check_mode: - cert_arn = old_cert['certificate_arn'] + cert_arn = old_cert["certificate_arn"] # note: returned domain will be the domain of the previous cert else: # update cert in ACM cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], - arn=old_cert['certificate_arn'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], + arn=old_cert["certificate_arn"], tags=desired_tags, ) return (changed, cert_arn) @@ -416,22 +414,24 @@ def import_certificate(client, module, acm, desired_tags): Import a certificate to ACM. 
""" # Validate argument requirements - absent_args = ['certificate', 'name_tag', 'private_key'] + absent_args = ["certificate", "name_tag", "private_key"] cert_arn = None if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified") + module.fail_json( + msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" + ) module.debug("No certificate in ACM. Creating new one.") changed = True if module.check_mode: - domain = 'example.com' + domain = "example.com" module.exit_json(certificate=dict(domain_name=domain), changed=True) else: cert_arn = acm.import_certificate( client, module, - certificate=module.params['certificate'], - private_key=module.params['private_key'], - certificate_chain=module.params['certificate_chain'], + certificate=module.params["certificate"], + private_key=module.params["private_key"], + certificate_chain=module.params["certificate_chain"], tags=desired_tags, ) return (changed, cert_arn) @@ -441,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, cert_arn = None changed = False if len(certificates) > 1: - msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag'] + msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params["name_tag"] module.fail_json(msg=msg, certificates=certificates) elif len(certificates) == 1: # Update existing certificate that was previously imported to ACM. @@ -452,11 +452,13 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, # Add/remove tags to/from certificate try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_tags_for_certificate(CertificateArn=cert_arn)["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for certificate") - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags) changed |= c domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn) @@ -466,21 +468,21 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, def ensure_certificates_absent(client, module, acm, certificates): for cert in certificates: if not module.check_mode: - acm.delete_certificate(client, module, cert['certificate_arn']) - module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0)) + acm.delete_certificate(client, module, cert["certificate_arn"]) + module.exit_json(arns=[cert["certificate_arn"] for cert in certificates], changed=(len(certificates) > 0)) def main(): argument_spec = dict( certificate=dict(), - certificate_arn=dict(aliases=['arn']), + certificate_arn=dict(aliases=["arn"]), certificate_chain=dict(), - domain_name=dict(aliases=['domain']), - name_tag=dict(aliases=['name']), + domain_name=dict(aliases=["domain"]), + name_tag=dict(aliases=["name"]), private_key=dict(no_log=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), + tags=dict(type="dict", 
aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -489,62 +491,66 @@ def main(): acm = ACMServiceManager(module) # Check argument requirements - if module.params['state'] == 'present': + if module.params["state"] == "present": # at least one of these should be specified. - absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) < 1: for a in absent_args: module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.fail_json( + msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) else: # absent # exactly one of these should be specified - absent_args = ['certificate_arn', 'domain_name', 'name_tag'] + absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: module.debug("%s is %s" % (a, module.params[a])) - module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified") + module.fail_json( + msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" + ) filter_tags = None desired_tags = None - if module.params.get('tags') is not None: - desired_tags = module.params['tags'] + if module.params.get("tags") is not None: + desired_tags = module.params["tags"] else: # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed - module.params['purge_tags'] = False - if module.params.get('name_tag') is not None: + module.params["purge_tags"] = False + if module.params.get("name_tag") is not None: # The module was originally implemented to filter certificates based on the 'Name' tag. # Other tags are not used to filter certificates. # It would make sense to replace the existing name_tag, domain, certificate_arn attributes # with a 'filter' attribute, but that would break backwards-compatibility. 
- filter_tags = dict(Name=module.params['name_tag']) + filter_tags = dict(Name=module.params["name_tag"]) if desired_tags is not None: - if 'Name' in desired_tags: - if desired_tags['Name'] != module.params['name_tag']: + if "Name" in desired_tags: + if desired_tags["Name"] != module.params["name_tag"]: module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'") else: - desired_tags['Name'] = module.params['name_tag'] + desired_tags["Name"] = module.params["name_tag"] else: desired_tags = deepcopy(filter_tags) - client = module.client('acm') + client = module.client("acm") # fetch the list of certificates currently in ACM certificates = acm.get_certificates( client=client, module=module, - domain_name=module.params['domain_name'], - arn=module.params['certificate_arn'], + domain_name=module.params["domain_name"], + arn=module.params["certificate_arn"], only_tags=filter_tags, ) module.debug("Found %d corresponding certificates in ACM" % len(certificates)) - if module.params['state'] == 'present': + if module.params["state"] == "present": ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) else: # state == absent ensure_certificates_absent(client, module, acm, certificates) -if __name__ == '__main__': +if __name__ == "__main__": # tests() main() diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 2364751f519..287e7006aef 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -264,31 +264,42 @@ def main(): argument_spec = dict( - certificate_arn=dict(aliases=['arn']), - domain_name=dict(aliases=['name']), + certificate_arn=dict(aliases=["arn"]), + domain_name=dict(aliases=["name"]), statuses=dict( - type='list', - elements='str', - choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] + type="list", + elements="str", + choices=[ + "PENDING_VALIDATION", + "ISSUED", + "INACTIVE", + "EXPIRED", + "VALIDATION_TIMED_OUT", + "REVOKED", + "FAILED", + ], ), - tags=dict(type='dict'), + tags=dict(type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) acm_info = ACMServiceManager(module) - client = module.client('acm') + client = module.client("acm") - certificates = acm_info.get_certificates(client, module, - domain_name=module.params['domain_name'], - statuses=module.params['statuses'], - arn=module.params['certificate_arn'], - only_tags=module.params['tags']) + certificates = acm_info.get_certificates( + client, + module, + domain_name=module.params["domain_name"], + statuses=module.params["statuses"], + arn=module.params["certificate_arn"], + only_tags=module.params["tags"], + ) - if module.params['certificate_arn'] and len(certificates) != 1: - module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn']) + if module.params["certificate_arn"] and len(certificates) != 1: + module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params["certificate_arn"]) module.exit_json(certificates=certificates) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/api_gateway.py b/api_gateway.py index 615c3d89aa3..176404f644d 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -182,22 +182,22 @@ def main(): argument_spec = dict( - api_id=dict(type='str', required=False), - state=dict(type='str', default='present', choices=['present', 'absent']), - swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']), - 
swagger_dict=dict(type='json', default=None), - swagger_text=dict(type='str', default=None), - stage=dict(type='str', default=None), - deploy_desc=dict(type='str', default="Automatic deployment by Ansible."), - cache_enabled=dict(type='bool', default=False), - cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']), - stage_variables=dict(type='dict', default={}), - stage_canary_settings=dict(type='dict', default={}), - tracing_enabled=dict(type='bool', default=False), - endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']) + api_id=dict(type="str", required=False), + state=dict(type="str", default="present", choices=["present", "absent"]), + swagger_file=dict(type="path", default=None, aliases=["src", "api_file"]), + swagger_dict=dict(type="json", default=None), + swagger_text=dict(type="str", default=None), + stage=dict(type="str", default=None), + deploy_desc=dict(type="str", default="Automatic deployment by Ansible."), + cache_enabled=dict(type="bool", default=False), + cache_size=dict(type="str", default="0.5", choices=["0.5", "1.6", "6.1", "13.5", "28.4", "58.2", "118", "237"]), + stage_variables=dict(type="dict", default={}), + stage_canary_settings=dict(type="dict", default={}), + tracing_enabled=dict(type="bool", default=False), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), ) - mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']] # noqa: F841 + mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841 module = AnsibleAWSModule( argument_spec=argument_spec, @@ -205,16 +205,16 @@ def main(): mutually_exclusive=mutually_exclusive, ) - api_id = module.params.get('api_id') - state = module.params.get('state') # noqa: F841 - swagger_file = module.params.get('swagger_file') - swagger_dict = module.params.get('swagger_dict') - swagger_text = module.params.get('swagger_text') - endpoint_type = module.params.get('endpoint_type') + api_id = module.params.get("api_id") + state = module.params.get("state") # noqa: F841 + swagger_file = module.params.get("swagger_file") + swagger_dict = module.params.get("swagger_dict") + swagger_text = module.params.get("swagger_text") + endpoint_type = module.params.get("endpoint_type") - client = module.client('apigateway') + client = module.client("apigateway") - changed = True # for now it will stay that way until we can sometimes avoid change + changed = True # for now it will stay that way until we can sometimes avoid change conf_res = None dep_res = None del_res = None @@ -222,8 +222,9 @@ def main(): if state == "present": if api_id is None: api_id = create_empty_api(module, client, endpoint_type) - api_data = get_api_definitions(module, swagger_file=swagger_file, - swagger_dict=swagger_dict, swagger_text=swagger_text) + api_data = get_api_definitions( + module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text + ) conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) if state == "absent": del_res = delete_rest_api(module, client, api_id) @@ -231,11 +232,11 @@ def main(): exit_args = {"changed": changed, "api_id": api_id} if conf_res is not None: - exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res) + exit_args["configure_response"] = camel_dict_to_snake_dict(conf_res) if dep_res is not None: - exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res) + exit_args["deploy_response"] = 
camel_dict_to_snake_dict(dep_res) if del_res is not None: - exit_args['delete_response'] = camel_dict_to_snake_dict(del_res) + exit_args["delete_response"] = camel_dict_to_snake_dict(del_res) module.exit_json(**exit_args) @@ -255,7 +256,7 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te apidata = swagger_text if apidata is None: - module.fail_json(msg='module error - no swagger info provided') + module.fail_json(msg="module error - no swagger info provided") return apidata @@ -302,7 +303,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): deploy_response = None - stage = module.params.get('stage') + stage = module.params.get("stage") if stage: try: deploy_response = create_deployment(client, api_id, **module.params) @@ -313,12 +314,14 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): return configure_response, deploy_response -retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']} +retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]} @AWSRetry.jittered_backoff(**retry_params) def create_api(client, name=None, description=None, endpoint_type=None): - return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]}) + return client.create_rest_api( + name="ansible-temp-api", description=description, endpointConfiguration={"types": [endpoint_type]} + ) @AWSRetry.jittered_backoff(**retry_params) @@ -333,32 +336,32 @@ def configure_api(client, api_id, api_data=None, mode="overwrite"): @AWSRetry.jittered_backoff(**retry_params) def create_deployment(client, rest_api_id, **params): - canary_settings = params.get('stage_canary_settings') + canary_settings = params.get("stage_canary_settings") if canary_settings and len(canary_settings) > 0: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), canarySettings=canary_settings, - tracingEnabled=params.get('tracing_enabled') + tracingEnabled=params.get("tracing_enabled"), ) else: result = client.create_deployment( restApiId=rest_api_id, - stageName=params.get('stage'), - description=params.get('deploy_desc'), - cacheClusterEnabled=params.get('cache_enabled'), - cacheClusterSize=params.get('cache_size'), - variables=params.get('stage_variables'), - tracingEnabled=params.get('tracing_enabled') + stageName=params.get("stage"), + description=params.get("deploy_desc"), + cacheClusterEnabled=params.get("cache_enabled"), + cacheClusterSize=params.get("cache_size"), + variables=params.get("stage_variables"), + tracingEnabled=params.get("tracing_enabled"), ) return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/api_gateway_domain.py b/api_gateway_domain.py index a1afeaec95f..10a1ca1f2f7 100644 --- a/api_gateway_domain.py +++ b/api_gateway_domain.py @@ -129,12 +129,12 @@ def get_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = {} try: - result['domain'] = get_domain_name(client, domain_name) - 
result['path_mappings'] = get_domain_mappings(client, domain_name) - except is_boto3_error_code('NotFoundException'): + result["domain"] = get_domain_name(client, domain_name) + result["path_mappings"] = get_domain_mappings(client, domain_name) + except is_boto3_error_code("NotFoundException"): return None except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="getting API GW domain") @@ -142,28 +142,28 @@ def get_domain(module, client): def create_domain(module, client): - path_mappings = module.params.get('domain_mappings', []) - domain_name = module.params.get('domain_name') - result = {'domain': {}, 'path_mappings': []} + path_mappings = module.params.get("domain_mappings", []) + domain_name = module.params.get("domain_name") + result = {"domain": {}, "path_mappings": []} try: - result['domain'] = create_domain_name( + result["domain"] = create_domain_name( module, client, domain_name, - module.params.get('certificate_arn'), - module.params.get('endpoint_type'), - module.params.get('security_policy') + module.params.get("certificate_arn"), + module.params.get("endpoint_type"), + module.params.get("security_policy"), ) for mapping in path_mappings: - base_path = mapping.get('base_path', '') - rest_api_id = mapping.get('rest_api_id') - stage = mapping.get('stage') + base_path = mapping.get("base_path", "") + rest_api_id = mapping.get("rest_api_id") + stage = mapping.get("stage") if rest_api_id is None or stage is None: - module.fail_json('Every domain mapping needs a rest_api_id and stage name') + module.fail_json("Every domain mapping needs a rest_api_id and stage name") - result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) + result["path_mappings"].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="creating API GW domain") @@ -171,54 +171,56 @@ def create_domain(module, client): def update_domain(module, client, existing_domain): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") result = existing_domain - result['updated'] = False + result["updated"] = False - domain = existing_domain.get('domain') + domain = existing_domain.get("domain") # Compare only relevant set of domain arguments. # As get_domain_name gathers all kind of state information that can't be set anyways. # Also this module doesn't support custom TLS cert setup params as they are kind of deprecated already and would increase complexity. 
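
A minimal sketch of the mapping comparison in the update_domain() code that follows: AWS reports an empty base path as the placeholder "(none)", and the module specifies it as "", so both sides are normalized before the drift check. All values below are illustrative:

    def normalize_mappings(mappings, placeholder):
        normalized = []
        for mapping in mappings:
            mapping = dict(mapping)  # work on a copy, as the module does via deepcopy
            if mapping.get("base_path") == placeholder:
                mapping.pop("base_path")
            normalized.append(mapping)
        return normalized

    existing = normalize_mappings([{"base_path": "(none)", "rest_api_id": "abc123", "stage": "dev"}], "(none)")
    specified = normalize_mappings([{"base_path": "", "rest_api_id": "abc123", "stage": "dev"}], "")
    print(existing == specified)  # True -> no re-mapping needed
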
existing_domain_settings = { - 'certificate_arn': domain.get('certificate_arn'), - 'security_policy': domain.get('security_policy'), - 'endpoint_type': domain.get('endpoint_configuration').get('types')[0] + "certificate_arn": domain.get("certificate_arn"), + "security_policy": domain.get("security_policy"), + "endpoint_type": domain.get("endpoint_configuration").get("types")[0], } specified_domain_settings = { - 'certificate_arn': module.params.get('certificate_arn'), - 'security_policy': module.params.get('security_policy'), - 'endpoint_type': module.params.get('endpoint_type') + "certificate_arn": module.params.get("certificate_arn"), + "security_policy": module.params.get("security_policy"), + "endpoint_type": module.params.get("endpoint_type"), } if specified_domain_settings != existing_domain_settings: try: - result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings)) - result['updated'] = True + result["domain"] = update_domain_name( + client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings) + ) + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain") - existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', [])) + existing_mappings = copy.deepcopy(existing_domain.get("path_mappings", [])) # Cleanout `base_path: "(none)"` elements from dicts as those won't match with specified mappings for mapping in existing_mappings: - if mapping.get('base_path', 'missing') == '(none)': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "(none)": + mapping.pop("base_path") - specified_mappings = copy.deepcopy(module.params.get('domain_mappings', [])) + specified_mappings = copy.deepcopy(module.params.get("domain_mappings", [])) # Cleanout `base_path: ""` elements from dicts as those won't match with existing mappings for mapping in specified_mappings: - if mapping.get('base_path', 'missing') == '': - mapping.pop('base_path') + if mapping.get("base_path", "missing") == "": + mapping.pop("base_path") if specified_mappings != existing_mappings: try: # When lists missmatch delete all existing mappings before adding new ones as specified - for mapping in existing_domain.get('path_mappings', []): - delete_domain_mapping(client, domain_name, mapping['base_path']) - for mapping in module.params.get('domain_mappings', []): - result['path_mappings'] = add_domain_mapping( - client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage') + for mapping in existing_domain.get("path_mappings", []): + delete_domain_mapping(client, domain_name, mapping["base_path"]) + for mapping in module.params.get("domain_mappings", []): + result["path_mappings"] = add_domain_mapping( + client, domain_name, mapping.get("base_path", ""), mapping.get("rest_api_id"), mapping.get("stage") ) - result['updated'] = True + result["updated"] = True except (ClientError, BotoCoreError, EndpointConnectionError) as e: module.fail_json_aws(e, msg="updating API GW domain mapping") @@ -226,7 +228,7 @@ def update_domain(module, client, existing_domain): def delete_domain(module, client): - domain_name = module.params.get('domain_name') + domain_name = module.params.get("domain_name") try: result = delete_domain_name(client, domain_name) except (ClientError, BotoCoreError, EndpointConnectionError) as e: @@ -244,19 +246,19 @@ def get_domain_name(client, domain_name): @AWSRetry.jittered_backoff(**retry_params) def 
get_domain_mappings(client, domain_name): - return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', []) + return client.get_base_path_mappings(domainName=domain_name, limit=200).get("items", []) @AWSRetry.jittered_backoff(**retry_params) def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): - endpoint_configuration = {'types': [endpoint_type]} + endpoint_configuration = {"types": [endpoint_type]} - if endpoint_type == 'EDGE': + if endpoint_type == "EDGE": return client.create_domain_name( domainName=domain_name, certificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) else: # Use regionalCertificateArn for regional domain deploys @@ -264,13 +266,15 @@ def create_domain_name(module, client, domain_name, certificate_arn, endpoint_ty domainName=domain_name, regionalCertificateArn=certificate_arn, endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy + securityPolicy=security_policy, ) @AWSRetry.jittered_backoff(**retry_params) def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): - return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage) + return client.create_base_path_mapping( + domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage + ) @AWSRetry.jittered_backoff(**retry_params) @@ -298,29 +302,29 @@ def delete_domain_mapping(client, domain_name, base_path): def main(): argument_spec = dict( - domain_name=dict(type='str', required=True), - certificate_arn=dict(type='str', required=True), - security_policy=dict(type='str', default='TLS_1_2', choices=['TLS_1_0', 'TLS_1_2']), - endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']), - domain_mappings=dict(type='list', required=True, elements='dict'), - state=dict(type='str', default='present', choices=['present', 'absent']) + domain_name=dict(type="str", required=True), + certificate_arn=dict(type="str", required=True), + security_policy=dict(type="str", default="TLS_1_2", choices=["TLS_1_0", "TLS_1_2"]), + endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), + domain_mappings=dict(type="list", required=True, elements="dict"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=False + supports_check_mode=False, ) - client = module.client('apigateway') + client = module.client("apigateway") - state = module.params.get('state') + state = module.params.get("state") changed = False if state == "present": existing_domain = get_domain(module, client) if existing_domain is not None: result = update_domain(module, client, existing_domain) - changed = result['updated'] + changed = result["updated"] else: result = create_domain(module, client) changed = True @@ -331,10 +335,10 @@ def main(): exit_args = {"changed": changed} if result is not None: - exit_args['response'] = result + exit_args["response"] = result module.exit_json(**exit_args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py index 1b8669d84c8..8bbd91728a6 100644 --- a/application_autoscaling_policy.py +++ b/application_autoscaling_policy.py @@ -297,13 +297,13 @@ # Merge the results of the scalable target creation and policy 
deletion/creation # There's no risk in overriding values since mutual keys have the same values in our case def merge_results(scalable_target_result, policy_result): - if scalable_target_result['changed'] or policy_result['changed']: + if scalable_target_result["changed"] or policy_result["changed"]: changed = True else: changed = False - merged_response = scalable_target_result['response'].copy() - merged_response.update(policy_result['response']) + merged_response = scalable_target_result["response"].copy() + merged_response.update(policy_result["response"]) return {"changed": changed, "response": merged_response} @@ -312,22 +312,22 @@ def delete_scaling_policy(connection, module): changed = False try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if scaling_policy['ScalingPolicies']: + if scaling_policy["ScalingPolicies"]: try: connection.delete_scaling_policy( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyName=module.params.get('policy_name'), + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyName=module.params.get("policy_name"), ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -341,11 +341,11 @@ def create_scalable_target(connection, module): try: scalable_targets = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") @@ -353,41 +353,38 @@ def create_scalable_target(connection, module): # Scalable target registration will occur if: # 1. There is no scalable target registered for this service # 2. 
A scalable target exists, different min/max values are defined and override is set to "yes" - if ( - not scalable_targets['ScalableTargets'] - or ( - module.params.get('override_task_capacity') - and ( - scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks') - or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks') - ) + if not scalable_targets["ScalableTargets"] or ( + module.params.get("override_task_capacity") + and ( + scalable_targets["ScalableTargets"][0]["MinCapacity"] != module.params.get("minimum_tasks") + or scalable_targets["ScalableTargets"][0]["MaxCapacity"] != module.params.get("maximum_tasks") ) ): changed = True try: connection.register_scalable_target( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - MinCapacity=module.params.get('minimum_tasks'), - MaxCapacity=module.params.get('maximum_tasks') + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + MinCapacity=module.params.get("minimum_tasks"), + MaxCapacity=module.params.get("maximum_tasks"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to register scalable target") try: response = connection.describe_scalable_targets( - ServiceNamespace=module.params.get('service_namespace'), + ServiceNamespace=module.params.get("service_namespace"), ResourceIds=[ - module.params.get('resource_id'), + module.params.get("resource_id"), ], - ScalableDimension=module.params.get('scalable_dimension') + ScalableDimension=module.params.get("scalable_dimension"), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scalable targets") - if (response['ScalableTargets']): - snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0]) + if response["ScalableTargets"]: + snaked_response = camel_dict_to_snake_dict(response["ScalableTargets"][0]) else: snaked_response = {} @@ -397,78 +394,82 @@ def create_scalable_target(connection, module): def create_scaling_policy(connection, module): try: scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") changed = False - if scaling_policy['ScalingPolicies']: - scaling_policy = scaling_policy['ScalingPolicies'][0] + if scaling_policy["ScalingPolicies"]: + scaling_policy = scaling_policy["ScalingPolicies"][0] # check if the input parameters are equal to what's already configured - for attr in ('PolicyName', - 'ServiceNamespace', - 'ResourceId', - 'ScalableDimension', - 'PolicyType', - 'StepScalingPolicyConfiguration', - 'TargetTrackingScalingPolicyConfiguration'): + for attr in ( + "PolicyName", + "ServiceNamespace", + "ResourceId", + 
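
A hedged sketch of the register-if-needed rule in create_scalable_target() above: register when no scalable target exists, or when override is requested and the min/max capacities differ from what is configured. Inputs below are illustrative:

    def needs_registration(scalable_targets, override, min_tasks, max_tasks):
        if not scalable_targets:
            return True
        current = scalable_targets[0]
        return bool(override) and (
            current["MinCapacity"] != min_tasks or current["MaxCapacity"] != max_tasks
        )

    print(needs_registration([], False, 1, 4))                                      # True
    print(needs_registration([{"MinCapacity": 1, "MaxCapacity": 4}], False, 2, 4))  # False
    print(needs_registration([{"MinCapacity": 1, "MaxCapacity": 4}], True, 2, 4))   # True
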
"ScalableDimension", + "PolicyType", + "StepScalingPolicyConfiguration", + "TargetTrackingScalingPolicyConfiguration", + ): if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)): changed = True scaling_policy[attr] = module.params.get(_camel_to_snake(attr)) else: changed = True scaling_policy = { - 'PolicyName': module.params.get('policy_name'), - 'ServiceNamespace': module.params.get('service_namespace'), - 'ResourceId': module.params.get('resource_id'), - 'ScalableDimension': module.params.get('scalable_dimension'), - 'PolicyType': module.params.get('policy_type'), - 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'), - 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration') + "PolicyName": module.params.get("policy_name"), + "ServiceNamespace": module.params.get("service_namespace"), + "ResourceId": module.params.get("resource_id"), + "ScalableDimension": module.params.get("scalable_dimension"), + "PolicyType": module.params.get("policy_type"), + "StepScalingPolicyConfiguration": module.params.get("step_scaling_policy_configuration"), + "TargetTrackingScalingPolicyConfiguration": module.params.get( + "target_tracking_scaling_policy_configuration" + ), } if changed: try: - if (module.params.get('step_scaling_policy_configuration')): + if module.params.get("step_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + StepScalingPolicyConfiguration=scaling_policy["StepScalingPolicyConfiguration"], ) - elif (module.params.get('target_tracking_scaling_policy_configuration')): + elif module.params.get("target_tracking_scaling_policy_configuration"): connection.put_scaling_policy( - PolicyName=scaling_policy['PolicyName'], - ServiceNamespace=scaling_policy['ServiceNamespace'], - ResourceId=scaling_policy['ResourceId'], - ScalableDimension=scaling_policy['ScalableDimension'], - PolicyType=scaling_policy['PolicyType'], - TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration'] + PolicyName=scaling_policy["PolicyName"], + ServiceNamespace=scaling_policy["ServiceNamespace"], + ResourceId=scaling_policy["ResourceId"], + ScalableDimension=scaling_policy["ScalableDimension"], + PolicyType=scaling_policy["PolicyType"], + TargetTrackingScalingPolicyConfiguration=scaling_policy["TargetTrackingScalingPolicyConfiguration"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create scaling policy") try: response = connection.describe_scaling_policies( - ServiceNamespace=module.params.get('service_namespace'), - ResourceId=module.params.get('resource_id'), - ScalableDimension=module.params.get('scalable_dimension'), - PolicyNames=[module.params.get('policy_name')], - MaxResults=1 + ServiceNamespace=module.params.get("service_namespace"), + ResourceId=module.params.get("resource_id"), + 
ScalableDimension=module.params.get("scalable_dimension"), + PolicyNames=[module.params.get("policy_name")], + MaxResults=1, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe scaling policies") - if (response['ScalingPolicies']): - snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0]) + if response["ScalingPolicies"]: + snaked_response = camel_dict_to_snake_dict(response["ScalingPolicies"][0]) else: snaked_response = {} @@ -477,52 +478,63 @@ def create_scaling_policy(connection, module): def main(): argument_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - policy_name=dict(type='str', required=True), - service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']), - resource_id=dict(type='str', required=True), - scalable_dimension=dict(type='str', - required=True, - choices=['ecs:service:DesiredCount', - 'ec2:spot-fleet-request:TargetCapacity', - 'elasticmapreduce:instancegroup:InstanceCount', - 'appstream:fleet:DesiredCapacity', - 'dynamodb:table:ReadCapacityUnits', - 'dynamodb:table:WriteCapacityUnits', - 'dynamodb:index:ReadCapacityUnits', - 'dynamodb:index:WriteCapacityUnits']), - policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']), - step_scaling_policy_configuration=dict(type='dict'), + state=dict(type="str", required=True, choices=["present", "absent"]), + policy_name=dict(type="str", required=True), + service_namespace=dict( + type="str", required=True, choices=["appstream", "dynamodb", "ec2", "ecs", "elasticmapreduce"] + ), + resource_id=dict(type="str", required=True), + scalable_dimension=dict( + type="str", + required=True, + choices=[ + "ecs:service:DesiredCount", + "ec2:spot-fleet-request:TargetCapacity", + "elasticmapreduce:instancegroup:InstanceCount", + "appstream:fleet:DesiredCapacity", + "dynamodb:table:ReadCapacityUnits", + "dynamodb:table:WriteCapacityUnits", + "dynamodb:index:ReadCapacityUnits", + "dynamodb:index:WriteCapacityUnits", + ], + ), + policy_type=dict(type="str", required=True, choices=["StepScaling", "TargetTrackingScaling"]), + step_scaling_policy_configuration=dict(type="dict"), target_tracking_scaling_policy_configuration=dict( - type='dict', + type="dict", options=dict( - CustomizedMetricSpecification=dict(type='dict'), - DisableScaleIn=dict(type='bool'), - PredefinedMetricSpecification=dict(type='dict'), - ScaleInCooldown=dict(type='int'), - ScaleOutCooldown=dict(type='int'), - TargetValue=dict(type='float'), - ) + CustomizedMetricSpecification=dict(type="dict"), + DisableScaleIn=dict(type="bool"), + PredefinedMetricSpecification=dict(type="dict"), + ScaleInCooldown=dict(type="int"), + ScaleOutCooldown=dict(type="int"), + TargetValue=dict(type="float"), + ), ), - minimum_tasks=dict(type='int'), - maximum_tasks=dict(type='int'), - override_task_capacity=dict(type='bool'), + minimum_tasks=dict(type="int"), + maximum_tasks=dict(type="int"), + override_task_capacity=dict(type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('application-autoscaling') + connection = module.client("application-autoscaling") # Remove any target_tracking_scaling_policy_configuration suboptions that are None policy_config_options = [ - 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue' 
+ "CustomizedMetricSpecification", + "DisableScaleIn", + "PredefinedMetricSpecification", + "ScaleInCooldown", + "ScaleOutCooldown", + "TargetValue", ] - if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict): + if isinstance(module.params["target_tracking_scaling_policy_configuration"], dict): for option in policy_config_options: - if module.params['target_tracking_scaling_policy_configuration'][option] is None: - module.params['target_tracking_scaling_policy_configuration'].pop(option) + if module.params["target_tracking_scaling_policy_configuration"][option] is None: + module.params["target_tracking_scaling_policy_configuration"].pop(option) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": # A scalable target must be registered prior to creating a scaling policy scalable_target_result = create_scalable_target(connection, module) policy_result = create_scaling_policy(connection, module) @@ -535,5 +547,5 @@ def main(): module.exit_json(**policy_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py index 2b752c9a4b8..94a8d031fdd 100644 --- a/autoscaling_complete_lifecycle_action.py +++ b/autoscaling_complete_lifecycle_action.py @@ -70,26 +70,26 @@ def main(): argument_spec = dict( - asg_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']), - instance_id=dict(required=True, type='str') + asg_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + lifecycle_action_result=dict(required=True, type="str", choices=["CONTINUE", "ABANDON"]), + instance_id=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - asg_name = module.params.get('asg_name') - lifecycle_hook_name = module.params.get('lifecycle_hook_name') - lifecycle_action_result = module.params.get('lifecycle_action_result') - instance_id = module.params.get('instance_id') + asg_name = module.params.get("asg_name") + lifecycle_hook_name = module.params.get("lifecycle_hook_name") + lifecycle_action_result = module.params.get("lifecycle_action_result") + instance_id = module.params.get("instance_id") - autoscaling = module.client('autoscaling') + autoscaling = module.client("autoscaling") try: results = autoscaling.complete_lifecycle_action( LifecycleHookName=lifecycle_hook_name, AutoScalingGroupName=asg_name, LifecycleActionResult=lifecycle_action_result, - InstanceId=instance_id + InstanceId=instance_id, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to completes the lifecycle action") @@ -97,5 +97,5 @@ def main(): module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_instance_refresh.py b/autoscaling_instance_refresh.py index 7cf82132e64..5b9855d135d 100644 --- a/autoscaling_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -179,75 +179,75 @@ def start_or_cancel_instance_refresh(conn, module): } """ - asg_state = module.params.get('state') - asg_name = module.params.get('name') - preferences = module.params.get('preferences') + asg_state = module.params.get("state") + asg_name = module.params.get("name") + preferences = module.params.get("preferences") args = {} - args['AutoScalingGroupName'] = asg_name 
- if asg_state == 'started': - args['Strategy'] = module.params.get('strategy') + args["AutoScalingGroupName"] = asg_name + if asg_state == "started": + args["Strategy"] = module.params.get("strategy") if preferences: - if asg_state == 'cancelled': - module.fail_json(msg='can not pass preferences dict when canceling a refresh') + if asg_state == "cancelled": + module.fail_json(msg="can not pass preferences dict when canceling a refresh") _prefs = scrub_none_parameters(preferences) - args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) + args["Preferences"] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) cmd_invocations = { - 'cancelled': conn.cancel_instance_refresh, - 'started': conn.start_instance_refresh, + "cancelled": conn.cancel_instance_refresh, + "started": conn.start_instance_refresh, } try: if module.check_mode: - if asg_state == 'started': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]') + if asg_state == "started": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + ) if ongoing_refresh: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.') + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh is already in progress, can not start new instance refresh.", + ) else: - module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.') - elif asg_state == 'cancelled': - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]')[0] - if ongoing_refresh.get('Status', '') in ['Cancelling', 'Cancelled']: - module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.') + module.exit_json(changed=True, msg="Would have started instance refresh if not in check mode.") + elif asg_state == "cancelled": + ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( + "InstanceRefreshes", "[]" + )[0] + if ongoing_refresh.get("Status", "") in ["Cancelling", "Cancelled"]: + module.exit_json( + changed=False, + msg="In check_mode - Instance Refresh already cancelled or is pending cancellation.", + ) elif not ongoing_refresh: - module.exit_json(chaned=False, msg='In check_mode - No active referesh found, nothing to cancel.') + module.exit_json(chaned=False, msg="In check_mode - No active referesh found, nothing to cancel.") else: - module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.') + module.exit_json(changed=True, msg="Would have cancelled instance refresh if not in check mode.") result = cmd_invocations[asg_state](aws_retry=True, **args) - instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']]) - result = dict( - instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0]) + instance_refreshes = conn.describe_instance_refreshes( + AutoScalingGroupName=asg_name, InstanceRefreshIds=[result["InstanceRefreshId"]] ) + result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) return module.exit_json(**result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg='Failed to {0} InstanceRefresh'.format( - asg_state.replace('ed', '') - ) - ) + 
module.fail_json_aws(e, msg="Failed to {0} InstanceRefresh".format(asg_state.replace("ed", ""))) def main(): - argument_spec = dict( state=dict( - type='str', + type="str", required=True, - choices=['started', 'cancelled'], + choices=["started", "cancelled"], ), name=dict(required=True), - strategy=dict( - type='str', - default='Rolling', - required=False - ), + strategy=dict(type="str", default="Rolling", required=False), preferences=dict( - type='dict', + type="dict", required=False, options=dict( - min_healthy_percentage=dict(type='int', default=90), - instance_warmup=dict(type='int'), - ) + min_healthy_percentage=dict(type="int", default=90), + instance_warmup=dict(type="int"), + ), ), ) @@ -256,15 +256,12 @@ def main(): supports_check_mode=True, ) autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff( - retries=10, - catch_extra_error_codes=['InstanceRefreshInProgress'] - ) + "autoscaling", + retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InstanceRefreshInProgress"]), ) start_or_cancel_instance_refresh(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_instance_refresh_info.py b/autoscaling_instance_refresh_info.py index 64581b46829..639940b1b77 100644 --- a/autoscaling_instance_refresh_info.py +++ b/autoscaling_instance_refresh_info.py @@ -158,51 +158,51 @@ def find_asg_instance_refreshes(conn, module): ], 'next_token': 'string' } - """ + """ - asg_name = module.params.get('name') - asg_ids = module.params.get('ids') - asg_next_token = module.params.get('next_token') - asg_max_records = module.params.get('max_records') + asg_name = module.params.get("name") + asg_ids = module.params.get("ids") + asg_next_token = module.params.get("next_token") + asg_max_records = module.params.get("max_records") args = {} - args['AutoScalingGroupName'] = asg_name + args["AutoScalingGroupName"] = asg_name if asg_ids: - args['InstanceRefreshIds'] = asg_ids + args["InstanceRefreshIds"] = asg_ids if asg_next_token: - args['NextToken'] = asg_next_token + args["NextToken"] = asg_next_token if asg_max_records: - args['MaxRecords'] = asg_max_records + args["MaxRecords"] = asg_max_records try: instance_refreshes_result = {} response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: + if "InstanceRefreshes" in response: instance_refreshes_dict = dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', '')) - instance_refreshes_result = camel_dict_to_snake_dict( - instance_refreshes_dict) + instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "") + ) + instance_refreshes_result = camel_dict_to_snake_dict(instance_refreshes_dict) - while 'NextToken' in response: - args['NextToken'] = response['NextToken'] + while "NextToken" in response: + args["NextToken"] = response["NextToken"] response = conn.describe_instance_refreshes(**args) - if 'InstanceRefreshes' in response: - instance_refreshes_dict = camel_dict_to_snake_dict(dict( - instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', ''))) + if "InstanceRefreshes" in response: + instance_refreshes_dict = camel_dict_to_snake_dict( + dict(instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "")) + ) instance_refreshes_result.update(instance_refreshes_dict) return module.exit_json(**instance_refreshes_result) except (BotoCoreError, ClientError) as e: - 
module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes') + module.fail_json_aws(e, msg="Failed to describe InstanceRefreshes") def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - ids=dict(required=False, default=[], elements='str', type='list'), - next_token=dict(required=False, default=None, type='str', no_log=True), - max_records=dict(required=False, type='int'), + name=dict(required=True, type="str"), + ids=dict(required=False, default=[], elements="str", type="list"), + next_token=dict(required=False, default=None, type="str", no_log=True), + max_records=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -210,12 +210,9 @@ def main(): supports_check_mode=True, ) - autoscaling = module.client( - 'autoscaling', - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + autoscaling = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff(retries=10)) find_asg_instance_refreshes(autoscaling, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py index b21f296ce0e..a3cd600fa70 100644 --- a/autoscaling_launch_config.py +++ b/autoscaling_launch_config.py @@ -457,176 +457,214 @@ def create_block_device_meta(module, volume): - if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if volume.get('volume_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg='io1 volumes must have an iops value set') - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg='Cannot set both ephemeral and snapshot') + if "snapshot" not in volume and "ephemeral" not in volume and "no_device" not in volume: + if "volume_size" not in volume: + module.fail_json(msg="Size must be specified when creating a new volume or modifying the root volume") + if "snapshot" in volume: + if volume.get("volume_type") == "io1" and "iops" not in volume: + module.fail_json(msg="io1 volumes must have an iops value set") + if "ephemeral" in volume: + if "snapshot" in volume: + module.fail_json(msg="Cannot set both ephemeral and snapshot") return_object = {} - if 'ephemeral' in volume: - return_object['VirtualName'] = volume.get('ephemeral') + if "ephemeral" in volume: + return_object["VirtualName"] = volume.get("ephemeral") - if 'device_name' in volume: - return_object['DeviceName'] = volume.get('device_name') + if "device_name" in volume: + return_object["DeviceName"] = volume.get("device_name") - if 'no_device' in volume: - return_object['NoDevice'] = volume.get('no_device') + if "no_device" in volume: + return_object["NoDevice"] = volume.get("no_device") - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']): - return_object['Ebs'] = {} + if any( + key in volume + for key in [ + "snapshot", + "volume_size", + "volume_type", + "delete_on_termination", + "iops", + "throughput", + "encrypted", + ] + ): + return_object["Ebs"] = {} - if 'snapshot' in volume: - return_object['Ebs']['SnapshotId'] = volume.get('snapshot') + if "snapshot" in volume: + return_object["Ebs"]["SnapshotId"] = volume.get("snapshot") - if 'volume_size' in volume: - return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0)) + if "volume_size" in volume: + return_object["Ebs"]["VolumeSize"] = 
int(volume.get("volume_size", 0)) - if 'volume_type' in volume: - return_object['Ebs']['VolumeType'] = volume.get('volume_type') + if "volume_type" in volume: + return_object["Ebs"]["VolumeType"] = volume.get("volume_type") - if 'delete_on_termination' in volume: - return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False) + if "delete_on_termination" in volume: + return_object["Ebs"]["DeleteOnTermination"] = volume.get("delete_on_termination", False) - if 'iops' in volume: - return_object['Ebs']['Iops'] = volume.get('iops') + if "iops" in volume: + return_object["Ebs"]["Iops"] = volume.get("iops") - if 'throughput' in volume: - if volume.get('volume_type') != 'gp3': - module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.') - return_object['Ebs']['Throughput'] = volume.get('throughput') + if "throughput" in volume: + if volume.get("volume_type") != "gp3": + module.fail_json(msg="The throughput parameter is supported only for GP3 volumes.") + return_object["Ebs"]["Throughput"] = volume.get("throughput") - if 'encrypted' in volume: - return_object['Ebs']['Encrypted'] = volume.get('encrypted') + if "encrypted" in volume: + return_object["Ebs"]["Encrypted"] = volume.get("encrypted") return return_object def create_launch_config(connection, module): - name = module.params.get('name') - vpc_id = module.params.get('vpc_id') + name = module.params.get("name") + vpc_id = module.params.get("vpc_id") try: - ec2_connection = module.client('ec2') + ec2_connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") try: - security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True) + security_groups = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get Security Group IDs') + module.fail_json_aws(e, msg="Failed to get Security Group IDs") except ValueError as e: module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) - user_data = module.params.get('user_data') - user_data_path = module.params.get('user_data_path') - volumes = module.params['volumes'] - instance_monitoring = module.params.get('instance_monitoring') - assign_public_ip = module.params.get('assign_public_ip') - instance_profile_name = module.params.get('instance_profile_name') - ebs_optimized = module.params.get('ebs_optimized') - classic_link_vpc_id = module.params.get('classic_link_vpc_id') - classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') + user_data = module.params.get("user_data") + user_data_path = module.params.get("user_data_path") + volumes = module.params["volumes"] + instance_monitoring = module.params.get("instance_monitoring") + assign_public_ip = module.params.get("assign_public_ip") + instance_profile_name = module.params.get("instance_profile_name") + ebs_optimized = module.params.get("ebs_optimized") + classic_link_vpc_id = module.params.get("classic_link_vpc_id") + classic_link_vpc_security_groups = module.params.get("classic_link_vpc_security_groups") block_device_mapping = [] - convert_list = ['image_id', 'instance_type', 'instance_type', 'instance_id', 
'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price'] - - launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list))) + convert_list = [ + "image_id", + "instance_type", + "instance_id", + "placement_tenancy", + "key_name", + "kernel_id", + "ramdisk_id", + "spot_price", + ] + + launch_config = snake_dict_to_camel_dict( + dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list) + ) if user_data_path: try: - with open(user_data_path, 'r') as user_data_file: + with open(user_data_path, "r") as user_data_file: user_data = user_data_file.read() except IOError as e: module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc()) if volumes: for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg='Device name must be set for volume') + if "device_name" not in volume: + module.fail_json(msg="Device name must be set for volume") # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: + if "volume_size" not in volume or int(volume["volume_size"]) > 0: block_device_mapping.append(create_block_device_meta(module, volume)) try: - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configuration by name") changed = False result = {} - launch_config['LaunchConfigurationName'] = name + launch_config["LaunchConfigurationName"] = name if security_groups is not None: - launch_config['SecurityGroups'] = security_groups + launch_config["SecurityGroups"] = security_groups if classic_link_vpc_id is not None: - launch_config['ClassicLinkVPCId'] = classic_link_vpc_id + launch_config["ClassicLinkVPCId"] = classic_link_vpc_id if instance_monitoring is not None: - launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring} + launch_config["InstanceMonitoring"] = {"Enabled": instance_monitoring} if classic_link_vpc_security_groups is not None: - launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups + launch_config["ClassicLinkVPCSecurityGroups"] = classic_link_vpc_security_groups if block_device_mapping: - launch_config['BlockDeviceMappings'] = block_device_mapping + launch_config["BlockDeviceMappings"] = block_device_mapping if instance_profile_name is not None: - launch_config['IamInstanceProfile'] = instance_profile_name + launch_config["IamInstanceProfile"] = instance_profile_name if assign_public_ip is not None: - launch_config['AssociatePublicIpAddress'] = assign_public_ip + launch_config["AssociatePublicIpAddress"] = assign_public_ip if user_data is not None: - launch_config['UserData'] = user_data + launch_config["UserData"] = user_data if ebs_optimized is not None: - launch_config['EbsOptimized'] = ebs_optimized + launch_config["EbsOptimized"] = ebs_optimized if len(launch_configs) == 0: try: connection.create_launch_configuration(**launch_config) - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + launch_configs =
connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) changed = True if launch_configs: launch_config = launch_configs[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create launch configuration") - result = (dict((k, v) for k, v in launch_config.items() - if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings'])) + result = dict( + (k, v) + for k, v in launch_config.items() + if k not in ["Connection", "CreatedTime", "InstanceMonitoring", "BlockDeviceMappings"] + ) - result['CreatedTime'] = to_text(launch_config.get('CreatedTime')) + result["CreatedTime"] = to_text(launch_config.get("CreatedTime")) try: - result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled')) + result["InstanceMonitoring"] = module.boolean(launch_config.get("InstanceMonitoring").get("Enabled")) except AttributeError: - result['InstanceMonitoring'] = False - - result['BlockDeviceMappings'] = [] - - for block_device_mapping in launch_config.get('BlockDeviceMappings', []): - result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName'))) - if block_device_mapping.get('Ebs') is not None: - result['BlockDeviceMappings'][-1]['ebs'] = dict( - snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize')) + result["InstanceMonitoring"] = False + + result["BlockDeviceMappings"] = [] + + for block_device_mapping in launch_config.get("BlockDeviceMappings", []): + result["BlockDeviceMappings"].append( + dict( + device_name=block_device_mapping.get("DeviceName"), virtual_name=block_device_mapping.get("VirtualName") + ) + ) + if block_device_mapping.get("Ebs") is not None: + result["BlockDeviceMappings"][-1]["ebs"] = dict( + snapshot_id=block_device_mapping.get("Ebs").get("SnapshotId"), + volume_size=block_device_mapping.get("Ebs").get("VolumeSize"), + ) if user_data_path: - result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal + result["UserData"] = "hidden" # Otherwise, we dump binary to the user's terminal return_object = { - 'Name': result.get('LaunchConfigurationName'), - 'CreatedTime': result.get('CreatedTime'), - 'ImageId': result.get('ImageId'), - 'Arn': result.get('LaunchConfigurationARN'), - 'SecurityGroups': result.get('SecurityGroups'), - 'InstanceType': result.get('InstanceType'), - 'Result': result + "Name": result.get("LaunchConfigurationName"), + "CreatedTime": result.get("CreatedTime"), + "ImageId": result.get("ImageId"), + "Arn": result.get("LaunchConfigurationARN"), + "SecurityGroups": result.get("SecurityGroups"), + "InstanceType": result.get("InstanceType"), + "Result": result, } module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object)) @@ -634,10 +672,14 @@ def create_launch_config(connection, module): def delete_launch_config(connection, module): try: - name = module.params.get('name') - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations') + name = module.params.get("name") + launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( + "LaunchConfigurations" + ) if launch_configs: - connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName')) + 
connection.delete_launch_configuration( + LaunchConfigurationName=launch_configs[0].get("LaunchConfigurationName") + ) module.exit_json(changed=True) else: module.exit_json(changed=False) @@ -651,42 +693,42 @@ def main(): image_id=dict(), instance_id=dict(), key_name=dict(), - security_groups=dict(default=[], type='list', elements='str'), + security_groups=dict(default=[], type="list", elements="str"), user_data=dict(), - user_data_path=dict(type='path'), + user_data_path=dict(type="path"), kernel_id=dict(), - volumes=dict(type='list', elements='dict'), + volumes=dict(type="list", elements="dict"), instance_type=dict(), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), + state=dict(default="present", choices=["present", "absent"]), + spot_price=dict(type="float"), ramdisk_id=dict(), instance_profile_name=dict(), - ebs_optimized=dict(default=False, type='bool'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool'), - classic_link_vpc_security_groups=dict(type='list', elements='str'), + ebs_optimized=dict(default=False, type="bool"), + instance_monitoring=dict(default=False, type="bool"), + assign_public_ip=dict(type="bool"), + classic_link_vpc_security_groups=dict(type="list", elements="str"), classic_link_vpc_id=dict(), vpc_id=dict(), - placement_tenancy=dict(choices=['default', 'dedicated']) + placement_tenancy=dict(choices=["default", "dedicated"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['user_data', 'user_data_path']], + mutually_exclusive=[["user_data", "user_data_path"]], ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="unable to establish connection") - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": create_launch_config(connection, module) - elif state == 'absent': + elif state == "absent": delete_launch_config(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_launch_config_find.py b/autoscaling_launch_config_find.py index 8f3ca14bec3..037c21ed9f9 100644 --- a/autoscaling_launch_config_find.py +++ b/autoscaling_launch_config_find.py @@ -141,50 +141,46 @@ def find_launch_configs(client, module): - name_regex = module.params.get('name_regex') - sort_order = module.params.get('sort_order') - limit = module.params.get('limit') + name_regex = module.params.get("name_regex") + sort_order = module.params.get("sort_order") + limit = module.params.get("limit") - paginator = client.get_paginator('describe_launch_configurations') + paginator = client.get_paginator("describe_launch_configurations") - response_iterator = paginator.paginate( - PaginationConfig={ - 'MaxItems': 1000, - 'PageSize': 100 - } - ) + response_iterator = paginator.paginate(PaginationConfig={"MaxItems": 1000, "PageSize": 100}) results = [] for response in response_iterator: - response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']), - response['LaunchConfigurations']) + response["LaunchConfigurations"] = filter( + lambda lc: re.compile(name_regex).match(lc["LaunchConfigurationName"]), response["LaunchConfigurations"] + ) - for lc in response['LaunchConfigurations']: + for lc in response["LaunchConfigurations"]: data = { - 'name': 
lc['LaunchConfigurationName'], - 'arn': lc['LaunchConfigurationARN'], - 'created_time': lc['CreatedTime'], - 'user_data': lc['UserData'], - 'instance_type': lc['InstanceType'], - 'image_id': lc['ImageId'], - 'ebs_optimized': lc['EbsOptimized'], - 'instance_monitoring': lc['InstanceMonitoring'], - 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'], - 'block_device_mappings': lc['BlockDeviceMappings'], - 'keyname': lc['KeyName'], - 'security_groups': lc['SecurityGroups'], - 'kernel_id': lc['KernelId'], - 'ram_disk_id': lc['RamdiskId'], - 'associate_public_address': lc.get('AssociatePublicIpAddress', False), + "name": lc["LaunchConfigurationName"], + "arn": lc["LaunchConfigurationARN"], + "created_time": lc["CreatedTime"], + "user_data": lc["UserData"], + "instance_type": lc["InstanceType"], + "image_id": lc["ImageId"], + "ebs_optimized": lc["EbsOptimized"], + "instance_monitoring": lc["InstanceMonitoring"], + "classic_link_vpc_security_groups": lc["ClassicLinkVPCSecurityGroups"], + "block_device_mappings": lc["BlockDeviceMappings"], + "keyname": lc["KeyName"], + "security_groups": lc["SecurityGroups"], + "kernel_id": lc["KernelId"], + "ram_disk_id": lc["RamdiskId"], + "associate_public_address": lc.get("AssociatePublicIpAddress", False), } results.append(data) - results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending')) + results.sort(key=lambda e: e["name"], reverse=(sort_order == "descending")) if limit: - results = results[:int(limit)] + results = results[:int(limit)] # fmt: skip module.exit_json(changed=False, results=results) @@ -192,8 +188,8 @@ def find_launch_configs(client, module): def main(): argument_spec = dict( name_regex=dict(required=True), - sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), - limit=dict(required=False, type='int'), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + limit=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -201,12 +197,12 @@ def main(): ) try: - client = module.client('autoscaling') + client = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") find_launch_configs(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py index 73e8fbdd8da..f5123c2ef00 100644 --- a/autoscaling_launch_config_info.py +++ b/autoscaling_launch_config_info.py @@ -159,29 +159,28 @@ def list_launch_configs(connection, module): - launch_config_name = module.params.get("name") - sort = module.params.get('sort') - sort_order = module.params.get('sort_order') - sort_start = module.params.get('sort_start') - sort_end = module.params.get('sort_end') + sort = module.params.get("sort") + sort_order = module.params.get("sort_order") + sort_start = module.params.get("sort_start") + sort_end = module.params.get("sort_end") try: - pg = connection.get_paginator('describe_launch_configurations') + pg = connection.get_paginator("describe_launch_configurations") launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to list launch configs") snaked_launch_configs = [] - for launch_config in launch_configs['LaunchConfigurations']: + for launch_config in 
launch_configs["LaunchConfigurations"]: snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config)) for launch_config in snaked_launch_configs: - if 'CreatedTime' in launch_config: - launch_config['CreatedTime'] = str(launch_config['CreatedTime']) + if "CreatedTime" in launch_config: + launch_config["CreatedTime"] = str(launch_config["CreatedTime"]) if sort: - snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending')) + snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending")) if sort and sort_start and sort_end: snaked_launch_configs = snaked_launch_configs[sort_start:sort_end] @@ -195,13 +194,23 @@ def list_launch_configs(connection, module): def main(): argument_spec = dict( - name=dict(required=False, default=[], type='list', elements='str'), - sort=dict(required=False, default=None, - choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']), - sort_order=dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start=dict(required=False, type='int'), - sort_end=dict(required=False, type='int'), + name=dict(required=False, default=[], type="list", elements="str"), + sort=dict( + required=False, + default=None, + choices=[ + "launch_configuration_name", + "image_id", + "created_time", + "instance_type", + "kernel_id", + "ramdisk_id", + "key_name", + ], + ), + sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), + sort_start=dict(required=False, type="int"), + sort_end=dict(required=False, type="int"), ) module = AnsibleAWSModule( @@ -210,12 +219,12 @@ def main(): ) try: - connection = module.client('autoscaling') + connection = module.client("autoscaling") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_launch_configs(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_lifecycle_hook.py b/autoscaling_lifecycle_hook.py index a3b8edb499b..a77fcce0ad0 100644 --- a/autoscaling_lifecycle_hook.py +++ b/autoscaling_lifecycle_hook.py @@ -141,56 +141,58 @@ def create_lifecycle_hook(connection, module): - - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') - transition = module.params.get('transition') - role_arn = module.params.get('role_arn') - notification_target_arn = module.params.get('notification_target_arn') - notification_meta_data = module.params.get('notification_meta_data') - heartbeat_timeout = module.params.get('heartbeat_timeout') - default_result = module.params.get('default_result') + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") + transition = module.params.get("transition") + role_arn = module.params.get("role_arn") + notification_target_arn = module.params.get("notification_target_arn") + notification_meta_data = module.params.get("notification_meta_data") + heartbeat_timeout = module.params.get("heartbeat_timeout") + default_result = module.params.get("default_result") return_object = {} - return_object['changed'] = False + return_object["changed"] = False lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name, - 'LifecycleTransition': transition + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + 
"LifecycleTransition": transition, } if role_arn: - lch_params['RoleARN'] = role_arn + lch_params["RoleARN"] = role_arn if notification_target_arn: - lch_params['NotificationTargetARN'] = notification_target_arn + lch_params["NotificationTargetARN"] = notification_target_arn if notification_meta_data: - lch_params['NotificationMetadata'] = notification_meta_data + lch_params["NotificationMetadata"] = notification_meta_data if heartbeat_timeout: - lch_params['HeartbeatTimeout'] = heartbeat_timeout + lch_params["HeartbeatTimeout"] = heartbeat_timeout if default_result: - lch_params['DefaultResult'] = default_result + lch_params["DefaultResult"] = default_result try: existing_hook = connection.describe_lifecycle_hooks( AutoScalingGroupName=asg_name, - LifecycleHookNames=[lch_name] - )['LifecycleHooks'] + LifecycleHookNames=[lch_name], + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") if not existing_hook: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode." + ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -199,11 +201,14 @@ def create_lifecycle_hook(connection, module): if modified: try: if module.check_mode: - module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.") - return_object['changed'] = True + module.exit_json( + changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode." 
+ ) + return_object["changed"] = True connection.put_lifecycle_hook(**lch_params) - return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks'] + return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( + AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] + )["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create LifecycleHook") @@ -227,33 +232,37 @@ def dict_compare(d1, d2): def delete_lifecycle_hook(connection, module): - - lch_name = module.params.get('lifecycle_hook_name') - asg_name = module.params.get('autoscaling_group_name') + lch_name = module.params.get("lifecycle_hook_name") + asg_name = module.params.get("autoscaling_group_name") return_object = {} - return_object['changed'] = False + return_object["changed"] = False try: all_hooks = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name + AutoScalingGroupName=asg_name, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks") - for hook in all_hooks['LifecycleHooks']: - if hook['LifecycleHookName'] == lch_name: + for hook in all_hooks["LifecycleHooks"]: + if hook["LifecycleHookName"] == lch_name: lch_params = { - 'LifecycleHookName': lch_name, - 'AutoScalingGroupName': asg_name + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, } try: if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.") + module.exit_json( + changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode." 
+ ) connection.delete_lifecycle_hook(**lch_params) - return_object['changed'] = True - return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name} + return_object["changed"] = True + return_object["lifecycle_hook_removed"] = { + "LifecycleHookName": lch_name, + "AutoScalingGroupName": asg_name, + } except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete LifecycleHook") else: @@ -264,34 +273,36 @@ def delete_lifecycle_hook(connection, module): def main(): argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - lifecycle_hook_name=dict(required=True, type='str'), - transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']), - role_arn=dict(type='str'), - notification_target_arn=dict(type='str'), - notification_meta_data=dict(type='str'), - heartbeat_timeout=dict(type='int'), - default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + lifecycle_hook_name=dict(required=True, type="str"), + transition=dict( + type="str", choices=["autoscaling:EC2_INSTANCE_TERMINATING", "autoscaling:EC2_INSTANCE_LAUNCHING"] + ), + role_arn=dict(type="str"), + notification_target_arn=dict(type="str"), + notification_meta_data=dict(type="str"), + heartbeat_timeout=dict(type="int"), + default_result=dict(default="ABANDON", choices=["ABANDON", "CONTINUE"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[['state', 'present', ['transition']]], + required_if=[["state", "present", ["transition"]]], ) - state = module.params.get('state') + state = module.params.get("state") - connection = module.client('autoscaling') + connection = module.client("autoscaling") changed = False - if state == 'present': + if state == "present": create_lifecycle_hook(connection, module) - elif state == 'absent': + elif state == "absent": delete_lifecycle_hook(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_policy.py b/autoscaling_policy.py index b628fe7b58f..f76ce74ceb1 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -363,124 +363,132 @@ def build_target_specification(target_tracking_config): - # Initialize an empty dict() for building TargetTrackingConfiguration policies, # which will be returned targetTrackingConfig = dict() - if target_tracking_config.get('target_value'): - targetTrackingConfig['TargetValue'] = target_tracking_config['target_value'] + if target_tracking_config.get("target_value"): + targetTrackingConfig["TargetValue"] = target_tracking_config["target_value"] - if target_tracking_config.get('disable_scalein'): - targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein'] + if target_tracking_config.get("disable_scalein"): + targetTrackingConfig["DisableScaleIn"] = target_tracking_config["disable_scalein"] else: # Accounting for boto3 response - targetTrackingConfig['DisableScaleIn'] = False + targetTrackingConfig["DisableScaleIn"] = False - if target_tracking_config['predefined_metric_spec'] is not None: + if target_tracking_config["predefined_metric_spec"] is not None: # Build spec for predefined_metric_spec - targetTrackingConfig['PredefinedMetricSpecification'] = dict() - 
if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'): - targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \ - target_tracking_config['predefined_metric_spec']['predefined_metric_type'] - - if target_tracking_config['predefined_metric_spec'].get('resource_label'): - targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \ - target_tracking_config['predefined_metric_spec']['resource_label'] - - elif target_tracking_config['customized_metric_spec'] is not None: + targetTrackingConfig["PredefinedMetricSpecification"] = dict() + if target_tracking_config["predefined_metric_spec"].get("predefined_metric_type"): + targetTrackingConfig["PredefinedMetricSpecification"]["PredefinedMetricType"] = target_tracking_config[ + "predefined_metric_spec" + ]["predefined_metric_type"] + + if target_tracking_config["predefined_metric_spec"].get("resource_label"): + targetTrackingConfig["PredefinedMetricSpecification"]["ResourceLabel"] = target_tracking_config[ + "predefined_metric_spec" + ]["resource_label"] + + elif target_tracking_config["customized_metric_spec"] is not None: # Build spec for customized_metric_spec - targetTrackingConfig['CustomizedMetricSpecification'] = dict() - if target_tracking_config['customized_metric_spec'].get('metric_name'): - targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \ - target_tracking_config['customized_metric_spec']['metric_name'] - - if target_tracking_config['customized_metric_spec'].get('namespace'): - targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \ - target_tracking_config['customized_metric_spec']['namespace'] - - if target_tracking_config['customized_metric_spec'].get('dimensions'): - targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \ - target_tracking_config['customized_metric_spec']['dimensions'] - - if target_tracking_config['customized_metric_spec'].get('statistic'): - targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \ - target_tracking_config['customized_metric_spec']['statistic'] - - if target_tracking_config['customized_metric_spec'].get('unit'): - targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \ - target_tracking_config['customized_metric_spec']['unit'] + targetTrackingConfig["CustomizedMetricSpecification"] = dict() + if target_tracking_config["customized_metric_spec"].get("metric_name"): + targetTrackingConfig["CustomizedMetricSpecification"]["MetricName"] = target_tracking_config[ + "customized_metric_spec" + ]["metric_name"] + + if target_tracking_config["customized_metric_spec"].get("namespace"): + targetTrackingConfig["CustomizedMetricSpecification"]["Namespace"] = target_tracking_config[ + "customized_metric_spec" + ]["namespace"] + + if target_tracking_config["customized_metric_spec"].get("dimensions"): + targetTrackingConfig["CustomizedMetricSpecification"]["Dimensions"] = target_tracking_config[ + "customized_metric_spec" + ]["dimensions"] + + if target_tracking_config["customized_metric_spec"].get("statistic"): + targetTrackingConfig["CustomizedMetricSpecification"]["Statistic"] = target_tracking_config[ + "customized_metric_spec" + ]["statistic"] + + if target_tracking_config["customized_metric_spec"].get("unit"): + targetTrackingConfig["CustomizedMetricSpecification"]["Unit"] = target_tracking_config[ + "customized_metric_spec" + ]["unit"] return targetTrackingConfig def create_scaling_policy(connection, module): changed = False - asg_name = module.params['asg_name'] - 
policy_type = module.params['policy_type'] - policy_name = module.params['name'] - - if policy_type == 'TargetTrackingScaling': - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name) + asg_name = module.params["asg_name"] + policy_type = module.params["policy_type"] + policy_name = module.params["name"] + + if policy_type == "TargetTrackingScaling": + params = dict(PolicyName=policy_name, PolicyType=policy_type, AutoScalingGroupName=asg_name) else: - params = dict(PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name, - AdjustmentType=module.params['adjustment_type']) + params = dict( + PolicyName=policy_name, + PolicyType=policy_type, + AutoScalingGroupName=asg_name, + AdjustmentType=module.params["adjustment_type"], + ) # min_adjustment_step attribute is only relevant if the adjustment_type # is set to percentage change in capacity, so it is a special case - if module.params['adjustment_type'] == 'PercentChangeInCapacity': - if module.params['min_adjustment_step']: - params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step'] + if module.params["adjustment_type"] == "PercentChangeInCapacity": + if module.params["min_adjustment_step"]: + params["MinAdjustmentMagnitude"] = module.params["min_adjustment_step"] - if policy_type == 'SimpleScaling': + if policy_type == "SimpleScaling": # can't use required_if because it doesn't allow multiple criteria - # it's only required if policy is SimpleScaling and state is present - if not module.params['scaling_adjustment']: - module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling ' - 'and state is present') - params['ScalingAdjustment'] = module.params['scaling_adjustment'] - if module.params['cooldown']: - params['Cooldown'] = module.params['cooldown'] - - elif policy_type == 'StepScaling': - if not module.params['step_adjustments']: - module.fail_json(msg='step_adjustments is required when policy_type is StepScaling ' - 'and state is present') - params['StepAdjustments'] = [] - for step_adjustment in module.params['step_adjustments']: - step_adjust_params = dict( - ScalingAdjustment=step_adjustment['scaling_adjustment']) - if step_adjustment.get('lower_bound'): - step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound'] - if step_adjustment.get('upper_bound'): - step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound'] - params['StepAdjustments'].append(step_adjust_params) - if module.params['metric_aggregation']: - params['MetricAggregationType'] = module.params['metric_aggregation'] - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] - - elif policy_type == 'TargetTrackingScaling': - if not module.params['target_tracking_config']: - module.fail_json(msg='target_tracking_config is required when policy_type is ' - 'TargetTrackingScaling and state is present') + if not module.params["scaling_adjustment"]: + module.fail_json( + msg="scaling_adjustment is required when policy_type is SimpleScaling " "and state is present" + ) + params["ScalingAdjustment"] = module.params["scaling_adjustment"] + if module.params["cooldown"]: + params["Cooldown"] = module.params["cooldown"] + + elif policy_type == "StepScaling": + if not module.params["step_adjustments"]: + module.fail_json(msg="step_adjustments is required when policy_type is StepScaling " "and state is present") + params["StepAdjustments"] = [] + for step_adjustment in
module.params["step_adjustments"]: + step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) + if step_adjustment.get("lower_bound"): + step_adjust_params["MetricIntervalLowerBound"] = step_adjustment["lower_bound"] + if step_adjustment.get("upper_bound"): + step_adjust_params["MetricIntervalUpperBound"] = step_adjustment["upper_bound"] + params["StepAdjustments"].append(step_adjust_params) + if module.params["metric_aggregation"]: + params["MetricAggregationType"] = module.params["metric_aggregation"] + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] + + elif policy_type == "TargetTrackingScaling": + if not module.params["target_tracking_config"]: + module.fail_json( + msg="target_tracking_config is required when policy_type is " + "TargetTrackingScaling and state is present" + ) else: - params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config')) - if module.params['estimated_instance_warmup']: - params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup'] + params["TargetTrackingConfiguration"] = build_target_specification( + module.params.get("target_tracking_config") + ) + if module.params["estimated_instance_warmup"]: + params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] # Ensure idempotency with policies try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) before = after = {} if not policies: @@ -500,41 +508,39 @@ def create_scaling_policy(connection, module): module.fail_json_aws(e, msg="Failed to create autoscaling policy") try: - policies = connection.describe_policies(aws_retry=True, - AutoScalingGroupName=asg_name, - PolicyNames=[policy_name])['ScalingPolicies'] + policies = connection.describe_policies( + aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] + )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values - policy['arn'] = policy['policy_arn'] - policy['as_name'] = policy['auto_scaling_group_name'] - policy['name'] = policy['policy_name'] + policy["arn"] = policy["policy_arn"] + policy["as_name"] = policy["auto_scaling_group_name"] + policy["name"] = policy["policy_name"] if before and after: - module.exit_json(changed=changed, diff=dict( - before=before, after=after), **policy) + module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) else: module.exit_json(changed=changed, **policy) def delete_scaling_policy(connection, module): - policy_name = module.params.get('name') + policy_name = module.params.get("name") try: - policy = connection.describe_policies( - aws_retry=True, PolicyNames=[policy_name]) + policy = 
connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) - if policy['ScalingPolicies']: + if policy["ScalingPolicies"]: try: - connection.delete_policy(aws_retry=True, - AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'], - PolicyName=policy_name) + connection.delete_policy( + aws_retry=True, + AutoScalingGroupName=policy["ScalingPolicies"][0]["AutoScalingGroupName"], + PolicyName=policy_name, + ) module.exit_json(changed=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to delete autoscaling policy") @@ -544,65 +550,62 @@ def delete_scaling_policy(connection, module): def main(): step_adjustment_spec = dict( - lower_bound=dict(type='int'), - upper_bound=dict(type='int'), - scaling_adjustment=dict(type='int', required=True) + lower_bound=dict(type="int"), upper_bound=dict(type="int"), scaling_adjustment=dict(type="int", required=True) ) predefined_metric_spec = dict( - predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization', - 'ASGAverageNetworkIn', - 'ASGAverageNetworkOut', - 'ALBRequestCountPerTarget'], required=True), - resource_label=dict(type='str') + predefined_metric_type=dict( + type="str", + choices=[ + "ASGAverageCPUUtilization", + "ASGAverageNetworkIn", + "ASGAverageNetworkOut", + "ALBRequestCountPerTarget", + ], + required=True, + ), + resource_label=dict(type="str"), ) customized_metric_spec = dict( - metric_name=dict(type='str', required=True), - namespace=dict(type='str', required=True), - statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']), - dimensions=dict(type='list', elements='dict'), - unit=dict(type='str') + metric_name=dict(type="str", required=True), + namespace=dict(type="str", required=True), + statistic=dict(type="str", required=True, choices=["Average", "Minimum", "Maximum", "SampleCount", "Sum"]), + dimensions=dict(type="list", elements="dict"), + unit=dict(type="str"), ) target_tracking_spec = dict( - disable_scalein=dict(type='bool'), - target_value=dict(type='float', required=True), - predefined_metric_spec=dict(type='dict', - options=predefined_metric_spec), - customized_metric_spec=dict(type='dict', - options=customized_metric_spec) + disable_scalein=dict(type="bool"), + target_value=dict(type="float", required=True), + predefined_metric_spec=dict(type="dict", options=predefined_metric_spec), + customized_metric_spec=dict(type="dict", options=customized_metric_spec), ) argument_spec = dict( name=dict(required=True), - adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']), + adjustment_type=dict(choices=["ChangeInCapacity", "ExactCapacity", "PercentChangeInCapacity"]), asg_name=dict(), - scaling_adjustment=dict(type='int'), - min_adjustment_step=dict(type='int'), - cooldown=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - metric_aggregation=dict(default='Average', choices=[ - 'Minimum', 'Maximum', 'Average']), - policy_type=dict(default='SimpleScaling', choices=[ - 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']), - target_tracking_config=dict(type='dict', options=target_tracking_spec), - step_adjustments=dict( - type='list', 
options=step_adjustment_spec, elements='dict'), - estimated_instance_warmup=dict(type='int') + scaling_adjustment=dict(type="int"), + min_adjustment_step=dict(type="int"), + cooldown=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + metric_aggregation=dict(default="Average", choices=["Minimum", "Maximum", "Average"]), + policy_type=dict(default="SimpleScaling", choices=["SimpleScaling", "StepScaling", "TargetTrackingScaling"]), + target_tracking_config=dict(type="dict", options=target_tracking_spec), + step_adjustments=dict(type="list", options=step_adjustment_spec, elements="dict"), + estimated_instance_warmup=dict(type="int"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['asg_name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["asg_name"]]]) - connection = module.client( - 'autoscaling', retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') + connection = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) + state = module.params.get("state") - if state == 'present': + if state == "present": create_scaling_policy(connection, module) - elif state == 'absent': + elif state == "absent": delete_scaling_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/autoscaling_scheduled_action.py b/autoscaling_scheduled_action.py index bf0d4bcc44f..9bfb70b8330 100644 --- a/autoscaling_scheduled_action.py +++ b/autoscaling_scheduled_action.py @@ -171,29 +171,29 @@ def format_request(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name'), - Recurrence=module.params.get('recurrence') + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), + Recurrence=module.params.get("recurrence"), ) # Some of these params are optional - if module.params.get('desired_capacity') is not None: - params['DesiredCapacity'] = module.params.get('desired_capacity') + if module.params.get("desired_capacity") is not None: + params["DesiredCapacity"] = module.params.get("desired_capacity") - if module.params.get('min_size') is not None: - params['MinSize'] = module.params.get('min_size') + if module.params.get("min_size") is not None: + params["MinSize"] = module.params.get("min_size") - if module.params.get('max_size') is not None: - params['MaxSize'] = module.params.get('max_size') + if module.params.get("max_size") is not None: + params["MaxSize"] = module.params.get("max_size") - if module.params.get('time_zone') is not None: - params['TimeZone'] = module.params.get('time_zone') + if module.params.get("time_zone") is not None: + params["TimeZone"] = module.params.get("time_zone") - if module.params.get('start_time') is not None: - params['StartTime'] = module.params.get('start_time') + if module.params.get("start_time") is not None: + params["StartTime"] = module.params.get("start_time") - if module.params.get('end_time') is not None: - params['EndTime'] = module.params.get('end_time') + if module.params.get("end_time") is not None: + params["EndTime"] = module.params.get("end_time") return params @@ -206,8 +206,8 @@ def delete_scheduled_action(current_actions): return True params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionName=module.params.get('scheduled_action_name') + 
AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionName=module.params.get("scheduled_action_name"), ) try: @@ -220,8 +220,8 @@ def delete_scheduled_action(current_actions): def get_scheduled_actions(): params = dict( - AutoScalingGroupName=module.params.get('autoscaling_group_name'), - ScheduledActionNames=[module.params.get('scheduled_action_name')] + AutoScalingGroupName=module.params.get("autoscaling_group_name"), + ScheduledActionNames=[module.params.get("scheduled_action_name")], ) try: @@ -271,55 +271,53 @@ def main(): global client argument_spec = dict( - autoscaling_group_name=dict(required=True, type='str'), - scheduled_action_name=dict(required=True, type='str'), - start_time=dict(default=None, type='str'), - end_time=dict(default=None, type='str'), - time_zone=dict(default=None, type='str'), - recurrence=dict(type='str'), - min_size=dict(default=None, type='int'), - max_size=dict(default=None, type='int'), - desired_capacity=dict(default=None, type='int'), - state=dict(default='present', choices=['present', 'absent']) + autoscaling_group_name=dict(required=True, type="str"), + scheduled_action_name=dict(required=True, type="str"), + start_time=dict(default=None, type="str"), + end_time=dict(default=None, type="str"), + time_zone=dict(default=None, type="str"), + recurrence=dict(type="str"), + min_size=dict(default=None, type="int"), + max_size=dict(default=None, type="int"), + desired_capacity=dict(default=None, type="int"), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[['state', 'present', ['recurrence']]], - supports_check_mode=True + argument_spec=argument_spec, required_if=[["state", "present", ["recurrence"]]], supports_check_mode=True ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil is required for this module') + module.fail_json(msg="dateutil is required for this module") if not module.botocore_at_least("1.20.24"): - module.fail_json(msg='botocore version >= 1.20.24 is required for this module') + module.fail_json(msg="botocore version >= 1.20.24 is required for this module") - client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) current_actions = get_scheduled_actions() - state = module.params.get('state') + state = module.params.get("state") results = dict() - if state == 'present': + if state == "present": changed = put_scheduled_update_group_action(current_actions) if not module.check_mode: updated_action = get_scheduled_actions()[0] results = dict( - scheduled_action_name=updated_action.get('ScheduledActionName'), - start_time=updated_action.get('StartTime'), - end_time=updated_action.get('EndTime'), - time_zone=updated_action.get('TimeZone'), - recurrence=updated_action.get('Recurrence'), - min_size=updated_action.get('MinSize'), - max_size=updated_action.get('MaxSize'), - desired_capacity=updated_action.get('DesiredCapacity') + scheduled_action_name=updated_action.get("ScheduledActionName"), + start_time=updated_action.get("StartTime"), + end_time=updated_action.get("EndTime"), + time_zone=updated_action.get("TimeZone"), + recurrence=updated_action.get("Recurrence"), + min_size=updated_action.get("MinSize"), + max_size=updated_action.get("MaxSize"), + desired_capacity=updated_action.get("DesiredCapacity"), ) else: changed = delete_scheduled_action(current_actions) - results['changed'] = changed + results["changed"] = 
changed module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/aws_region_info.py b/aws_region_info.py index 837e9326552..a268c13b3c8 100644 --- a/aws_region_info.py +++ b/aws_region_info.py @@ -70,30 +70,29 @@ def main(): argument_spec = dict( - filters=dict(default={}, type='dict') + filters=dict(default={}, type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) # Replace filter key underscores with dashes, for compatibility - sanitized_filters = dict(module.params.get('filters')) - for k in module.params.get('filters').keys(): + sanitized_filters = dict(module.params.get("filters")) + for k in module.params.get("filters").keys(): if "_" in k: - sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + sanitized_filters[k.replace("_", "-")] = sanitized_filters[k] del sanitized_filters[k] try: regions = connection.describe_regions( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to describe regions.") - module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']]) + module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions["Regions"]]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/batch_compute_environment.py b/batch_compute_environment.py index 79123501992..ffc1f19b003 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -242,6 +242,7 @@ # # --------------------------------------------------------------------------------------------------- + def set_api_params(module, module_params): """ Sets module parameters to those expected by the boto3 API. @@ -262,18 +263,19 @@ def validate_params(module): :return: """ - compute_environment_name = module.params['compute_environment_name'] + compute_environment_name = module.params["compute_environment_name"] # validate compute environment name - if not re.search(r'^[\w\_:]+$', compute_environment_name): + if not re.search(r"^[\w\_:]+$", compute_environment_name): module.fail_json( msg="Function compute_environment_name {0} is invalid. 
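# The underscore-to-dash sanitisation plus ansible_dict_to_boto3_filter_list in
# the aws_region_info hunk above amount to the following transformation
# (simplified re-implementation for illustration; scalar values get wrapped in
# lists the way EC2 filters expect):
def to_boto3_filters(filters):
    result = []
    for key, value in filters.items():
        values = value if isinstance(value, list) else [value]
        result.append({"Name": key.replace("_", "-"), "Values": values})
    return result

# to_boto3_filters({"region_name": "us-east-1"})
# -> [{'Name': 'region-name', 'Values': ['us-east-1']}]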
Names must contain only alphanumeric characters " - "and underscores.".format(compute_environment_name) + "and underscores.".format(compute_environment_name) ) - if not compute_environment_name.startswith('arn:aws:batch:'): + if not compute_environment_name.startswith("arn:aws:batch:"): if len(compute_environment_name) > 128: - module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit' - .format(compute_environment_name)) + module.fail_json( + msg='compute_environment_name "{0}" exceeds 128 character limit'.format(compute_environment_name) + ) return @@ -284,13 +286,14 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_compute_environment(module, client): try: environments = client.describe_compute_environments( - computeEnvironments=[module.params['compute_environment_name']] + computeEnvironments=[module.params["compute_environment_name"]] ) - if len(environments['computeEnvironments']) > 0: - return environments['computeEnvironments'][0] + if len(environments["computeEnvironments"]) > 0: + return environments["computeEnvironments"][0] else: return None except ClientError: @@ -299,42 +302,52 @@ def get_current_compute_environment(module, client): def create_compute_environment(module, client): """ - Adds a Batch compute environment + Adds a Batch compute environment - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ( - 'compute_environment_name', 'type', 'service_role') + params = ("compute_environment_name", "type", "service_role") api_params = set_api_params(module, params) - if module.params['compute_environment_state'] is not None: - api_params['state'] = module.params['compute_environment_state'] - - compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets', - 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage', - 'spot_iam_fleet_role') + if module.params["compute_environment_state"] is not None: + api_params["state"] = module.params["compute_environment_state"] + + compute_resources_param_list = ( + "minv_cpus", + "maxv_cpus", + "desiredv_cpus", + "instance_types", + "image_id", + "subnets", + "security_group_ids", + "ec2_key_pair", + "instance_role", + "tags", + "bid_percentage", + "spot_iam_fleet_role", + ) compute_resources_params = set_api_params(module, compute_resources_param_list) - if module.params['compute_resource_type'] is not None: - compute_resources_params['type'] = module.params['compute_resource_type'] + if module.params["compute_resource_type"] is not None: + compute_resources_params["type"] = module.params["compute_resource_type"] # if module.params['minv_cpus'] is not None: # compute_resources_params['minvCpus'] = module.params['minv_cpus'] - api_params['computeResources'] = compute_resources_params + api_params["computeResources"] = compute_resources_params try: if not module.check_mode: client.create_compute_environment(**api_params) changed = True except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating compute environment") return changed @@ -351,29 +364,29 @@ def remove_compute_environment(module, client): changed = False # set API parameters - api_params = {'computeEnvironment': module.params['compute_environment_name']} + api_params = 
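# set_api_params() itself sits outside this hunk; a plausible reading, given
# how it is called here, is that it selects the named module parameters and
# camelCases the keys for boto3. A hypothetical equivalent:
def set_api_params_sketch(params, names):
    def camelize(name):
        head, *rest = name.split("_")
        return head + "".join(part.capitalize() for part in rest)
    return {camelize(n): params[n] for n in names if params.get(n) is not None}

# set_api_params_sketch(
#     {"compute_environment_name": "ce1", "type": "MANAGED", "service_role": "roleA"},
#     ("compute_environment_name", "type", "service_role"),
# )
# -> {'computeEnvironmentName': 'ce1', 'type': 'MANAGED', 'serviceRole': 'roleA'}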
{"computeEnvironment": module.params["compute_environment_name"]} try: if not module.check_mode: client.delete_compute_environment(**api_params) changed = True except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Error removing compute environment') + module.fail_json_aws(e, msg="Error removing compute environment") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - compute_environment_state = module.params['compute_environment_state'] - compute_environment_name = module.params['compute_environment_name'] - service_role = module.params['service_role'] - minv_cpus = module.params['minv_cpus'] - maxv_cpus = module.params['maxv_cpus'] - desiredv_cpus = module.params['desiredv_cpus'] - action_taken = 'none' - update_env_response = '' + current_state = "absent" + state = module.params["state"] + compute_environment_state = module.params["compute_environment_state"] + compute_environment_name = module.params["compute_environment_name"] + service_role = module.params["service_role"] + minv_cpus = module.params["minv_cpus"] + maxv_cpus = module.params["maxv_cpus"] + desiredv_cpus = module.params["desiredv_cpus"] + action_taken = "none" + update_env_response = "" check_mode = module.check_mode @@ -381,37 +394,40 @@ def manage_state(module, client): current_compute_environment = get_current_compute_environment(module, client) response = current_compute_environment if current_compute_environment: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Compute Environment configuration - compute_kwargs = {'computeEnvironment': compute_environment_name} + compute_kwargs = {"computeEnvironment": compute_environment_name} # Update configuration if needed compute_resources = {} - if compute_environment_state and current_compute_environment['state'] != compute_environment_state: - compute_kwargs.update({'state': compute_environment_state}) + if compute_environment_state and current_compute_environment["state"] != compute_environment_state: + compute_kwargs.update({"state": compute_environment_state}) updates = True - if service_role and current_compute_environment['serviceRole'] != service_role: - compute_kwargs.update({'serviceRole': service_role}) + if service_role and current_compute_environment["serviceRole"] != service_role: + compute_kwargs.update({"serviceRole": service_role}) updates = True - if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus: - compute_resources['minvCpus'] = minv_cpus - if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus: - compute_resources['maxvCpus'] = maxv_cpus - if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus: - compute_resources['desiredvCpus'] = desiredv_cpus + if minv_cpus is not None and current_compute_environment["computeResources"]["minvCpus"] != minv_cpus: + compute_resources["minvCpus"] = minv_cpus + if maxv_cpus is not None and current_compute_environment["computeResources"]["maxvCpus"] != maxv_cpus: + compute_resources["maxvCpus"] = maxv_cpus + if ( + desiredv_cpus is not None + and current_compute_environment["computeResources"]["desiredvCpus"] != desiredv_cpus + ): + compute_resources["desiredvCpus"] = desiredv_cpus if len(compute_resources) > 0: - 
compute_kwargs['computeResources'] = compute_resources + compute_kwargs["computeResources"] = compute_resources updates = True if updates: try: if not check_mode: update_env_response = client.update_compute_environment(**compute_kwargs) if not update_env_response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after updating") changed = True action_taken = "updated" except (BotoCoreError, ClientError) as e: @@ -421,15 +437,15 @@ def manage_state(module, client): # Create Batch Compute Environment changed = create_compute_environment(module, client) # Describe compute environment - action_taken = 'added' + action_taken = "added" response = get_current_compute_environment(module, client) if not response: - module.fail_json(msg='Unable to get compute environment information after creating') + module.fail_json(msg="Unable to get compute environment information after creating") else: - if current_state == 'present': + if current_state == "present": # remove the compute environment changed = remove_compute_environment(module, client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, batch_compute_environment_action=action_taken, response=response) @@ -439,6 +455,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -447,39 +464,36 @@ def main(): """ argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), compute_environment_name=dict(required=True), - type=dict(required=True, choices=['MANAGED', 'UNMANAGED']), - compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), + type=dict(required=True, choices=["MANAGED", "UNMANAGED"]), + compute_environment_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), service_role=dict(required=True), - compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']), - minv_cpus=dict(type='int', required=True), - maxv_cpus=dict(type='int', required=True), - desiredv_cpus=dict(type='int'), - instance_types=dict(type='list', required=True, elements='str'), + compute_resource_type=dict(required=True, choices=["EC2", "SPOT"]), + minv_cpus=dict(type="int", required=True), + maxv_cpus=dict(type="int", required=True), + desiredv_cpus=dict(type="int"), + instance_types=dict(type="list", required=True, elements="str"), image_id=dict(), - subnets=dict(type='list', required=True, elements='str'), - security_group_ids=dict(type='list', required=True, elements='str'), + subnets=dict(type="list", required=True, elements="str"), + security_group_ids=dict(type="list", required=True, elements="str"), ec2_key_pair=dict(no_log=False), instance_role=dict(required=True), - tags=dict(type='dict'), - bid_percentage=dict(type='int'), + tags=dict(type="dict"), + bid_percentage=dict(type="int"), spot_iam_fleet_role=dict(), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) results = manage_state(module, client) - module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags'])) + module.exit_json(**camel_dict_to_snake_dict(results, 
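# The update path above submits only the fields that drifted from the desired
# values. The same accumulate-changes idea in isolation (field names are
# illustrative):
def diff_updates(current, desired):
    updates = {}
    for api_key, value in desired.items():
        if value is not None and current.get(api_key) != value:
            updates[api_key] = value
    return updates

# diff_updates({"minvCpus": 0, "maxvCpus": 4},
#              {"minvCpus": 0, "maxvCpus": 8, "desiredvCpus": None})
# -> {'maxvCpus': 8}   # unset (None) and unchanged fields are not submitted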
ignore_list=["Tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/batch_job_definition.py b/batch_job_definition.py index 5eac0cacfe1..9ea5dc8cefa 100644 --- a/batch_job_definition.py +++ b/batch_job_definition.py @@ -265,15 +265,15 @@ def validate_params(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def get_current_job_definition(module, batch_client): try: - environments = batch_client.describe_job_definitions( - jobDefinitionName=module.params['job_definition_name'] - ) - if len(environments['jobDefinitions']) > 0: - latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions'])) - latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision), - None) + environments = batch_client.describe_job_definitions(jobDefinitionName=module.params["job_definition_name"]) + if len(environments["jobDefinitions"]) > 0: + latest_revision = max(map(lambda d: d["revision"], environments["jobDefinitions"])) + latest_definition = next( + (x for x in environments["jobDefinitions"] if x["revision"] == latest_revision), None + ) return latest_definition return None except ClientError: @@ -282,12 +282,12 @@ def get_current_job_definition(module, batch_client): def create_job_definition(module, batch_client): """ - Adds a Batch job definition + Adds a Batch job definition - :param module: - :param batch_client: - :return: - """ + :param module: + :param batch_client: + :return: + """ changed = False @@ -296,36 +296,48 @@ def create_job_definition(module, batch_client): container_properties_params = set_api_params(module, get_container_property_params()) retry_strategy_params = set_api_params(module, get_retry_strategy_params()) - api_params['retryStrategy'] = retry_strategy_params - api_params['containerProperties'] = container_properties_params + api_params["retryStrategy"] = retry_strategy_params + api_params["containerProperties"] = container_properties_params try: if not module.check_mode: batch_client.register_job_definition(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error registering job definition') + module.fail_json_aws(e, msg="Error registering job definition") return changed def get_retry_strategy_params(): - return ('attempts',) + return ("attempts",) def get_container_property_params(): - return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points', - 'readonly_root_filesystem', 'privileged', 'ulimits', 'user') + return ( + "image", + "vcpus", + "memory", + "command", + "job_role_arn", + "volumes", + "environment", + "mount_points", + "readonly_root_filesystem", + "privileged", + "ulimits", + "user", + ) def get_base_params(): - return 'job_definition_name', 'type', 'parameters' + return "job_definition_name", "type", "parameters" def get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -342,10 +354,10 @@ def remove_job_definition(module, batch_client): try: if not module.check_mode: - 
batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn']) + batch_client.deregister_job_definition(jobDefinition=module.params["job_definition_arn"]) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job definition') + module.fail_json_aws(e, msg="Error removing job definition") return changed @@ -358,12 +370,12 @@ def job_definition_equal(module, current_definition): break for param in get_container_property_params(): - if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)): + if module.params.get(param) != current_definition.get("containerProperties").get(cc(param)): equal = False break for param in get_retry_strategy_params(): - if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)): + if module.params.get(param) != current_definition.get("retryStrategy").get(cc(param)): equal = False break @@ -372,10 +384,10 @@ def job_definition_equal(module, current_definition): def manage_state(module, batch_client): changed = False - current_state = 'absent' - state = module.params['state'] - job_definition_name = module.params['job_definition_name'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_definition_name = module.params["job_definition_name"] + action_taken = "none" response = None check_mode = module.check_mode @@ -383,28 +395,28 @@ def manage_state(module, batch_client): # check if the job definition exists current_job_definition = get_current_job_definition(module, batch_client) if current_job_definition: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": # check if definition has changed and register a new version if necessary if not job_definition_equal(module, current_job_definition): create_job_definition(module, batch_client) - action_taken = 'updated with new version' + action_taken = "updated with new version" changed = True else: # Create Job definition changed = create_job_definition(module, batch_client) - action_taken = 'added' + action_taken = "added" response = get_current_job_definition(module, batch_client) if not response: - module.fail_json(msg='Unable to get job definition information after creating/updating') + module.fail_json(msg="Unable to get job definition information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job definition changed = remove_job_definition(module, batch_client) - action_taken = 'deregistered' + action_taken = "deregistered" return dict(changed=changed, batch_job_definition_action=action_taken, response=response) @@ -414,6 +426,7 @@ def manage_state(module, batch_client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. 
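# job_definition_equal() above compares each snake_case module parameter with
# the camelCase key AWS returns (cc() being the module's camelize helper,
# defined outside this hunk). A condensed sketch of one of those loops:
def params_match(module_params, aws_section, names, cc):
    return all(module_params.get(n) == aws_section.get(cc(n)) for n in names)

# def cc(name):  # same idea as the helper the module relies on
#     head, *rest = name.split("_")
#     return head + "".join(p.capitalize() for p in rest)
# params_match({"job_role_arn": "arn:aws:iam::123456789012:role/x"},
#              {"jobRoleArn": "arn:aws:iam::123456789012:role/x"},
#              ["job_role_arn"], cc)  # -> True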
@@ -422,32 +435,29 @@ """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_definition_name=dict(required=True), job_definition_arn=dict(), type=dict(required=True), - parameters=dict(type='dict'), + parameters=dict(type="dict"), image=dict(required=True), - vcpus=dict(type='int', required=True), - memory=dict(type='int', required=True), - command=dict(type='list', default=[], elements='str'), + vcpus=dict(type="int", required=True), + memory=dict(type="int", required=True), + command=dict(type="list", default=[], elements="str"), job_role_arn=dict(), - volumes=dict(type='list', default=[], elements='dict'), - environment=dict(type='list', default=[], elements='dict'), - mount_points=dict(type='list', default=[], elements='dict'), + volumes=dict(type="list", default=[], elements="dict"), + environment=dict(type="list", default=[], elements="dict"), + mount_points=dict(type="list", default=[], elements="dict"), readonly_root_filesystem=dict(), privileged=dict(), - ulimits=dict(type='list', default=[], elements='dict'), + ulimits=dict(type="list", default=[], elements="dict"), user=dict(), - attempts=dict(type='int') + attempts=dict(type="int"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - batch_client = module.client('batch') + batch_client = module.client("batch") validate_params(module, batch_client) @@ -456,5 +466,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/batch_job_queue.py b/batch_job_queue.py index f71848bb04a..c9e253d0652 100644 --- a/batch_job_queue.py +++ b/batch_job_queue.py @@ -138,50 +138,49 @@ def validate_params(module): # # --------------------------------------------------------------------------------------------------- + def get_current_job_queue(module, client): try: - environments = client.describe_job_queues( - jobQueues=[module.params['job_queue_name']] - ) - return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None + environments = client.describe_job_queues(jobQueues=[module.params["job_queue_name"]]) + return environments["jobQueues"][0] if len(environments["jobQueues"]) > 0 else None except ClientError: return None def create_job_queue(module, client): """ - Adds a Batch job queue + Adds a Batch job queue - :param module: - :param client: - :return: - """ + :param module: + :param client: + :return: + """ changed = False # set API parameters - params = ('job_queue_name', 'priority') + params = ("job_queue_name", "priority") api_params = set_api_params(module, params) - if module.params['job_queue_state'] is not None: - api_params['state'] = module.params['job_queue_state'] + if module.params["job_queue_state"] is not None: + api_params["state"] = module.params["job_queue_state"] - api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module) + api_params["computeEnvironmentOrder"] = get_compute_environment_order_list(module) try: if not module.check_mode: client.create_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error creating compute environment') + module.fail_json_aws(e, msg="Error creating job queue") return changed def 
get_compute_environment_order_list(module): compute_environment_order_list = [] - for ceo in module.params['compute_environment_order']: - compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) + for ceo in module.params["compute_environment_order"]: + compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) return compute_environment_order_list @@ -197,25 +196,25 @@ def remove_job_queue(module, client): changed = False # set API parameters - api_params = {'jobQueue': module.params['job_queue_name']} + api_params = {"jobQueue": module.params["job_queue_name"]} try: if not module.check_mode: client.delete_job_queue(**api_params) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Error removing job queue') + module.fail_json_aws(e, msg="Error removing job queue") return changed def manage_state(module, client): changed = False - current_state = 'absent' - state = module.params['state'] - job_queue_state = module.params['job_queue_state'] - job_queue_name = module.params['job_queue_name'] - priority = module.params['priority'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + job_queue_state = module.params["job_queue_state"] + job_queue_name = module.params["job_queue_name"] + priority = module.params["priority"] + action_taken = "none" response = None check_mode = module.check_mode @@ -223,25 +222,25 @@ def manage_state(module, client): # check if the job queue exists current_job_queue = get_current_job_queue(module, client) if current_job_queue: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": updates = False # Update Batch Job Queue configuration - job_kwargs = {'jobQueue': job_queue_name} + job_kwargs = {"jobQueue": job_queue_name} # Update configuration if needed - if job_queue_state and current_job_queue['state'] != job_queue_state: - job_kwargs.update({'state': job_queue_state}) + if job_queue_state and current_job_queue["state"] != job_queue_state: + job_kwargs.update({"state": job_queue_state}) updates = True - if priority is not None and current_job_queue['priority'] != priority: - job_kwargs.update({'priority': priority}) + if priority is not None and current_job_queue["priority"] != priority: + job_kwargs.update({"priority": priority}) updates = True new_compute_environment_order_list = get_compute_environment_order_list(module) - if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']: - job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list + if new_compute_environment_order_list != current_job_queue["computeEnvironmentOrder"]: + job_kwargs["computeEnvironmentOrder"] = new_compute_environment_order_list updates = True if updates: @@ -256,17 +255,17 @@ def manage_state(module, client): else: # Create Job Queue changed = create_job_queue(module, client) - action_taken = 'added' + action_taken = "added" # Describe job queue response = get_current_job_queue(module, client) if not response: - module.fail_json(msg='Unable to get job queue information after creating/updating') + module.fail_json(msg="Unable to get job queue information after creating/updating") else: - if current_state == 'present': + if current_state == "present": # remove the Job Queue changed = remove_job_queue(module, client) - action_taken = 'deleted' + 
action_taken = "deleted" return dict(changed=changed, batch_job_queue_action=action_taken, response=response) @@ -276,6 +275,7 @@ def manage_state(module, client): # # --------------------------------------------------------------------------------------------------- + def main(): """ Main entry point. @@ -284,19 +284,16 @@ def main(): """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), job_queue_name=dict(required=True), - job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']), - priority=dict(type='int', required=True), - compute_environment_order=dict(type='list', required=True, elements='dict'), + job_queue_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), + priority=dict(type="int", required=True), + compute_environment_order=dict(type="list", required=True, elements="dict"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('batch') + client = module.client("batch") validate_params(module) @@ -305,5 +302,5 @@ def main(): module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 604abfd1436..3c93c6a3459 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -48,29 +48,26 @@ @AWSRetry.exponential_backoff() def list_exports(cloudformation_client): - '''Get Exports Names and Values and return in dictionary ''' - list_exports_paginator = cloudformation_client.get_paginator('list_exports') - exports = list_exports_paginator.paginate().build_full_result()['Exports'] + """Get Exports Names and Values and return in dictionary""" + list_exports_paginator = cloudformation_client.get_paginator("list_exports") + exports = list_exports_paginator.paginate().build_full_result()["Exports"] export_items = dict() for item in exports: - export_items[item['Name']] = item['Value'] + export_items[item["Name"]] = item["Value"] return export_items def main(): argument_spec = dict() - result = dict( - changed=False, - original_message='' - ) + result = dict(changed=False, original_message="") module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - cloudformation_client = module.client('cloudformation') + cloudformation_client = module.client("cloudformation") try: - result['export_items'] = list_exports(cloudformation_client) + result["export_items"] = list_exports(cloudformation_client) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e) @@ -79,5 +76,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index e15f1c95229..1a8673a909c 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -321,9 +321,9 @@ def create_stack_set(module, stack_params, cfn): try: cfn.create_stack_set(aws_retry=True, **stack_params) - return await_stack_set_exists(cfn, stack_params['StackSetName']) + return await_stack_set_exists(cfn, stack_params["StackSetName"]) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName'))) + 
module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get("StackSetName"))) def update_stack_set(module, stack_params, cfn): @@ -332,22 +332,29 @@ def update_stack_set(module, stack_params, cfn): # don't need to be updated. try: cfn.update_stack_set(**stack_params) - except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.") - except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters.") - except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except + except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws( - err, msg="Another operation is already in progress on this stack set - please try again later. When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.") + err, + msg="One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters.", + ) + except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except + module.fail_json_aws( + err, + msg="Another operation is already in progress on this stack set - please try again later. When making " + "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.", + ) except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") - if module.params.get('wait'): + if module.params.get("wait"): await_stack_set_operation( - module, cfn, operation_id=stack_params['OperationId'], - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) return True @@ -357,20 +364,24 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions): instance_list = cfn.list_stack_instances( aws_retry=True, StackSetName=stack_set_name, - )['Summaries'] + )["Summaries"] desired_stack_instances = set(itertools.product(accounts, regions)) - existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list) + existing_stack_instances = set((i["Account"], i["Region"]) for i in instance_list) # new stacks, existing stacks, unspecified stacks - return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) + return ( + (desired_stack_instances - existing_stack_instances), + existing_stack_instances, + (existing_stack_instances - desired_stack_instances), + ) @AWSRetry.jittered_backoff(retries=3, delay=4) def stack_set_facts(cfn, stack_set_name): try: - ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) + ss = cfn.describe_stack_set(StackSetName=stack_set_name)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) return ss - except cfn.exceptions.from_code('StackSetNotFound'): + except 
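# compare_stack_instances() above reduces instance management to set algebra
# over (account, region) pairs. The core of it, runnable standalone with
# made-up account IDs:
import itertools

desired = set(itertools.product(["111111111111", "222222222222"], ["us-east-1"]))
existing = {("111111111111", "us-east-1"), ("111111111111", "eu-west-1")}

new_pairs = desired - existing    # instances to create
stale_pairs = existing - desired  # instances eligible for purging
# new_pairs   -> {('222222222222', 'us-east-1')}
# stale_pairs -> {('111111111111', 'eu-west-1')}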
cfn.exceptions.from_code("StackSetNotFound"): # Return None if the stack doesn't exist return @@ -381,23 +392,24 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai for i in range(max_wait // 15): try: operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id) - if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'): + if operation["StackSetOperation"]["Status"] not in ("RUNNING", "STOPPING"): # Stack set has completed operation break - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except pass - except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except + except is_boto3_error_code("OperationNotFound"): # pylint: disable=duplicate-except pass time.sleep(15) - if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'): + if operation and operation["StackSetOperation"]["Status"] not in ("FAILED", "STOPPED"): await_stack_instance_completion( - module, cfn, + module, + cfn, stack_set_name=stack_set_name, # subtract however long we waited already max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()), ) - elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'): + elif operation and operation["StackSetOperation"]["Status"] in ("FAILED", "STOPPED"): pass else: module.warn( @@ -412,84 +424,84 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): for i in range(max_wait // 15): try: stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name) - to_await = [inst for inst in stack_instances['Summaries'] - if inst['Status'] != 'CURRENT'] + to_await = [inst for inst in stack_instances["Summaries"] if inst["Status"] != "CURRENT"] if not to_await: - return stack_instances['Summaries'] - except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except + return stack_instances["Summaries"] + except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except # this means the deletion beat us, or the stack set is not yet propagated pass time.sleep(15) module.warn( "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. 
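# await_stack_set_operation() above is a bounded poll: probe every 15 seconds
# until the status leaves RUNNING/STOPPING or the wait budget runs out. The
# loop skeleton with the AWS call abstracted away (check_status is a stand-in):
import time

def wait_for(check_status, max_wait, interval=15):
    status = None
    for _ in range(max_wait // interval):
        status = check_status()
        if status not in ("RUNNING", "STOPPING"):
            break
        time.sleep(interval)
    return status

# wait_for(lambda: "SUCCEEDED", max_wait=900)  # returns on the first probe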
Returning unfinished operation".format( - stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait + stack_set_name, ", ".join(s["StackId"] for s in to_await), max_wait ) ) def await_stack_set_exists(cfn, stack_set_name): # AWSRetry will retry on `StackSetNotFound` errors for us - ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet'] - ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags']) - return camel_dict_to_snake_dict(ss, ignore_list=('Tags',)) + ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)["StackSet"] + ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) + return camel_dict_to_snake_dict(ss, ignore_list=("Tags",)) def describe_stack_tree(module, stack_set_name, operation_ids=None): - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=5, delay=3, max_delay=5, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) result = dict() - result['stack_set'] = camel_dict_to_snake_dict( + result["stack_set"] = camel_dict_to_snake_dict( cfn.describe_stack_set( StackSetName=stack_set_name, aws_retry=True, - )['StackSet'] + )["StackSet"] ) - result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags']) - result['operations_log'] = sorted( + result["stack_set"]["tags"] = boto3_tag_list_to_ansible_dict(result["stack_set"]["tags"]) + result["operations_log"] = sorted( camel_dict_to_snake_dict( cfn.list_stack_set_operations( StackSetName=stack_set_name, aws_retry=True, ) - )['summaries'], - key=lambda x: x['creation_timestamp'] + )["summaries"], + key=lambda x: x["creation_timestamp"], ) - result['stack_instances'] = sorted( - [ - camel_dict_to_snake_dict(i) for i in - cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries'] - ], - key=lambda i: i['region'] + i['account'] + result["stack_instances"] = sorted( + [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)["Summaries"]], + key=lambda i: i["region"] + i["account"], ) if operation_ids: - result['operations'] = [] + result["operations"] = [] for op_id in operation_ids: try: - result['operations'].append(camel_dict_to_snake_dict( - cfn.describe_stack_set_operation( - StackSetName=stack_set_name, - OperationId=op_id, - )['StackSetOperation'] - )) - except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except + result["operations"].append( + camel_dict_to_snake_dict( + cfn.describe_stack_set_operation( + StackSetName=stack_set_name, + OperationId=op_id, + )["StackSetOperation"] + ) + ) + except is_boto3_error_code("OperationNotFoundException"): # pylint: disable=duplicate-except pass return result def get_operation_preferences(module): params = dict() - if module.params.get('regions'): - params['RegionOrder'] = list(module.params['regions']) + if module.params.get("regions"): + params["RegionOrder"] = list(module.params["regions"]) for param, api_name in { - 'fail_count': 'FailureToleranceCount', - 'fail_percentage': 'FailureTolerancePercentage', - 'parallel_percentage': 'MaxConcurrentPercentage', - 'parallel_count': 'MaxConcurrentCount', + "fail_count": "FailureToleranceCount", + "fail_percentage": "FailureTolerancePercentage", + "parallel_percentage": 
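# boto3_tag_list_to_ansible_dict(), used above on the stack set's tags, plus
# its inverse, hand-rolled here to show the two tag shapes involved
# (simplified; the real helpers also tolerate alternate key casings):
def tags_to_dict(tag_list):
    return {t["Key"]: t["Value"] for t in tag_list}

def dict_to_tags(tag_dict):
    return [{"Key": k, "Value": v} for k, v in tag_dict.items()]

# tags_to_dict([{"Key": "env", "Value": "prod"}]) -> {'env': 'prod'}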
"MaxConcurrentPercentage", + "parallel_count": "MaxConcurrentCount", }.items(): - if module.params.get('failure_tolerance', {}).get(param): - params[api_name] = module.params.get('failure_tolerance', {}).get(param) + if module.params.get("failure_tolerance", {}).get(param): + params[api_name] = module.params.get("failure_tolerance", {}).get(param) return params @@ -497,171 +509,173 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=900), - state=dict(default='present', choices=['present', 'absent']), - purge_stacks=dict(type='bool', default=True), - parameters=dict(type='dict', default={}), - template=dict(type='path'), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=900), + state=dict(default="present", choices=["present", "absent"]), + purge_stacks=dict(type="bool", default=True), + parameters=dict(type="dict", default={}), + template=dict(type="path"), template_url=dict(), template_body=dict(), - capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']), - regions=dict(type='list', elements='str'), - accounts=dict(type='list', elements='str'), + capabilities=dict(type="list", elements="str", choices=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), + regions=dict(type="list", elements="str"), + accounts=dict(type="list", elements="str"), failure_tolerance=dict( - type='dict', + type="dict", default={}, options=dict( - fail_count=dict(type='int'), - fail_percentage=dict(type='int'), - parallel_percentage=dict(type='int'), - parallel_count=dict(type='int'), + fail_count=dict(type="int"), + fail_percentage=dict(type="int"), + parallel_percentage=dict(type="int"), + parallel_count=dict(type="int"), ), mutually_exclusive=[ - ['fail_count', 'fail_percentage'], - ['parallel_count', 'parallel_percentage'], + ["fail_count", "fail_percentage"], + ["parallel_count", "parallel_percentage"], ], ), - administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']), - execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']), - tags=dict(type='dict'), + administration_role_arn=dict(aliases=["admin_role_arn", "administration_role", "admin_role"]), + execution_role_name=dict(aliases=["execution_role", "exec_role", "exec_role_name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['template_url', 'template', 'template_body']], - supports_check_mode=True + mutually_exclusive=[["template_url", "template", "template_body"]], + supports_check_mode=True, ) # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes - jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound']) - cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator) - existing_stack_set = stack_set_facts(cfn, module.params['name']) + jittered_backoff_decorator = AWSRetry.jittered_backoff( + retries=10, delay=3, max_delay=30, catch_extra_error_codes=["StackSetNotFound"] + ) + cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) + existing_stack_set = stack_set_facts(cfn, module.params["name"]) operation_uuid = to_native(uuid.uuid4()) operation_ids = [] # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. 
stack_params = {} - state = module.params['state'] - if state == 'present' and not module.params['accounts']: + state = module.params["state"] + if state == "present" and not module.params["accounts"]: module.fail_json( msg="Can't create a stack set without choosing at least one account. " - "To get the ID of the current account, use the aws_caller_info module." + "To get the ID of the current account, use the aws_caller_info module." ) - module.params['accounts'] = [to_native(a) for a in module.params['accounts']] + module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] - stack_params['StackSetName'] = module.params['name'] - if module.params.get('description'): - stack_params['Description'] = module.params['description'] + stack_params["StackSetName"] = module.params["name"] + if module.params.get("description"): + stack_params["Description"] = module.params["description"] - if module.params.get('capabilities'): - stack_params['Capabilities'] = module.params['capabilities'] + if module.params.get("capabilities"): + stack_params["Capabilities"] = module.params["capabilities"] - if module.params['template'] is not None: - with open(module.params['template'], 'r') as tpl: - stack_params['TemplateBody'] = tpl.read() - elif module.params['template_body'] is not None: - stack_params['TemplateBody'] = module.params['template_body'] - elif module.params['template_url'] is not None: - stack_params['TemplateURL'] = module.params['template_url'] + if module.params["template"] is not None: + with open(module.params["template"], "r") as tpl: + stack_params["TemplateBody"] = tpl.read() + elif module.params["template_body"] is not None: + stack_params["TemplateBody"] = module.params["template_body"] + elif module.params["template_url"] is not None: + stack_params["TemplateURL"] = module.params["template_url"] else: # no template is provided, but if the stack set exists already, we can use the existing one. if existing_stack_set: - stack_params['UsePreviousTemplate'] = True + stack_params["UsePreviousTemplate"] = True else: module.fail_json( msg="The Stack Set {0} does not exist, and no template was provided. 
Provide one of `template`, " - "`template_body`, or `template_url`".format(module.params['name']) + "`template_body`, or `template_url`".format(module.params["name"]) ) - stack_params['Parameters'] = [] - for k, v in module.params['parameters'].items(): + stack_params["Parameters"] = [] + for k, v in module.params["parameters"].items(): if isinstance(v, dict): # set parameter based on a dict to allow additional CFN Parameter Attributes param = dict(ParameterKey=k) - if 'value' in v: - param['ParameterValue'] = to_native(v['value']) + if "value" in v: + param["ParameterValue"] = to_native(v["value"]) - if 'use_previous_value' in v and bool(v['use_previous_value']): - param['UsePreviousValue'] = True - param.pop('ParameterValue', None) + if "use_previous_value" in v and bool(v["use_previous_value"]): + param["UsePreviousValue"] = True + param.pop("ParameterValue", None) - stack_params['Parameters'].append(param) + stack_params["Parameters"].append(param) else: # allow default k/v configuration to set a template parameter - stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - if module.params.get('tags') and isinstance(module.params.get('tags'), dict): - stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + if module.params.get("tags") and isinstance(module.params.get("tags"), dict): + stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - if module.params.get('administration_role_arn'): + if module.params.get("administration_role_arn"): # TODO loosen the semantics here to autodetect the account ID and build the ARN - stack_params['AdministrationRoleARN'] = module.params['administration_role_arn'] - if module.params.get('execution_role_name'): - stack_params['ExecutionRoleName'] = module.params['execution_role_name'] + stack_params["AdministrationRoleARN"] = module.params["administration_role_arn"] + if module.params.get("execution_role_name"): + stack_params["ExecutionRoleName"] = module.params["execution_role_name"] result = {} if module.check_mode: - if state == 'absent' and existing_stack_set: - module.exit_json(changed=True, msg='Stack set would be deleted', meta=[]) - elif state == 'absent' and not existing_stack_set: - module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[]) - elif state == 'present' and not existing_stack_set: - module.exit_json(changed=True, msg='New stack set would be created', meta=[]) - elif state == 'present' and existing_stack_set: + if state == "absent" and existing_stack_set: + module.exit_json(changed=True, msg="Stack set would be deleted", meta=[]) + elif state == "absent" and not existing_stack_set: + module.exit_json(changed=False, msg="Stack set doesn't exist", meta=[]) + elif state == "present" and not existing_stack_set: + module.exit_json(changed=True, msg="New stack set would be created", meta=[]) + elif state == "present" and existing_stack_set: new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stacks: - module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[]) - elif unspecified_stacks and module.params.get('purge_stack_instances'): - module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[]) + 
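# The parameter-translation loop above, condensed: plain values become
# ParameterKey/ParameterValue pairs, while dict values may opt into
# UsePreviousValue instead of supplying a value. Standalone version (str()
# standing in for to_native()):
def to_cfn_parameters(parameters):
    result = []
    for key, value in parameters.items():
        if isinstance(value, dict):
            param = {"ParameterKey": key}
            if "value" in value:
                param["ParameterValue"] = str(value["value"])
            if value.get("use_previous_value"):
                param["UsePreviousValue"] = True
                param.pop("ParameterValue", None)
            result.append(param)
        else:
            result.append({"ParameterKey": key, "ParameterValue": str(value)})
    return result

# to_cfn_parameters({"VpcId": {"use_previous_value": True}, "Env": "prod"})
# -> [{'ParameterKey': 'VpcId', 'UsePreviousValue': True},
#     {'ParameterKey': 'Env', 'ParameterValue': 'prod'}]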
module.exit_json(changed=True, msg="New stack instance(s) would be created", meta=[]) + elif unspecified_stacks and module.params.get("purge_stack_instances"): + module.exit_json(changed=True, msg="Old stack instance(s) would be deleted", meta=[]) else: # TODO: need to check the template and other settings for correct check mode - module.exit_json(changed=False, msg='No changes detected', meta=[]) + module.exit_json(changed=False, msg="No changes detected", meta=[]) changed = False - if state == 'present': + if state == "present": if not existing_stack_set: # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid) + stack_params["ClientRequestToken"] = "Ansible-StackSet-Create-{0}".format(operation_uuid) changed = True create_stack_set(module, stack_params, cfn) else: - stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid) - operation_ids.append(stack_params['OperationId']) - if module.params.get('regions'): - stack_params['OperationPreferences'] = get_operation_preferences(module) + stack_params["OperationId"] = "Ansible-StackSet-Update-{0}".format(operation_uuid) + operation_ids.append(stack_params["OperationId"]) + if module.params.get("regions"): + stack_params["OperationPreferences"] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn, - module.params['name'], - module.params['accounts'], - module.params['regions'], + module.params["name"], + module.params["accounts"], + module.params["regions"], ) if new_stack_instances: - operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid)) + operation_ids.append("Ansible-StackInstance-Create-{0}".format(operation_uuid)) changed = True cfn.create_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in new_stack_instances)), Regions=list(set(region for acct, region in new_stack_instances)), OperationPreferences=get_operation_preferences(module), OperationId=operation_ids[-1], ) else: - operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid)) + operation_ids.append("Ansible-StackInstance-Update-{0}".format(operation_uuid)) cfn.update_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in existing_stack_instances)), Regions=list(set(region for acct, region in existing_stack_instances)), OperationPreferences=get_operation_preferences(module), @@ -669,55 +683,67 @@ def main(): ) for op in operation_ids: await_stack_set_operation( - module, cfn, operation_id=op, - stack_set_name=module.params['name'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=op, + stack_set_name=module.params["name"], + max_wait=module.params.get("wait_timeout"), ) - elif state == 'absent': + elif state == "absent": if not existing_stack_set: - module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name'])) - if module.params.get('purge_stack_instances') is False: + module.exit_json(msg="Stack set {0} does not exist".format(module.params["name"])) + if module.params.get("purge_stack_instances") is False: pass try: cfn.delete_stack_set( - StackSetName=module.params['name'], + 
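# create_stack_instances() above recombines the (account, region) pairs into
# separate account and region lists. Worth noting: the API then targets every
# combination of those lists, which can be a superset of the original pairs.
pairs = {("111111111111", "us-east-1"), ("222222222222", "eu-west-1")}
accounts = sorted({acct for acct, _ in pairs})
regions = sorted({region for _, region in pairs})
# accounts -> ['111111111111', '222222222222']
# regions  -> ['eu-west-1', 'us-east-1']
# i.e. up to len(accounts) * len(regions) = 4 instances from 2 original pairs.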
StackSetName=module.params["name"], + ) + module.exit_json(msg="Stack set {0} deleted".format(module.params["name"])) + except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg="Cannot delete stack {0} while there is an operation in progress".format(module.params["name"]) ) - module.exit_json(msg='Stack set {0} deleted'.format(module.params['name'])) - except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name'])) - except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except - delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid) + except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except + delete_instances_op = "Ansible-StackInstance-Delete-{0}".format(operation_uuid) cfn.delete_stack_instances( - StackSetName=module.params['name'], - Accounts=module.params['accounts'], - Regions=module.params['regions'], - RetainStacks=(not module.params.get('purge_stacks')), - OperationId=delete_instances_op + StackSetName=module.params["name"], + Accounts=module.params["accounts"], + Regions=module.params["regions"], + RetainStacks=(not module.params.get("purge_stacks")), + OperationId=delete_instances_op, ) await_stack_set_operation( - module, cfn, operation_id=delete_instances_op, - stack_set_name=stack_params['StackSetName'], - max_wait=module.params.get('wait_timeout'), + module, + cfn, + operation_id=delete_instances_op, + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), ) try: cfn.delete_stack_set( - StackSetName=module.params['name'], + StackSetName=module.params["name"], ) - except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except + except is_boto3_error_code("StackSetNotEmptyException") as exc: # pylint: disable=duplicate-except # this time, it is likely that either the delete failed or there are more stacks. 
instances = cfn.list_stack_instances( - StackSetName=module.params['name'], + StackSetName=module.params["name"], + ) + stack_states = ", ".join( + "(account={Account}, region={Region}, state={Status})".format(**i) for i in instances["Summaries"] + ) + module.fail_json_aws( + exc, + msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: " + + stack_states, ) - stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries']) - module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states) - module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name'])) + module.exit_json(changed=True, msg="Stack set {0} deleted".format(module.params["name"])) - result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids)) - if any(o['status'] == 'FAILED' for o in result['operations']): + result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids)) + if any(o["status"] == "FAILED" for o in result["operations"]): module.fail_json(msg="One or more operations failed to execute", **result) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 82a00b283be..ac43cada3ad 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1461,41 +1461,42 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): - raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items))) + raise ValueError("Expected a list, got a {0} with value {1}".format(type(list_items).__name__, str(list_items))) result = {} if include_quantity: - result['quantity'] = len(list_items) + result["quantity"] = len(list_items) if len(list_items) > 0: - result['items'] = list_items + result["items"] = list_items return result def create_distribution(client, module, config, tags): try: if not tags: - return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution'] + return client.create_distribution(aws_retry=True, DistributionConfig=config)["Distribution"] else: - distribution_config_with_tags = { - 'DistributionConfig': config, - 'Tags': { - 'Items': tags - } - } - return client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] + distribution_config_with_tags = {"DistributionConfig": config, "Tags": {"Items": tags}} + return client.create_distribution_with_tags( + aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags + )["Distribution"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating distribution") def delete_distribution(client, module, distribution): try: - return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) + return client.delete_distribution( + aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) + 
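# Usage notes for ansible_list_to_cloudfront_list() as defined above. The
# envelope logic is restated compactly here so the examples run standalone:
def cloudfront_list(items=None):
    items = items or []
    result = {"quantity": len(items)}
    if items:
        result["items"] = items
    return result

assert cloudfront_list(["GET", "HEAD"]) == {"quantity": 2, "items": ["GET", "HEAD"]}
assert cloudfront_list([]) == {"quantity": 0}    # empty list: no "items" key
assert cloudfront_list(None) == {"quantity": 0}  # None is treated as an empty list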
module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution["Distribution"])) def update_distribution(client, module, config, distribution_id, e_tag): try: - return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] + return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)[ + "Distribution" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) @@ -1517,7 +1518,7 @@ def untag_resource(client, module, arn, tag_keys): def list_tags_for_resource(client, module, arn): try: response = client.list_tags_for_resource(aws_retry=True, Resource=arn) - return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) + return boto3_tag_list_to_ansible_dict(response.get("Tags").get("Items")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error listing tags for resource") @@ -1549,83 +1550,130 @@ def __init__(self, module): self.__default_https_port = 443 self.__default_ipv6_enabled = False self.__default_origin_ssl_protocols = [ - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' + "TLSv1", + "TLSv1.1", + "TLSv1.2", ] - self.__default_custom_origin_protocol_policy = 'match-viewer' + self.__default_custom_origin_protocol_policy = "match-viewer" self.__default_custom_origin_read_timeout = 30 self.__default_custom_origin_keepalive_timeout = 5 - self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + self.__default_datetime_string = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") self.__default_cache_behavior_min_ttl = 0 self.__default_cache_behavior_max_ttl = 31536000 self.__default_cache_behavior_default_ttl = 86400 self.__default_cache_behavior_compress = False - self.__default_cache_behavior_viewer_protocol_policy = 'allow-all' + self.__default_cache_behavior_viewer_protocol_policy = "allow-all" self.__default_cache_behavior_smooth_streaming = False - self.__default_cache_behavior_forwarded_values_forward_cookies = 'none' + self.__default_cache_behavior_forwarded_values_forward_cookies = "none" self.__default_cache_behavior_forwarded_values_query_string = True self.__default_trusted_signers_enabled = False - self.__valid_price_classes = set([ - 'PriceClass_100', - 'PriceClass_200', - 'PriceClass_All' - ]) - self.__valid_origin_protocol_policies = set([ - 'http-only', - 'match-viewer', - 'https-only' - ]) - self.__valid_origin_ssl_protocols = set([ - 'SSLv3', - 'TLSv1', - 'TLSv1.1', - 'TLSv1.2' - ]) - self.__valid_cookie_forwarding = set([ - 'none', - 'whitelist', - 'all' - ]) - self.__valid_viewer_protocol_policies = set([ - 'allow-all', - 'https-only', - 'redirect-to-https' - ]) - self.__valid_methods = set([ - 'GET', - 'HEAD', - 'POST', - 'PUT', - 'PATCH', - 'OPTIONS', - 'DELETE' - ]) + self.__valid_price_classes = set( + [ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All", + ] + ) + self.__valid_origin_protocol_policies = set( + [ + "http-only", + "match-viewer", + "https-only", + ] + ) + self.__valid_origin_ssl_protocols = set( + [ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2", + ] + ) + self.__valid_cookie_forwarding = set( + [ + "none", + "whitelist", + "all", + ] + ) + self.__valid_viewer_protocol_policies = set( + [ + "allow-all", + "https-only", + "redirect-to-https", + ] + ) + self.__valid_methods = set( + [ + "GET", + 
"HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE", + ] + ) self.__valid_methods_cached_methods = [ - set([ - 'GET', - 'HEAD' - ]), - set([ - 'GET', - 'HEAD', - 'OPTIONS' - ]) + set( + [ + "GET", + "HEAD", + ] + ), + set( + [ + "GET", + "HEAD", + "OPTIONS", + ] + ), ] self.__valid_methods_allowed_methods = [ self.__valid_methods_cached_methods[0], self.__valid_methods_cached_methods[1], - self.__valid_methods + self.__valid_methods, ] self.__valid_lambda_function_association_event_types = set( - ["viewer-request", "viewer-response", "origin-request", "origin-response"] + [ + "viewer-request", + "viewer-response", + "origin-request", + "origin-response", + ] + ) + self.__valid_viewer_certificate_ssl_support_methods = set( + [ + "sni-only", + "vip", + ] ) - self.__valid_viewer_certificate_ssl_support_methods = set(["sni-only", "vip"]) self.__valid_viewer_certificate_minimum_protocol_versions = set( - ["SSLv3", "TLSv1", "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018", "TLSv1.2_2019", "TLSv1.2_2021"] + [ + "SSLv3", + "TLSv1", + "TLSv1_2016", + "TLSv1.1_2016", + "TLSv1.2_2018", + "TLSv1.2_2019", + "TLSv1.2_2021", + ] + ) + self.__valid_viewer_certificate_certificate_sources = set( + [ + "cloudfront", + "iam", + "acm", + ] + ) + self.__valid_http_versions = set( + [ + "http1.1", + "http2", + "http3", + "http2and3", + ] ) - self.__valid_viewer_certificate_certificate_sources = set(["cloudfront", "iam", "acm"]) - self.__valid_http_versions = set(["http1.1", "http2", "http3", "http2and3"]) self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" def add_missing_key(self, dict_object, key_to_set, value_to_set): @@ -1640,7 +1688,9 @@ def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_s dict_object = change_dict_key_name(dict_object, old_key, new_key) return dict_object - def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False): + def add_key_else_validate( + self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False + ): if key_name in dict_object: self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) else: @@ -1655,26 +1705,38 @@ def validate_logging(self, logging): if logging is None: return None valid_logging = {} - if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging): - self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.") - valid_logging['include_cookies'] = logging.get('include_cookies') - valid_logging['enabled'] = logging.get('enabled') - valid_logging['bucket'] = logging.get('bucket') - valid_logging['prefix'] = logging.get('prefix') + if logging and not set(["enabled", "include_cookies", "bucket", "prefix"]).issubset(logging): + self.module.fail_json( + msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified." + ) + valid_logging["include_cookies"] = logging.get("include_cookies") + valid_logging["enabled"] = logging.get("enabled") + valid_logging["bucket"] = logging.get("bucket") + valid_logging["prefix"] = logging.get("prefix") return valid_logging except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution logging") def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): - self.module.fail_json(msg='%s is of type %s. Must be a list.' 
% (list_name, type(list_to_validate).__name__)) + self.module.fail_json( + msg="%s is of type %s. Must be a list." % (list_name, type(list_to_validate).__name__) + ) def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: self.module.fail_json(msg="%s must be specified." % full_key_name) - def validate_origins(self, client, config, origins, default_origin_domain_name, - default_origin_path, create_distribution, purge_origins=False): + def validate_origins( + self, + client, + config, + origins, + default_origin_domain_name, + default_origin_path, + create_distribution, + purge_origins=False, + ): try: if origins is None: if default_origin_domain_name is None and not create_distribution: @@ -1683,23 +1745,24 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, else: return ansible_list_to_cloudfront_list(config) if default_origin_domain_name is not None: - origins = [{ - 'domain_name': default_origin_domain_name, - 'origin_path': default_origin_path or '' - }] + origins = [{"domain_name": default_origin_domain_name, "origin_path": default_origin_path or ""}] else: origins = [] - self.validate_is_list(origins, 'origins') + self.validate_is_list(origins, "origins") if not origins and default_origin_domain_name is None and create_distribution: - self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.") + self.module.fail_json( + msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one." + ) all_origins = OrderedDict() new_domains = list() for origin in config: - all_origins[origin.get('domain_name')] = origin + all_origins[origin.get("domain_name")] = origin for origin in origins: - origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path) - all_origins[origin['domain_name']] = origin - new_domains.append(origin['domain_name']) + origin = self.validate_origin( + client, all_origins.get(origin.get("domain_name"), {}), origin, default_origin_path + ) + all_origins[origin["domain_name"]] = origin + new_domains.append(origin["domain_name"]) if purge_origins: for domain in list(all_origins.keys()): if domain not in new_domains: @@ -1709,46 +1772,55 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if origin.get('s3_origin_config', {}).get('origin_access_identity'): - return origin['s3_origin_config']['origin_access_identity'] + if origin.get("s3_origin_config", {}).get("origin_access_identity"): + return origin["s3_origin_config"]["origin_access_identity"] - if existing_config.get('s3_origin_config', {}).get('origin_access_identity'): - return existing_config['s3_origin_config']['origin_access_identity'] + if existing_config.get("s3_origin_config", {}).get("origin_access_identity"): + return existing_config["s3_origin_config"]["origin_access_identity"] try: - comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) - cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, - Comment=comment)) - oai = 
client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id'] + comment = "access-identity-by-ansible-%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + caller_reference = "%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + cfoai_config = dict( + CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) + ) + oai = client.create_cloud_front_origin_access_identity(**cfoai_config)["CloudFrontOriginAccessIdentity"][ + "Id" + ] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id']) + self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin["id"]) return "origin-access-identity/cloudfront/%s" % oai def validate_origin(self, client, existing_config, origin, default_origin_path): try: - origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or '')) - self.validate_required_key('origin_path', 'origins[].origin_path', origin) - origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string)) - if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0: - for custom_header in origin.get('custom_headers'): - if 'header_name' not in custom_header or 'header_value' not in custom_header: - self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.") - origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers')) + origin = self.add_missing_key( + origin, "origin_path", existing_config.get("origin_path", default_origin_path or "") + ) + self.validate_required_key("origin_path", "origins[].origin_path", origin) + origin = self.add_missing_key(origin, "id", existing_config.get("id", self.__default_datetime_string)) + if "custom_headers" in origin and len(origin.get("custom_headers")) > 0: + for custom_header in origin.get("custom_headers"): + if "header_name" not in custom_header or "header_value" not in custom_header: + self.module.fail_json( + msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified." + ) + origin["custom_headers"] = ansible_list_to_cloudfront_list(origin.get("custom_headers")) else: - origin['custom_headers'] = ansible_list_to_cloudfront_list() - if 'origin_shield' in origin: - origin_shield = origin.get('origin_shield') - if origin_shield.get('enabled'): - origin_shield_region = origin_shield.get('origin_shield_region') + origin["custom_headers"] = ansible_list_to_cloudfront_list() + if "origin_shield" in origin: + origin_shield = origin.get("origin_shield") + if origin_shield.get("enabled"): + origin_shield_region = origin_shield.get("origin_shield_region") if origin_shield_region is None: - self.module.fail_json(msg="origins[].origin_shield.origin_shield_region must be specified" - " when origins[].origin_shield.enabled is true.") + self.module.fail_json( + msg="origins[].origin_shield.origin_shield_region must be specified" + " when origins[].origin_shield.enabled is true." 
+                        )
                     else:
                         origin_shield_region = origin_shield_region.lower()
-            if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
+            if self.__s3_bucket_domain_identifier in origin.get("domain_name").lower():
                 if origin.get("s3_origin_access_identity_enabled") is not None:
-                    if origin['s3_origin_access_identity_enabled']:
+                    if origin["s3_origin_access_identity_enabled"]:
                         s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
                     else:
                         s3_origin_config = None
@@ -1762,26 +1834,47 @@ def validate_origin(self, client, existing_config, origin, default_origin_path):
                 origin["s3_origin_config"] = dict(origin_access_identity=oai)

-                if 'custom_origin_config' in origin:
-                    self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
+                if "custom_origin_config" in origin:
+                    self.module.fail_json(
+                        msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive"
+                    )
             else:
-                origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
-                custom_origin_config = origin.get('custom_origin_config')
-                custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
-                                                                  'origins[].custom_origin_config.origin_protocol_policy',
-                                                                  self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
-                custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
-                custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
-                custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
-                custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
-                if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
-                    custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
-                if custom_origin_config.get('origin_ssl_protocols'):
-                    self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
-                                                                   self.__valid_origin_ssl_protocols)
+                origin = self.add_missing_key(
+                    origin, "custom_origin_config", existing_config.get("custom_origin_config", {})
+                )
+                custom_origin_config = origin.get("custom_origin_config")
+                custom_origin_config = self.add_key_else_validate(
+                    custom_origin_config,
+                    "origin_protocol_policy",
+                    "origins[].custom_origin_config.origin_protocol_policy",
+                    self.__default_custom_origin_protocol_policy,
+                    self.__valid_origin_protocol_policies,
+                )
+                custom_origin_config = self.add_missing_key(
+                    custom_origin_config, "origin_read_timeout", self.__default_custom_origin_read_timeout
+                )
+                custom_origin_config = self.add_missing_key(
+                    custom_origin_config, "origin_keepalive_timeout", self.__default_custom_origin_keepalive_timeout
+                )
+                custom_origin_config = self.add_key_else_change_dict_key(
+                    custom_origin_config, "http_port", "h_t_t_p_port", self.__default_http_port
+                )
+                custom_origin_config = self.add_key_else_change_dict_key(
+                    custom_origin_config, "https_port", "h_t_t_p_s_port", self.__default_https_port
+                )
+                if custom_origin_config.get("origin_ssl_protocols", {}).get("items"):
+                    custom_origin_config["origin_ssl_protocols"] = custom_origin_config["origin_ssl_protocols"]["items"]
+                if custom_origin_config.get("origin_ssl_protocols"):
+                    self.validate_attribute_list_with_allowed_list(
+                        custom_origin_config["origin_ssl_protocols"],
+                        "origins[].origin_ssl_protocols",
+                        self.__valid_origin_ssl_protocols,
+                    )
                 else:
-                    custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
-                custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
+                    custom_origin_config["origin_ssl_protocols"] = self.__default_origin_ssl_protocols
+                custom_origin_config["origin_ssl_protocols"] = ansible_list_to_cloudfront_list(
+                    custom_origin_config["origin_ssl_protocols"]
+                )
             return origin
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Error validating distribution origin")
@@ -1795,13 +1888,16 @@ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge
             # is true (if purge_cache_behaviors is not true, we can't really know the full new order)
             if not purge_cache_behaviors:
                 for behavior in config:
-                    all_cache_behaviors[behavior['path_pattern']] = behavior
+                    all_cache_behaviors[behavior["path_pattern"]] = behavior
             for cache_behavior in cache_behaviors:
-                valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
-                                                                    cache_behavior, valid_origins)
-                all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
+                valid_cache_behavior = self.validate_cache_behavior(
+                    all_cache_behaviors.get(cache_behavior.get("path_pattern"), {}), cache_behavior, valid_origins
+                )
+                all_cache_behaviors[cache_behavior["path_pattern"]] = valid_cache_behavior
             if purge_cache_behaviors:
-                for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
+                for target_origin_id in set(all_cache_behaviors.keys()) - set(
+                    [cb["path_pattern"] for cb in cache_behaviors]
+                ):
                     del all_cache_behaviors[target_origin_id]
             return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
         except Exception as e:
@@ -1812,40 +1908,61 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa
             cache_behavior = {}
         if cache_behavior is None and valid_origins is not None:
             return config
-        cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
-        cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
-        cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
-        cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
-        cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
-        cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior)
+        cache_behavior = self.validate_cache_behavior_first_level_keys(
+            config, cache_behavior, valid_origins, is_default_cache
+        )
+        cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior)
+        cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior)
+        cache_behavior = self.validate_lambda_function_associations(
+            config, cache_behavior.get("lambda_function_associations"), cache_behavior
+        )
+        cache_behavior = self.validate_trusted_signers(config, cache_behavior.get("trusted_signers"), cache_behavior)
+        cache_behavior = self.validate_field_level_encryption_id(
+            config, cache_behavior.get("field_level_encryption_id"), cache_behavior
+        )
        return cache_behavior

     def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
         try:
-            cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
-                                                               config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
-            cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
-                                                               config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
-            cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
-                                                               config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
-            cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
-            target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
+            cache_behavior = self.add_key_else_change_dict_key(
+                cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl)
+            )
+            cache_behavior = self.add_key_else_change_dict_key(
+                cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl)
+            )
+            cache_behavior = self.add_key_else_change_dict_key(
+                cache_behavior,
+                "default_ttl",
+                "default_t_t_l",
+                config.get("default_t_t_l", self.__default_cache_behavior_default_ttl),
+            )
+            cache_behavior = self.add_missing_key(
+                cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress)
+            )
+            target_origin_id = cache_behavior.get("target_origin_id", config.get("target_origin_id"))
             if not target_origin_id:
                 target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
-            if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
+            if target_origin_id not in [origin["id"] for origin in valid_origins.get("items", [])]:
                 if is_default_cache:
-                    cache_behavior_name = 'Default cache behavior'
+                    cache_behavior_name = "Default cache behavior"
                 else:
-                    cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
-                self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
-                                          cache_behavior_name)
-            cache_behavior['target_origin_id'] = target_origin_id
-            cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
-                                                        config.get('viewer_protocol_policy',
                                                                   self.__default_cache_behavior_viewer_protocol_policy),
-                                                        self.__valid_viewer_protocol_policies)
-            cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
-                                                  config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
+                    cache_behavior_name = "Cache behavior for path %s" % cache_behavior["path_pattern"]
+                self.module.fail_json(
+                    msg="%s has target_origin_id pointing to an origin that does not exist." % cache_behavior_name
+                )
+            cache_behavior["target_origin_id"] = target_origin_id
+            cache_behavior = self.add_key_else_validate(
+                cache_behavior,
+                "viewer_protocol_policy",
+                "cache_behavior.viewer_protocol_policy",
+                config.get("viewer_protocol_policy", self.__default_cache_behavior_viewer_protocol_policy),
+                self.__valid_viewer_protocol_policies,
+            )
+            cache_behavior = self.add_missing_key(
+                cache_behavior,
+                "smooth_streaming",
+                config.get("smooth_streaming", self.__default_cache_behavior_smooth_streaming),
+            )
             return cache_behavior
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
@@ -1854,30 +1971,40 @@ def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
         try:
             if not forwarded_values:
                 forwarded_values = dict()
-            existing_config = config.get('forwarded_values', {})
-            headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
+            existing_config = config.get("forwarded_values", {})
+            headers = forwarded_values.get("headers", existing_config.get("headers", {}).get("items"))
             if headers:
                 headers.sort()
-            forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
-            if 'cookies' not in forwarded_values:
-                forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
-                forwarded_values['cookies'] = {'forward': forward}
+            forwarded_values["headers"] = ansible_list_to_cloudfront_list(headers)
+            if "cookies" not in forwarded_values:
+                forward = existing_config.get("cookies", {}).get(
+                    "forward", self.__default_cache_behavior_forwarded_values_forward_cookies
+                )
+                forwarded_values["cookies"] = {"forward": forward}
             else:
-                existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
-                whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
+                existing_whitelist = existing_config.get("cookies", {}).get("whitelisted_names", {}).get("items")
+                whitelist = forwarded_values.get("cookies").get("whitelisted_names", existing_whitelist)
                 if whitelist:
-                    self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
-                    forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
-                cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
-                self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
-                                                            self.__valid_cookie_forwarding)
-                forwarded_values['cookies']['forward'] = cookie_forwarding
-            query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
-            self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
-            forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
-            forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
-                                                    existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
-            cache_behavior['forwarded_values'] = forwarded_values
+                    self.validate_is_list(whitelist, "forwarded_values.whitelisted_names")
+                    forwarded_values["cookies"]["whitelisted_names"] = ansible_list_to_cloudfront_list(whitelist)
+                cookie_forwarding = forwarded_values.get("cookies").get(
+                    "forward", existing_config.get("cookies", {}).get("forward")
+                )
+                self.validate_attribute_with_allowed_values(
+                    cookie_forwarding, "cache_behavior.forwarded_values.cookies.forward", self.__valid_cookie_forwarding
+                )
+                forwarded_values["cookies"]["forward"] = cookie_forwarding
+            query_string_cache_keys = forwarded_values.get(
+                "query_string_cache_keys", existing_config.get("query_string_cache_keys", {}).get("items", [])
+            )
+            self.validate_is_list(query_string_cache_keys, "forwarded_values.query_string_cache_keys")
+            forwarded_values["query_string_cache_keys"] = ansible_list_to_cloudfront_list(query_string_cache_keys)
+            forwarded_values = self.add_missing_key(
+                forwarded_values,
+                "query_string",
+                existing_config.get("query_string", self.__default_cache_behavior_forwarded_values_query_string),
+            )
+            cache_behavior["forwarded_values"] = forwarded_values
             return cache_behavior
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating forwarded values")
@@ -1885,57 +2012,68 @@ def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
         try:
             if lambda_function_associations is not None:
-                self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
+                self.validate_is_list(lambda_function_associations, "lambda_function_associations")
                 for association in lambda_function_associations:
-                    association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
-                    self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
-                                                                self.__valid_lambda_function_association_event_types)
-                cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
+                    association = change_dict_key_name(association, "lambda_function_arn", "lambda_function_a_r_n")
+                    self.validate_attribute_with_allowed_values(
+                        association.get("event_type"),
+                        "cache_behaviors[].lambda_function_associations.event_type",
+                        self.__valid_lambda_function_association_event_types,
+                    )
+                cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list(
+                    lambda_function_associations
+                )
             else:
-                if 'lambda_function_associations' in config:
-                    cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
+                if "lambda_function_associations" in config:
+                    cache_behavior["lambda_function_associations"] = config.get("lambda_function_associations")
                 else:
-                    cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
+                    cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list([])
             return cache_behavior
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating lambda function associations")

     def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
         if field_level_encryption_id is not None:
-            cache_behavior['field_level_encryption_id'] = field_level_encryption_id
-        elif 'field_level_encryption_id' in config:
-            cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
+            cache_behavior["field_level_encryption_id"] = field_level_encryption_id
+        elif "field_level_encryption_id" in config:
+            cache_behavior["field_level_encryption_id"] = config.get("field_level_encryption_id")
         else:
-            cache_behavior['field_level_encryption_id'] = ""
+            cache_behavior["field_level_encryption_id"] = ""
         return cache_behavior

     def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
         try:
             if allowed_methods is not None:
-                self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
-                temp_allowed_items = allowed_methods.get('items')
-                self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
-                self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
-                                                               self.__valid_methods_allowed_methods)
-                cached_items = allowed_methods.get('cached_methods')
-                if 'cached_methods' in allowed_methods:
-                    self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
-                    self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]',
-                                                                   self.__valid_methods_cached_methods)
+                self.validate_required_key("items", "cache_behavior.allowed_methods.items[]", allowed_methods)
+                temp_allowed_items = allowed_methods.get("items")
+                self.validate_is_list(temp_allowed_items, "cache_behavior.allowed_methods.items")
+                self.validate_attribute_list_with_allowed_list(
+                    temp_allowed_items, "cache_behavior.allowed_methods.items[]", self.__valid_methods_allowed_methods
+                )
+                cached_items = allowed_methods.get("cached_methods")
+                if "cached_methods" in allowed_methods:
+                    self.validate_is_list(cached_items, "cache_behavior.allowed_methods.cached_methods")
+                    self.validate_attribute_list_with_allowed_list(
+                        cached_items,
+                        "cache_behavior.allowed_items.cached_methods[]",
+                        self.__valid_methods_cached_methods,
+                    )
                 # we don't care if the order of how cloudfront stores the methods differs - preserving existing
                 # order reduces likelihood of making unnecessary changes
-                if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
-                    cache_behavior['allowed_methods'] = config['allowed_methods']
+                if "allowed_methods" in config and set(config["allowed_methods"]["items"]) == set(temp_allowed_items):
+                    cache_behavior["allowed_methods"] = config["allowed_methods"]
                 else:
-                    cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
+                    cache_behavior["allowed_methods"] = ansible_list_to_cloudfront_list(temp_allowed_items)

-                if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
-                    cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
+                if cached_items and set(cached_items) == set(
+                    config.get("allowed_methods", {}).get("cached_methods", {}).get("items", [])
+                ):
+                    cache_behavior["allowed_methods"]["cached_methods"] = config["allowed_methods"]["cached_methods"]
                 else:
-                    cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
+                    cache_behavior["allowed_methods"]["cached_methods"] = ansible_list_to_cloudfront_list(cached_items)
             else:
-                if 'allowed_methods' in config:
-                    cache_behavior['allowed_methods'] = config.get('allowed_methods')
+                if "allowed_methods" in config:
+                    cache_behavior["allowed_methods"] = config.get("allowed_methods")
             return cache_behavior
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating allowed methods")
@@ -1944,14 +2082,16 @@ def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
         try:
             if trusted_signers is None:
                 trusted_signers = {}
-            if 'items' in trusted_signers:
-                valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
+            if "items" in trusted_signers:
+                valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get("items"))
             else:
-                valid_trusted_signers = dict(quantity=config.get('quantity', 0))
-                if 'items' in config:
-                    valid_trusted_signers = dict(items=config['items'])
-            valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
-            cache_behavior['trusted_signers'] = valid_trusted_signers
+                valid_trusted_signers = dict(quantity=config.get("quantity", 0))
+                if "items" in config:
+                    valid_trusted_signers = dict(items=config["items"])
+            valid_trusted_signers["enabled"] = trusted_signers.get(
+                "enabled", config.get("enabled", self.__default_trusted_signers_enabled)
+            )
+            cache_behavior["trusted_signers"] = valid_trusted_signers
             return cache_behavior
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating trusted signers")
@@ -1960,19 +2100,37 @@ def validate_viewer_certificate(self, viewer_certificate):
         try:
             if viewer_certificate is None:
                 return None
-            if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
-                self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
-                                      "_certificate set to true.")
-            self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
-                                                        self.__valid_viewer_certificate_ssl_support_methods)
-            self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
-                                                        self.__valid_viewer_certificate_minimum_protocol_versions)
-            self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
-                                                        self.__valid_viewer_certificate_certificate_sources)
-            viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
-            viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
-            viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
-            viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
+            if (
+                viewer_certificate.get("cloudfront_default_certificate")
+                and viewer_certificate.get("ssl_support_method") is not None
+            ):
+                self.module.fail_json(
+                    msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default"
+                    + "_certificate set to true."
+                )
+            self.validate_attribute_with_allowed_values(
+                viewer_certificate.get("ssl_support_method"),
+                "viewer_certificate.ssl_support_method",
+                self.__valid_viewer_certificate_ssl_support_methods,
+            )
+            self.validate_attribute_with_allowed_values(
+                viewer_certificate.get("minimum_protocol_version"),
+                "viewer_certificate.minimum_protocol_version",
+                self.__valid_viewer_certificate_minimum_protocol_versions,
+            )
+            self.validate_attribute_with_allowed_values(
+                viewer_certificate.get("certificate_source"),
+                "viewer_certificate.certificate_source",
+                self.__valid_viewer_certificate_certificate_sources,
+            )
+            viewer_certificate = change_dict_key_name(
+                viewer_certificate, "cloudfront_default_certificate", "cloud_front_default_certificate"
+            )
+            viewer_certificate = change_dict_key_name(viewer_certificate, "ssl_support_method", "s_s_l_support_method")
+            viewer_certificate = change_dict_key_name(viewer_certificate, "iam_certificate_id", "i_a_m_certificate_id")
+            viewer_certificate = change_dict_key_name(
+                viewer_certificate, "acm_certificate_arn", "a_c_m_certificate_arn"
+            )
             return viewer_certificate
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating viewer certificate")
@@ -1981,16 +2139,18 @@ def validate_custom_error_responses(self, config, custom_error_responses, purge_
         try:
             if custom_error_responses is None and not purge_custom_error_responses:
                 return ansible_list_to_cloudfront_list(config)
-            self.validate_is_list(custom_error_responses, 'custom_error_responses')
+            self.validate_is_list(custom_error_responses, "custom_error_responses")
             result = list()
-            existing_responses = dict((response['error_code'], response) for response in custom_error_responses)
+            existing_responses = dict((response["error_code"], response) for response in custom_error_responses)
             for custom_error_response in custom_error_responses:
-                self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
-                custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
-                if 'response_code' in custom_error_response:
-                    custom_error_response['response_code'] = str(custom_error_response['response_code'])
-                if custom_error_response['error_code'] in existing_responses:
-                    del existing_responses[custom_error_response['error_code']]
+                self.validate_required_key("error_code", "custom_error_responses[].error_code", custom_error_response)
+                custom_error_response = change_dict_key_name(
+                    custom_error_response, "error_caching_min_ttl", "error_caching_min_t_t_l"
+                )
+                if "response_code" in custom_error_response:
+                    custom_error_response["response_code"] = str(custom_error_response["response_code"])
+                if custom_error_response["error_code"] in existing_responses:
+                    del existing_responses[custom_error_response["error_code"]]
                 result.append(custom_error_response)
             if not purge_custom_error_responses:
                 result.extend(existing_responses.values())
@@ -2006,54 +2166,70 @@ def validate_restrictions(self, config, restrictions, purge_restrictions=False):
                     return None
                 else:
                     return config
-            self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
-            geo_restriction = restrictions.get('geo_restriction')
-            self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
-            existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
-            geo_restriction_items = geo_restriction.get('items')
+            self.validate_required_key("geo_restriction", "restrictions.geo_restriction", restrictions)
+            geo_restriction = restrictions.get("geo_restriction")
+            self.validate_required_key(
+                "restriction_type", "restrictions.geo_restriction.restriction_type", geo_restriction
+            )
+            existing_restrictions = (
+                config.get("geo_restriction", {}).get(geo_restriction["restriction_type"], {}).get("items", [])
+            )
+            geo_restriction_items = geo_restriction.get("items")
             if not purge_restrictions:
-                geo_restriction_items.extend([rest for rest in existing_restrictions if
-                                              rest not in geo_restriction_items])
+                geo_restriction_items.extend(
+                    [rest for rest in existing_restrictions if rest not in geo_restriction_items]
+                )
             valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
-            valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
-            return {'geo_restriction': valid_restrictions}
+            valid_restrictions["restriction_type"] = geo_restriction.get("restriction_type")
+            return {"geo_restriction": valid_restrictions}
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating restrictions")

-    def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
+    def validate_distribution_config_parameters(
+        self, config, default_root_object, ipv6_enabled, http_version, web_acl_id
+    ):
         try:
-            config['default_root_object'] = default_root_object or config.get('default_root_object', '')
-            config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled)
-            if http_version is not None or config.get('http_version'):
-                self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
-                config['http_version'] = http_version or config.get('http_version')
-            if web_acl_id or config.get('web_a_c_l_id'):
-                config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
+            config["default_root_object"] = default_root_object or config.get("default_root_object", "")
+            config["is_i_p_v6_enabled"] = (
+                ipv6_enabled
+                if ipv6_enabled is not None
+                else config.get("is_i_p_v6_enabled", self.__default_ipv6_enabled)
+            )
+            if http_version is not None or config.get("http_version"):
+                self.validate_attribute_with_allowed_values(http_version, "http_version", self.__valid_http_versions)
+                config["http_version"] = http_version or config.get("http_version")
+            if web_acl_id or config.get("web_a_c_l_id"):
+                config["web_a_c_l_id"] = web_acl_id or config.get("web_a_c_l_id")
             return config
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating distribution config parameters")

-    def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
+    def validate_common_distribution_parameters(
+        self, config, enabled, aliases, logging, price_class, purge_aliases=False
+    ):
         try:
             if config is None:
                 config = {}
             if aliases is not None:
                 if not purge_aliases:
-                    aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
-                                    if alias not in aliases])
-                config['aliases'] = ansible_list_to_cloudfront_list(aliases)
+                    aliases.extend(
+                        [alias for alias in config.get("aliases", {}).get("items", []) if alias not in aliases]
+                    )
+                config["aliases"] = ansible_list_to_cloudfront_list(aliases)
             if logging is not None:
-                config['logging'] = self.validate_logging(logging)
-            config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
+                config["logging"] = self.validate_logging(logging)
+            config["enabled"] = enabled or config.get("enabled", self.__default_distribution_enabled)
             if price_class is not None:
-                self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
-                config['price_class'] = price_class
+                self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes)
+                config["price_class"] = price_class
             return config
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating common distribution parameters")

     def validate_comment(self, config, comment):
-        config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
+        config["comment"] = comment or config.get(
+            "comment", "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string
+        )
         return config

     def validate_caller_reference(self, caller_reference):
@@ -2062,37 +2238,58 @@ def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
         try:
             if valid_origins is not None:
-                valid_origins_list = valid_origins.get('items')
-                if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
-                    return str(valid_origins_list[0].get('id'))
-            self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
+                valid_origins_list = valid_origins.get("items")
+                if (
+                    valid_origins_list is not None
+                    and isinstance(valid_origins_list, list)
+                    and len(valid_origins_list) > 0
+                ):
+                    return str(valid_origins_list[0].get("id"))
+            self.module.fail_json(
+                msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration."
+            )
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")

     def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
         try:
             self.validate_is_list(attribute_list, attribute_list_name)
-            if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
-                    isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
-                self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
+            if (
+                isinstance(allowed_list, list)
+                and set(attribute_list) not in allowed_list
+                or isinstance(allowed_list, set)
+                and not set(allowed_list).issuperset(attribute_list)
+            ):
+                self.module.fail_json(
+                    msg="The attribute list {0} must be one of [{1}]".format(
+                        attribute_list_name, " ".join(str(a) for a in allowed_list)
+                    )
+                )
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")

     def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
         if attribute is not None and attribute not in allowed_list:
-            self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
+            self.module.fail_json(
+                msg="The attribute {0} must be one of [{1}]".format(
+                    attribute_name, " ".join(str(a) for a in allowed_list)
+                )
+            )

     def validate_distribution_from_caller_reference(self, caller_reference):
         try:
             distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False)
-            distribution_name = 'Distribution'
-            distribution_config_name = 'DistributionConfig'
-            distribution_ids = [dist.get('Id') for dist in distributions]
+            distribution_name = "Distribution"
+            distribution_config_name = "DistributionConfig"
+            distribution_ids = [dist.get("Id") for dist in distributions]
             for distribution_id in distribution_ids:
                 distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id)
                 if distribution is not None:
                     distribution_config = distribution[distribution_name].get(distribution_config_name)
-                    if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
+                    if (
+                        distribution_config is not None
+                        and distribution_config.get("CallerReference") == caller_reference
+                    ):
                         distribution[distribution_name][distribution_config_name] = distribution_config
                         return distribution
@@ -2110,28 +2307,33 @@ def validate_distribution_from_aliases_caller_reference(self, distribution_id, a
                 return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id)
             return None
         except Exception as e:
-            self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
+            self.module.fail_json_aws(
+                e, msg="Error validating distribution_id from alias, aliases and caller reference"
+            )

     def validate_distribution_id_from_alias(self, aliases):
         distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False)
         if distributions:
             for distribution in distributions:
-                distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
+                distribution_aliases = distribution.get("Aliases", {}).get("Items", [])
                 if set(aliases) & set(distribution_aliases):
-                    return distribution['Id']
+                    return distribution["Id"]
         return None

     def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
         if distribution_id is None:
-            distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id']
+            distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)["Id"]

         try:
-            waiter = client.get_waiter('distribution_deployed')
+            waiter = client.get_waiter("distribution_deployed")
             attempts = 1 + int(wait_timeout / 60)
-            waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
+            waiter.wait(Id=distribution_id, WaiterConfig={"MaxAttempts": attempts})
         except botocore.exceptions.WaiterError as e:
-            self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action."
-                                             " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)))
+            self.module.fail_json_aws(
+                e,
+                msg="Timeout waiting for CloudFront action."
+                " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)),
+            )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
@@ -2139,36 +2341,36 @@ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_ref
 def main():
     argument_spec = dict(
-        state=dict(choices=['present', 'absent'], default='present'),
+        state=dict(choices=["present", "absent"], default="present"),
         caller_reference=dict(),
         comment=dict(),
         distribution_id=dict(),
         e_tag=dict(),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
         alias=dict(),
-        aliases=dict(type='list', default=[], elements='str'),
-        purge_aliases=dict(type='bool', default=False),
+        aliases=dict(type="list", default=[], elements="str"),
+        purge_aliases=dict(type="bool", default=False),
         default_root_object=dict(),
-        origins=dict(type='list', elements='dict'),
-        purge_origins=dict(type='bool', default=False),
-        default_cache_behavior=dict(type='dict'),
-        cache_behaviors=dict(type='list', elements='dict'),
-        purge_cache_behaviors=dict(type='bool', default=False),
-        custom_error_responses=dict(type='list', elements='dict'),
-        purge_custom_error_responses=dict(type='bool', default=False),
-        logging=dict(type='dict'),
+        origins=dict(type="list", elements="dict"),
+        purge_origins=dict(type="bool", default=False),
+        default_cache_behavior=dict(type="dict"),
+        cache_behaviors=dict(type="list", elements="dict"),
+        purge_cache_behaviors=dict(type="bool", default=False),
+        custom_error_responses=dict(type="list", elements="dict"),
+        purge_custom_error_responses=dict(type="bool", default=False),
+        logging=dict(type="dict"),
         price_class=dict(),
-        enabled=dict(type='bool'),
-        viewer_certificate=dict(type='dict'),
-        restrictions=dict(type='dict'),
+        enabled=dict(type="bool"),
+        viewer_certificate=dict(type="dict"),
+        restrictions=dict(type="dict"),
         web_acl_id=dict(),
         http_version=dict(),
-        ipv6_enabled=dict(type='bool'),
+        ipv6_enabled=dict(type="bool"),
         default_origin_domain_name=dict(),
         default_origin_path=dict(),
-        wait=dict(default=False, type='bool'),
-        wait_timeout=dict(default=1800, type='int')
+        wait=dict(default=False, type="bool"),
+        wait_timeout=dict(default=1800, type="int"),
     )

     result = {}
@@ -2178,129 +2380,154 @@ def main():
         argument_spec=argument_spec,
         supports_check_mode=False,
         mutually_exclusive=[
-            ['distribution_id', 'alias'],
-            ['default_origin_domain_name', 'distribution_id'],
-            ['default_origin_domain_name', 'alias'],
-        ]
+            ["distribution_id", "alias"],
+            ["default_origin_domain_name", "distribution_id"],
+            ["default_origin_domain_name", "alias"],
+        ],
     )

-    client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+    client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff())

     validation_mgr = CloudFrontValidationManager(module)

-    state = module.params.get('state')
-    caller_reference = module.params.get('caller_reference')
-    comment = module.params.get('comment')
-    e_tag = module.params.get('e_tag')
-    tags = module.params.get('tags')
-    purge_tags = module.params.get('purge_tags')
-    distribution_id = module.params.get('distribution_id')
-    alias = module.params.get('alias')
-    aliases = module.params.get('aliases')
-    purge_aliases = module.params.get('purge_aliases')
-    default_root_object = module.params.get('default_root_object')
-    origins = module.params.get('origins')
-    purge_origins = module.params.get('purge_origins')
-    default_cache_behavior = module.params.get('default_cache_behavior')
-    cache_behaviors = module.params.get('cache_behaviors')
-    purge_cache_behaviors = module.params.get('purge_cache_behaviors')
-    custom_error_responses = module.params.get('custom_error_responses')
-    purge_custom_error_responses = module.params.get('purge_custom_error_responses')
-    logging = module.params.get('logging')
-    price_class = module.params.get('price_class')
-    enabled = module.params.get('enabled')
-    viewer_certificate = module.params.get('viewer_certificate')
-    restrictions = module.params.get('restrictions')
-    purge_restrictions = module.params.get('purge_restrictions')
-    web_acl_id = module.params.get('web_acl_id')
-    http_version = module.params.get('http_version')
-    ipv6_enabled = module.params.get('ipv6_enabled')
-    default_origin_domain_name = module.params.get('default_origin_domain_name')
-    default_origin_path = module.params.get('default_origin_path')
-    wait = module.params.get('wait')
-    wait_timeout = module.params.get('wait_timeout')
+    state = module.params.get("state")
+    caller_reference = module.params.get("caller_reference")
+    comment = module.params.get("comment")
+    e_tag = module.params.get("e_tag")
+    tags = module.params.get("tags")
+    purge_tags = module.params.get("purge_tags")
+    distribution_id = module.params.get("distribution_id")
+    alias = module.params.get("alias")
+    aliases = module.params.get("aliases")
+    purge_aliases = module.params.get("purge_aliases")
+    default_root_object = module.params.get("default_root_object")
+    origins = module.params.get("origins")
+    purge_origins = module.params.get("purge_origins")
+    default_cache_behavior = module.params.get("default_cache_behavior")
+    cache_behaviors = module.params.get("cache_behaviors")
+    purge_cache_behaviors = module.params.get("purge_cache_behaviors")
+    custom_error_responses = module.params.get("custom_error_responses")
+    purge_custom_error_responses = module.params.get("purge_custom_error_responses")
+    logging = module.params.get("logging")
+    price_class = module.params.get("price_class")
+    enabled = module.params.get("enabled")
+    viewer_certificate = module.params.get("viewer_certificate")
+    restrictions = module.params.get("restrictions")
+    purge_restrictions = module.params.get("purge_restrictions")
+    web_acl_id = module.params.get("web_acl_id")
+    http_version = module.params.get("http_version")
+    ipv6_enabled = module.params.get("ipv6_enabled")
+    default_origin_domain_name = module.params.get("default_origin_domain_name")
+    default_origin_path = module.params.get("default_origin_path")
+    wait = module.params.get("wait")
+    wait_timeout = module.params.get("wait_timeout")

     if alias and alias not in aliases:
         aliases.append(alias)

-    distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+    distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(
+        distribution_id, aliases, caller_reference
+    )

-    update = state == 'present' and distribution
-    create = state == 'present' and not distribution
-    delete = state == 'absent' and distribution
+    update = state == "present" and distribution
+    create = state == "present" and not distribution
+    delete = state == "absent" and distribution

     if not (update or create or delete):
         module.exit_json(changed=False)

     config = {}
     if update or delete:
-        config = distribution['Distribution']['DistributionConfig']
-        e_tag = distribution['ETag']
-        distribution_id = distribution['Distribution']['Id']
+        config = distribution["Distribution"]["DistributionConfig"]
+        e_tag = distribution["ETag"]
+        distribution_id = distribution["Distribution"]["Id"]

     if update:
         config = camel_dict_to_snake_dict(config, reversible=True)

     if create or update:
-        config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
-        config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
-        config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
-                                                            default_origin_path, create, purge_origins)
-        config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
-                                                                            cache_behaviors, config['origins'], purge_cache_behaviors)
-        config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
-                                                                                  default_cache_behavior, config['origins'], True)
-        config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
-                                                                                          custom_error_responses, purge_custom_error_responses)
-        valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
+        config = validation_mgr.validate_common_distribution_parameters(
+            config, enabled, aliases, logging, price_class, purge_aliases
+        )
+        config = validation_mgr.validate_distribution_config_parameters(
+            config, default_root_object, ipv6_enabled, http_version, web_acl_id
+        )
+        config["origins"] = validation_mgr.validate_origins(
+            client,
+            config.get("origins", {}).get("items", []),
+            origins,
+            default_origin_domain_name,
+            default_origin_path,
+            create,
+            purge_origins,
+        )
+        config["cache_behaviors"] = validation_mgr.validate_cache_behaviors(
+            config.get("cache_behaviors", {}).get("items", []),
+            cache_behaviors,
+            config["origins"],
+            purge_cache_behaviors,
+        )
+        config["default_cache_behavior"] = validation_mgr.validate_cache_behavior(
+            config.get("default_cache_behavior", {}), default_cache_behavior, config["origins"], True
+        )
+        config["custom_error_responses"] = validation_mgr.validate_custom_error_responses(
+            config.get("custom_error_responses", {}).get("items", []),
+            custom_error_responses,
+            purge_custom_error_responses,
+        )
+        valid_restrictions = validation_mgr.validate_restrictions(
+            config.get("restrictions", {}), restrictions, purge_restrictions
+        )
         if valid_restrictions:
-            config['restrictions'] = valid_restrictions
+            config["restrictions"] = valid_restrictions
         valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
-        config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
+        config = merge_validation_into_config(config, valid_viewer_certificate, "viewer_certificate")
         config = validation_mgr.validate_comment(config, comment)
         config = snake_dict_to_camel_dict(config, capitalize_first=True)

     if create:
-        config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
+        config["CallerReference"] = validation_mgr.validate_caller_reference(caller_reference)
         result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {}))
         result = camel_dict_to_snake_dict(result)
-        result['tags'] = list_tags_for_resource(client, module, result['arn'])
+        result["tags"] = list_tags_for_resource(client, module, result["arn"])

     if delete:
-        if config['Enabled']:
-            config['Enabled'] = False
+        if config["Enabled"]:
+            config["Enabled"] = False
             result = update_distribution(client, module, config, distribution_id, e_tag)
-            validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
-        distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+            validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference"))
+        distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(
+            distribution_id, aliases, caller_reference
+        )
         # e_tag = distribution['ETag']
         result = delete_distribution(client, module, distribution)

     if update:
-        changed = config != distribution['Distribution']['DistributionConfig']
+        changed = config != distribution["Distribution"]["DistributionConfig"]
         if changed:
             result = update_distribution(client, module, config, distribution_id, e_tag)
         else:
-            result = distribution['Distribution']
-        existing_tags = list_tags_for_resource(client, module, result['ARN'])
-        distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
-        changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
+            result = distribution["Distribution"]
+        existing_tags = list_tags_for_resource(client, module, result["ARN"])
+        distribution["Distribution"]["DistributionConfig"]["tags"] = existing_tags
+        changed |= update_tags(client, module, existing_tags, tags, purge_tags, result["ARN"])
         result = camel_dict_to_snake_dict(result)
-        result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
-        result['diff'] = dict()
-        diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
+        result["distribution_config"]["tags"] = config["tags"] = list_tags_for_resource(client, module, result["arn"])
+        result["diff"] = dict()
+        diff = recursive_diff(distribution["Distribution"]["DistributionConfig"], config)
         if diff:
-            result['diff']['before'] = diff[0]
-            result['diff']['after'] = diff[1]
+            result["diff"]["before"] = diff[0]
+            result["diff"]["after"] = diff[1]

     if wait and (create or update):
-        validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+        validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference"))

-    if 'distribution_config' in result:
-        result.update(result['distribution_config'])
-        del result['distribution_config']
+    if "distribution_config" in result:
+        result.update(result["distribution_config"])
+        del result["distribution_config"]

     module.exit_json(changed=changed, **result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py
index bc6bd8073c9..3bd20868ae5 100644
--- a/cloudfront_distribution_info.py
+++ b/cloudfront_distribution_info.py
@@ -250,8 +250,8 @@ def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
     facts[distribution_id] = details
     # also have a fixed key for accessing results/details returned
-    facts['result'] = details
-    facts['result']['DistributionId'] = distribution_id
+    facts["result"] = details
+    facts["result"]["DistributionId"] = distribution_id

     for alias in aliases:
         facts[alias] = details
@@ -260,78 +260,94 @@ def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, ali
 def main():
     argument_spec = dict(
-        distribution_id=dict(required=False, type='str'),
-        invalidation_id=dict(required=False, type='str'),
-        origin_access_identity_id=dict(required=False, type='str'),
-        domain_name_alias=dict(required=False, type='str'),
-        all_lists=dict(required=False, default=False, type='bool'),
-        distribution=dict(required=False, default=False, type='bool'),
-        distribution_config=dict(required=False, default=False, type='bool'),
-        origin_access_identity=dict(required=False, default=False, type='bool'),
-        origin_access_identity_config=dict(required=False, default=False, type='bool'),
-        invalidation=dict(required=False, default=False, type='bool'),
-        streaming_distribution=dict(required=False, default=False, type='bool'),
-        streaming_distribution_config=dict(required=False, default=False, type='bool'),
-        list_origin_access_identities=dict(required=False, default=False, type='bool'),
-        list_distributions=dict(required=False, default=False, type='bool'),
-        list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
-        list_invalidations=dict(required=False, default=False, type='bool'),
-        list_streaming_distributions=dict(required=False, default=False, type='bool'),
-        summary=dict(required=False, default=False, type='bool'),
+        distribution_id=dict(required=False, type="str"),
+        invalidation_id=dict(required=False, type="str"),
+        origin_access_identity_id=dict(required=False, type="str"),
+        domain_name_alias=dict(required=False, type="str"),
+        all_lists=dict(required=False, default=False, type="bool"),
+        distribution=dict(required=False, default=False, type="bool"),
+        distribution_config=dict(required=False, default=False, type="bool"),
+        origin_access_identity=dict(required=False, default=False, type="bool"),
+        origin_access_identity_config=dict(required=False, default=False, type="bool"),
+        invalidation=dict(required=False, default=False, type="bool"),
+        streaming_distribution=dict(required=False, default=False, type="bool"),
+        streaming_distribution_config=dict(required=False, default=False, type="bool"),
+        list_origin_access_identities=dict(required=False, default=False, type="bool"),
+        list_distributions=dict(required=False, default=False, type="bool"),
+        list_distributions_by_web_acl_id=dict(required=False, default=False, type="bool"),
+        list_invalidations=dict(required=False, default=False, type="bool"),
+        list_streaming_distributions=dict(required=False, default=False, type="bool"),
+        summary=dict(required=False, default=False, type="bool"),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

     service_mgr =
CloudFrontFactsServiceManager(module) - distribution_id = module.params.get('distribution_id') - invalidation_id = module.params.get('invalidation_id') - origin_access_identity_id = module.params.get('origin_access_identity_id') - web_acl_id = module.params.get('web_acl_id') - domain_name_alias = module.params.get('domain_name_alias') - all_lists = module.params.get('all_lists') - distribution = module.params.get('distribution') - distribution_config = module.params.get('distribution_config') - origin_access_identity = module.params.get('origin_access_identity') - origin_access_identity_config = module.params.get('origin_access_identity_config') - invalidation = module.params.get('invalidation') - streaming_distribution = module.params.get('streaming_distribution') - streaming_distribution_config = module.params.get('streaming_distribution_config') - list_origin_access_identities = module.params.get('list_origin_access_identities') - list_distributions = module.params.get('list_distributions') - list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id') - list_invalidations = module.params.get('list_invalidations') - list_streaming_distributions = module.params.get('list_streaming_distributions') - summary = module.params.get('summary') + distribution_id = module.params.get("distribution_id") + invalidation_id = module.params.get("invalidation_id") + origin_access_identity_id = module.params.get("origin_access_identity_id") + web_acl_id = module.params.get("web_acl_id") + domain_name_alias = module.params.get("domain_name_alias") + all_lists = module.params.get("all_lists") + distribution = module.params.get("distribution") + distribution_config = module.params.get("distribution_config") + origin_access_identity = module.params.get("origin_access_identity") + origin_access_identity_config = module.params.get("origin_access_identity_config") + invalidation = module.params.get("invalidation") + streaming_distribution = module.params.get("streaming_distribution") + streaming_distribution_config = module.params.get("streaming_distribution_config") + list_origin_access_identities = module.params.get("list_origin_access_identities") + list_distributions = module.params.get("list_distributions") + list_distributions_by_web_acl_id = module.params.get("list_distributions_by_web_acl_id") + list_invalidations = module.params.get("list_invalidations") + list_streaming_distributions = module.params.get("list_streaming_distributions") + summary = module.params.get("summary") aliases = [] - result = {'cloudfront': {}} + result = {"cloudfront": {}} facts = {} - require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or - streaming_distribution_config or list_invalidations) + require_distribution_id = ( + distribution + or distribution_config + or invalidation + or streaming_distribution + or streaming_distribution_config + or list_invalidations + ) # set default to summary if no option specified - summary = summary or not (distribution or distribution_config or origin_access_identity or - origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or - list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or - list_streaming_distributions or list_distributions) + summary = summary or not ( + distribution + or distribution_config + or origin_access_identity + or origin_access_identity_config + or invalidation + or streaming_distribution + or 
streaming_distribution_config + or list_origin_access_identities + or list_distributions_by_web_acl_id + or list_invalidations + or list_streaming_distributions + or list_distributions + ) # validations if require_distribution_id and distribution_id is None and domain_name_alias is None: - module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.') - if (invalidation and invalidation_id is None): - module.fail_json(msg='Error invalidation_id has not been specified.') + module.fail_json(msg="Error distribution_id or domain_name_alias have not been specified.") + if invalidation and invalidation_id is None: + module.fail_json(msg="Error invalidation_id has not been specified.") if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None: - module.fail_json(msg='Error origin_access_identity_id has not been specified.') + module.fail_json(msg="Error origin_access_identity_id has not been specified.") if list_distributions_by_web_acl_id and web_acl_id is None: - module.fail_json(msg='Error web_acl_id has not been specified.') + module.fail_json(msg="Error web_acl_id has not been specified.") # get distribution id from domain name alias if require_distribution_id and distribution_id is None: distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias) if not distribution_id: - module.fail_json(msg='Error unable to source a distribution id from domain_name_alias') + module.fail_json(msg="Error unable to source a distribution id from domain_name_alias") # set appropriate cloudfront id if invalidation_id is not None and invalidation: @@ -349,7 +365,9 @@ def main(): if origin_access_identity: facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id)) if origin_access_identity_config: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) + facts[origin_access_identity_id].update( + service_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + ) if invalidation: facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id) facts[invalidation_id].update(facts_to_set) @@ -359,32 +377,32 @@ def main(): facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id) if list_invalidations: invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {} - facts_to_set = {'invalidations': invalidations} - if 'facts_to_set' in vars(): + facts_to_set = {"invalidations": invalidations} + if "facts_to_set" in vars(): aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases) # get list based on options if all_lists or list_origin_access_identities: - facts['origin_access_identities'] = service_mgr.list_origin_access_identities() or {} + facts["origin_access_identities"] = service_mgr.list_origin_access_identities() or {} if all_lists or list_distributions: - facts['distributions'] = service_mgr.list_distributions() or {} + facts["distributions"] = service_mgr.list_distributions() or {} if all_lists or list_streaming_distributions: - facts['streaming_distributions'] = service_mgr.list_streaming_distributions() or {} + facts["streaming_distributions"] = service_mgr.list_streaming_distributions() or {} if list_distributions_by_web_acl_id: - facts['distributions_by_web_acl_id'] = 
service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} + facts["distributions_by_web_acl_id"] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} if list_invalidations: - facts['invalidations'] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} + facts["invalidations"] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} # default summary option if summary: - facts['summary'] = service_mgr.summary() + facts["summary"] = service_mgr.summary() - result['changed'] = False - result['cloudfront'].update(facts) + result["changed"] = False + result["cloudfront"].update(facts) module.exit_json(msg="Retrieved CloudFront info.", **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py index adee5058b17..b98b56be2d2 100644 --- a/cloudfront_invalidation.py +++ b/cloudfront_invalidation.py @@ -152,24 +152,33 @@ class CloudFrontInvalidationServiceManager(object): def __init__(self, module, cloudfront_facts_mgr): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") self.__cloudfront_facts_mgr = cloudfront_facts_mgr def create_invalidation(self, distribution_id, invalidation_batch): - current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) + current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch["CallerReference"]) try: - response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch) - response.pop('ResponseMetadata', None) + response = self.client.create_invalidation( + DistributionId=distribution_id, InvalidationBatch=invalidation_batch + ) + response.pop("ResponseMetadata", None) if current_invalidation_response: return response, False else: return response, True - except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation ' - 'batch for the same distribution.'): - self.module.warn("InvalidationBatch target paths are not modifiable. " - "To make a new invalidation please update caller_reference.") + except is_boto3_error_message( + "Your request contains a caller reference that was used for a previous invalidation " + "batch for the same distribution." + ): + self.module.warn( + "InvalidationBatch target paths are not modifiable. " + "To make a new invalidation please update caller_reference." 
+ ) return current_invalidation_response, False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): @@ -178,9 +187,11 @@ def get_invalidation(self, distribution_id, caller_reference): # check if there is an invalidation with the same caller reference for invalidation in invalidations: - invalidation_info = self.__cloudfront_facts_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation['Id']) - if invalidation_info.get('InvalidationBatch', {}).get('CallerReference') == caller_reference: - invalidation_info.pop('ResponseMetadata', None) + invalidation_info = self.__cloudfront_facts_mgr.get_invalidation( + distribution_id=distribution_id, id=invalidation["Id"] + ) + if invalidation_info.get("InvalidationBatch", {}).get("CallerReference") == caller_reference: + invalidation_info.pop("ResponseMetadata", None) return invalidation_info return {} @@ -217,8 +228,8 @@ def validate_invalidation_batch(self, invalidation_batch, caller_reference): else: valid_caller_reference = datetime.datetime.now().isoformat() valid_invalidation_batch = { - 'paths': self.create_aws_list(invalidation_batch), - 'caller_reference': valid_caller_reference + "paths": self.create_aws_list(invalidation_batch), + "caller_reference": valid_caller_reference, } return valid_invalidation_batch except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -230,19 +241,21 @@ def main(): caller_reference=dict(), distribution_id=dict(), alias=dict(), - target_paths=dict(required=True, type='list', elements='str') + target_paths=dict(required=True, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[["distribution_id", "alias"]] + ) cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr) service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr) - caller_reference = module.params.get('caller_reference') - distribution_id = module.params.get('distribution_id') - alias = module.params.get('alias') - target_paths = module.params.get('target_paths') + caller_reference = module.params.get("caller_reference") + distribution_id = module.params.get("distribution_id") + alias = module.params.get("alias") + target_paths = module.params.get("target_paths") result = {} @@ -254,5 +267,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py index 1da411f8677..3c9340df611 100644 --- a/cloudfront_origin_access_identity.py +++ b/cloudfront_origin_access_identity.py @@ -136,15 +136,12 @@ class CloudFrontOriginAccessIdentityServiceManager(object): def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") def create_origin_access_identity(self, caller_reference, comment): try: return 
self.client.create_cloud_front_origin_access_identity( - CloudFrontOriginAccessIdentityConfig={ - 'CallerReference': caller_reference, - 'Comment': comment - } + CloudFrontOriginAccessIdentityConfig={"CallerReference": caller_reference, "Comment": comment} ) except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.") @@ -158,14 +155,12 @@ def delete_origin_access_identity(self, origin_access_identity_id, e_tag): def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): changed = False - new_config = { - 'CallerReference': caller_reference, - 'Comment': comment - } + new_config = {"CallerReference": caller_reference, "Comment": comment} try: - current_config = self.client.get_cloud_front_origin_access_identity_config( - Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig'] + current_config = self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)[ + "CloudFrontOriginAccessIdentityConfig" + ] except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.") @@ -197,8 +192,10 @@ def __init__(self, module): def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True): try: - return self.__cloudfront_facts_mgr.get_origin_access_identity(id=origin_access_identity_id, fail_if_error=False) - except is_boto3_error_code('NoSuchCloudFrontOriginAccessIdentity') as e: # pylint: disable=duplicate-except + return self.__cloudfront_facts_mgr.get_origin_access_identity( + id=origin_access_identity_id, fail_if_error=False + ) + except is_boto3_error_code("NoSuchCloudFrontOriginAccessIdentity") as e: # pylint: disable=duplicate-except if fail_if_missing: self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") return {} @@ -208,36 +205,39 @@ def describe_origin_access_identity(self, origin_access_identity_id, fail_if_mis def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id, fail_if_missing): oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing) if oai is not None: - return oai.get('ETag') + return oai.get("ETag") - def validate_origin_access_identity_id_from_caller_reference( - self, caller_reference): + def validate_origin_access_identity_id_from_caller_reference(self, caller_reference): origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() - origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities] + origin_origin_access_identity_ids = [oai.get("Id") for oai in origin_access_identities] for origin_access_identity_id in origin_origin_access_identity_ids: - oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id)) - temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference') + oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id) + temp_caller_reference = oai_config.get("CloudFrontOriginAccessIdentityConfig").get("CallerReference") if temp_caller_reference == caller_reference: return origin_access_identity_id def validate_comment(self, comment): if comment is None: - return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + return "origin access identity created by Ansible with datetime " + 
datetime.datetime.now().strftime( + "%Y-%m-%dT%H:%M:%S.%f" + ) return comment def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference): if caller_reference is None: if origin_access_identity_id is None: - return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True) - origin_access_config = oai.get('CloudFrontOriginAccessIdentity', {}).get('CloudFrontOriginAccessIdentityConfig', {}) - return origin_access_config.get('CallerReference') + origin_access_config = oai.get("CloudFrontOriginAccessIdentity", {}).get( + "CloudFrontOriginAccessIdentityConfig", {} + ) + return origin_access_config.get("CallerReference") return caller_reference def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), origin_access_identity_id=dict(), caller_reference=dict(), comment=dict(), @@ -251,22 +251,28 @@ def main(): service_mgr = CloudFrontOriginAccessIdentityServiceManager(module) validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module) - state = module.params.get('state') - caller_reference = module.params.get('caller_reference') + state = module.params.get("state") + caller_reference = module.params.get("caller_reference") - comment = module.params.get('comment') - origin_access_identity_id = module.params.get('origin_access_identity_id') + comment = module.params.get("comment") + origin_access_identity_id = module.params.get("origin_access_identity_id") if origin_access_identity_id is None and caller_reference is not None: - origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference) + origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference( + caller_reference + ) - if state == 'present': + if state == "present": comment = validation_mgr.validate_comment(comment) - caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id(origin_access_identity_id, caller_reference) + caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id( + origin_access_identity_id, caller_reference + ) if origin_access_identity_id is not None: e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True) # update cloudfront origin access identity - result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag) + result, changed = service_mgr.update_origin_access_identity( + caller_reference, comment, origin_access_identity_id, e_tag + ) else: # create cloudfront origin access identity result = service_mgr.create_origin_access_identity(caller_reference, comment) @@ -276,10 +282,10 @@ def main(): if e_tag: result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - result.pop('ResponseMetadata', None) + result.pop("ResponseMetadata", None) module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py index c84346c387a..a7558e8a86d 100644 --- a/cloudfront_response_headers_policy.py +++ b/cloudfront_response_headers_policy.py @@ -155,21 +155,20 @@ class 
CloudfrontResponseHeadersPolicyService(object): - def __init__(self, module): self.module = module - self.client = module.client('cloudfront') + self.client = module.client("cloudfront") self.check_mode = module.check_mode def find_response_headers_policy(self, name): try: - policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items'] + policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"] for policy in policies: - if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name: - policy_id = policy['ResponseHeadersPolicy']['Id'] + if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name: + policy_id = policy["ResponseHeadersPolicy"]["Id"] # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here - matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id']) + matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) break else: matching_policy = None @@ -183,17 +182,17 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True) # Little helper for turning xss_protection into XSSProtection and not into XssProtection - if 'XssProtection' in security_headers_config: - security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection') + if "XssProtection" in security_headers_config: + security_headers_config["XSSProtection"] = security_headers_config.pop("XssProtection") custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True) config = { - 'Name': name, - 'Comment': comment, - 'CorsConfig': self.insert_quantities(cors_config), - 'SecurityHeadersConfig': security_headers_config, - 'CustomHeadersConfig': self.insert_quantities(custom_headers_config) + "Name": name, + "Comment": comment, + "CorsConfig": self.insert_quantities(cors_config), + "SecurityHeadersConfig": security_headers_config, + "CustomHeadersConfig": self.insert_quantities(custom_headers_config), } config = {k: v for k, v in config.items() if v} @@ -212,14 +211,16 @@ def create_response_header_policy(self, name, comment, cors_config, security_hea except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error creating policy") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] try: - result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config) + result = self.client.update_response_headers_policy( + Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config + ) - changed_time = result['ResponseHeadersPolicy']['LastModifiedTime'] + changed_time = result["ResponseHeadersPolicy"]["LastModifiedTime"] seconds = 3 # threshhold for returned timestamp age - seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds)) + seconds_ago = datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds) # consider change made by this execution of the module if returned timestamp was very recent if changed_time > seconds_ago: @@ -235,8 +236,8 @@ def delete_response_header_policy(self, name): if matching_policy is None: self.module.exit_json(msg="Didn't find a matching 
policy by that name, not deleting") else: - policy_id = matching_policy['ResponseHeadersPolicy']['Id'] - etag = matching_policy['ETag'] + policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] + etag = matching_policy["ETag"] if self.check_mode: result = {} else: @@ -251,43 +252,45 @@ def delete_response_header_policy(self, name): @staticmethod def insert_quantities(dict_with_items): # Items on top level case - if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list): - dict_with_items['Quantity'] = len(dict_with_items['Items']) + if "Items" in dict_with_items and isinstance(dict_with_items["Items"], list): + dict_with_items["Quantity"] = len(dict_with_items["Items"]) # Items on second level case for k, v in dict_with_items.items(): - if isinstance(v, dict) and 'Items' in v: - v['Quantity'] = len(v['Items']) + if isinstance(v, dict) and "Items" in v: + v["Quantity"] = len(v["Items"]) return dict_with_items def main(): argument_spec = dict( - name=dict(required=True, type='str'), - comment=dict(type='str'), - cors_config=dict(type='dict', default=dict()), - security_headers_config=dict(type='dict', default=dict()), - custom_headers_config=dict(type='dict', default=dict()), - state=dict(choices=['present', 'absent'], type='str', default='present'), + name=dict(required=True, type="str"), + comment=dict(type="str"), + cors_config=dict(type="dict", default=dict()), + security_headers_config=dict(type="dict", default=dict()), + custom_headers_config=dict(type="dict", default=dict()), + state=dict(choices=["present", "absent"], type="str", default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - name = module.params.get('name') - comment = module.params.get('comment', '') - cors_config = module.params.get('cors_config') - security_headers_config = module.params.get('security_headers_config') - custom_headers_config = module.params.get('custom_headers_config') - state = module.params.get('state') + name = module.params.get("name") + comment = module.params.get("comment", "") + cors_config = module.params.get("cors_config") + security_headers_config = module.params.get("security_headers_config") + custom_headers_config = module.params.get("custom_headers_config") + state = module.params.get("state") service = CloudfrontResponseHeadersPolicyService(module) - if state == 'absent': + if state == "absent": service.delete_response_header_policy(name) else: - service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config) + service.create_response_header_policy( + name, comment, cors_config, security_headers_config, custom_headers_config + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/codebuild_project.py b/codebuild_project.py index 71f05bf7233..6a910799d88 100644 --- a/codebuild_project.py +++ b/codebuild_project.py @@ -308,17 +308,15 @@ class CodeBuildAnsibleAWSError(AnsibleAWSError): def do_create_project(client, params, formatted_params): - if params["source"] is None or params["artifacts"] is None: raise CodeBuildAnsibleAWSError( message="The source and artifacts parameters must be provided " - "when creating a new project. No existing project was found.") + "when creating a new project. No existing project was found." 
+ ) if params["tags"] is not None: formatted_params["tags"] = ansible_dict_to_boto3_tag_list( - params["tags"], - tag_name_key_name="key", - tag_value_key_name="value" + params["tags"], tag_name_key_name="key", tag_value_key_name="value" ) permitted_create_params = get_boto3_client_method_parameters(client, "create_project") @@ -357,7 +355,7 @@ def do_update_project(client, params, formatted_params, found_project): permitted_update_params = get_boto3_client_method_parameters(client, "update_project") formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) - found_tags = found_project.pop('tags', []) + found_tags = found_project.pop("tags", []) if params["tags"] is not None: formatted_update_params["tags"] = format_tags( merge_tags(found_tags, params["tags"], params["purge_tags"]), @@ -373,7 +371,7 @@ def do_update_project(client, params, formatted_params, found_project): found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags) updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags) - changed = (updated_project != found_project) + changed = updated_project != found_project updated_project["tags"] = updated_tags return resp, changed @@ -381,7 +379,7 @@ def do_update_project(client, params, formatted_params, found_project): def create_or_update_project(client, params): resp = {} - name = params['name'] + name = params["name"] # clean up params formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) @@ -426,7 +424,7 @@ def delete_project(client, name): def describe_project(client, name): project = {} try: - projects = client.batch_get_projects(names=[name])['projects'] + projects = client.batch_get_projects(names=[name])["projects"] if len(projects) > 0: project = projects[0] return project @@ -439,11 +437,11 @@ def describe_project(client, name): def format_project_result(project_result): formated_result = camel_dict_to_snake_dict(project_result) - project = project_result.get('project', {}) + project = project_result.get("project", {}) if project: - tags = project.get('tags', []) - formated_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags) - formated_result['ORIGINAL'] = project_result + tags = project.get("tags", []) + formated_result["project"]["resource_tags"] = boto3_tag_list_to_ansible_dict(tags) + formated_result["ORIGINAL"] = project_result return formated_result @@ -451,35 +449,35 @@ def main(): argument_spec = dict( name=dict(required=True), description=dict(), - source=dict(type='dict'), - artifacts=dict(type='dict'), - cache=dict(type='dict'), - environment=dict(type='dict'), + source=dict(type="dict"), + artifacts=dict(type="dict"), + cache=dict(type="dict"), + environment=dict(type="dict"), service_role=dict(), - timeout_in_minutes=dict(type='int', default=60), + timeout_in_minutes=dict(type="int", default=60), encryption_key=dict(no_log=False), - tags=dict(type='dict', aliases=["resource_tags"]), - purge_tags=dict(type='bool', default=True), - vpc_config=dict(type='dict'), - state=dict(choices=['present', 'absent'], default='present') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + vpc_config=dict(type="dict"), + state=dict(choices=["present", "absent"], default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client('codebuild') + client_conn = module.client("codebuild") - state = module.params.get('state') + state = 
module.params.get("state") changed = False try: - if state == 'present': + if state == "present": project_result, changed = create_or_update_project( client=client_conn, params=module.params, ) - elif state == 'absent': + elif state == "absent": project_result, changed = delete_project( client=client_conn, - name=module.params['name'], + name=module.params["name"], ) except CodeBuildAnsibleAWSError as e: if e.exception: @@ -490,5 +488,5 @@ def main(): module.exit_json(changed=changed, **formatted_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/codecommit_repository.py b/codecommit_repository.py index 1552738bea5..14b08bd88a9 100644 --- a/codecommit_repository.py +++ b/codecommit_repository.py @@ -145,39 +145,39 @@ class CodeCommit(object): def __init__(self, module=None): self._module = module - self._client = self._module.client('codecommit') + self._client = self._module.client("codecommit") self._check_mode = self._module.check_mode def process(self): result = dict(changed=False) - if self._module.params['state'] == 'present': + if self._module.params["state"] == "present": if not self._repository_exists(): if not self._check_mode: result = self._create_repository() - result['changed'] = True + result["changed"] = True else: - metadata = self._get_repository()['repositoryMetadata'] - if not metadata.get('repositoryDescription'): - metadata['repositoryDescription'] = '' - if metadata['repositoryDescription'] != self._module.params['description']: + metadata = self._get_repository()["repositoryMetadata"] + if not metadata.get("repositoryDescription"): + metadata["repositoryDescription"] = "" + if metadata["repositoryDescription"] != self._module.params["description"]: if not self._check_mode: self._update_repository() - result['changed'] = True + result["changed"] = True result.update(self._get_repository()) - if self._module.params['state'] == 'absent' and self._repository_exists(): + if self._module.params["state"] == "absent" and self._repository_exists(): if not self._check_mode: result = self._delete_repository() - result['changed'] = True + result["changed"] = True return result def _repository_exists(self): try: - paginator = self._client.get_paginator('list_repositories') + paginator = self._client.get_paginator("list_repositories") for page in paginator.paginate(): - repositories = page['repositories'] + repositories = page["repositories"] for item in repositories: - if self._module.params['name'] in item.values(): + if self._module.params["name"] in item.values(): return True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -186,7 +186,7 @@ def _repository_exists(self): def _get_repository(self): try: result = self._client.get_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't get repository") @@ -195,8 +195,8 @@ def _get_repository(self): def _update_repository(self): try: result = self._client.update_repository_description( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't create 
repository") @@ -205,8 +205,8 @@ def _update_repository(self): def _create_repository(self): try: result = self._client.create_repository( - repositoryName=self._module.params['name'], - repositoryDescription=self._module.params['description'] + repositoryName=self._module.params["name"], + repositoryDescription=self._module.params["description"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't create repository") @@ -215,7 +215,7 @@ def _create_repository(self): def _delete_repository(self): try: result = self._client.delete_repository( - repositoryName=self._module.params['name'] + repositoryName=self._module.params["name"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="couldn't delete repository") @@ -225,13 +225,13 @@ def _delete_repository(self): def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['present', 'absent'], required=True), - description=dict(default='', aliases=['comment']) + state=dict(choices=["present", "absent"], required=True), + description=dict(default="", aliases=["comment"]), ) ansible_aws_module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) aws_codecommit = CodeCommit(module=ansible_aws_module) @@ -239,5 +239,5 @@ def main(): ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/codepipeline.py b/codepipeline.py index a2ec7713b4a..7e0baf3fd65 100644 --- a/codepipeline.py +++ b/codepipeline.py @@ -209,14 +209,14 @@ def create_pipeline(client, name, role_arn, artifact_store, stages, version, module): - pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages} + pipeline_dict = {"name": name, "roleArn": role_arn, "artifactStore": artifact_store, "stages": stages} if version: - pipeline_dict['version'] = version + pipeline_dict["version"] = version try: resp = client.create_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name'])) + module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict["name"])) def update_pipeline(client, pipeline_dict, module): @@ -224,7 +224,7 @@ def update_pipeline(client, pipeline_dict, module): resp = client.update_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name'])) + module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict["name"])) def delete_pipeline(client, name, module): @@ -244,63 +244,69 @@ def describe_pipeline(client, name, version, module): else: pipeline = client.get_pipeline(name=name) return pipeline - except is_boto3_error_code('PipelineNotFoundException'): + except is_boto3_error_code("PipelineNotFoundException"): return pipeline - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - role_arn=dict(required=True, 
type='str'), - artifact_store=dict(required=True, type='dict'), - stages=dict(required=True, type='list', elements='dict'), - version=dict(type='int'), - state=dict(choices=['present', 'absent'], default='present') + name=dict(required=True, type="str"), + role_arn=dict(required=True, type="str"), + artifact_store=dict(required=True, type="dict"), + stages=dict(required=True, type="list", elements="dict"), + version=dict(type="int"), + state=dict(choices=["present", "absent"], default="present"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client('codepipeline') + client_conn = module.client("codepipeline") - state = module.params.get('state') + state = module.params.get("state") changed = False # Determine if the CodePipeline exists - found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module) + found_code_pipeline = describe_pipeline( + client=client_conn, name=module.params["name"], version=module.params["version"], module=module + ) pipeline_result = {} - if state == 'present': - if 'pipeline' in found_code_pipeline: - pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline']) + if state == "present": + if "pipeline" in found_code_pipeline: + pipeline_dict = copy.deepcopy(found_code_pipeline["pipeline"]) # Update dictionary with provided module params: - pipeline_dict['roleArn'] = module.params['role_arn'] - pipeline_dict['artifactStore'] = module.params['artifact_store'] - pipeline_dict['stages'] = module.params['stages'] - if module.params['version'] is not None: - pipeline_dict['version'] = module.params['version'] + pipeline_dict["roleArn"] = module.params["role_arn"] + pipeline_dict["artifactStore"] = module.params["artifact_store"] + pipeline_dict["stages"] = module.params["stages"] + if module.params["version"] is not None: + pipeline_dict["version"] = module.params["version"] pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module) - if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']): + if compare_policies(found_code_pipeline["pipeline"], pipeline_result["pipeline"]): changed = True else: pipeline_result = create_pipeline( client=client_conn, - name=module.params['name'], - role_arn=module.params['role_arn'], - artifact_store=module.params['artifact_store'], - stages=module.params['stages'], - version=module.params['version'], - module=module) + name=module.params["name"], + role_arn=module.params["role_arn"], + artifact_store=module.params["artifact_store"], + stages=module.params["stages"], + version=module.params["version"], + module=module, + ) changed = True - elif state == 'absent': + elif state == "absent": if found_code_pipeline: - pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module) + pipeline_result = delete_pipeline(client=client_conn, name=module.params["name"], module=module) changed = True module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py index 96f1eb1d9cd..903d5a5e1fe 100644 --- a/config_aggregation_authorization.py +++ b/config_aggregation_authorization.py @@ -62,10 +62,10 @@ def resource_exists(client, module, params): try: - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = 
client.describe_aggregation_authorizations()["AggregationAuthorizations"] authorization_exists = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) if authorization_exists: return True @@ -76,32 +76,32 @@ def resource_exists(client, module, params): def create_resource(client, module, params, result): try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") def update_resource(client, module, params, result): - current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations'] + current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] current_params = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']), - None + (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), + None, ) - del current_params['AggregationAuthorizationArn'] - del current_params['CreationTime'] + del current_params["AggregationAuthorizationArn"] + del current_params["CreationTime"] if params != current_params: try: response = client.put_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") @@ -110,10 +110,10 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_aggregation_authorization( - AuthorizedAccountId=params['AuthorizedAccountId'], - AuthorizedAwsRegion=params['AuthorizedAwsRegion'] + AuthorizedAccountId=params["AuthorizedAccountId"], + AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) - result['changed'] = True + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization") @@ -122,35 +122,35 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'authorized_account_id': dict(type='str', required=True), - 'authorized_aws_region': dict(type='str', required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "authorized_account_id": dict(type="str", required=True), + "authorized_aws_region": dict(type="str", required=True), }, supports_check_mode=False, ) - result = {'changed': False} + result = {"changed": False} params = { - 'AuthorizedAccountId': module.params.get('authorized_account_id'), - 'AuthorizedAwsRegion': 
module.params.get('authorized_aws_region'), + "AuthorizedAccountId": module.params.get("authorized_account_id"), + "AuthorizedAwsRegion": module.params.get("authorized_aws_region"), } - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if module.params.get('state') == 'present': + if module.params.get("state") == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/config_aggregator.py b/config_aggregator.py index 38271fc4542..58866159028 100644 --- a/config_aggregator.py +++ b/config_aggregator.py @@ -105,50 +105,53 @@ def resource_exists(client, module, params): try: aggregator = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] ) - return aggregator['ConfigurationAggregators'][0] - except is_boto3_error_code('NoSuchConfigurationAggregatorException'): + return aggregator["ConfigurationAggregators"][0] + except is_boto3_error_code("NoSuchConfigurationAggregatorException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['changed'] = True - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") def update_resource(client, module, params, result): - result['changed'] = False + result["changed"] = False current_params = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']] - )['ConfigurationAggregators'][0] + ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] + )["ConfigurationAggregators"][0] - if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []): - result['changed'] = True + if params["AccountAggregationSources"] != current_params.get("AccountAggregationSources", []): + result["changed"] = True - if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}): - 
result['changed'] = True + if params["OrganizationAggregationSource"] != current_params.get("OrganizationAggregationSource", {}): + result["changed"] = True - if result['changed']: + if result["changed"]: try: client.put_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'], - AccountAggregationSources=params['AccountAggregationSources'], - OrganizationAggregationSource=params['OrganizationAggregationSource'] + ConfigurationAggregatorName=params["ConfigurationAggregatorName"], + AccountAggregationSources=params["AccountAggregationSources"], + OrganizationAggregationSource=params["OrganizationAggregationSource"], ) - result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") @@ -156,10 +159,8 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - client.delete_configuration_aggregator( - ConfigurationAggregatorName=params['ConfigurationAggregatorName'] - ) - result['changed'] = True + client.delete_configuration_aggregator(ConfigurationAggregatorName=params["ConfigurationAggregatorName"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator") @@ -168,66 +169,64 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'account_sources': dict(type='list', required=True, elements='dict'), - 'organization_source': dict(type='dict', required=True) + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "account_sources": dict(type="list", required=True, elements="dict"), + "organization_source": dict(type="dict", required=True), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['ConfigurationAggregatorName'] = name - params['AccountAggregationSources'] = [] - if module.params.get('account_sources'): - for i in module.params.get('account_sources'): + params["ConfigurationAggregatorName"] = name + params["AccountAggregationSources"] = [] + if module.params.get("account_sources"): + for i in module.params.get("account_sources"): tmp_dict = {} - if i.get('account_ids'): - tmp_dict['AccountIds'] = i.get('account_ids') - if i.get('aws_regions'): - tmp_dict['AwsRegions'] = i.get('aws_regions') - if i.get('all_aws_regions') is not None: - tmp_dict['AllAwsRegions'] = i.get('all_aws_regions') - params['AccountAggregationSources'].append(tmp_dict) - if module.params.get('organization_source'): - params['OrganizationAggregationSource'] = {} - if module.params.get('organization_source').get('role_arn'): - params['OrganizationAggregationSource'].update({ - 'RoleArn': module.params.get('organization_source').get('role_arn') - }) - if module.params.get('organization_source').get('aws_regions'): - 
params['OrganizationAggregationSource'].update({ - 'AwsRegions': module.params.get('organization_source').get('aws_regions') - }) - if module.params.get('organization_source').get('all_aws_regions') is not None: - params['OrganizationAggregationSource'].update({ - 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions') - }) - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + if i.get("account_ids"): + tmp_dict["AccountIds"] = i.get("account_ids") + if i.get("aws_regions"): + tmp_dict["AwsRegions"] = i.get("aws_regions") + if i.get("all_aws_regions") is not None: + tmp_dict["AllAwsRegions"] = i.get("all_aws_regions") + params["AccountAggregationSources"].append(tmp_dict) + if module.params.get("organization_source"): + params["OrganizationAggregationSource"] = {} + if module.params.get("organization_source").get("role_arn"): + params["OrganizationAggregationSource"].update( + {"RoleArn": module.params.get("organization_source").get("role_arn")} + ) + if module.params.get("organization_source").get("aws_regions"): + params["OrganizationAggregationSource"].update( + {"AwsRegions": module.params.get("organization_source").get("aws_regions")} + ) + if module.params.get("organization_source").get("all_aws_regions") is not None: + params["OrganizationAggregationSource"].update( + {"AllAwsRegions": module.params.get("organization_source").get("all_aws_regions")} + ) + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/config_delivery_channel.py b/config_delivery_channel.py index 2dd5fbc68d2..aae8799de20 100644 --- a/config_delivery_channel.py +++ b/config_delivery_channel.py @@ -80,20 +80,23 @@ # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InsufficientDeliveryPolicyException'], + catch_extra_error_codes=["InsufficientDeliveryPolicyException"], ) def resource_exists(client, module, params): try: channel = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - return channel['DeliveryChannels'][0] - except is_boto3_error_code('NoSuchDeliveryChannelException'): + return channel["DeliveryChannels"][0] + except is_boto3_error_code("NoSuchDeliveryChannelException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -104,49 +107,64 @@ def create_resource(client, module, params, result): )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = 
camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: - module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: + module.fail_json_aws( + e, + msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix", + ) + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="Couldn't create AWS Config delivery channel", + ) def update_resource(client, module, params, result): current_params = client.describe_delivery_channels( - DeliveryChannelNames=[params['name']], + DeliveryChannelNames=[params["name"]], aws_retry=True, ) - if params != current_params['DeliveryChannels'][0]: + if params != current_params["DeliveryChannels"][0]: try: retry_unavailable_iam_on_put_delivery( client.put_delivery_channel, )( DeliveryChannel=params, ) - result['changed'] = True - result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + result["changed"] = True + result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result - except is_boto3_error_code('InvalidS3KeyPrefixException') as e: + except is_boto3_error_code("InvalidS3KeyPrefixException") as e: module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
" + "Make sure the bucket exists and is available", + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") def delete_resource(client, module, params, result): try: - response = client.delete_delivery_channel( - DeliveryChannelName=params['name'] - ) - result['changed'] = True + response = client.delete_delivery_channel(DeliveryChannelName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") @@ -155,62 +173,58 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 's3_bucket': dict(type='str', required=True), - 's3_prefix': dict(type='str'), - 'sns_topic_arn': dict(type='str'), - 'delivery_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "s3_bucket": dict(type="str", required=True), + "s3_prefix": dict(type="str"), + "sns_topic_arn": dict(type="str"), + "delivery_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('s3_bucket'): - params['s3BucketName'] = module.params.get('s3_bucket') - if module.params.get('s3_prefix'): - params['s3KeyPrefix'] = module.params.get('s3_prefix') - if module.params.get('sns_topic_arn'): - params['snsTopicARN'] = module.params.get('sns_topic_arn') - if module.params.get('delivery_frequency'): - params['configSnapshotDeliveryProperties'] = { - 'deliveryFrequency': module.params.get('delivery_frequency') - } - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + params["name"] = name + if module.params.get("s3_bucket"): + params["s3BucketName"] = module.params.get("s3_bucket") + if module.params.get("s3_prefix"): + params["s3KeyPrefix"] = module.params.get("s3_prefix") + if module.params.get("sns_topic_arn"): + params["snsTopicARN"] = module.params.get("sns_topic_arn") + if module.params.get("delivery_frequency"): + params["configSnapshotDeliveryProperties"] = {"deliveryFrequency": module.params.get("delivery_frequency")} + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/config_recorder.py b/config_recorder.py index b310787b72d..2672664a5fe 100644 --- a/config_recorder.py +++ 
b/config_recorder.py @@ -93,40 +93,35 @@ def resource_exists(client, module, params): try: - recorder = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) - return recorder['ConfigurationRecorders'][0] - except is_boto3_error_code('NoSuchConfigurationRecorderException'): + recorder = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) + return recorder["ConfigurationRecorders"][0] + except is_boto3_error_code("NoSuchConfigurationRecorderException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") def update_resource(client, module, params, result): - current_params = client.describe_configuration_recorders( - ConfigurationRecorderNames=[params['name']] - ) + current_params = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) - if params != current_params['ConfigurationRecorders'][0]: + if params != current_params["ConfigurationRecorders"][0]: try: - response = client.put_configuration_recorder( - ConfigurationRecorder=params - ) - result['changed'] = True - result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + client.put_configuration_recorder(ConfigurationRecorder=params) + result["changed"] = True + result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") @@ -134,77 +129,68 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = client.delete_configuration_recorder( - ConfigurationRecorderName=params['name'] - ) - result['changed'] = True + client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") def main(): - module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'role_arn': dict(type='str'), - 'recording_group': dict(type='dict'), + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "role_arn": dict(type="str"), + "recording_group": dict(type="dict"), }, supports_check_mode=False, required_if=[ - ('state', 'present', ['role_arn', 'recording_group']), + ("state", "present", ["role_arn", "recording_group"]), ], )
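# Editor's aside -- an illustrative sketch, not part of the patch. Every config_*
# module reformatted above follows the same describe -> compare -> put flow; the
# snippet below renders that idempotency pattern with plain boto3. The helper name
# `ensure_recorder` and the exact shape of `params` are assumptions made for the
# example, not collection code.
import boto3

def ensure_recorder(params):
    """Return True if the recorder had to be created or updated, False if in sync."""
    client = boto3.client("config")
    try:
        current = client.describe_configuration_recorders(
            ConfigurationRecorderNames=[params["name"]],
        )["ConfigurationRecorders"][0]
    except client.exceptions.NoSuchConfigurationRecorderException:
        current = None  # no recorder yet; the put call below will create it
    if current != params:
        # put_configuration_recorder is an upsert: it creates or overwrites in place
        client.put_configuration_recorder(ConfigurationRecorder=params)
        return True
    return False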
- result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") params = {} if name: - params['name'] = name - if module.params.get('role_arn'): - params['roleARN'] = module.params.get('role_arn') - if module.params.get('recording_group'): - params['recordingGroup'] = {} - if module.params.get('recording_group').get('all_supported') is not None: - params['recordingGroup'].update({ - 'allSupported': module.params.get('recording_group').get('all_supported') - }) - if module.params.get('recording_group').get('include_global_types') is not None: - params['recordingGroup'].update({ - 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types') - }) - if module.params.get('recording_group').get('resource_types'): - params['recordingGroup'].update({ - 'resourceTypes': module.params.get('recording_group').get('resource_types') - }) + params["name"] = name + if module.params.get("role_arn"): + params["roleARN"] = module.params.get("role_arn") + if module.params.get("recording_group"): + params["recordingGroup"] = {} + if module.params.get("recording_group").get("all_supported") is not None: + params["recordingGroup"].update({"allSupported": module.params.get("recording_group").get("all_supported")}) + if module.params.get("recording_group").get("include_global_types") is not None: + params["recordingGroup"].update( + {"includeGlobalResourceTypes": module.params.get("recording_group").get("include_global_types")} + ) + if module.params.get("recording_group").get("resource_types"): + params["recordingGroup"].update( + {"resourceTypes": module.params.get("recording_group").get("resource_types")} + ) else: - params['recordingGroup'].update({ - 'resourceTypes': [] - }) + params["recordingGroup"].update({"resourceTypes": []}) - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) resource_status = resource_exists(client, module, params) - if state == 'present': + if state == "present": if not resource_status: create_resource(client, module, params, result) if resource_status: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if resource_status: delete_resource(client, module, params, result) - module.exit_json(changed=result['changed']) + module.exit_json(changed=result["changed"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/config_rule.py b/config_rule.py index cae18b2a0a4..3b49c17465e 100644 --- a/config_rule.py +++ b/config_rule.py @@ -122,22 +122,23 @@ def rule_exists(client, module, params): try: rule = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - return rule['ConfigRules'][0] - except is_boto3_error_code('NoSuchConfigRuleException'): + return rule["ConfigRules"][0] + except is_boto3_error_code("NoSuchConfigRuleException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) def create_resource(client, module, params, result): try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True + 
client.put_config_rule(ConfigRule=params) + result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -145,21 +146,19 @@ def create_resource(client, module, params, result): def update_resource(client, module, params, result): current_params = client.describe_config_rules( - ConfigRuleNames=[params['ConfigRuleName']], + ConfigRuleNames=[params["ConfigRuleName"]], aws_retry=True, ) - del current_params['ConfigRules'][0]['ConfigRuleArn'] - del current_params['ConfigRules'][0]['ConfigRuleId'] - del current_params['ConfigRules'][0]['EvaluationModes'] + del current_params["ConfigRules"][0]["ConfigRuleArn"] + del current_params["ConfigRules"][0]["ConfigRuleId"] + del current_params["ConfigRules"][0]["EvaluationModes"] - if params != current_params['ConfigRules'][0]: + if params != current_params["ConfigRules"][0]: try: - client.put_config_rule( - ConfigRule=params - ) - result['changed'] = True - result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params)) + client.put_config_rule(ConfigRule=params) + result["changed"] = True + result["rule"] = camel_dict_to_snake_dict(rule_exists(client, module, params)) return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create AWS Config rule") @@ -168,11 +167,11 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: response = client.delete_config_rule( - ConfigRuleName=params['ConfigRuleName'], + ConfigRuleName=params["ConfigRuleName"], aws_retry=True, ) - result['changed'] = True - result['rule'] = {} + result["changed"] = True + result["rule"] = {} return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") @@ -181,93 +180,105 @@ def delete_resource(client, module, params, result): def main(): module = AnsibleAWSModule( argument_spec={ - 'name': dict(type='str', required=True), - 'state': dict(type='str', choices=['present', 'absent'], default='present'), - 'description': dict(type='str'), - 'scope': dict(type='dict'), - 'source': dict(type='dict', required=True), - 'input_parameters': dict(type='str'), - 'execution_frequency': dict( - type='str', + "name": dict(type="str", required=True), + "state": dict(type="str", choices=["present", "absent"], default="present"), + "description": dict(type="str"), + "scope": dict(type="dict"), + "source": dict(type="dict", required=True), + "input_parameters": dict(type="str"), + "execution_frequency": dict( + type="str", choices=[ - 'One_Hour', - 'Three_Hours', - 'Six_Hours', - 'Twelve_Hours', - 'TwentyFour_Hours' - ] + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + ], ), }, supports_check_mode=False, ) - result = { - 'changed': False - } + result = {"changed": False} - name = module.params.get('name') - resource_type = module.params.get('resource_type') - state = module.params.get('state') + name = module.params.get("name") + resource_type = module.params.get("resource_type") + state = module.params.get("state") params = {} if name: - params['ConfigRuleName'] = name - if module.params.get('description'): - params['Description'] = module.params.get('description') - if module.params.get('scope'): - params['Scope'] = {} - if module.params.get('scope').get('compliance_types'): - 
params['Scope'].update({ - 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types') - }) - if module.params.get('scope').get('tag_key'): - params['Scope'].update({ - 'TagKey': module.params.get('scope').get('tag_key') - }) - if module.params.get('scope').get('tag_value'): - params['Scope'].update({ - 'TagValue': module.params.get('scope').get('tag_value') - }) - if module.params.get('scope').get('compliance_id'): - params['Scope'].update({ - 'ComplianceResourceId': module.params.get('scope').get('compliance_id') - }) - if module.params.get('source'): - params['Source'] = {} - if module.params.get('source').get('owner'): - params['Source'].update({ - 'Owner': module.params.get('source').get('owner') - }) - if module.params.get('source').get('identifier'): - params['Source'].update({ - 'SourceIdentifier': module.params.get('source').get('identifier') - }) - if module.params.get('source').get('details'): - params['Source'].update({ - 'SourceDetails': module.params.get('source').get('details') - }) - if module.params.get('input_parameters'): - params['InputParameters'] = module.params.get('input_parameters') - if module.params.get('execution_frequency'): - params['MaximumExecutionFrequency'] = module.params.get('execution_frequency') - params['ConfigRuleState'] = 'ACTIVE' - - client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + params["ConfigRuleName"] = name + if module.params.get("description"): + params["Description"] = module.params.get("description") + if module.params.get("scope"): + params["Scope"] = {} + if module.params.get("scope").get("compliance_types"): + params["Scope"].update( + { + "ComplianceResourceTypes": module.params.get("scope").get("compliance_types"), + } + ) + if module.params.get("scope").get("tag_key"): + params["Scope"].update( + { + "TagKey": module.params.get("scope").get("tag_key"), + } + ) + if module.params.get("scope").get("tag_value"): + params["Scope"].update( + { + "TagValue": module.params.get("scope").get("tag_value"), + } + ) + if module.params.get("scope").get("compliance_id"): + params["Scope"].update( + { + "ComplianceResourceId": module.params.get("scope").get("compliance_id"), + } + ) + if module.params.get("source"): + params["Source"] = {} + if module.params.get("source").get("owner"): + params["Source"].update( + { + "Owner": module.params.get("source").get("owner"), + } + ) + if module.params.get("source").get("identifier"): + params["Source"].update( + { + "SourceIdentifier": module.params.get("source").get("identifier"), + } + ) + if module.params.get("source").get("details"): + params["Source"].update( + { + "SourceDetails": module.params.get("source").get("details"), + } + ) + if module.params.get("input_parameters"): + params["InputParameters"] = module.params.get("input_parameters") + if module.params.get("execution_frequency"): + params["MaximumExecutionFrequency"] = module.params.get("execution_frequency") + params["ConfigRuleState"] = "ACTIVE" + + client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) existing_rule = rule_exists(client, module, params) - if state == 'present': + if state == "present": if not existing_rule: create_resource(client, module, params, result) else: update_resource(client, module, params, result) - if state == 'absent': + if state == "absent": if existing_rule: delete_resource(client, module, params, result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/data_pipeline.py 
b/data_pipeline.py index 51068159507..d30be5c847d 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -211,11 +211,11 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED'] -DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING'] -DP_ACTIVATING_STATE = 'ACTIVATING' -DP_DEACTIVATING_STATE = 'DEACTIVATING' -PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$' +DP_ACTIVE_STATES = ["ACTIVE", "SCHEDULED"] +DP_INACTIVE_STATES = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] +DP_ACTIVATING_STATE = "ACTIVATING" +DP_DEACTIVATING_STATE = "DEACTIVATING" +PIPELINE_DOESNT_EXIST = "^.*Pipeline with id: {0} does not exist$" class DataPipelineNotFound(Exception): @@ -236,9 +236,9 @@ def pipeline_id(client, name): """ pipelines = client.list_pipelines() - for dp in pipelines['pipelineIdList']: - if dp['name'] == name: - return dp['id'] + for dp in pipelines["pipelineIdList"]: + if dp["name"] == name: + return dp["id"] raise DataPipelineNotFound @@ -252,7 +252,7 @@ def pipeline_description(client, dp_id): """ try: return client.describe_pipelines(pipelineIds=[dp_id]) - except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']): + except is_boto3_error_code(["PipelineNotFoundException", "PipelineDeletedException"]): raise DataPipelineNotFound @@ -268,9 +268,9 @@ def pipeline_field(client, dp_id, field): """ dp_description = pipeline_description(client, dp_id) - for field_key in dp_description['pipelineDescriptionList'][0]['fields']: - if field_key['key'] == field: - return field_key['stringValue'] + for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: + if field_key["key"] == field: + return field_key["stringValue"] raise KeyError("Field key {0} not found!".format(field)) @@ -343,70 +343,70 @@ def pipeline_exists_timeout(client, dp_id, timeout): def activate_pipeline(client, module): - """Activates pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Activates pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name)) + module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: changed = False else: try: client.activate_pipeline(pipelineId=dp_id) - except is_boto3_error_code('InvalidRequestException'): + except is_boto3_error_code("InvalidRequestException"): module.fail_json(msg="You need to populate your pipeline before activation.") try: - pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, - timeout=timeout) + pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout) except TimeOutException: if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": # activated but completed more rapidly than it was checked pass else: - module.fail_json(msg=('Data Pipeline {0} failed to activate ' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to activate within timeout {timeout} seconds", + ) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} activated.'.format(dp_name)} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data 
Pipeline {dp_name} activated.", + } return (changed, result) def deactivate_pipeline(client, module): - """Deactivates pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Deactivates pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name)) + module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES: changed = False else: client.deactivate_pipeline(pipelineId=dp_id) try: - pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, - timeout=timeout) + pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, timeout=timeout) except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to deactivate' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to deactivate within timeout {timeout} seconds", + ) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data Pipeline {dp_name} deactivated.", + } return (changed, result) @@ -420,11 +420,9 @@ def _delete_dp_with_check(dp_id, client, timeout): def delete_pipeline(client, module): - """Deletes pipeline - - """ - dp_name = module.params.get('name') - timeout = module.params.get('timeout') + """Deletes pipeline""" + dp_name = module.params.get("name") + timeout = module.params.get("timeout") try: dp_id = pipeline_id(client, dp_name) @@ -433,10 +431,13 @@ def delete_pipeline(client, module): except DataPipelineNotFound: changed = False except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to delete' - 'within timeout {1} seconds').format(dp_name, timeout)) - result = {'data_pipeline': {}, - 'msg': 'Data Pipeline {0} deleted'.format(dp_name)} + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to delete within timeout {timeout} seconds", + ) + result = { + "data_pipeline": {}, + "msg": f"Data Pipeline {dp_name} deleted", + } return (changed, result) @@ -444,14 +445,14 @@ def delete_pipeline(client, module): def build_unique_id(module): data = dict(module.params) # removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline - [data.pop(each, None) for each in ('objects', 'timeout')] + [data.pop(each, None) for each in ("objects", "timeout")] json_data = json.dumps(data, sort_keys=True).encode("utf-8") hashed_data = hashlib.md5(json_data).hexdigest() return hashed_data def format_tags(tags): - """ Reformats tags + """Reformats tags :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3}) :returns: list of dicts (e.g. 
[{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}]) @@ -461,16 +462,16 @@ def format_tags(tags): def get_result(client, dp_id): - """ Get the current state of the data pipeline and reformat it to snake_case for exit_json + """Get the current state of the data pipeline and reformat it to snake_case for exit_json :param object client: boto3 datapipeline client :param string dp_id: pipeline id :returns: reformatted dict of pipeline description - """ + """ # pipeline_description returns a pipelineDescriptionList of length 1 # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict) - dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0] + dp = pipeline_description(client, dp_id)["pipelineDescriptionList"][0] # Get uniqueId and pipelineState in fields to add to the exit_json result dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId") @@ -487,8 +488,7 @@ def get_result(client, dp_id): def diff_pipeline(client, module, objects, unique_id, dp_name): - """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated - """ + """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated""" result = {} changed = False create_dp = False @@ -504,16 +504,18 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): create_dp = True # Unique ids are the same - check if pipeline needs modification else: - dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects'] + dp_objects = client.get_pipeline_definition(pipelineId=dp_id)["pipelineObjects"] # Definition needs to be updated if dp_objects != objects: changed, msg = define_pipeline(client, module, objects, dp_id) # No changes else: - msg = 'Data Pipeline {0} is present'.format(dp_name) + msg = f"Data Pipeline {dp_name} is present" data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': msg} + result = { + "data_pipeline": data_pipeline, + "msg": msg, + } except DataPipelineNotFound: create_dp = True @@ -521,30 +523,30 @@ def diff_pipeline(client, module, objects, unique_id, dp_name): def define_pipeline(client, module, objects, dp_id): - """Puts pipeline definition - - """ - dp_name = module.params.get('name') + """Puts pipeline definition""" + dp_name = module.params.get("name") if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name) + msg = "Data Pipeline {0} is unable to be updated while in state FINISHED.".format(dp_name) changed = False elif objects: - parameters = module.params.get('parameters') - values = module.params.get('values') + parameters = module.params.get("parameters") + values = module.params.get("values") try: - client.put_pipeline_definition(pipelineId=dp_id, - pipelineObjects=objects, - parameterObjects=parameters, - parameterValues=values) - msg = 'Data Pipeline {0} has been updated.'.format(dp_name) + client.put_pipeline_definition( + pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values + ) + msg = "Data Pipeline {0} has been updated.".format(dp_name) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. 
Check that string/reference fields" - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" - "objects".format(dp_name)) + module.fail_json_aws( + e, + msg=f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields" + " are not empty and that the number of objects in the pipeline does not exceed maximum allowed" + " objects", + ) else: changed = False msg = "" @@ -553,14 +555,12 @@ def define_pipeline(client, module, objects, dp_id): def create_pipeline(client, module): - """Creates datapipeline. Uses uniqueId to achieve idempotency. - - """ - dp_name = module.params.get('name') - objects = module.params.get('objects', None) - description = module.params.get('description', '') - tags = module.params.get('tags') - timeout = module.params.get('timeout') + """Creates datapipeline. Uses uniqueId to achieve idempotency.""" + dp_name = module.params.get("name") + objects = module.params.get("objects", None) + description = module.params.get("description", "") + tags = module.params.get("tags") + timeout = module.params.get("timeout") unique_id = build_unique_id(module) create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name) @@ -574,24 +574,27 @@ def create_pipeline(client, module): # Make pipeline try: tags = format_tags(tags) - dp = client.create_pipeline(name=dp_name, - uniqueId=unique_id, - description=description, - tags=tags) - dp_id = dp['pipelineId'] + dp = client.create_pipeline(name=dp_name, uniqueId=unique_id, description=description, tags=tags) + dp_id = dp["pipelineId"] pipeline_exists_timeout(client, dp_id, timeout) except TimeOutException: - module.fail_json(msg=('Data Pipeline {0} failed to create' - 'within timeout {1} seconds').format(dp_name, timeout)) + module.fail_json( + msg=f"Data Pipeline {dp_name} failed to create within timeout {timeout} seconds", + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name)) + module.fail_json_aws( + e, + msg=f"Failed to create the data pipeline {dp_name}.", + ) # Put pipeline definition changed, msg = define_pipeline(client, module, objects, dp_id) changed = True data_pipeline = get_result(client, dp_id) - result = {'data_pipeline': data_pipeline, - 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg} + result = { + "data_pipeline": data_pipeline, + "msg": f"Data Pipeline {dp_name} created. "
+ msg, + } return (changed, result) @@ -599,34 +602,33 @@ def create_pipeline(client, module): def main(): argument_spec = dict( name=dict(required=True), - description=dict(required=False, default=''), - objects=dict(required=False, type='list', default=[], elements='dict'), - parameters=dict(required=False, type='list', default=[], elements='dict'), - timeout=dict(required=False, type='int', default=300), - state=dict(default='present', choices=['present', 'absent', - 'active', 'inactive']), - tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']), - values=dict(required=False, type='list', default=[], elements='dict'), + description=dict(required=False, default=""), + objects=dict(required=False, type="list", default=[], elements="dict"), + parameters=dict(required=False, type="list", default=[], elements="dict"), + timeout=dict(required=False, type="int", default=300), + state=dict(default="present", choices=["present", "absent", "active", "inactive"]), + tags=dict(required=False, type="dict", default={}, aliases=["resource_tags"]), + values=dict(required=False, type="list", default=[], elements="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) try: - client = module.client('datapipeline') + client = module.client("datapipeline") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": changed, result = create_pipeline(client, module) - elif state == 'absent': + elif state == "absent": changed, result = delete_pipeline(client, module) - elif state == 'active': + elif state == "active": changed, result = activate_pipeline(client, module) - elif state == 'inactive': + elif state == "inactive": changed, result = deactivate_pipeline(client, module) module.exit_json(result=result, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py index 25aeebb244c..e8e0f2c6b08 100644 --- a/directconnect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -82,7 +82,7 @@ def describe_connections(client, params): def find_connection_id(client, connection_id=None, connection_name=None): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = describe_connections(client, params) except (BotoCoreError, ClientError) as e: @@ -90,18 +90,20 @@ def find_connection_id(client, connection_id=None, connection_name=None): msg = "Failed to describe DirectConnect ID {0}".format(connection_id) else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=msg, + last_traceback=traceback.format_exc(), + exception=e, + ) match = [] - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] != 'deleted': - match.append(response['connections'][0]['connectionId']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) - for conn in response.get('connections', []): - if connection_name == 
conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) if len(match) == 1: return match[0] @@ -112,34 +114,33 @@ def find_connection_id(client, connection_id=None, connection_name=None): def get_connection_state(client, connection_id): try: response = describe_connections(client, dict(connectionId=connection_id)) - return response['connections'][0]['connectionState'] + return response["connections"][0]["connectionState"] except (BotoCoreError, ClientError, IndexError) as e: - raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to describe DirectConnect connection {0} state".format(connection_id), + last_traceback=traceback.format_exc(), + exception=e, + ) def main(): - argument_spec = dict( - connection_id=dict(), - name=dict() + argument_spec = dict(connection_id=dict(), name=dict()) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["connection_id", "name"]], + required_one_of=[["connection_id", "name"]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['connection_id', 'name']], - required_one_of=[['connection_id', 'name']]) - client = module.client('directconnect') + client = module.client("directconnect") - connection_id = module.params['connection_id'] - connection_name = module.params['name'] + connection_id = module.params["connection_id"] + connection_name = module.params["name"] changed = False connection_state = None try: - connection_id = find_connection_id(client, - connection_id, - connection_name) + connection_id = find_connection_id(client, connection_id, connection_name) connection_state = get_connection_state(client, connection_id) - if connection_state == 'ordering': + if connection_state == "ordering": client.confirm_connection(connectionId=connection_id) changed = True connection_state = get_connection_state(client, connection_id) @@ -152,5 +153,5 @@ def main(): module.exit_json(changed=changed, connection_state=connection_state) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/directconnect_connection.py b/directconnect_connection.py index 709fef7a79f..176d83392d4 100644 --- a/directconnect_connection.py +++ b/directconnect_connection.py @@ -182,7 +182,7 @@ def connection_status(client, connection_id): def connection_exists(client, connection_id=None, connection_name=None, verify=True): params = {} if connection_id: - params['connectionId'] = connection_id + params["connectionId"] = connection_id try: response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: @@ -190,23 +190,21 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T msg = "Failed to describe DirectConnect ID {0}".format(connection_id) else: msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) match = [] connection = [] # look for matching connections - if len(response.get('connections', [])) == 1 and connection_id: - if response['connections'][0]['connectionState'] 
!= 'deleted': - match.append(response['connections'][0]['connectionId']) - connection.extend(response['connections']) + if len(response.get("connections", [])) == 1 and connection_id: + if response["connections"][0]["connectionState"] != "deleted": + match.append(response["connections"][0]["connectionId"]) + connection.extend(response["connections"]) - for conn in response.get('connections', []): - if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted': - match.append(conn['connectionId']) + for conn in response.get("connections", []): + if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": + match.append(conn["connectionId"]) connection.append(conn) # verifying if the connections exists; if true, return connection identifier, otherwise return False @@ -216,33 +214,35 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T return False # not verifying if the connection exists; just return current connection info elif len(connection) == 1: - return {'connection': connection[0]} - return {'connection': {}} + return {"connection": connection[0]} + return {"connection": {}} def create_connection(client, location, bandwidth, name, lag_id): if not name: raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.") params = { - 'location': location, - 'bandwidth': bandwidth, - 'connectionName': name, + "location": location, + "bandwidth": bandwidth, + "connectionName": name, } if lag_id: - params['lagId'] = lag_id + params["lagId"] = lag_id try: connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: - raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) - return connection['connectionId'] + raise DirectConnectError( + msg="Failed to create DirectConnect connection {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e, + ) + return connection["connectionId"] def changed_properties(current_status, location, bandwidth): - current_bandwidth = current_status['bandwidth'] - current_location = current_status['location'] + current_bandwidth = current_status["bandwidth"] + current_location = current_status["location"] return current_bandwidth != bandwidth or current_location != location @@ -250,10 +250,10 @@ def changed_properties(current_status, location, bandwidth): @AWSRetry.jittered_backoff(**retry_params) def update_associations(client, latest_state, connection_id, lag_id): changed = False - if 'lagId' in latest_state and lag_id != latest_state['lagId']: - disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId']) + if "lagId" in latest_state and lag_id != latest_state["lagId"]: + disassociate_connection_and_lag(client, connection_id, lag_id=latest_state["lagId"]) changed = True - if (changed and lag_id) or (lag_id and 'lagId' not in latest_state): + if (changed and lag_id) or (lag_id and "lagId" not in latest_state): associate_connection_and_lag(client, connection_id, lag_id) changed = True return changed @@ -262,16 +262,18 @@ def update_associations(client, latest_state, connection_id, lag_id): def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update): # the connection is found; get the latest state and see if it needs to be updated if connection_id: - latest_state = connection_status(client, 
connection_id=connection_id)['connection'] + latest_state = connection_status(client, connection_id=connection_id)["connection"] if changed_properties(latest_state, location, bandwidth) and forced_update: ensure_absent(client, connection_id) - return ensure_present(client=client, - connection_id=None, - connection_name=connection_name, - location=location, - bandwidth=bandwidth, - lag_id=lag_id, - forced_update=forced_update) + return ensure_present( + client=client, + connection_id=None, + connection_name=connection_name, + location=location, + bandwidth=bandwidth, + lag_id=lag_id, + forced_update=forced_update, + ) elif update_associations(client, latest_state, connection_id, lag_id): return True, connection_id @@ -294,53 +296,59 @@ def ensure_absent(client, connection_id): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), location=dict(), - bandwidth=dict(choices=['1Gbps', '10Gbps']), + bandwidth=dict(choices=["1Gbps", "10Gbps"]), link_aggregation_group=dict(), connection_id=dict(), - forced_update=dict(type='bool', default=False) + forced_update=dict(type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('connection_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))] + required_one_of=[("connection_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) - connection = module.client('directconnect') + connection = module.client("directconnect") - state = module.params.get('state') + state = module.params.get("state") try: connection_id = connection_exists( - connection, - connection_id=module.params.get('connection_id'), - connection_name=module.params.get('name') + connection, connection_id=module.params.get("connection_id"), connection_name=module.params.get("name") ) - if not connection_id and module.params.get('connection_id'): - module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id'))) - - if state == 'present': - changed, connection_id = ensure_present(connection, - connection_id=connection_id, - connection_name=module.params.get('name'), - location=module.params.get('location'), - bandwidth=module.params.get('bandwidth'), - lag_id=module.params.get('link_aggregation_group'), - forced_update=module.params.get('forced_update')) + if not connection_id and module.params.get("connection_id"): + module.fail_json( + msg=f"The Direct Connect connection {module.params['connection_id']} does not exist.", + ) + + if state == "present": + changed, connection_id = ensure_present( + connection, + connection_id=connection_id, + connection_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + lag_id=module.params.get("link_aggregation_group"), + forced_update=module.params.get("forced_update"), + ) response = connection_status(connection, connection_id) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(connection, connection_id) response = {} except DirectConnectError as e: if e.last_traceback: - module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) + module.fail_json( + msg=e.msg, + exception=e.last_traceback, + **camel_dict_to_snake_dict(e.exception.response), + ) else: module.fail_json(msg=e.msg) module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if 
__name__ == '__main__': +if __name__ == "__main__": main() diff --git a/directconnect_gateway.py b/directconnect_gateway.py index 53fb47c9603..b231f0e8f44 100644 --- a/directconnect_gateway.py +++ b/directconnect_gateway.py @@ -110,11 +110,12 @@ def dx_gateway_info(client, gateway_id, module): try: resp = client.describe_direct_connect_gateways( - directConnectGatewayId=gateway_id) + directConnectGatewayId=gateway_id, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to fetch gateway information.") - if resp['directConnectGateways']: - return resp['directConnectGateways'][0] + if resp["directConnectGateways"]: + return resp["directConnectGateways"][0] def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): @@ -128,9 +129,10 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): client, module, gateway_id=gateway_id, - virtual_gateway_id=virtual_gateway_id) - if response['directConnectGatewayAssociations']: - if response['directConnectGatewayAssociations'][0]['associationState'] == status: + virtual_gateway_id=virtual_gateway_id, + ) + if response["directConnectGatewayAssociations"]: + if response["directConnectGatewayAssociations"][0]["associationState"] == status: status_achieved = True break else: @@ -147,17 +149,18 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): def associate_direct_connect_gateway(client, module, gateway_id): params = dict() - params['virtual_gateway_id'] = module.params.get('virtual_gateway_id') + params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") try: response = client.create_direct_connect_gateway_association( directConnectGatewayId=gateway_id, - virtualGatewayId=params['virtual_gateway_id']) + virtualGatewayId=params["virtual_gateway_id"], + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to associate gateway') + module.fail_json_aws(e, "Failed to associate gateway") - status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating') + status_achieved, dxgw = wait_for_status(client, module, gateway_id, params["virtual_gateway_id"], "associating") if not status_achieved: - module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console') + module.fail_json(msg="Error waiting for dxgw to attach to vpg - please check the AWS console") result = response return result @@ -167,13 +170,14 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): try: response = client.delete_direct_connect_gateway_association( directConnectGatewayId=gateway_id, - virtualGatewayId=virtual_gateway_id) + virtualGatewayId=virtual_gateway_id, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway association.") - status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating') + status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, "disassociating") if not status_achieved: - module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console') + module.fail_json(msg="Error waiting for dxgw to detach from vpg - please check the AWS console") result = response return result @@ -181,12 +185,13 @@ def delete_association(client, module, gateway_id, virtual_gateway_id): def 
create_dx_gateway(client, module):
     params = dict()
-    params['name'] = module.params.get('name')
-    params['amazon_asn'] = module.params.get('amazon_asn')
+    params["name"] = module.params.get("name")
+    params["amazon_asn"] = module.params.get("amazon_asn")
     try:
         response = client.create_direct_connect_gateway(
-            directConnectGatewayName=params['name'],
-            amazonSideAsn=int(params['amazon_asn']))
+            directConnectGatewayName=params["name"],
+            amazonSideAsn=int(params["amazon_asn"]),
+        )
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Failed to create direct connect gateway.")

@@ -198,21 +203,21 @@ def find_dx_gateway(client, module, gateway_id=None):
     params = dict()
     gateways = list()
     if gateway_id is not None:
-        params['directConnectGatewayId'] = gateway_id
+        params["directConnectGatewayId"] = gateway_id

     while True:
         try:
             resp = client.describe_direct_connect_gateways(**params)
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             module.fail_json_aws(e, msg="Failed to describe gateways")
-        gateways.extend(resp['directConnectGateways'])
-        if 'nextToken' in resp:
-            params['nextToken'] = resp['nextToken']
+        gateways.extend(resp["directConnectGateways"])
+        if "nextToken" in resp:
+            params["nextToken"] = resp["nextToken"]
         else:
             break

     if gateways != []:
         count = 0
         for gateway in gateways:
-            if module.params.get('name') == gateway['directConnectGatewayName']:
+            if module.params.get("name") == gateway["directConnectGatewayName"]:
                 count += 1
                 return gateway
     return None
@@ -222,7 +227,7 @@ def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
     try:
         if virtual_gateway_id is None:
             resp = client.describe_direct_connect_gateway_associations(
-                directConnectGatewayId=gateway_id
+                directConnectGatewayId=gateway_id,
             )
         else:
             resp = client.describe_direct_connect_gateway_associations(
@@ -241,22 +246,20 @@ def ensure_present(client, module):
     changed = False
     params = dict()
     result = dict()
-    params['name'] = module.params.get('name')
-    params['amazon_asn'] = module.params.get('amazon_asn')
-    params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+    params["name"] = module.params.get("name")
+    params["amazon_asn"] = module.params.get("amazon_asn")
+    params["virtual_gateway_id"] = module.params.get("virtual_gateway_id")

     # check if a gateway matching our module args already exists
     existing_dxgw = find_dx_gateway(client, module)

-    if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
-        gateway_id = existing_dxgw['directConnectGatewayId']
+    if existing_dxgw is not None and existing_dxgw["directConnectGatewayState"] != "deleted":
+        gateway_id = existing_dxgw["directConnectGatewayId"]
         # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
-        if params['virtual_gateway_id']:
+        if params["virtual_gateway_id"]:
             resp = check_dxgw_association(
-                client,
-                module,
-                gateway_id=gateway_id,
-                virtual_gateway_id=params['virtual_gateway_id'])
+                client, module, gateway_id=gateway_id, virtual_gateway_id=params["virtual_gateway_id"]
+            )
             if not resp["directConnectGatewayAssociations"]:
                 # attach the dxgw to the supplied virtual_gateway_id
                 associate_direct_connect_gateway(client, module, gateway_id)
@@ -267,26 +270,28 @@
             resp = check_dxgw_association(client, module, gateway_id=gateway_id)
             if resp["directConnectGatewayAssociations"]:
-                for association in resp['directConnectGatewayAssociations']:
-                    if association['associationState'] not in ['disassociating', 'disassociated']:
+                for association in resp["directConnectGatewayAssociations"]:
+                    if association["associationState"] not in ["disassociating", "disassociated"]:
                         delete_association(
                             client,
                             module,
                             gateway_id=gateway_id,
-                            virtual_gateway_id=association['virtualGatewayId'])
+                            virtual_gateway_id=association["virtualGatewayId"],
+                        )
     else:
         # create a new dxgw
         new_dxgw = create_dx_gateway(client, module)
         changed = True
-        gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
+        gateway_id = new_dxgw["directConnectGateway"]["directConnectGatewayId"]

         # if a virtual_gateway_id was supplied, attempt to attach it to the dxgw
-        if params['virtual_gateway_id']:
+        if params["virtual_gateway_id"]:
             associate_direct_connect_gateway(client, module, gateway_id)
-            resp = check_dxgw_association(client,
-                                          module,
-                                          gateway_id=gateway_id
-                                          )
+            resp = check_dxgw_association(
+                client,
+                module,
+                gateway_id=gateway_id,
+            )
             if resp["directConnectGatewayAssociations"]:
                 changed = True
@@ -300,23 +305,23 @@ def ensure_absent(client, module):
     changed = False
     result = dict()
-    dx_gateway_id = module.params.get('direct_connect_gateway_id')
+    dx_gateway_id = module.params.get("direct_connect_gateway_id")
     existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
     if existing_dxgw is not None:
-        resp = check_dxgw_association(client, module,
-                                      gateway_id=dx_gateway_id)
+        resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id)
         if resp["directConnectGatewayAssociations"]:
-            for association in resp['directConnectGatewayAssociations']:
-                if association['associationState'] not in ['disassociating', 'disassociated']:
-                    delete_association(client, module,
-                                       gateway_id=dx_gateway_id,
-                                       virtual_gateway_id=association['virtualGatewayId'])
+            for association in resp["directConnectGatewayAssociations"]:
+                if association["associationState"] not in ["disassociating", "disassociated"]:
+                    delete_association(
+                        client,
+                        module,
+                        gateway_id=dx_gateway_id,
+                        virtual_gateway_id=association["virtualGatewayId"],
+                    )
         # wait for the association to be deleted
-        timeout = time.time() + module.params.get('wait_timeout')
+        timeout = time.time() + module.params.get("wait_timeout")
         while time.time() < timeout:
-            resp = check_dxgw_association(client,
-                                          module,
-                                          gateway_id=dx_gateway_id)
+            resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id)
             if resp["directConnectGatewayAssociations"] != []:
                 time.sleep(15)
             else:
@@ -324,43 +329,44 @@
         try:
             resp = client.delete_direct_connect_gateway(
-                directConnectGatewayId=dx_gateway_id
+                directConnectGatewayId=dx_gateway_id,
             )
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             module.fail_json_aws(e, msg="Failed to delete gateway")
-        result = resp['directConnectGateway']
+        result = resp["directConnectGateway"]
     return changed


 def main():
     argument_spec = dict(
-        state=dict(default='present', choices=['present', 'absent']),
+        state=dict(default="present", choices=["present", "absent"]),
         name=dict(),
         amazon_asn=dict(),
         virtual_gateway_id=dict(),
         direct_connect_gateway_id=dict(),
-        wait_timeout=dict(type='int', default=320),
+        wait_timeout=dict(type="int", default=320),
+    )
+    required_if = [("state", "present", ["name", "amazon_asn"]), ("state", "absent", ["direct_connect_gateway_id"])]
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=required_if,
     )
-    required_if = [('state', 'present', ['name', 'amazon_asn']),
-                   ('state', 'absent', ['direct_connect_gateway_id'])]
-    module
= AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('directconnect') + client = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": (changed, results) = ensure_present(client, module) - elif state == 'absent': + elif state == "absent": changed = ensure_absent(client, module) results = {} module.exit_json(changed=changed, **camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index 8a50e3c7e89..9a532c63298 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -185,8 +185,8 @@ def lag_status(client, lag_id): def lag_exists(client, lag_id=None, lag_name=None, verify=True): - """ If verify=True, returns the LAG ID or None - If verify=False, returns the LAG's data (or an empty dict) + """If verify=True, returns the LAG ID or None + If verify=False, returns the LAG's data (or an empty dict) """ try: if lag_id: @@ -200,26 +200,24 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): return {} else: failed_op = "Failed to describe DirectConnect link aggregation groups." - raise DirectConnectError(msg=failed_op, - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError(msg=failed_op, last_traceback=traceback.format_exc(), exception=e) match = [] # List of LAG IDs that are exact matches lag = [] # List of LAG data that are exact matches # look for matching connections - if len(response.get('lags', [])) == 1 and lag_id: - if response['lags'][0]['lagState'] != 'deleted': - match.append(response['lags'][0]['lagId']) - lag.append(response['lags'][0]) + if len(response.get("lags", [])) == 1 and lag_id: + if response["lags"][0]["lagState"] != "deleted": + match.append(response["lags"][0]["lagId"]) + lag.append(response["lags"][0]) else: - for each in response.get('lags', []): - if each['lagState'] != 'deleted': + for each in response.get("lags", []): + if each["lagState"] != "deleted": if not lag_id: - if lag_name == each['lagName']: - match.append(each['lagId']) + if lag_name == each["lagName"]: + match.append(each["lagId"]) else: - match.append(each['lagId']) + match.append(each["lagId"]) # verifying if the connections exists; if true, return connection identifier, otherwise return False if verify and len(match) == 1: @@ -237,36 +235,41 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True): def create_lag(client, num_connections, location, bandwidth, name, connection_id): if not name: - raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.", - last_traceback=None, - exception="") - - parameters = dict(numberOfConnections=num_connections, - location=location, - connectionsBandwidth=bandwidth, - lagName=name) + raise DirectConnectError( + msg="Failed to create a Direct Connect link aggregation group: name required.", + last_traceback=None, + exception="", + ) + + parameters = dict( + numberOfConnections=num_connections, location=location, connectionsBandwidth=bandwidth, lagName=name + ) if connection_id: parameters.update(connectionId=connection_id) try: lag = 
client.create_lag(**parameters) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to create DirectConnect link aggregation group {0}".format(name), + last_traceback=traceback.format_exc(), + exception=e, + ) - return lag['lagId'] + return lag["lagId"] def delete_lag(client, lag_id): try: client.delete_lag(lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) -@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=["DirectConnectClientException"]) def _update_lag(client, lag_id, lag_name, min_links): params = {} if min_links: @@ -283,9 +286,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if min_links and min_links > num_connections: raise DirectConnectError( msg="The number of connections {0} must be greater than the minimum number of links " - "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), last_traceback=None, - exception=None + exception=None, ) while True: @@ -295,26 +298,32 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if wait and time.time() - start <= wait_timeout: continue msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) - if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']: - msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links) - raise DirectConnectError(msg=msg, - last_traceback=traceback.format_exc(), - exception=e) + if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: + msg += ( + "Unable to set the min number of links to {0} while the LAG connections are being requested".format( + min_links + ) + ) + raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) else: break def lag_changed(current_status, name, min_links): - """ Determines if a modifiable link aggregation group attribute has been modified. 
""" - return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks']) + """Determines if a modifiable link aggregation group attribute has been modified.""" + return (name and name != current_status["lagName"]) or (min_links and min_links != current_status["minimumLinks"]) -def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout): +def ensure_present( + client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout +): exists = lag_exists(client, lag_id, lag_name) if not exists and lag_id: - raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), - last_traceback=None, - exception="") + raise DirectConnectError( + msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), + last_traceback=None, + exception="", + ) # the connection is found; get the latest state and see if it needs to be updated if exists: @@ -336,27 +345,31 @@ def describe_virtual_interfaces(client, lag_id): try: response = client.describe_virtual_interfaces(connectionId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), - last_traceback=traceback.format_exc(), - exception=e) - return response.get('virtualInterfaces', []) + raise DirectConnectError( + msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) + return response.get("virtualInterfaces", []) def get_connections_and_virtual_interfaces(client, lag_id): virtual_interfaces = describe_virtual_interfaces(client, lag_id) - connections = lag_status(client, lag_id=lag_id).get('connections', []) + connections = lag_status(client, lag_id=lag_id).get("connections", []) return virtual_interfaces, connections def disassociate_vis(client, lag_id, virtual_interfaces): for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) try: - response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId']) + response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), + last_traceback=traceback.format_exc(), + exception=e, + ) def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout): @@ -370,32 +383,38 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id) # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete - if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete: - raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. 
" - "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " - "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " - "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), - last_traceback=None, - exception=None) + if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete: + raise DirectConnectError( + msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " + "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " + "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " + "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), + last_traceback=None, + exception=None, + ) # update min_links to be 0 so we can remove the LAG update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout) # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached for connection in connections: - disassociate_connection_and_lag(client, connection['connectionId'], lag_id) + disassociate_connection_and_lag(client, connection["connectionId"], lag_id) if delete_with_disassociation: - delete_connection(client, connection['connectionId']) + delete_connection(client, connection["connectionId"]) for vi in virtual_interfaces: - delete_virtual_interface(client, vi['virtualInterfaceId']) + delete_virtual_interface(client, vi["virtualInterfaceId"]) start_time = time.time() while True: try: delete_lag(client, lag_id) except DirectConnectError as e: - if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait: + if ( + ("until its Virtual Interfaces are deleted" in e.exception) + and (time.time() - start_time < wait_timeout) + and wait + ): continue else: return True @@ -403,54 +422,58 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(), link_aggregation_group_id=dict(), - num_connections=dict(type='int'), - min_links=dict(type='int'), + num_connections=dict(type="int"), + min_links=dict(type="int"), location=dict(), bandwidth=dict(), connection_id=dict(), - delete_with_disassociation=dict(type='bool', default=False), - force_delete=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=120), + delete_with_disassociation=dict(type="bool", default=False), + force_delete=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=120), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('link_aggregation_group_id', 'name')], - required_if=[('state', 'present', ('location', 'bandwidth'))], + required_one_of=[("link_aggregation_group_id", "name")], + required_if=[("state", "present", ("location", "bandwidth"))], ) try: - connection = module.client('directconnect') + connection = module.client("directconnect") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect 
to AWS") - state = module.params.get('state') + state = module.params.get("state") response = {} try: - if state == 'present': - changed, lag_id = ensure_present(connection, - num_connections=module.params.get("num_connections"), - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - location=module.params.get("location"), - bandwidth=module.params.get("bandwidth"), - connection_id=module.params.get("connection_id"), - min_links=module.params.get("min_links"), - wait=module.params.get("wait"), - wait_timeout=module.params.get("wait_timeout")) + if state == "present": + changed, lag_id = ensure_present( + connection, + num_connections=module.params.get("num_connections"), + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + location=module.params.get("location"), + bandwidth=module.params.get("bandwidth"), + connection_id=module.params.get("connection_id"), + min_links=module.params.get("min_links"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) response = lag_status(connection, lag_id) elif state == "absent": - changed = ensure_absent(connection, - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - force_delete=module.params.get("force_delete"), - delete_with_disassociation=module.params.get("delete_with_disassociation"), - wait=module.params.get('wait'), - wait_timeout=module.params.get('wait_timeout')) + changed = ensure_absent( + connection, + lag_id=module.params.get("link_aggregation_group_id"), + lag_name=module.params.get("name"), + force_delete=module.params.get("force_delete"), + delete_with_disassociation=module.params.get("delete_with_disassociation"), + wait=module.params.get("wait"), + wait_timeout=module.params.get("wait_timeout"), + ) except DirectConnectError as e: if e.last_traceback: module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception)) @@ -460,5 +483,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py index bded2ab57ab..ab6ee9d4ea4 100644 --- a/directconnect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -264,61 +264,66 @@ def try_except_ClientError(failure_msg): - ''' - Wrapper for boto3 calls that uses AWSRetry and handles exceptions - ''' + """ + Wrapper for boto3 calls that uses AWSRetry and handles exceptions + """ + def wrapper(f): def run_func(*args, **kwargs): try: - result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) + result = AWSRetry.jittered_backoff( + retries=8, delay=5, catch_extra_error_codes=["DirectConnectClientException"] + )(f)(*args, **kwargs) except (ClientError, BotoCoreError) as e: raise DirectConnectError(failure_msg, traceback.format_exc(), e) return result + return run_func + return wrapper def find_unique_vi(client, connection_id, virtual_interface_id, name): - ''' - Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. - If multiple matches are found False is returned. If no matches are found None is returned. - ''' + """ + Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. + If multiple matches are found False is returned. 
If no matches are found None is returned. + """ # Get the virtual interfaces, filtering by the ID if provided. vi_params = {} if virtual_interface_id: - vi_params = {'virtualInterfaceId': virtual_interface_id} + vi_params = {"virtualInterfaceId": virtual_interface_id} - virtual_interfaces = try_except_ClientError( - failure_msg="Failed to describe virtual interface")( - client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces') + virtual_interfaces = try_except_ClientError(failure_msg="Failed to describe virtual interface")( + client.describe_virtual_interfaces + )(**vi_params).get("virtualInterfaces") # Remove deleting/deleted matches from the results. - virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')] + virtual_interfaces = [vi for vi in virtual_interfaces if vi["virtualInterfaceState"] not in ("deleting", "deleted")] matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id) return exact_match(matching_virtual_interfaces) def exact_match(virtual_interfaces): - ''' - Returns the virtual interface ID if one was found, - None if the virtual interface ID needs to be created, - False if an exact match was not found - ''' + """ + Returns the virtual interface ID if one was found, + None if the virtual interface ID needs to be created, + False if an exact match was not found + """ if not virtual_interfaces: return None if len(virtual_interfaces) == 1: - return virtual_interfaces[0]['virtualInterfaceId'] + return virtual_interfaces[0]["virtualInterfaceId"] else: return False def filter_virtual_interfaces(virtual_interfaces, name, connection_id): - ''' - Filters the available virtual interfaces to try to find a unique match - ''' + """ + Filters the available virtual interfaces to try to find a unique match + """ # Filter by name if provided. if name: matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name) @@ -339,52 +344,56 @@ def filter_virtual_interfaces(virtual_interfaces, name, connection_id): def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id): - ''' - Return virtual interfaces that have the connection_id associated - ''' - return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id] + """ + Return virtual interfaces that have the connection_id associated + """ + return [vi for vi in virtual_interfaces if vi["connectionId"] == connection_id] def find_virtual_interface_by_name(virtual_interfaces, name): - ''' - Return virtual interfaces that match the provided name - ''' - return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name] + """ + Return virtual interfaces that match the provided name + """ + return [vi for vi in virtual_interfaces if vi["virtualInterfaceName"] == name] def vi_state(client, virtual_interface_id): - ''' - Returns the state of the virtual interface. - ''' + """ + Returns the state of the virtual interface. 
+ """ err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) - vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id) - return vi['virtualInterfaces'][0] + vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( + virtualInterfaceId=virtual_interface_id + ) + return vi["virtualInterfaces"][0] def assemble_params_for_creating_vi(params): - ''' - Returns kwargs to use in the call to create the virtual interface - - Params for public virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr - Params for private virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId - ''' - - public = params['public'] - name = params['name'] - vlan = params['vlan'] - bgp_asn = params['bgp_asn'] - auth_key = params['authentication_key'] - amazon_addr = params['amazon_address'] - customer_addr = params['customer_address'] - family_addr = params['address_type'] - cidr = params['cidr'] - virtual_gateway_id = params['virtual_gateway_id'] - direct_connect_gateway_id = params['direct_connect_gateway_id'] + """ + Returns kwargs to use in the call to create the virtual interface + + Params for public virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr + Params for private virtual interfaces: + virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId + """ + + public = params["public"] + name = params["name"] + vlan = params["vlan"] + bgp_asn = params["bgp_asn"] + auth_key = params["authentication_key"] + amazon_addr = params["amazon_address"] + customer_addr = params["customer_address"] + family_addr = params["address_type"] + cidr = params["cidr"] + virtual_gateway_id = params["virtual_gateway_id"] + direct_connect_gateway_id = params["direct_connect_gateway_id"] parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) - opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr) + opt_params = dict( + authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr + ) for name, value in opt_params.items(): if value: @@ -392,68 +401,74 @@ def assemble_params_for_creating_vi(params): # virtual interface type specific parameters if public and cidr: - parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr] + parameters["routeFilterPrefixes"] = [{"cidr": c} for c in cidr] if not public: if virtual_gateway_id: - parameters['virtualGatewayId'] = virtual_gateway_id + parameters["virtualGatewayId"] = virtual_gateway_id elif direct_connect_gateway_id: - parameters['directConnectGatewayId'] = direct_connect_gateway_id + parameters["directConnectGatewayId"] = direct_connect_gateway_id return parameters def create_vi(client, public, associated_id, creation_params): - ''' - :param public: a boolean - :param associated_id: a link aggregation group ID or connection ID to associate - with the virtual interface. - :param creation_params: a dict of parameters to use in the AWS SDK call - :return The ID of the created virtual interface - ''' + """ + :param public: a boolean + :param associated_id: a link aggregation group ID or connection ID to associate + with the virtual interface. 
+ :param creation_params: a dict of parameters to use in the AWS SDK call + :return The ID of the created virtual interface + """ err_msg = "Failed to create virtual interface" if public: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id, - newPublicVirtualInterface=creation_params) + vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)( + connectionId=associated_id, newPublicVirtualInterface=creation_params + ) else: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id, - newPrivateVirtualInterface=creation_params) - return vi['virtualInterfaceId'] + vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)( + connectionId=associated_id, newPrivateVirtualInterface=creation_params + ) + return vi["virtualInterfaceId"] def modify_vi(client, virtual_interface_id, connection_id): - ''' - Associate a new connection ID - ''' + """ + Associate a new connection ID + """ err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) - try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id, - connectionId=connection_id) + try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( + virtualInterfaceId=virtual_interface_id, connectionId=connection_id + ) def needs_modification(client, virtual_interface_id, connection_id): - ''' - Determine if the associated connection ID needs to be updated - ''' - return vi_state(client, virtual_interface_id).get('connectionId') != connection_id + """ + Determine if the associated connection ID needs to be updated + """ + return vi_state(client, virtual_interface_id).get("connectionId") != connection_id def ensure_state(connection, module): changed = False - state = module.params['state'] - connection_id = module.params['id_to_associate'] - public = module.params['public'] - name = module.params['name'] + state = module.params["state"] + connection_id = module.params["id_to_associate"] + public = module.params["public"] + name = module.params["name"] - virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name) + virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get("virtual_interface_id"), name) if virtual_interface_id is False: - module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match.") + module.fail_json( + msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match." 
+        )

-    if state == 'present':
-
-        if not virtual_interface_id and module.params['virtual_interface_id']:
-            module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
+    if state == "present":
+        if not virtual_interface_id and module.params["virtual_interface_id"]:
+            module.fail_json(
+                msg="The virtual interface {0} does not exist.".format(module.params["virtual_interface_id"])
+            )

     elif not virtual_interface_id:
         assembled_params = assemble_params_for_creating_vi(module.params)
@@ -478,31 +493,35 @@ def main():
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent']),
-        id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
-        public=dict(type='bool'),
+        state=dict(required=True, choices=["present", "absent"]),
+        id_to_associate=dict(required=True, aliases=["link_aggregation_group_id", "connection_id"]),
+        public=dict(type="bool"),
         name=dict(),
-        vlan=dict(type='int', default=100),
-        bgp_asn=dict(type='int', default=65000),
+        vlan=dict(type="int", default=100),
+        bgp_asn=dict(type="int", default=65000),
         authentication_key=dict(no_log=True),
         amazon_address=dict(),
         customer_address=dict(),
         address_type=dict(),
-        cidr=dict(type='list', elements='str'),
+        cidr=dict(type="list", elements="str"),
         virtual_gateway_id=dict(),
         direct_connect_gateway_id=dict(),
-        virtual_interface_id=dict()
+        virtual_interface_id=dict(),
     )

-    module = AnsibleAWSModule(argument_spec=argument_spec,
-                              required_one_of=[['virtual_interface_id', 'name']],
-                              required_if=[['state', 'present', ['public']],
-                                           ['public', True, ['amazon_address']],
-                                           ['public', True, ['customer_address']],
-                                           ['public', True, ['cidr']]],
-                              mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']])
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_one_of=[["virtual_interface_id", "name"]],
+        required_if=[
+            ["state", "present", ["public"]],
+            ["public", True, ["amazon_address"]],
+            ["public", True, ["customer_address"]],
+            ["public", True, ["cidr"]],
+        ],
+        mutually_exclusive=[["virtual_gateway_id", "direct_connect_gateway_id"]],
+    )

-    connection = module.client('directconnect')
+    connection = module.client("directconnect")

     try:
         changed, latest_state = ensure_state(connection, module)
@@ -515,5 +534,5 @@
     module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/dms_endpoint.py b/dms_endpoint.py
index 66b5dd9b2c3..b417003689d 100644
--- a/dms_endpoint.py
+++ b/dms_endpoint.py
@@ -346,8 +346,8 @@

 @AWSRetry.jittered_backoff(**backoff_params)
 def dms_describe_tags(connection, **params):
-    """ checks if the endpoint exists """
-    tags = connection.list_tags_for_resource(**params).get('TagList', [])
+    """returns the endpoint's tags as an Ansible dict"""
+    tags = connection.list_tags_for_resource(**params).get("TagList", [])
     return boto3_tag_list_to_ansible_dict(tags)


@@ -355,15 +355,14 @@ def dms_describe_tags(connection, **params):
 def dms_describe_endpoints(connection, **params):
     try:
         endpoints = connection.describe_endpoints(**params)
-    except is_boto3_error_code('ResourceNotFoundFault'):
+    except is_boto3_error_code("ResourceNotFoundFault"):
         return None
-    return endpoints.get('Endpoints', None)
+    return endpoints.get("Endpoints", None)


 def describe_endpoint(connection, endpoint_identifier):
-    """ checks if the endpoint exists """
-    endpoint_filter = dict(Name='endpoint-id',
-
Values=[endpoint_identifier]) + """checks if the endpoint exists""" + endpoint_filter = dict(Name="endpoint-id", Values=[endpoint_identifier]) try: endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -374,8 +373,8 @@ def describe_endpoint(connection, endpoint_identifier): endpoint = endpoints[0] try: - tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags + tags = dms_describe_tags(connection, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags") return endpoint @@ -384,7 +383,7 @@ def describe_endpoint(connection, endpoint_identifier): @AWSRetry.jittered_backoff(**backoff_params) def dms_delete_endpoint(client, **params): """deletes the DMS endpoint based on the EndpointArn""" - if module.params.get('wait'): + if module.params.get("wait"): return delete_dms_endpoint(client) else: return client.delete_endpoint(**params) @@ -392,19 +391,19 @@ def dms_delete_endpoint(client, **params): @AWSRetry.jittered_backoff(**backoff_params) def dms_create_endpoint(client, **params): - """ creates the DMS endpoint""" + """creates the DMS endpoint""" return client.create_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def dms_modify_endpoint(client, **params): - """ updates the endpoint""" + """updates the endpoint""" return client.modify_endpoint(**params) @AWSRetry.jittered_backoff(**backoff_params) def get_endpoint_deleted_waiter(client): - return client.get_waiter('endpoint_deleted') + return client.get_waiter("endpoint_deleted") @AWSRetry.jittered_backoff(**backoff_params) @@ -418,32 +417,22 @@ def dms_add_tags(client, **params): def endpoint_exists(endpoint): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(endpoint['Endpoints'])) + return bool(len(endpoint["Endpoints"])) def delete_dms_endpoint(connection, endpoint_arn): try: - delete_arn = dict( - EndpointArn=endpoint_arn - ) - if module.params.get('wait'): - + delete_arn = dict(EndpointArn=endpoint_arn) + if module.params.get("wait"): delete_output = connection.delete_endpoint(**delete_arn) delete_waiter = get_endpoint_deleted_waiter(connection) delete_waiter.wait( - Filters=[{ - 'Name': 'endpoint-arn', - 'Values': [endpoint_arn] - - }], - WaiterConfig={ - 'Delay': module.params.get('timeout'), - 'MaxAttempts': module.params.get('retries') - } + Filters=[{"Name": "endpoint-arn", "Values": [endpoint_arn]}], + WaiterConfig={"Delay": module.params.get("timeout"), "MaxAttempts": module.params.get("retries")}, ) return delete_output else: @@ -458,71 +447,62 @@ def create_module_params(): :return: dict """ endpoint_parameters = dict( - EndpointIdentifier=module.params.get('endpointidentifier'), - EndpointType=module.params.get('endpointtype'), - EngineName=module.params.get('enginename'), - Username=module.params.get('username'), - Password=module.params.get('password'), - ServerName=module.params.get('servername'), - Port=module.params.get('port'), - DatabaseName=module.params.get('databasename'), - SslMode=module.params.get('sslmode') + EndpointIdentifier=module.params.get("endpointidentifier"), + EndpointType=module.params.get("endpointtype"), + 
EngineName=module.params.get("enginename"), + Username=module.params.get("username"), + Password=module.params.get("password"), + ServerName=module.params.get("servername"), + Port=module.params.get("port"), + DatabaseName=module.params.get("databasename"), + SslMode=module.params.get("sslmode"), ) - if module.params.get('EndpointArn'): - endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn') - if module.params.get('certificatearn'): - endpoint_parameters['CertificateArn'] = \ - module.params.get('certificatearn') + if module.params.get("EndpointArn"): + endpoint_parameters["EndpointArn"] = module.params.get("EndpointArn") + if module.params.get("certificatearn"): + endpoint_parameters["CertificateArn"] = module.params.get("certificatearn") - if module.params.get('dmstransfersettings'): - endpoint_parameters['DmsTransferSettings'] = \ - module.params.get('dmstransfersettings') + if module.params.get("dmstransfersettings"): + endpoint_parameters["DmsTransferSettings"] = module.params.get("dmstransfersettings") - if module.params.get('extraconnectionattributes'): - endpoint_parameters['ExtraConnectionAttributes'] =\ - module.params.get('extraconnectionattributes') + if module.params.get("extraconnectionattributes"): + endpoint_parameters["ExtraConnectionAttributes"] = module.params.get("extraconnectionattributes") - if module.params.get('kmskeyid'): - endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid') + if module.params.get("kmskeyid"): + endpoint_parameters["KmsKeyId"] = module.params.get("kmskeyid") - if module.params.get('tags'): - endpoint_parameters['Tags'] = module.params.get('tags') + if module.params.get("tags"): + endpoint_parameters["Tags"] = module.params.get("tags") - if module.params.get('serviceaccessrolearn'): - endpoint_parameters['ServiceAccessRoleArn'] = \ - module.params.get('serviceaccessrolearn') + if module.params.get("serviceaccessrolearn"): + endpoint_parameters["ServiceAccessRoleArn"] = module.params.get("serviceaccessrolearn") - if module.params.get('externaltabledefinition'): - endpoint_parameters['ExternalTableDefinition'] = \ - module.params.get('externaltabledefinition') + if module.params.get("externaltabledefinition"): + endpoint_parameters["ExternalTableDefinition"] = module.params.get("externaltabledefinition") - if module.params.get('dynamodbsettings'): - endpoint_parameters['DynamoDbSettings'] = \ - module.params.get('dynamodbsettings') + if module.params.get("dynamodbsettings"): + endpoint_parameters["DynamoDbSettings"] = module.params.get("dynamodbsettings") - if module.params.get('s3settings'): - endpoint_parameters['S3Settings'] = module.params.get('s3settings') + if module.params.get("s3settings"): + endpoint_parameters["S3Settings"] = module.params.get("s3settings") - if module.params.get('mongodbsettings'): - endpoint_parameters['MongoDbSettings'] = \ - module.params.get('mongodbsettings') + if module.params.get("mongodbsettings"): + endpoint_parameters["MongoDbSettings"] = module.params.get("mongodbsettings") - if module.params.get('kinesissettings'): - endpoint_parameters['KinesisSettings'] = \ - module.params.get('kinesissettings') + if module.params.get("kinesissettings"): + endpoint_parameters["KinesisSettings"] = module.params.get("kinesissettings") - if module.params.get('elasticsearchsettings'): - endpoint_parameters['ElasticsearchSettings'] = \ - module.params.get('elasticsearchsettings') + if module.params.get("elasticsearchsettings"): + endpoint_parameters["ElasticsearchSettings"] = 
module.params.get("elasticsearchsettings") - if module.params.get('wait'): - endpoint_parameters['wait'] = module.boolean(module.params.get('wait')) + if module.params.get("wait"): + endpoint_parameters["wait"] = module.boolean(module.params.get("wait")) - if module.params.get('timeout'): - endpoint_parameters['timeout'] = module.params.get('timeout') + if module.params.get("timeout"): + endpoint_parameters["timeout"] = module.params.get("timeout") - if module.params.get('retries'): - endpoint_parameters['retries'] = module.params.get('retries') + if module.params.get("retries"): + endpoint_parameters["retries"] = module.params.get("retries") return endpoint_parameters @@ -538,14 +518,16 @@ def compare_params(param_described): param_described = dict(param_described) modparams = create_module_params() # modify can't update tags - param_described.pop('Tags', None) - modparams.pop('Tags', None) + param_described.pop("Tags", None) + modparams.pop("Tags", None) changed = False for paramname in modparams: - if paramname == 'Password' or paramname in param_described \ - and param_described[paramname] == modparams[paramname] or \ - str(param_described[paramname]).lower() \ - == modparams[paramname]: + if ( + paramname == "Password" + or paramname in param_described + and param_described[paramname] == modparams[paramname] + or str(param_described[paramname]).lower() == modparams[paramname] + ): pass else: changed = True @@ -553,25 +535,24 @@ def compare_params(param_described): def modify_dms_endpoint(connection, endpoint): - arn = endpoint['EndpointArn'] + arn = endpoint["EndpointArn"] try: params = create_module_params() # modify can't update tags - params.pop('Tags', None) + params.pop("Tags", None) return dms_modify_endpoint(connection, EndpointArn=arn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params) def ensure_tags(connection, endpoint): - desired_tags = module.params.get('tags', None) + desired_tags = module.params.get("tags", None) if desired_tags is None: return False - current_tags = endpoint.get('tags', {}) + current_tags = endpoint.get("tags", {}) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, module.params.get("purge_tags")) if not tags_to_remove and not tags_to_add: return False @@ -579,7 +560,7 @@ def ensure_tags(connection, endpoint): if module.check_mode: return True - arn = endpoint.get('EndpointArn') + arn = endpoint.get("EndpointArn") try: if tags_to_remove: @@ -609,36 +590,49 @@ def create_dms_endpoint(connection): def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), + state=dict(choices=["present", "absent"], default="present"), endpointidentifier=dict(required=True), - endpointtype=dict(choices=['source', 'target']), - enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb', - 'aurora', 'redshift', 's3', 'db2', 'azuredb', - 'sybase', 'dynamodb', 'mongodb', 'sqlserver'], - required=False), + endpointtype=dict(choices=["source", "target"]), + enginename=dict( + choices=[ + "mysql", + "oracle", + "postgres", + "mariadb", + "aurora", + "redshift", + "s3", + "db2", + "azuredb", + "sybase", + "dynamodb", + "mongodb", + "sqlserver", + ], + required=False, + ), username=dict(), password=dict(no_log=True), servername=dict(), - port=dict(type='int'), + port=dict(type="int"), 
databasename=dict(), extraconnectionattributes=dict(), kmskeyid=dict(no_log=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), certificatearn=dict(), - sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'], - default='none'), + sslmode=dict(choices=["none", "require", "verify-ca", "verify-full"], default="none"), serviceaccessrolearn=dict(), externaltabledefinition=dict(), - dynamodbsettings=dict(type='dict'), - s3settings=dict(type='dict'), - dmstransfersettings=dict(type='dict'), - mongodbsettings=dict(type='dict'), - kinesissettings=dict(type='dict'), - elasticsearchsettings=dict(type='dict'), - wait=dict(type='bool', default=False), - timeout=dict(type='int'), - retries=dict(type='int') + dynamodbsettings=dict(type="dict"), + s3settings=dict(type="dict"), + dmstransfersettings=dict(type="dict"), + mongodbsettings=dict(type="dict"), + kinesissettings=dict(type="dict"), + elasticsearchsettings=dict(type="dict"), + wait=dict(type="bool", default=False), + timeout=dict(type="int"), + retries=dict(type="int"), ) global module module = AnsibleAWSModule( @@ -650,49 +644,48 @@ def main(): ["wait", "True", ["timeout"]], ["wait", "True", ["retries"]], ], - supports_check_mode=False + supports_check_mode=False, ) exit_message = None changed = False - state = module.params.get('state') + state = module.params.get("state") - dmsclient = module.client('dms') - endpoint = describe_endpoint(dmsclient, - module.params.get('endpointidentifier')) - if state == 'present': + dmsclient = module.client("dms") + endpoint = describe_endpoint(dmsclient, module.params.get("endpointidentifier")) + if state == "present": if endpoint: changed |= ensure_tags(dmsclient, endpoint) params_changed = compare_params(endpoint) if params_changed: updated_dms = modify_dms_endpoint(dmsclient, endpoint) exit_message = updated_dms - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True else: exit_message = "Endpoint Already Exists" else: exit_message = create_dms_endpoint(dmsclient) - endpoint = exit_message.get('Endpoint') + endpoint = exit_message.get("Endpoint") changed = True if changed: # modify and create don't return tags - tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn']) - endpoint['tags'] = tags - elif state == 'absent': + tags = dms_describe_tags(dmsclient, ResourceArn=endpoint["EndpointArn"]) + endpoint["tags"] = tags + elif state == "absent": if endpoint: - delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn']) + delete_results = delete_dms_endpoint(dmsclient, endpoint["EndpointArn"]) exit_message = delete_results endpoint = None changed = True else: changed = False - exit_message = 'DMS Endpoint does not exist' + exit_message = "DMS Endpoint does not exist" - endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags']) + endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=["tags"]) module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py index bd75df6d67b..7135aa14ea6 100644 --- a/dms_replication_subnet_group.py +++ b/dms_replication_subnet_group.py @@ -73,16 +73,15 @@ def describe_subnet_group(connection, subnet_group): """checks if instance exists""" try: - subnet_group_filter = 
dict(Name='replication-subnet-group-id', - Values=[subnet_group]) + subnet_group_filter = dict(Name="replication-subnet-group-id", Values=[subnet_group]) return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter]) except botocore.exceptions.ClientError: - return {'ReplicationSubnetGroups': []} + return {"ReplicationSubnetGroups": []} @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_create(connection, **params): - """ creates the replication subnet group """ + """creates the replication subnet group""" return connection.create_replication_subnet_group(**params) @@ -93,17 +92,17 @@ def replication_subnet_group_modify(connection, **modify_params): @AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_delete(module, connection): - subnetid = module.params.get('identifier') + subnetid = module.params.get("identifier") delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) return connection.delete_replication_subnet_group(**delete_parameters) def replication_subnet_exists(subnet): - """ Returns boolean based on the existence of the endpoint + """Returns boolean based on the existence of the endpoint :param endpoint: dict containing the described endpoint :return: bool """ - return bool(len(subnet['ReplicationSubnetGroups'])) + return bool(len(subnet["ReplicationSubnetGroups"])) def create_module_params(module): @@ -113,9 +112,9 @@ def create_module_params(module): """ instance_parameters = dict( # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API - ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(), - ReplicationSubnetGroupDescription=module.params.get('description'), - SubnetIds=module.params.get('subnet_ids'), + ReplicationSubnetGroupIdentifier=module.params.get("identifier").lower(), + ReplicationSubnetGroupDescription=module.params.get("description"), + SubnetIds=module.params.get("subnet_ids"), ) return instance_parameters @@ -132,19 +131,18 @@ def compare_params(module, param_described): modparams = create_module_params(module) changed = False # need to sanitize values that get returned from the API - if 'VpcId' in param_described.keys(): - param_described.pop('VpcId') - if 'SubnetGroupStatus' in param_described.keys(): - param_described.pop('SubnetGroupStatus') + if "VpcId" in param_described.keys(): + param_described.pop("VpcId") + if "SubnetGroupStatus" in param_described.keys(): + param_described.pop("SubnetGroupStatus") for paramname in modparams.keys(): - if paramname in param_described.keys() and \ - param_described.get(paramname) == modparams[paramname]: + if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]: pass - elif paramname == 'SubnetIds': + elif paramname == "SubnetIds": subnets = [] - for subnet in param_described.get('Subnets'): - subnets.append(subnet.get('SubnetIdentifier')) - for modulesubnet in modparams['SubnetIds']: + for subnet in param_described.get("Subnets"): + subnets.append(subnet.get("SubnetIdentifier")) + for modulesubnet in modparams["SubnetIds"]: if modulesubnet in subnets: pass else: @@ -170,23 +168,19 @@ def modify_replication_subnet_group(module, connection): def main(): argument_spec = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - identifier=dict(type='str', required=True), - description=dict(type='str', required=True), - subnet_ids=dict(type='list', elements='str', required=True), - ) - module = AnsibleAWSModule( - 
argument_spec=argument_spec, - supports_check_mode=True + state=dict(type="str", choices=["present", "absent"], default="present"), + identifier=dict(type="str", required=True), + description=dict(type="str", required=True), + subnet_ids=dict(type="list", elements="str", required=True), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) exit_message = None changed = False - state = module.params.get('state') - dmsclient = module.client('dms') - subnet_group = describe_subnet_group(dmsclient, - module.params.get('identifier')) - if state == 'present': + state = module.params.get("state") + dmsclient = module.client("dms") + subnet_group = describe_subnet_group(dmsclient, module.params.get("identifier")) + if state == "present": if replication_subnet_exists(subnet_group): if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]): if not module.check_mode: @@ -203,7 +197,7 @@ def main(): else: exit_message = "Check mode enabled" - elif state == 'absent': + elif state == "absent": if replication_subnet_exists(subnet_group): if not module.check_mode: replication_subnet_group_delete(module, dmsclient) @@ -220,5 +214,5 @@ def main(): module.exit_json(changed=changed, msg=exit_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/dynamodb_table.py b/dynamodb_table.py index a059198d858..a9503735557 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -259,12 +259,19 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -DYNAMO_TYPE_DEFAULT = 'STRING' -INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] -INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] -INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] +DYNAMO_TYPE_DEFAULT = "STRING" +INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"] +INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [ + "hash_key_type", + "range_key_name", + "range_key_type", + "includes", + "read_capacity", + "write_capacity", +] +INDEX_TYPE_OPTIONS = ["all", "global_all", "global_include", "global_keys_only", "include", "keys_only"] # Map in both directions -DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'} +DYNAMO_TYPE_MAP_LONG = {"STRING": "S", "NUMBER": "N", "BINARY": "B"} DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items()) KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) @@ -273,58 +280,63 @@ # LimitExceededException/ResourceInUseException exceptions at you. This can be # pretty slow, so add plenty of retries... 
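# Illustrative sketch (hypothetical helper, not part of the patch): the
# jittered-backoff decorator used throughout these modules retries the wrapped
# boto3 call with exponential backoff plus jitter on throttling errors and on
# any codes listed in catch_extra_error_codes, assuming AWSRetry behaves as it
# does elsewhere in this collection:
#
#     @AWSRetry.jittered_backoff(retries=10, delay=3, catch_extra_error_codes=["ResourceInUseException"])
#     def _delete_table(client, table_name):
#         # re-invoked automatically while DynamoDB still reports the table as in use
#         client.delete_table(TableName=table_name)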
@AWSRetry.jittered_backoff( - retries=45, delay=5, max_delay=30, - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + retries=45, + delay=5, + max_delay=30, + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], ) def _update_table_with_long_retry(**changes): - return client.update_table( - TableName=module.params.get('name'), - **changes - ) + return client.update_table(TableName=module.params.get("name"), **changes) # ResourceNotFoundException is expected here if the table doesn't exist -@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"]) def _describe_table(**params): return client.describe_table(**params) def wait_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + table_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('table_exists') + waiter = client.get_waiter("table_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, TableName=table_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table creation') + module.fail_json_aws(e, msg="Timeout while waiting on table creation") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while waiting on table creation") def wait_not_exists(): - table_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + table_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('table_not_exists') + waiter = client.get_waiter("table_not_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, TableName=table_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on table deletion') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed while waiting on table deletion') + module.fail_json_aws(e, msg="Timeout while waiting on table deletion") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while waiting on table deletion") def _short_type_to_long(short_key): @@ -360,21 +372,21 @@ def _decode_primary_index(current_table): # The schema/attribute definitions are a list of dicts which need the same # treatment as boto3's tag lists schema = boto3_tag_list_to_ansible_dict( - current_table.get('key_schema', []), + current_table.get("key_schema", []), # Map from 'HASH'/'RANGE' to attribute name - tag_name_key_name='key_type', - 
tag_value_key_name='attribute_name', + tag_name_key_name="key_type", + tag_value_key_name="attribute_name", ) attributes = boto3_tag_list_to_ansible_dict( - current_table.get('attribute_definitions', []), + current_table.get("attribute_definitions", []), # Map from attribute name to 'S'/'N'/'B'. - tag_name_key_name='attribute_name', - tag_value_key_name='attribute_type', + tag_name_key_name="attribute_name", + tag_value_key_name="attribute_type", ) - hash_key_name = schema.get('HASH') + hash_key_name = schema.get("HASH") hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None)) - range_key_name = schema.get('RANGE', None) + range_key_name = schema.get("RANGE", None) range_key_type = _short_type_to_long(attributes.get(range_key_name, None)) return dict( @@ -385,56 +397,56 @@ def _decode_primary_index(current_table): ) -def _decode_index(index_data, attributes, type_prefix=''): +def _decode_index(index_data, attributes, type_prefix=""): try: index_map = dict( - name=index_data['index_name'], + name=index_data["index_name"], ) index_data = dict(index_data) - index_data['attribute_definitions'] = attributes + index_data["attribute_definitions"] = attributes index_map.update(_decode_primary_index(index_data)) - throughput = index_data.get('provisioned_throughput', {}) - index_map['provisioned_throughput'] = throughput + throughput = index_data.get("provisioned_throughput", {}) + index_map["provisioned_throughput"] = throughput if throughput: - index_map['read_capacity'] = throughput.get('read_capacity_units') - index_map['write_capacity'] = throughput.get('write_capacity_units') + index_map["read_capacity"] = throughput.get("read_capacity_units") + index_map["write_capacity"] = throughput.get("write_capacity_units") - projection = index_data.get('projection', {}) + projection = index_data.get("projection", {}) if projection: - index_map['type'] = type_prefix + projection.get('projection_type') - index_map['includes'] = projection.get('non_key_attributes', []) + index_map["type"] = type_prefix + projection.get("projection_type") + index_map["includes"] = projection.get("non_key_attributes", []) return index_map except Exception as e: - module.fail_json_aws(e, msg='Decode failure', index_data=index_data) + module.fail_json_aws(e, msg="Decode failure", index_data=index_data) def compatability_results(current_table): if not current_table: return dict() - billing_mode = current_table.get('billing_mode') + billing_mode = current_table.get("billing_mode") primary_indexes = _decode_primary_index(current_table) - hash_key_name = primary_indexes.get('hash_key_name') - hash_key_type = primary_indexes.get('hash_key_type') - range_key_name = primary_indexes.get('range_key_name') - range_key_type = primary_indexes.get('range_key_type') + hash_key_name = primary_indexes.get("hash_key_name") + hash_key_type = primary_indexes.get("hash_key_type") + range_key_name = primary_indexes.get("range_key_name") + range_key_type = primary_indexes.get("range_key_type") indexes = list() - global_indexes = current_table.get('_global_index_map', {}) - local_indexes = current_table.get('_local_index_map', {}) + global_indexes = current_table.get("_global_index_map", {}) + local_indexes = current_table.get("_local_index_map", {}) for index in global_indexes: idx = dict(global_indexes[index]) - idx.pop('provisioned_throughput', None) + idx.pop("provisioned_throughput", None) indexes.append(idx) for index in local_indexes: idx = dict(local_indexes[index]) - idx.pop('provisioned_throughput', None) + 
idx.pop("provisioned_throughput", None) indexes.append(idx) compat_results = dict( @@ -445,72 +457,78 @@ def compatability_results(current_table): indexes=indexes, billing_mode=billing_mode, region=module.region, - table_name=current_table.get('table_name', None), - table_class=current_table.get('table_class_summary', {}).get('table_class', None), - table_status=current_table.get('table_status', None), - tags=current_table.get('tags', {}), + table_name=current_table.get("table_name", None), + table_class=current_table.get("table_class_summary", {}).get("table_class", None), + table_status=current_table.get("table_status", None), + tags=current_table.get("tags", {}), ) if billing_mode == "PROVISIONED": - throughput = current_table.get('provisioned_throughput', {}) - compat_results['read_capacity'] = throughput.get('read_capacity_units', None) - compat_results['write_capacity'] = throughput.get('write_capacity_units', None) + throughput = current_table.get("provisioned_throughput", {}) + compat_results["read_capacity"] = throughput.get("read_capacity_units", None) + compat_results["write_capacity"] = throughput.get("write_capacity_units", None) return compat_results def get_dynamodb_table(): - table_name = module.params.get('name') + table_name = module.params.get("name") try: table = _describe_table(TableName=table_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe table') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe table") - table = table['Table'] + table = table["Table"] try: - tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied when listing tags') + tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"] + except is_boto3_error_code("AccessDeniedException"): + module.warn("Permission denied when listing tags") tags = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to list table tags') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list table tags") tags = boto3_tag_list_to_ansible_dict(tags) table = camel_dict_to_snake_dict(table) # Put some of the values into places people will expect them - table['arn'] = table['table_arn'] - table['name'] = table['table_name'] - table['status'] = table['table_status'] - table['id'] = table['table_id'] - table['size'] = table['table_size_bytes'] - table['tags'] = tags + table["arn"] = table["table_arn"] + table["name"] = table["table_name"] + table["status"] = table["table_status"] + table["id"] = table["table_id"] + table["size"] = table["table_size_bytes"] + table["tags"] = tags - if 'table_class_summary' in table: - table['table_class'] = table['table_class_summary']['table_class'] + if "table_class_summary" in table: + table["table_class"] = table["table_class_summary"]["table_class"] # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST # and 
when updating the billing_mode - if 'billing_mode_summary' in table: - table['billing_mode'] = table['billing_mode_summary']['billing_mode'] + if "billing_mode_summary" in table: + table["billing_mode"] = table["billing_mode_summary"]["billing_mode"] else: - table['billing_mode'] = "PROVISIONED" + table["billing_mode"] = "PROVISIONED" # convert indexes into something we can easily search against - attributes = table['attribute_definitions'] + attributes = table["attribute_definitions"] global_index_map = dict() local_index_map = dict() - for index in table.get('global_secondary_indexes', []): - idx = _decode_index(index, attributes, type_prefix='global_') - global_index_map[idx['name']] = idx - for index in table.get('local_secondary_indexes', []): + for index in table.get("global_secondary_indexes", []): + idx = _decode_index(index, attributes, type_prefix="global_") + global_index_map[idx["name"]] = idx + for index in table.get("local_secondary_indexes", []): idx = _decode_index(index, attributes) - local_index_map[idx['name']] = idx - table['_global_index_map'] = global_index_map - table['_local_index_map'] = local_index_map + local_index_map[idx["name"]] = idx + table["_global_index_map"] = global_index_map + table["_local_index_map"] = local_index_map return table @@ -521,19 +539,19 @@ def _generate_attribute_map(): """ attributes = dict() - for index in (module.params, *module.params.get('indexes')): + for index in (module.params, *module.params.get("indexes")): # run through hash_key_name and range_key_name - for t in ['hash', 'range']: - key_name = index.get(t + '_key_name') + for t in ["hash", "range"]: + key_name = index.get(t + "_key_name") if not key_name: continue - key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT + key_type = index.get(t + "_key_type") or DYNAMO_TYPE_DEFAULT _type = _long_type_to_short(key_type) if key_name in attributes: if _type != attributes[key_name]: - module.fail_json(msg='Conflicting attribute type', - type_1=_type, type_2=attributes[key_name], - key_name=key_name) + module.fail_json( + msg="Conflicting attribute type", type_1=_type, type_2=attributes[key_name], key_name=key_name + ) else: attributes[key_name] = _type @@ -546,9 +564,7 @@ def _generate_attributes(): # Use ansible_dict_to_boto3_tag_list to generate the list of dicts # format we need attrs = ansible_dict_to_boto3_tag_list( - attributes, - tag_name_key_name='AttributeName', - tag_value_key_name='AttributeType' + attributes, tag_name_key_name="AttributeName", tag_value_key_name="AttributeType" ) return list(attrs) @@ -557,8 +573,8 @@ def _generate_throughput(params=None): if not params: params = module.params - read_capacity = params.get('read_capacity') or 1 - write_capacity = params.get('write_capacity') or 1 + read_capacity = params.get("read_capacity") or 1 + write_capacity = params.get("write_capacity") or 1 throughput = dict( ReadCapacityUnits=read_capacity, WriteCapacityUnits=write_capacity, @@ -572,56 +588,54 @@ def _generate_schema(params=None): params = module.params schema = list() - hash_key_name = params.get('hash_key_name') - range_key_name = params.get('range_key_name') + hash_key_name = params.get("hash_key_name") + range_key_name = params.get("range_key_name") if hash_key_name: - entry = _schema_dict(hash_key_name, 'HASH') + entry = _schema_dict(hash_key_name, "HASH") schema.append(entry) if range_key_name: - entry = _schema_dict(range_key_name, 'RANGE') + entry = _schema_dict(range_key_name, "RANGE") schema.append(entry) return schema def 
_primary_index_changes(current_table): - primary_index = _decode_primary_index(current_table) - hash_key_name = primary_index.get('hash_key_name') - _hash_key_name = module.params.get('hash_key_name') - hash_key_type = primary_index.get('hash_key_type') - _hash_key_type = module.params.get('hash_key_type') - range_key_name = primary_index.get('range_key_name') - _range_key_name = module.params.get('range_key_name') - range_key_type = primary_index.get('range_key_type') - _range_key_type = module.params.get('range_key_type') + hash_key_name = primary_index.get("hash_key_name") + _hash_key_name = module.params.get("hash_key_name") + hash_key_type = primary_index.get("hash_key_type") + _hash_key_type = module.params.get("hash_key_type") + range_key_name = primary_index.get("range_key_name") + _range_key_name = module.params.get("range_key_name") + range_key_type = primary_index.get("range_key_type") + _range_key_type = module.params.get("range_key_type") changed = list() if _hash_key_name and (_hash_key_name != hash_key_name): - changed.append('hash_key_name') + changed.append("hash_key_name") if _hash_key_type and (_hash_key_type != hash_key_type): - changed.append('hash_key_type') + changed.append("hash_key_type") if _range_key_name and (_range_key_name != range_key_name): - changed.append('range_key_name') + changed.append("range_key_name") if _range_key_type and (_range_key_type != range_key_type): - changed.append('range_key_type') + changed.append("range_key_type") return changed def _throughput_changes(current_table, params=None): - if not params: params = module.params - throughput = current_table.get('provisioned_throughput', {}) - read_capacity = throughput.get('read_capacity_units', None) - _read_capacity = params.get('read_capacity') or read_capacity - write_capacity = throughput.get('write_capacity_units', None) - _write_capacity = params.get('write_capacity') or write_capacity + throughput = current_table.get("provisioned_throughput", {}) + read_capacity = throughput.get("read_capacity_units", None) + _read_capacity = params.get("read_capacity") or read_capacity + write_capacity = throughput.get("write_capacity_units", None) + _write_capacity = params.get("write_capacity") or write_capacity if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): return dict( @@ -641,14 +655,14 @@ def _generate_global_indexes(billing_mode): if billing_mode == "PAY_PER_REQUEST": include_throughput = False - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) # Convert the type name to upper case and remove the global_ - index['type'] = index['type'].upper()[7:] + index["type"] = index["type"].upper()[7:] index = _generate_index(index, include_throughput) index_exists[name] = True indexes.append(index) @@ -660,14 +674,14 @@ def _generate_local_indexes(): index_exists = dict() indexes = list() - for index in module.params.get('indexes'): + for index in module.params.get("indexes"): index = dict() - if index.get('type') not in ['all', 'include', 'keys_only']: + if index.get("type") not in ["all", "include", "keys_only"]: continue 
- name = index.get('name') + name = index.get("name") if name in index_exists: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) - index['type'] = index['type'].upper() + module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + index["type"] = index["type"].upper() index = _generate_index(index, False) index_exists[name] = True indexes.append(index) @@ -677,32 +691,32 @@ def _generate_local_indexes(): def _generate_global_index_map(current_table): global_index_map = dict() - existing_indexes = current_table['_global_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']: + existing_indexes = current_table["_global_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in global_index_map: - module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case and remove the global_ - idx['type'] = idx['type'].upper()[7:] + idx["type"] = idx["type"].upper()[7:] global_index_map[name] = idx return global_index_map def _generate_local_index_map(current_table): local_index_map = dict() - existing_indexes = current_table['_local_index_map'] - for index in module.params.get('indexes'): - if index.get('type') not in ['all', 'include', 'keys_only']: + existing_indexes = current_table["_local_index_map"] + for index in module.params.get("indexes"): + if index.get("type") not in ["all", "include", "keys_only"]: continue - name = index.get('name') + name = index.get("name") if name in local_index_map: - module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name)) + module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case - idx['type'] = idx['type'].upper() + idx["type"] = idx["type"].upper() local_index_map[name] = idx return local_index_map @@ -710,27 +724,28 @@ def _generate_local_index_map(current_table): def _generate_index(index, include_throughput=True): key_schema = _generate_schema(index) throughput = _generate_throughput(index) - non_key_attributes = index['includes'] or [] + non_key_attributes = index["includes"] or [] projection = dict( - ProjectionType=index['type'], + ProjectionType=index["type"], ) - if index['type'] != 'ALL': + if index["type"] != "ALL": if non_key_attributes: - projection['NonKeyAttributes'] = non_key_attributes + projection["NonKeyAttributes"] = non_key_attributes else: if non_key_attributes: module.fail_json( "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Index name: {0}".format(index['name'])) + "indexes of type 'all'. 
Index name: {0}".format(index["name"]) + ) idx = dict( - IndexName=index['name'], + IndexName=index["name"], KeySchema=key_schema, Projection=projection, ) if include_throughput: - idx['ProvisionedThroughput'] = throughput + idx["ProvisionedThroughput"] = throughput return idx @@ -741,15 +756,15 @@ def _attribute_changes(current_table): def _global_index_changes(current_table): - current_global_index_map = current_table['_global_index_map'] + current_global_index_map = current_table["_global_index_map"] global_index_map = _generate_global_index_map(current_table) - current_billing_mode = current_table.get('billing_mode') + current_billing_mode = current_table.get("billing_mode") - if module.params.get('billing_mode') is None: + if module.params.get("billing_mode") is None: billing_mode = current_billing_mode else: - billing_mode = module.params.get('billing_mode') + billing_mode = module.params.get("billing_mode") include_throughput = True @@ -760,7 +775,6 @@ def _global_index_changes(current_table): # TODO (future) it would be nice to add support for deleting an index for name in global_index_map: - idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput)) if name not in current_global_index_map: index_changes.append(dict(Create=idx)) @@ -797,37 +811,37 @@ def _update_table(current_table): # Get throughput / billing_mode changes throughput_changes = _throughput_changes(current_table) if throughput_changes: - changes['ProvisionedThroughput'] = throughput_changes + changes["ProvisionedThroughput"] = throughput_changes - current_billing_mode = current_table.get('billing_mode') - new_billing_mode = module.params.get('billing_mode') + current_billing_mode = current_table.get("billing_mode") + new_billing_mode = module.params.get("billing_mode") if new_billing_mode is None: new_billing_mode = current_billing_mode if current_billing_mode != new_billing_mode: - changes['BillingMode'] = new_billing_mode + changes["BillingMode"] = new_billing_mode # Update table_class, use existing if none is defined - if module.params.get('table_class'): - if module.params.get('table_class') != current_table.get('table_class'): - changes['TableClass'] = module.params.get('table_class') + if module.params.get("table_class"): + if module.params.get("table_class") != current_table.get("table_class"): + changes["TableClass"] = module.params.get("table_class") global_index_changes = _global_index_changes(current_table) if global_index_changes: - changes['GlobalSecondaryIndexUpdates'] = global_index_changes + changes["GlobalSecondaryIndexUpdates"] = global_index_changes # Only one index can be changed at a time except if changing the billing mode, pass the first during the # main update and deal with the others on a slow retry to wait for # completion if current_billing_mode == new_billing_mode: if len(global_index_changes) > 1: - changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]] + changes["GlobalSecondaryIndexUpdates"] = [global_index_changes[0]] additional_global_index_changes = global_index_changes[1:] local_index_changes = _local_index_changes(current_table) if local_index_changes: - changes['LocalSecondaryIndexUpdates'] = local_index_changes + changes["LocalSecondaryIndexUpdates"] = local_index_changes if not changes: return False @@ -836,38 +850,41 @@ def _update_table(current_table): return True if global_index_changes or local_index_changes: - changes['AttributeDefinitions'] = _generate_attributes() + changes["AttributeDefinitions"] = _generate_attributes() try: 
- client.update_table( - aws_retry=True, - TableName=module.params.get('name'), - **changes - ) + client.update_table(aws_retry=True, TableName=module.params.get("name"), **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update table") if additional_global_index_changes: for index in additional_global_index_changes: try: - _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions']) + _update_table_with_long_retry( + GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update table", changes=changes, - additional_global_index_changes=additional_global_index_changes) - - if module.params.get('wait'): + module.fail_json_aws( + e, + msg="Failed to update table", + changes=changes, + additional_global_index_changes=additional_global_index_changes, + ) + + if module.params.get("wait"): wait_exists() return True def _update_tags(current_table): - _tags = module.params.get('tags') + _tags = module.params.get("tags") if _tags is None: return False - tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'), - purge_tags=module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + current_table["tags"], module.params.get("tags"), purge_tags=module.params.get("purge_tags") + ) # If neither need updating we can return already if not (tags_to_add or tags_to_remove): @@ -880,7 +897,7 @@ def _update_tags(current_table): try: client.tag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], Tags=ansible_dict_to_boto3_tag_list(tags_to_add), ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -889,7 +906,7 @@ def _update_tags(current_table): try: client.untag_resource( aws_retry=True, - ResourceArn=current_table['arn'], + ResourceArn=current_table["arn"], TagKeys=tags_to_remove, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -901,28 +918,32 @@ def _update_tags(current_table): def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: - module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format(primary_index_changes)) + module.fail_json( + "DynamoDB does not support updating the Primary keys on a table. 
Changed parameters are: {0}".format( + primary_index_changes + ) + ) changed = False changed |= _update_table(current_table) changed |= _update_tags(current_table) - if module.params.get('wait'): + if module.params.get("wait"): wait_exists() return changed def create_table(): - table_name = module.params.get('name') - table_class = module.params.get('table_class') - hash_key_name = module.params.get('hash_key_name') - billing_mode = module.params.get('billing_mode') + table_name = module.params.get("name") + table_class = module.params.get("table_class") + hash_key_name = module.params.get("hash_key_name") + billing_mode = module.params.get("billing_mode") if billing_mode is None: billing_mode = "PROVISIONED" - tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {}) + tags = ansible_dict_to_boto3_tag_list(module.params.get("tags") or {}) if not hash_key_name: module.fail_json('"hash_key_name" must be provided when creating a new table.') @@ -950,20 +971,20 @@ def create_table(): ) if table_class: - params['TableClass'] = table_class + params["TableClass"] = table_class if billing_mode == "PROVISIONED": - params['ProvisionedThroughput'] = throughput + params["ProvisionedThroughput"] = throughput if local_indexes: - params['LocalSecondaryIndexes'] = local_indexes + params["LocalSecondaryIndexes"] = local_indexes if global_indexes: - params['GlobalSecondaryIndexes'] = global_indexes + params["GlobalSecondaryIndexes"] = global_indexes try: client.create_table(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to create table') + module.fail_json_aws(e, msg="Failed to create table") - if module.params.get('wait'): + if module.params.get("wait"): wait_exists() return True @@ -976,30 +997,34 @@ def delete_table(current_table): if module.check_mode: return True - table_name = module.params.get('name') + table_name = module.params.get("name") # If an index is mid-update then we have to wait for the update to complete # before deletion will succeed long_retry = AWSRetry.jittered_backoff( - retries=45, delay=5, max_delay=30, - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'], + retries=45, + delay=5, + max_delay=30, + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"], ) try: long_retry(client.delete_table)(TableName=table_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete table') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete table") - if module.params.get('wait'): + if module.params.get("wait"): wait_not_exists() return True def main(): - global module global client @@ -1007,36 +1032,36 @@ def main(): # different parameters, use a separate namespace for names, # and local indexes can't be updated. index_options = dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), # It would be nice to make this optional, but because Local and Global # indexes are mixed in here we need this to be able to tell to which # group of indexes the index belongs. 
- type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS), - hash_key_name=dict(type='str', required=False), - hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str', required=False), - range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES), - includes=dict(type='list', required=False, elements='str'), - read_capacity=dict(type='int', required=False), - write_capacity=dict(type='int', required=False), + type=dict(type="str", required=True, choices=INDEX_TYPE_OPTIONS), + hash_key_name=dict(type="str", required=False), + hash_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str", required=False), + range_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), + includes=dict(type="list", required=False, elements="str"), + read_capacity=dict(type="int", required=False), + write_capacity=dict(type="int", required=False), ) argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, type='str'), - hash_key_name=dict(type='str'), - hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - range_key_name=dict(type='str'), - range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES), - billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']), - read_capacity=dict(type='int'), - write_capacity=dict(type='int'), - indexes=dict(default=[], type='list', elements='dict', options=index_options), - table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, type="str"), + hash_key_name=dict(type="str"), + hash_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + range_key_name=dict(type="str"), + range_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), + billing_mode=dict(type="str", choices=["PROVISIONED", "PAY_PER_REQUEST"]), + read_capacity=dict(type="int"), + write_capacity=dict(type="int"), + indexes=dict(default=[], type="list", elements="dict", options=index_options), + table_class=dict(type="str", choices=["STANDARD", "STANDARD_INFREQUENT_ACCESS"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int", aliases=["wait_for_active_timeout"]), ) module = AnsibleAWSModule( @@ -1046,41 +1071,41 @@ def main(): ) retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'], + catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], ) - client = module.client('dynamodb', retry_decorator=retry_decorator) + client = module.client("dynamodb", retry_decorator=retry_decorator) - if module.params.get('table_class'): - module.require_botocore_at_least('1.23.18', reason='to set table_class') + if module.params.get("table_class"): + module.require_botocore_at_least("1.23.18", reason="to set table_class") current_table = get_dynamodb_table() changed = False table = None results = dict() - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": 
if current_table: changed |= update_table(current_table) else: changed |= create_table() table = get_dynamodb_table() - elif state == 'absent': + elif state == "absent": changed |= delete_table(current_table) compat_results = compatability_results(table) if compat_results: results.update(compat_results) - results['changed'] = changed + results["changed"] = changed if table: # These are used to pass computed data about, not needed for users - table.pop('_global_index_map', None) - table.pop('_local_index_map', None) - results['table'] = table + table.pop("_global_index_map", None) + table.pop("_local_index_map", None) + results["table"] = table module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py index bd1c7789903..eca236cf49a 100644 --- a/dynamodb_ttl.py +++ b/dynamodb_ttl.py @@ -71,48 +71,48 @@ def get_current_ttl_state(c, table_name): - '''Fetch the state dict for a table.''' + """Fetch the state dict for a table.""" current_state = c.describe_time_to_live(TableName=table_name) - return current_state.get('TimeToLiveDescription') + return current_state.get("TimeToLiveDescription") def does_state_need_changing(attribute_name, desired_state, current_spec): - '''Run checks to see if the table needs to be modified. Basically a dirty check.''' + """Run checks to see if the table needs to be modified. Basically a dirty check.""" if not current_spec: # we don't have an entry (or a table?) return True - if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']: + if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]: return True - if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']: + if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]: return True - if attribute_name != current_spec.get('AttributeName'): + if attribute_name != current_spec.get("AttributeName"): return True return False def set_ttl_state(c, table_name, state, attribute_name): - '''Set our specification. Returns the update_time_to_live specification dict, - which is different than the describe_* call.''' + """Set our specification. Returns the update_time_to_live specification dict, + which is different than the describe_* call.""" is_enabled = False - if state.lower() == 'enable': + if state.lower() == "enable": is_enabled = True ret = c.update_time_to_live( TableName=table_name, TimeToLiveSpecification={ - 'Enabled': is_enabled, - 'AttributeName': attribute_name - } + "Enabled": is_enabled, + "AttributeName": attribute_name, + }, ) - return ret.get('TimeToLiveSpecification') + return ret.get("TimeToLiveSpecification") def main(): argument_spec = dict( - state=dict(choices=['enable', 'disable']), + state=dict(choices=["enable", "disable"]), table_name=dict(required=True), attribute_name=dict(required=True), ) @@ -121,26 +121,28 @@ def main(): ) try: - dbclient = module.client('dynamodb') + dbclient = module.client("dynamodb") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - result = {'changed': False} - state = module.params['state'] + result = {"changed": False} + state = module.params["state"] # wrap all our calls to catch the standard exceptions. 
We don't pass `module` in to the # methods so it's easier to do here. try: - current_state = get_current_ttl_state(dbclient, module.params['table_name']) + current_state = get_current_ttl_state(dbclient, module.params["table_name"]) - if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state): + if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state): # changes needed - new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name']) - result['current_status'] = new_state - result['changed'] = True + new_state = set_ttl_state( + dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"] + ) + result["current_status"] = new_state + result["changed"] = True else: # no changes needed - result['current_status'] = current_state + result["current_status"] = current_state except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Failed to get or update ttl state") @@ -152,5 +154,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 5af897cdb80..5d7e49bde90 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -157,67 +157,67 @@ def copy_image(module, ec2): image = None changed = False - tags = module.params.get('tags') - - params = {'SourceRegion': module.params.get('source_region'), - 'SourceImageId': module.params.get('source_image_id'), - 'Name': module.params.get('name'), - 'Description': module.params.get('description'), - 'Encrypted': module.params.get('encrypted'), - } - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + tags = module.params.get("tags") + + params = { + "SourceRegion": module.params.get("source_region"), + "SourceImageId": module.params.get("source_image_id"), + "Name": module.params.get("name"), + "Description": module.params.get("description"), + "Encrypted": module.params.get("encrypted"), + } + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") try: - if module.params.get('tag_equality'): - filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()] - filters.append(dict(Name='state', Values=['available', 'pending'])) + if module.params.get("tag_equality"): + filters = [{"Name": "tag:%s" % k, "Values": [v]} for (k, v) in module.params.get("tags").items()] + filters.append(dict(Name="state", Values=["available", "pending"])) images = ec2.describe_images(Filters=filters) - if len(images['Images']) > 0: - image = images['Images'][0] + if len(images["Images"]) > 0: + image = images["Images"][0] if not image: image = ec2.copy_image(**params) - image_id = image['ImageId'] + image_id = image["ImageId"] if tags: - ec2.create_tags(Resources=[image_id], - Tags=ansible_dict_to_boto3_tag_list(tags)) + ec2.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags)) changed = True - if module.params.get('wait'): + if module.params.get("wait"): delay = 15 - max_attempts = module.params.get('wait_timeout') // delay - image_id = image.get('ImageId') - ec2.get_waiter('image_available').wait( - ImageIds=[image_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + max_attempts = module.params.get("wait_timeout") // delay + image_id = image.get("ImageId") + ec2.get_waiter("image_available").wait( + ImageIds=[image_id], WaiterConfig={"Delay": 
delay, "MaxAttempts": max_attempts} ) module.exit_json(changed=changed, **camel_dict_to_snake_dict(image)) except WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the image to become available') + module.fail_json_aws(e, msg="An error occurred waiting for the image to become available") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not copy AMI") except Exception as e: - module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + module.fail_json(msg="Unhandled exception. (%s)" % to_native(e)) def main(): argument_spec = dict( source_region=dict(required=True), source_image_id=dict(required=True), - name=dict(default='default'), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), - tag_equality=dict(type='bool', default=False)) + name=dict(default="default"), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), + tag_equality=dict(type="bool", default=False), + ) module = AnsibleAWSModule(argument_spec=argument_spec) - ec2 = module.client('ec2') + ec2 = module.client("ec2") copy_image(module, ec2) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_carrier_gateway.py b/ec2_carrier_gateway.py index 3458170e393..6517879c0f8 100644 --- a/ec2_carrier_gateway.py +++ b/ec2_carrier_gateway.py @@ -159,9 +159,7 @@ def get_matching_cagw(self, vpc_id, carrier_gateway_id=None): cagw = None if len(cagws) > 1: - self._module.fail_json( - msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting" - ) + self._module.fail_json(msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting") elif cagws: cagw = camel_dict_to_snake_dict(cagws[0]) diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py index b858f9c4ead..19fc8eab7f5 100644 --- a/ec2_customer_gateway.py +++ b/ec2_customer_gateway.py @@ -121,21 +121,17 @@ class Ec2CustomerGatewayManager: - def __init__(self, module): self.module = module try: - self.ec2 = module.client('ec2') + self.ec2 = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) + @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=["IncorrectState"]) def ensure_cgw_absent(self, gw_id): - response = self.ec2.delete_customer_gateway( - DryRun=False, - CustomerGatewayId=gw_id - ) + response = self.ec2.delete_customer_gateway(DryRun=False, CustomerGatewayId=gw_id) return response def ensure_cgw_present(self, bgp_asn, ip_address): @@ -143,7 +139,7 @@ def ensure_cgw_present(self, bgp_asn, ip_address): bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, - Type='ipsec.1', + Type="ipsec.1", PublicIp=ip_address, BgpAsn=bgp_asn, ) @@ -156,11 +152,8 @@ def tag_cgw_name(self, gw_id, name): gw_id, ], Tags=[ - { - 'Key': 'Name', - 'Value': name - }, - ] + {"Key": "Name", "Value": name}, + ], ) 
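# Aside (illustrative, not part of the upstream module): boto3 expects tags as a
# list of Key/Value maps like the one above, while Ansible modules take plain dicts;
# the module_utils helpers convert between the two, e.g.
#     ansible_dict_to_boto3_tag_list({"Name": "gw-1"}) == [{"Key": "Name", "Value": "gw-1"}]
#     boto3_tag_list_to_ansible_dict([{"Key": "Name", "Value": "gw-1"}]) == {"Name": "gw-1"}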
return response @@ -169,86 +162,84 @@ def describe_gateways(self, ip_address): DryRun=False, Filters=[ { - 'Name': 'state', - 'Values': [ - 'available', - ] + "Name": "state", + "Values": [ + "available", + ], }, { - 'Name': 'ip-address', - 'Values': [ + "Name": "ip-address", + "Values": [ ip_address, - ] - } - ] + ], + }, + ], ) return response def main(): argument_spec = dict( - bgp_asn=dict(required=False, type='int'), + bgp_asn=dict(required=False, type="int"), ip_address=dict(required=True), name=dict(required=True), - routing=dict(default='dynamic', choices=['dynamic', 'static']), - state=dict(default='present', choices=['present', 'absent']), + routing=dict(default="dynamic", choices=["dynamic", "static"]), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ('routing', 'dynamic', ['bgp_asn']) - ] + ("routing", "dynamic", ["bgp_asn"]), + ], ) gw_mgr = Ec2CustomerGatewayManager(module) - name = module.params.get('name') + name = module.params.get("name") - existing = gw_mgr.describe_gateways(module.params['ip_address']) + existing = gw_mgr.describe_gateways(module.params["ip_address"]) results = dict(changed=False) - if module.params['state'] == 'present': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing - if existing['CustomerGateway']['Tags']: - tag_array = existing['CustomerGateway']['Tags'] + if module.params["state"] == "present": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing + if existing["CustomerGateway"]["Tags"]: + tag_array = existing["CustomerGateway"]["Tags"] for key, value in enumerate(tag_array): - if value['Key'] == 'Name': - current_name = value['Value'] + if value["Key"] == "Name": + current_name = value["Value"] if current_name != name: - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True else: if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_present( - module.params['bgp_asn'], - module.params['ip_address'], + results["gateway"] = gw_mgr.ensure_cgw_present( + module.params["bgp_asn"], + module.params["ip_address"], ) - results['name'] = gw_mgr.tag_cgw_name( - results['gateway']['CustomerGateway']['CustomerGatewayId'], - module.params['name'], + results["name"] = gw_mgr.tag_cgw_name( + results["gateway"]["CustomerGateway"]["CustomerGatewayId"], + module.params["name"], ) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': - if existing['CustomerGateways']: - existing['CustomerGateway'] = existing['CustomerGateways'][0] - results['gateway'] = existing + elif module.params["state"] == "absent": + if existing["CustomerGateways"]: + existing["CustomerGateway"] = existing["CustomerGateways"][0] + results["gateway"] = existing if not module.check_mode: - results['gateway'] = gw_mgr.ensure_cgw_absent( - existing['CustomerGateway']['CustomerGatewayId'] - ) - results['changed'] = True + results["gateway"] = gw_mgr.ensure_cgw_absent(existing["CustomerGateway"]["CustomerGatewayId"]) + results["changed"] = True pretty_results = camel_dict_to_snake_dict(results) 
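# Illustrative aside (not upstream code): camel_dict_to_snake_dict recursively
# snake_cases the boto3-style keys collected above, e.g.
#     camel_dict_to_snake_dict({"CustomerGateway": {"BgpAsn": "65000"}})
#     == {"customer_gateway": {"bgp_asn": "65000"}}
# so callers of this module always consume snake_case return values.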
module.exit_json(**pretty_results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py index d0674f52b19..976d3f370d2 100644 --- a/ec2_customer_gateway_info.py +++ b/ec2_customer_gateway_info.py @@ -95,44 +95,46 @@ def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_customer_gateways(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["CustomerGatewayIds"] = module.params.get("customer_gateway_ids") try: result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler)) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']] + snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result["CustomerGateways"]] if snaked_customer_gateways: for customer_gateway in snaked_customer_gateways: - customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', [])) - customer_gateway_name = customer_gateway['tags'].get('Name') + customer_gateway["tags"] = boto3_tag_list_to_ansible_dict(customer_gateway.get("tags", [])) + customer_gateway_name = customer_gateway["tags"].get("Name") if customer_gateway_name: - customer_gateway['customer_gateway_name'] = customer_gateway_name + customer_gateway["customer_gateway_name"] = customer_gateway_name module.exit_json(changed=False, customer_gateways=snaked_customer_gateways) def main(): - argument_spec = dict( - customer_gateway_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + customer_gateway_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict") ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['customer_gateway_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["customer_gateway_ids", "filters"], + ], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_customer_gateways(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_launch_template.py b/ec2_launch_template.py index b807d3aa09f..8e1240d285f 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -446,60 +446,85 @@ def determine_iam_role(module, name_or_arn): - if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): - return {'arn': name_or_arn} - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + if re.match(r"^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$", name_or_arn): + return {"arn": name_or_arn} + iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return {'arn': role['InstanceProfile']['Arn']} - except is_boto3_error_code('NoSuchEntity') as e: + return {"arn": role["InstanceProfile"]["Arn"]} + except is_boto3_error_code("NoSuchEntity") as e: module.fail_json_aws(e, msg="Could not 
find instance_role {0}".format(name_or_arn)) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) + module.fail_json_aws( + e, + msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format( + name_or_arn + ), + ) def existing_templates(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) matches = None try: - if module.params.get('template_id'): - matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True) - elif module.params.get('template_name'): - matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True) - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e: + if module.params.get("template_id"): + matches = ec2.describe_launch_templates( + LaunchTemplateIds=[module.params.get("template_id")], aws_retry=True + ) + elif module.params.get("template_name"): + matches = ec2.describe_launch_templates( + LaunchTemplateNames=[module.params.get("template_name")], aws_retry=True + ) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException") as e: # no named template was found, return nothing/empty versions return None, [] - except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format( - module.params.get('launch_template_id'))) - except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg="Launch template with ID {0} is not a valid ID. It should start with `lt-....`".format( + module.params.get("launch_template_id") + ), + ) + except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except module.fail_json_aws( - e, msg='Launch template with ID {0} could not be found, please supply a name ' - 'instead so that a new template can be created'.format(module.params.get('launch_template_id'))) + e, + msg="Launch template with ID {0} could not be found, please supply a name " + "instead so that a new template can be created".format(module.params.get("launch_template_id")), + ) except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.') + module.fail_json_aws(e, msg="Could not check existing launch templates. 
This may be an IAM permission problem.") else: - template = matches['LaunchTemplates'][0] - template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber'] + template = matches["LaunchTemplates"][0] + template_id, template_version, template_default = ( + template["LaunchTemplateId"], + template["LatestVersionNumber"], + template["DefaultVersionNumber"], + ) try: - return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions'] + return ( + template, + ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)[ + "LaunchTemplateVersions" + ], + ) except (ClientError, BotoCoreError, WaiterError) as e: - module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id)) + module.fail_json_aws( + e, + msg="Could not find launch template versions for {0} (ID: {1}).".format( + template["LaunchTemplateName"], template_id + ), + ) def params_to_launch_data(module, template_params): - if template_params.get('tags'): - tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags')) - template_params['tag_specifications'] = [ - { - 'resource_type': r_type, - 'tags': tag_list - } - for r_type in ('instance', 'volume') + if template_params.get("tags"): + tag_list = ansible_dict_to_boto3_tag_list(template_params.get("tags")) + template_params["tag_specifications"] = [ + {"resource_type": r_type, "tags": tag_list} for r_type in ("instance", "volume") ] - del template_params['tags'] - if module.params.get('iam_instance_profile'): - template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile']) + del template_params["tags"] + if module.params.get("iam_instance_profile"): + template_params["iam_instance_profile"] = determine_iam_role(module, module.params["iam_instance_profile"]) params = snake_dict_to_camel_dict( dict((k, v) for k, v in template_params.items() if v is not None), capitalize_first=True, @@ -508,71 +533,82 @@ def params_to_launch_data(module, template_params): def delete_template(module): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) template, template_versions = existing_templates(module) deleted_versions = [] if template or template_versions: - non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']] + non_default_versions = [to_text(t["VersionNumber"]) for t in template_versions if not t["DefaultVersion"]] if non_default_versions: try: v_resp = ec2.delete_launch_template_versions( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], Versions=non_default_versions, aws_retry=True, ) - if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']: - module.warn('Failed to delete template versions {0} on launch template {1}'.format( - v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'], - template['LaunchTemplateId'], - )) - deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']] + if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: + module.warn( + "Failed to delete template versions {0} on launch template {1}".format( + v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"], + template["LaunchTemplateId"], + ) + ) + deleted_versions = [ + 
camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"] + ] except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws( + e, + msg="Could not delete existing versions of the launch template {0}".format( + template["LaunchTemplateId"] + ), + ) try: resp = ec2.delete_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], aws_retry=True, ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId'])) + module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template["LaunchTemplateId"])) return { - 'deleted_versions': deleted_versions, - 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']), - 'changed': True, + "deleted_versions": deleted_versions, + "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), + "changed": True, } else: - return {'changed': False} + return {"changed": False} def create_or_update(module, template_options): - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound'])) + ec2 = module.client( + "ec2", retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidLaunchTemplateId.NotFound"]) + ) template, template_versions = existing_templates(module) out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) - if lt_data.get('MetadataOptions'): - if not module.botocore_at_least('1.23.30'): + if lt_data.get("MetadataOptions"): + if not module.botocore_at_least("1.23.30"): # fail only if enabled is requested - if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': - module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') + if lt_data["MetadataOptions"].get("InstanceMetadataTags") == "enabled": + module.require_botocore_at_least("1.23.30", reason="to set instance_metadata_tags") # pop if it's not requested to keep backwards compatibility. # otherwise the module fails because parameters are set due to default values - lt_data['MetadataOptions'].pop('InstanceMetadataTags') + lt_data["MetadataOptions"].pop("InstanceMetadataTags") - if not module.botocore_at_least('1.21.29'): + if not module.botocore_at_least("1.21.29"): # fail only if enabled is requested - if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': - module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') + if lt_data["MetadataOptions"].get("HttpProtocolIpv6") == "enabled": + module.require_botocore_at_least("1.21.29", reason="to set http_protocol_ipv6") # pop if it's not requested to keep backwards compatibility. 
# otherwise the modules failes because parameters are set due default values - lt_data['MetadataOptions'].pop('HttpProtocolIpv6') + lt_data["MetadataOptions"].pop("HttpProtocolIpv6") if not (template or template_versions): # create a full new one try: resp = ec2.create_launch_template( - LaunchTemplateName=module.params['template_name'], + LaunchTemplateName=module.params["template_name"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, aws_retry=True, @@ -580,26 +616,26 @@ def create_or_update(module, template_options): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create launch template") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True elif template and template_versions: most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( "VersionDescription", "" ): - out['changed'] = False + out["changed"] = False return out try: - if module.params.get('source_version') in (None, ''): + if module.params.get("source_version") in (None, ""): resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, VersionDescription=str(module.params["version_description"]), aws_retry=True, ) - elif module.params.get('source_version') == 'latest': + elif module.params.get("source_version") == "latest": resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(most_recent["VersionNumber"]), @@ -608,15 +644,24 @@ def create_or_update(module, template_options): ) else: try: - int(module.params.get('source_version')) + int(module.params.get("source_version")) except ValueError: - module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg='source_version param was not a valid integer, got "{0}"'.format( + module.params.get("source_version") + ) + ) # get source template version - source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None) + source_version = next( + (v for v in template_versions if v["VersionNumber"] == int(module.params.get("source_version"))), + None, + ) if source_version is None: - module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version'))) + module.fail_json( + msg='source_version does not exist, got "{0}"'.format(module.params.get("source_version")) + ) resp = ec2.create_launch_template_version( - LaunchTemplateId=template['LaunchTemplateId'], + LaunchTemplateId=template["LaunchTemplateId"], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, SourceVersion=str(source_version["VersionNumber"]), @@ -624,31 +669,35 @@ def create_or_update(module, template_options): aws_retry=True, ) - if module.params.get('default_version') in (None, ''): + if module.params.get("default_version") in (None, ""): # no need to do anything, leave the existing version as default pass - elif module.params.get('default_version') == 'latest': + elif module.params.get("default_version") == "latest": set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - 
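The MetadataOptions handling above fails only when a too-old botocore is combined with an explicit request for the newer field, and otherwise drops the defaulted field for backwards compatibility. A minimal sketch of that gate, with a plain version-tuple comparison standing in for module.botocore_at_least:

import botocore

def botocore_at_least(required):
    def parse(s):
        # '1.23.30' -> (1, 23, 30); tuple comparison does the rest
        return tuple(int(x) for x in s.split("."))
    return parse(botocore.__version__) >= parse(required)

def gate_metadata_options(opts):
    if not botocore_at_least("1.23.30"):
        if opts.get("InstanceMetadataTags") == "enabled":
            raise RuntimeError("botocore>=1.23.30 is required to set instance_metadata_tags")
        # defaulted, not explicitly requested: drop it instead of failing
        opts.pop("InstanceMetadataTags", None)
    return opts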
DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']), + LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(resp["LaunchTemplateVersion"]["VersionNumber"]), ClientToken=uuid4().hex, aws_retry=True, ) else: try: - int(module.params.get('default_version')) + int(module.params.get("default_version")) except ValueError: - module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version'))) + module.fail_json( + msg='default_version param was not a valid integer, got "{0}"'.format( + module.params.get("default_version") + ) + ) set_default = ec2.modify_launch_template( - LaunchTemplateId=template['LaunchTemplateId'], - DefaultVersion=to_text(int(module.params.get('default_version'))), + LaunchTemplateId=template["LaunchTemplateId"], + DefaultVersion=to_text(int(module.params.get("default_version"))), ClientToken=uuid4().hex, aws_retry=True, ) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create subsequent launch template version") template, template_versions = existing_templates(module) - out['changed'] = True + out["changed"] = True return out @@ -658,43 +707,38 @@ def format_module_output(module): template = camel_dict_to_snake_dict(template) template_versions = [camel_dict_to_snake_dict(v) for v in template_versions] for v in template_versions: - for ts in (v['launch_template_data'].get('tag_specifications') or []): - ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags')) + for ts in v["launch_template_data"].get("tag_specifications") or []: + ts["tags"] = boto3_tag_list_to_ansible_dict(ts.pop("tags")) output.update(dict(template=template, versions=template_versions)) - output['default_template'] = [ - v for v in template_versions - if v.get('default_version') + output["default_template"] = [v for v in template_versions if v.get("default_version")][0] + output["latest_template"] = [ + v + for v in template_versions + if (v.get("version_number") and int(v["version_number"]) == int(template["latest_version_number"])) ][0] - output['latest_template'] = [ - v for v in template_versions - if ( - v.get('version_number') and - int(v['version_number']) == int(template['latest_version_number']) - ) - ][0] - if "version_number" in output['default_template']: - output['default_version'] = output['default_template']['version_number'] - if "version_number" in output['latest_template']: - output['latest_version'] = output['latest_template']['version_number'] + if "version_number" in output["default_template"]: + output["default_version"] = output["default_template"]["version_number"] + if "version_number" in output["latest_template"]: + output["latest_version"] = output["latest_template"]["version_number"] return output def main(): template_options = dict( block_device_mappings=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( device_name=dict(), ebs=dict( - type='dict', + type="dict", options=dict( - delete_on_termination=dict(type='bool'), - encrypted=dict(type='bool'), - iops=dict(type='int'), + delete_on_termination=dict(type="bool"), + encrypted=dict(type="bool"), + iops=dict(type="int"), kms_key_id=dict(), snapshot_id=dict(), - volume_size=dict(type='int'), + volume_size=dict(type="int"), volume_type=dict(), ), ), @@ -703,39 +747,39 @@ def main(): ), ), cpu_options=dict( - type='dict', + type="dict", options=dict( - core_count=dict(type='int'), - threads_per_core=dict(type='int'), + core_count=dict(type="int"), + 
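format_module_output above selects the default and latest entries from the snake-cased version list by flag and by version number; condensed, the selection is:

def pick_default_and_latest(template, versions):
    # versions: snake-cased dicts carrying 'version_number' and 'default_version'
    default = next(v for v in versions if v.get("default_version"))
    latest = next(
        v for v in versions if int(v["version_number"]) == int(template["latest_version_number"])
    )
    return default, latest

versions = [
    {"version_number": 1, "default_version": True},
    {"version_number": 2, "default_version": False},
]
print(pick_default_and_latest({"latest_version_number": 2}, versions))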
threads_per_core=dict(type="int"), ), ), credit_specification=dict( - dict(type='dict'), + dict(type="dict"), options=dict( cpu_credits=dict(), ), ), - disable_api_termination=dict(type='bool'), - ebs_optimized=dict(type='bool'), + disable_api_termination=dict(type="bool"), + ebs_optimized=dict(type="bool"), elastic_gpu_specifications=dict( options=dict(type=dict()), - type='list', - elements='dict', + type="list", + elements="dict", ), iam_instance_profile=dict(), image_id=dict(), - instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']), + instance_initiated_shutdown_behavior=dict(choices=["stop", "terminate"]), instance_market_options=dict( - type='dict', + type="dict", options=dict( market_type=dict(), spot_options=dict( - type='dict', + type="dict", options=dict( - block_duration_minutes=dict(type='int'), - instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']), + block_duration_minutes=dict(type="int"), + instance_interruption_behavior=dict(choices=["hibernate", "stop", "terminate"]), max_price=dict(), - spot_instance_type=dict(choices=['one-time', 'persistent']), + spot_instance_type=dict(choices=["one-time", "persistent"]), ), ), ), @@ -744,32 +788,30 @@ def main(): kernel_id=dict(), key_name=dict(), monitoring=dict( - type='dict', - options=dict( - enabled=dict(type='bool') - ), + type="dict", + options=dict(enabled=dict(type="bool")), ), metadata_options=dict( - type='dict', + type="dict", options=dict( - http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), - http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional'), - http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), - instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), - ) + http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), + http_put_response_hop_limit=dict(type="int", default=1), + http_tokens=dict(choices=["optional", "required"], default="optional"), + http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), + instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), + ), ), network_interfaces=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - associate_public_ip_address=dict(type='bool'), - delete_on_termination=dict(type='bool'), + associate_public_ip_address=dict(type="bool"), + delete_on_termination=dict(type="bool"), description=dict(), - device_index=dict(type='int'), - groups=dict(type='list', elements='str'), - ipv6_address_count=dict(type='int'), - ipv6_addresses=dict(type='list', elements='str'), + device_index=dict(type="int"), + groups=dict(type="list", elements="str"), + ipv6_address_count=dict(type="int"), + ipv6_addresses=dict(type="list", elements="str"), network_interface_id=dict(), private_ip_address=dict(), subnet_id=dict(), @@ -783,12 +825,12 @@ def main(): host_id=dict(), tenancy=dict(), ), - type='dict', + type="dict", ), ram_disk_id=dict(), - security_group_ids=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), + security_group_ids=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), user_data=dict(), ) @@ -806,25 +848,25 @@ def main(): module = AnsibleAWSModule( argument_spec=arg_spec, required_one_of=[ - ('template_name', 
'template_id') + ("template_name", "template_id"), ], - supports_check_mode=True + supports_check_mode=True, ) - for interface in (module.params.get('network_interfaces') or []): - if interface.get('ipv6_addresses'): - interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']] + for interface in module.params.get("network_interfaces") or []: + if interface.get("ipv6_addresses"): + interface["ipv6_addresses"] = [{"ipv6_address": x} for x in interface["ipv6_addresses"]] - if module.params.get('state') == 'present': + if module.params.get("state") == "present": out = create_or_update(module, template_options) out.update(format_module_output(module)) - elif module.params.get('state') == 'absent': + elif module.params.get("state") == "absent": out = delete_template(module) else: - module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state'))) + module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get("state"))) module.exit_json(**out) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 8687ded59d1..4e1967c846d 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -118,40 +118,32 @@ def search_placement_group(connection, module): """ name = module.params.get("name") try: - response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": [name] - }]) + response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement group named [%s]" % name) + module.fail_json_aws(e, msg="Couldn't find placement group named [%s]" % name) - if len(response['PlacementGroups']) != 1: + if len(response["PlacementGroups"]) != 1: return None else: - placement_group = response['PlacementGroups'][0] + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } -@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown']) +@AWSRetry.exponential_backoff(catch_extra_error_codes=["InvalidPlacementGroup.Unknown"]) def get_placement_group_information(connection, name): """ Retrieve information about a placement group. 
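search_placement_group above treats exactly one group-name match as "found" and anything else as absent. Stripped of the Ansible wrappers, the lookup is a single boto3 call (credentials and region assumed to come from the environment):

import boto3

def find_placement_group(name):
    ec2 = boto3.client("ec2")
    resp = ec2.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}])
    groups = resp["PlacementGroups"]
    if len(groups) != 1:
        return None  # absent or ambiguous: treat as not found
    pg = groups[0]
    return {"name": pg["GroupName"], "state": pg["State"], "strategy": pg["Strategy"]}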
""" - response = connection.describe_placement_groups( - GroupNames=[name] - ) - placement_group = response['PlacementGroups'][0] + response = connection.describe_placement_groups(GroupNames=[name]) + placement_group = response["PlacementGroups"][0] return { - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], } @@ -161,32 +153,34 @@ def create_placement_group(connection, module): strategy = module.params.get("strategy") partition_count = module.params.get("partition_count") - if strategy != 'partition' and partition_count: - module.fail_json( - msg="'partition_count' can only be set when strategy is set to 'partition'.") + if strategy != "partition" and partition_count: + module.fail_json(msg="'partition_count' can only be set when strategy is set to 'partition'.") params = {} - params['GroupName'] = name - params['Strategy'] = strategy + params["GroupName"] = name + params["Strategy"] = strategy if partition_count: - params['PartitionCount'] = partition_count - params['DryRun'] = module.check_mode + params["PartitionCount"] = partition_count + params["DryRun"] = module.check_mode try: connection.create_placement_group(**params) - except is_boto3_error_code('DryRunOperation'): - module.exit_json(changed=True, placement_group={ - "name": name, - "state": 'DryRun', - "strategy": strategy, - }) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="Couldn't create placement group [%s]" % name) - - module.exit_json(changed=True, - placement_group=get_placement_group_information(connection, name)) + except is_boto3_error_code("DryRunOperation"): + module.exit_json( + changed=True, + placement_group={ + "name": name, + "state": "DryRun", + "strategy": strategy, + }, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't create placement group [%s]" % name) + + module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) @AWSRetry.exponential_backoff() @@ -194,52 +188,44 @@ def delete_placement_group(connection, module): name = module.params.get("name") try: - connection.delete_placement_group( - GroupName=name, DryRun=module.check_mode) + connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Couldn't delete placement group [%s]" % name) + module.fail_json_aws(e, msg="Couldn't delete placement group [%s]" % name) module.exit_json(changed=True) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - partition_count=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition']) + name=dict(required=True, type="str"), + partition_count=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + strategy=dict(default="cluster", choices=["cluster", "spread", "partition"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + 
connection = module.client("ec2") state = module.params.get("state") - if state == 'present': + if state == "present": placement_group = search_placement_group(connection, module) if placement_group is None: create_placement_group(connection, module) else: strategy = module.params.get("strategy") - if placement_group['strategy'] == strategy: - module.exit_json( - changed=False, placement_group=placement_group) + if placement_group["strategy"] == strategy: + module.exit_json(changed=False, placement_group=placement_group) else: name = module.params.get("name") module.fail_json( - msg=("Placement group '{}' exists, can't change strategy" + - " from '{}' to '{}'").format( - name, - placement_group['strategy'], - strategy)) + msg=("Placement group '{}' exists, can't change strategy" + " from '{}' to '{}'").format( + name, placement_group["strategy"], strategy + ) + ) - elif state == 'absent': + elif state == "absent": placement_group = search_placement_group(connection, module) if placement_group is None: module.exit_json(changed=False) @@ -247,5 +233,5 @@ def main(): delete_placement_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index bc9d717e49d..970cd302636 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -85,42 +85,45 @@ def get_placement_groups_details(connection, module): try: if len(names) > 0: response = connection.describe_placement_groups( - Filters=[{ - "Name": "group-name", - "Values": names - }]) + Filters=[ + { + "Name": "group-name", + "Values": names, + } + ] + ) else: response = connection.describe_placement_groups() except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Couldn't find placement groups named [%s]" % names) + module.fail_json_aws(e, msg="Couldn't find placement groups named [%s]" % names) results = [] - for placement_group in response['PlacementGroups']: - results.append({ - "name": placement_group['GroupName'], - "state": placement_group['State'], - "strategy": placement_group['Strategy'], - }) + for placement_group in response["PlacementGroups"]: + results.append( + { + "name": placement_group["GroupName"], + "state": placement_group["State"], + "strategy": placement_group["Strategy"], + } + ) return results def main(): argument_spec = dict( - names=dict(type='list', default=[], elements='str') + names=dict(type="list", default=[], elements="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('ec2') + connection = module.client("ec2") placement_groups = get_placement_groups_details(connection, module) module.exit_json(changed=False, placement_groups=placement_groups) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py index 59d0582c048..ce73191cb79 100644 --- a/ec2_snapshot_copy.py +++ b/ec2_snapshot_copy.py @@ -126,34 +126,33 @@ def copy_snapshot(module, ec2): """ params = { - 'SourceRegion': module.params.get('source_region'), - 'SourceSnapshotId': module.params.get('source_snapshot_id'), - 'Description': module.params.get('description') + "SourceRegion": module.params.get("source_region"), + "SourceSnapshotId": module.params.get("source_snapshot_id"), + "Description": module.params.get("description"), } - if module.params.get('encrypted'): - params['Encrypted'] = True + if module.params.get("encrypted"): + params["Encrypted"] = 
True - if module.params.get('kms_key_id'): - params['KmsKeyId'] = module.params.get('kms_key_id') + if module.params.get("kms_key_id"): + params["KmsKeyId"] = module.params.get("kms_key_id") - if module.params.get('tags'): - params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot']) + if module.params.get("tags"): + params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), types=["snapshot"]) try: - snapshot_id = ec2.copy_snapshot(**params)['SnapshotId'] - if module.params.get('wait'): + snapshot_id = ec2.copy_snapshot(**params)["SnapshotId"] + if module.params.get("wait"): delay = 15 # Add one to max_attempts as wait() increment # its counter before assessing it for time.sleep() - max_attempts = (module.params.get('wait_timeout') // delay) + 1 - ec2.get_waiter('snapshot_completed').wait( - SnapshotIds=[snapshot_id], - WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) + max_attempts = (module.params.get("wait_timeout") // delay) + 1 + ec2.get_waiter("snapshot_completed").wait( + SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.') + module.fail_json_aws(e, msg="An error occurred waiting for the snapshot to become available.") module.exit_json(changed=True, snapshot_id=snapshot_id) @@ -162,23 +161,23 @@ def main(): argument_spec = dict( source_region=dict(required=True), source_snapshot_id=dict(required=True), - description=dict(default=''), - encrypted=dict(type='bool', default=False, required=False), - kms_key_id=dict(type='str', required=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - tags=dict(type='dict', aliases=['resource_tags']), + description=dict(default=""), + encrypted=dict(type="bool", default=False, required=False), + kms_key_id=dict(type="str", required=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + tags=dict(type="dict", aliases=["resource_tags"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) try: - client = module.client('ec2') + client = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") copy_snapshot(module, client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 832d16defc9..8c6282d0b0f 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -230,32 +230,31 @@ class AnsibleEc2Tgw(object): - def __init__(self, module, results): self._module = module self._results = results retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['IncorrectState'], + catch_extra_error_codes=["IncorrectState"], ) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) self._connection = connection self._check_mode = self._module.check_mode def process(self): - """ Process the request based on state parameter . - state = present will search for an existing tgw based and return the object data. 
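copy_snapshot converts the caller's wait_timeout into waiter settings: a fixed Delay plus MaxAttempts = timeout // delay + 1, the extra attempt because the waiter bumps its counter before sleeping. Standalone:

import boto3

def wait_for_snapshot(ec2, snapshot_id, wait_timeout=600):
    delay = 15
    # +1 because the waiter increments its attempt counter before sleeping
    max_attempts = (wait_timeout // delay) + 1
    ec2.get_waiter("snapshot_completed").wait(
        SnapshotIds=[snapshot_id],
        WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts},
    )

# wait_for_snapshot(boto3.client("ec2"), "snap-0123456789abcdef0")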
- if no object is found it will be created - - state = absent will attempt to remove the tgw however will fail if it still has - attachments or associations - """ - description = self._module.params.get('description') - state = self._module.params.get('state', 'present') - tgw_id = self._module.params.get('transit_gateway_id') - - if state == 'present': + """Process the request based on the state parameter. + state = present will search for an existing tgw and return the object data. + if no object is found it will be created + + state = absent will attempt to remove the tgw, but will fail if it still has + attachments or associations + """ + description = self._module.params.get("description") + state = self._module.params.get("state", "present") + tgw_id = self._module.params.get("transit_gateway_id") + + if state == "present": self.ensure_tgw_present(tgw_id, description) - elif state == 'absent': + elif state == "absent": self.ensure_tgw_absent(tgw_id, description) def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): @@ -279,13 +278,13 @@ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): if transit_gateway: if self._check_mode: - transit_gateway['state'] = status + transit_gateway["state"] = status - if transit_gateway.get('state') == status: + if transit_gateway.get("state") == status: status_achieved = True break - elif transit_gateway.get('state') == 'failed': + elif transit_gateway.get("state") == "failed": break else: @@ -295,13 +294,12 @@ def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): self._module.fail_json_aws(e) if not status_achieved: - self._module.fail_json( - msg="Wait time out reached, while waiting for results") + self._module.fail_json(msg="Wait time out reached, while waiting for results") return transit_gateway def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): - """ search for an existing tgw by either tgw_id or description + """search for an existing tgw by either tgw_id or description :param tgw_id: The AWS id of the transit gateway :param description: The description of the transit gateway.
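wait_for_status hand-rolls its polling loop, presumably because boto3 ships no built-in waiter for transit gateway state. The loop's essentials, reduced to plain Python (fetch is any zero-argument callable returning the snake-cased resource dict, or None):

import time

def wait_for_state(fetch, wanted, timeout=300, delay=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = fetch() or {}
        state = resource.get("state")
        if state == wanted:
            return resource
        if state == "failed":
            raise RuntimeError("resource entered 'failed' state")
        time.sleep(delay)
    raise TimeoutError("wait timeout reached while waiting for state %r" % wanted)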
:param skip_deleted: ignore deleted transit gateways @@ -309,7 +307,7 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): """ filters = [] if tgw_id: - filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id}) + filters = ansible_dict_to_boto3_filter_list({"transit-gateway-id": tgw_id}) try: response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters) @@ -319,20 +317,21 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): tgw = None tgws = [] - if len(response.get('TransitGateways', [])) == 1 and tgw_id: - if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted: - tgws.extend(response['TransitGateways']) + if len(response.get("TransitGateways", [])) == 1 and tgw_id: + if (response["TransitGateways"][0]["State"] != "deleted") or not skip_deleted: + tgws.extend(response["TransitGateways"]) - for gateway in response.get('TransitGateways', []): - if description == gateway['Description'] and gateway['State'] != 'deleted': + for gateway in response.get("TransitGateways", []): + if description == gateway["Description"] and gateway["State"] != "deleted": tgws.append(gateway) if len(tgws) > 1: self._module.fail_json( - msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description)) + msg="EC2 returned more than one transit Gateway for description {0}, aborting".format(description) + ) elif tgws: - tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags']) - tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags']) + tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) + tgw["tags"] = boto3_tag_list_to_ansible_dict(tgws[0]["Tags"]) return tgw @@ -352,31 +351,31 @@ def create_tgw(self, description): :return dict: transit gateway object """ options = dict() - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") - if self._module.params.get('asn'): - options['AmazonSideAsn'] = self._module.params.get('asn') + if self._module.params.get("asn"): + options["AmazonSideAsn"] = self._module.params.get("asn") - options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach')) - options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate')) - options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate')) - options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support')) - options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support')) + options["AutoAcceptSharedAttachments"] = self.enable_option_flag(self._module.params.get("auto_attach")) + options["DefaultRouteTableAssociation"] = self.enable_option_flag(self._module.params.get("auto_associate")) + options["DefaultRouteTablePropagation"] = self.enable_option_flag(self._module.params.get("auto_propagate")) + options["VpnEcmpSupport"] = self.enable_option_flag(self._module.params.get("vpn_ecmp_support")) + options["DnsSupport"] = self.enable_option_flag(self._module.params.get("dns_support")) try: response = self._connection.create_transit_gateway(Description=description, Options=options) except (ClientError, BotoCoreError) as e: self._module.fail_json_aws(e) - tgw_id = response['TransitGateway']['TransitGatewayId'] + tgw_id = 
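The filters in get_matching_tgw come from ansible_dict_to_boto3_filter_list, which is just the dict-to-Filters reshaping the EC2 API expects; a minimal reimplementation (the real helper lives in the amazon.aws module_utils):

def dict_to_filter_list(filters_dict):
    # {'transit-gateway-id': 'tgw-0abc'} ->
    # [{'Name': 'transit-gateway-id', 'Values': ['tgw-0abc']}]
    return [
        {"Name": k, "Values": v if isinstance(v, list) else [v]}
        for k, v in filters_dict.items()
    ]

print(dict_to_filter_list({"transit-gateway-id": "tgw-0abc", "state": ["available", "pending"]}))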
response["TransitGateway"]["TransitGatewayId"] if wait: result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available") else: result = self.get_matching_tgw(tgw_id=tgw_id) - self._results['msg'] = (' Transit gateway {0} created'.format(result['transit_gateway_id'])) + self._results["msg"] = " Transit gateway {0} created".format(result["transit_gateway_id"]) return result @@ -387,8 +386,8 @@ def delete_tgw(self, tgw_id): :param tgw_id: The id of the transit gateway :return dict: transit gateway object """ - wait = self._module.params.get('wait') - wait_timeout = self._module.params.get('wait_timeout') + wait = self._module.params.get("wait") + wait_timeout = self._module.params.get("wait_timeout") try: response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id) @@ -396,11 +395,13 @@ def delete_tgw(self, tgw_id): self._module.fail_json_aws(e) if wait: - result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False) + result = self.wait_for_status( + wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False + ) else: result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - self._results['msg'] = (' Transit gateway {0} deleted'.format(tgw_id)) + self._results["msg"] = " Transit gateway {0} deleted".format(tgw_id) return result @@ -417,25 +418,27 @@ def ensure_tgw_present(self, tgw_id=None, description=None): if tgw is None: if self._check_mode: - self._results['changed'] = True - self._results['transit_gateway_id'] = None + self._results["changed"] = True + self._results["transit_gateway_id"] = None return self._results try: if not description: self._module.fail_json(msg="Failed to create Transit Gateway: description argument required") tgw = self.create_tgw(description) - self._results['changed'] = True + self._results["changed"] = True except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to create Transit Gateway') - - self._results['changed'] |= ensure_ec2_tags( - self._connection, self._module, tgw['transit_gateway_id'], - tags=self._module.params.get('tags'), - purge_tags=self._module.params.get('purge_tags'), + self._module.fail_json_aws(e, msg="Unable to create Transit Gateway") + + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + tgw["transit_gateway_id"], + tags=self._module.params.get("tags"), + purge_tags=self._module.params.get("purge_tags"), ) - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id']) + self._results["transit_gateway"] = self.get_matching_tgw(tgw_id=tgw["transit_gateway_id"]) return self._results @@ -447,21 +450,22 @@ def ensure_tgw_absent(self, tgw_id=None, description=None): :param description: The description of the transit gateway. 
:return doct: transit gateway object """ - self._results['transit_gateway_id'] = None + self._results["transit_gateway_id"] = None tgw = self.get_matching_tgw(tgw_id, description) if tgw is not None: if self._check_mode: - self._results['changed'] = True + self._results["changed"] = True return self._results try: - tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id']) - self._results['changed'] = True - self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'], - skip_deleted=False) + tgw = self.delete_tgw(tgw_id=tgw["transit_gateway_id"]) + self._results["changed"] = True + self._results["transit_gateway"] = self.get_matching_tgw( + tgw_id=tgw["transit_gateway_id"], skip_deleted=False + ) except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway') + self._module.fail_json_aws(e, msg="Unable to delete Transit Gateway") return self._results @@ -473,24 +477,24 @@ def setup_module_object(): """ argument_spec = dict( - asn=dict(type='int'), - auto_associate=dict(type='bool', default=True), - auto_attach=dict(type='bool', default=False), - auto_propagate=dict(type='bool', default=True), - description=dict(type='str'), - dns_support=dict(type='bool', default=True), - purge_tags=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - transit_gateway_id=dict(type='str'), - vpn_ecmp_support=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300) + asn=dict(type="int"), + auto_associate=dict(type="bool", default=True), + auto_attach=dict(type="bool", default=False), + auto_propagate=dict(type="bool", default=True), + description=dict(type="str"), + dns_support=dict(type="bool", default=True), + purge_tags=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + transit_gateway_id=dict(type="str"), + vpn_ecmp_support=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_one_of=[('description', 'transit_gateway_id')], + required_one_of=[("description", "transit_gateway_id")], supports_check_mode=True, ) @@ -498,12 +502,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgw_manager = AnsibleEc2Tgw(module=module, results=results) tgw_manager.process() @@ -511,5 +512,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py index 5053c8d65d8..b25346b84b8 100644 --- a/ec2_transit_gateway_info.py +++ b/ec2_transit_gateway_info.py @@ -177,11 +177,10 @@ class AnsibleEc2TgwInfo(object): - def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client('ec2') + self._connection = self._module.client("ec2") self._check_mode = self._module.check_mode @AWSRetry.exponential_backoff() @@ -193,8 +192,8 @@ def describe_transit_gateways(self): connection : boto3 client connection object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(self._module.params['filters']) - transit_gateway_ids = self._module.params['transit_gateway_ids'] + filters = 
ansible_dict_to_boto3_filter_list(self._module.params["filters"]) + transit_gateway_ids = self._module.params["transit_gateway_ids"] # init empty list for return vars transit_gateway_info = list() @@ -202,17 +201,18 @@ def describe_transit_gateways(self): # Get the basic transit gateway info try: response = self._connection.describe_transit_gateways( - TransitGatewayIds=transit_gateway_ids, Filters=filters) - except is_boto3_error_code('InvalidTransitGatewayID.NotFound'): - self._results['transit_gateways'] = [] + TransitGatewayIds=transit_gateway_ids, Filters=filters + ) + except is_boto3_error_code("InvalidTransitGatewayID.NotFound"): + self._results["transit_gateways"] = [] return - for transit_gateway in response['TransitGateways']: - transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags'])) + for transit_gateway in response["TransitGateways"]: + transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=["Tags"])) # convert tag list to ansible dict - transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', [])) + transit_gateway_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(transit_gateway.get("Tags", [])) - self._results['transit_gateways'] = transit_gateway_info + self._results["transit_gateways"] = transit_gateway_info return @@ -223,8 +223,8 @@ def setup_module_object(): """ argument_spec = dict( - transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']), - filters=dict(type='dict', default={}) + transit_gateway_ids=dict(type="list", default=[], elements="str", aliases=["transit_gateway_id"]), + filters=dict(type="dict", default={}), ) module = AnsibleAWSModule( @@ -236,12 +236,9 @@ def setup_module_object(): def main(): - module = setup_module_object() - results = dict( - changed=False - ) + results = dict(changed=False) tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) try: @@ -252,5 +249,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py index 2878fbf9129..301fefb0513 100644 --- a/ec2_transit_gateway_vpc_attachment.py +++ b/ec2_transit_gateway_vpc_attachment.py @@ -221,25 +221,24 @@ def main(): - argument_spec = dict( - state=dict(type='str', required=False, default='present', choices=['absent', 'present']), - transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']), - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - appliance_mode_support=dict(type='bool', required=False), - dns_support=dict(type='bool', required=False), - ipv6_support=dict(type='bool', required=False), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, default="present", choices=["absent", "present"]), + transit_gateway=dict(type="str", required=False, aliases=["transit_gateway_id"]), + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", 
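describe_transit_gateways above converts InvalidTransitGatewayID.NotFound into an empty result rather than a failure, which is the usual shape of is_boto3_error_code. Without the helper, the same check reads:

import boto3
from botocore.exceptions import ClientError

def describe_tgws(ec2, tgw_ids, filters):
    try:
        resp = ec2.describe_transit_gateways(TransitGatewayIds=tgw_ids, Filters=filters)
    except ClientError as e:
        if e.response["Error"]["Code"] == "InvalidTransitGatewayID.NotFound":
            return []  # an ID that no longer exists: report nothing, don't fail
        raise
    return resp["TransitGateways"]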
required=False, default=True), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + appliance_mode_support=dict(type="bool", required=False), + dns_support=dict(type="bool", required=False), + ipv6_support=dict(type="bool", required=False), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) one_of = [ - ['id', 'transit_gateway', 'name'], + ["id", "transit_gateway", "name"], ] module = AnsibleAWSModule( @@ -248,55 +247,68 @@ def main(): required_one_of=one_of, ) - attach_id = module.params.get('id', None) - tgw = module.params.get('transit_gateway', None) - name = module.params.get('name', None) - tags = module.params.get('tags', None) - purge_tags = module.params.get('purge_tags') - state = module.params.get('state') - subnets = module.params.get('subnets', None) - purge_subnets = module.params.get('purge_subnets') + attach_id = module.params.get("id", None) + tgw = module.params.get("transit_gateway", None) + name = module.params.get("name", None) + tags = module.params.get("tags", None) + purge_tags = module.params.get("purge_tags") + state = module.params.get("state") + subnets = module.params.get("subnets", None) + purge_subnets = module.params.get("purge_subnets") # When not provided with an ID see if one exists. if not attach_id: search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if tgw: - filters['transit-gateway-id'] = tgw + filters["transit-gateway-id"] = tgw if name: - filters['tag:Name'] = name + filters["tag:Name"] = name if subnets: vpc_id = search_manager.subnets_to_vpc(subnets) - filters['vpc-id'] = vpc_id + filters["vpc-id"] = vpc_id # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] attachments = search_manager.list(filters=filters) if len(attachments) > 1: - module.fail_json('Multiple matching attachments found, provide an ID', attachments=attachments) + module.fail_json("Multiple matching attachments found, provide an ID", attachments=attachments) # If we find a match then we'll modify it by ID, otherwise we'll be # creating a new RTB. if attachments: - attach_id = attachments[0]['transit_gateway_attachment_id'] + attach_id = attachments[0]["transit_gateway_attachment_id"] manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: if not attach_id: if not tgw: - module.fail_json('No existing attachment found. To create a new attachment' - ' the `transit_gateway` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `transit_gateway` parameter must be provided." + ) if not subnets: - module.fail_json('No existing attachment found. 
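The long state filter above exists because deleted attachments linger in the API for a while and would otherwise shadow a reused name; listing every state except deleted keeps them out of the search. As a direct boto3 call (client construction assumed):

LIVE_STATES = [
    "available", "deleting", "failed", "failing", "initiatingRequest", "modifying",
    "pendingAcceptance", "pending", "rollingBack", "rejected", "rejecting",
]

def find_attachments(ec2, tgw_id):
    # every state except 'deleted', so lingering tombstones don't match
    return ec2.describe_transit_gateway_vpc_attachments(
        Filters=[
            {"Name": "transit-gateway-id", "Values": [tgw_id]},
            {"Name": "state", "Values": LIVE_STATES},
        ]
    )["TransitGatewayVpcAttachments"]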
To create a new attachment' - ' the `subnets` parameter must be provided.') + module.fail_json( + "No existing attachment found. To create a new attachment" + " the `subnets` parameter must be provided." + ) # name is just a special case of tags. if name: @@ -310,9 +322,9 @@ def main(): manager.set_transit_gateway(tgw) manager.set_subnets(subnets, purge_subnets) manager.set_tags(tags, purge_tags) - manager.set_dns_support(module.params.get('dns_support', None)) - manager.set_ipv6_support(module.params.get('ipv6_support', None)) - manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None)) + manager.set_dns_support(module.params.get("dns_support", None)) + manager.set_ipv6_support(module.params.get("ipv6_support", None)) + manager.set_appliance_mode_support(module.params.get("appliance_mode_support", None)) manager.flush_changes() results = dict( @@ -320,7 +332,7 @@ def main(): attachments=[manager.updated_resource], ) if manager.changed: - results['diff'] = dict( + results["diff"] = dict( before=manager.original_resource, after=manager.updated_resource, ) @@ -328,5 +340,5 @@ def main(): module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py index 49c03ff432c..a665e4080cc 100644 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ b/ec2_transit_gateway_vpc_attachment_info.py @@ -147,17 +147,16 @@ def main(): - argument_spec = dict( - id=dict(type='str', required=False, aliases=['attachment_id']), - name=dict(type='str', required=False), - filters=dict(type='dict', required=False), - include_deleted=dict(type='bool', required=False, default=False) + id=dict(type="str", required=False, aliases=["attachment_id"]), + name=dict(type="str", required=False), + filters=dict(type="dict", required=False), + include_deleted=dict(type="bool", required=False, default=False), ) mutually_exclusive = [ - ['id', 'name'], - ['id', 'filters'], + ["id", "name"], + ["id", "filters"], ] module = AnsibleAWSModule( @@ -165,22 +164,31 @@ def main(): supports_check_mode=True, ) - name = module.params.get('name', None) - id = module.params.get('id', None) - opt_filters = module.params.get('filters', None) + name = module.params.get("name", None) + id = module.params.get("id", None) + opt_filters = module.params.get("filters", None) search_manager = TransitGatewayVpcAttachmentManager(module=module) filters = dict() if name: - filters['tag:Name'] = name + filters["tag:Name"] = name - if not module.params.get('include_deleted'): + if not module.params.get("include_deleted"): # Attachments lurk in a 'deleted' state, for a while, ignore them so we # can reuse the names - filters['state'] = [ - 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying', - 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting' + filters["state"] = [ + "available", + "deleting", + "failed", + "failing", + "initiatingRequest", + "modifying", + "pendingAcceptance", + "pending", + "rollingBack", + "rejected", + "rejecting", ] if opt_filters: @@ -191,5 +199,5 @@ def main(): module.exit_json(changed=False, attachments=attachments, filters=filters) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index b6fb0b837f1..b15bec20f06 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -81,16 +81,20 @@ def delete_eigw(module, connection, eigw_id): try: response = 
connection.delete_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - EgressOnlyInternetGatewayId=eigw_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id + ) + except is_boto3_error_code("DryRunOperation"): changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id) + ) if not module.check_mode: - changed = response.get('ReturnCode', False) + changed = response.get("ReturnCode", False) return changed @@ -108,29 +112,35 @@ def create_eigw(module, connection, vpc_id): try: response = connection.create_egress_only_internet_gateway( - aws_retry=True, - DryRun=module.check_mode, - VpcId=vpc_id) - except is_boto3_error_code('DryRunOperation'): + aws_retry=True, DryRun=module.check_mode, VpcId=vpc_id + ) + except is_boto3_error_code("DryRunOperation"): # When boto3 method is run with DryRun=True it returns an error on success # We need to catch the error and return something valid changed = True - except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) if not module.check_mode: - gateway = response.get('EgressOnlyInternetGateway', {}) - state = gateway.get('Attachments', [{}])[0].get('State') - gateway_id = gateway.get('EgressOnlyInternetGatewayId') + gateway = response.get("EgressOnlyInternetGateway", {}) + state = gateway.get("Attachments", [{}])[0].get("State") + gateway_id = gateway.get("EgressOnlyInternetGatewayId") - if gateway_id and state in ('attached', 'attaching'): + if gateway_id and state in ("attached", "attaching"): changed = True else: # EIGW gave back a bad attachment state or an invalid response so we error out - module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id), - **camel_dict_to_snake_dict(response)) + module.fail_json( + msg="Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. 
Bad or no state in response".format( + vpc_id + ), + **camel_dict_to_snake_dict(response), + ) return changed, gateway_id @@ -146,45 +156,41 @@ def describe_eigws(module, connection, vpc_id): gateway_id = None try: - response = connection.describe_egress_only_internet_gateways( - aws_retry=True) + response = connection.describe_egress_only_internet_gateways(aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") - for eigw in response.get('EgressOnlyInternetGateways', []): - for attachment in eigw.get('Attachments', []): - if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'): - gateway_id = eigw.get('EgressOnlyInternetGatewayId') + for eigw in response.get("EgressOnlyInternetGateways", []): + for attachment in eigw.get("Attachments", []): + if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"): + gateway_id = eigw.get("EgressOnlyInternetGatewayId") return gateway_id def main(): - argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']) - ) + argument_spec = dict(vpc_id=dict(required=True), state=dict(default="present", choices=["present", "absent"])) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) - vpc_id = module.params.get('vpc_id') - state = module.params.get('state') + vpc_id = module.params.get("vpc_id") + state = module.params.get("state") eigw_id = describe_eigws(module, connection, vpc_id) result = dict(gateway_id=eigw_id, vpc_id=vpc_id) changed = False - if state == 'present' and not eigw_id: - changed, result['gateway_id'] = create_eigw(module, connection, vpc_id) - elif state == 'absent' and eigw_id: + if state == "present" and not eigw_id: + changed, result["gateway_id"] = create_eigw(module, connection, vpc_id) + elif state == "absent" and eigw_id: changed = delete_eigw(module, connection, eigw_id) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py index fa34ccd8118..022f058d0f9 100644 --- a/ec2_vpc_nacl.py +++ b/ec2_vpc_nacl.py @@ -167,33 +167,33 @@ # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58} +PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58} # Utility methods def icmp_present(entry): - if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]: + if len(entry) == 6 and entry[1] in ["icmp", "ipv6-icmp"] or entry[1] in [1, 58]: return True def subnets_removed(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnet_ids if subnet not in subnets] def subnets_added(nacl_id, subnets, client, module): results = find_acl_by_id(nacl_id, client, module) - associations = 
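describe_eigws above scans attachments client-side: a gateway counts as present when one of its attachments points at the VPC in an attached or attaching state. Reduced to plain boto3, without pagination, matching the module:

import boto3

def find_eigw(ec2, vpc_id):
    resp = ec2.describe_egress_only_internet_gateways()
    for eigw in resp.get("EgressOnlyInternetGateways", []):
        for attachment in eigw.get("Attachments", []):
            if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"):
                return eigw["EgressOnlyInternetGatewayId"]
    return None  # no gateway attached to this VPC

# find_eigw(boto3.client("ec2"), "vpc-0123456789abcdef0")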
results['NetworkAcls'][0]['Associations'] - subnet_ids = [assoc['SubnetId'] for assoc in associations] + associations = results["NetworkAcls"][0]["Associations"] + subnet_ids = [assoc["SubnetId"] for assoc in associations] return [subnet for subnet in subnets if subnet not in subnet_ids] def subnets_changed(nacl, client, module): changed = False - vpc_id = module.params.get('vpc_id') - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + vpc_id = module.params.get("vpc_id") + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) if not subnets: default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] @@ -219,40 +219,41 @@ def subnets_changed(nacl, client, module): def nacls_changed(nacl, client, module): changed = False params = dict() - params['egress'] = module.params.get('egress') - params['ingress'] = module.params.get('ingress') + params["egress"] = module.params.get("egress") + params["ingress"] = module.params.get("ingress") - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] nacl = describe_network_acl(client, module) - entries = nacl['NetworkAcls'][0]['Entries'] - egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767] - ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767] - if rules_changed(egress, params['egress'], True, nacl_id, client, module): + entries = nacl["NetworkAcls"][0]["Entries"] + egress = [rule for rule in entries if rule["Egress"] is True and rule["RuleNumber"] < 32767] + ingress = [rule for rule in entries if rule["Egress"] is False and rule["RuleNumber"] < 32767] + if rules_changed(egress, params["egress"], True, nacl_id, client, module): changed = True - if rules_changed(ingress, params['ingress'], False, nacl_id, client, module): + if rules_changed(ingress, params["ingress"], False, nacl_id, client, module): changed = True return changed def tags_changed(nacl_id, client, module): - tags = module.params.get('tags') - name = module.params.get('name') - purge_tags = module.params.get('purge_tags') + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") if name is None and tags is None: return False - if module.params.get('tags') is None: + if module.params.get("tags") is None: # Only purge tags if tags is explicitly set to {} and purge_tags is True purge_tags = False new_tags = dict() - if module.params.get('name') is not None: - new_tags['Name'] = module.params.get('name') - new_tags.update(module.params.get('tags') or {}) + if module.params.get("name") is not None: + new_tags["Name"] = module.params.get("name") + new_tags.update(module.params.get("tags") or {}) - return ensure_ec2_tags(client, module, nacl_id, tags=new_tags, - purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound']) + return ensure_ec2_tags( + client, module, nacl_id, tags=new_tags, purge_tags=purge_tags, retry_codes=["InvalidNetworkAclID.NotFound"] + ) def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): @@ -267,60 +268,60 @@ def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): if removed_rules: params = dict() for rule in removed_rules: - params['NetworkAclId'] = nacl_id - params['RuleNumber'] = rule['RuleNumber'] - params['Egress'] = Egress + params["NetworkAclId"] = nacl_id + params["RuleNumber"] = rule["RuleNumber"] + params["Egress"] = Egress delete_network_acl_entry(params, client, module) 
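tags_changed folds the module's name parameter into the tag set as the Name key, and only purges when tags was passed explicitly; the merge on its own:

def merged_tags(name, tags, purge_tags=True):
    if tags is None:
        purge_tags = False  # only purge when tags={} was passed explicitly
    new_tags = {}
    if name is not None:
        new_tags["Name"] = name
    new_tags.update(tags or {})
    return new_tags, purge_tags

print(merged_tags("my-nacl", {"env": "prod"}))  # ({'Name': 'my-nacl', 'env': 'prod'}, True)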
changed = True added_rules = [x for x in rules if x not in aws_rules] if added_rules: for rule in added_rules: - rule['NetworkAclId'] = nacl_id + rule["NetworkAclId"] = nacl_id create_network_acl_entry(rule, client, module) changed = True return changed def is_ipv6(cidr): - return ':' in cidr + return ":" in cidr def process_rule_entry(entry, Egress): params = dict() - params['RuleNumber'] = entry[0] - params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) - params['RuleAction'] = entry[2] - params['Egress'] = Egress + params["RuleNumber"] = entry[0] + params["Protocol"] = str(PROTOCOL_NUMBERS[entry[1]]) + params["RuleAction"] = entry[2] + params["Egress"] = Egress if is_ipv6(entry[3]): - params['Ipv6CidrBlock'] = entry[3] + params["Ipv6CidrBlock"] = entry[3] else: - params['CidrBlock'] = entry[3] + params["CidrBlock"] = entry[3] if icmp_present(entry): - params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} + params["IcmpTypeCode"] = {"Type": int(entry[4]), "Code": int(entry[5])} else: if entry[6] or entry[7]: - params['PortRange'] = {"From": entry[6], 'To': entry[7]} + params["PortRange"] = {"From": entry[6], "To": entry[7]} return params def restore_default_associations(assoc_ids, default_nacl_id, client, module): if assoc_ids: params = dict() - params['NetworkAclId'] = default_nacl_id[0] + params["NetworkAclId"] = default_nacl_id[0] for assoc_id in assoc_ids: - params['AssociationId'] = assoc_id + params["AssociationId"] = assoc_id restore_default_acl_association(params, client, module) return True def construct_acl_entries(nacl, client, module): - for entry in module.params.get('ingress'): + for entry in module.params.get("ingress"): params = process_rule_entry(entry, Egress=False) - params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) - for rule in module.params.get('egress'): + for rule in module.params.get("egress"): params = process_rule_entry(rule, Egress=True) - params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] create_network_acl_entry(params, client, module) @@ -328,21 +329,21 @@ def construct_acl_entries(nacl, client, module): def setup_network_acl(client, module): changed = False nacl = describe_network_acl(client, module) - if not nacl['NetworkAcls']: + if not nacl["NetworkAcls"]: tags = {} - if module.params.get('name'): - tags['Name'] = module.params.get('name') - tags.update(module.params.get('tags') or {}) - nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags) - nacl_id = nacl['NetworkAcl']['NetworkAclId'] + if module.params.get("name"): + tags["Name"] = module.params.get("name") + tags.update(module.params.get("tags") or {}) + nacl = create_network_acl(module.params.get("vpc_id"), client, module, tags) + nacl_id = nacl["NetworkAcl"]["NetworkAclId"] subnets = subnets_to_associate(nacl, client, module) replace_network_acl_association(nacl_id, subnets, client, module) construct_acl_entries(nacl, client, module) changed = True - return changed, nacl['NetworkAcl']['NetworkAclId'] + return changed, nacl["NetworkAcl"]["NetworkAclId"] else: changed = False - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] changed |= subnets_changed(nacl, client, module) changed |= nacls_changed(nacl, client, module) changed |= tags_changed(nacl_id, client, module) @@ -353,11 +354,11 @@ def remove_network_acl(client, 
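process_rule_entry translates the module's positional rule tuples, here assumed to be (rule_no, protocol, action, cidr, icmp_type, icmp_code, port_from, port_to), into the keyword shape create_network_acl_entry expects, mapping protocol names through the IANA table and picking IcmpTypeCode or PortRange by protocol. A worked example:

PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58}

def rule_to_params(entry, egress):
    rule_no, proto, action, cidr, icmp_type, icmp_code, p_from, p_to = entry
    params = {
        "RuleNumber": rule_no,
        "Protocol": str(PROTOCOL_NUMBERS[proto]),
        "RuleAction": action,
        "Egress": egress,
        ("Ipv6CidrBlock" if ":" in cidr else "CidrBlock"): cidr,
    }
    if proto in ("icmp", "ipv6-icmp"):
        params["IcmpTypeCode"] = {"Type": int(icmp_type), "Code": int(icmp_code)}
    elif p_from or p_to:
        params["PortRange"] = {"From": p_from, "To": p_to}
    return params

print(rule_to_params((100, "tcp", "allow", "0.0.0.0/0", None, None, 22, 22), egress=False))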
module): changed = False result = dict() nacl = describe_network_acl(client, module) - if nacl['NetworkAcls']: - nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] - vpc_id = nacl['NetworkAcls'][0]['VpcId'] - associations = nacl['NetworkAcls'][0]['Associations'] - assoc_ids = [a['NetworkAclAssociationId'] for a in associations] + if nacl["NetworkAcls"]: + nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] + vpc_id = nacl["NetworkAcls"][0]["VpcId"] + associations = nacl["NetworkAcls"][0]["Associations"] + assoc_ids = [a["NetworkAclAssociationId"] for a in associations] default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) if not default_nacl_id: result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} @@ -384,7 +385,7 @@ def _create_network_acl(client, *args, **kwargs): def create_network_acl(vpc_id, client, module, tags): params = dict(VpcId=vpc_id) if tags: - params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl']) + params["TagSpecifications"] = boto3_tag_specifications(tags, ["network-acl"]) try: if module.check_mode: nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) @@ -395,7 +396,7 @@ def create_network_acl(vpc_id, client, module, tags): return nacl -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _create_network_acl_entry(client, *args, **kwargs): return client.create_network_acl_entry(*args, **kwargs) @@ -421,7 +422,7 @@ def delete_network_acl(nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _delete_network_acl_entry(client, *args, **kwargs): return client.delete_network_acl_entry(*args, **kwargs) @@ -439,7 +440,7 @@ def _describe_network_acls(client, **kwargs): return client.describe_network_acls(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _describe_network_acls_retry_missing(client, **kwargs): return client.describe_network_acls(**kwargs) @@ -448,25 +449,23 @@ def describe_acl_associations(subnets, client, module): if not subnets: return [] try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.subnet-id', 'Values': subnets} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.subnet-id", "Values": subnets}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - associations = results['NetworkAcls'][0]['Associations'] - return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets] + associations = results["NetworkAcls"][0]["Associations"] + return [a["NetworkAclAssociationId"] for a in associations if a["SubnetId"] in subnets] def describe_network_acl(client, module): try: - if module.params.get('nacl_id'): - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]} - ]) + if module.params.get("nacl_id"): + nacl = _describe_network_acls( + client, Filters=[{"Name": "network-acl-id", "Values": [module.params.get("nacl_id")]}] + ) else: - nacl = _describe_network_acls(client, Filters=[ - {'Name': 'tag:Name', 'Values': [module.params.get('name')]} - ]) + nacl = _describe_network_acls(client, Filters=[{"Name": 
"tag:Name", "Values": [module.params.get("name")]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) return nacl @@ -481,38 +480,37 @@ def find_acl_by_id(nacl_id, client, module): def find_default_vpc_nacl(vpc_id, client, module): try: - response = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'vpc-id', 'Values': [vpc_id]}]) + response = _describe_network_acls_retry_missing(client, Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - nacls = response['NetworkAcls'] - return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True] + nacls = response["NetworkAcls"] + return [n["NetworkAclId"] for n in nacls if n["IsDefault"] is True] def find_subnet_ids_by_nacl_id(nacl_id, client, module): try: - results = _describe_network_acls_retry_missing(client, Filters=[ - {'Name': 'association.network-acl-id', 'Values': [nacl_id]} - ]) + results = _describe_network_acls_retry_missing( + client, Filters=[{"Name": "association.network-acl-id", "Values": [nacl_id]}] + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - if results['NetworkAcls']: - associations = results['NetworkAcls'][0]['Associations'] - return [s['SubnetId'] for s in associations if s['SubnetId']] + if results["NetworkAcls"]: + associations = results["NetworkAcls"][0]["Associations"] + return [s["SubnetId"] for s in associations if s["SubnetId"]] else: return [] -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) def replace_network_acl_association(nacl_id, subnets, client, module): params = dict() - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id for association in describe_acl_associations(subnets, client, module): - params['AssociationId'] = association + params["AssociationId"] = association try: if not module.check_mode: _replace_network_acl_association(client, **params) @@ -520,7 +518,7 @@ def replace_network_acl_association(nacl_id, subnets, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_entry(client, *args, **kwargs): return client.replace_network_acl_entry(*args, **kwargs) @@ -528,7 +526,7 @@ def _replace_network_acl_entry(client, *args, **kwargs): def replace_network_acl_entry(entries, Egress, nacl_id, client, module): for entry in entries: params = entry - params['NetworkAclId'] = nacl_id + params["NetworkAclId"] = nacl_id try: if not module.check_mode: _replace_network_acl_entry(client, **params) @@ -536,7 +534,7 @@ def replace_network_acl_entry(entries, Egress, nacl_id, client, module): module.fail_json_aws(e) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) def _replace_network_acl_association(client, *args, **kwargs): return client.replace_network_acl_association(*args, **kwargs) @@ -555,25 +553,23 @@ def _describe_subnets(client, *args, **kwargs): def subnets_to_associate(nacl, client, module): - params = list(module.params.get('subnets')) + params = list(module.params.get("subnets")) if not params: return [] all_found 
= [] if any(x.startswith("subnet-") for x in params): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'subnet-id', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "subnet-id", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) if len(params) != len(all_found): try: - subnets = _describe_subnets(client, Filters=[ - {'Name': 'tag:Name', 'Values': params}]) - all_found.extend(subnets.get('Subnets', [])) + subnets = _describe_subnets(client, Filters=[{"Name": "tag:Name", "Values": params}]) + all_found.extend(subnets.get("Subnets", [])) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId'))) + return list(set(s["SubnetId"] for s in all_found if s.get("SubnetId"))) def main(): @@ -581,29 +577,31 @@ def main(): vpc_id=dict(), name=dict(), nacl_id=dict(), - subnets=dict(required=False, type='list', default=list(), elements='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - ingress=dict(required=False, type='list', default=list(), elements='list'), - egress=dict(required=False, type='list', default=list(), elements='list'), - state=dict(default='present', choices=['present', 'absent']), + subnets=dict(required=False, type="list", default=list(), elements="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), + ingress=dict(required=False, type="list", default=list(), elements="list"), + egress=dict(required=False, type="list", default=list(), elements="list"), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[["name", "nacl_id"]], + required_if=[["state", "present", ["vpc_id"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[['name', 'nacl_id']], - required_if=[['state', 'present', ['vpc_id']]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2') + client = module.client("ec2") invocations = { "present": setup_network_acl, - "absent": remove_network_acl + "absent": remove_network_acl, } (changed, results) = invocations[state](client, module) module.exit_json(changed=changed, nacl_id=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index 3d37cf26524..ecf530a9d74 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -119,11 +119,10 @@ # VPC-supported IANA protocol numbers # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'} +PROTOCOL_NAMES = {"-1": "all", "1": "icmp", "6": "tcp", "17": "udp"} def list_ec2_vpc_nacls(connection, module): - nacl_ids = module.params.get("nacl_ids") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) @@ -132,86 +131,97 @@ def list_ec2_vpc_nacls(connection, module): try: nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters) - except is_boto3_error_code('InvalidNetworkAclID.NotFound'): - module.fail_json(msg='Unable to describe ACL. 
NetworkAcl does not exist') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidNetworkAclID.NotFound"): + module.fail_json(msg="Unable to describe ACL. NetworkAcl does not exist") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_nacls = [] - for nacl in nacls['NetworkAcls']: + for nacl in nacls["NetworkAcls"]: snaked_nacls.append(camel_dict_to_snake_dict(nacl)) # Turn the boto3 result in to ansible friendly tag dictionary for nacl in snaked_nacls: - if 'tags' in nacl: - nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value') - if 'entries' in nacl: - nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and entry['egress']] - nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries'] - if entry['rule_number'] < 32767 and not entry['egress']] - del nacl['entries'] - if 'associations' in nacl: - nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']] - del nacl['associations'] - if 'network_acl_id' in nacl: - nacl['nacl_id'] = nacl['network_acl_id'] - del nacl['network_acl_id'] + if "tags" in nacl: + nacl["tags"] = boto3_tag_list_to_ansible_dict(nacl["tags"], "key", "value") + if "entries" in nacl: + nacl["egress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and entry["egress"] + ] + nacl["ingress"] = [ + nacl_entry_to_list(entry) + for entry in nacl["entries"] + if entry["rule_number"] < 32767 and not entry["egress"] + ] + del nacl["entries"] + if "associations" in nacl: + nacl["subnets"] = [a["subnet_id"] for a in nacl["associations"]] + del nacl["associations"] + if "network_acl_id" in nacl: + nacl["nacl_id"] = nacl["network_acl_id"] + del nacl["network_acl_id"] module.exit_json(nacls=snaked_nacls) def nacl_entry_to_list(entry): - # entry list format # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to] elist = [] - elist.append(entry['rule_number']) + elist.append(entry["rule_number"]) - if entry.get('protocol') in PROTOCOL_NAMES: - elist.append(PROTOCOL_NAMES[entry['protocol']]) + if entry.get("protocol") in PROTOCOL_NAMES: + elist.append(PROTOCOL_NAMES[entry["protocol"]]) else: - elist.append(entry.get('protocol')) + elist.append(entry.get("protocol")) - elist.append(entry['rule_action']) + elist.append(entry["rule_action"]) - if entry.get('cidr_block'): - elist.append(entry['cidr_block']) - elif entry.get('ipv6_cidr_block'): - elist.append(entry['ipv6_cidr_block']) + if entry.get("cidr_block"): + elist.append(entry["cidr_block"]) + elif entry.get("ipv6_cidr_block"): + elist.append(entry["ipv6_cidr_block"]) else: elist.append(None) elist = elist + [None, None, None, None] - if entry['protocol'] in ('1', '58'): - elist[4] = entry.get('icmp_type_code', {}).get('type') - elist[5] = entry.get('icmp_type_code', {}).get('code') + if entry["protocol"] in ("1", "58"): + elist[4] = entry.get("icmp_type_code", {}).get("type") + elist[5] = entry.get("icmp_type_code", {}).get("code") - if entry['protocol'] not in ('1', '6', '17', '58'): + if entry["protocol"] not in ("1", "6", "17", "58"): elist[6] = 0 elist[7] = 65535 - elif 'port_range' in entry: - elist[6] = 
entry['port_range']['from'] - elist[7] = entry['port_range']['to'] + elif "port_range" in entry: + elist[6] = entry["port_range"]["from"] + elist[7] = entry["port_range"]["to"] return elist def main(): - argument_spec = dict( - nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'), - filters=dict(default={}, type='dict')) + nacl_ids=dict(default=[], type="list", aliases=["nacl_id"], elements="str"), + filters=dict(default={}, type="dict"), + ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) list_ec2_vpc_nacls(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py index a5af559cc9d..465c9c852eb 100644 --- a/ec2_vpc_peer.py +++ b/ec2_vpc_peer.py @@ -370,15 +370,13 @@ def wait_for_state(client, module, state, pcx_id): - waiter = client.get_waiter('vpc_peering_connection_exists') + waiter = client.get_waiter("vpc_peering_connection_exists") peer_filter = { - 'vpc-peering-connection-id': pcx_id, - 'status-code': state, + "vpc-peering-connection-id": pcx_id, + "status-code": state, } try: - waiter.wait( - Filters=ansible_dict_to_boto3_filter_list(peer_filter) - ) + waiter.wait(Filters=ansible_dict_to_boto3_filter_list(peer_filter)) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, "Failed to wait for state change") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -387,18 +385,18 @@ def wait_for_state(client, module, state, pcx_id): def describe_peering_connections(params, client): peer_filter = { - 'requester-vpc-info.vpc-id': params['VpcId'], - 'accepter-vpc-info.vpc-id': params['PeerVpcId'], + "requester-vpc-info.vpc-id": params["VpcId"], + "accepter-vpc-info.vpc-id": params["PeerVpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(peer_filter), ) - if result['VpcPeeringConnections'] == []: + if result["VpcPeeringConnections"] == []: # Try again with the VPC/Peer relationship reversed peer_filter = { - 'requester-vpc-info.vpc-id': params['PeerVpcId'], - 'accepter-vpc-info.vpc-id': params['VpcId'], + "requester-vpc-info.vpc-id": params["PeerVpcId"], + "accepter-vpc-info.vpc-id": params["VpcId"], } result = client.describe_vpc_peering_connections( aws_retry=True, @@ -409,29 +407,32 @@ def describe_peering_connections(params, client): def is_active(peering_conn): - return peering_conn['Status']['Code'] == 'active' + return peering_conn["Status"]["Code"] == "active" def is_pending(peering_conn): - return peering_conn['Status']['Code'] == 'pending-acceptance' + return peering_conn["Status"]["Code"] == "pending-acceptance" def create_peer_connection(client, module): changed = False params = dict() - params['VpcId'] = module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - if module.params.get('peer_region'): - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + if module.params.get("peer_region"): + params["PeerRegion"] = module.params.get("peer_region") + 
if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) peering_conns = describe_peering_connections(params, client) - for peering_conn in peering_conns['VpcPeeringConnections']: - pcx_id = peering_conn['VpcPeeringConnectionId'] - if ensure_ec2_tags(client, module, pcx_id, - purge_tags=module.params.get('purge_tags'), - tags=module.params.get('tags'), - ): + for peering_conn in peering_conns["VpcPeeringConnections"]: + pcx_id = peering_conn["VpcPeeringConnectionId"] + if ensure_ec2_tags( + client, + module, + pcx_id, + purge_tags=module.params.get("purge_tags"), + tags=module.params.get("tags"), + ): changed = True if is_active(peering_conn): return (changed, peering_conn) @@ -439,54 +440,59 @@ def create_peer_connection(client, module): return (changed, peering_conn) try: peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params) - pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] - if module.params.get('tags'): + pcx_id = peering_conn["VpcPeeringConnection"]["VpcPeeringConnectionId"] + if module.params.get("tags"): # Once the minimum botocore version is bumped to > 1.17.24 # (hopefully community.aws 3.0.0) we can add the tags to the # creation parameters - add_ec2_tags(client, module, pcx_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) - if module.params.get('wait'): - wait_for_state(client, module, 'pending-acceptance', pcx_id) + add_ec2_tags( + client, + module, + pcx_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) + if module.params.get("wait"): + wait_for_state(client, module, "pending-acceptance", pcx_id) changed = True - return (changed, peering_conn['VpcPeeringConnection']) + return (changed, peering_conn["VpcPeeringConnection"]) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) def remove_peer_connection(client, module): - pcx_id = module.params.get('peering_id') + pcx_id = module.params.get("peering_id") if pcx_id: peering_conn = get_peering_connection_by_id(pcx_id, client, module) else: params = dict() - params['VpcId'] = module.params.get('vpc_id') - params['PeerVpcId'] = module.params.get('peer_vpc_id') - params['PeerRegion'] = module.params.get('peer_region') - if module.params.get('peer_owner_id'): - params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) - peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0] + params["VpcId"] = module.params.get("vpc_id") + params["PeerVpcId"] = module.params.get("peer_vpc_id") + params["PeerRegion"] = module.params.get("peer_region") + if module.params.get("peer_owner_id"): + params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) + peering_conn = describe_peering_connections(params, client)["VpcPeeringConnections"][0] if not peering_conn: module.exit_json(changed=False) else: - pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId'] + pcx_id = pcx_id or peering_conn["VpcPeeringConnectionId"] - if peering_conn['Status']['Code'] == 'deleted': - module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id) - if peering_conn['Status']['Code'] == 'rejected': + if peering_conn["Status"]["Code"] == "deleted": + module.exit_json(msg="Connection in deleted state.", changed=False, peering_id=pcx_id) + if peering_conn["Status"]["Code"] == "rejected": module.exit_json( - msg='Connection has been rejected. 
State cannot be changed and will be removed automatically by AWS', + msg="Connection has been rejected. State cannot be changed and will be removed automatically by AWS", changed=False, - peering_id=pcx_id + peering_id=pcx_id, ) try: params = dict() - params['VpcPeeringConnectionId'] = pcx_id + params["VpcPeeringConnectionId"] = pcx_id client.delete_vpc_peering_connection(aws_retry=True, **params) - if module.params.get('wait'): - wait_for_state(client, module, 'deleted', pcx_id) + if module.params.get("wait"): + wait_for_state(client, module, "deleted", pcx_id) module.exit_json(changed=True, peering_id=pcx_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) @@ -494,44 +500,55 @@ def remove_peer_connection(client, module): def get_peering_connection_by_id(peering_id, client, module): params = dict() - params['VpcPeeringConnectionIds'] = [peering_id] + params["VpcPeeringConnectionIds"] = [peering_id] try: vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) - return vpc_peering_connection['VpcPeeringConnections'][0] - except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: - module.fail_json_aws(e, msg='Malformed connection ID') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error while describing peering connection by peering_id') + return vpc_peering_connection["VpcPeeringConnections"][0] + except is_boto3_error_code("InvalidVpcPeeringConnectionId.Malformed") as e: + module.fail_json_aws(e, msg="Malformed connection ID") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error while describing peering connection by peering_id") def accept_reject(state, client, module): changed = False params = dict() - peering_id = module.params.get('peering_id') - params['VpcPeeringConnectionId'] = peering_id + peering_id = module.params.get("peering_id") + params["VpcPeeringConnectionId"] = peering_id vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) - peering_status = vpc_peering_connection['Status']['Code'] + peering_status = vpc_peering_connection["Status"]["Code"] - if peering_status not in ['active', 'rejected']: + if peering_status not in ["active", "rejected"]: try: - if state == 'accept': + if state == "accept": client.accept_vpc_peering_connection(aws_retry=True, **params) - target_state = 'active' + target_state = "active" else: client.reject_vpc_peering_connection(aws_retry=True, **params) - target_state = 'rejected' - if module.params.get('tags'): - add_ec2_tags(client, module, peering_id, module.params.get('tags'), - retry_codes=['InvalidVpcPeeringConnectionID.NotFound']) + target_state = "rejected" + if module.params.get("tags"): + add_ec2_tags( + client, + module, + peering_id, + module.params.get("tags"), + retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], + ) changed = True - if module.params.get('wait'): + if module.params.get("wait"): wait_for_state(client, module, target_state, peering_id) except botocore.exceptions.ClientError as e: module.fail_json(msg=str(e)) - if ensure_ec2_tags(client, module, peering_id, - purge_tags=module.params.get('purge_tags'), - tags=module.params.get('tags'), - ): + if ensure_ec2_tags( + client, + module, + peering_id, + purge_tags=module.params.get("purge_tags"), + tags=module.params.get("tags"), + ): changed = True # Relaod 
peering connection info to return latest state/params @@ -546,34 +563,36 @@ def main(): peer_region=dict(), peering_id=dict(), peer_owner_id=dict(), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']), - wait=dict(default=False, type='bool'), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + state=dict(default="present", choices=["present", "absent", "accept", "reject"]), + wait=dict(default=False, type="bool"), ) required_if = [ - ('state', 'present', ['vpc_id', 'peer_vpc_id']), - ('state', 'accept', ['peering_id']), - ('state', 'reject', ['peering_id']) + ("state", "present", ["vpc_id", "peer_vpc_id"]), + ("state", "accept", ["peering_id"]), + ("state", "reject", ["peering_id"]), ] module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if) - state = module.params.get('state') - peering_id = module.params.get('peering_id') - vpc_id = module.params.get('vpc_id') - peer_vpc_id = module.params.get('peer_vpc_id') + state = module.params.get("state") + peering_id = module.params.get("peering_id") + vpc_id = module.params.get("vpc_id") + peer_vpc_id = module.params.get("peer_vpc_id") try: - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": (changed, results) = create_peer_connection(client, module) - elif state == 'absent': + elif state == "absent": if not peering_id and (not vpc_id or not peer_vpc_id): - module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]') + module.fail_json( + msg="state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]" + ) remove_peer_connection(client, module) else: @@ -581,10 +600,12 @@ def main(): formatted_results = camel_dict_to_snake_dict(results) # Turn the resource tags from boto3 into an ansible friendly tag dictionary - formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', [])) + formatted_results["tags"] = boto3_tag_list_to_ansible_dict(formatted_results.get("tags", [])) - module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId']) + module.exit_json( + changed=changed, vpc_peering_connection=formatted_results, peering_id=results["VpcPeeringConnectionId"] + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py index 8faf64b8906..ee9fda32118 100644 --- a/ec2_vpc_peering_info.py +++ b/ec2_vpc_peering_info.py @@ -216,41 +216,43 @@ def get_vpc_peers(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('peer_connection_ids'): - params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("peer_connection_ids"): + params["VpcPeeringConnectionIds"] = module.params.get("peer_connection_ids") try: result =
client.describe_vpc_peering_connections(aws_retry=True, **params) result = normalize_boto3_result(result) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe peering connections") - return result['VpcPeeringConnections'] + return result["VpcPeeringConnections"] def main(): argument_spec = dict( - filters=dict(default=dict(), type='dict'), - peer_connection_ids=dict(default=None, type='list', elements='str'), + filters=dict(default=dict(), type="dict"), + peer_connection_ids=dict(default=None, type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Turn the boto3 result in to ansible friendly_snaked_names results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)] # Turn the boto3 result in to ansible friendly tag dictionary for peer in results: - peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', [])) + peer["tags"] = boto3_tag_list_to_ansible_dict(peer.get("tags", [])) module.exit_json(result=results, vpc_peering_connections=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index 74aab4a077c..e59fe25839c 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -155,11 +155,14 @@ class VGWRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -179,37 +182,37 @@ def get_vgw_info(vgws): for vgw in vgws: vgw_info = { - 'id': vgw['VpnGatewayId'], - 'type': vgw['Type'], - 'state': vgw['State'], - 'vpc_id': None, - 'tags': dict() + "id": vgw["VpnGatewayId"], + "type": vgw["Type"], + "state": vgw["State"], + "vpc_id": None, + "tags": dict(), } - if vgw['Tags']: - vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags']) + if vgw["Tags"]: + vgw_info["tags"] = boto3_tag_list_to_ansible_dict(vgw["Tags"]) - if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached': - vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId'] + if len(vgw["VpcAttachments"]) != 0 and vgw["VpcAttachments"][0]["State"] == "attached": + vgw_info["vpc_id"] = vgw["VpcAttachments"][0]["VpcId"] return vgw_info def wait_for_status(client, module, vpn_gateway_id, status): polling_increment_secs = 15 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: response = find_vgw(client, module, vpn_gateway_id) - if response[0]['VpcAttachments'][0]['State'] == status: + if response[0]["VpcAttachments"][0]["State"] == 
status: status_achieved = True break else: time.sleep(polling_increment_secs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failure while waiting for status update') + module.fail_json_aws(e, msg="Failure while waiting for status update") result = response return status_achieved, result @@ -217,22 +220,21 @@ def wait_for_status(client, module, vpn_gateway_id, status): def attach_vgw(client, module, vpn_gateway_id): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State # as available several seconds before actually permitting a new attachment. # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185 - response = VGWRetry.jittered_backoff(retries=5, - catch_extra_error_codes=['InvalidParameterValue'] - )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id, - VpcId=params['VpcId']) + response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=["InvalidParameterValue"])( + client.attach_vpn_gateway + )(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to attach VPC') + module.fail_json_aws(e, msg="Failed to attach VPC") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "attached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to attach to vgw - please check the AWS console") result = response return result @@ -240,19 +242,19 @@ def attach_vgw(client, module, vpn_gateway_id): def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): params = dict() - params['VpcId'] = module.params.get('vpc_id') + params["VpcId"] = module.params.get("vpc_id") try: if vpc_id: response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True) else: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True) + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Failed to detach gateway') + module.fail_json_aws(e, "Failed to detach gateway") - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "detached") if not status_achieved: - module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console') + module.fail_json(msg="Error waiting for vpc to detach from vgw - please check the AWS console") result = response return result @@ -260,37 +262,37 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): def create_vgw(client, module): params = dict() - params['Type'] = module.params.get('type') - tags = module.params.get('tags') or {} - tags['Name'] = module.params.get('name') - params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway']) - if module.params.get('asn'): - params['AmazonSideAsn'] = module.params.get('asn') + params["Type"] = module.params.get("type") + tags = module.params.get("tags") or {} + 
tags["Name"] = module.params.get("name") + params["TagSpecifications"] = boto3_tag_specifications(tags, ["vpn-gateway"]) + if module.params.get("asn"): + params["AmazonSideAsn"] = module.params.get("asn") try: response = client.create_vpn_gateway(aws_retry=True, **params) - get_waiter( - client, 'vpn_gateway_exists' - ).wait( - VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']] - ) + get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId'])) - except is_boto3_error_code('VpnGatewayLimitExceeded') as e: + module.fail_json_aws( + e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response["VpnGateway"]["VpnGatewayId"]) + ) + except is_boto3_error_code("VpnGatewayLimitExceeded") as e: module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to create gateway') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to create gateway") result = response return result def delete_vgw(client, module, vpn_gateway_id): - try: response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to delete gateway') + module.fail_json_aws(e, msg="Failed to delete gateway") # return the deleted VpnGatewayId as this is not included in the above response result = vpn_gateway_id @@ -299,13 +301,13 @@ def delete_vgw(client, module, vpn_gateway_id): def find_vpc(client, module): params = dict() - params['vpc_id'] = module.params.get('vpc_id') + params["vpc_id"] = module.params.get("vpc_id") - if params['vpc_id']: + if params["vpc_id"]: try: - response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True) + response = client.describe_vpcs(VpcIds=[params["vpc_id"]], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe VPC') + module.fail_json_aws(e, msg="Failed to describe VPC") result = response return result @@ -314,66 +316,68 @@ def find_vpc(client, module): def find_vgw(client, module, vpn_gateway_id=None): params = dict() if vpn_gateway_id: - params['VpnGatewayIds'] = vpn_gateway_id + params["VpnGatewayIds"] = vpn_gateway_id else: - params['Filters'] = [ - {'Name': 'type', 'Values': [module.params.get('type')]}, - {'Name': 'tag:Name', 'Values': [module.params.get('name')]}, + params["Filters"] = [ + {"Name": "type", "Values": [module.params.get("type")]}, + {"Name": "tag:Name", "Values": [module.params.get("name")]}, ] - if module.params.get('state') == 'present': - params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']}) + if module.params.get("state") == "present": + params["Filters"].append({"Name": "state", "Values": ["pending", "available"]}) try: response = client.describe_vpn_gateways(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe gateway using filters') + module.fail_json_aws(e, msg="Failed to describe gateway 
using filters") - return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId']) + return sorted(response["VpnGateways"], key=lambda k: k["VpnGatewayId"]) def ensure_vgw_present(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will not create another vgw. changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check that the vpc_id exists. If not, an exception is thrown - if params['VpcId']: + if params["VpcId"]: vpc = find_vpc(client, module) # check if a gateway matching our module args already exists existing_vgw = find_vgw(client, module) if existing_vgw != []: - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - desired_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + desired_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if desired_tags is None: desired_tags = dict() purge_tags = False - tags = dict(Name=module.params.get('name')) + tags = dict(Name=module.params.get("name")) tags.update(desired_tags) - changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway', - tags=tags, purge_tags=purge_tags) + changed = ensure_ec2_tags( + client, module, vpn_gateway_id, resource_type="vpn-gateway", tags=tags, purge_tags=purge_tags + ) # if a vpc_id was provided, check if it exists and if it's attached - if params['VpcId']: - - current_vpc_attachments = existing_vgw[0]['VpcAttachments'] - - if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached': - if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached': + if params["VpcId"]: + current_vpc_attachments = existing_vgw[0]["VpcAttachments"] + + if current_vpc_attachments != [] and current_vpc_attachments[0]["State"] == "attached": + if ( + current_vpc_attachments[0]["VpcId"] != params["VpcId"] + or current_vpc_attachments[0]["State"] != "attached" + ): # detach the existing vpc from the virtual gateway - vpc_to_detach = current_vpc_attachments[0]['VpcId'] + vpc_to_detach = current_vpc_attachments[0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id]) + get_waiter(client, "vpn_gateway_detached").wait(VpnGatewayIds=[vpn_gateway_id]) attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True else: @@ -385,10 +389,10 @@ def ensure_vgw_present(client, module): else: existing_vgw = find_vgw(client, module, [vpn_gateway_id]) - if existing_vgw[0]['VpcAttachments'] != []: - if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if existing_vgw[0]["VpcAttachments"] != []: + if existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": # detach the vpc from the vgw - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) 
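# A note on ordering in the VGW flows above and below: both ensure_vgw_present
# and ensure_vgw_absent detach any live VPC attachment before re-attaching or
# deleting, because EC2 refuses to delete a virtual gateway while an attachment
# is still "attached". A minimal boto3-only sketch of that ordering follows;
# the gateway and VPC IDs are placeholders, and the real module goes through
# its VGWRetry decorator and a custom detach waiter rather than calling boto3
# directly like this.
import boto3

ec2 = boto3.client("ec2")
vgw_id = "vgw-0123456789abcdef0"  # placeholder; the module reads this from task params
vpc_id = "vpc-0123456789abcdef0"  # placeholder

# Detach first: delete_vpn_gateway fails while the attachment is still live.
ec2.detach_vpn_gateway(VpnGatewayId=vgw_id, VpcId=vpc_id)
# The module then polls until the attachment reports "detached" before
# deleting; stock boto3 ships no waiter for that state, hence the custom one.
ec2.delete_vpn_gateway(VpnGatewayId=vgw_id)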
changed = True @@ -396,10 +400,10 @@ def ensure_vgw_present(client, module): # create a new vgw new_vgw = create_vgw(client, module) changed = True - vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId'] + vpn_gateway_id = new_vgw["VpnGateway"]["VpnGatewayId"] # if a vpc-id was supplied, attempt to attach it to the vgw - if params['VpcId']: + if params["VpcId"]: attached_vgw = attach_vgw(client, module, vpn_gateway_id) changed = True @@ -410,45 +414,46 @@ def ensure_vgw_present(client, module): def ensure_vgw_absent(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been # found and we will take steps to delete it. changed = False params = dict() result = dict() - params['Name'] = module.params.get('name') - params['VpcId'] = module.params.get('vpc_id') - params['Type'] = module.params.get('type') - params['Tags'] = module.params.get('tags') - params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + params["Name"] = module.params.get("name") + params["VpcId"] = module.params.get("vpc_id") + params["Type"] = module.params.get("type") + params["Tags"] = module.params.get("tags") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") # check if a gateway matching our module args already exists - if params['VpnGatewayIds']: - existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']]) - if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted': + if params["VpnGatewayIds"]: + existing_vgw_with_id = find_vgw(client, module, [params["VpnGatewayIds"]]) + if existing_vgw_with_id != [] and existing_vgw_with_id[0]["State"] != "deleted": existing_vgw = existing_vgw_with_id - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId']) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + detach_vgw(client, module, params["VpnGatewayIds"], params["VpcId"]) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] - detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach) - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] + detach_vgw(client, module, params["VpnGatewayIds"], vpc_to_detach) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: # no vpc's are attached so attempt to delete the vgw - deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) changed = True else: @@ -457,20 +462,22 @@ def ensure_vgw_absent(client, module): else: # Check that a name and type argument has been supplied if no vgw-id - if not module.params.get('name') or not 
module.params.get('type'): - module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied') + if not module.params.get("name") or not module.params.get("type"): + module.fail_json(msg="A name and type is required when no vgw-id and a status of 'absent' is supplied") existing_vgw = find_vgw(client, module) - if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': - vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] - if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': - if params['VpcId']: - if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: - module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + if existing_vgw != [] and existing_vgw[0]["State"] != "deleted": + vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] + if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": + if params["VpcId"]: + if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: + module.fail_json( + msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" + ) else: # detach the vpc from the vgw - detach_vgw(client, module, vpn_gateway_id, params['VpcId']) + detach_vgw(client, module, vpn_gateway_id, params["VpcId"]) # now that the vpc has been detached, delete the vgw deleted_vgw = delete_vgw(client, module, vpn_gateway_id) @@ -478,7 +485,7 @@ def ensure_vgw_absent(client, module): else: # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) changed = True @@ -500,29 +507,28 @@ def ensure_vgw_absent(client, module): def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(), vpn_gateway_id=dict(), vpc_id=dict(), - asn=dict(type='int'), - wait_timeout=dict(type='int', default=320), - type=dict(default='ipsec.1', choices=['ipsec.1']), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + asn=dict(type="int"), + wait_timeout=dict(type="int", default=320), + type=dict(default="ipsec.1", choices=["ipsec.1"]), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['name']]]) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["name"]]]) - state = module.params.get('state').lower() + state = module.params.get("state").lower() - client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10)) + client = module.client("ec2", retry_decorator=VGWRetry.jittered_backoff(retries=10)) - if state == 'present': + if state == "present": (changed, results) = ensure_vgw_present(client, module) else: (changed, results) = ensure_vgw_absent(client, module) module.exit_json(changed=changed, vgw=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py index c729b2fa5b8..d8bfcc78ecb 100644 --- a/ec2_vpc_vgw_info.py +++ b/ec2_vpc_vgw_info.py @@ -135,13 +135,13 @@ def 
get_virtual_gateway_info(virtual_gateway): - tags = virtual_gateway.get('Tags', []) + tags = virtual_gateway.get("Tags", []) resource_tags = boto3_tag_list_to_ansible_dict(tags) virtual_gateway_info = dict( - VpnGatewayId=virtual_gateway['VpnGatewayId'], - State=virtual_gateway['State'], - Type=virtual_gateway['Type'], - VpcAttachments=virtual_gateway['VpcAttachments'], + VpnGatewayId=virtual_gateway["VpnGatewayId"], + State=virtual_gateway["State"], + Type=virtual_gateway["Type"], + VpcAttachments=virtual_gateway["VpcAttachments"], Tags=tags, ResourceTags=resource_tags, ) @@ -151,32 +151,34 @@ def get_virtual_gateway_info(virtual_gateway): def list_virtual_gateways(client, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) if module.params.get("vpn_gateway_ids"): - params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids") + params["VpnGatewayIds"] = module.params.get("vpn_gateway_ids") try: all_virtual_gateways = client.describe_vpn_gateways(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to list gateways") - return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags']) - for vgw in all_virtual_gateways['VpnGateways']] + return [ + camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=["ResourceTags"]) + for vgw in all_virtual_gateways["VpnGateways"] + ] def main(): argument_spec = dict( - filters=dict(type='dict', default=dict()), - vpn_gateway_ids=dict(type='list', default=None, elements='str'), + filters=dict(type="dict", default=dict()), + vpn_gateway_ids=dict(type="list", default=None, elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # call your function here results = list_virtual_gateways(connection, module) @@ -184,5 +186,5 @@ def main(): module.exit_json(virtual_gateways=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index b7e997fade0..8d8dc1467e1 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -321,11 +321,14 @@ def __init__(self, msg, exception=None): class VPNRetry(AWSRetry): @staticmethod def status_code_from_exception(error): - return (error.response['Error']['Code'], error.response['Error']['Message'],) + return ( + error.response["Error"]["Code"], + error.response["Error"]["Message"], + ) @staticmethod def found(response_code, catch_extra_error_codes=None): - retry_on = ['The maximum number of mutating objects has been reached.'] + retry_on = ["The maximum number of mutating objects has been reached."] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) @@ -340,14 +343,14 @@ def found(response_code, catch_extra_error_codes=None): def find_connection(connection, module_params, vpn_connection_id=None): - ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None, - or raise an error if there were multiple viable connections. ''' + """Looks for a unique VPN connection. 
Uses find_connection_response() to return the connection found, None, + or raise an error if there were multiple viable connections.""" - filters = module_params.get('filters') + filters = module_params.get("filters") # vpn_connection_id may be provided via module option; takes precedence over any filter values - if not vpn_connection_id and module_params.get('vpn_connection_id'): - vpn_connection_id = module_params.get('vpn_connection_id') + if not vpn_connection_id and module_params.get("vpn_connection_id"): + vpn_connection_id = module_params.get("vpn_connection_id") if not isinstance(vpn_connection_id, list) and vpn_connection_id: vpn_connection_id = [to_text(vpn_connection_id)] @@ -362,14 +365,13 @@ def find_connection(connection, module_params, vpn_connection_id=None): # see if there is a unique matching connection try: if vpn_connection_id: - existing_conn = connection.describe_vpn_connections(aws_retry=True, - VpnConnectionIds=vpn_connection_id, - Filters=formatted_filter) + existing_conn = connection.describe_vpn_connections( + aws_retry=True, VpnConnectionIds=vpn_connection_id, Filters=formatted_filter + ) else: existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while describing VPN connection.", - exception=e) + raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e) return find_connection_response(connections=existing_conn) @@ -377,48 +379,55 @@ def find_connection(connection, module_params, vpn_connection_id=None): def add_routes(connection, vpn_connection_id, routes_to_add): for route in routes_to_add: try: - connection.create_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.create_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), + exception=e, + ) def remove_routes(connection, vpn_connection_id, routes_to_remove): for route in routes_to_remove: try: - connection.delete_vpn_connection_route(aws_retry=True, - VpnConnectionId=vpn_connection_id, - DestinationCidrBlock=route) + connection.delete_vpn_connection_route( + aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), + exception=e, + ) def create_filter(module_params, provided_filters): - """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """ - boto3ify_filter = {'cgw-config': 'customer-gateway-configuration', - 'static-routes-only': 'option.static-routes-only', - 'cidr': 'route.destination-cidr-block', - 'bgp': 'bgp-asn', - 'vpn': 'vpn-connection-id', - 'vgw': 'vpn-gateway-id', - 'tag-keys': 'tag-key', - 'tag-values': 'tag-value', - 'tags': 'tag', - 'cgw': 'customer-gateway-id'} + """Creates a filter using the user-specified 
parameters and unmodifiable options that may have been specified in the task""" + boto3ify_filter = { + "cgw-config": "customer-gateway-configuration", + "static-routes-only": "option.static-routes-only", + "cidr": "route.destination-cidr-block", + "bgp": "bgp-asn", + "vpn": "vpn-connection-id", + "vgw": "vpn-gateway-id", + "tag-keys": "tag-key", + "tag-values": "tag-value", + "tags": "tag", + "cgw": "customer-gateway-id", + } # unmodifiable options and their filter name counterpart - param_to_filter = {"customer_gateway_id": "customer-gateway-id", - "vpn_gateway_id": "vpn-gateway-id", - "vpn_connection_id": "vpn-connection-id"} + param_to_filter = { + "customer_gateway_id": "customer-gateway-id", + "vpn_gateway_id": "vpn-gateway-id", + "vpn_connection_id": "vpn-connection-id", + } flat_filter_dict = {} formatted_filter = [] for raw_param in dict(provided_filters): - # fix filter names to be recognized by boto3 if raw_param in boto3ify_filter: param = boto3ify_filter[raw_param] @@ -429,14 +438,14 @@ def create_filter(module_params, provided_filters): raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param)) # reformat filters with special formats - if param == 'tag': + if param == "tag": for key in provided_filters[param]: - formatted_key = 'tag:' + key + formatted_key = "tag:" + key if isinstance(provided_filters[param][key], list): flat_filter_dict[formatted_key] = str(provided_filters[param][key]) else: flat_filter_dict[formatted_key] = [str(provided_filters[param][key])] - elif param == 'option.static-routes-only': + elif param == "option.static-routes-only": flat_filter_dict[param] = [str(provided_filters[param]).lower()] else: if isinstance(provided_filters[param], list): @@ -450,25 +459,25 @@ def create_filter(module_params, provided_filters): flat_filter_dict[param_to_filter[param]] = [module_params.get(param)] # change the flat dict into something boto3 will understand - formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()] + formatted_filter = [{"Name": key, "Values": value} for key, value in flat_filter_dict.items()] return formatted_filter def find_connection_response(connections=None): - """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found, - returns None if the connection does not exist, raise an error if multiple matches are found. """ + """Determine if there is a viable unique match in the connections described. 
Returns the unique VPN connection if one is found, + returns None if the connection does not exist, raise an error if multiple matches are found.""" # Found no connections - if not connections or 'VpnConnections' not in connections: + if not connections or "VpnConnections" not in connections: return None # Too many results - elif connections and len(connections['VpnConnections']) > 1: + elif connections and len(connections["VpnConnections"]) > 1: viable = [] - for each in connections['VpnConnections']: + for each in connections["VpnConnections"]: # deleted connections are not modifiable - if each['State'] not in ("deleted", "deleting"): + if each["State"] not in ("deleted", "deleting"): viable.append(each) if len(viable) == 1: # Found one viable result; return unique match @@ -477,20 +486,31 @@ def find_connection_response(connections=None): # Found a result but it was deleted already; since there was only one viable result create a new one return None else: - raise VPNConnectionException(msg="More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters.") + raise VPNConnectionException( + msg="More than one matching VPN connection was found. " + "To modify or delete a VPN please specify vpn_connection_id or add filters." + ) # Found unique match - elif connections and len(connections['VpnConnections']) == 1: + elif connections and len(connections["VpnConnections"]) == 1: # deleted connections are not modifiable - if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"): - return connections['VpnConnections'][0] - - -def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None): - """ Creates a VPN connection """ - - options = {'StaticRoutesOnly': static_only} + if connections["VpnConnections"][0]["State"] not in ("deleted", "deleting"): + return connections["VpnConnections"][0] + + +def create_connection( + connection, + customer_gateway_id, + static_only, + vpn_gateway_id, + connection_type, + max_attempts, + delay, + tunnel_options=None, +): + """Creates a VPN connection""" + + options = {"StaticRoutesOnly": static_only} if tunnel_options and len(tunnel_options) <= 2: t_opt = [] for m in tunnel_options: @@ -500,108 +520,102 @@ def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_ raise TypeError("non-dict list member") t_opt.append(m) if t_opt: - options['TunnelOptions'] = t_opt + options["TunnelOptions"] = t_opt if not (customer_gateway_id and vpn_gateway_id): - raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id.") + raise VPNConnectionException( + msg="No matching connection was found. To create a new connection you must provide " + "both vpn_gateway_id and customer_gateway_id." 
+ ) try: - vpn = connection.create_vpn_connection(Type=connection_type, - CustomerGatewayId=customer_gateway_id, - VpnGatewayId=vpn_gateway_id, - Options=options) - connection.get_waiter('vpn_connection_available').wait( - VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + vpn = connection.create_vpn_connection( + Type=connection_type, CustomerGatewayId=customer_gateway_id, VpnGatewayId=vpn_gateway_id, Options=options + ) + connection.get_waiter("vpn_connection_available").wait( + VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]], + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']), - exception=e) + raise VPNConnectionException( + msg="Failed to wait for VPN connection {0} to be available".format(vpn["VpnConnection"]["VpnConnectionId"]), + exception=e, + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to create VPN connection", - exception=e) + raise VPNConnectionException(msg="Failed to create VPN connection", exception=e) - return vpn['VpnConnection'] + return vpn["VpnConnection"] def delete_connection(connection, vpn_connection_id, delay, max_attempts): - """ Deletes a VPN connection """ + """Deletes a VPN connection""" try: connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id) - connection.get_waiter('vpn_connection_deleted').wait( - VpnConnectionIds=[vpn_connection_id], - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts} + connection.get_waiter("vpn_connection_deleted").wait( + VpnConnectionIds=[vpn_connection_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) except WaiterError as e: - raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), exception=e + ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), - exception=e) + raise VPNConnectionException( + msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), exception=e + ) def add_tags(connection, vpn_connection_id, add): try: - connection.create_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=add) + connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), - exception=e) + raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), exception=e) def remove_tags(connection, vpn_connection_id, remove): # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] - key_dict_list = [{'Key': tag} for tag in remove] + key_dict_list = [{"Key": tag} for tag in remove] try: - connection.delete_tags(aws_retry=True, - Resources=[vpn_connection_id], - Tags=key_dict_list) + connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), - exception=e) + raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), exception=e) def 
check_for_update(connection, module_params, vpn_connection_id): - """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """ - tags = module_params.get('tags') - routes = module_params.get('routes') - purge_tags = module_params.get('purge_tags') - purge_routes = module_params.get('purge_routes') + """Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change.""" + tags = module_params.get("tags") + routes = module_params.get("routes") + purge_tags = module_params.get("purge_tags") + purge_routes = module_params.get("purge_routes") vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id) current_attrs = camel_dict_to_snake_dict(vpn_connection) # Initialize changes dict - changes = {'tags_to_add': [], - 'tags_to_remove': [], - 'routes_to_add': [], - 'routes_to_remove': []} + changes = {"tags_to_add": [], "tags_to_remove": [], "routes_to_add": [], "routes_to_remove": []} # Get changes to tags - current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value') + current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get("tags", []), "key", "value") if tags is None: - changes['tags_to_remove'] = [] - changes['tags_to_add'] = [] + changes["tags_to_remove"] = [] + changes["tags_to_add"] = [] else: - tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags) - changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add) + tags_to_add, changes["tags_to_remove"] = compare_aws_tags(current_tags, tags, purge_tags) + changes["tags_to_add"] = ansible_dict_to_boto3_tag_list(tags_to_add) # Get changes to routes - if 'Routes' in vpn_connection: - current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']] + if "Routes" in vpn_connection: + current_routes = [route["DestinationCidrBlock"] for route in vpn_connection["Routes"]] if purge_routes: - changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes] - changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes] + changes["routes_to_remove"] = [old_route for old_route in current_routes if old_route not in routes] + changes["routes_to_add"] = [new_route for new_route in routes if new_route not in current_routes] # Check if nonmodifiable attributes are attempted to be modified for attribute in current_attrs: if attribute in ("tags", "routes", "state"): continue - elif attribute == 'options': - will_be = module_params.get('static_only', None) - is_now = bool(current_attrs[attribute]['static_routes_only']) - attribute = 'static_only' - elif attribute == 'type': + elif attribute == "options": + will_be = module_params.get("static_only", None) + is_now = bool(current_attrs[attribute]["static_routes_only"]) + attribute = "static_only" + elif attribute == "type": will_be = module_params.get("connection_type", None) is_now = current_attrs[attribute] else: @@ -609,110 +623,116 @@ def check_for_update(connection, module_params, vpn_connection_id): will_be = module_params.get(attribute, None) if will_be is not None and to_text(will_be) != to_text(is_now): - raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " - "connection attributes are tags and routes. 
The value you tried to change it to " - "is {2}.".format(attribute, is_now, will_be)) + raise VPNConnectionException( + msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " + "connection attributes are tags and routes. The value you tried to change it to " + "is {2}.".format(attribute, is_now, will_be) + ) return changes def make_changes(connection, vpn_connection_id, changes): - """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', - the values of which are lists (generated by check_for_update()). + """changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', + the values of which are lists (generated by check_for_update()). """ changed = False - if changes['tags_to_add']: + if changes["tags_to_add"]: changed = True - add_tags(connection, vpn_connection_id, changes['tags_to_add']) + add_tags(connection, vpn_connection_id, changes["tags_to_add"]) - if changes['tags_to_remove']: + if changes["tags_to_remove"]: changed = True - remove_tags(connection, vpn_connection_id, changes['tags_to_remove']) + remove_tags(connection, vpn_connection_id, changes["tags_to_remove"]) - if changes['routes_to_add']: + if changes["routes_to_add"]: changed = True - add_routes(connection, vpn_connection_id, changes['routes_to_add']) + add_routes(connection, vpn_connection_id, changes["routes_to_add"]) - if changes['routes_to_remove']: + if changes["routes_to_remove"]: changed = True - remove_routes(connection, vpn_connection_id, changes['routes_to_remove']) + remove_routes(connection, vpn_connection_id, changes["routes_to_remove"]) return changed def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None): - """ Returns the changes that would be made to a VPN Connection """ - state = module_params.get('state') - if state == 'absent': + """Returns the changes that would be made to a VPN Connection""" + state = module_params.get("state") + if state == "absent": if vpn_connection_id: return True, {} else: return False, {} changed = False - results = {'customer_gateway_configuration': '', - 'customer_gateway_id': module_params.get('customer_gateway_id'), - 'vpn_gateway_id': module_params.get('vpn_gateway_id'), - 'options': {'static_routes_only': module_params.get('static_only')}, - 'routes': [module_params.get('routes')]} + results = { + "customer_gateway_configuration": "", + "customer_gateway_id": module_params.get("customer_gateway_id"), + "vpn_gateway_id": module_params.get("vpn_gateway_id"), + "options": {"static_routes_only": module_params.get("static_only")}, + "routes": [module_params.get("routes")], + } # get combined current tags and tags to set - present_tags = module_params.get('tags') + present_tags = module_params.get("tags") if present_tags is None: pass - elif current_state and 'Tags' in current_state: - current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags']) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags')) + elif current_state and "Tags" in current_state: + current_tags = boto3_tag_list_to_ansible_dict(current_state["Tags"]) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get("purge_tags")) changed |= bool(tags_to_remove) or bool(tags_to_add) - if module_params.get('purge_tags'): + if module_params.get("purge_tags"): current_tags = {} current_tags.update(present_tags) - results['tags'] = current_tags - elif 
module_params.get('tags'): + results["tags"] = current_tags + elif module_params.get("tags"): changed = True if present_tags: - results['tags'] = present_tags + results["tags"] = present_tags # get combined current routes and routes to add - present_routes = module_params.get('routes') - if current_state and 'Routes' in current_state: - current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']] - if module_params.get('purge_routes'): + present_routes = module_params.get("routes") + if current_state and "Routes" in current_state: + current_routes = [route["DestinationCidrBlock"] for route in current_state["Routes"]] + if module_params.get("purge_routes"): if set(current_routes) != set(present_routes): changed = True elif set(present_routes) != set(current_routes): if not set(present_routes) < set(current_routes): changed = True present_routes.extend([route for route in current_routes if route not in present_routes]) - elif module_params.get('routes'): + elif module_params.get("routes"): changed = True - results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] + results["routes"] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] # return the vpn_connection_id if it's known if vpn_connection_id: - results['vpn_connection_id'] = vpn_connection_id + results["vpn_connection_id"] = vpn_connection_id else: changed = True - results['vpn_connection_id'] = 'vpn-XXXXXXXX' + results["vpn_connection_id"] = "vpn-XXXXXXXX" return changed, results def ensure_present(connection, module_params, check_mode=False): - """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """ + """Creates and adds tags to a VPN connection. If the connection already exists update tags.""" vpn_connection = find_connection(connection, module_params) changed = False - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay # No match but vpn_connection_id was specified. - if not vpn_connection and module_params.get('vpn_connection_id'): - raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?") + if not vpn_connection and module_params.get("vpn_connection_id"): + raise VPNConnectionException( + msg="There is no VPN connection available or pending with that id. Did you delete it?" + ) # Unique match was found. Check if attributes provided differ. 
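# A minimal sketch of the tag-diff contract that check_for_update() and
# get_check_mode_results() rely on (amazon.aws's compare_aws_tags). The
# helper name and the tag values below are hypothetical, not taken from
# this patch; compare_aws_tags itself returns the same (to_set, to_unset)
# pair shape.
def _tag_diff_sketch():
    current = {"Name": "vpn-a", "env": "dev"}
    desired = {"Name": "vpn-a", "env": "prod"}
    purge = True
    # Tags whose value is new or different must be (re)applied.
    to_set = {k: v for k, v in desired.items() if current.get(k) != v}
    # With purging enabled, tags absent from the desired set are removed.
    to_unset = [k for k in current if k not in desired] if purge else []
    return to_set, to_unset  # -> ({"env": "prod"}, [])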
elif vpn_connection: - vpn_connection_id = vpn_connection['VpnConnectionId'] + vpn_connection_id = vpn_connection["VpnConnectionId"] # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove changes = check_for_update(connection, module_params, vpn_connection_id) if check_mode: @@ -724,38 +744,42 @@ def ensure_present(connection, module_params, check_mode=False): changed = True if check_mode: return get_check_mode_results(connection, module_params) - vpn_connection = create_connection(connection, - customer_gateway_id=module_params.get('customer_gateway_id'), - static_only=module_params.get('static_only'), - vpn_gateway_id=module_params.get('vpn_gateway_id'), - connection_type=module_params.get('connection_type'), - tunnel_options=module_params.get('tunnel_options'), - max_attempts=max_attempts, - delay=delay) - changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId']) - make_changes(connection, vpn_connection['VpnConnectionId'], changes) + vpn_connection = create_connection( + connection, + customer_gateway_id=module_params.get("customer_gateway_id"), + static_only=module_params.get("static_only"), + vpn_gateway_id=module_params.get("vpn_gateway_id"), + connection_type=module_params.get("connection_type"), + tunnel_options=module_params.get("tunnel_options"), + max_attempts=max_attempts, + delay=delay, + ) + changes = check_for_update(connection, module_params, vpn_connection["VpnConnectionId"]) + make_changes(connection, vpn_connection["VpnConnectionId"], changes) # get latest version if a change has been made and make tags output nice before returning it if vpn_connection: - vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId']) - if 'Tags' in vpn_connection: - vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags']) + vpn_connection = find_connection(connection, module_params, vpn_connection["VpnConnectionId"]) + if "Tags" in vpn_connection: + vpn_connection["Tags"] = boto3_tag_list_to_ansible_dict(vpn_connection["Tags"]) return changed, vpn_connection def ensure_absent(connection, module_params, check_mode=False): - """ Deletes a VPN connection if it exists. 
""" + """Deletes a VPN connection if it exists.""" vpn_connection = find_connection(connection, module_params) if check_mode: - return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None) + return get_check_mode_results( + connection, module_params, vpn_connection["VpnConnectionId"] if vpn_connection else None + ) - delay = module_params.get('delay') - max_attempts = module_params.get('wait_timeout') // delay + delay = module_params.get("delay") + max_attempts = module_params.get("wait_timeout") // delay if vpn_connection: - delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts) + delete_connection(connection, vpn_connection["VpnConnectionId"], delay=delay, max_attempts=max_attempts) changed = True else: changed = False @@ -765,32 +789,31 @@ def ensure_absent(connection, module_params, check_mode=False): def main(): argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - filters=dict(type='dict', default={}), - vpn_gateway_id=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - connection_type=dict(default='ipsec.1', type='str'), - tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'), - static_only=dict(default=False, type='bool'), - customer_gateway_id=dict(type='str'), - vpn_connection_id=dict(type='str'), - purge_tags=dict(type='bool', default=True), - routes=dict(type='list', default=[], elements='str'), - purge_routes=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=600), - delay=dict(type='int', default=15), + state=dict(type="str", default="present", choices=["present", "absent"]), + filters=dict(type="dict", default={}), + vpn_gateway_id=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + connection_type=dict(default="ipsec.1", type="str"), + tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"), + static_only=dict(default=False, type="bool"), + customer_gateway_id=dict(type="str"), + vpn_connection_id=dict(type="str"), + purge_tags=dict(type="bool", default=True), + routes=dict(type="list", default=[], elements="str"), + purge_routes=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=600), + delay=dict(type="int", default=15), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10)) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") parameters = dict(module.params) try: - if state == 'present': + if state == "present": changed, response = ensure_present(connection, parameters, module.check_mode) - elif state == 'absent': + elif state == "absent": changed, response = ensure_absent(connection, parameters, module.check_mode) except VPNConnectionException as e: if e.exception: @@ -801,5 +824,5 @@ def main(): module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py index 5070af22266..95d8a8ca7e5 100644 --- a/ec2_vpc_vpn_info.py +++ b/ec2_vpc_vpn_info.py @@ -175,14 +175,14 @@ def date_handler(obj): - return obj.isoformat() if hasattr(obj, 'isoformat') else obj + 
return obj.isoformat() if hasattr(obj, "isoformat") else obj def list_vpn_connections(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['VpnConnectionIds'] = module.params.get('vpn_connection_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["VpnConnectionIds"] = module.params.get("vpn_connection_ids") try: result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler)) @@ -190,28 +190,29 @@ def list_vpn_connections(connection, module): module.fail_json_aws(e, msg="Cannot validate JSON data") except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']] + snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result["VpnConnections"]] if snaked_vpn_connections: for vpn_connection in snaked_vpn_connections: - vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', [])) + vpn_connection["tags"] = boto3_tag_list_to_ansible_dict(vpn_connection.get("tags", [])) module.exit_json(changed=False, vpn_connections=snaked_vpn_connections) def main(): - argument_spec = dict( - vpn_connection_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + vpn_connection_ids=dict(default=[], type="list", elements="str"), + filters=dict(default={}, type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['vpn_connection_ids', 'filters']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[["vpn_connection_ids", "filters"]], + supports_check_mode=True, + ) - connection = module.client('ec2') + connection = module.client("ec2") list_vpn_connections(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ec2_win_password.py b/ec2_win_password.py index aec9940cd30..d1553c91aae 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -123,40 +123,40 @@ def setup_module_object(): argument_spec = dict( instance_id=dict(required=True), - key_file=dict(required=False, default=None, type='path'), + key_file=dict(required=False, default=None, type="path"), key_passphrase=dict(no_log=True, default=None, required=False), key_data=dict(no_log=True, default=None, required=False), - wait=dict(type='bool', default=False, required=False), - wait_timeout=dict(default=120, required=False, type='int'), + wait=dict(type="bool", default=False, required=False), + wait_timeout=dict(default=120, required=False, type="int"), ) - mutually_exclusive = [['key_file', 'key_data']] + mutually_exclusive = [["key_file", "key_data"]] module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) return module def _get_password(module, client, instance_id): try: - data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData'] + data = client.get_password_data(aws_retry=True, InstanceId=instance_id)["PasswordData"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get password data') + module.fail_json_aws(e, msg="Failed to get password data") return data def ec2_win_password(module): - instance_id = module.params.get('instance_id') - key_file = 
module.params.get('key_file') - if module.params.get('key_passphrase') is None: + instance_id = module.params.get("instance_id") + key_file = module.params.get("key_file") + if module.params.get("key_passphrase") is None: b_key_passphrase = None else: - b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict') - if module.params.get('key_data') is None: + b_key_passphrase = to_bytes(module.params.get("key_passphrase"), errors="surrogate_or_strict") + if module.params.get("key_data") is None: b_key_data = None else: - b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + b_key_data = to_bytes(module.params.get("key_data"), errors="surrogate_or_strict") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) if wait: start = datetime.datetime.now() @@ -178,7 +178,7 @@ def ec2_win_password(module): if key_file is not None and b_key_data is None: try: - with open(key_file, 'rb') as f: + with open(key_file, "rb") as f: key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) except IOError as e: # Handle bad files @@ -198,7 +198,7 @@ def ec2_win_password(module): decrypted = None if decrypted is None: - module.fail_json(msg="unable to decrypt password", win_password='', changed=False) + module.fail_json(msg="unable to decrypt password", win_password="", changed=False) else: if wait: elapsed = datetime.datetime.now() - start @@ -211,10 +211,10 @@ def main(): module = setup_module_object() if not HAS_CRYPTOGRAPHY: - module.fail_json(msg='cryptography package required for this module.') + module.fail_json(msg="cryptography package required for this module.") ec2_win_password(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ecs_attribute.py b/ecs_attribute.py index c6931fc4f53..085761b19c3 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -135,7 +135,7 @@ def __iter__(self): @staticmethod def _validate_attrs(attrs): - return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs) + return all(tuple(attr.keys()) in (("name", "value"), ("value", "name")) for attr in attrs) def _parse_attrs(self, attrs): attrs_parsed = [] @@ -144,20 +144,18 @@ def _parse_attrs(self, attrs): if len(attr) != 1: self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) name, value = list(attr.items())[0] - attrs_parsed.append({'name': name, 'value': value}) + attrs_parsed.append({"name": name, "value": value}) elif isinstance(attr, str): - attrs_parsed.append({'name': attr, 'value': None}) + attrs_parsed.append({"name": attr, "value": None}) else: self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) return attrs_parsed def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False): - attr_obj = {'targetType': 'container-instance', - 'targetId': ecs_arn, - 'name': name} + attr_obj = {"targetType": "container-instance", "targetId": ecs_arn, "name": name} if not skip_value and value is not None: - attr_obj['value'] = value + attr_obj["value"] = value return attr_obj @@ -186,23 +184,25 @@ def __init__(self, module, cluster, ec2_id): self.ec2_id = ec2_id try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") self.ecs_arn = self._get_ecs_arn() def _get_ecs_arn(self): try: - ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns'] - ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster, - containerInstances=ecs_instances_arns)['containerInstances'] + ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)["containerInstanceArns"] + ec2_instances = self.ecs.describe_container_instances( + cluster=self.cluster, containerInstances=ecs_instances_arns + )["containerInstances"] except (ClientError, EndpointConnectionError) as e: self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) try: - ecs_arn = next(inst for inst in ec2_instances - if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn'] + ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ + "containerInstanceArn" + ] except StopIteration: self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster)) @@ -211,16 +211,16 @@ def _get_ecs_arn(self): def attrs_put(self, attrs): """Puts attributes on ECS container instance""" try: - self.ecs.put_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn)) + self.ecs.put_attributes(cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn)) except ClientError as e: self.module.fail_json(msg=str(e)) def attrs_delete(self, attrs): """Deletes attributes from ECS container instance.""" try: - self.ecs.delete_attributes(cluster=self.cluster, - attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True)) + self.ecs.delete_attributes( + cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True) + ) except ClientError as e: self.module.fail_json(msg=str(e)) @@ -229,33 +229,33 @@ def attrs_get_by_name(self, attrs): Returns EcsAttributes object containing attributes from ECS container instance with names matching to attrs.attributes (EcsAttributes Object). 
""" - attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']} - for attr in attrs] + attr_objs = [{"targetType": "container-instance", "attributeName": attr["name"]} for attr in attrs] try: - matched_ecs_targets = [attr_found for attr_obj in attr_objs - for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']] + matched_ecs_targets = [ + attr_found + for attr_obj in attr_objs + for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] + ] except ClientError as e: self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) - matched_objs = [target for target in matched_ecs_targets - if target['targetId'] == self.ecs_arn] + matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] - results = [{'name': match['name'], 'value': match.get('value', None)} - for match in matched_objs] + results = [{"name": match["name"], "value": match.get("value", None)} for match in matched_objs] return EcsAttributes(self.module, results) def main(): argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - cluster=dict(required=True, type='str'), - ec2_instance_id=dict(required=True, type='str'), - attributes=dict(required=True, type='list', elements='dict'), + state=dict(required=False, default="present", choices=["present", "absent"]), + cluster=dict(required=True, type="str"), + ec2_instance_id=dict(required=True, type="str"), + attributes=dict(required=True, type="list", elements="dict"), ) - required_together = [['cluster', 'ec2_instance_id', 'attributes']] + required_together = [["cluster", "ec2_instance_id", "attributes"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -263,39 +263,43 @@ def main(): required_together=required_together, ) - cluster = module.params['cluster'] - ec2_instance_id = module.params['ec2_instance_id'] - attributes = module.params['attributes'] + cluster = module.params["cluster"] + ec2_instance_id = module.params["ec2_instance_id"] + attributes = module.params["attributes"] conti = Ec2EcsInstance(module, cluster, ec2_instance_id) attrs = EcsAttributes(module, attributes) - results = {'changed': False, - 'attributes': [ - {'cluster': cluster, - 'ec2_instance_id': ec2_instance_id, - 'attributes': attributes} - ]} + results = { + "changed": False, + "attributes": [ + { + "cluster": cluster, + "ec2_instance_id": ec2_instance_id, + "attributes": attributes, + } + ], + } attrs_present = conti.attrs_get_by_name(attrs) - if module.params['state'] == 'present': + if module.params["state"] == "present": attrs_diff = attrs.diff(attrs_present) if not attrs_diff: module.exit_json(**results) conti.attrs_put(attrs_diff) - results['changed'] = True + results["changed"] = True - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not attrs_present: module.exit_json(**results) conti.attrs_delete(attrs_present) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ecs_cluster.py b/ecs_cluster.py index 7520cd0abc9..e627cd98f1b 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -182,27 +182,25 @@ class EcsClusterManager: def __init__(self, module): self.module = module try: - self.ecs = module.client('ecs') + self.ecs = module.client("ecs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to 
connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'): + def find_in_array(self, array_of_clusters, cluster_name, field_name="clusterArn"): for c in array_of_clusters: if c[field_name].endswith(cluster_name): return c return None def describe_cluster(self, cluster_name): - response = self.ecs.describe_clusters(clusters=[ - cluster_name - ]) - if len(response['failures']) > 0: - c = self.find_in_array(response['failures'], cluster_name, 'arn') - if c and c['reason'] == 'MISSING': + response = self.ecs.describe_clusters(clusters=[cluster_name]) + if len(response["failures"]) > 0: + c = self.find_in_array(response["failures"], cluster_name, "arn") + if c and c["reason"] == "MISSING": return None # fall thru and look through found ones - if len(response['clusters']) > 0: - c = self.find_in_array(response['clusters'], cluster_name) + if len(response["clusters"]) > 0: + c = self.find_in_array(response["clusters"], cluster_name) if c: return c raise Exception("Unknown problem describing cluster %s." % cluster_name) @@ -210,48 +208,49 @@ def describe_cluster(self, cluster_name): def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(clusterName=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) response = self.ecs.create_cluster(**params) - return response['cluster'] + return response["cluster"] def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(cluster=cluster_name) if capacity_providers: - params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) else: - params['capacityProviders'] = [] + params["capacityProviders"] = [] if capacity_provider_strategy: - params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) else: - params['defaultCapacityProviderStrategy'] = [] + params["defaultCapacityProviderStrategy"] = [] response = self.ecs.put_cluster_capacity_providers(**params) - return response['cluster'] + return response["cluster"] def delete_cluster(self, clusterName): return self.ecs.delete_cluster(cluster=clusterName) def main(): - argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'has_instances']), - name=dict(required=True, type='str'), - delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10), - purge_capacity_providers=dict(required=False, type='bool', default=False), - capacity_providers=dict(required=False, type='list', elements='str'), - capacity_provider_strategy=dict(required=False, - type='list', - elements='dict', - options=dict(capacity_provider=dict(type='str'), - weight=dict(type='int'), - base=dict(type='int', default=0) - ) - ), + state=dict(required=True, choices=["present", "absent", "has_instances"]), + name=dict(required=True, type="str"), + delay=dict(required=False, type="int", default=10), + 
repeat=dict(required=False, type="int", default=10), + purge_capacity_providers=dict(required=False, type="bool", default=False), + capacity_providers=dict(required=False, type="list", elements="str"), + capacity_provider_strategy=dict( + required=False, + type="list", + elements="dict", + options=dict( + capacity_provider=dict(type="str"), + weight=dict(type="int"), + base=dict(type="int", default=0), + ), + ), ) - required_together = [['state', 'name']] + required_together = [["state", "name"]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -261,19 +260,19 @@ def main(): cluster_mgr = EcsClusterManager(module) try: - existing = cluster_mgr.describe_cluster(module.params['name']) + existing = cluster_mgr.describe_cluster(module.params["name"]) except Exception as e: - module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e)) + module.fail_json(msg="Exception describing cluster '" + module.params["name"] + "': " + str(e)) results = dict(changed=False) - if module.params['state'] == 'present': + if module.params["state"] == "present": # Pull requested and existing capacity providers and strategies. - purge_capacity_providers = module.params['purge_capacity_providers'] - requested_cp = module.params['capacity_providers'] - requested_cps = module.params['capacity_provider_strategy'] - if existing and 'status' in existing and existing['status'] == "ACTIVE": - existing_cp = existing['capacityProviders'] - existing_cps = existing['defaultCapacityProviderStrategy'] + purge_capacity_providers = module.params["purge_capacity_providers"] + requested_cp = module.params["capacity_providers"] + requested_cps = module.params["capacity_provider_strategy"] + if existing and "status" in existing and existing["status"] == "ACTIVE": + existing_cp = existing["capacityProviders"] + existing_cps = existing["defaultCapacityProviderStrategy"] if requested_cp is None: requested_cp = [] @@ -292,9 +291,12 @@ def main(): # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. if not purge_capacity_providers: - module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' - ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', - date='2024-06-01', collection_name='community.aws') + module.deprecate( + "After 2024-06-01 the default value of purge_capacity_providers will change from false to true." + " To maintain the existing behaviour explicitly set purge_capacity_providers=true", + date="2024-06-01", + collection_name="community.aws", + ) cps_update_needed = False requested_cp = existing_cp requested_cps = existing_cps @@ -302,57 +304,67 @@ def main(): # If either the providers or strategy differ, update the cluster. if requested_cp != existing_cp or cps_update_needed: if not module.check_mode: - results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.update_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True else: - results['cluster'] = existing + results["cluster"] = existing else: if not module.check_mode: # doesn't exist. create it. 
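# A minimal sketch of the capacity-provider idempotency check above,
# assuming the usual behaviour of
# ansible.module_utils.common.dict_transformations.snake_dict_to_camel_dict
# (snake_case keys become camelCase, recursively through lists). The
# strategy values are hypothetical.
def _capacity_provider_diff_sketch():
    from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

    requested = [{"capacity_provider": "FARGATE", "weight": 1, "base": 0}]
    existing = [{"capacityProvider": "FARGATE", "weight": 1, "base": 0}]
    # Only call put_cluster_capacity_providers() when the camelised request
    # differs from what describe_clusters() already reports.
    return snake_dict_to_camel_dict(requested) != existing  # False -> no update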
- results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps) - results['changed'] = True + results["cluster"] = cluster_mgr.create_cluster( + cluster_name=module.params["name"], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps, + ) + results["changed"] = True # delete the cluster - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": if not existing: pass else: # it exists, so we should delete it and mark changed. # return info about the cluster deleted - results['cluster'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["cluster"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: - cluster_mgr.delete_cluster(module.params['name']) - results['changed'] = True - elif module.params['state'] == 'has_instances': + cluster_mgr.delete_cluster(module.params["name"]) + results["changed"] = True + elif module.params["state"] == "has_instances": if not existing: - module.fail_json(msg="Cluster '" + module.params['name'] + " not found.") + module.fail_json(msg="Cluster '" + module.params["name"] + " not found.") return # it exists, so we should delete it and mark changed. # return info about the cluster deleted - delay = module.params['delay'] - repeat = module.params['repeat'] + delay = module.params["delay"] + repeat = module.params["repeat"] time.sleep(delay) count = 0 for i in range(repeat): - existing = cluster_mgr.describe_cluster(module.params['name']) - count = existing['registeredContainerInstancesCount'] + existing = cluster_mgr.describe_cluster(module.params["name"]) + count = existing["registeredContainerInstancesCount"] if count > 0: - results['changed'] = True + results["changed"] = True break time.sleep(delay) if count == 0 and i is repeat - 1: - module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.") + module.fail_json( + msg="Cluster instance count still zero after " + + str(repeat) + + " tries of " + + str(delay) + + " seconds each." 
+ ) return module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ecs_ecr.py b/ecs_ecr.py index 4b5ce1ebe75..1e6efd7b331 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -245,45 +245,46 @@ def build_kwargs(registry_id): class EcsEcr: def __init__(self, module): - self.ecr = module.client('ecr') - self.sts = module.client('sts') + self.ecr = module.client("ecr") + self.sts = module.client("sts") self.check_mode = module.check_mode self.changed = False self.skipped = False def get_repository(self, registry_id, name): try: - res = self.ecr.describe_repositories( - repositoryNames=[name], **build_kwargs(registry_id)) - repos = res.get('repositories') + res = self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id)) + repos = res.get("repositories") return repos and repos[0] - except is_boto3_error_code('RepositoryNotFoundException'): + except is_boto3_error_code("RepositoryNotFoundException"): return None def get_repository_policy(self, registry_id, name): try: - res = self.ecr.get_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('policyText') + res = self.ecr.get_repository_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("policyText") return text and json.loads(text) - except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): + except is_boto3_error_code(["RepositoryNotFoundException", "RepositoryPolicyNotFoundException"]): return None def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: - default_registry_id = self.sts.get_caller_identity().get('Account') + default_registry_id = self.sts.get_caller_identity().get("Account") if registry_id != default_registry_id: - raise Exception('Cannot create repository in registry {0}.' - 'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + raise Exception( + "Cannot create repository in registry {0}." 
+ "Would be created in {1} instead.".format(registry_id, default_registry_id) + ) if encryption_configuration is None: - encryption_configuration = dict(encryptionType='AES256') + encryption_configuration = dict(encryptionType="AES256") if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, imageTagMutability=image_tag_mutability, - encryptionConfiguration=encryption_configuration).get('repository') + encryptionConfiguration=encryption_configuration, + ).get("repository") self.changed = True return repo else: @@ -293,10 +294,8 @@ def create_repository(self, registry_id, name, image_tag_mutability, encryption_ def set_repository_policy(self, registry_id, name, policy_text, force): if not self.check_mode: policy = self.ecr.set_repository_policy( - repositoryName=name, - policyText=policy_text, - force=force, - **build_kwargs(registry_id)) + repositoryName=name, policyText=policy_text, force=force, **build_kwargs(registry_id) + ) self.changed = True return policy else: @@ -304,15 +303,13 @@ def set_repository_policy(self, registry_id, name, policy_text, force): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = "{0}:{1}".format(registry_id, name) + raise Exception("could not find repository {0}".format(printable)) return def delete_repository(self, registry_id, name, force): if not self.check_mode: - repo = self.ecr.delete_repository( - repositoryName=name, force=force, **build_kwargs(registry_id)) + repo = self.ecr.delete_repository(repositoryName=name, force=force, **build_kwargs(registry_id)) self.changed = True return repo else: @@ -324,8 +321,7 @@ def delete_repository(self, registry_id, name, force): def delete_repository_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_repository_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_repository_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -337,36 +333,33 @@ def delete_repository_policy(self, registry_id, name): def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration): repo = self.get_repository(registry_id, name) - current_mutability_configuration = repo.get('imageTagMutability') + current_mutability_configuration = repo.get("imageTagMutability") if current_mutability_configuration != new_mutability_configuration: if not self.check_mode: self.ecr.put_image_tag_mutability( - repositoryName=name, - imageTagMutability=new_mutability_configuration, - **build_kwargs(registry_id)) + repositoryName=name, imageTagMutability=new_mutability_configuration, **build_kwargs(registry_id) + ) else: self.skipped = True self.changed = True - repo['imageTagMutability'] = new_mutability_configuration + repo["imageTagMutability"] = new_mutability_configuration return repo def get_lifecycle_policy(self, registry_id, name): try: - res = self.ecr.get_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) - text = res.get('lifecyclePolicyText') + res = self.ecr.get_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) + text = res.get("lifecyclePolicyText") return text and json.loads(text) - except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']): + except is_boto3_error_code(["LifecyclePolicyNotFoundException", "RepositoryNotFoundException"]): return None 
def put_lifecycle_policy(self, registry_id, name, policy_text): if not self.check_mode: policy = self.ecr.put_lifecycle_policy( - repositoryName=name, - lifecyclePolicyText=policy_text, - **build_kwargs(registry_id)) + repositoryName=name, lifecyclePolicyText=policy_text, **build_kwargs(registry_id) + ) self.changed = True return policy else: @@ -374,15 +367,13 @@ def put_lifecycle_policy(self, registry_id, name, policy_text): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = '{0}:{1}'.format(registry_id, name) - raise Exception( - 'could not find repository {0}'.format(printable)) + printable = "{0}:{1}".format(registry_id, name) + raise Exception("could not find repository {0}".format(printable)) return def purge_lifecycle_policy(self, registry_id, name): if not self.check_mode: - policy = self.ecr.delete_lifecycle_policy( - repositoryName=name, **build_kwargs(registry_id)) + policy = self.ecr.delete_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) self.changed = True return policy else: @@ -396,14 +387,11 @@ def put_image_scanning_configuration(self, registry_id, name, scan_on_push): if not self.check_mode: if registry_id: scan = self.ecr.put_image_scanning_configuration( - registryId=registry_id, - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + registryId=registry_id, repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) else: scan = self.ecr.put_image_scanning_configuration( - repositoryName=name, - imageScanningConfiguration={'scanOnPush': scan_on_push} + repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} ) self.changed = True return scan @@ -413,11 +401,11 @@ def put_image_scanning_configuration(self, registry_id, name, scan_on_push): def sort_lists_of_strings(policy): - for statement_index in range(0, len(policy.get('Statement', []))): - for key in policy['Statement'][statement_index]: - value = policy['Statement'][statement_index][key] + for statement_index in range(0, len(policy.get("Statement", []))): + for key in policy["Statement"][statement_index]: + value = policy["Statement"][statement_index][key] if isinstance(value, list) and all(isinstance(item, string_types) for item in value): - policy['Statement'][statement_index][key] = sorted(value) + policy["Statement"][statement_index][key] = sorted(value) return policy @@ -425,145 +413,138 @@ def run(ecr, params): # type: (EcsEcr, dict, int) -> Tuple[bool, dict] result = {} try: - name = params['name'] - state = params['state'] - policy_text = params['policy'] - purge_policy = params['purge_policy'] - force_absent = params['force_absent'] - registry_id = params['registry_id'] - force_set_policy = params['force_set_policy'] - image_tag_mutability = params['image_tag_mutability'].upper() - lifecycle_policy_text = params['lifecycle_policy'] - purge_lifecycle_policy = params['purge_lifecycle_policy'] - scan_on_push = params['scan_on_push'] - encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) + name = params["name"] + state = params["state"] + policy_text = params["policy"] + purge_policy = params["purge_policy"] + force_absent = params["force_absent"] + registry_id = params["registry_id"] + force_set_policy = params["force_set_policy"] + image_tag_mutability = params["image_tag_mutability"].upper() + lifecycle_policy_text = params["lifecycle_policy"] + purge_lifecycle_policy = params["purge_lifecycle_policy"] + scan_on_push = params["scan_on_push"] 
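# Worked example for the sort_lists_of_strings() helper defined above
# (assumed in scope here): policy documents are order-insensitive for lists
# of strings, so both sides are normalised before compare_policies() decides
# whether an update is needed. The statement is a hypothetical sample.
def _sort_policy_sketch():
    policy = {"Statement": [{"Effect": "Allow", "Action": ["ecr:PutImage", "ecr:BatchGetImage"]}]}
    normalised = sort_lists_of_strings(policy)
    return normalised["Statement"][0]["Action"]  # ["ecr:BatchGetImage", "ecr:PutImage"]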
+ encryption_configuration = snake_dict_to_camel_dict(params["encryption_configuration"]) # Parse policies, if they are given try: policy = policy_text and json.loads(policy_text) except ValueError: - result['policy'] = policy_text - result['msg'] = 'Could not parse policy' + result["policy"] = policy_text + result["msg"] = "Could not parse policy" return False, result try: - lifecycle_policy = \ - lifecycle_policy_text and json.loads(lifecycle_policy_text) + lifecycle_policy = lifecycle_policy_text and json.loads(lifecycle_policy_text) except ValueError: - result['lifecycle_policy'] = lifecycle_policy_text - result['msg'] = 'Could not parse lifecycle_policy' + result["lifecycle_policy"] = lifecycle_policy_text + result["msg"] = "Could not parse lifecycle_policy" return False, result - result['state'] = state - result['created'] = False + result["state"] = state + result["created"] = False repo = ecr.get_repository(registry_id, name) - if state == 'present': - result['created'] = False + if state == "present": + result["created"] = False if not repo: - repo = ecr.create_repository( - registry_id, name, image_tag_mutability, encryption_configuration) - result['changed'] = True - result['created'] = True + repo = ecr.create_repository(registry_id, name, image_tag_mutability, encryption_configuration) + result["changed"] = True + result["created"] = True else: if encryption_configuration is not None: - if repo.get('encryptionConfiguration') != encryption_configuration: - result['msg'] = 'Cannot modify repository encryption type' + if repo.get("encryptionConfiguration") != encryption_configuration: + result["msg"] = "Cannot modify repository encryption type" return False, result repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) - result['repository'] = repo + result["repository"] = repo if purge_lifecycle_policy: - original_lifecycle_policy = \ - ecr.get_lifecycle_policy(registry_id, name) + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - result['lifecycle_policy'] = None + result["lifecycle_policy"] = None if original_lifecycle_policy: ecr.purge_lifecycle_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif lifecycle_policy_text is not None: try: - result['lifecycle_policy'] = lifecycle_policy - original_lifecycle_policy = ecr.get_lifecycle_policy( - registry_id, name) + result["lifecycle_policy"] = lifecycle_policy + original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) if compare_policies(original_lifecycle_policy, lifecycle_policy): - ecr.put_lifecycle_policy(registry_id, name, - lifecycle_policy_text) - result['changed'] = True + ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. 
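# Hedged note on the update-if-changed pattern running through this block:
# amazon.aws's compare_policies(current, new) is truthy when the two
# documents differ after normalisation, so the module only writes (and only
# reports changed=True) on a real difference. Skeleton of the pattern:
#
#     if compare_policies(original_policy, desired_policy):
#         ecr.put_lifecycle_policy(registry_id, name, policy_text)
#         result["changed"] = True
#     # else: documents already match -- idempotent no-op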
- result['lifecycle_policy'] = lifecycle_policy_text + result["lifecycle_policy"] = lifecycle_policy_text raise if purge_policy: original_policy = ecr.get_repository_policy(registry_id, name) - result['policy'] = None + result["policy"] = None if original_policy: ecr.delete_repository_policy(registry_id, name) - result['changed'] = True + result["changed"] = True elif policy_text is not None: try: # Sort any lists containing only string types policy = sort_lists_of_strings(policy) - result['policy'] = policy + result["policy"] = policy - original_policy = ecr.get_repository_policy( - registry_id, name) + original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: original_policy = sort_lists_of_strings(original_policy) if compare_policies(original_policy, policy): - ecr.set_repository_policy( - registry_id, name, policy_text, force_set_policy) - result['changed'] = True + ecr.set_repository_policy(registry_id, name, policy_text, force_set_policy) + result["changed"] = True except Exception: # Some failure w/ the policy. It's helpful to know what the # policy is. - result['policy'] = policy_text + result["policy"] = policy_text raise else: original_policy = ecr.get_repository_policy(registry_id, name) if original_policy: - result['policy'] = original_policy + result["policy"] = original_policy original_scan_on_push = ecr.get_repository(registry_id, name) if original_scan_on_push is not None: - if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']: - result['changed'] = True - result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push + if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]: + result["changed"] = True + result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) - elif state == 'absent': - result['name'] = name + elif state == "absent": + result["name"] = name if repo: ecr.delete_repository(registry_id, name, force_absent) - result['changed'] = True + result["changed"] = True except Exception as err: msg = str(err) if isinstance(err, botocore.exceptions.ClientError): msg = boto_exception(err) - result['msg'] = msg - result['exception'] = traceback.format_exc() + result["msg"] = msg + result["exception"] = traceback.format_exc() return False, result if ecr.skipped: - result['skipped'] = True + result["skipped"] = True if ecr.changed: - result['changed'] = True + result["changed"] = True return True, result @@ -572,34 +553,37 @@ def main(): argument_spec = dict( name=dict(required=True), registry_id=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], - default='present'), - force_absent=dict(required=False, type='bool', default=False), - force_set_policy=dict(required=False, type='bool', default=False), - policy=dict(required=False, type='json'), - image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], - default='mutable'), - purge_policy=dict(required=False, type='bool'), - lifecycle_policy=dict(required=False, type='json'), - purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)), + state=dict(required=False, choices=["present", "absent"], default="present"), + force_absent=dict(required=False, type="bool", default=False), + force_set_policy=dict(required=False, type="bool", default=False), + policy=dict(required=False, type="json"), + 
diff --git a/ecs_service.py b/ecs_service.py
index 2009dc3b54a..af5ad567dc8 100644
--- a/ecs_service.py
+++ b/ecs_service.py
@@ -697,13 +697,13 @@
 DEPLOYMENT_CONTROLLER_TYPE_MAP = {
-    'type': 'str',
+    "type": "str",
 }
 
 DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
-    'maximum_percent': 'int',
-    'minimum_healthy_percent': 'int',
-    'deployment_circuit_breaker': 'dict',
+    "maximum_percent": "int",
+    "minimum_healthy_percent": "int",
+    "deployment_circuit_breaker": "dict",
 }
 
 
@@ -712,32 +712,32 @@ class EcsServiceManager:
     def __init__(self, module):
         self.module = module
-        self.ecs = module.client('ecs')
-        self.ec2 = module.client('ec2')
+        self.ecs = module.client("ecs")
+        self.ec2 = module.client("ec2")
 
     def format_network_configuration(self, network_config):
         result = dict()
-        if network_config['subnets'] is not None:
-            result['subnets'] = network_config['subnets']
+        if network_config["subnets"] is not None:
+            result["subnets"] = network_config["subnets"]
         else:
             self.module.fail_json(msg="Network configuration must include subnets")
-        if network_config['security_groups'] is not None:
-            groups = network_config['security_groups']
-            if any(not sg.startswith('sg-') for sg in groups):
+        if network_config["security_groups"] is not None:
+            groups = network_config["security_groups"]
+            if any(not sg.startswith("sg-") for sg in groups):
                 try:
-                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"]
                     groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                     self.module.fail_json_aws(e, msg="Couldn't look up security groups")
-            result['securityGroups'] = groups
-        if network_config['assign_public_ip'] is not None:
-            if network_config['assign_public_ip'] is True:
-                result['assignPublicIp'] = "ENABLED"
+            result["securityGroups"] = groups
+        if network_config["assign_public_ip"] is not None:
+            if network_config["assign_public_ip"] is True:
+                result["assignPublicIp"] = "ENABLED"
             else:
-                result['assignPublicIp'] = "DISABLED"
+                result["assignPublicIp"] = "DISABLED"
         return dict(awsvpcConfiguration=result)
 
-    def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
+    def find_in_array(self, array_of_services, service_name, field_name="serviceArn"):
         for c in array_of_services:
             if c[field_name].endswith(service_name):
                 return c
@@ -747,18 +747,18 @@ def describe_service(self, cluster_name, service_name):
         response = self.ecs.describe_services(
             cluster=cluster_name,
             services=[service_name],
-            include=['TAGS'],
+            include=["TAGS"],
         )
 
-        msg = ''
+        msg = ""
 
-        if len(response['failures']) > 0:
-            c = self.find_in_array(response['failures'], service_name, 'arn')
-            msg += ", failure reason is " + c['reason']
-            if c and c['reason'] == 'MISSING':
+        if len(response["failures"]) > 0:
+            c = self.find_in_array(response["failures"], service_name, "arn")
+            msg += ", failure reason is " + c["reason"]
+            if c and c["reason"] == "MISSING":
                 return None
             # fall thru and look through found ones
-        if len(response['services']) > 0:
-            c = self.find_in_array(response['services'], service_name)
+        if len(response["services"]) > 0:
+            c = self.find_in_array(response["services"], service_name)
             if c:
                 return c
         raise Exception("Unknown problem describing service %s." % service_name)
@@ -768,21 +768,21 @@ def is_matching_service(self, expected, existing):
         #   arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3
         # but the user is just entering
         #   ansible-fargate-nginx:3
-        if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]:
-            if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY':
+        if expected["task_definition"] != existing["taskDefinition"].split("/")[-1]:
+            if existing.get("deploymentController", {}).get("type", None) != "CODE_DEPLOY":
                 return False
 
-        if expected.get('health_check_grace_period_seconds'):
-            if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'):
+        if expected.get("health_check_grace_period_seconds"):
+            if expected.get("health_check_grace_period_seconds") != existing.get("healthCheckGracePeriodSeconds"):
                 return False
 
-        if (expected['load_balancers'] or []) != existing['loadBalancers']:
+        if (expected["load_balancers"] or []) != existing["loadBalancers"]:
             return False
 
-        if (expected['propagate_tags'] or "NONE") != existing['propagateTags']:
+        if (expected["propagate_tags"] or "NONE") != existing["propagateTags"]:
             return False
 
-        if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}):
+        if boto3_tag_list_to_ansible_dict(existing.get("tags", [])) != (expected["tags"] or {}):
             return False
 
         if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False):
@@ -790,8 +790,8 @@ def is_matching_service(self, expected, existing):
         # expected is params. DAEMON scheduling strategy returns desired count equal to
         # number of instances running; don't check desired count if scheduling strat is daemon
-        if (expected['scheduling_strategy'] != 'DAEMON'):
-            if (expected['desired_count'] or 0) != existing['desiredCount']:
+        if expected["scheduling_strategy"] != "DAEMON":
+            if (expected["desired_count"] or 0) != existing["desiredCount"]:
                 return False
 
         return True
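is_matching_service compares the user-supplied short task-definition name against the tail of the ARN that AWS returns, so both spellings count as a match. For example (values invented for illustration):

    existing_arn = "arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3"
    expected = "ansible-fargate-nginx:3"
    assert existing_arn.split("/")[-1] == expected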
@@ -820,7 +820,6 @@ def create_service(
         propagate_tags,
         enable_execute_command,
     ):
-
         params = dict(
             cluster=cluster_name,
             serviceName=service_name,
@@ -829,47 +828,49 @@ def create_service(
             clientToken=client_token,
             role=role,
             deploymentConfiguration=deployment_configuration,
-            placementStrategy=placement_strategy
+            placementStrategy=placement_strategy,
         )
         if network_configuration:
-            params['networkConfiguration'] = network_configuration
+            params["networkConfiguration"] = network_configuration
         if deployment_controller:
-            params['deploymentController'] = deployment_controller
+            params["deploymentController"] = deployment_controller
         if launch_type:
-            params['launchType'] = launch_type
+            params["launchType"] = launch_type
         if platform_version:
-            params['platformVersion'] = platform_version
+            params["platformVersion"] = platform_version
         if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
-            params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+            params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds
         if service_registries:
-            params['serviceRegistries'] = service_registries
+            params["serviceRegistries"] = service_registries
 
         # filter placement_constraint and left only those where value is not None
         # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
         if placement_constraints:
-            params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
-                                              for constraint in placement_constraints]
+            params["placementConstraints"] = [
+                {key: value for key, value in constraint.items() if value is not None}
+                for constraint in placement_constraints
+            ]
 
         # desired count is not required if scheduling strategy is daemon
         if desired_count is not None:
-            params['desiredCount'] = desired_count
+            params["desiredCount"] = desired_count
         if capacity_provider_strategy:
-            params['capacityProviderStrategy'] = capacity_provider_strategy
+            params["capacityProviderStrategy"] = capacity_provider_strategy
         if propagate_tags:
-            params['propagateTags'] = propagate_tags
+            params["propagateTags"] = propagate_tags
 
         # desired count is not required if scheduling strategy is daemon
         if desired_count is not None:
-            params['desiredCount'] = desired_count
+            params["desiredCount"] = desired_count
         if tags:
-            params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+            params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
         if scheduling_strategy:
-            params['schedulingStrategy'] = scheduling_strategy
+            params["schedulingStrategy"] = scheduling_strategy
         if enable_execute_command:
             params["enableExecuteCommand"] = enable_execute_command
 
         response = self.ecs.create_service(**params)
-        return self.jsonize(response['service'])
+        return self.jsonize(response["service"])
 
     def update_service(
         self,
@@ -893,242 +894,264 @@
             cluster=cluster_name,
             service=service_name,
             taskDefinition=task_definition,
-            deploymentConfiguration=deployment_configuration)
+            deploymentConfiguration=deployment_configuration,
+        )
 
         # filter placement_constraint and left only those where value is not None
         # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
         if placement_constraints:
-            params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
-                                              for constraint in placement_constraints]
+            params["placementConstraints"] = [
+                {key: value for key, value in constraint.items() if value is not None}
+                for constraint in placement_constraints
+            ]
 
         if purge_placement_constraints and not placement_constraints:
-            params['placementConstraints'] = []
+            params["placementConstraints"] = []
 
         if placement_strategy:
-            params['placementStrategy'] = placement_strategy
+            params["placementStrategy"] = placement_strategy
 
         if purge_placement_strategy and not placement_strategy:
-            params['placementStrategy'] = []
+            params["placementStrategy"] = []
 
         if network_configuration:
-            params['networkConfiguration'] = network_configuration
+            params["networkConfiguration"] = network_configuration
         if force_new_deployment:
-            params['forceNewDeployment'] = force_new_deployment
+            params["forceNewDeployment"] = force_new_deployment
         if capacity_provider_strategy:
-            params['capacityProviderStrategy'] = capacity_provider_strategy
+            params["capacityProviderStrategy"] = capacity_provider_strategy
         if health_check_grace_period_seconds is not None:
-            params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+            params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds
         # desired count is not required if scheduling strategy is daemon
         if desired_count is not None:
-            params['desiredCount'] = desired_count
+            params["desiredCount"] = desired_count
         if enable_execute_command is not None:
             params["enableExecuteCommand"] = enable_execute_command
 
         if load_balancers:
-            params['loadBalancers'] = load_balancers
+            params["loadBalancers"] = load_balancers
 
         response = self.ecs.update_service(**params)
-        return self.jsonize(response['service'])
+        return self.jsonize(response["service"])
 
     def jsonize(self, service):
         # some fields are datetime which is not JSON serializable
         # make them strings
-        if 'createdAt' in service:
-            service['createdAt'] = str(service['createdAt'])
-        if 'deployments' in service:
-            for d in service['deployments']:
-                if 'createdAt' in d:
-                    d['createdAt'] = str(d['createdAt'])
-                if 'updatedAt' in d:
-                    d['updatedAt'] = str(d['updatedAt'])
-        if 'events' in service:
-            for e in service['events']:
-                if 'createdAt' in e:
-                    e['createdAt'] = str(e['createdAt'])
+        if "createdAt" in service:
+            service["createdAt"] = str(service["createdAt"])
+        if "deployments" in service:
+            for d in service["deployments"]:
+                if "createdAt" in d:
+                    d["createdAt"] = str(d["createdAt"])
+                if "updatedAt" in d:
+                    d["updatedAt"] = str(d["updatedAt"])
+        if "events" in service:
+            for e in service["events"]:
+                if "createdAt" in e:
+                    e["createdAt"] = str(e["createdAt"])
         return service
 
     def delete_service(self, service, cluster=None, force=False):
         return self.ecs.delete_service(cluster=cluster, service=service, force=force)
 
     def health_check_setable(self, params):
-        load_balancers = params.get('loadBalancers', [])
+        load_balancers = params.get("loadBalancers", [])
         return len(load_balancers) > 0
 
 
 def main():
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent', 'deleting']),
-        name=dict(required=True, type='str', aliases=['service']),
-        cluster=dict(required=False, type='str', default='default'),
-        task_definition=dict(required=False, type='str'),
-        load_balancers=dict(required=False, default=[], type='list', elements='dict'),
-        desired_count=dict(required=False, type='int'),
-        client_token=dict(required=False, default='', type='str', no_log=False),
-        role=dict(required=False, default='', type='str'),
-        delay=dict(required=False, type='int', default=10),
-        repeat=dict(required=False, type='int', default=10),
-        force_new_deployment=dict(required=False, default=False, type='bool'),
-        force_deletion=dict(required=False, default=False, type='bool'),
-        deployment_controller=dict(required=False, default={}, type='dict'),
-        deployment_configuration=dict(required=False, default={}, type='dict'),
-        wait=dict(required=False, default=False, type='bool'),
+        state=dict(required=True, choices=["present", "absent", "deleting"]),
+        name=dict(required=True, type="str", aliases=["service"]),
+        cluster=dict(required=False, type="str", default="default"),
+        task_definition=dict(required=False, type="str"),
+        load_balancers=dict(required=False, default=[], type="list", elements="dict"),
+        desired_count=dict(required=False, type="int"),
+        client_token=dict(required=False, default="", type="str", no_log=False),
+        role=dict(required=False, default="", type="str"),
+        delay=dict(required=False, type="int", default=10),
+        repeat=dict(required=False, type="int", default=10),
+        force_new_deployment=dict(required=False, default=False, type="bool"),
+        force_deletion=dict(required=False, default=False, type="bool"),
+        deployment_controller=dict(required=False, default={}, type="dict"),
+        deployment_configuration=dict(required=False, default={}, type="dict"),
+        wait=dict(required=False, default=False, type="bool"),
         placement_constraints=dict(
             required=False,
             default=[],
-            type='list',
-            elements='dict',
-            options=dict(
-                type=dict(type='str'),
-                expression=dict(required=False, type='str')
-            )
+            type="list",
+            elements="dict",
+            options=dict(type=dict(type="str"), expression=dict(required=False, type="str")),
         ),
-        purge_placement_constraints=dict(required=False, default=False, type='bool'),
+        purge_placement_constraints=dict(required=False, default=False, type="bool"),
         placement_strategy=dict(
             required=False,
             default=[],
-            type='list',
-            elements='dict',
+            type="list",
+            elements="dict",
             options=dict(
-                type=dict(type='str'),
-                field=dict(type='str'),
-            )
+                type=dict(type="str"),
+                field=dict(type="str"),
+            ),
         ),
-        purge_placement_strategy=dict(required=False, default=False, type='bool'),
-        health_check_grace_period_seconds=dict(required=False, type='int'),
-        network_configuration=dict(required=False, type='dict', options=dict(
-            subnets=dict(type='list', elements='str'),
-            security_groups=dict(type='list', elements='str'),
-            assign_public_ip=dict(type='bool')
-        )),
-        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
-        platform_version=dict(required=False, type='str'),
-        service_registries=dict(required=False, type='list', default=[], elements='dict'),
-        scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']),
+        purge_placement_strategy=dict(required=False, default=False, type="bool"),
+        health_check_grace_period_seconds=dict(required=False, type="int"),
+        network_configuration=dict(
+            required=False,
+            type="dict",
+            options=dict(
+                subnets=dict(type="list", elements="str"),
+                security_groups=dict(type="list", elements="str"),
+                assign_public_ip=dict(type="bool"),
+            ),
+        ),
+        launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
+        platform_version=dict(required=False, type="str"),
+        service_registries=dict(required=False, type="list", default=[], elements="dict"),
+        scheduling_strategy=dict(required=False, choices=["DAEMON", "REPLICA"]),
         capacity_provider_strategy=dict(
             required=False,
-            type='list',
+            type="list",
             default=[],
-            elements='dict',
+            elements="dict",
             options=dict(
-                capacity_provider=dict(type='str'),
-                weight=dict(type='int'),
-                base=dict(type='int')
-            )
+                capacity_provider=dict(type="str"),
+                weight=dict(type="int"),
+                base=dict(type="int"),
+            ),
         ),
         propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]),
         tags=dict(required=False, type="dict"),
         enable_execute_command=dict(required=False, type="bool"),
     )
 
-    module = AnsibleAWSModule(argument_spec=argument_spec,
-                              supports_check_mode=True,
-                              required_if=[('launch_type', 'FARGATE', ['network_configuration'])],
-                              required_together=[['load_balancers', 'role']],
-                              mutually_exclusive=[['launch_type', 'capacity_provider_strategy']])
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[("launch_type", "FARGATE", ["network_configuration"])],
+        required_together=[["load_balancers", "role"]],
+        mutually_exclusive=[["launch_type", "capacity_provider_strategy"]],
+    )
 
-    if module.params['state'] == 'present':
-        if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None:
-            module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')
-        if module.params['task_definition'] is None and not module.params['force_new_deployment']:
-            module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.')
+    if module.params["state"] == "present":
+        if module.params["scheduling_strategy"] == "REPLICA" and module.params["desired_count"] is None:
+            module.fail_json(msg="state is present, scheduling_strategy is REPLICA; missing desired_count")
+        if module.params["task_definition"] is None and not module.params["force_new_deployment"]:
+            module.fail_json(msg="Either task_definition or force_new_deployment is required when status is present.")
 
-    if len(module.params['capacity_provider_strategy']) > 6:
-        module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.')
+    if len(module.params["capacity_provider_strategy"]) > 6:
+        module.fail_json(msg="AWS allows a maximum of six capacity providers in the strategy.")
 
     service_mgr = EcsServiceManager(module)
-    if module.params['network_configuration']:
-        network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
+    if module.params["network_configuration"]:
+        network_configuration = service_mgr.format_network_configuration(module.params["network_configuration"])
     else:
         network_configuration = None
 
-    deployment_controller = map_complex_type(module.params['deployment_controller'],
-                                             DEPLOYMENT_CONTROLLER_TYPE_MAP)
+    deployment_controller = map_complex_type(module.params["deployment_controller"], DEPLOYMENT_CONTROLLER_TYPE_MAP)
 
     deploymentController = snake_dict_to_camel_dict(deployment_controller)
 
-    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
-                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+    deployment_configuration = map_complex_type(
+        module.params["deployment_configuration"], DEPLOYMENT_CONFIGURATION_TYPE_MAP
+    )
 
     deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
-    serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))
-    capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy']))
+    serviceRegistries = list(map(snake_dict_to_camel_dict, module.params["service_registries"]))
+    capacityProviders = list(map(snake_dict_to_camel_dict, module.params["capacity_provider_strategy"]))
 
     try:
-        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+        existing = service_mgr.describe_service(module.params["cluster"], module.params["name"])
     except Exception as e:
-        module.fail_json_aws(e,
-                             msg="Exception describing service '{0}' in cluster '{1}'"
-                             .format(module.params['name'], module.params['cluster']))
+        module.fail_json_aws(
+            e,
+            msg="Exception describing service '{0}' in cluster '{1}'".format(
+                module.params["name"], module.params["cluster"]
+            ),
+        )
 
     results = dict(changed=False)
 
-    if module.params['state'] == 'present':
-
+    if module.params["state"] == "present":
         matching = False
         update = False
 
-        if existing and 'status' in existing and existing['status'] == "ACTIVE":
-            if module.params['force_new_deployment']:
+        if existing and "status" in existing and existing["status"] == "ACTIVE":
+            if module.params["force_new_deployment"]:
                 update = True
             elif service_mgr.is_matching_service(module.params, existing):
                 matching = True
-                results['service'] = existing
+                results["service"] = existing
             else:
                 update = True
 
         if not matching:
             if not module.check_mode:
-
-                role = module.params['role']
-                clientToken = module.params['client_token']
+                role = module.params["role"]
+                clientToken = module.params["client_token"]
 
                 loadBalancers = []
-                for loadBalancer in module.params['load_balancers']:
-                    if 'containerPort' in loadBalancer:
-                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+                for loadBalancer in module.params["load_balancers"]:
+                    if "containerPort" in loadBalancer:
+                        loadBalancer["containerPort"] = int(loadBalancer["containerPort"])
                     loadBalancers.append(loadBalancer)
 
                 for loadBalancer in loadBalancers:
-                    if 'containerPort' in loadBalancer:
-                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+                    if "containerPort" in loadBalancer:
+                        loadBalancer["containerPort"] = int(loadBalancer["containerPort"])
 
                 if update:
                     # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature
-                    if module.params['scheduling_strategy']:
-                        if (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
-                            module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
-
-                    if module.params['service_registries']:
-                        if (existing['serviceRegistries'] or []) != serviceRegistries:
-                            module.fail_json(msg="It is not possible to update the service registries of an existing service")
-                    if module.params['capacity_provider_strategy']:
-                        if 'launchType' in existing.keys():
-                            module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.")
-                    if module.params['launch_type']:
-                        if 'capacityProviderStrategy' in existing.keys():
-                            module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.")
-                    if (existing['loadBalancers'] or []) != loadBalancers:
+                    if module.params["scheduling_strategy"]:
+                        if (existing["schedulingStrategy"]) != module.params["scheduling_strategy"]:
+                            module.fail_json(
+                                msg="It is not possible to update the scheduling strategy of an existing service"
+                            )
+
+                    if module.params["service_registries"]:
+                        if (existing["serviceRegistries"] or []) != serviceRegistries:
+                            module.fail_json(
+                                msg="It is not possible to update the service registries of an existing service"
+                            )
+                    if module.params["capacity_provider_strategy"]:
+                        if "launchType" in existing.keys():
+                            module.fail_json(
+                                msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy."
+                            )
+                    if module.params["launch_type"]:
+                        if "capacityProviderStrategy" in existing.keys():
+                            module.fail_json(
+                                msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type."
+                            )
+                    if (existing["loadBalancers"] or []) != loadBalancers:
                         # fails if deployment type is not CODE_DEPLOY or ECS
-                        if existing['deploymentController']['type'] not in ['CODE_DEPLOY', 'ECS']:
-                            module.fail_json(msg="It is not possible to update the load balancers of an existing service")
+                        if existing["deploymentController"]["type"] not in ["CODE_DEPLOY", "ECS"]:
+                            module.fail_json(
+                                msg="It is not possible to update the load balancers of an existing service"
+                            )
 
-                    if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY':
-                        task_definition = ''
+                    if existing.get("deploymentController", {}).get("type", None) == "CODE_DEPLOY":
+                        task_definition = ""
                         network_configuration = []
                     else:
-                        task_definition = module.params['task_definition']
+                        task_definition = module.params["task_definition"]
 
-                    if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']:
-                        module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service")
+                    if module.params["propagate_tags"] and module.params["propagate_tags"] != existing["propagateTags"]:
+                        module.fail_json(
+                            msg="It is not currently supported to enable propagation tags of an existing service"
+                        )
 
-                    if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']:
+                    if (
+                        module.params["tags"]
+                        and boto3_tag_list_to_ansible_dict(existing["tags"]) != module.params["tags"]
+                    ):
                         module.fail_json(msg="It is not currently supported to change tags of an existing service")
 
-                    updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else []
+                    updatedLoadBalancers = loadBalancers if existing["deploymentController"]["type"] == "ECS" else []
 
-                    if task_definition is None and module.params['force_new_deployment']:
-                        task_definition = existing['taskDefinition']
+                    if task_definition is None and module.params["force_new_deployment"]:
+                        task_definition = existing["taskDefinition"]
 
                     try:
                         # update required
@@ -1180,76 +1203,73 @@ def main():
                     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
                         module.fail_json_aws(e, msg="Couldn't create service")
 
-            if response.get('tags', None):
-                response['tags'] = boto3_tag_list_to_ansible_dict(response['tags'])
-            results['service'] = response
+            if response.get("tags", None):
+                response["tags"] = boto3_tag_list_to_ansible_dict(response["tags"])
+            results["service"] = response
 
-        results['changed'] = True
+        results["changed"] = True
 
-    elif module.params['state'] == 'absent':
+    elif module.params["state"] == "absent":
         if not existing:
             pass
 
         else:
             # it exists, so we should delete it and mark changed.
             # return info about the cluster deleted
-            del existing['deployments']
-            del existing['events']
-            results['ansible_facts'] = existing
-            if 'status' in existing and existing['status'] == "INACTIVE":
-                results['changed'] = False
+            del existing["deployments"]
+            del existing["events"]
+            results["ansible_facts"] = existing
+            if "status" in existing and existing["status"] == "INACTIVE":
+                results["changed"] = False
             else:
                 if not module.check_mode:
                     try:
                         service_mgr.delete_service(
-                            module.params['name'],
-                            module.params['cluster'],
-                            module.params['force_deletion'],
+                            module.params["name"],
+                            module.params["cluster"],
+                            module.params["force_deletion"],
                         )
 
                         # Wait for service to be INACTIVE prior to exiting
-                        if module.params['wait']:
-                            waiter = service_mgr.ecs.get_waiter('services_inactive')
+                        if module.params["wait"]:
+                            waiter = service_mgr.ecs.get_waiter("services_inactive")
                             try:
                                 waiter.wait(
-                                    services=[module.params['name']],
-                                    cluster=module.params['cluster'],
+                                    services=[module.params["name"]],
+                                    cluster=module.params["cluster"],
                                     WaiterConfig={
-                                        'Delay': module.params['delay'],
-                                        'MaxAttempts': module.params['repeat']
-                                    }
+                                        "Delay": module.params["delay"],
+                                        "MaxAttempts": module.params["repeat"],
+                                    },
                                 )
                             except botocore.exceptions.WaiterError as e:
-                                module.fail_json_aws(e, 'Timeout waiting for service removal')
+                                module.fail_json_aws(e, "Timeout waiting for service removal")
                     except botocore.exceptions.ClientError as e:
                         module.fail_json_aws(e, msg="Couldn't delete service")
 
-                results['changed'] = True
+                results["changed"] = True
 
-    elif module.params['state'] == 'deleting':
+    elif module.params["state"] == "deleting":
         if not existing:
-            module.fail_json(msg="Service '" + module.params['name'] + " not found.")
+            module.fail_json(msg="Service '" + module.params["name"] + " not found.")
             return
         # it exists, so we should delete it and mark changed.
         # return info about the cluster deleted
-        delay = module.params['delay']
-        repeat = module.params['repeat']
+        delay = module.params["delay"]
+        repeat = module.params["repeat"]
         time.sleep(delay)
         for i in range(repeat):
-            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
-            status = existing['status']
+            existing = service_mgr.describe_service(module.params["cluster"], module.params["name"])
+            status = existing["status"]
             if status == "INACTIVE":
-                results['changed'] = True
+                results["changed"] = True
                 break
             time.sleep(delay)
            if i is repeat - 1:
-                module.fail_json(
-                    msg="Service still not deleted after {0} tries of {1} seconds each."
-                    .format(repeat, delay)
-                )
+                module.fail_json(msg="Service still not deleted after {0} tries of {1} seconds each.".format(repeat, delay))
                 return
 
     module.exit_json(**results)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
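Both the delete path above and the run/stop paths later in this commit lean on boto3's built-in ECS waiters rather than hand-rolled polling. A standalone sketch of the same call (region, cluster, and service names are invented for illustration):

    import boto3

    ecs = boto3.client("ecs", region_name="us-east-1")  # region illustrative
    waiter = ecs.get_waiter("services_inactive")
    waiter.wait(
        cluster="default",
        services=["my-service"],  # hypothetical service name
        WaiterConfig={"Delay": 10, "MaxAttempts": 10},
    )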
diff --git a/ecs_service_info.py b/ecs_service_info.py
index 41dd999c9c1..02a6abff207 100644
--- a/ecs_service_info.py
+++ b/ecs_service_info.py
@@ -147,14 +147,14 @@ class EcsServiceManager:
     def __init__(self, module):
         self.module = module
-        self.ecs = module.client('ecs')
+        self.ecs = module.client("ecs")
 
     @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
     def list_services_with_backoff(self, **kwargs):
-        paginator = self.ecs.get_paginator('list_services')
+        paginator = self.ecs.get_paginator("list_services")
         try:
             return paginator.paginate(**kwargs).build_full_result()
-        except is_boto3_error_code('ClusterNotFoundException') as e:
+        except is_boto3_error_code("ClusterNotFoundException") as e:
             self.module.fail_json_aws(e, "Could not find cluster to list services")
 
     @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
@@ -164,43 +164,43 @@ def describe_services_with_backoff(self, **kwargs):
 
     def list_services(self, cluster):
         fn_args = dict()
         if cluster and cluster is not None:
-            fn_args['cluster'] = cluster
+            fn_args["cluster"] = cluster
         try:
             response = self.list_services_with_backoff(**fn_args)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Couldn't list ECS services")
-        relevant_response = dict(services=response['serviceArns'])
+        relevant_response = dict(services=response["serviceArns"])
         return relevant_response
 
     def describe_services(self, cluster, services):
         fn_args = dict()
         if cluster and cluster is not None:
-            fn_args['cluster'] = cluster
-        fn_args['services'] = services
+            fn_args["cluster"] = cluster
+        fn_args["services"] = services
         try:
             response = self.describe_services_with_backoff(**fn_args)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
-        running_services = [self.extract_service_from(service) for service in response.get('services', [])]
-        services_not_running = response.get('failures', [])
+        running_services = [self.extract_service_from(service) for service in response.get("services", [])]
+        services_not_running = response.get("failures", [])
         return running_services, services_not_running
 
     def extract_service_from(self, service):
         # some fields are datetime which is not JSON serializable
         # make them strings
-        if 'deployments' in service:
-            for d in service['deployments']:
-                if 'createdAt' in d:
-                    d['createdAt'] = str(d['createdAt'])
-                if 'updatedAt' in d:
-                    d['updatedAt'] = str(d['updatedAt'])
-        if 'events' in service:
-            if not self.module.params['events']:
-                del service['events']
+        if "deployments" in service:
+            for d in service["deployments"]:
+                if "createdAt" in d:
+                    d["createdAt"] = str(d["createdAt"])
+                if "updatedAt" in d:
+                    d["updatedAt"] = str(d["updatedAt"])
+        if "events" in service:
+            if not self.module.params["events"]:
+                del service["events"]
             else:
-                for e in service['events']:
-                    if 'createdAt' in e:
-                        e['createdAt'] = str(e['createdAt'])
+                for e in service["events"]:
+                    if "createdAt" in e:
+                        e["createdAt"] = str(e["createdAt"])
         return service
 
 
@@ -208,38 +208,37 @@ def chunks(l, n):
     """Yield successive n-sized chunks from l."""
     """ https://stackoverflow.com/a/312464 """
     for i in range(0, len(l), n):
-        yield l[i:i + n]
+        yield l[i:i + n]  # fmt: skip
 
 
 def main():
-
     argument_spec = dict(
-        details=dict(type='bool', default=False),
-        events=dict(type='bool', default=True),
+        details=dict(type="bool", default=False),
+        events=dict(type="bool", default=True),
         cluster=dict(),
-        service=dict(type='list', elements='str', aliases=['name'])
+        service=dict(type="list", elements="str", aliases=["name"]),
     )
 
     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
 
-    show_details = module.params.get('details')
+    show_details = module.params.get("details")
 
     task_mgr = EcsServiceManager(module)
     if show_details:
-        if module.params['service']:
-            services = module.params['service']
+        if module.params["service"]:
+            services = module.params["service"]
         else:
-            services = task_mgr.list_services(module.params['cluster'])['services']
+            services = task_mgr.list_services(module.params["cluster"])["services"]
         ecs_info = dict(services=[], services_not_running=[])
         for chunk in chunks(services, 10):
-            running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
-            ecs_info['services'].extend(running_services)
-            ecs_info['services_not_running'].extend(services_not_running)
+            running_services, services_not_running = task_mgr.describe_services(module.params["cluster"], chunk)
+            ecs_info["services"].extend(running_services)
+            ecs_info["services_not_running"].extend(services_not_running)
     else:
-        ecs_info = task_mgr.list_services(module.params['cluster'])
+        ecs_info = task_mgr.list_services(module.params["cluster"])
 
     module.exit_json(changed=False, **ecs_info)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
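ecs_service_info batches its DescribeServices calls because the API accepts at most ten services per request; the chunks helper above yields those batches. A usage sketch against the manager class above (cluster name invented):

    service_arns = task_mgr.list_services("default")["services"]
    ecs_info = dict(services=[], services_not_running=[])
    for batch in chunks(service_arns, 10):  # DescribeServices limit: 10 per call
        running, missing = task_mgr.describe_services("default", batch)
        ecs_info["services"].extend(running)
        ecs_info["services_not_running"].extend(missing)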
diff --git a/ecs_tag.py b/ecs_tag.py
index 7aac8dfb4a7..f11fc1f33ac 100644
--- a/ecs_tag.py
+++ b/ecs_tag.py
@@ -121,33 +121,32 @@ def get_tags(ecs, module, resource):
     try:
-        return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
+        return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"])
     except (BotoCoreError, ClientError) as e:
-        module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+        module.fail_json_aws(e, msg="Failed to fetch tags for resource {0}".format(resource))
 
 
 def get_arn(ecs, module, cluster_name, resource_type, resource):
-
     try:
-        if resource_type == 'cluster':
+        if resource_type == "cluster":
             description = ecs.describe_clusters(clusters=[resource])
-            resource_arn = description['clusters'][0]['clusterArn']
-        elif resource_type == 'task':
+            resource_arn = description["clusters"][0]["clusterArn"]
+        elif resource_type == "task":
             description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
-            resource_arn = description['tasks'][0]['taskArn']
-        elif resource_type == 'service':
+            resource_arn = description["tasks"][0]["taskArn"]
+        elif resource_type == "service":
             description = ecs.describe_services(cluster=cluster_name, services=[resource])
-            resource_arn = description['services'][0]['serviceArn']
-        elif resource_type == 'task_definition':
+            resource_arn = description["services"][0]["serviceArn"]
+        elif resource_type == "task_definition":
             description = ecs.describe_task_definition(taskDefinition=resource)
-            resource_arn = description['taskDefinition']['taskDefinitionArn']
-        elif resource_type == 'container':
+            resource_arn = description["taskDefinition"]["taskDefinitionArn"]
+        elif resource_type == "container":
             description = ecs.describe_container_instances(clusters=[resource])
-            resource_arn = description['containerInstances'][0]['containerInstanceArn']
+            resource_arn = description["containerInstances"][0]["containerInstanceArn"]
     except (IndexError, KeyError):
-        module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
+        module.fail_json(msg="Failed to find {0} {1}".format(resource_type, resource))
     except (BotoCoreError, ClientError) as e:
-        module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
+        module.fail_json_aws(e, msg="Failed to find {0} {1}".format(resource_type, resource))
 
     return resource_arn
 
@@ -156,28 +155,28 @@ def main():
     argument_spec = dict(
         cluster_name=dict(required=True),
         resource=dict(required=False),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=False),
-        state=dict(default='present', choices=['present', 'absent']),
-        resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=False),
+        state=dict(default="present", choices=["present", "absent"]),
+        resource_type=dict(default="cluster", choices=["cluster", "task", "service", "task_definition", "container"]),
     )
 
-    required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
+    required_if = [("state", "present", ["tags"]), ("state", "absent", ["tags"])]
 
     module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
 
-    resource_type = module.params['resource_type']
-    cluster_name = module.params['cluster_name']
-    if resource_type == 'cluster':
+    resource_type = module.params["resource_type"]
+    cluster_name = module.params["cluster_name"]
+    if resource_type == "cluster":
         resource = cluster_name
     else:
-        resource = module.params['resource']
-    tags = module.params['tags']
-    state = module.params['state']
-    purge_tags = module.params['purge_tags']
+        resource = module.params["resource"]
+    tags = module.params["tags"]
+    state = module.params["state"]
+    purge_tags = module.params["purge_tags"]
 
-    result = {'changed': False}
+    result = {"changed": False}
 
-    ecs = module.client('ecs')
+    ecs = module.client("ecs")
 
     resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)
 
@@ -186,7 +185,7 @@ def main():
     add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
 
     remove_tags = {}
-    if state == 'absent':
+    if state == "absent":
         for key in tags:
             if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
                 remove_tags[key] = current_tags[key]
@@ -195,28 +194,28 @@ def main():
             remove_tags[key] = current_tags[key]
 
     if remove_tags:
-        result['changed'] = True
-        result['removed_tags'] = remove_tags
+        result["changed"] = True
+        result["removed_tags"] = remove_tags
         if not module.check_mode:
             try:
                 ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
             except (BotoCoreError, ClientError) as e:
-                module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+                module.fail_json_aws(e, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource))
 
-    if state == 'present' and add_tags:
-        result['changed'] = True
-        result['added_tags'] = add_tags
+    if state == "present" and add_tags:
+        result["changed"] = True
+        result["added_tags"] = add_tags
         current_tags.update(add_tags)
         if not module.check_mode:
             try:
-                tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
+                tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value")
                 ecs.tag_resource(resourceArn=resource_arn, tags=tags)
             except (BotoCoreError, ClientError) as e:
-                module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+                module.fail_json_aws(e, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource))
 
-    result['tags'] = get_tags(ecs, module, resource_arn)
+    result["tags"] = get_tags(ecs, module, resource_arn)
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ecs_task.py b/ecs_task.py
index 6c693b317bc..dfd7d9a7902 100644
--- a/ecs_task.py
+++ b/ecs_task.py
@@ -258,29 +258,29 @@ class EcsExecManager:
     def __init__(self, module):
         self.module = module
-        self.ecs = module.client('ecs')
-        self.ec2 = module.client('ec2')
+        self.ecs = module.client("ecs")
+        self.ec2 = module.client("ec2")
 
     def format_network_configuration(self, network_config):
         result = dict()
-        if 'subnets' in network_config:
-            result['subnets'] = network_config['subnets']
+        if "subnets" in network_config:
+            result["subnets"] = network_config["subnets"]
         else:
             self.module.fail_json(msg="Network configuration must include subnets")
-        if 'security_groups' in network_config:
-            groups = network_config['security_groups']
-            if any(not sg.startswith('sg-') for sg in groups):
+        if "security_groups" in network_config:
+            groups = network_config["security_groups"]
+            if any(not sg.startswith("sg-") for sg in groups):
                 try:
-                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"]
                     groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                     self.module.fail_json_aws(e, msg="Couldn't look up security groups")
-            result['securityGroups'] = groups
-        if 'assign_public_ip' in network_config:
-            if network_config['assign_public_ip'] is True:
-                result['assignPublicIp'] = "ENABLED"
+            result["securityGroups"] = groups
+        if "assign_public_ip" in network_config:
+            if network_config["assign_public_ip"] is True:
+                result["assignPublicIp"] = "ENABLED"
             else:
-                result['assignPublicIp'] = "DISABLED"
+                result["assignPublicIp"] = "DISABLED"
 
         return dict(awsvpcConfiguration=result)
 
@@ -288,10 +288,10 @@ def list_tasks(self, cluster_name, service_name, status):
         response = self.ecs.list_tasks(
             cluster=cluster_name,
             family=service_name,
-            desiredStatus=status
+            desiredStatus=status,
         )
-        if len(response['taskArns']) > 0:
-            for c in response['taskArns']:
+        if len(response["taskArns"]) > 0:
+            for c in response["taskArns"]:
                 if c.endswith(service_name):
                     return c
         return None
@@ -299,14 +299,17 @@ def list_tasks(self, cluster_name, service_name, status):
 
     def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
         if overrides is None:
             overrides = dict()
-        params = dict(cluster=cluster, taskDefinition=task_definition,
-                      overrides=overrides, count=count, startedBy=startedBy)
+        params = dict(
+            cluster=cluster, taskDefinition=task_definition, overrides=overrides, count=count, startedBy=startedBy
+        )
-        if self.module.params['network_configuration']:
-            params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+        if self.module.params["network_configuration"]:
+            params["networkConfiguration"] = self.format_network_configuration(
+                self.module.params["network_configuration"]
+            )
         if launch_type:
-            params['launchType'] = launch_type
+            params["launchType"] = launch_type
         if tags:
-            params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+            params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
 
         # TODO: need to check if long arn format enabled.
         try:
@@ -314,168 +317,164 @@ def run_task(self, cluster, task_definition, overrides, count, startedBy, launch
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Couldn't run task")
         # include tasks and failures
-        return response['tasks']
+        return response["tasks"]
 
     def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
         args = dict()
         if cluster:
-            args['cluster'] = cluster
+            args["cluster"] = cluster
         if task_definition:
-            args['taskDefinition'] = task_definition
+            args["taskDefinition"] = task_definition
         if overrides:
-            args['overrides'] = overrides
+            args["overrides"] = overrides
         if container_instances:
-            args['containerInstances'] = container_instances
+            args["containerInstances"] = container_instances
         if startedBy:
-            args['startedBy'] = startedBy
-        if self.module.params['network_configuration']:
-            args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+            args["startedBy"] = startedBy
+        if self.module.params["network_configuration"]:
+            args["networkConfiguration"] = self.format_network_configuration(
+                self.module.params["network_configuration"]
+            )
         if tags:
-            args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+            args["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
 
         try:
             response = self.ecs.start_task(**args)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Couldn't start task")
         # include tasks and failures
-        return response['tasks']
+        return response["tasks"]
 
     def stop_task(self, cluster, task):
         response = self.ecs.stop_task(cluster=cluster, task=task)
-        return response['task']
+        return response["task"]
 
     def ecs_task_long_format_enabled(self):
-        account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
-        return account_support['settings'][0]['value'] == 'enabled'
+        account_support = self.ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True)
+        return account_support["settings"][0]["value"] == "enabled"
 
 
 def main():
     argument_spec = dict(
-        operation=dict(required=True, choices=['run', 'start', 'stop']),
-        cluster=dict(required=False, type='str', default='default'),  # R S P
-        task_definition=dict(required=False, type='str'),  # R* S*
-        overrides=dict(required=False, type='dict'),  # R S
-        count=dict(required=False, type='int'),  # R
-        task=dict(required=False, type='str'),  # P*
-        container_instances=dict(required=False, type='list', elements='str'),  # S*
-        started_by=dict(required=False, type='str'),  # R S
-        network_configuration=dict(required=False, type='dict'),
-        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
-        tags=dict(required=False, type='dict', aliases=['resource_tags']),
-        wait=dict(required=False, default=False, type='bool'),
+        operation=dict(required=True, choices=["run", "start", "stop"]),
+        cluster=dict(required=False, type="str", default="default"),  # R S P
+        task_definition=dict(required=False, type="str"),  # R* S*
+        overrides=dict(required=False, type="dict"),  # R S
+        count=dict(required=False, type="int"),  # R
+        task=dict(required=False, type="str"),  # P*
+        container_instances=dict(required=False, type="list", elements="str"),  # S*
+        started_by=dict(required=False, type="str"),  # R S
+        network_configuration=dict(required=False, type="dict"),
+        launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
+        tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+        wait=dict(required=False, default=False, type="bool"),
     )
 
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
-                              required_if=[
-                                  ('launch_type', 'FARGATE', ['network_configuration']),
-                                  ('operation', 'run', ['task_definition']),
-                                  ('operation', 'start', [
-                                      'task_definition',
-                                      'container_instances'
-                                  ]),
-                                  ('operation', 'stop', ['task_definition', 'task']),
-                              ])
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[
+            ("launch_type", "FARGATE", ["network_configuration"]),
+            ("operation", "run", ["task_definition"]),
+            ("operation", "start", ["task_definition", "container_instances"]),
+            ("operation", "stop", ["task_definition", "task"]),
+        ],
+    )
 
     # Validate Inputs
-    if module.params['operation'] == 'run':
-        task_to_list = module.params['task_definition']
+    if module.params["operation"] == "run":
+        task_to_list = module.params["task_definition"]
         status_type = "RUNNING"
 
-    if module.params['operation'] == 'start':
-        task_to_list = module.params['task']
+    if module.params["operation"] == "start":
+        task_to_list = module.params["task"]
         status_type = "RUNNING"
 
-    if module.params['operation'] == 'stop':
-        task_to_list = module.params['task_definition']
+    if module.params["operation"] == "stop":
+        task_to_list = module.params["task_definition"]
         status_type = "STOPPED"
 
     service_mgr = EcsExecManager(module)
 
-    if module.params['tags']:
+    if module.params["tags"]:
        if not service_mgr.ecs_task_long_format_enabled():
             module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
 
-    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+    existing = service_mgr.list_tasks(module.params["cluster"], task_to_list, status_type)
 
     results = dict(changed=False)
-    if module.params['operation'] == 'run':
+    if module.params["operation"] == "run":
         if existing:
             # TBD - validate the rest of the details
-            results['task'] = existing
+            results["task"] = existing
         else:
             if not module.check_mode:
-
                 # run_task returns a list of tasks created
                 tasks = service_mgr.run_task(
-                    module.params['cluster'],
-                    module.params['task_definition'],
-                    module.params['overrides'],
-                    module.params['count'],
-                    module.params['started_by'],
-                    module.params['launch_type'],
-                    module.params['tags'],
+                    module.params["cluster"],
+                    module.params["task_definition"],
+                    module.params["overrides"],
+                    module.params["count"],
+                    module.params["started_by"],
+                    module.params["launch_type"],
+                    module.params["tags"],
                 )
 
                 # Wait for task(s) to be running prior to exiting
-                if module.params['wait']:
-
-                    waiter = service_mgr.ecs.get_waiter('tasks_running')
+                if module.params["wait"]:
+                    waiter = service_mgr.ecs.get_waiter("tasks_running")
                     try:
                         waiter.wait(
-                            tasks=[task['taskArn'] for task in tasks],
-                            cluster=module.params['cluster'],
+                            tasks=[task["taskArn"] for task in tasks],
+                            cluster=module.params["cluster"],
                         )
                     except botocore.exceptions.WaiterError as e:
-                        module.fail_json_aws(e, 'Timeout waiting for tasks to run')
+                        module.fail_json_aws(e, "Timeout waiting for tasks to run")
 
-                results['task'] = tasks
+                results["task"] = tasks
 
-        results['changed'] = True
+        results["changed"] = True
 
-    elif module.params['operation'] == 'start':
+    elif module.params["operation"] == "start":
         if existing:
             # TBD - validate the rest of the details
-            results['task'] = existing
+            results["task"] = existing
         else:
             if not module.check_mode:
-                results['task'] = service_mgr.start_task(
-                    module.params['cluster'],
-                    module.params['task_definition'],
-                    module.params['overrides'],
-                    module.params['container_instances'],
-                    module.params['started_by'],
-                    module.params['tags'],
+                results["task"] = service_mgr.start_task(
+                    module.params["cluster"],
+                    module.params["task_definition"],
+                    module.params["overrides"],
+                    module.params["container_instances"],
+                    module.params["started_by"],
+                    module.params["tags"],
                 )
 
-        results['changed'] = True
+        results["changed"] = True
 
-    elif module.params['operation'] == 'stop':
        if existing:
-            results['task'] = existing
+            results["task"] = existing
         else:
             if not module.check_mode:
                 # it exists, so we should delete it and mark changed.
                 # return info about the cluster deleted
-                results['task'] = service_mgr.stop_task(
-                    module.params['cluster'],
-                    module.params['task']
-                )
+                results["task"] = service_mgr.stop_task(module.params["cluster"], module.params["task"])
 
                 # Wait for task to be stopped prior to exiting
-                if module.params['wait']:
-
-                    waiter = service_mgr.ecs.get_waiter('tasks_stopped')
+                if module.params["wait"]:
+                    waiter = service_mgr.ecs.get_waiter("tasks_stopped")
                     try:
                         waiter.wait(
-                            tasks=[module.params['task']],
-                            cluster=module.params['cluster'],
+                            tasks=[module.params["task"]],
+                            cluster=module.params["cluster"],
                         )
                     except botocore.exceptions.WaiterError as e:
-                        module.fail_json_aws(e, 'Timeout waiting for task to stop')
+                        module.fail_json_aws(e, "Timeout waiting for task to stop")
 
-        results['changed'] = True
+        results["changed"] = True
 
     module.exit_json(**results)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
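ecs_task refuses to set tags unless the account has long-format task ARNs enabled, since tags only attach to the newer ARN style; ecs_task_long_format_enabled above wraps that account-settings lookup. The check boils down to (region illustrative):

    import boto3

    ecs = boto3.client("ecs", region_name="us-east-1")
    resp = ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True)
    if resp["settings"][0]["value"] != "enabled":
        raise SystemExit("Cannot set task tags: long format task arns are required to set tags")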
diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index 16fcab712c4..0a8e413dbcd 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -792,49 +792,62 @@ class EcsTaskManager:
     def __init__(self, module):
         self.module = module
 
-        self.ecs = module.client('ecs', AWSRetry.jittered_backoff())
+        self.ecs = module.client("ecs", AWSRetry.jittered_backoff())
 
     def describe_task(self, task_name):
         try:
             response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name)
-            return response['taskDefinition']
+            return response["taskDefinition"]
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             return None
 
-    def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions,
-                      volumes, launch_type, cpu, memory, placement_constraints):
+    def register_task(
+        self,
+        family,
+        task_role_arn,
+        execution_role_arn,
+        network_mode,
+        container_definitions,
+        volumes,
+        launch_type,
+        cpu,
+        memory,
+        placement_constraints,
+    ):
         validated_containers = []
 
         # Ensures the number parameters are int as required by the AWS SDK
         for container in container_definitions:
-            for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'):
+            for param in ("memory", "cpu", "memoryReservation", "startTimeout", "stopTimeout"):
                 if param in container:
                     container[param] = int(container[param])
 
-            if 'portMappings' in container:
-                for port_mapping in container['portMappings']:
-                    for port in ('hostPort', 'containerPort'):
+            if "portMappings" in container:
+                for port_mapping in container["portMappings"]:
+                    for port in ("hostPort", "containerPort"):
                         if port in port_mapping:
                             port_mapping[port] = int(port_mapping[port])
-                    if network_mode == 'awsvpc' and 'hostPort' in port_mapping:
-                        if port_mapping['hostPort'] != port_mapping.get('containerPort'):
-                            self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as "
-                                                      "container port or not be set")
+                    if network_mode == "awsvpc" and "hostPort" in port_mapping:
+                        if port_mapping["hostPort"] != port_mapping.get("containerPort"):
+                            self.module.fail_json(
+                                msg="In awsvpc network mode, host port must be set to the same as "
+                                "container port or not be set"
+                            )
 
-            if 'linuxParameters' in container:
-                for linux_param in container.get('linuxParameters'):
-                    if linux_param == 'tmpfs':
-                        for tmpfs_param in container['linuxParameters']['tmpfs']:
-                            if 'size' in tmpfs_param:
-                                tmpfs_param['size'] = int(tmpfs_param['size'])
-
-                    for param in ('maxSwap', 'swappiness', 'sharedMemorySize'):
+            if "linuxParameters" in container:
+                for linux_param in container.get("linuxParameters"):
+                    if linux_param == "tmpfs":
+                        for tmpfs_param in container["linuxParameters"]["tmpfs"]:
+                            if "size" in tmpfs_param:
+                                tmpfs_param["size"] = int(tmpfs_param["size"])
+
+                    for param in ("maxSwap", "swappiness", "sharedMemorySize"):
                         if param in linux_param:
-                            container['linuxParameters'][param] = int(container['linuxParameters'][param])
+                            container["linuxParameters"][param] = int(container["linuxParameters"][param])
 
-            if 'ulimits' in container:
-                for limits_mapping in container['ulimits']:
-                    for limit in ('softLimit', 'hardLimit'):
+            if "ulimits" in container:
+                for limits_mapping in container["ulimits"]:
+                    for limit in ("softLimit", "hardLimit"):
                         if limit in limits_mapping:
                             limits_mapping[limit] = int(limits_mapping[limit])
 
@@ -844,47 +857,42 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode,
             family=family,
             taskRoleArn=task_role_arn,
             containerDefinitions=container_definitions,
-            volumes=volumes
+            volumes=volumes,
         )
-        if network_mode != 'default':
-            params['networkMode'] = network_mode
+        if network_mode != "default":
+            params["networkMode"] = network_mode
         if cpu:
-            params['cpu'] = cpu
+            params["cpu"] = cpu
         if memory:
-            params['memory'] = memory
+            params["memory"] = memory
         if launch_type:
-            params['requiresCompatibilities'] = [launch_type]
+            params["requiresCompatibilities"] = [launch_type]
         if execution_role_arn:
-            params['executionRoleArn'] = execution_role_arn
+            params["executionRoleArn"] = execution_role_arn
         if placement_constraints:
-            params['placementConstraints'] = placement_constraints
+            params["placementConstraints"] = placement_constraints
 
         try:
             response = self.ecs.register_task_definition(aws_retry=True, **params)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             self.module.fail_json_aws(e, msg="Failed to register task")
 
-        return response['taskDefinition']
+        return response["taskDefinition"]
 
     def describe_task_definitions(self, family):
-        data = {
-            "taskDefinitionArns": [],
-            "nextToken": None
-        }
+        data = {"taskDefinitionArns": [], "nextToken": None}
 
         def fetch():
             # Boto3 is weird about params passed, so only pass nextToken if we have a value
-            params = {
-                'familyPrefix': family
-            }
+            params = {"familyPrefix": family}
 
-            if data['nextToken']:
-                params['nextToken'] = data['nextToken']
+            if data["nextToken"]:
+                params["nextToken"] = data["nextToken"]
 
             result = self.ecs.list_task_definitions(**params)
-            data['taskDefinitionArns'] += result['taskDefinitionArns']
-            data['nextToken'] = result.get('nextToken', None)
-            return data['nextToken'] is not None
+            data["taskDefinitionArns"] += result["taskDefinitionArns"]
+            data["nextToken"] = result.get("nextToken", None)
+            return data["nextToken"] is not None
 
         # Fetch all the arns, possibly across multiple pages
         while fetch():
@@ -893,118 +901,134 @@ def fetch():
         # Return the full descriptions of the task definitions, sorted ascending by revision
         return list(
             sorted(
-                [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
-                key=lambda td: td['revision']
+                [
+                    self.ecs.describe_task_definition(taskDefinition=arn)["taskDefinition"]
+                    for arn in data["taskDefinitionArns"]
+                ],
+                key=lambda td: td["revision"],
             )
         )
 
     def deregister_task(self, taskArn):
         response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
-        return response['taskDefinition']
+        return response["taskDefinition"]
 
 
 def main():
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent']),
-        arn=dict(required=False, type='str'),
-        family=dict(required=False, type='str'),
-        revision=dict(required=False, type='int'),
-        force_create=dict(required=False, default=False, type='bool'),
-        containers=dict(required=True, type='list', elements='dict'),
-        network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
-        task_role_arn=dict(required=False, default='', type='str'),
-        execution_role_arn=dict(required=False, default='', type='str'),
-        volumes=dict(required=False, type='list', elements='dict'),
-        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+        state=dict(required=True, choices=["present", "absent"]),
+        arn=dict(required=False, type="str"),
+        family=dict(required=False, type="str"),
+        revision=dict(required=False, type="int"),
+        force_create=dict(required=False, default=False, type="bool"),
+        containers=dict(required=True, type="list", elements="dict"),
+        network_mode=dict(
+            required=False, default="bridge", choices=["default", "bridge", "host", "none", "awsvpc"], type="str"
+        ),
+        task_role_arn=dict(required=False, default="", type="str"),
+        execution_role_arn=dict(required=False, default="", type="str"),
+        volumes=dict(required=False, type="list", elements="dict"),
+        launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
         cpu=dict(),
-        memory=dict(required=False, type='str'),
-        placement_constraints=dict(required=False, type='list', elements='dict',
-                                   options=dict(type=dict(type='str'), expression=dict(type='str'))),
+        memory=dict(required=False, type="str"),
+        placement_constraints=dict(
+            required=False,
+            type="list",
+            elements="dict",
+            options=dict(type=dict(type="str"), expression=dict(type="str")),
+        ),
     )
 
-    module = AnsibleAWSModule(argument_spec=argument_spec,
-                              supports_check_mode=True,
-                              required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
-                              )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[("launch_type", "FARGATE", ["cpu", "memory"])],
+    )
 
     task_to_describe = None
     task_mgr = EcsTaskManager(module)
     results = dict(changed=False)
 
-    if module.params['state'] == 'present':
-        if 'containers' not in module.params or not module.params['containers']:
+    if module.params["state"] == "present":
+        if "containers" not in module.params or not module.params["containers"]:
             module.fail_json(msg="To use task definitions, a list of containers must be specified")
 
-        if 'family' not in module.params or not module.params['family']:
+        if "family" not in module.params or not module.params["family"]:
             module.fail_json(msg="To use task definitions, a family must be specified")
 
-        network_mode = module.params['network_mode']
-        launch_type = module.params['launch_type']
-        placement_constraints = module.params['placement_constraints']
-        if launch_type == 'FARGATE':
-            if network_mode != 'awsvpc':
+        network_mode = module.params["network_mode"]
+        launch_type = module.params["launch_type"]
+        placement_constraints = module.params["placement_constraints"]
+        if launch_type == "FARGATE":
+            if network_mode != "awsvpc":
                 module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
             if placement_constraints:
                 module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate")
 
-        for container in module.params['containers']:
-            if container.get('links') and network_mode == 'awsvpc':
-                module.fail_json(msg='links parameter is not supported if network mode is awsvpc.')
+        for container in module.params["containers"]:
+            if container.get("links") and network_mode == "awsvpc":
+                module.fail_json(msg="links parameter is not supported if network mode is awsvpc.")
 
-            for environment in container.get('environment', []):
-                environment['value'] = environment['value']
+            for environment in container.get("environment", []):
+                environment["value"] = environment["value"]
 
-            for environment_file in container.get('environmentFiles', []):
-                if environment_file['type'] != 's3':
-                    module.fail_json(msg='The only supported value for environmentFiles is s3.')
+            for environment_file in container.get("environmentFiles", []):
+                if environment_file["type"] != "s3":
+                    module.fail_json(msg="The only supported value for environmentFiles is s3.")
 
-            for linux_param in container.get('linuxParameters', {}):
-                if linux_param == 'maxSwap' and launch_type == 'FARGATE':
-                    module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.')
+            for linux_param in container.get("linuxParameters", {}):
+                if linux_param == "maxSwap" and launch_type == "FARGATE":
+                    module.fail_json(msg="devices parameter is not supported with the FARGATE launch type.")
 
-                if linux_param == 'maxSwap' and launch_type == 'FARGATE':
-                    module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.')
-                elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0:
-                    module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.')
+                if linux_param == "maxSwap" and launch_type == "FARGATE":
+                    module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.")
+                elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0:
+                    module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.")
 
-                if (
-                    linux_param == 'swappiness' and
-                    (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100)
+                if linux_param == "swappiness" and (
+                    int(container["linuxParameters"]["swappiness"]) < 0
+                    or int(container["linuxParameters"]["swappiness"]) > 100
                 ):
-                    module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.')
+                    module.fail_json(msg="Accepted values for swappiness are whole numbers between 0 and 100.")
 
-                if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE':
-                    module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.')
+                if linux_param == "sharedMemorySize" and launch_type == "FARGATE":
+                    module.fail_json(msg="sharedMemorySize parameter is not
supported with the FARGATE launch type.") - if linux_param == 'tmpfs' and launch_type == 'FARGATE': - module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.') + if linux_param == "tmpfs" and launch_type == "FARGATE": + module.fail_json(msg="tmpfs parameter is not supported with the FARGATE launch type.") - if container.get('hostname') and network_mode == 'awsvpc': - module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.') + if container.get("hostname") and network_mode == "awsvpc": + module.fail_json(msg="hostname parameter is not supported when the awsvpc network mode is used.") - if container.get('extraHosts') and network_mode == 'awsvpc': - module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.') + if container.get("extraHosts") and network_mode == "awsvpc": + module.fail_json(msg="extraHosts parameter is not supported when the awsvpc network mode is used.") - family = module.params['family'] - existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family']) + family = module.params["family"] + existing_definitions_in_family = task_mgr.describe_task_definitions(module.params["family"]) - if 'revision' in module.params and module.params['revision']: + if "revision" in module.params and module.params["revision"]: # The definition specifies revision. We must guarantee that an active revision of that number will result from this. - revision = int(module.params['revision']) + revision = int(module.params["revision"]) # A revision has been explicitly specified. Attempt to locate a matching revision - tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision] + tasks_defs_for_revision = [td for td in existing_definitions_in_family if td["revision"] == revision] existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None - if existing and existing['status'] != "ACTIVE": + if existing and existing["status"] != "ACTIVE": # We cannot reactivate an inactive revision - module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision)) + module.fail_json( + msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision) + ) elif not existing: if not existing_definitions_in_family and revision != 1: - module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision) - elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision: - module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % - (revision, existing_definitions_in_family[-1]['revision'] + 1)) + module.fail_json( + msg="You have specified a revision of %d but a created revision would be 1" % revision + ) + elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: + module.fail_json( + msg="You have specified a revision of %d but a created revision would be %d" + % (revision, existing_definitions_in_family[-1]["revision"] + 1) + ) else: existing = None @@ -1024,9 +1048,9 @@ def _right_has_values_of_left(left, right): if list_val not in right_list: # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp') # fill in that default if absent and see if it is in right_list then - if isinstance(list_val, dict) and not list_val.get('protocol'): + if 
isinstance(list_val, dict) and not list_val.get("protocol"): modified_list_val = dict(list_val) - modified_list_val.update(protocol='tcp') + modified_list_val.update(protocol="tcp") if modified_list_val in right_list: continue else: @@ -1036,24 +1060,32 @@ def _right_has_values_of_left(left, right): for k, v in right.items(): if v and k not in left: # 'essential' defaults to True when not specified - if k == 'essential' and v is True: + if k == "essential" and v is True: pass else: return False return True - def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition): - if td['status'] != "ACTIVE": + def _task_definition_matches( + requested_volumes, + requested_containers, + requested_task_role_arn, + requested_launch_type, + existing_task_definition, + ): + if td["status"] != "ACTIVE": return None - if requested_task_role_arn != td.get('taskRoleArn', ""): + if requested_task_role_arn != td.get("taskRoleArn", ""): return None - if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []): + if requested_launch_type is not None and requested_launch_type not in td.get( + "requiresCompatibilities", [] + ): return None - existing_volumes = td.get('volumes', []) or [] + existing_volumes = td.get("volumes", []) or [] if len(requested_volumes) != len(existing_volumes): # Nope. @@ -1071,7 +1103,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ if not found: return None - existing_containers = td.get('containerDefinitions', []) or [] + existing_containers = td.get("containerDefinitions", []) or [] if len(requested_containers) != len(existing_containers): # Nope. @@ -1092,42 +1124,50 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested for td in existing_definitions_in_family: - requested_volumes = module.params['volumes'] or [] - requested_containers = module.params['containers'] or [] - requested_task_role_arn = module.params['task_role_arn'] - requested_launch_type = module.params['launch_type'] - existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td) + requested_volumes = module.params["volumes"] or [] + requested_containers = module.params["containers"] or [] + requested_task_role_arn = module.params["task_role_arn"] + requested_launch_type = module.params["launch_type"] + existing = _task_definition_matches( + requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td + ) if existing: break - if existing and not module.params.get('force_create'): + if existing and not module.params.get("force_create"): # Awesome. Have an existing one. Nothing to do. - results['taskdefinition'] = existing + results["taskdefinition"] = existing else: if not module.check_mode: # Doesn't exist. create it. 
- volumes = module.params.get('volumes', []) or [] - results['taskdefinition'] = task_mgr.register_task(module.params['family'], - module.params['task_role_arn'], - module.params['execution_role_arn'], - module.params['network_mode'], - module.params['containers'], - volumes, - module.params['launch_type'], - module.params['cpu'], - module.params['memory'], - module.params['placement_constraints'],) - results['changed'] = True - - elif module.params['state'] == 'absent': + volumes = module.params.get("volumes", []) or [] + results["taskdefinition"] = task_mgr.register_task( + module.params["family"], + module.params["task_role_arn"], + module.params["execution_role_arn"], + module.params["network_mode"], + module.params["containers"], + volumes, + module.params["launch_type"], + module.params["cpu"], + module.params["memory"], + module.params["placement_constraints"], + ) + results["changed"] = True + + elif module.params["state"] == "absent": # When de-registering a task definition, we can specify the ARN OR the family and revision. - if module.params['state'] == 'absent': - if 'arn' in module.params and module.params['arn'] is not None: - task_to_describe = module.params['arn'] - elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \ - module.params['revision'] is not None: - task_to_describe = module.params['family'] + ":" + str(module.params['revision']) + if module.params["state"] == "absent": + if "arn" in module.params and module.params["arn"] is not None: + task_to_describe = module.params["arn"] + elif ( + "family" in module.params + and module.params["family"] is not None + and "revision" in module.params + and module.params["revision"] is not None + ): + task_to_describe = module.params["family"] + ":" + str(module.params["revision"]) else: module.fail_json(msg="To use task definitions, an arn or family and revision must be specified") @@ -1137,16 +1177,16 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ pass else: # It exists, so we should delete it and mark changed. 
Return info about the task definition deleted - results['taskdefinition'] = existing - if 'status' in existing and existing['status'] == "INACTIVE": - results['changed'] = False + results["taskdefinition"] = existing + if "status" in existing and existing["status"] == "INACTIVE": + results["changed"] = False else: if not module.check_mode: task_mgr.deregister_task(task_to_describe) - results['changed'] = True + results["changed"] = True module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py index b619cd4c4be..5e235096d96 100644 --- a/ecs_taskdefinition_info.py +++ b/ecs_taskdefinition_info.py @@ -359,20 +359,20 @@ def main(): argument_spec = dict( - task_definition=dict(required=True, type='str') + task_definition=dict(required=True, type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - ecs = module.client('ecs') + ecs = module.client("ecs") try: - ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition'] + ecs_td = ecs.describe_task_definition(taskDefinition=module.params["task_definition"])["taskDefinition"] except botocore.exceptions.ClientError: ecs_td = {} module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/efs.py b/efs.py index c3d1bc6830a..c1d9f247b34 100644 --- a/efs.py +++ b/efs.py @@ -267,35 +267,34 @@ def _index_by_key(key, items): class EFSConnection(object): - DEFAULT_WAIT_TIMEOUT_SECONDS = 0 - STATE_CREATING = 'creating' - STATE_AVAILABLE = 'available' - STATE_DELETING = 'deleting' - STATE_DELETED = 'deleted' + STATE_CREATING = "creating" + STATE_AVAILABLE = "available" + STATE_DELETING = "deleting" + STATE_DELETED = "deleted" def __init__(self, module): - self.connection = module.client('efs') + self.connection = module.client("efs") region = module.region self.module = module self.region = region - self.wait = module.params.get('wait') - self.wait_timeout = module.params.get('wait_timeout') + self.wait = module.params.get("wait") + self.wait_timeout = module.params.get("wait_timeout") def get_file_systems(self, **kwargs): """ - Returns generator of file systems including all attributes of FS + Returns generator of file systems including all attributes of FS """ items = iterate_all( - 'FileSystems', + "FileSystems", self.connection.describe_file_systems, - **kwargs + **kwargs, ) for item in items: - item['Name'] = item['CreationToken'] - item['CreationTime'] = str(item['CreationTime']) + item["Name"] = item["CreationToken"] + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. 
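ecs_taskdefinition decides idempotency by checking that every requested container property is satisfied by an existing ACTIVE revision, first filling in the defaults AWS adds at registration time (protocol tcp on port mappings, essential True on containers) so that a plain equality test doesn't report false drift. A simplified sketch of that subset test; the helper names are illustrative, not the module's exact code:

# Defaults AWS injects into a registered definition, per the comments above.
DEFAULTS = {"protocol": "tcp", "essential": True}

def with_defaults(mapping):
    """Return a copy of mapping with the known AWS defaults filled in."""
    merged = dict(DEFAULTS)
    merged.update(mapping)
    return merged

def requested_matches_existing(requested, existing):
    """True when every requested key/value is present in the existing definition."""
    filled = with_defaults(existing)
    return all(filled.get(k) == v for k, v in with_defaults(requested).items() if v is not None)

# A port mapping without an explicit protocol still matches the registered one:
assert requested_matches_existing(
    {"containerPort": 8080, "hostPort": 8080},
    {"containerPort": 8080, "hostPort": 8080, "protocol": "tcp"},
)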
diff --git a/efs.py b/efs.py
index c3d1bc6830a..c1d9f247b34 100644
--- a/efs.py
+++ b/efs.py
@@ -267,35 +267,34 @@ def _index_by_key(key, items):
 class EFSConnection(object):
-
     DEFAULT_WAIT_TIMEOUT_SECONDS = 0

-    STATE_CREATING = 'creating'
-    STATE_AVAILABLE = 'available'
-    STATE_DELETING = 'deleting'
-    STATE_DELETED = 'deleted'
+    STATE_CREATING = "creating"
+    STATE_AVAILABLE = "available"
+    STATE_DELETING = "deleting"
+    STATE_DELETED = "deleted"

     def __init__(self, module):
-        self.connection = module.client('efs')
+        self.connection = module.client("efs")
         region = module.region

         self.module = module
         self.region = region

-        self.wait = module.params.get('wait')
-        self.wait_timeout = module.params.get('wait_timeout')
+        self.wait = module.params.get("wait")
+        self.wait_timeout = module.params.get("wait_timeout")

     def get_file_systems(self, **kwargs):
         """
-         Returns generator of file systems including all attributes of FS
+        Returns generator of file systems including all attributes of FS
         """
         items = iterate_all(
-            'FileSystems',
+            "FileSystems",
             self.connection.describe_file_systems,
-            **kwargs
+            **kwargs,
         )
         for item in items:
-            item['Name'] = item['CreationToken']
-            item['CreationTime'] = str(item['CreationTime'])
+            item["Name"] = item["CreationToken"]
+            item["CreationTime"] = str(item["CreationTime"])
             """
             In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
             AWS updated it and now there is no need to add a suffix.
             MountPoint is left for back-compatibility purpose
@@ -303,90 +302,92 @@ def get_file_systems(self, **kwargs):
             AWS documentation is available here:
             https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
             """
-            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
-            item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
-            if 'Timestamp' in item['SizeInBytes']:
-                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
-            if item['LifeCycleState'] == self.STATE_AVAILABLE:
-                item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
-                item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+            item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region)
+            item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region)
+            if "Timestamp" in item["SizeInBytes"]:
+                item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"])
+            if item["LifeCycleState"] == self.STATE_AVAILABLE:
+                item["Tags"] = self.get_tags(FileSystemId=item["FileSystemId"])
+                item["MountTargets"] = list(self.get_mount_targets(FileSystemId=item["FileSystemId"]))
             else:
-                item['Tags'] = {}
-                item['MountTargets'] = []
+                item["Tags"] = {}
+                item["MountTargets"] = []
             yield item

     def get_tags(self, **kwargs):
         """
-         Returns tag list for selected instance of EFS
+        Returns tag list for selected instance of EFS
         """
-        tags = self.connection.describe_tags(**kwargs)['Tags']
+        tags = self.connection.describe_tags(**kwargs)["Tags"]
         return tags

     def get_mount_targets(self, **kwargs):
         """
-         Returns mount targets for selected instance of EFS
+        Returns mount targets for selected instance of EFS
         """
         targets = iterate_all(
-            'MountTargets',
+            "MountTargets",
             self.connection.describe_mount_targets,
-            **kwargs
+            **kwargs,
         )
         for target in targets:
-            if target['LifeCycleState'] == self.STATE_AVAILABLE:
-                target['SecurityGroups'] = list(self.get_security_groups(
-                    MountTargetId=target['MountTargetId']
-                ))
+            if target["LifeCycleState"] == self.STATE_AVAILABLE:
+                target["SecurityGroups"] = list(self.get_security_groups(MountTargetId=target["MountTargetId"]))
             else:
-                target['SecurityGroups'] = []
+                target["SecurityGroups"] = []
             yield target

     def get_security_groups(self, **kwargs):
         """
-         Returns security groups for selected instance of EFS
+        Returns security groups for selected instance of EFS
         """
         return iterate_all(
-            'SecurityGroups',
+            "SecurityGroups",
            self.connection.describe_mount_target_security_groups,
-            **kwargs
+            **kwargs,
         )

     def get_file_system_id(self, name):
         """
-         Returns ID of instance by instance name
+        Returns ID of instance by instance name
         """
-        info = first_or_default(iterate_all(
-            'FileSystems',
-            self.connection.describe_file_systems,
-            CreationToken=name
-        ))
-        return info and info['FileSystemId'] or None
+        info = first_or_default(
+            iterate_all(
+                "FileSystems",
+                self.connection.describe_file_systems,
+                CreationToken=name,
+            )
+        )
+        return info and info["FileSystemId"] or None

     def get_file_system_state(self, name, file_system_id=None):
         """
-         Returns state of filesystem by EFS id/name
+        Returns state of filesystem by EFS id/name
         """
-        info = first_or_default(iterate_all(
-            'FileSystems',
-            self.connection.describe_file_systems,
-            CreationToken=name,
-            FileSystemId=file_system_id
-        ))
-        return info and info['LifeCycleState'] or self.STATE_DELETED
+        info = first_or_default(
+            iterate_all(
+                "FileSystems",
+                self.connection.describe_file_systems,
+                CreationToken=name,
+                FileSystemId=file_system_id,
+            )
+        )
+        return info and info["LifeCycleState"] or self.STATE_DELETED

     def get_mount_targets_in_state(self, file_system_id, states=None):
         """
-         Returns states of mount targets of selected EFS with selected state(s) (optional)
+        Returns states of mount targets of selected EFS with selected state(s) (optional)
         """
         targets = iterate_all(
-            'MountTargets',
+            "MountTargets",
             self.connection.describe_mount_targets,
-            FileSystemId=file_system_id
+            FileSystemId=file_system_id,
         )

         if states:
             if not isinstance(states, list):
                 states = [states]
-            targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+            targets = filter(lambda target: target["LifeCycleState"] in states, targets)

         return list(targets)
@@ -394,47 +395,53 @@ def get_throughput_mode(self, **kwargs):
         """
         Returns throughput mode for selected EFS instance
         """
-        info = first_or_default(iterate_all(
-            'FileSystems',
-            self.connection.describe_file_systems,
-            **kwargs
-        ))
+        info = first_or_default(
+            iterate_all(
+                "FileSystems",
+                self.connection.describe_file_systems,
+                **kwargs,
+            )
+        )

-        return info and info['ThroughputMode'] or None
+        return info and info["ThroughputMode"] or None

     def get_provisioned_throughput_in_mibps(self, **kwargs):
         """
         Returns throughput mode for selected EFS instance
         """
-        info = first_or_default(iterate_all(
-            'FileSystems',
-            self.connection.describe_file_systems,
-            **kwargs
-        ))
-        return info.get('ProvisionedThroughputInMibps', None)
+        info = first_or_default(
+            iterate_all(
+                "FileSystems",
+                self.connection.describe_file_systems,
+                **kwargs,
+            )
+        )
+        return info.get("ProvisionedThroughputInMibps", None)

-    def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps):
+    def create_file_system(
+        self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps
+    ):
         """
-         Creates new filesystem with selected name
+        Creates new filesystem with selected name
         """
         changed = False
         state = self.get_file_system_state(name)
         params = {}
-        params['CreationToken'] = name
-        params['PerformanceMode'] = performance_mode
+        params["CreationToken"] = name
+        params["PerformanceMode"] = performance_mode
         if encrypt:
-            params['Encrypted'] = encrypt
+            params["Encrypted"] = encrypt
         if kms_key_id is not None:
-            params['KmsKeyId'] = kms_key_id
+            params["KmsKeyId"] = kms_key_id
         if throughput_mode:
-            params['ThroughputMode'] = throughput_mode
+            params["ThroughputMode"] = throughput_mode
         if provisioned_throughput_in_mibps:
-            params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+            params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps

         if state in [self.STATE_DELETING, self.STATE_DELETED]:
             wait_for(
                 lambda: self.get_file_system_state(name),
-                self.STATE_DELETED
+                self.STATE_DELETED,
             )
             try:
                 self.connection.create_file_system(**params)
@@ -448,7 +455,7 @@ def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throug
             wait_for(
                 lambda: self.get_file_system_state(name),
                 self.STATE_AVAILABLE,
-                self.wait_timeout
+                self.wait_timeout,
             )

         return changed
@@ -465,14 +472,14 @@ def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mi
             current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id)
             params = dict()
             if throughput_mode and throughput_mode != current_mode:
-                params['ThroughputMode'] = throughput_mode
+                params["ThroughputMode"] = throughput_mode
             if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput:
-                params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+                params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps
             if len(params) > 0:
                 wait_for(
                     lambda: self.get_file_system_state(name),
                     self.STATE_AVAILABLE,
-                    self.wait_timeout
+                    self.wait_timeout,
                 )
                 try:
                     self.connection.update_file_system(FileSystemId=fs_id, **params)
@@ -490,11 +497,11 @@ def update_lifecycle_policy(self, name, transition_to_ia):
         if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
             fs_id = self.get_file_system_id(name)
             current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id)
-            if transition_to_ia == 'None':
+            if transition_to_ia == "None":
                 LifecyclePolicies = []
             else:
-                LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}]
-            if current_policies.get('LifecyclePolicies') != LifecyclePolicies:
+                LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}]
+            if current_policies.get("LifecyclePolicies") != LifecyclePolicies:
                 response = self.connection.put_lifecycle_configuration(
                     FileSystemId=fs_id,
                     LifecyclePolicies=LifecyclePolicies,
@@ -504,20 +511,19 @@ def update_lifecycle_policy(self, name, transition_to_ia):
     def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps):
         """
-         Change attributes (mount targets and tags) of filesystem by name
+        Change attributes (mount targets and tags) of filesystem by name
         """
         result = False
         fs_id = self.get_file_system_id(name)

         if tags is not None:
-            tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
+            tags_need_modify, tags_to_delete = compare_aws_tags(
+                boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags
+            )

             if tags_to_delete:
                 try:
-                    self.connection.delete_tags(
-                        FileSystemId=fs_id,
-                        TagKeys=tags_to_delete
-                    )
+                    self.connection.delete_tags(FileSystemId=fs_id, TagKeys=tags_to_delete)
                 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                     self.module.fail_json_aws(e, msg="Unable to delete tags.")
@@ -526,8 +532,7 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode,
             if tags_need_modify:
                 try:
                     self.connection.create_tags(
-                        FileSystemId=fs_id,
-                        Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
+                        FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
                     )
                 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                     self.module.fail_json_aws(e, msg="Unable to create tags.")
@@ -538,54 +543,56 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode,
             incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
             wait_for(
                 lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
-                0
+                0,
             )
-            current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
-            targets = _index_by_key('SubnetId', targets)
+            current_targets = _index_by_key("SubnetId", self.get_mount_targets(FileSystemId=fs_id))
+            targets = _index_by_key("SubnetId", targets)

-            targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
-                                                                           targets, True)
+            targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, targets, True)

             # To modify mount target it should be deleted and created again
-            changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
-                       current_targets[sid], targets[sid])]
+            changed = [
+                sid
+                for sid in intersection
+                if not targets_equal(
+                    ["SubnetId", "IpAddress", "NetworkInterfaceId"], current_targets[sid], targets[sid]
+                )
+            ]
             targets_to_delete = list(targets_to_delete) + changed
             targets_to_create = list(targets_to_create) + changed

             if targets_to_delete:
                 for sid in targets_to_delete:
-                    self.connection.delete_mount_target(
-                        MountTargetId=current_targets[sid]['MountTargetId']
-                    )
+                    self.connection.delete_mount_target(MountTargetId=current_targets[sid]["MountTargetId"])
                 wait_for(
                     lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
-                    0
+                    0,
                 )
                 result = True

             if targets_to_create:
                 for sid in targets_to_create:
-                    self.connection.create_mount_target(
-                        FileSystemId=fs_id,
-                        **targets[sid]
-                    )
+                    self.connection.create_mount_target(FileSystemId=fs_id, **targets[sid])
                 wait_for(
                     lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
                     0,
-                    self.wait_timeout
+                    self.wait_timeout,
                 )
                 result = True

             # If no security groups were passed into the module, then do not change it.
-            security_groups_to_update = [sid for sid in intersection if
-                                         'SecurityGroups' in targets[sid] and
-                                         current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
+            security_groups_to_update = [
+                sid
+                for sid in intersection
+                if "SecurityGroups" in targets[sid]
+                and current_targets[sid]["SecurityGroups"] != targets[sid]["SecurityGroups"]
+            ]

             if security_groups_to_update:
                 for sid in security_groups_to_update:
                     self.connection.modify_mount_target_security_groups(
-                        MountTargetId=current_targets[sid]['MountTargetId'],
-                        SecurityGroups=targets[sid].get('SecurityGroups', None)
+                        MountTargetId=current_targets[sid]["MountTargetId"],
+                        SecurityGroups=targets[sid].get("SecurityGroups", None),
                     )
                 result = True
@@ -593,14 +600,14 @@ def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode,
     def delete_file_system(self, name, file_system_id=None):
         """
-         Removes EFS instance by id/name
+        Removes EFS instance by id/name
         """
         result = False
         state = self.get_file_system_state(name, file_system_id)
         if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
             wait_for(
                 lambda: self.get_file_system_state(name),
-                self.STATE_AVAILABLE
+                self.STATE_AVAILABLE,
             )
             if not file_system_id:
                 file_system_id = self.get_file_system_id(name)
@@ -612,27 +619,27 @@ def delete_file_system(self, name, file_system_id=None):
             wait_for(
                 lambda: self.get_file_system_state(name),
                 self.STATE_DELETED,
-                self.wait_timeout
+                self.wait_timeout,
             )

         return result

     def delete_mount_targets(self, file_system_id):
         """
-         Removes mount targets by EFS id
+        Removes mount targets by EFS id
         """
         wait_for(
             lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
-            0
+            0,
         )

         targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
         for target in targets:
-            self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+            self.connection.delete_mount_target(MountTargetId=target["MountTargetId"])

         wait_for(
             lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
-            0
+            0,
         )

         return len(targets) > 0
@@ -640,7 +647,7 @@ def delete_mount_targets(self, file_system_id):
 def iterate_all(attr, map_method, **kwargs):
     """
-     Method creates iterator from result set
+    Method creates iterator from result set
     """
     args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
     wait = 1
@@ -649,11 +656,11 @@ def iterate_all(attr, map_method, **kwargs):
             data = map_method(**args)
             for elm in data[attr]:
                 yield elm
-            if 'NextMarker' in data:
-                args['Marker'] = data['Nextmarker']
+            if "NextMarker" in data:
+                args["Marker"] = data["NextMarker"]
                 continue
             break
-        except is_boto3_error_code('ThrottlingException'):
+        except is_boto3_error_code("ThrottlingException"):
             if wait < 600:
                 sleep(wait)
                 wait = wait * 2
@@ -664,7 +671,7 @@ def iterate_all(attr, map_method, **kwargs):
 def targets_equal(keys, a, b):
     """
-     Method compare two mount targets by specified attributes
+    Method compare two mount targets by specified attributes
     """
     for key in keys:
         if key in b and a[key] != b[key]:
@@ -675,7 +682,7 @@ def targets_equal(keys, a, b):
 def dict_diff(dict1, dict2, by_key=False):
     """
-     Helper method to calculate difference of two dictionaries
+    Helper method to calculate difference of two dictionaries
     """
     keys1 = set(dict1.keys() if by_key else dict1.items())
     keys2 = set(dict2.keys() if by_key else dict2.items())
@@ -687,7 +694,7 @@ def dict_diff(dict1, dict2, by_key=False):
 def first_or_default(items, default=None):
     """
-     Helper method to fetch first element of list (if exists)
+    Helper method to fetch first element of list (if exists)
     """
     for item in items:
         return item
@@ -696,13 +703,13 @@ def first_or_default(items, default=None):
 def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS
     """
-     Helper method to wait for desired value returned by callback method
+    Helper method to wait for desired value returned by callback method
     """
     wait_start = timestamp()
     while True:
         if callback() != value:
             if timeout != 0 and (timestamp() - wait_start > timeout):
-                raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+                raise RuntimeError("Wait timeout exceeded (" + str(timeout) + " sec)")
             else:
                 sleep(5)
             continue
@@ -711,67 +718,82 @@ def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS
 def main():
     """
-     Module action handler
+    Module action handler
     """
     argument_spec = dict(
         encrypt=dict(required=False, type="bool", default=False),
-        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
-        kms_key_id=dict(required=False, type='str', default=None),
-        purge_tags=dict(default=True, type='bool'),
-        id=dict(required=False, type='str', default=None),
-        name=dict(required=False, type='str', default=None),
-        tags=dict(required=False, type="dict", aliases=['resource_tags']),
-        targets=dict(required=False, type="list", default=[], elements='dict'),
-        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
-        transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None),
-        throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
-        provisioned_throughput_in_mibps=dict(required=False, type='float'),
+        state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
+        kms_key_id=dict(required=False, type="str", default=None),
+        purge_tags=dict(default=True, type="bool"),
+        id=dict(required=False, type="str", default=None),
+        name=dict(required=False, type="str", default=None),
+        tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+        targets=dict(required=False, type="list", default=[], elements="dict"),
+        performance_mode=dict(
+            required=False, type="str", choices=["general_purpose", "max_io"], default="general_purpose"
+        ),
+        transition_to_ia=dict(required=False, type="str", choices=["None", "7", "14", "30", "60", "90"], default=None),
+        throughput_mode=dict(required=False, type="str", choices=["bursting", "provisioned"], default=None),
+        provisioned_throughput_in_mibps=dict(required=False, type="float"),
         wait=dict(required=False, type="bool", default=False),
-        wait_timeout=dict(required=False, type="int", default=0)
+        wait_timeout=dict(required=False, type="int", default=0),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec)

     connection = EFSConnection(module)

-    name = module.params.get('name')
-    fs_id = module.params.get('id')
-    tags = module.params.get('tags')
+    name = module.params.get("name")
+    fs_id = module.params.get("id")
+    tags = module.params.get("tags")
     target_translations = {
-        'ip_address': 'IpAddress',
-        'security_groups': 'SecurityGroups',
-        'subnet_id': 'SubnetId'
+        "ip_address": "IpAddress",
+        "security_groups": "SecurityGroups",
+        "subnet_id": "SubnetId",
     }
-    targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+    targets = [
+        dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get("targets")
+    ]
     performance_mode_translations = {
-        'general_purpose': 'generalPurpose',
-        'max_io': 'maxIO'
+        "general_purpose": "generalPurpose",
+        "max_io": "maxIO",
     }
-    encrypt = module.params.get('encrypt')
-    kms_key_id = module.params.get('kms_key_id')
-    performance_mode = performance_mode_translations[module.params.get('performance_mode')]
-    purge_tags = module.params.get('purge_tags')
-    transition_to_ia = module.params.get('transition_to_ia')
-    throughput_mode = module.params.get('throughput_mode')
-    provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
-    state = str(module.params.get('state')).lower()
+    encrypt = module.params.get("encrypt")
+    kms_key_id = module.params.get("kms_key_id")
+    performance_mode = performance_mode_translations[module.params.get("performance_mode")]
+    purge_tags = module.params.get("purge_tags")
+    transition_to_ia = module.params.get("transition_to_ia")
+    throughput_mode = module.params.get("throughput_mode")
+    provisioned_throughput_in_mibps = module.params.get("provisioned_throughput_in_mibps")
+    state = str(module.params.get("state")).lower()
     changed = False

-    if state == 'present':
+    if state == "present":
         if not name:
-            module.fail_json(msg='Name parameter is required for create')
+            module.fail_json(msg="Name parameter is required for create")

-        changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
+        changed = connection.create_file_system(
+            name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps
+        )
         changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
-        changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
-                                                  throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
+        changed = (
+            connection.converge_file_system(
+                name=name,
+                tags=tags,
+                purge_tags=purge_tags,
+                targets=targets,
+                throughput_mode=throughput_mode,
+                provisioned_throughput_in_mibps=provisioned_throughput_in_mibps,
+            )
+            or changed
+        )
         if transition_to_ia:
             changed |= connection.update_lifecycle_policy(name, transition_to_ia)
         result = first_or_default(connection.get_file_systems(CreationToken=name))

-    elif state == 'absent':
+    elif state == "absent":
         if not name and not fs_id:
-            module.fail_json(msg='Either name or id parameter is required for delete')
+            module.fail_json(msg="Either name or id parameter is required for delete")

         changed = connection.delete_file_system(name, fs_id)
         result = None
@@ -780,5 +802,5 @@ def main():
     module.exit_json(changed=changed, efs=result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
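efs.py pages through API results by hand in iterate_all() instead of using a boto3 paginator, following NextMarker until none is returned and doubling a sleep interval on throttling. A standalone sketch of that loop under the same assumptions; describe_tags is just one paginated call it could wrap, and the error handling is condensed:

import time

import boto3
import botocore.exceptions

efs = boto3.client("efs")

def iterate_tags(file_system_id):
    args = {"FileSystemId": file_system_id}
    wait = 1
    while True:
        try:
            data = efs.describe_tags(**args)
            for tag in data["Tags"]:
                yield tag
            if "NextMarker" in data:
                # Keep paging from where the previous response left off.
                args["Marker"] = data["NextMarker"]
                continue
            break
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] != "ThrottlingException" or wait >= 600:
                raise
            # Exponential backoff, capped at roughly ten minutes of waiting.
            time.sleep(wait)
            wait *= 2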
item["life_cycle_state"] == self.STATE_AVAILABLE: try: - mount_targets = self.get_mount_targets(item['file_system_id']) + mount_targets = self.get_mount_targets(item["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS targets") for mt in mount_targets: - item['mount_targets'].append(camel_dict_to_snake_dict(mt)) + item["mount_targets"].append(camel_dict_to_snake_dict(mt)) return file_systems def get_security_groups_data(self, file_systems): for item in file_systems: - if item['life_cycle_state'] == self.STATE_AVAILABLE: - for target in item['mount_targets']: - if target['life_cycle_state'] == self.STATE_AVAILABLE: + if item["life_cycle_state"] == self.STATE_AVAILABLE: + for target in item["mount_targets"]: + if target["life_cycle_state"] == self.STATE_AVAILABLE: try: - target['security_groups'] = self.get_security_groups(target['mount_target_id']) + target["security_groups"] = self.get_security_groups(target["mount_target_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS security groups") else: - target['security_groups'] = [] + target["security_groups"] = [] else: - item['tags'] = {} - item['mount_targets'] = [] + item["tags"] = {} + item["mount_targets"] = [] return file_systems def get_file_systems(self, file_system_id=None, creation_token=None): kwargs = dict() if file_system_id: - kwargs['FileSystemId'] = file_system_id + kwargs["FileSystemId"] = file_system_id if creation_token: - kwargs['CreationToken'] = creation_token + kwargs["CreationToken"] = creation_token try: file_systems = self.list_file_systems(**kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -270,7 +272,7 @@ def get_file_systems(self, file_system_id=None, creation_token=None): results = list() for item in file_systems: - item['CreationTime'] = str(item['CreationTime']) + item["CreationTime"] = str(item["CreationTime"]) """ In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it AWS updated it and now there is no need to add a suffix. 
MountPoint is left for back-compatibility purpose @@ -278,18 +280,18 @@ def get_file_systems(self, file_system_id=None, creation_token=None): AWS documentation is available here: U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) """ - item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) - item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - if 'Timestamp' in item['SizeInBytes']: - item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + if "Timestamp" in item["SizeInBytes"]: + item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) result = camel_dict_to_snake_dict(item) - result['tags'] = {} - result['mount_targets'] = [] + result["tags"] = {} + result["mount_targets"] = [] # Set tags *after* doing camel to snake - if result['life_cycle_state'] == self.STATE_AVAILABLE: + if result["life_cycle_state"] == self.STATE_AVAILABLE: try: - result['tags'] = self.get_tags(result['file_system_id']) + result["tags"] = self.get_tags(result["file_system_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get EFS tags") results.append(result) @@ -301,13 +303,14 @@ def prefix_to_attr(attr_id): Helper method to convert ID prefix to mount target attribute """ attr_by_prefix = { - 'fsmt-': 'mount_target_id', - 'subnet-': 'subnet_id', - 'eni-': 'network_interface_id', - 'sg-': 'security_groups' + "fsmt-": "mount_target_id", + "subnet-": "subnet_id", + "eni-": "network_interface_id", + "sg-": "security_groups", } - return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items() - if str(attr_id).startswith(prefix)], 'ip_address') + return first_or_default( + [attr_name for (prefix, attr_name) in attr_by_prefix.items() if str(attr_id).startswith(prefix)], "ip_address" + ) def first_or_default(items, default=None): @@ -334,7 +337,7 @@ def has_targets(available, required): Helper method to determine if mount target requested already exists """ grouped = group_list_of_dict(available) - for (value, field) in required: + for value, field in required: if field not in grouped or value not in grouped[field]: return False return True @@ -357,35 +360,34 @@ def main(): """ argument_spec = dict( id=dict(), - name=dict(aliases=['creation_token']), + name=dict(aliases=["creation_token"]), tags=dict(type="dict", default={}), - targets=dict(type="list", default=[], elements='str') + targets=dict(type="list", default=[], elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) connection = EFSConnection(module) - name = module.params.get('name') - fs_id = module.params.get('id') - tags = module.params.get('tags') - targets = module.params.get('targets') + name = module.params.get("name") + fs_id = module.params.get("id") + tags = module.params.get("tags") + targets = module.params.get("targets") file_systems_info = connection.get_file_systems(fs_id, name) if tags: - file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)] + file_systems_info = [item for item in file_systems_info if has_tags(item["tags"], tags)] file_systems_info = 
connection.get_mount_targets_data(file_systems_info) file_systems_info = connection.get_security_groups_data(file_systems_info) if targets: targets = [(item, prefix_to_attr(item)) for item in targets] - file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)] + file_systems_info = [item for item in file_systems_info if has_targets(item["mount_targets"], targets)] module.exit_json(changed=False, efs=file_systems_info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/efs_tag.py b/efs_tag.py index 10978c5bf2f..80eb5cc7b9c 100644 --- a/efs_tag.py +++ b/efs_tag.py @@ -112,35 +112,35 @@ def get_tags(efs, module, resource): - ''' + """ Get resource tags - ''' + """ try: - return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags']) + return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource)) + module.fail_json_aws(get_tags_error, msg="Failed to fetch tags for resource {0}".format(resource)) def main(): - ''' + """ MAIN - ''' + """ argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True, aliases=['resource_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']) + tags=dict(type="dict", required=True, aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff()) + efs = module.client("efs", retry_decorator=AWSRetry.jittered_backoff()) current_tags = get_tags(efs, module, resource) @@ -148,7 +148,7 @@ def main(): remove_tags = {} - if state == 'absent': + if state == "absent": for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): remove_tags[key] = current_tags[key] @@ -157,28 +157,32 @@ def main(): remove_tags[key] = current_tags[key] if remove_tags: - result['changed'] = True - result['removed_tags'] = remove_tags + result["changed"] = True + result["removed_tags"] = remove_tags if not module.check_mode: try: efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as remove_tag_error: - module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource)) + module.fail_json_aws( + remove_tag_error, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource) + ) - if state == 'present' and add_tags: - result['changed'] = True - result['added_tags'] = add_tags + if state == "present" and add_tags: + result["changed"] = True + result["added_tags"] = add_tags current_tags.update(add_tags) if not module.check_mode: try: tags = ansible_dict_to_boto3_tag_list(add_tags) efs.tag_resource(aws_retry=True, ResourceId=resource, 
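efs_info classifies each requested target filter by the prefix of the ID it was given, falling back to ip_address for anything unrecognised. A plain restatement of that dispatch with a couple of illustrative checks:

def prefix_to_attr(attr_id):
    # Known EFS-related ID prefixes and the mount-target attribute they select.
    attr_by_prefix = {
        "fsmt-": "mount_target_id",
        "subnet-": "subnet_id",
        "eni-": "network_interface_id",
        "sg-": "security_groups",
    }
    for prefix, attr_name in attr_by_prefix.items():
        if str(attr_id).startswith(prefix):
            return attr_name
    return "ip_address"

assert prefix_to_attr("subnet-0123456789abcdef0") == "subnet_id"
assert prefix_to_attr("10.0.0.24") == "ip_address"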
diff --git a/efs_tag.py b/efs_tag.py
index 10978c5bf2f..80eb5cc7b9c 100644
--- a/efs_tag.py
+++ b/efs_tag.py
@@ -112,35 +112,35 @@
 def get_tags(efs, module, resource):
-    '''
+    """
     Get resource tags
-    '''
+    """
     try:
-        return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags'])
+        return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"])
     except (BotoCoreError, ClientError) as get_tags_error:
-        module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource))
+        module.fail_json_aws(get_tags_error, msg="Failed to fetch tags for resource {0}".format(resource))


 def main():
-    '''
+    """
     MAIN
-    '''
+    """
    argument_spec = dict(
         resource=dict(required=True),
-        tags=dict(type='dict', required=True, aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=False),
-        state=dict(default='present', choices=['present', 'absent'])
+        tags=dict(type="dict", required=True, aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=False),
+        state=dict(default="present", choices=["present", "absent"]),
     )

     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

-    resource = module.params['resource']
-    tags = module.params['tags']
-    state = module.params['state']
-    purge_tags = module.params['purge_tags']
+    resource = module.params["resource"]
+    tags = module.params["tags"]
+    state = module.params["state"]
+    purge_tags = module.params["purge_tags"]

-    result = {'changed': False}
+    result = {"changed": False}

-    efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff())
+    efs = module.client("efs", retry_decorator=AWSRetry.jittered_backoff())

     current_tags = get_tags(efs, module, resource)
@@ -148,7 +148,7 @@ def main():

     remove_tags = {}

-    if state == 'absent':
+    if state == "absent":
         for key in tags:
             if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
                 remove_tags[key] = current_tags[key]
@@ -157,28 +157,32 @@ def main():
             remove_tags[key] = current_tags[key]

     if remove_tags:
-        result['changed'] = True
-        result['removed_tags'] = remove_tags
+        result["changed"] = True
+        result["removed_tags"] = remove_tags
         if not module.check_mode:
             try:
                 efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys()))
             except (BotoCoreError, ClientError) as remove_tag_error:
-                module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+                module.fail_json_aws(
+                    remove_tag_error, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource)
+                )

-    if state == 'present' and add_tags:
-        result['changed'] = True
-        result['added_tags'] = add_tags
+    if state == "present" and add_tags:
+        result["changed"] = True
+        result["added_tags"] = add_tags
         current_tags.update(add_tags)
         if not module.check_mode:
             try:
                 tags = ansible_dict_to_boto3_tag_list(add_tags)
                 efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags)
             except (BotoCoreError, ClientError) as set_tag_error:
-                module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+                module.fail_json_aws(
+                    set_tag_error, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource)
+                )

-    result['tags'] = get_tags(efs, module, resource)
+    result["tags"] = get_tags(efs, module, resource)
     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
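efs_tag.py and the EKS modules below reduce tagging to the same reconciliation step: diff the current tags against the desired tags, honouring a purge flag, then issue one untag and one tag call. A minimal pure-Python sketch of that computation (an illustration, not the collection's compare_aws_tags helper itself):

def compare_tags(current, desired, purge):
    # Tags to (re)apply: missing keys or keys whose value differs.
    to_add = {k: v for k, v in desired.items() if current.get(k) != v}
    # Tags to strip, but only when purging unmanaged keys is requested.
    to_remove = [k for k in current if purge and k not in desired]
    return to_add, to_remove

to_add, to_remove = compare_tags(
    current={"env": "dev", "owner": "infra"},
    desired={"env": "prod"},
    purge=True,
)
assert to_add == {"env": "prod"} and to_remove == ["owner"]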
"cluster_active") # Ensure that fields that are only available for active clusters are # included in the returned value cluster = get_cluster(client, module) @@ -234,44 +233,47 @@ def ensure_present(client, module): def ensure_absent(client, module): - name = module.params.get('name') + name = module.params.get("name") existing = get_cluster(client, module) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing: module.exit_json(changed=False) if not module.check_mode: try: - client.delete_cluster(name=module.params['name']) + client.delete_cluster(name=module.params["name"]) except botocore.exceptions.EndpointConnectionError as e: module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) if wait: - wait_until(client, module, 'cluster_deleted') + wait_until(client, module, "cluster_deleted") module.exit_json(changed=True) def get_cluster(client, module): - name = module.params.get('name') + name = module.params.get("name") try: - return client.describe_cluster(name=name)['cluster'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_cluster(name=name)["cluster"] + except is_boto3_error_code("ResourceNotFoundException"): return None except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) -def wait_until(client, module, waiter_name='cluster_active'): - name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') +def wait_until(client, module, waiter_name="cluster_active"): + name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) - waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(name=name, WaiterConfig={"MaxAttempts": attempts}) def main(): @@ -279,27 +281,27 @@ def main(): name=dict(required=True), version=dict(), role_arn=dict(), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict', required=False), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict", required=False), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]], + required_if=[["state", "present", ["role_arn", "subnets", "security_groups"]]], supports_check_mode=True, ) - client = module.client('eks') + client = module.client("eks") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": ensure_present(client, module) else: ensure_absent(client, 
module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index abc4dd09f90..71a632a2223 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -180,58 +180,58 @@ def validate_tags(client, module, fargate_profile): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to list or compare tags for Fargate Profile %s" % module.params.get("name")) if tags_to_remove: changed = True if not module.check_mode: try: - client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) + client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) if tags_to_add: changed = True if not module.check_mode: try: - client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) + client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) return changed def create_or_update_fargate_profile(client, module): - name = module.params.get('name') - subnets = module.params['subnets'] - role_arn = module.params['role_arn'] - cluster_name = module.params['cluster_name'] - selectors = module.params['selectors'] - tags = module.params['tags'] or {} - wait = module.params.get('wait') + name = module.params.get("name") + subnets = module.params["subnets"] + role_arn = module.params["role_arn"] + cluster_name = module.params["cluster_name"] + selectors = module.params["selectors"] + tags = module.params["tags"] or {} + wait = module.params.get("wait") fargate_profile = get_fargate_profile(client, module, name, cluster_name) if fargate_profile: changed = False - if set(fargate_profile['podExecutionRoleArn']) != set(role_arn): + if fargate_profile["podExecutionRoleArn"] != role_arn: module.fail_json(msg="Cannot modify Execution Role") - if set(fargate_profile['subnets']) != set(subnets): + if set(fargate_profile["subnets"]) != set(subnets): module.fail_json(msg="Cannot modify Subnets") - if fargate_profile['selectors'] != selectors: + if fargate_profile["selectors"] != selectors: module.fail_json(msg="Cannot modify Selectors") changed = validate_tags(client, module, fargate_profile) if wait: -
wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile)) @@ -242,29 +242,30 @@ def create_or_update_fargate_profile(client, module): check_profiles_status(client, module, cluster_name) try: - params = dict(fargateProfileName=name, - podExecutionRoleArn=role_arn, - subnets=subnets, - clusterName=cluster_name, - selectors=selectors, - tags=tags - ) + params = dict( + fargateProfileName=name, + podExecutionRoleArn=role_arn, + subnets=subnets, + clusterName=cluster_name, + selectors=selectors, + tags=tags, + ) fargate_profile = client.create_fargate_profile(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) if wait: - wait_until(client, module, 'fargate_profile_active', name, cluster_name) + wait_until(client, module, "fargate_profile_active", name, cluster_name) fargate_profile = get_fargate_profile(client, module, name, cluster_name) module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile)) def delete_fargate_profile(client, module): - name = module.params.get('name') - cluster_name = module.params['cluster_name'] + name = module.params.get("name") + cluster_name = module.params["cluster_name"] existing = get_fargate_profile(client, module, name, cluster_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if not existing or existing["status"] == "DELETING": module.exit_json(changed=False) @@ -276,17 +277,20 @@ def delete_fargate_profile(client, module): module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) if wait: - wait_until(client, module, 'fargate_profile_deleted', name, cluster_name) + wait_until(client, module, "fargate_profile_deleted", name, cluster_name) module.exit_json(changed=True) def get_fargate_profile(client, module, name, cluster_name): try: - return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)["fargateProfile"] + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get fargate profile") @@ -297,20 +301,24 @@ def check_profiles_status(client, module, cluster_name): for name in list_profiles["fargateProfileNames"]: fargate_profile = get_fargate_profile(client, module, name, cluster_name) - if fargate_profile["status"] == 'CREATING': - wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name) - elif fargate_profile["status"] == 'DELETING': - wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name) + if fargate_profile["status"] == "CREATING": + wait_until( + client, module, "fargate_profile_active", fargate_profile["fargateProfileName"], cluster_name + ) + elif fargate_profile["status"] == "DELETING": + wait_until( + client, module, "fargate_profile_deleted", 
fargate_profile["fargateProfileName"], cluster_name + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't find EKS cluster") def wait_until(client, module, waiter_name, name, cluster_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) try: - waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="An error occurred waiting") @@ -320,34 +328,38 @@ def main(): name=dict(required=True), cluster_name=dict(required=True), role_arn=dict(), - subnets=dict(type='list', elements='str'), - selectors=dict(type='list', elements='dict', options=dict( - namespace=dict(type='str'), - labels=dict(type='dict', default={}) - )), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['absent', 'present'], default='present'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + subnets=dict(type="list", elements="str"), + selectors=dict( + type="list", + elements="dict", + options=dict( + namespace=dict(type="str"), + labels=dict(type="dict", default={}), + ), + ), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["absent", "present"], default="present"), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]], + required_if=[["state", "present", ["role_arn", "subnets", "selectors"]]], supports_check_mode=True, ) try: - client = module.client('eks') + client = module.client("eks") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't connect to AWS") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": create_or_update_fargate_profile(client, module) else: delete_fargate_profile(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/eks_nodegroup.py b/eks_nodegroup.py index f6ce192c96c..6704af1af09 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -362,29 +362,29 @@ def validate_tags(client, module, nodegroup): changed = False - desired_tags = module.params.get('tags') + desired_tags = module.params.get("tags") if desired_tags is None: return False try: - existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags'] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags')) + existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to list or compare tags for Nodegroup %s."
% module.params.get("name")) if tags_to_remove: if not module.check_mode: changed = True try: - client.untag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove) + client.untag_resource(resourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) if tags_to_add: if not module.check_mode: changed = True try: - client.tag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tags=tags_to_add) + client.tag_resource(resourceArn=nodegroup["nodegroupArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) return changed @@ -405,24 +405,24 @@ def compare_taints(nodegroup_taints, param_taints): def validate_taints(client, module, nodegroup, param_taints): changed = False params = dict() - params['clusterName'] = nodegroup['clusterName'] - params['nodegroupName'] = nodegroup['nodegroupName'] - params['taints'] = [] - if 'taints' not in nodegroup: - nodegroup['taints'] = [] - taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints) + params["clusterName"] = nodegroup["clusterName"] + params["nodegroupName"] = nodegroup["nodegroupName"] + params["taints"] = {} + if "taints" not in nodegroup: + nodegroup["taints"] = [] + taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup["taints"], param_taints) if taints_to_add_or_update: - params['taints']['addOrUpdateTaints'] = taints_to_add_or_update + params["taints"]["addOrUpdateTaints"] = taints_to_add_or_update if taints_to_unset: - params['taints']['removeTaints'] = taints_to_unset - if params['taints']: + params["taints"]["removeTaints"] = taints_to_unset + if params["taints"]: if not module.check_mode: changed = True try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName']) + module.fail_json_aws(e, msg="Unable to set taints for Nodegroup %s."
% params["nodegroupName"]) return changed @@ -443,109 +443,114 @@ def compare_labels(nodegroup_labels, param_labels): def validate_labels(client, module, nodegroup, param_labels): changed = False params = dict() - params['clusterName'] = nodegroup['clusterName'] - params['nodegroupName'] = nodegroup['nodegroupName'] - params['labels'] = {} - labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels) + params["clusterName"] = nodegroup["clusterName"] + params["nodegroupName"] = nodegroup["nodegroupName"] + params["labels"] = {} + labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup["labels"], param_labels) if labels_to_add_or_update: - params['labels']['addOrUpdateLabels'] = labels_to_add_or_update + params["labels"]["addOrUpdateLabels"] = labels_to_add_or_update if labels_to_unset: - params['labels']['removeLabels'] = labels_to_unset - if params['labels']: + params["labels"]["removeLabels"] = labels_to_unset + if params["labels"]: if not module.check_mode: changed = True try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName']) + module.fail_json_aws(e, msg="Unable to set labels for Nodegroup %s." % params["nodegroupName"]) return changed def compare_params(module, params, nodegroup): - for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiTypes', 'remoteAccess', 'capacityType']: + for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiType", "remoteAccess", "capacityType"]: if (param in nodegroup) and (param in params): - if (nodegroup[param] != params[param]): + if nodegroup[param] != params[param]: module.fail_json(msg="Cannot modify parameter %s." % param) - if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params): + if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params): module.fail_json(msg="Cannot add Launch Template in this Nodegroup.") - if nodegroup['updateConfig'] != params['updateConfig']: + if nodegroup["updateConfig"] != params["updateConfig"]: return True - if nodegroup['scalingConfig'] != params['scalingConfig']: + if nodegroup["scalingConfig"] != params["scalingConfig"]: return True return False def compare_params_launch_template(module, params, nodegroup): - if 'launchTemplate' not in params: + if "launchTemplate" not in params: module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.") else: - for key in ['name', 'id']: - if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]): + for key in ["name", "id"]: + if (key in params["launchTemplate"]) and ( + params["launchTemplate"][key] != nodegroup["launchTemplate"][key] + ): module.fail_json(msg="Cannot modify Launch Template %s."
% key) - if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']): + if ("version" in params["launchTemplate"]) and ( + params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] + ): return True return False def create_or_update_nodegroups(client, module): - changed = False params = dict() - params['nodegroupName'] = module.params['name'] - params['clusterName'] = module.params['cluster_name'] - params['nodeRole'] = module.params['node_role'] - params['subnets'] = module.params['subnets'] - params['tags'] = module.params['tags'] or {} - if module.params['ami_type'] is not None: - params['amiType'] = module.params['ami_type'] - if module.params['disk_size'] is not None: - params['diskSize'] = module.params['disk_size'] - if module.params['instance_types'] is not None: - params['instanceTypes'] = module.params['instance_types'] - if module.params['launch_template'] is not None: - params['launchTemplate'] = dict() - if module.params['launch_template']['id'] is not None: - params['launchTemplate']['id'] = module.params['launch_template']['id'] - if module.params['launch_template']['version'] is not None: - params['launchTemplate']['version'] = module.params['launch_template']['version'] - if module.params['launch_template']['name'] is not None: - params['launchTemplate']['name'] = module.params['launch_template']['name'] - if module.params['release_version'] is not None: - params['releaseVersion'] = module.params['release_version'] - if module.params['remote_access'] is not None: - params['remoteAccess'] = dict() - if module.params['remote_access']['ec2_ssh_key'] is not None: - params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] - if module.params['remote_access']['source_sg'] is not None: - params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] - if module.params['capacity_type'] is not None: - params['capacityType'] = module.params['capacity_type'].upper() - if module.params['labels'] is not None: - params['labels'] = module.params['labels'] - if module.params['taints'] is not None: - params['taints'] = module.params['taints'] - if module.params['update_config'] is not None: - params['updateConfig'] = dict() - if module.params['update_config']['max_unavailable'] is not None: - params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable'] - if module.params['update_config']['max_unavailable_percentage'] is not None: - params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage'] - if module.params['scaling_config'] is not None: - params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config']) - - wait = module.params.get('wait') - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + params["nodegroupName"] = module.params["name"] + params["clusterName"] = module.params["cluster_name"] + params["nodeRole"] = module.params["node_role"] + params["subnets"] = module.params["subnets"] + params["tags"] = module.params["tags"] or {} + if module.params["ami_type"] is not None: + params["amiType"] = module.params["ami_type"] + if module.params["disk_size"] is not None: + params["diskSize"] = module.params["disk_size"] + if module.params["instance_types"] is not None: + params["instanceTypes"] = module.params["instance_types"] + if module.params["launch_template"] is not None: + params["launchTemplate"] = 
dict() + if module.params["launch_template"]["id"] is not None: + params["launchTemplate"]["id"] = module.params["launch_template"]["id"] + if module.params["launch_template"]["version"] is not None: + params["launchTemplate"]["version"] = module.params["launch_template"]["version"] + if module.params["launch_template"]["name"] is not None: + params["launchTemplate"]["name"] = module.params["launch_template"]["name"] + if module.params["release_version"] is not None: + params["releaseVersion"] = module.params["release_version"] + if module.params["remote_access"] is not None: + params["remoteAccess"] = dict() + if module.params["remote_access"]["ec2_ssh_key"] is not None: + params["remoteAccess"]["ec2SshKey"] = module.params["remote_access"]["ec2_ssh_key"] + if module.params["remote_access"]["source_sg"] is not None: + params["remoteAccess"]["sourceSecurityGroups"] = module.params["remote_access"]["source_sg"] + if module.params["capacity_type"] is not None: + params["capacityType"] = module.params["capacity_type"].upper() + if module.params["labels"] is not None: + params["labels"] = module.params["labels"] + if module.params["taints"] is not None: + params["taints"] = module.params["taints"] + if module.params["update_config"] is not None: + params["updateConfig"] = dict() + if module.params["update_config"]["max_unavailable"] is not None: + params["updateConfig"]["maxUnavailable"] = module.params["update_config"]["max_unavailable"] + if module.params["update_config"]["max_unavailable_percentage"] is not None: + params["updateConfig"]["maxUnavailablePercentage"] = module.params["update_config"][ + "max_unavailable_percentage" + ] + if module.params["scaling_config"] is not None: + params["scalingConfig"] = snake_dict_to_camel_dict(module.params["scaling_config"]) + + wait = module.params.get("wait") + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) if nodegroup: update_params = dict() - update_params['clusterName'] = params['clusterName'] - update_params['nodegroupName'] = params['nodegroupName'] + update_params["clusterName"] = params["clusterName"] + update_params["nodegroupName"] = params["nodegroupName"] - if 'launchTemplate' in nodegroup: + if "launchTemplate" in nodegroup: if compare_params_launch_template(module, params, nodegroup): - update_params['launchTemplate'] = params['launchTemplate'] + update_params["launchTemplate"] = params["launchTemplate"] if not module.check_mode: try: client.update_nodegroup_version(**update_params) @@ -555,10 +560,10 @@ def create_or_update_nodegroups(client, module): if compare_params(module, params, nodegroup): try: - if 'launchTemplate' in update_params: - update_params.pop('launchTemplate') - update_params['scalingConfig'] = params['scalingConfig'] - update_params['updateConfig'] = params['updateConfig'] + if "launchTemplate" in update_params: + update_params.pop("launchTemplate") + update_params["scalingConfig"] = params["scalingConfig"] + update_params["updateConfig"] = params["updateConfig"] if not module.check_mode: client.update_nodegroup_config(**update_params) @@ -570,15 +575,15 @@ def create_or_update_nodegroups(client, module): changed |= validate_tags(client, module, nodegroup) - changed |= validate_labels(client, module, nodegroup, params['labels']) + changed |= validate_labels(client, module, nodegroup, params["labels"]) - if 'taints' in nodegroup: - changed |= validate_taints(client, module, nodegroup, params['taints']) + if "taints" in nodegroup: + changed |= validate_taints(client, 
module, nodegroup, params["taints"]) if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup)) @@ -588,22 +593,22 @@ def create_or_update_nodegroups(client, module): try: nodegroup = client.create_nodegroup(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName']) + module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params["nodegroupName"]) if wait: - wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName']) - nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName']) + wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) + nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup)) def delete_nodegroups(client, module): - name = module.params.get('name') - clusterName = module.params['cluster_name'] + name = module.params.get("name") + clusterName = module.params["cluster_name"] existing = get_nodegroup(client, module, name, clusterName) - wait = module.params.get('wait') - if not existing or existing['status'] == 'DELETING': - module.exit_json(changed=False, msg='Nodegroup not exists or in DELETING status.') + wait = module.params.get("wait") + if not existing or existing["status"] == "DELETING": + module.exit_json(changed=False, msg="Nodegroup does not exist or is in DELETING status.") if not module.check_mode: try: client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) @@ -611,104 +616,138 @@ def delete_nodegroups(client, module): module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name) if wait: - wait_until(client, module, 'nodegroup_deleted', name, clusterName) + wait_until(client, module, "nodegroup_deleted", name, clusterName) module.exit_json(changed=True) def get_nodegroup(client, module, nodegroup_name, cluster_name): try: - return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup'] - except is_boto3_error_code('ResourceNotFoundException'): + return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)["nodegroup"] + except is_boto3_error_code("ResourceNotFoundException"): return None - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get Nodegroup %s."
% nodegroup_name) def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) try: - waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts}) + waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={"MaxAttempts": attempts}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="An error occurred waiting") def main(): argument_spec = dict( - name=dict(type='str', required=True), - cluster_name=dict(type='str', required=True), + name=dict(type="str", required=True), + cluster_name=dict(type="str", required=True), node_role=dict(), - subnets=dict(type='list', elements='str'), - scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict( - min_size=dict(type='int'), - max_size=dict(type='int'), - desired_size=dict(type='int') - )), - disk_size=dict(type='int'), - instance_types=dict(type='list', elements='str'), - ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']), - remote_access=dict(type='dict', options=dict( - ec2_ssh_key=dict(no_log=True), - source_sg=dict(type='list', elements='str') - )), - update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict( - max_unavailable=dict(type='int'), - max_unavailable_percentage=dict(type='int') - )), - labels=dict(type='dict', default={}), - taints=dict(type='list', elements='dict', default=[], options=dict( - key=dict(type='str', no_log=False,), - value=dict(type='str'), - effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE']) - )), - launch_template=dict(type='dict', options=dict( - name=dict(type='str'), - version=dict(type='str'), - id=dict(type='str') - )), - capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'), + subnets=dict(type="list", elements="str"), + scaling_config=dict( + type="dict", + default={"min_size": 1, "max_size": 2, "desired_size": 1}, + options=dict( + min_size=dict(type="int"), + max_size=dict(type="int"), + desired_size=dict(type="int"), + ), + ), + disk_size=dict(type="int"), + instance_types=dict(type="list", elements="str"), + ami_type=dict( + choices=[ + "AL2_x86_64", + "AL2_x86_64_GPU", + "AL2_ARM_64", + "CUSTOM", + "BOTTLEROCKET_ARM_64", + "BOTTLEROCKET_x86_64", + ] + ), + remote_access=dict( + type="dict", + options=dict( + ec2_ssh_key=dict(no_log=True), + source_sg=dict(type="list", elements="str"), + ), + ), + update_config=dict( + type="dict", + default={"max_unavailable": 1}, + options=dict( + max_unavailable=dict(type="int"), + max_unavailable_percentage=dict(type="int"), + ), + ), + labels=dict(type="dict", default={}), + taints=dict( + type="list", + elements="dict", + default=[], + options=dict( + key=dict( + type="str", + no_log=False, + ), + value=dict(type="str"), + effect=dict(type="str", choices=["NO_SCHEDULE", "NO_EXECUTE", "PREFER_NO_SCHEDULE"]), + ), + ), + launch_template=dict( + type="dict", + options=dict( + name=dict(type="str"), + version=dict(type="str"), + id=dict(type="str"), + ), + ), + capacity_type=dict(choices=["ON_DEMAND", "SPOT"], default="ON_DEMAND"), release_version=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - 
state=dict(choices=['absent', 'present'], default='present'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=1200, type='int') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["absent", "present"], default="present"), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=1200, type="int"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['node_role', 'subnets']]], + required_if=[["state", "present", ["node_role", "subnets"]]], mutually_exclusive=[ - ('launch_template', 'instance_types'), - ('launch_template', 'disk_size'), - ('launch_template', 'remote_access'), - ('launch_template', 'ami_type') + ("launch_template", "instance_types"), + ("launch_template", "disk_size"), + ("launch_template", "remote_access"), + ("launch_template", "ami_type"), ], supports_check_mode=True, ) - if module.params['launch_template'] is None: - if module.params['disk_size'] is None: - module.params['disk_size'] = 20 - if module.params['ami_type'] is None: - module.params['ami_type'] = "AL2_x86_64" - if module.params['instance_types'] is None: - module.params['instance_types'] = ["t3.medium"] + if module.params["launch_template"] is None: + if module.params["disk_size"] is None: + module.params["disk_size"] = 20 + if module.params["ami_type"] is None: + module.params["ami_type"] = "AL2_x86_64" + if module.params["instance_types"] is None: + module.params["instance_types"] = ["t3.medium"] else: - if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None): - module.exit_json(changed=False, msg='To use launch_template, it is necessary to inform the id or name.') + if (module.params["launch_template"]["id"] is None) and (module.params["launch_template"]["name"] is None): + module.exit_json(changed=False, msg="To use launch_template, you must specify either its id or its name.") try: - client = module.client('eks') + client = module.client("eks") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't connect to AWS.") - if module.params.get('state') == 'present': + if module.params.get("state") == "present": create_or_update_nodegroups(client, module) else: delete_nodegroups(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticache.py b/elasticache.py index 067134725d7..ac6ea78b69f 100644 --- a/elasticache.py +++ b/elasticache.py @@ -143,16 +143,29 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -class ElastiCacheManager(): +class ElastiCacheManager: """Handles elasticache creation and destruction""" - EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] - - def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_parameter_group, cache_subnet_group, - cache_security_groups, security_group_ids, zone, wait, - hard_modify): + EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"] + + def __init__( + self, + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ): self.module = module self.name = name self.engine = engine.lower() @@ -170,7 +183,7 @@ def __init__(self, module, name, engine,
cache_engine_version, node_type, self.changed = False self.data = None - self.status = 'gone' + self.status = "gone" self.conn = self._get_elasticache_connection() self._refresh_data() @@ -195,32 +208,34 @@ def exists(self): def create(self): """Create an ElastiCache cluster""" - if self.status == 'available': + if self.status == "available": return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") else: msg = "'%s' is currently deleting. Cannot create." self.module.fail_json(msg=msg % self.name) - kwargs = dict(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeType=self.node_type, - Engine=self.engine, - EngineVersion=self.cache_engine_version, - CacheSecurityGroupNames=self.cache_security_groups, - SecurityGroupIds=self.security_group_ids, - CacheParameterGroupName=self.cache_parameter_group, - CacheSubnetGroupName=self.cache_subnet_group) + kwargs = dict( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeType=self.node_type, + Engine=self.engine, + EngineVersion=self.cache_engine_version, + CacheSecurityGroupNames=self.cache_security_groups, + SecurityGroupIds=self.security_group_ids, + CacheParameterGroupName=self.cache_parameter_group, + CacheSubnetGroupName=self.cache_subnet_group, + ) if self.cache_port is not None: - kwargs['Port'] = self.cache_port + kwargs["Port"] = self.cache_port if self.zone is not None: - kwargs['PreferredAvailabilityZone'] = self.zone + kwargs["PreferredAvailabilityZone"] = self.zone try: self.conn.create_cache_cluster(**kwargs) @@ -232,20 +247,20 @@ def create(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") return True def delete(self): """Destroy an ElastiCache cluster""" - if self.status == 'gone': + if self.status == "gone": return - if self.status == 'deleting': + if self.status == "deleting": if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") return - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: msg = "'%s' is currently %s. Cannot delete." self.module.fail_json(msg=msg % (self.name, self.status)) @@ -255,12 +270,12 @@ def delete(self): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Failed to delete cache cluster") - cache_cluster_data = response['CacheCluster'] + cache_cluster_data = response["CacheCluster"] self._refresh_data(cache_cluster_data) self.changed = True if self.wait: - self._wait_for_status('gone') + self._wait_for_status("gone") def sync(self): """Sync settings to cluster if required""" @@ -268,9 +283,9 @@ def sync(self): msg = "'%s' is %s. Cannot sync." self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status in ['creating', 'rebooting', 'modifying']: + if self.status in ["creating", "rebooting", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: # Cluster can only be synced if available. If we can't wait # for this, then just be done. 
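Across these modules the lookup helpers (get_cluster, get_fargate_profile, get_nodegroup, _refresh_data) share one idiom: a NotFound error code means the resource is simply absent, so it maps to None (or a 'gone' status), while every other botocore error is surfaced through fail_json_aws. A minimal sketch of that pattern, assuming is_boto3_error_code is imported from the collection's module_utils; the helper name is hypothetical:

    def describe_or_none(client, name):
        # An absent resource is an expected state, not an error: map the
        # service's NotFound error code to None and let every other
        # botocore exception propagate to the caller's fail_json_aws().
        try:
            return client.describe_cluster(name=name)["cluster"]
        except is_boto3_error_code("ResourceNotFoundException"):
            return None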
@@ -294,14 +309,16 @@ def modify(self): """Modify the cache cluster. Note it's only possible to modify a few select options.""" nodes_to_remove = self._get_nodes_to_remove() try: - self.conn.modify_cache_cluster(CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeIdsToRemove=nodes_to_remove, - CacheSecurityGroupNames=self.cache_security_groups, - CacheParameterGroupName=self.cache_parameter_group, - SecurityGroupIds=self.security_group_ids, - ApplyImmediately=True, - EngineVersion=self.cache_engine_version) + self.conn.modify_cache_cluster( + CacheClusterId=self.name, + NumCacheNodes=self.num_nodes, + CacheNodeIdsToRemove=nodes_to_remove, + CacheSecurityGroupNames=self.cache_security_groups, + CacheParameterGroupName=self.cache_parameter_group, + SecurityGroupIds=self.security_group_ids, + ApplyImmediately=True, + EngineVersion=self.cache_engine_version, + ) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to modify cache cluster") @@ -309,27 +326,26 @@ def modify(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def reboot(self): """Reboot the cache cluster""" if not self.exists(): msg = "'%s' is %s. Cannot reboot." self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status == 'rebooting': + if self.status == "rebooting": return - if self.status in ['creating', 'modifying']: + if self.status in ["creating", "modifying"]: if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") else: msg = "'%s' is currently %s. Cannot reboot." self.module.fail_json(msg=msg % (self.name, self.status)) # Collect ALL nodes for reboot - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] try: - self.conn.reboot_cache_cluster(CacheClusterId=self.name, - CacheNodeIdsToReboot=cache_node_ids) + self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Failed to reboot cache cluster") @@ -337,26 +353,18 @@ def reboot(self): self.changed = True if self.wait: - self._wait_for_status('available') + self._wait_for_status("available") def get_info(self): """Return basic info about the cache cluster""" - info = { - 'name': self.name, - 'status': self.status - } + info = {"name": self.name, "status": self.status} if self.data: - info['data'] = self.data + info["data"] = self.data return info def _wait_for_status(self, awaited_status): """Wait for status to change from present status to awaited_status""" - status_map = { - 'creating': 'available', - 'rebooting': 'available', - 'modifying': 'available', - 'deleting': 'gone' - } + status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"} if self.status == awaited_status: # No need to wait, we're already done return @@ -377,27 +385,24 @@ def _wait_for_status(self, awaited_status): def _requires_modification(self): """Check if cluster requires (nondestructive) modification""" # Check modifiable data attributes - modifiable_data = { - 'NumCacheNodes': self.num_nodes, - 'EngineVersion': self.cache_engine_version - } + modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version} for key, value in modifiable_data.items(): if value is not None and value and self.data[key] != value: return True # Check cache security groups 
cache_security_groups = [] - for sg in self.data['CacheSecurityGroups']: - cache_security_groups.append(sg['CacheSecurityGroupName']) + for sg in self.data["CacheSecurityGroups"]: + cache_security_groups.append(sg["CacheSecurityGroupName"]) if set(cache_security_groups) != set(self.cache_security_groups): return True # check vpc security groups if self.security_group_ids: vpc_security_groups = [] - security_groups = self.data.get('SecurityGroups', []) + security_groups = self.data.get("SecurityGroups", []) for sg in security_groups: - vpc_security_groups.append(sg['SecurityGroupId']) + vpc_security_groups.append(sg["SecurityGroupId"]) if set(vpc_security_groups) != set(self.security_group_ids): return True @@ -408,13 +413,13 @@ def _requires_destroy_and_create(self): Check whether a destroy and create is required to synchronize cluster. """ unmodifiable_data = { - 'node_type': self.data['CacheNodeType'], - 'engine': self.data['Engine'], - 'cache_port': self._get_port() + "node_type": self.data["CacheNodeType"], + "engine": self.data["Engine"], + "cache_port": self._get_port(), } # Only check for modifications if zone is specified if self.zone is not None: - unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] + unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"] for key, value in unmodifiable_data.items(): if getattr(self, key) is not None and getattr(self, key) != value: return True @@ -423,18 +428,18 @@ def _requires_destroy_and_create(self): def _get_elasticache_connection(self): """Get an elasticache connection""" try: - return self.module.client('elasticache') + return self.module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to connect to AWS') + self.module.fail_json_aws(e, msg="Failed to connect to AWS") def _get_port(self): """Get the port. Where this information is retrieved from is engine dependent.""" - if self.data['Engine'] == 'memcached': - return self.data['ConfigurationEndpoint']['Port'] - elif self.data['Engine'] == 'redis': + if self.data["Engine"] == "memcached": + return self.data["ConfigurationEndpoint"]["Port"] + elif self.data["Engine"] == "redis": # Redis only supports a single node (presently) so just use # the first and only - return self.data['CacheNodes'][0]['Endpoint']['Port'] + return self.data["CacheNodes"][0]["Endpoint"]["Port"] def _refresh_data(self, cache_cluster_data=None): """Refresh data about this cache cluster""" @@ -442,25 +447,25 @@ def _refresh_data(self, cache_cluster_data=None): if cache_cluster_data is None: try: response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) - except is_boto3_error_code('CacheClusterNotFound'): + except is_boto3_error_code("CacheClusterNotFound"): self.data = None - self.status = 'gone' + self.status = "gone" return except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Failed to describe cache clusters") - cache_cluster_data = response['CacheClusters'][0] + cache_cluster_data = response["CacheClusters"][0] self.data = cache_cluster_data - self.status = self.data['CacheClusterStatus'] + self.status = self.data["CacheClusterStatus"] # The documentation for elasticache lies -- status on rebooting is set # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it # here to make status checks etc. more sane. 
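The comment above carries the reasoning for the two lines that follow: ElastiCache reports the transitional status 'rebooting cache cluster nodes', which would never match the 'rebooting' value the rest of the class compares against, so _refresh_data collapses it. Pulled out as a standalone sketch (hypothetical helper name, same mapping):

    def normalize_status(raw_status):
        # ElastiCache reports 'rebooting cache cluster nodes' while nodes
        # reboot; collapse it to 'rebooting' so the status checks in
        # create()/delete()/sync() compare against a single spelling.
        if raw_status == "rebooting cache cluster nodes":
            return "rebooting"
        return raw_status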
- if self.status == 'rebooting cache cluster nodes': - self.status = 'rebooting' + if self.status == "rebooting cache cluster nodes": + self.status = "rebooting" def _get_nodes_to_remove(self): """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes + num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes if num_nodes_to_remove <= 0: return [] @@ -468,76 +473,83 @@ def _get_nodes_to_remove(self): msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." self.module.fail_json(msg=msg % self.name) - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] + cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] return cache_node_ids[-num_nodes_to_remove:] def main(): - """ elasticache ansible module """ + """elasticache ansible module""" argument_spec = dict( - state=dict(required=True, choices=['present', 'absent', 'rebooted']), + state=dict(required=True, choices=["present", "absent", "rebooted"]), name=dict(required=True), - engine=dict(default='memcached'), + engine=dict(default="memcached"), cache_engine_version=dict(default=""), - node_type=dict(default='cache.t2.small'), - num_nodes=dict(default=1, type='int'), + node_type=dict(default="cache.t2.small"), + num_nodes=dict(default=1, type="int"), # alias for compat with the original PR 1950 - cache_parameter_group=dict(default="", aliases=['parameter_group']), - cache_port=dict(type='int'), + cache_parameter_group=dict(default="", aliases=["parameter_group"]), + cache_port=dict(type="int"), cache_subnet_group=dict(default=""), - cache_security_groups=dict(default=[], type='list', elements='str'), - security_group_ids=dict(default=[], type='list', elements='str'), + cache_security_groups=dict(default=[], type="list", elements="str"), + security_group_ids=dict(default=[], type="list", elements="str"), zone=dict(), - wait=dict(default=True, type='bool'), - hard_modify=dict(type='bool'), + wait=dict(default=True, type="bool"), + hard_modify=dict(type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, ) - name = module.params['name'] - state = module.params['state'] - engine = module.params['engine'] - cache_engine_version = module.params['cache_engine_version'] - node_type = module.params['node_type'] - num_nodes = module.params['num_nodes'] - cache_port = module.params['cache_port'] - cache_subnet_group = module.params['cache_subnet_group'] - cache_security_groups = module.params['cache_security_groups'] - security_group_ids = module.params['security_group_ids'] - zone = module.params['zone'] - wait = module.params['wait'] - hard_modify = module.params['hard_modify'] - cache_parameter_group = module.params['cache_parameter_group'] + name = module.params["name"] + state = module.params["state"] + engine = module.params["engine"] + cache_engine_version = module.params["cache_engine_version"] + node_type = module.params["node_type"] + num_nodes = module.params["num_nodes"] + cache_port = module.params["cache_port"] + cache_subnet_group = module.params["cache_subnet_group"] + cache_security_groups = module.params["cache_security_groups"] + security_group_ids = module.params["security_group_ids"] + zone = module.params["zone"] + wait = module.params["wait"] + hard_modify = module.params["hard_modify"] + cache_parameter_group = module.params["cache_parameter_group"] if cache_subnet_group and cache_security_groups: module.fail_json(msg="Can't specify both 
cache_subnet_group and cache_security_groups") - if state == 'present' and not num_nodes: + if state == "present" and not num_nodes: module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0") - elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, - num_nodes, cache_port, - cache_parameter_group, - cache_subnet_group, - cache_security_groups, - security_group_ids, zone, wait, - hard_modify) + elasticache_manager = ElastiCacheManager( + module, + name, + engine, + cache_engine_version, + node_type, + num_nodes, + cache_port, + cache_parameter_group, + cache_subnet_group, + cache_security_groups, + security_group_ids, + zone, + wait, + hard_modify, + ) - if state == 'present': + if state == "present": elasticache_manager.ensure_present() - elif state == 'absent': + elif state == "absent": elasticache_manager.ensure_absent() - elif state == 'rebooted': + elif state == "rebooted": elasticache_manager.ensure_rebooted() - facts_result = dict(changed=elasticache_manager.changed, - elasticache=elasticache_manager.get_info()) + facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info()) module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticache_info.py b/elasticache_info.py index 31283cd18aa..28b31f76a7f 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -418,40 +418,40 @@ @AWSRetry.exponential_backoff() def describe_cache_clusters_with_backoff(client, cluster_id=None): - paginator = client.get_paginator('describe_cache_clusters') + paginator = client.get_paginator("describe_cache_clusters") params = dict(ShowCacheNodeInfo=True) if cluster_id: - params['CacheClusterId'] = cluster_id + params["CacheClusterId"] = cluster_id try: response = paginator.paginate(**params).build_full_result() - except is_boto3_error_code('CacheClusterNotFound'): + except is_boto3_error_code("CacheClusterNotFound"): return [] - return response['CacheClusters'] + return response["CacheClusters"] @AWSRetry.exponential_backoff() def describe_replication_group_with_backoff(client, replication_group_id): try: response = client.describe_replication_groups(ReplicationGroupId=replication_group_id) - except is_boto3_error_code('ReplicationGroupNotFoundFault'): + except is_boto3_error_code("ReplicationGroupNotFoundFault"): return None - return response['ReplicationGroups'][0] + return response["ReplicationGroups"][0] @AWSRetry.exponential_backoff() def get_elasticache_tags_with_backoff(client, cluster_id): - return client.list_tags_for_resource(ResourceName=cluster_id)['TagList'] + return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"] def get_aws_account_id(module): try: - client = module.client('sts') + client = module.client("sts") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Can't authorize connection") try: - return client.get_caller_identity()['Account'] + return client.get_caller_identity()["Account"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain AWS account id") @@ -459,16 +459,15 @@ def get_aws_account_id(module): def get_elasticache_clusters(client, module): region = module.region try: - clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name')) + clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get("name")) 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain cache cluster info") account_id = get_aws_account_id(module) results = [] for cluster in clusters: - cluster = camel_dict_to_snake_dict(cluster) - arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id']) + arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster["cache_cluster_id"]) try: tags = get_elasticache_tags_with_backoff(client, arn) except is_boto3_error_code("CacheClusterNotFound"): @@ -477,17 +476,17 @@ def get_elasticache_clusters(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for cluster %s") - cluster['tags'] = boto3_tag_list_to_ansible_dict(tags) + cluster["tags"] = boto3_tag_list_to_ansible_dict(tags) - if cluster.get('replication_group_id', None): + if cluster.get("replication_group_id", None): try: - replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id']) + replication_group = describe_replication_group_with_backoff(client, cluster["replication_group_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain replication group info") if replication_group is not None: replication_group = camel_dict_to_snake_dict(replication_group) - cluster['replication_group'] = replication_group + cluster["replication_group"] = replication_group results.append(cluster) return results @@ -499,10 +498,10 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('elasticache') + client = module.client("elasticache") module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 04a789bd59f..1e5a1c63b6f 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -119,9 +119,11 @@ def create(module, conn, name, group_family, description): - """ Create ElastiCache parameter group. """ + """Create ElastiCache parameter group.""" try: - response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description) + response = conn.create_cache_parameter_group( + CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create cache parameter group.") @@ -129,7 +131,7 @@ def create(module, conn, name, group_family, description): def delete(module, conn, name): - """ Delete ElastiCache parameter group. 
""" + """Delete ElastiCache parameter group.""" try: conn.delete_cache_parameter_group(CacheParameterGroupName=name) response = {} @@ -140,7 +142,7 @@ def delete(module, conn, name): def make_current_modifiable_param_dict(module, conn, name): - """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" + """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" current_info = get_info(conn, name) if current_info is False: module.fail_json(msg="Could not connect to the cache parameter group %s." % name) @@ -157,7 +159,7 @@ def make_current_modifiable_param_dict(module, conn, name): def check_valid_modification(module, values, modifiable_params): - """ Check if the parameters and values in values are valid. """ + """Check if the parameters and values in values are valid.""" changed_with_update = False for parameter in values: @@ -165,7 +167,10 @@ def check_valid_modification(module, values, modifiable_params): # check valid modifiable parameters if parameter not in modifiable_params: - module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys())) + module.fail_json( + msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." + % (parameter, modifiable_params.keys()) + ) # check allowed datatype for modified parameters str_to_type = {"integer": int, "string": string_types} @@ -180,18 +185,24 @@ def check_valid_modification(module, values, modifiable_params): if isinstance(new_value, bool): values[parameter] = 1 if new_value else 0 else: - module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % - (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + module.fail_json( + msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." + % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + ) else: - module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." % - (new_value, type(new_value), parameter, modifiable_params[parameter][1])) + module.fail_json( + msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." + % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + ) # check allowed values for modifiable parameters choices = modifiable_params[parameter][0] if choices: if not (to_text(new_value) in choices or isinstance(new_value, int)): - module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." % - (new_value, parameter, choices)) + module.fail_json( + msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." + % (new_value, parameter, choices) + ) # check if a new value is different from current value if to_text(values[parameter]) != modifiable_params[parameter][2]: @@ -201,7 +212,7 @@ def check_valid_modification(module, values, modifiable_params): def check_changed_parameter_values(values, old_parameters, new_parameters): - """ Checking if the new values are different than the old values. 
""" + """Check if the new values differ from the old values.""" changed_with_update = False # if the user specified parameters to reset, only check those for change @@ -221,21 +232,23 @@ def check_changed_parameter_values(values, old_parameters, new_parameters): def modify(module, conn, name, values): - """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """ + """Modify ElastiCache parameter group to reflect the new information if it differs from the current.""" # compares current group parameters with the parameters we've specified to a value to see if this will change the group format_parameters = [] for key in values: value = to_text(values[key]) - format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + format_parameters.append({"ParameterName": key, "ParameterValue": value}) try: - response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters) + response = conn.modify_cache_parameter_group( + CacheParameterGroupName=name, ParameterNameValues=format_parameters + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to modify cache parameter group.") return response def reset(module, conn, name, values): - """ Reset ElastiCache parameter group if the current information is different from the new information. """ + """Reset ElastiCache parameter group if the current information is different from the new information.""" # used to compare with the reset parameters' dict to see if there have been changes old_parameters_dict = make_current_modifiable_param_dict(module, conn, name) @@ -247,12 +260,14 @@ def reset(module, conn, name, values): format_parameters = [] for key in values: value = to_text(values[key]) - format_parameters.append({'ParameterName': key, 'ParameterValue': value}) + format_parameters.append({"ParameterName": key, "ParameterValue": value}) else: all_parameters = True try: - response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters) + response = conn.reset_cache_parameter_group( + CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to reset cache parameter group.") @@ -264,7 +279,7 @@ def reset(module, conn, name, values): def get_info(conn, name): - """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """ + """Gets info about the ElastiCache parameter group. 
Returns false if it doesn't exist or we don't have access.""" try: data = conn.describe_cache_parameters(CacheParameterGroupName=name) return data @@ -274,36 +289,50 @@ def main(): argument_spec = dict( - group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']), - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - state=dict(required=True, choices=['present', 'absent', 'reset']), - values=dict(type='dict'), + group_family=dict( + type="str", + choices=[ + "memcached1.4", + "memcached1.5", + "redis2.6", + "redis2.8", + "redis3.2", + "redis4.0", + "redis5.0", + "redis6.x", + ], + ), + name=dict(required=True, type="str"), + description=dict(default="", type="str"), + state=dict(required=True, choices=["present", "absent", "reset"]), + values=dict(type="dict"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - parameter_group_family = module.params.get('group_family') - parameter_group_name = module.params.get('name') - group_description = module.params.get('description') - state = module.params.get('state') - values = module.params.get('values') + parameter_group_family = module.params.get("group_family") + parameter_group_name = module.params.get("name") + group_description = module.params.get("description") + state = module.params.get("state") + values = module.params.get("values") try: - connection = module.client('elasticache') + connection = module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") exists = get_info(connection, parameter_group_name) # check that the needed requirements are available - if state == 'present' and not (exists or parameter_group_family): + if state == "present" and not (exists or parameter_group_family): module.fail_json(msg="Creating a group requires a group family.") - elif state == 'reset' and not exists: - module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name) + elif state == "reset" and not exists: + module.fail_json( + msg="No group %s to reset. Please create the group before using the state 'reset'."
% parameter_group_name + ) # Taking action changed = False - if state == 'present': + if state == "present": if exists: # confirm that the group exists without any actions if not values: @@ -316,19 +345,21 @@ def main(): response = modify(module, connection, parameter_group_name, values) # create group else: - response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description) + response, changed = create( + module, connection, parameter_group_name, parameter_group_family, group_description + ) if values: modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) changed, values = check_valid_modification(module, values, modifiable_params) response = modify(module, connection, parameter_group_name, values) - elif state == 'absent': + elif state == "absent": if exists: # delete group response, changed = delete(module, connection, parameter_group_name) else: response = {} changed = False - elif state == 'reset': + elif state == "reset": response, changed = reset(module, connection, parameter_group_name, values) facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response)) @@ -336,5 +367,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index e477fc86aa5..66c9cb9da57 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -122,13 +122,13 @@ def create(module, connection, replication_id, cluster_id, name): - """ Create an ElastiCache backup. """ + """Create an ElastiCache backup.""" try: - response = connection.create_snapshot(ReplicationGroupId=replication_id, - CacheClusterId=cluster_id, - SnapshotName=name) + response = connection.create_snapshot( + ReplicationGroupId=replication_id, CacheClusterId=cluster_id, SnapshotName=name + ) changed = True - except is_boto3_error_code('SnapshotAlreadyExistsFault'): + except is_boto3_error_code("SnapshotAlreadyExistsFault"): response = {} changed = False except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except @@ -137,11 +137,9 @@ def create(module, connection, replication_id, cluster_id, name): def copy(module, connection, name, target, bucket): - """ Copy an ElastiCache backup. """ + """Copy an ElastiCache backup.""" try: - response = connection.copy_snapshot(SourceSnapshotName=name, - TargetSnapshotName=target, - TargetBucket=bucket) + response = connection.copy_snapshot(SourceSnapshotName=name, TargetSnapshotName=target, TargetBucket=bucket) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to copy the snapshot.") @@ -149,16 +147,18 @@ def copy(module, connection, name, target, bucket): def delete(module, connection, name): - """ Delete an ElastiCache backup. """ + """Delete an ElastiCache backup.""" try: response = connection.delete_snapshot(SnapshotName=name) changed = True - except is_boto3_error_code('SnapshotNotFoundFault'): + except is_boto3_error_code("SnapshotNotFoundFault"): response = {} changed = False - except is_boto3_error_code('InvalidSnapshotState'): # pylint: disable=duplicate-except - module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." 
- "You may need to wait a few minutes.") + except is_boto3_error_code("InvalidSnapshotState"): # pylint: disable=duplicate-except + module.fail_json( + msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." + "You may need to wait a few minutes." + ) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete the snapshot.") return response, changed @@ -166,38 +166,38 @@ def delete(module, connection, name): def main(): argument_spec = dict( - name=dict(required=True, type='str'), - state=dict(required=True, type='str', choices=['present', 'absent', 'copy']), - replication_id=dict(type='str'), - cluster_id=dict(type='str'), - target=dict(type='str'), - bucket=dict(type='str'), + name=dict(required=True, type="str"), + state=dict(required=True, type="str", choices=["present", "absent", "copy"]), + replication_id=dict(type="str"), + cluster_id=dict(type="str"), + target=dict(type="str"), + bucket=dict(type="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec) - name = module.params.get('name') - state = module.params.get('state') - replication_id = module.params.get('replication_id') - cluster_id = module.params.get('cluster_id') - target = module.params.get('target') - bucket = module.params.get('bucket') + name = module.params.get("name") + state = module.params.get("state") + replication_id = module.params.get("replication_id") + cluster_id = module.params.get("cluster_id") + target = module.params.get("target") + bucket = module.params.get("bucket") try: - connection = module.client('elasticache') + connection = module.client("elasticache") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") changed = False response = {} - if state == 'present': + if state == "present": if not all((replication_id, cluster_id)): module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'") response, changed = create(module, connection, replication_id, cluster_id, name) - elif state == 'absent': + elif state == "absent": response, changed = delete(module, connection, name) - elif state == 'copy': + elif state == "copy": if not all((target, bucket)): module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.") response, changed = copy(module, connection, name, target, bucket) @@ -207,5 +207,5 @@ def main(): module.exit_json(**facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py index 6353c72837b..f7740e696fb 100644 --- a/elasticache_subnet_group.py +++ b/elasticache_subnet_group.py @@ -113,10 +113,13 @@ def get_subnet_group(name): groups = client.describe_cache_subnet_groups( aws_retry=True, CacheSubnetGroupName=name, - )['CacheSubnetGroups'] - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + )["CacheSubnetGroups"] + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -130,19 +133,18 @@ def get_subnet_group(name): subnet_group = 
camel_dict_to_snake_dict(groups[0]) - subnet_group['name'] = subnet_group['cache_subnet_group_name'] - subnet_group['description'] = subnet_group['cache_subnet_group_description'] + subnet_group["name"] = subnet_group["cache_subnet_group_name"] + subnet_group["description"] = subnet_group["cache_subnet_group_description"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -163,13 +165,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['CacheSubnetGroupDescription'] = description + if description and subnet_group["description"] != description: + update_params["CacheSubnetGroupDescription"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -190,7 +192,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -200,20 +201,23 @@ def delete_subnet_group(name): CacheSubnetGroupName=name, ) return True - except is_boto3_error_code('CacheSubnetGroupNotFoundFault'): + except is_boto3_error_code("CacheSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list', elements='str'), + subnets=dict(required=False, type="list", elements="str"), ) global module @@ -224,17 +228,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name').lower() - description = module.params.get('description') - subnets = module.params.get('subnets') + state = module.params.get("state") + name = module.params.get("name").lower() + description = module.params.get("description") + subnets = module.params.get("subnets") - client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff()) subnet_group = get_subnet_group(name) changed = False - if state == 'present': + if state == "present": if not subnet_group: result = create_subnet_group(name, description, subnets) changed |= result @@ -251,5 +255,5 @@ def 
main(): module.exit_json(changed=changed, cache_subnet_group=subnet_group) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py index c6333379456..bf11afbb2bf 100644 --- a/elasticbeanstalk_app.py +++ b/elasticbeanstalk_app.py @@ -112,24 +112,24 @@ def list_apps(ebs, app_name, module): def check_app(ebs, app, module): - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] result = {} - if state == 'present' and app is None: + if state == "present" and app is None: result = dict(changed=True, output="App would be created") - elif state == 'present' and app.get("Description", None) != description: + elif state == "present" and app.get("Description", None) != description: result = dict(changed=True, output="App would be updated", app=app) - elif state == 'present' and app.get("Description", None) == description: + elif state == "present" and app.get("Description", None) == description: result = dict(changed=False, output="App is up-to-date", app=app) - elif state == 'absent' and app is None: + elif state == "absent" and app is None: result = dict(changed=False, output="App does not exist", app={}) - elif state == 'absent' and app is not None: + elif state == "absent" and app is not None: result = dict(changed=True, output="App will be deleted", app=app) - elif state == 'absent' and app is not None and terminate_by_force is True: + elif state == "absent" and app is not None and terminate_by_force is True: result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app) module.exit_json(**result) @@ -145,37 +145,36 @@ def filter_empty(**kwargs): def main(): argument_spec = dict( - app_name=dict(aliases=['name'], type='str', required=False), + app_name=dict(aliases=["name"], type="str", required=False), description=dict(), - state=dict(choices=['present', 'absent'], default='present'), - terminate_by_force=dict(type='bool', default=False, required=False) + state=dict(choices=["present", "absent"], default="present"), + terminate_by_force=dict(type="bool", default=False, required=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - app_name = module.params['app_name'] - description = module.params['description'] - state = module.params['state'] - terminate_by_force = module.params['terminate_by_force'] + app_name = module.params["app_name"] + description = module.params["description"] + state = module.params["state"] + terminate_by_force = module.params["terminate_by_force"] if app_name is None: module.fail_json(msg='Module parameter "app_name" is required') result = {} - ebs = module.client('elasticbeanstalk') + ebs = module.client("elasticbeanstalk") app = describe_app(ebs, app_name, module) if module.check_mode: check_app(ebs, app, module) - module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.') + module.fail_json(msg="ASSERTION FAILURE: check_app() should not return control.") - if state == 'present': + if state == "present": if app is None: try: - create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, - Description=description)) + create_app = 
ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not create application") @@ -200,7 +199,7 @@ def main(): else: if app is None: - result = dict(changed=False, output='Application not found', app={}) + result = dict(changed=False, output="Application not found", app={}) else: try: if terminate_by_force: @@ -209,9 +208,12 @@ def main(): else: ebs.delete_application(ApplicationName=app_name) changed = True - except is_boto3_error_message('It is currently pending deletion'): + except is_boto3_error_message("It is currently pending deletion"): changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Cannot terminate app") result = dict(changed=changed, app=app) @@ -219,5 +221,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py index 3d3d43d4e71..db3fd46ac48 100644 --- a/elb_classic_lb_info.py +++ b/elb_classic_lb_info.py @@ -161,63 +161,79 @@ def list_elbs(connection, load_balancer_names): def describe_elb(connection, lb): description = camel_dict_to_snake_dict(lb) - name = lb['LoadBalancerName'] - instances = lb.get('Instances', []) - description['tags'] = get_tags(connection, name) - description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService') - description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService') - description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown') - description['attributes'] = get_lb_attributes(connection, name) + name = lb["LoadBalancerName"] + instances = lb.get("Instances", []) + description["tags"] = get_tags(connection, name) + description["instances_inservice"], description["instances_inservice_count"] = lb_instance_health( + connection, name, instances, "InService" + ) + description["instances_outofservice"], description["instances_outofservice_count"] = lb_instance_health( + connection, name, instances, "OutOfService" + ) + description["instances_unknownservice"], description["instances_unknownservice_count"] = lb_instance_health( + connection, name, instances, "Unknown" + ) + description["attributes"] = get_lb_attributes(connection, name) return description @AWSRetry.jittered_backoff() def get_all_lb(connection): - paginator = connection.get_paginator('describe_load_balancers') - return paginator.paginate().build_full_result()['LoadBalancerDescriptions'] + paginator = connection.get_paginator("describe_load_balancers") + return paginator.paginate().build_full_result()["LoadBalancerDescriptions"] def get_lb(connection, load_balancer_name): try: - return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0] - except is_boto3_error_code('LoadBalancerNotFound'): + return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])[ + "LoadBalancerDescriptions" + ][0] + except is_boto3_error_code("LoadBalancerNotFound"): 
return [] def get_lb_attributes(connection, load_balancer_name): - attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {}) + attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get( + "LoadBalancerAttributes", {} + ) return camel_dict_to_snake_dict(attributes) def get_tags(connection, load_balancer_name): - tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions'] + tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])["TagDescriptions"] if not tags: return {} - return boto3_tag_list_to_ansible_dict(tags[0]['Tags']) + return boto3_tag_list_to_ansible_dict(tags[0]["Tags"]) def lb_instance_health(connection, load_balancer_name, instances, state): - instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', []) - instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state] + instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get( + "InstanceStates", [] + ) + instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state] return instate, len(instate) def main(): argument_spec = dict( - names=dict(default=[], type='list', elements='str') + names=dict(default=[], type="list", elements="str"), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)) + connection = module.client( + "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY) + ) try: - elbs = list_elbs(connection, module.params.get('names')) + elbs = list_elbs(connection, module.params.get("names")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get load balancer information.") module.exit_json(elbs=elbs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_instance.py b/elb_instance.py index fe1128c9661..2d6ca291968 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -107,9 +107,9 @@ class ElbManager: def __init__(self, module, instance_id=None, ec2_elbs=None): retry_decorator = AWSRetry.jittered_backoff() self.module = module - self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator) - self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator) - self.client_elb = module.client('elb', retry_decorator=retry_decorator) + self.client_asg = module.client("autoscaling", retry_decorator=retry_decorator) + self.client_ec2 = module.client("ec2", retry_decorator=retry_decorator) + self.client_elb = module.client("elb", retry_decorator=retry_decorator) self.instance_id = instance_id self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False @@ -120,11 +120,11 @@ def deregister(self, wait, timeout): to report it out-of-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id not in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + 
self.updated_elbs.add(lb["LoadBalancerName"]) if self.module.check_mode: self.changed = True @@ -133,12 +133,13 @@ def deregister(self, wait, timeout): try: self.client_elb.deregister_instances_from_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to deregister instance from load balancer", load_balancer=lb, instance=self.instance_id + ) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's @@ -147,17 +148,17 @@ def deregister(self, wait, timeout): if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'Deregistered', timeout) + self._await_elb_instance_state(lb, "Deregistered", timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id in instance_ids: continue - self.updated_elbs.add(lb['LoadBalancerName']) + self.updated_elbs.add(lb["LoadBalancerName"]) if enable_availability_zone: self.changed |= self._enable_availailability_zone(lb) @@ -169,31 +170,32 @@ def register(self, wait, enable_availability_zone, timeout): try: self.client_elb.register_instances_with_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to register instance with load balancer', - load_balancer=lb, instance=self.instance_id) + self.module.fail_json_aws( + e, "Failed to register instance with load balancer", load_balancer=lb, instance=self.instance_id + ) self.changed = True if wait: for lb in self.lbs: - self._await_elb_instance_state(lb, 'InService', timeout) + self._await_elb_instance_state(lb, "InService", timeout) @AWSRetry.jittered_backoff() def _describe_elbs(self, **params): - paginator = self.client_elb.get_paginator('describe_load_balancers') + paginator = self.client_elb.get_paginator("describe_load_balancers") results = paginator.paginate(**params).build_full_result() - return results['LoadBalancerDescriptions'] + return results["LoadBalancerDescriptions"] def exists(self, lbtest): - """ Verify that the named ELB actually exists """ + """Verify that the named ELB actually exists""" found = False for lb in self.lbs: - if lb['LoadBalancerName'] == lbtest: + if lb["LoadBalancerName"] == lbtest: found = True break return found @@ -203,9 +205,9 @@ def _enable_availailability_zone(self, lb): Returns True if the zone was enabled or False if no change was made. 
lb: load balancer""" instance = self._get_instance() - desired_zone = instance['Placement']['AvailabilityZone'] + desired_zone = instance["Placement"]["AvailabilityZone"] - if desired_zone in lb['AvailabilityZones']: + if desired_zone in lb["AvailabilityZones"]: return False if self.module.check_mode: @@ -214,12 +216,11 @@ def _enable_availailability_zone(self, lb): try: self.client_elb.enable_availability_zones_for_load_balancer( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], AvailabilityZones=[desired_zone], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers', - load_balancer=lb, zone=desired_zone) + self.module.fail_json_aws(e, "Failed to enable AZ on load balancers", load_balancer=lb, zone=desired_zone) return True @@ -233,27 +234,29 @@ def _await_elb_instance_state(self, lb, awaited_state, timeout): if awaited_state == initial_state: return - if awaited_state == 'InService': - waiter = self.client_elb.get_waiter('instance_in_service') - elif awaited_state == 'Deregistered': - waiter = self.client_elb.get_waiter('instance_deregistered') - elif awaited_state == 'OutOfService': - waiter = self.client_elb.get_waiter('instance_deregistered') + if awaited_state == "InService": + waiter = self.client_elb.get_waiter("instance_in_service") + elif awaited_state == "Deregistered": + waiter = self.client_elb.get_waiter("instance_deregistered") + elif awaited_state == "OutOfService": + waiter = self.client_elb.get_waiter("instance_deregistered") else: - self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state) + self.module.fail_json(msg="Could not wait for unknown state", awaited_state=awaited_state) try: waiter.wait( - LoadBalancerName=lb['LoadBalancerName'], + LoadBalancerName=lb["LoadBalancerName"], Instances=[{"InstanceId": self.instance_id}], - WaiterConfig={'Delay': 1, 'MaxAttempts': timeout}, + WaiterConfig={"Delay": 1, "MaxAttempts": timeout}, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Timeout waiting for instance to reach desired state", awaited_state=awaited_state + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state', - awaited_state=awaited_state) + self.module.fail_json_aws( + e, msg="Error while waiting for instance to reach desired state", awaited_state=awaited_state + ) return @@ -265,18 +268,21 @@ def _get_instance_health(self, lb): try: status = self.client_elb.describe_instance_health( aws_retry=True, - LoadBalancerName=lb['LoadBalancerName'], - Instances=[{'InstanceId': self.instance_id}], - )['InstanceStates'] - except is_boto3_error_code('InvalidInstance'): + LoadBalancerName=lb["LoadBalancerName"], + Instances=[{"InstanceId": self.instance_id}], + )["InstanceStates"] + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Failed to get instance health') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Failed to get instance health") 
if not status: return None - return status[0]['State'] + return status[0]["State"] def _get_instance_lbs(self, ec2_elbs=None): """Returns a list of ELBs attached to self.instance_id @@ -289,12 +295,12 @@ def _get_instance_lbs(self, ec2_elbs=None): ec2_elbs = self._get_auto_scaling_group_lbs() if ec2_elbs: - list_params['LoadBalancerNames'] = ec2_elbs + list_params["LoadBalancerNames"] = ec2_elbs try: elbs = self._describe_elbs(**list_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, 'Failed to describe load balancers') + self.module.fail_json_aws(e, "Failed to describe load balancers") if ec2_elbs: return elbs @@ -303,7 +309,7 @@ def _get_instance_lbs(self, ec2_elbs=None): # of. lbs = [] for lb in elbs: - instance_ids = [i['InstanceId'] for i in lb['Instances']] + instance_ids = [i["InstanceId"] for i in lb["Instances"]] if self.instance_id in instance_ids: lbs.append(lb) @@ -311,14 +317,14 @@ def _get_instance_lbs(self, ec2_elbs=None): def _get_auto_scaling_group_lbs(self): """Returns a list of ELBs associated with self.instance_id - indirectly through its auto scaling group membership""" + indirectly through its auto scaling group membership""" try: asg_instances = self.client_asg.describe_auto_scaling_instances( - aws_retry=True, - InstanceIds=[self.instance_id])['AutoScalingInstances'] + aws_retry=True, InstanceIds=[self.instance_id] + )["AutoScalingInstances"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") if len(asg_instances) > 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") @@ -327,42 +333,40 @@ def _get_auto_scaling_group_lbs(self): # Instance isn't a member of an ASG return [] - asg_name = asg_instances[0]['AutoScalingGroupName'] + asg_name = asg_instances[0]["AutoScalingGroupName"] try: asg_instances = self.client_asg.describe_auto_scaling_groups( - aws_retry=True, - AutoScalingGroupNames=[asg_name])['AutoScalingGroups'] + aws_retry=True, AutoScalingGroupNames=[asg_name] + )["AutoScalingGroups"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + self.module.fail_json_aws(e, msg="Failed to describe ASG") if len(asg_instances) != 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - return asg_instances[0]['LoadBalancerNames'] + return asg_instances[0]["LoadBalancerNames"] def _get_instance(self): """Returns the description of an instance""" try: - result = self.client_ec2.describe_instances( - aws_retry=True, - InstanceIds=[self.instance_id]) + result = self.client_ec2.describe_instances(aws_retry=True, InstanceIds=[self.instance_id]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - return result['Reservations'][0]['Instances'][0] + self.module.fail_json_aws(e, msg="Failed to describe instance") + return result["Reservations"][0]["Instances"][0] def main(): argument_spec = dict( - state={'required': True, 'choices': ['present', 'absent']}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, - wait={'required':
False, 'default': True, 'type': 'bool'}, - wait_timeout={'required': False, 'default': 0, 'type': 'int'}, + state={"required": True, "choices": ["present", "absent"]}, + instance_id={"required": True}, + ec2_elbs={"default": None, "required": False, "type": "list", "elements": "str"}, + enable_availability_zone={"default": True, "required": False, "type": "bool"}, + wait={"required": False, "default": True, "type": "bool"}, + wait_timeout={"required": False, "default": 0, "type": "int"}, ) required_if = [ - ('state', 'present', ['ec2_elbs']), + ("state", "present", ["ec2_elbs"]), ] module = AnsibleAWSModule( @@ -371,11 +375,11 @@ def main(): supports_check_mode=True, ) - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - instance_id = module.params['instance_id'] + ec2_elbs = module.params["ec2_elbs"] + wait = module.params["wait"] + enable_availability_zone = module.params["enable_availability_zone"] + timeout = module.params["wait_timeout"] + instance_id = module.params["instance_id"] elb_man = ElbManager(module, instance_id, ec2_elbs) @@ -384,9 +388,9 @@ def main(): if not elb_man.exists(elb): module.fail_json(msg="ELB {0} does not exist".format(elb)) - if module.params['state'] == 'present': + if module.params["state"] == "present": elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": elb_man.deregister(wait, timeout) module.exit_json( @@ -395,5 +399,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_network_lb.py b/elb_network_lb.py index 069882dc90c..76e2454aa65 100644 --- a/elb_network_lb.py +++ b/elb_network_lb.py @@ -349,10 +349,12 @@ def create_or_update_elb(elb_obj): # Tags - only need to play with tags if tags parameter has been set to something if elb_obj.tags is not None: - # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(elb_obj.elb["tags"]), + boto3_tag_list_to_ansible_dict(elb_obj.tags), + elb_obj.purge_tags, + ) if tags_to_delete: elb_obj.delete_tags(tags_to_delete) @@ -369,25 +371,29 @@ def create_or_update_elb(elb_obj): elb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb["LoadBalancerArn"]) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in 
listeners_to_modify: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb["LoadBalancerArn"] + ) listener_obj.modify() listeners_obj.changed = True @@ -396,8 +402,8 @@ def create_or_update_elb(elb_obj): elb_obj.changed = True # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) + if elb_obj.module.params.get("ip_address_type") is not None: + elb_obj.modify_ip_address_type(elb_obj.module.params.get("ip_address_type")) # Update the objects to pickup changes # Get the ELB again @@ -410,24 +416,20 @@ def create_or_update_elb(elb_obj): # Convert to snake_case and merge in everything we want to return to the user snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb['listeners'] = [] + snaked_elb["listeners"] = [] for listener in listeners_obj.current_listeners: - snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + snaked_elb["listeners"].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + snaked_elb["tags"] = boto3_tag_list_to_ansible_dict(snaked_elb["tags"]) # ip address type - snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() + snaked_elb["ip_address_type"] = elb_obj.get_elb_ip_address_type() - elb_obj.module.exit_json( - changed=elb_obj.changed, - load_balancer=snaked_elb, - **snaked_elb) + elb_obj.module.exit_json(changed=elb_obj.changed, load_balancer=snaked_elb, **snaked_elb) def delete_elb(elb_obj): - if elb_obj.elb: elb_obj.delete() @@ -435,42 +437,42 @@ def delete_elb(elb_obj): def main(): - - argument_spec = ( - dict( - cross_zone_load_balancing=dict(type='bool'), - deletion_protection=dict(type='bool'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - DefaultActions=dict(type='list', required=True, elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - subnet_mappings=dict(type='list', elements='dict'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], type='str', default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - wait_timeout=dict(type='int'), - wait=dict(type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) - ) + argument_spec = dict( + cross_zone_load_balancing=dict(type="bool"), + deletion_protection=dict(type="bool"), + listeners=dict( + type="list", + elements="dict", + options=dict( + Protocol=dict(type="str", required=True), + Port=dict(type="int", required=True), + SslPolicy=dict(type="str"), + Certificates=dict(type="list", elements="dict"), + DefaultActions=dict(type="list", required=True, elements="dict"), + ), + ), + name=dict(required=True, type="str"), + purge_listeners=dict(default=True, type="bool"), + purge_tags=dict(default=True, type="bool"), + subnets=dict(type="list", elements="str"), 
+ subnet_mappings=dict(type="list", elements="dict"), + scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), + state=dict(choices=["present", "absent"], type="str", default="present"), + tags=dict(type="dict", aliases=["resource_tags"]), + wait_timeout=dict(type="int"), + wait=dict(type="bool"), + ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]), ) required_if = [ - ('state', 'present', ('subnets', 'subnet_mappings',), True) + ["state", "present", ["subnets", "subnet_mappings"], True], ] - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=required_if, - mutually_exclusive=[['subnets', 'subnet_mappings']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, + mutually_exclusive=[["subnets", "subnet_mappings"]], + ) # Check for subnets or subnet_mappings if state is present state = module.params.get("state") @@ -480,20 +482,20 @@ def main(): if listeners is not None: for listener in listeners: for key in listener.keys(): - protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP'] - if key == 'Protocol' and listener[key] not in protocols_list: + protocols_list = ["TCP", "TLS", "UDP", "TCP_UDP"] + if key == "Protocol" and listener[key] not in protocols_list: module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list)) - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') + connection = module.client("elbv2") + connection_ec2 = module.client("ec2") elb = NetworkLoadBalancer(connection, connection_ec2, module) - if state == 'present': + if state == "present": create_or_update_elb(elb) else: delete_elb(elb) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_target.py b/elb_target.py index fd05cd67a3a..cab7b10aef8 100644 --- a/elb_target.py +++ b/elb_target.py @@ -127,24 +127,23 @@ from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_target_groups_with_backoff(connection, tg_name): return connection.describe_target_groups(Names=[tg_name]) def convert_tg_name_to_arn(connection, module, tg_name): - try: response = describe_target_groups_with_backoff(connection, tg_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) - tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + tg_arn = response["TargetGroups"][0]["TargetGroupArn"] return tg_arn -@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound']) +@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"]) def describe_targets_with_backoff(connection, tg_arn, target): if target is None: tg = [] @@ -155,7 +154,6 @@ def describe_targets_with_backoff(connection, tg_arn, target): def describe_targets(connection, module, tg_arn, target=None): - """ Describe targets in a target group @@ -167,7 +165,7 @@ def describe_targets(connection, module, tg_arn, target=None): """ try: - targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions'] + targets = describe_targets_with_backoff(connection, tg_arn, target)["TargetHealthDescriptions"] if not targets: return {} return targets[0] @@ -181,7 +179,6 @@ 
def register_target_with_backoff(connection, target_group_arn, target): def register_target(connection, module): - """ Registers a target to a target group @@ -203,26 +200,32 @@ def register_target(connection, module): target = dict(Id=target_id) if target_az: - target['AvailabilityZone'] = target_az + target["AvailabilityZone"] = target_az if target_port: - target['Port'] = target_port + target["Port"] = target_port target_description = describe_targets(connection, module, target_group_arn, target) - if 'Reason' in target_description['TargetHealth']: - if target_description['TargetHealth']['Reason'] == "Target.NotRegistered": + if "Reason" in target_description["TargetHealth"]: + if target_description["TargetHealth"]["Reason"] == "Target.NotRegistered": try: register_target_with_backoff(connection, target_group_arn, target) changed = True if target_status: - target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) + target_status_check( + connection, module, target_group_arn, target, target_status, target_status_timeout + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to register target {0}".format(target)) # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) - module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + module.exit_json( + changed=changed, + target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), + target_group_arn=target_group_arn, + ) @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -231,7 +234,6 @@ def deregister_target_with_backoff(connection, target_group_arn, target): def deregister_target(connection, module): - """ Deregisters a target from a target group @@ -253,18 +255,18 @@ def deregister_target(connection, module): target = dict(Id=target_id) if target_port: - target['Port'] = target_port + target["Port"] = target_port target_description = describe_targets(connection, module, target_group_arn, target) - current_target_state = target_description['TargetHealth']['State'] - current_target_reason = target_description['TargetHealth'].get('Reason') + current_target_state = target_description["TargetHealth"]["State"] + current_target_reason = target_description["TargetHealth"].get("Reason") needs_deregister = False - if deregister_unused and current_target_state == 'unused': - if current_target_reason != 'Target.NotRegistered': + if deregister_unused and current_target_state == "unused": + if current_target_reason != "Target.NotRegistered": needs_deregister = True - elif current_target_state not in ['unused', 'draining']: + elif current_target_state not in ["unused", "draining"]: needs_deregister = True if needs_deregister: @@ -274,9 +276,11 @@ def deregister_target(connection, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Unable to deregister target {0}".format(target)) else: - if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining': - module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group.
" + - "To force deregistration use the 'deregister_unused' option.") + if current_target_reason != "Target.NotRegistered" and current_target_state != "draining": + module.warn( + warning="Your specified target has an 'unused' state but is still registered to the target group. " + + "To force deregistration use the 'deregister_unused' option." + ) if target_status: target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout) @@ -284,53 +288,62 @@ def deregister_target(connection, module): # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) - module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn) + module.exit_json( + changed=changed, + target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), + target_group_arn=target_group_arn, + ) def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout): reached_state = False timeout = target_status_timeout + time() while time() < timeout: - health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State'] + health_state = describe_targets(connection, module, target_group_arn, target)["TargetHealth"]["State"] if health_state == target_status: reached_state = True break sleep(1) if not reached_state: - module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}: '.format(target_status_timeout, health_state)) + module.fail_json( + msg="Status check timeout of {0} exceeded, last status was {1}: ".format( + target_status_timeout, health_state + ) + ) def main(): - argument_spec = dict( - deregister_unused=dict(type='bool', default=False), - target_az=dict(type='str'), - target_group_arn=dict(type='str'), - target_group_name=dict(type='str'), - target_id=dict(type='str', required=True), - target_port=dict(type='int'), - target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'), - target_status_timeout=dict(type='int', default=60), - state=dict(required=True, choices=['present', 'absent'], type='str'), + deregister_unused=dict(type="bool", default=False), + target_az=dict(type="str"), + target_group_arn=dict(type="str"), + target_group_name=dict(type="str"), + target_id=dict(type="str", required=True), + target_port=dict(type="int"), + target_status=dict( + choices=["initial", "healthy", "unhealthy", "unused", "draining", "unavailable"], type="str" + ), + target_status_timeout=dict(type="int", default=60), + state=dict(required=True, choices=["present", "absent"], type="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['target_group_arn', 'target_group_name']], + mutually_exclusive=[["target_group_arn", "target_group_name"]], ) try: - connection = module.client('elbv2') + connection = module.client("elbv2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": register_target(connection, module) else: deregister_target(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_target_group.py b/elb_target_group.py index 16cafc958e3..bbab1507d2d 100644 --- a/elb_target_group.py +++ 
b/elb_target_group.py @@ -459,45 +459,52 @@ def get_tg_attributes(connection, module, tg_arn): try: _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True) - tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes']) + tg_attributes = boto3_tag_list_to_ansible_dict(_attributes["Attributes"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") # Replace '.' with '_' in attribute key names to make it more Ansible friendly - return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items()) + return dict((k.replace(".", "_"), v) for k, v in tg_attributes.items()) def get_target_group_tags(connection, module, target_group_arn): try: _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True) - return _tags['TagDescriptions'][0]['Tags'] + return _tags["TagDescriptions"][0]["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group tags") def get_target_group(connection, module, retry_missing=False): - extra_codes = ['TargetGroupNotFound'] if retry_missing else [] + extra_codes = ["TargetGroupNotFound"] if retry_missing else [] try: - target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")]) + target_group_paginator = connection.get_paginator("describe_target_groups").paginate( + Names=[module.params.get("name")] + ) jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes) result = jittered_retry(target_group_paginator.build_full_result)() - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get target group") - return result['TargetGroups'][0] + return result["TargetGroups"][0] def wait_for_status(connection, module, target_group_arn, targets, status): polling_increment_secs = 5 - max_retries = (module.params.get('wait_timeout') // polling_increment_secs) + max_retries = module.params.get("wait_timeout") // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: - response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True) - if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status: + response = connection.describe_target_health( + TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True + ) + if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status: status_achieved = True break else: @@ -527,172 +534,204 @@ def create_or_update_attributes(connection, module, target_group, new_target_gro update_attributes = [] # Get current attributes - current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn']) + current_tg_attributes = get_tg_attributes(connection, module, target_group["TargetGroupArn"]) if deregistration_delay_timeout is not None: - if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: - update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': 
str(deregistration_delay_timeout)}) + if str(deregistration_delay_timeout) != current_tg_attributes["deregistration_delay_timeout_seconds"]: + update_attributes.append( + {"Key": "deregistration_delay.timeout_seconds", "Value": str(deregistration_delay_timeout)} + ) if deregistration_connection_termination is not None: - if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": - update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) + if ( + deregistration_connection_termination + and current_tg_attributes.get("deregistration_delay_connection_termination_enabled") != "true" + ): + update_attributes.append({"Key": "deregistration_delay.connection_termination.enabled", "Value": "true"}) if stickiness_enabled is not None: - if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": - update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) + if stickiness_enabled and current_tg_attributes["stickiness_enabled"] != "true": + update_attributes.append({"Key": "stickiness.enabled", "Value": "true"}) if stickiness_lb_cookie_duration is not None: - if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)}) + if str(stickiness_lb_cookie_duration) != current_tg_attributes["stickiness_lb_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.lb_cookie.duration_seconds", "Value": str(stickiness_lb_cookie_duration)} + ) if stickiness_type is not None: - if stickiness_type != current_tg_attributes.get('stickiness_type'): - update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type}) + if stickiness_type != current_tg_attributes.get("stickiness_type"): + update_attributes.append({"Key": "stickiness.type", "Value": stickiness_type}) if stickiness_app_cookie_name is not None: - if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'): - update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)}) + if stickiness_app_cookie_name != current_tg_attributes.get("stickiness_app_cookie_name"): + update_attributes.append( + {"Key": "stickiness.app_cookie.cookie_name", "Value": str(stickiness_app_cookie_name)} + ) if stickiness_app_cookie_duration is not None: - if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']: - update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)}) + if str(stickiness_app_cookie_duration) != current_tg_attributes["stickiness_app_cookie_duration_seconds"]: + update_attributes.append( + {"Key": "stickiness.app_cookie.duration_seconds", "Value": str(stickiness_app_cookie_duration)} + ) if preserve_client_ip_enabled is not None: - if target_type not in ('udp', 'tcp_udp'): - if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'): - update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()}) + if target_type not in ("udp", "tcp_udp"): + if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get("preserve_client_ip_enabled"): + update_attributes.append( + {"Key": "preserve_client_ip.enabled", "Value": 
str(preserve_client_ip_enabled).lower()} + ) if proxy_protocol_v2_enabled is not None: - if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'): - update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()}) + if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get("proxy_protocol_v2_enabled"): + update_attributes.append( + {"Key": "proxy_protocol_v2.enabled", "Value": str(proxy_protocol_v2_enabled).lower()} + ) if load_balancing_algorithm_type is not None: - if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']: - update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)}) + if str(load_balancing_algorithm_type) != current_tg_attributes["load_balancing_algorithm_type"]: + update_attributes.append( + {"Key": "load_balancing.algorithm.type", "Value": str(load_balancing_algorithm_type)} + ) if update_attributes: try: - connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True) + connection.modify_target_group_attributes( + TargetGroupArn=target_group["TargetGroupArn"], Attributes=update_attributes, aws_retry=True + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state if new_target_group: - connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True) module.fail_json_aws(e, msg="Couldn't delete target group") return changed def create_or_update_target_group(connection, module): - changed = False new_target_group = False params = dict() target_type = module.params.get("target_type") - params['Name'] = module.params.get("name") - params['TargetType'] = target_type + params["Name"] = module.params.get("name") + params["TargetType"] = target_type if target_type != "lambda": - params['Protocol'] = module.params.get("protocol").upper() - if module.params.get('protocol_version') is not None: - params['ProtocolVersion'] = module.params.get('protocol_version') - params['Port'] = module.params.get("port") - params['VpcId'] = module.params.get("vpc_id") + params["Protocol"] = module.params.get("protocol").upper() + if module.params.get("protocol_version") is not None: + params["ProtocolVersion"] = module.params.get("protocol_version") + params["Port"] = module.params.get("port") + params["VpcId"] = module.params.get("vpc_id") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") health_option_keys = [ - "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout", - "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes" + "health_check_path", + "health_check_protocol", + "health_check_interval", + "health_check_timeout", + "healthy_threshold_count", + "unhealthy_threshold_count", + "successful_response_codes", ] health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys) # Set health check if anything set if health_options: - if module.params.get("health_check_protocol") is not None: - params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper() + 
params["HealthCheckProtocol"] = module.params.get("health_check_protocol").upper() if module.params.get("health_check_port") is not None: - params['HealthCheckPort'] = module.params.get("health_check_port") + params["HealthCheckPort"] = module.params.get("health_check_port") if module.params.get("health_check_interval") is not None: - params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval") + params["HealthCheckIntervalSeconds"] = module.params.get("health_check_interval") if module.params.get("health_check_timeout") is not None: - params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout") + params["HealthCheckTimeoutSeconds"] = module.params.get("health_check_timeout") if module.params.get("healthy_threshold_count") is not None: - params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count") + params["HealthyThresholdCount"] = module.params.get("healthy_threshold_count") if module.params.get("unhealthy_threshold_count") is not None: - params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count") + params["UnhealthyThresholdCount"] = module.params.get("unhealthy_threshold_count") # Only need to check response code and path for http(s) health checks protocol = module.params.get("health_check_protocol") - if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']: - + if protocol is not None and protocol.upper() in ["HTTP", "HTTPS"]: if module.params.get("health_check_path") is not None: - params['HealthCheckPath'] = module.params.get("health_check_path") + params["HealthCheckPath"] = module.params.get("health_check_path") if module.params.get("successful_response_codes") is not None: - params['Matcher'] = {} - code_key = 'HttpCode' - protocol_version = module.params.get('protocol_version') + params["Matcher"] = {} + code_key = "HttpCode" + protocol_version = module.params.get("protocol_version") if protocol_version is not None and protocol_version.upper() == "GRPC": - code_key = 'GrpcCode' - params['Matcher'][code_key] = module.params.get("successful_response_codes") + code_key = "GrpcCode" + params["Matcher"][code_key] = module.params.get("successful_response_codes") # Get target group target_group = get_target_group(connection, module) if target_group: - diffs = [param for param in ('Port', 'Protocol', 'VpcId') - if target_group.get(param) != params.get(param)] + diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)] if diffs: - module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % - ", ".join(diffs)) + module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % ", ".join(diffs)) # Target group exists so check health check parameters match what has been passed health_check_params = dict() # Modify health check if anything set if health_options: - # Health check protocol - if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']: - health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol'] + if "HealthCheckProtocol" in params and target_group["HealthCheckProtocol"] != params["HealthCheckProtocol"]: + health_check_params["HealthCheckProtocol"] = params["HealthCheckProtocol"] # Health check port - if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']: - health_check_params['HealthCheckPort'] = params['HealthCheckPort'] + if "HealthCheckPort" in params and target_group["HealthCheckPort"] != 
params["HealthCheckPort"]: + health_check_params["HealthCheckPort"] = params["HealthCheckPort"] # Health check interval - if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']: - health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds'] + if ( + "HealthCheckIntervalSeconds" in params + and target_group["HealthCheckIntervalSeconds"] != params["HealthCheckIntervalSeconds"] + ): + health_check_params["HealthCheckIntervalSeconds"] = params["HealthCheckIntervalSeconds"] # Health check timeout - if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']: - health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds'] + if ( + "HealthCheckTimeoutSeconds" in params + and target_group["HealthCheckTimeoutSeconds"] != params["HealthCheckTimeoutSeconds"] + ): + health_check_params["HealthCheckTimeoutSeconds"] = params["HealthCheckTimeoutSeconds"] # Healthy threshold - if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']: - health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount'] + if ( + "HealthyThresholdCount" in params + and target_group["HealthyThresholdCount"] != params["HealthyThresholdCount"] + ): + health_check_params["HealthyThresholdCount"] = params["HealthyThresholdCount"] # Unhealthy threshold - if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']: - health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount'] + if ( + "UnhealthyThresholdCount" in params + and target_group["UnhealthyThresholdCount"] != params["UnhealthyThresholdCount"] + ): + health_check_params["UnhealthyThresholdCount"] = params["UnhealthyThresholdCount"] # Only need to check response code and path for http(s) health checks - if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']: + if target_group["HealthCheckProtocol"] in ["HTTP", "HTTPS"]: # Health check path - if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']: - health_check_params['HealthCheckPath'] = params['HealthCheckPath'] + if "HealthCheckPath" in params and target_group["HealthCheckPath"] != params["HealthCheckPath"]: + health_check_params["HealthCheckPath"] = params["HealthCheckPath"] # Matcher (successful response codes) # TODO: required and here? 
- if 'Matcher' in params: - code_key = 'HttpCode' - if target_group['ProtocolVersion'] == 'GRPC': - code_key = 'GrpcCode' - current_matcher_list = target_group['Matcher'][code_key].split(',') - requested_matcher_list = params['Matcher'][code_key].split(',') + if "Matcher" in params: + code_key = "HttpCode" + if target_group["ProtocolVersion"] == "GRPC": + code_key = "GrpcCode" + current_matcher_list = target_group["Matcher"][code_key].split(",") + requested_matcher_list = params["Matcher"][code_key].split(",") if set(current_matcher_list) != set(requested_matcher_list): - health_check_params['Matcher'] = {} - health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list) + health_check_params["Matcher"] = {} + health_check_params["Matcher"][code_key] = ",".join(requested_matcher_list) try: if health_check_params: - connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params) + connection.modify_target_group( + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True, **health_check_params + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update target group") @@ -703,27 +742,27 @@ def create_or_update_target_group(connection, module): # describe_target_health seems to be the only way to get them try: current_targets = connection.describe_target_health( - TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group health") if module.params.get("targets"): - if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") # Correct type of target ports - for target in params['Targets']: - target['Port'] = int(target.get('Port', module.params.get('port'))) + for target in params["Targets"]: + target["Port"] = int(target.get("Port", module.params.get("port"))) current_instance_ids = [] - for instance in current_targets['TargetHealthDescriptions']: - current_instance_ids.append(instance['Target']['Id']) + for instance in current_targets["TargetHealthDescriptions"]: + current_instance_ids.append(instance["Target"]["Id"]) new_instance_ids = [] - for instance in params['Targets']: - new_instance_ids.append(instance['Id']) + for instance in params["Targets"]: + new_instance_ids.append(instance["Id"]) add_instances = set(new_instance_ids) - set(current_instance_ids) @@ -738,37 +777,49 @@ def create_or_update_target_group(connection, module): changed = True try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=instances_to_add, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy') + connection, module, target_group["TargetGroupArn"], instances_to_add, "healthy" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target registration to be healthy - please check the AWS console') + msg="Error waiting for target registration to be 
healthy - please check the AWS console" + ) remove_instances = set(current_instance_ids) - set(new_instance_ids) if remove_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - if target['Target']['Id'] in remove_instances: - instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + if target["Target"]["Id"] in remove_instances: + instances_to_remove.append( + {"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]} + ) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # register lambda target else: @@ -786,40 +837,40 @@ def create_or_update_target_group(connection, module): if changed: if target.get("Id"): response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target['Id'] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], + Targets=[{"Id": target["Id"]}], + aws_retry=True, ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") else: if target_type != "lambda": - - current_instances = current_targets['TargetHealthDescriptions'] + current_instances = current_targets["TargetHealthDescriptions"] if current_instances: instances_to_remove = [] - for target in current_targets['TargetHealthDescriptions']: - instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']}) + for target in current_targets["TargetHealthDescriptions"]: + instances_to_remove.append({"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]}) changed = True try: - connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True) + connection.deregister_targets( + TargetGroupArn=target_group["TargetGroupArn"], + Targets=instances_to_remove, + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove targets") if module.params.get("wait"): status_achieved, registered_instances = wait_for_status( - connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused') + connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" + ) if not status_achieved: module.fail_json( - msg='Error waiting for target deregistration - please check the AWS console') + msg="Error waiting for target deregistration - please check the AWS console" + ) # remove lambda targets else: @@ -830,7 +881,10 @@ def 
create_or_update_target_group(connection, module): target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] if changed: connection.deregister_targets( - TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True) + TargetGroupArn=target_group["TargetGroupArn"], + Targets=[{"Id": target_to_remove}], + aws_retry=True, + ) else: try: connection.create_target_group(aws_retry=True, **params) @@ -843,33 +897,32 @@ def create_or_update_target_group(connection, module): if module.params.get("targets"): if target_type != "lambda": - params['Targets'] = module.params.get("targets") + params["Targets"] = module.params.get("targets") try: - connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True) + connection.register_targets( + TargetGroupArn=target_group["TargetGroupArn"], Targets=params["Targets"], aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't register targets") if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy') + status_achieved, registered_instances = wait_for_status( + connection, module, target_group["TargetGroupArn"], params["Targets"], "healthy" + ) if not status_achieved: - module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console') + module.fail_json( + msg="Error waiting for target registration to be healthy - please check the AWS console" + ) else: try: target = module.params.get("targets")[0] response = connection.register_targets( - TargetGroupArn=target_group['TargetGroupArn'], - Targets=[ - { - "Id": target["Id"] - } - ], - aws_retry=True + TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't register targets") + module.fail_json_aws(e, msg="Couldn't register targets") attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group) @@ -879,13 +932,17 @@ def create_or_update_target_group(connection, module): # Tags - only need to play with tags if tags parameter has been set to something if tags is not None: # Get tags - current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn']) + current_tags = get_target_group_tags(connection, module, target_group["TargetGroupArn"]) # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags + ) if tags_to_delete: try: - connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True) + connection.remove_tags( + ResourceArns=[target_group["TargetGroupArn"]], TagKeys=tags_to_delete, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete tags from target group") changed = True @@ -893,7 +950,11 @@ def create_or_update_target_group(connection, module): # Add/update tags if tags_need_modify: try: - connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], 
Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True) + connection.add_tags( + ResourceArns=[target_group["TargetGroupArn"]], + Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), + aws_retry=True, + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to target group") changed = True @@ -902,12 +963,14 @@ def create_or_update_target_group(connection, module): target_group = get_target_group(connection, module) # Get the target group attributes again - target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn'])) + target_group.update(get_tg_attributes(connection, module, target_group["TargetGroupArn"])) # Convert target_group to snake_case snaked_tg = camel_dict_to_snake_dict(target_group) - snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn'])) + snaked_tg["tags"] = boto3_tag_list_to_ansible_dict( + get_target_group_tags(connection, module, target_group["TargetGroupArn"]) + ) module.exit_json(changed=changed, **snaked_tg) @@ -918,7 +981,7 @@ def delete_target_group(connection, module): if tg: try: - connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True) + connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete target group") @@ -927,66 +990,69 @@ def delete_target_group(connection, module): def main(): - protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', - 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] + protocols_list = ["http", "https", "tcp", "tls", "udp", "tcp_udp", "HTTP", "HTTPS", "TCP", "TLS", "UDP", "TCP_UDP"] argument_spec = dict( - deregistration_delay_timeout=dict(type='int'), - deregistration_connection_termination=dict(type='bool', default=False), + deregistration_delay_timeout=dict(type="int"), + deregistration_connection_termination=dict(type="bool", default=False), health_check_protocol=dict(choices=protocols_list), health_check_port=dict(), health_check_path=dict(), - health_check_interval=dict(type='int'), - health_check_timeout=dict(type='int'), - healthy_threshold_count=dict(type='int'), - modify_targets=dict(default=True, type='bool'), + health_check_interval=dict(type="int"), + health_check_timeout=dict(type="int"), + healthy_threshold_count=dict(type="int"), + modify_targets=dict(default=True, type="bool"), name=dict(required=True), - port=dict(type='int'), + port=dict(type="int"), protocol=dict(choices=protocols_list), - protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']), - purge_tags=dict(default=True, type='bool'), - stickiness_enabled=dict(type='bool'), + protocol_version=dict(type="str", choices=["GRPC", "HTTP1", "HTTP2"]), + purge_tags=dict(default=True, type="bool"), + stickiness_enabled=dict(type="bool"), stickiness_type=dict(), - stickiness_lb_cookie_duration=dict(type='int'), - stickiness_app_cookie_duration=dict(type='int'), + stickiness_lb_cookie_duration=dict(type="int"), + stickiness_app_cookie_duration=dict(type="int"), stickiness_app_cookie_name=dict(), - load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']), - state=dict(required=True, choices=['present', 'absent']), + load_balancing_algorithm_type=dict(type="str", choices=["round_robin", "least_outstanding_requests"]), + 
state=dict(required=True, choices=["present", "absent"]), successful_response_codes=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']), - targets=dict(type='list', elements='dict'), - unhealthy_threshold_count=dict(type='int'), + tags=dict(type="dict", aliases=["resource_tags"]), + target_type=dict(choices=["instance", "ip", "lambda", "alb"]), + targets=dict(type="list", elements="dict"), + unhealthy_threshold_count=dict(type="int"), vpc_id=dict(), - preserve_client_ip_enabled=dict(type='bool'), - proxy_protocol_v2_enabled=dict(type='bool'), - wait_timeout=dict(type='int', default=200), - wait=dict(type='bool', default=False) + preserve_client_ip_enabled=dict(type="bool"), + proxy_protocol_v2_enabled=dict(type="bool"), + wait_timeout=dict(type="int", default=200), + wait=dict(type="bool", default=False), ) required_by = dict( - health_check_path=['health_check_protocol'], - successful_response_codes=['health_check_protocol'], + health_check_path=["health_check_protocol"], + successful_response_codes=["health_check_protocol"], ) required_if = [ - ['target_type', 'instance', ['protocol', 'port', 'vpc_id']], - ['target_type', 'ip', ['protocol', 'port', 'vpc_id']], - ['target_type', 'alb', ['protocol', 'port', 'vpc_id']], + ["target_type", "instance", ["protocol", "port", "vpc_id"]], + ["target_type", "ip", ["protocol", "port", "vpc_id"]], + ["target_type", "alb", ["protocol", "port", "vpc_id"]], ] module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if) - if module.params.get('target_type') is None: - module.params['target_type'] = 'instance' + if module.params.get("target_type") is None: + module.params["target_type"] = "instance" - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - if module.params.get('state') == 'present': - if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None): - module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination") + if module.params.get("state") == "present": + if module.params.get("protocol") in ["http", "https", "HTTP", "HTTPS"] and module.params.get( + "deregistration_connection_termination", None + ): + module.fail_json( + msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination" + ) create_or_update_target_group(connection, module) else: delete_target_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_target_group_info.py b/elb_target_group_info.py index d58c2f248f5..bf02db21f15 100644 --- a/elb_target_group_info.py +++ b/elb_target_group_info.py @@ -220,40 +220,39 @@ @AWSRetry.jittered_backoff(retries=10) def get_paginator(**kwargs): - paginator = client.get_paginator('describe_target_groups') + paginator = client.get_paginator("describe_target_groups") return paginator.paginate(**kwargs).build_full_result() def get_target_group_attributes(target_group_arn): - try: - target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes']) + target_group_attributes = boto3_tag_list_to_ansible_dict( + client.describe_target_group_attributes(TargetGroupArn=target_group_arn)["Attributes"] + ) 
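# Illustrative aside (not in the upstream commit): what the helper above does
# with the API response. describe_target_group_attributes() returns a list of
# Key/Value pairs; boto3_tag_list_to_ansible_dict() collapses that list into a
# plain dict, and the snake-casing below swaps '.' for '_'. A small runnable
# sketch with hypothetical attribute values:
raw_attributes = [
    {"Key": "deregistration_delay.timeout_seconds", "Value": "300"},
    {"Key": "stickiness.enabled", "Value": "false"},
]
flattened = {item["Key"]: item["Value"] for item in raw_attributes}
snaked = dict((k.replace(".", "_"), v) for k, v in flattened.items())
assert snaked == {
    "deregistration_delay_timeout_seconds": "300",
    "stickiness_enabled": "false",
}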
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe target group attributes") # Replace '.' with '_' in attribute key names to make it more Ansibley - return dict((k.replace('.', '_'), v) - for (k, v) in target_group_attributes.items()) + return dict((k.replace(".", "_"), v) for (k, v) in target_group_attributes.items()) def get_target_group_tags(target_group_arn): - try: - return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']) + return boto3_tag_list_to_ansible_dict( + client.describe_tags(ResourceArns=[target_group_arn])["TagDescriptions"][0]["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe group tags") def get_target_group_targets_health(target_group_arn): - try: - return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions'] + return client.describe_target_health(TargetGroupArn=target_group_arn)["TargetHealthDescriptions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get target health") def list_target_groups(): - load_balancer_arn = module.params.get("load_balancer_arn") target_group_arns = module.params.get("target_group_arns") names = module.params.get("names") @@ -268,24 +267,29 @@ def list_target_groups(): target_groups = get_paginator(TargetGroupArns=target_group_arns) if names: target_groups = get_paginator(Names=names) - except is_boto3_error_code('TargetGroupNotFound'): + except is_boto3_error_code("TargetGroupNotFound"): module.exit_json(target_groups=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list target groups") # Get the attributes and tags for each target group - for target_group in target_groups['TargetGroups']: - target_group.update(get_target_group_attributes(target_group['TargetGroupArn'])) + for target_group in target_groups["TargetGroups"]: + target_group.update(get_target_group_attributes(target_group["TargetGroupArn"])) # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']] + snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups["TargetGroups"]] # Get tags for each target group for snaked_target_group in snaked_target_groups: - snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn']) + snaked_target_group["tags"] = get_target_group_tags(snaked_target_group["target_group_arn"]) if collect_targets_health: - snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict( - target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])] + snaked_target_group["targets_health_description"] = [ + camel_dict_to_snake_dict(target) + for target in get_target_group_targets_health(snaked_target_group["target_group_arn"]) + ] module.exit_json(target_groups=snaked_target_groups) @@ -295,25 +299,25 @@ def main(): global client argument_spec = dict( - load_balancer_arn=dict(type='str'), - target_group_arns=dict(type='list', elements='str'), - 
names=dict(type='list', elements='str'), - collect_targets_health=dict(default=False, type='bool', required=False), + load_balancer_arn=dict(type="str"), + target_group_arns=dict(type="list", elements="str"), + names=dict(type="list", elements="str"), + collect_targets_health=dict(default=False, type="bool", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']], + mutually_exclusive=[["load_balancer_arn", "target_group_arns", "names"]], supports_check_mode=True, ) try: - client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + client = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_target_groups() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/elb_target_info.py b/elb_target_info.py index 393e290e51b..e318f6c5b65 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -220,6 +220,7 @@ class Target(object): """Models a target in a target group""" + def __init__(self, target_id, port, az, raw_target_health): self.target_port = port self.target_id = target_id @@ -240,10 +241,7 @@ def __init__(self, **kwargs): self.targets = [] def add_target(self, target_id, target_port, target_az, raw_target_health): - self.targets.append(Target(target_id, - target_port, - target_az, - raw_target_health)) + self.targets.append(Target(target_id, target_port, target_az, raw_target_health)) def to_dict(self): object_dict = vars(self) @@ -255,28 +253,17 @@ def get_targets(self): class TargetInfoGatherer(object): - def __init__(self, module, instance_id, get_unused_target_groups): self.module = module try: - self.ec2 = self.module.client( - "ec2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.ec2 = self.module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, - msg="Couldn't connect to ec2" - ) + self.module.fail_json_aws(e, msg="Couldn't connect to ec2") try: - self.elbv2 = self.module.client( - "elbv2", - retry_decorator=AWSRetry.jittered_backoff(retries=10) - ) + self.elbv2 = self.module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not connect to elbv2" - ) + self.module.fail_json_aws(e, msg="Could not connect to elbv2") self.instance_id = instance_id self.get_unused_target_groups = get_unused_target_groups @@ -284,25 +271,19 @@ def __init__(self, module, instance_id, get_unused_target_groups): def _get_instance_ips(self): """Fetch all IPs associated with this instance so that we can determine - whether or not an instance is in an IP-based target group""" + whether or not an instance is in an IP-based target group""" try: # get ahold of the instance in the API - reservations = self.ec2.describe_instances( - InstanceIds=[self.instance_id], - aws_retry=True - )["Reservations"] + reservations = self.ec2.describe_instances(InstanceIds=[self.instance_id], aws_retry=True)["Reservations"] except (BotoCoreError, ClientError) as e: # typically this will happen if the instance doesn't exist - self.module.fail_json_aws(e, - msg="Could not get instance info" + - " for instance '%s'" % - (self.instance_id) - ) + 
self.module.fail_json_aws( + e, + msg="Could not get instance info for instance '%s'" % (self.instance_id), + ) if len(reservations) < 1: - self.module.fail_json( - msg="Instance ID %s could not be found" % self.instance_id - ) + self.module.fail_json(msg="Instance ID %s could not be found" % self.instance_id) instance = reservations[0]["Instances"][0] @@ -319,38 +300,36 @@ def _get_instance_ips(self): def _get_target_group_objects(self): """helper function to build a list of TargetGroup objects based on - the AWS API""" + the AWS API""" try: - paginator = self.elbv2.get_paginator( - "describe_target_groups" - ) + paginator = self.elbv2.get_paginator("describe_target_groups") tg_response = paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target" + - " groups" - ) + self.module.fail_json_aws( + e, + msg="Could not describe target groups", + ) # build list of TargetGroup objects representing every target group in # the system target_groups = [] for each_tg in tg_response["TargetGroups"]: - if not self.get_unused_target_groups and \ - len(each_tg["LoadBalancerArns"]) < 1: + if not self.get_unused_target_groups and len(each_tg["LoadBalancerArns"]) < 1: # only collect target groups that actually are connected # to LBs continue target_groups.append( - TargetGroup(target_group_arn=each_tg["TargetGroupArn"], - target_group_type=each_tg["TargetType"], - ) + TargetGroup( + target_group_arn=each_tg["TargetGroupArn"], + target_group_type=each_tg["TargetType"], + ) ) return target_groups def _get_target_descriptions(self, target_groups): """Helper function to build a list of all the target descriptions - for this target in a target group""" + for this target in a target group""" # Build a list of all the target groups pointing to this instance # based on the previous list tgs = set() @@ -358,37 +337,25 @@ def _get_target_descriptions(self, target_groups): for tg in target_groups: try: # Get the list of targets for that target group - response = self.elbv2.describe_target_health( - TargetGroupArn=tg.target_group_arn, - aws_retry=True - ) + response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, - msg="Could not describe target " + - "health for target group %s" % - tg.target_group_arn - ) + self.module.fail_json_aws( + e, msg="Could not describe target " + "health for target group %s" % tg.target_group_arn + ) for t in response["TargetHealthDescriptions"]: # If the target group has this instance as a target, add to # list. This logic also accounts for the possibility of a # target being in the target group multiple times with # overridden ports - if t["Target"]["Id"] == self.instance_id or \ - t["Target"]["Id"] in self.instance_ips: - + if t["Target"]["Id"] == self.instance_id or t["Target"]["Id"] in self.instance_ips: # The 'AvailabilityZone' parameter is a weird one, see the # API docs for more. 
Basically it's only supposed to be # there under very specific circumstances, so we need # to account for that - az = t["Target"]["AvailabilityZone"] \ - if "AvailabilityZone" in t["Target"] \ - else None - - tg.add_target(t["Target"]["Id"], - t["Target"]["Port"], - az, - t["TargetHealth"]) + az = t["Target"]["AvailabilityZone"] if "AvailabilityZone" in t["Target"] else None + + tg.add_target(t["Target"]["Id"], t["Target"]["Port"], az, t["TargetHealth"]) # since tgs is a set, each target group will be added only # once, even though we call add on each successful match tgs.add(tg) @@ -406,8 +373,7 @@ def _get_target_groups(self): def main(): argument_spec = dict( instance_id={"required": True, "type": "str"}, - get_unused_target_groups={"required": False, - "default": True, "type": "bool"} + get_unused_target_groups={"required": False, "default": True, "type": "bool"}, ) module = AnsibleAWSModule( @@ -418,10 +384,7 @@ def main(): instance_id = module.params["instance_id"] get_unused_target_groups = module.params["get_unused_target_groups"] - tg_gatherer = TargetInfoGatherer(module, - instance_id, - get_unused_target_groups - ) + tg_gatherer = TargetInfoGatherer(module, instance_id, get_unused_target_groups) instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] diff --git a/glue_connection.py b/glue_connection.py index e9a6b306dac..b1c935929f8 100644 --- a/glue_connection.py +++ b/glue_connection.py @@ -186,13 +186,13 @@ def _get_glue_connection(connection, module): connection_name = module.params.get("name") connection_catalog_id = module.params.get("catalog_id") - params = {'Name': connection_name} + params = {"Name": connection_name} if connection_catalog_id is not None: - params['CatalogId'] = connection_catalog_id + params["CatalogId"] = connection_catalog_id try: - return connection.get_connection(aws_retry=True, **params)['Connection'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_connection(aws_retry=True, **params)["Connection"] + except is_boto3_error_code("EntityNotFoundException"): return None @@ -208,37 +208,50 @@ def _compare_glue_connection_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. 
Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'MatchCriteria' not in current_params: - current_params['MatchCriteria'] = list() - if 'PhysicalConnectionRequirements' not in current_params: - current_params['PhysicalConnectionRequirements'] = dict() - current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = [] - current_params['PhysicalConnectionRequirements']['SubnetId'] = "" - - if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \ - != current_params['ConnectionProperties']: + if "Description" not in current_params: + current_params["Description"] = "" + if "MatchCriteria" not in current_params: + current_params["MatchCriteria"] = list() + if "PhysicalConnectionRequirements" not in current_params: + current_params["PhysicalConnectionRequirements"] = dict() + current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = [] + current_params["PhysicalConnectionRequirements"]["SubnetId"] = "" + + if ( + "ConnectionProperties" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionProperties"] != current_params["ConnectionProperties"] + ): return True - if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \ - != current_params['ConnectionType']: + if ( + "ConnectionType" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["ConnectionType"] != current_params["ConnectionType"] + ): return True - if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']: + if ( + "Description" in user_params["ConnectionInput"] + and user_params["ConnectionInput"]["Description"] != current_params["Description"] + ): return True - if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']): + if "MatchCriteria" in user_params["ConnectionInput"] and set( + user_params["ConnectionInput"]["MatchCriteria"] + ) != set(current_params["MatchCriteria"]): return True - if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']: - if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \ - != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']): + if "PhysicalConnectionRequirements" in user_params["ConnectionInput"]: + if "SecurityGroupIdList" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] and set( + user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] + ) != set(current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"]): return True - if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \ - != current_params['PhysicalConnectionRequirements']['SubnetId']: + if ( + "SubnetId" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] + != current_params["PhysicalConnectionRequirements"]["SubnetId"] + ): return True - if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \ - 
user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \ - != current_params['PhysicalConnectionRequirements']['AvailabilityZone']: + if ( + "AvailabilityZone" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] + and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] + != current_params["PhysicalConnectionRequirements"]["AvailabilityZone"] + ): return True return False @@ -252,11 +265,11 @@ def _await_glue_connection(connection, module): while wait_timeout > time.time(): glue_connection = _get_glue_connection(connection, module) - if glue_connection and glue_connection.get('Name'): + if glue_connection and glue_connection.get("Name"): return glue_connection time.sleep(check_interval) - module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name')) + module.fail_json(msg="Timeout waiting for Glue connection %s" % module.params.get("name")) def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): @@ -271,26 +284,30 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co changed = False params = dict() - params['ConnectionInput'] = dict() - params['ConnectionInput']['Name'] = module.params.get("name") - params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type") - params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties") + params["ConnectionInput"] = dict() + params["ConnectionInput"]["Name"] = module.params.get("name") + params["ConnectionInput"]["ConnectionType"] = module.params.get("connection_type") + params["ConnectionInput"]["ConnectionProperties"] = module.params.get("connection_properties") if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if module.params.get("description") is not None: - params['ConnectionInput']['Description'] = module.params.get("description") + params["ConnectionInput"]["Description"] = module.params.get("description") if module.params.get("match_criteria") is not None: - params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria") + params["ConnectionInput"]["MatchCriteria"] = module.params.get("match_criteria") if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements'] = dict() + params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict() if module.params.get("security_groups") is not None: # Get security group IDs from names - security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True) - params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids + security_group_ids = get_ec2_security_group_ids_from_names( + module.params.get("security_groups"), connection_ec2, boto3=True + ) + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids if module.params.get("subnet_id") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id") if module.params.get("availability_zone") is not None: - params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = 
module.params.get("availability_zone") + params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] = module.params.get( + "availability_zone" + ) # If glue_connection is not None then check if it needs to be modified, else create it if glue_connection: @@ -298,7 +315,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co try: # We need to slightly modify the params for an update update_params = copy.deepcopy(params) - update_params['Name'] = update_params['ConnectionInput']['Name'] + update_params["Name"] = update_params["ConnectionInput"]["Name"] if not module.check_mode: connection.update_connection(aws_retry=True, **update_params) changed = True @@ -317,12 +334,17 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co glue_connection = _await_glue_connection(connection, module) if glue_connection: - module.deprecate("The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. Both values are returned for now.", - date='2024-06-01', collection_name='community.aws') - glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties'] + module.deprecate( + "The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. Both values are returned for now.", + date="2024-06-01", + collection_name="community.aws", + ) + glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"] - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties'])) + module.exit_json( + changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=["RawConnectionProperties"]) + ) def delete_glue_connection(connection, module, glue_connection): @@ -336,9 +358,9 @@ def delete_glue_connection(connection, module, glue_connection): """ changed = False - params = {'ConnectionName': module.params.get("name")} + params = {"ConnectionName": module.params.get("name")} if module.params.get("catalog_id") is not None: - params['CatalogId'] = module.params.get("catalog_id") + params["CatalogId"] = module.params.get("catalog_id") if glue_connection: try: @@ -352,41 +374,41 @@ def delete_glue_connection(connection, module, glue_connection): def main(): - - argument_spec = ( - dict( - availability_zone=dict(type='str'), - catalog_id=dict(type='str'), - connection_properties=dict(type='dict'), - connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']), - description=dict(type='str'), - match_criteria=dict(type='list', elements='str'), - name=dict(required=True, type='str'), - security_groups=dict(type='list', elements='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - subnet_id=dict(type='str') - ) + argument_spec = dict( + availability_zone=dict(type="str"), + catalog_id=dict(type="str"), + connection_properties=dict(type="dict"), + connection_type=dict( + type="str", default="JDBC", choices=["CUSTOM", "JDBC", "KAFKA", "MARKETPLACE", "MONGODB", "NETWORK"] + ), + description=dict(type="str"), + match_criteria=dict(type="list", elements="str"), + name=dict(required=True, type="str"), + security_groups=dict(type="list", elements="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + subnet_id=dict(type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', 
['connection_properties']), - ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ("state", "present", ["connection_properties"]), + ("connection_type", "NETWORK", ["availability_zone", "security_groups", "subnet_id"]), + ], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection_glue = module.client('glue', retry_decorator=retry_decorator) - connection_ec2 = module.client('ec2', retry_decorator=retry_decorator) + connection_glue = module.client("glue", retry_decorator=retry_decorator) + connection_ec2 = module.client("ec2", retry_decorator=retry_decorator) glue_connection = _get_glue_connection(connection_glue, module) - if module.params.get("state") == 'present': + if module.params.get("state") == "present": create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection) else: delete_glue_connection(connection_glue, module, glue_connection) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/glue_crawler.py b/glue_crawler.py index 04c6cd3eb52..0a8598b6c7a 100644 --- a/glue_crawler.py +++ b/glue_crawler.py @@ -215,14 +215,17 @@ def _get_glue_crawler(connection, module, glue_crawler_name): - ''' + """ Get an AWS Glue crawler based on name. If not found, return None. - ''' + """ try: - return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)["Crawler"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -238,39 +241,58 @@ def _trim_target(target): if not target: return None retval = target.copy() - if not retval.get('Exclusions', None): - retval.pop('Exclusions', None) + if not retval.get("Exclusions", None): + retval.pop("Exclusions", None) return retval def _compare_glue_crawler_params(user_params, current_params): - ''' + """ Compare Glue crawler params. 
If there is a difference, return True immediately else return False - ''' - if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']: + """ + if "DatabaseName" in user_params and user_params["DatabaseName"] != current_params["DatabaseName"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']: + if "RecrawlPolicy" in user_params and user_params["RecrawlPolicy"] != current_params["RecrawlPolicy"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']: + if ( + "SchemaChangePolicy" in user_params + and user_params["SchemaChangePolicy"] != current_params["SchemaChangePolicy"] + ): return True - if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']: + if "TablePrefix" in user_params and user_params["TablePrefix"] != current_params["TablePrefix"]: return True - if 'Targets' in user_params: - if 'S3Targets' in user_params['Targets']: - if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']): + if "Targets" in user_params: + if "S3Targets" in user_params["Targets"]: + if _trim_targets(user_params["Targets"]["S3Targets"]) != _trim_targets( + current_params["Targets"]["S3Targets"] + ): return True - if 'JdbcTargets' in user_params['Targets'] and user_params['Targets']['JdbcTargets'] != current_params['Targets']['JdbcTargets']: - if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']): + if ( + "JdbcTargets" in user_params["Targets"] + and user_params["Targets"]["JdbcTargets"] != current_params["Targets"]["JdbcTargets"] + ): + if _trim_targets(user_params["Targets"]["JdbcTargets"]) != _trim_targets( + current_params["Targets"]["JdbcTargets"] + ): return True - if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']: + if ( + "MongoDBTargets" in user_params["Targets"] + and user_params["Targets"]["MongoDBTargets"] != current_params["Targets"]["MongoDBTargets"] + ): return True - if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']: + if ( + "DynamoDBTargets" in user_params["Targets"] + and user_params["Targets"]["DynamoDBTargets"] != current_params["Targets"]["DynamoDBTargets"] + ): return True - if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']: + if ( + "CatalogTargets" in user_params["Targets"] + and user_params["Targets"]["CatalogTargets"] != current_params["Targets"]["CatalogTargets"] + ): return True return False @@ -279,21 +301,23 @@ def _compare_glue_crawler_params(user_params, current_params): def ensure_tags(connection, module, glue_crawler): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 
'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = "arn:{0}:glue:{1}:{2}:crawler/{3}".format(partition, module.region, account_id, module.params.get("name")) try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to get tags for Glue crawler %s" % module.params.get("name")) - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -301,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) if tags_to_add: changed = True @@ -309,35 +333,37 @@ def ensure_tags(connection, module, glue_crawler): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) return changed def create_or_update_glue_crawler(connection, module, glue_crawler): - ''' + """ Create or update an AWS Glue crawler - ''' + """ changed = False params = dict() - params['Name'] = module.params.get('name') - params['Role'] = module.params.get('role') - params['Targets'] = module.params.get('targets') - if module.params.get('database_name') is not None: - params['DatabaseName'] = module.params.get('database_name') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('recrawl_policy') is not None: - params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True) - if module.params.get('role') is not None: - params['Role'] = module.params.get('role') - if module.params.get('schema_change_policy') is not None: - params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True) - if module.params.get('table_prefix') is not None: - params['TablePrefix'] = module.params.get('table_prefix') - if module.params.get('targets') is not None: - params['Targets'] = module.params.get('targets') + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") + params["Targets"] = module.params.get("targets") + if module.params.get("database_name") is not None: + params["DatabaseName"] = module.params.get("database_name") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("recrawl_policy") is not None: + 
params["RecrawlPolicy"] = snake_dict_to_camel_dict(module.params.get("recrawl_policy"), capitalize_first=True) + if module.params.get("role") is not None: + params["Role"] = module.params.get("role") + if module.params.get("schema_change_policy") is not None: + params["SchemaChangePolicy"] = snake_dict_to_camel_dict( + module.params.get("schema_change_policy"), capitalize_first=True + ) + if module.params.get("table_prefix") is not None: + params["TablePrefix"] = module.params.get("table_prefix") + if module.params.get("targets") is not None: + params["Targets"] = module.params.get("targets") if glue_crawler: if _compare_glue_crawler_params(params, glue_crawler): @@ -355,23 +381,26 @@ def create_or_update_glue_crawler(connection, module, glue_crawler): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_crawler = _get_glue_crawler(connection, module, params['Name']) + glue_crawler = _get_glue_crawler(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_crawler) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets'])) + module.exit_json( + changed=changed, + **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=["SchemaChangePolicy", "RecrawlPolicy", "Targets"]), + ) def delete_glue_crawler(connection, module, glue_crawler): - ''' + """ Delete an AWS Glue crawler - ''' + """ changed = False if glue_crawler: try: if not module.check_mode: - connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name']) + connection.delete_crawler(aws_retry=True, Name=glue_crawler["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -380,46 +409,39 @@ def delete_glue_crawler(connection, module, glue_crawler): def main(): - - argument_spec = ( - dict( - database_name=dict(type='str'), - description=dict(type='str'), - name=dict(required=True, type='str'), - purge_tags=dict(type='bool', default=True), - recrawl_policy=dict(type='dict', options=dict( - recrawl_behavior=dict(type='str') - )), - role=dict(type='str'), - schema_change_policy=dict(type='dict', options=dict( - delete_behavior=dict(type='str'), - update_behavior=dict(type='str') - )), - state=dict(required=True, choices=['present', 'absent'], type='str'), - table_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - targets=dict(type='dict') - ) + argument_spec = dict( + database_name=dict(type="str"), + description=dict(type="str"), + name=dict(required=True, type="str"), + purge_tags=dict(type="bool", default=True), + recrawl_policy=dict(type="dict", options=dict(recrawl_behavior=dict(type="str"))), + role=dict(type="str"), + schema_change_policy=dict( + type="dict", options=dict(delete_behavior=dict(type="str"), update_behavior=dict(type="str")) + ), + state=dict(required=True, choices=["present", "absent"], type="str"), + table_prefix=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + targets=dict(type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'targets']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "targets"])], + supports_check_mode=True, + ) - connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = 
module.client("glue", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - state = module.params.get('state') + state = module.params.get("state") - glue_crawler = _get_glue_crawler(connection, module, module.params.get('name')) + glue_crawler = _get_glue_crawler(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_crawler(connection, module, glue_crawler) else: delete_glue_crawler(connection, module, glue_crawler) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/glue_job.py b/glue_job.py index 6e979f28a9d..4740deed3c9 100644 --- a/glue_job.py +++ b/glue_job.py @@ -250,10 +250,13 @@ def _get_glue_job(connection, module, glue_job_name): :return: boto3 Glue job dict or None if not found """ try: - return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job'] - except is_boto3_error_code('EntityNotFoundException'): + return connection.get_job(aws_retry=True, JobName=glue_job_name)["Job"] + except is_boto3_error_code("EntityNotFoundException"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) @@ -268,39 +271,43 @@ def _compare_glue_job_params(user_params, current_params): # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description # To counter this, add the key if it's missing with a blank value - if 'Description' not in current_params: - current_params['Description'] = "" - if 'DefaultArguments' not in current_params: - current_params['DefaultArguments'] = dict() + if "Description" not in current_params: + current_params["Description"] = "" + if "DefaultArguments" not in current_params: + current_params["DefaultArguments"] = dict() - if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']: + if "AllocatedCapacity" in user_params and user_params["AllocatedCapacity"] != current_params["AllocatedCapacity"]: return True - if 'Command' in user_params: - if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']: + if "Command" in user_params: + if user_params["Command"]["ScriptLocation"] != current_params["Command"]["ScriptLocation"]: return True - if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']: + if user_params["Command"]["PythonVersion"] != current_params["Command"]["PythonVersion"]: return True - if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']: + if "Connections" in user_params and user_params["Connections"] != current_params["Connections"]: return True - if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']: + if "DefaultArguments" in user_params and user_params["DefaultArguments"] != current_params["DefaultArguments"]: return True - if 'Description' in user_params and user_params['Description'] != current_params['Description']: + if "Description" in user_params and user_params["Description"] != current_params["Description"]: return True - if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']: + if ( + "ExecutionProperty" in user_params + and user_params["ExecutionProperty"]["MaxConcurrentRuns"] + != 
current_params["ExecutionProperty"]["MaxConcurrentRuns"] + ): return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']: + if "MaxRetries" in user_params and user_params["MaxRetries"] != current_params["MaxRetries"]: return True - if 'Role' in user_params and user_params['Role'] != current_params['Role']: + if "Role" in user_params and user_params["Role"] != current_params["Role"]: return True - if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']: + if "Timeout" in user_params and user_params["Timeout"] != current_params["Timeout"]: return True - if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']: + if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: return True - if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']: + if "WorkerType" in user_params and user_params["WorkerType"] != current_params["WorkerType"]: return True - if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']: + if "NumberOfWorkers" in user_params and user_params["NumberOfWorkers"] != current_params["NumberOfWorkers"]: return True return False @@ -309,21 +316,23 @@ def _compare_glue_job_params(user_params, current_params): def ensure_tags(connection, module, glue_job): changed = False - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False account_id, partition = get_aws_account_info(module) - arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name')) + arn = "arn:{0}:glue:{1}:{2}:job/{3}".format(partition, module.region, account_id, module.params.get("name")) try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {}) + existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to get tags for Glue job %s" % module.params.get("name")) - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags( + existing_tags, module.params.get("tags"), module.params.get("purge_tags") + ) if tags_to_remove: changed = True @@ -331,7 +340,7 @@ def ensure_tags(connection, module, glue_job): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) if tags_to_add: changed = True @@ -339,7 +348,7 @@ def ensure_tags(connection, module, glue_job): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags 
for Glue job %s' % module.params.get('name')) + module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) return changed @@ -356,42 +365,45 @@ def create_or_update_glue_job(connection, module, glue_job): changed = False params = dict() - params['Name'] = module.params.get("name") - params['Role'] = module.params.get("role") + params["Name"] = module.params.get("name") + params["Role"] = module.params.get("role") if module.params.get("allocated_capacity") is not None: - params['AllocatedCapacity'] = module.params.get("allocated_capacity") + params["AllocatedCapacity"] = module.params.get("allocated_capacity") if module.params.get("command_script_location") is not None: - params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")} + params["Command"] = { + "Name": module.params.get("command_name"), + "ScriptLocation": module.params.get("command_script_location"), + } if module.params.get("command_python_version") is not None: - params['Command']['PythonVersion'] = module.params.get("command_python_version") + params["Command"]["PythonVersion"] = module.params.get("command_python_version") if module.params.get("connections") is not None: - params['Connections'] = {'Connections': module.params.get("connections")} + params["Connections"] = {"Connections": module.params.get("connections")} if module.params.get("default_arguments") is not None: - params['DefaultArguments'] = module.params.get("default_arguments") + params["DefaultArguments"] = module.params.get("default_arguments") if module.params.get("description") is not None: - params['Description'] = module.params.get("description") + params["Description"] = module.params.get("description") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("max_concurrent_runs") is not None: - params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")} + params["ExecutionProperty"] = {"MaxConcurrentRuns": module.params.get("max_concurrent_runs")} if module.params.get("max_retries") is not None: - params['MaxRetries'] = module.params.get("max_retries") + params["MaxRetries"] = module.params.get("max_retries") if module.params.get("timeout") is not None: - params['Timeout'] = module.params.get("timeout") + params["Timeout"] = module.params.get("timeout") if module.params.get("glue_version") is not None: - params['GlueVersion'] = module.params.get("glue_version") + params["GlueVersion"] = module.params.get("glue_version") if module.params.get("worker_type") is not None: - params['WorkerType'] = module.params.get("worker_type") + params["WorkerType"] = module.params.get("worker_type") if module.params.get("number_of_workers") is not None: - params['NumberOfWorkers'] = module.params.get("number_of_workers") + params["NumberOfWorkers"] = module.params.get("number_of_workers") # If glue_job is not None then check if it needs to be modified, else create it if glue_job: if _compare_glue_job_params(params, glue_job): try: # Update job needs slightly modified params - update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)} - del update_params['JobUpdate']['Name'] + update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)} + del update_params["JobUpdate"]["Name"] if not module.check_mode: connection.update_job(aws_retry=True, **update_params) changed 
= True @@ -405,11 +417,11 @@ def create_or_update_glue_job(connection, module, glue_job): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) - glue_job = _get_glue_job(connection, module, params['Name']) + glue_job = _get_glue_job(connection, module, params["Name"]) changed |= ensure_tags(connection, module, glue_job) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=["DefaultArguments"])) def delete_glue_job(connection, module, glue_job): @@ -426,7 +438,7 @@ def delete_glue_job(connection, module, glue_job): if glue_job: try: if not module.check_mode: - connection.delete_job(aws_retry=True, JobName=glue_job['Name']) + connection.delete_job(aws_retry=True, JobName=glue_job["Name"]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -435,49 +447,45 @@ def delete_glue_job(connection, module, glue_job): def main(): - - argument_spec = ( - dict( - allocated_capacity=dict(type='int'), - command_name=dict(type='str', default='glueetl'), - command_python_version=dict(type='str'), - command_script_location=dict(type='str'), - connections=dict(type='list', elements='str'), - default_arguments=dict(type='dict'), - description=dict(type='str'), - glue_version=dict(type='str'), - max_concurrent_runs=dict(type='int'), - max_retries=dict(type='int'), - name=dict(required=True, type='str'), - number_of_workers=dict(type='int'), - purge_tags=dict(type='bool', default=True), - role=dict(type='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - timeout=dict(type='int'), - worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'), - ) + argument_spec = dict( + allocated_capacity=dict(type="int"), + command_name=dict(type="str", default="glueetl"), + command_python_version=dict(type="str"), + command_script_location=dict(type="str"), + connections=dict(type="list", elements="str"), + default_arguments=dict(type="dict"), + description=dict(type="str"), + glue_version=dict(type="str"), + max_concurrent_runs=dict(type="int"), + max_retries=dict(type="int"), + name=dict(required=True, type="str"), + number_of_workers=dict(type="int"), + purge_tags=dict(type="bool", default=True), + role=dict(type="str"), + state=dict(required=True, choices=["present", "absent"], type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + timeout=dict(type="int"), + worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['role', 'command_script_location']) - ], - supports_check_mode=True - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["role", "command_script_location"])], + supports_check_mode=True, + ) retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('glue', retry_decorator=retry_decorator) + connection = module.client("glue", retry_decorator=retry_decorator) state = module.params.get("state") glue_job = _get_glue_job(connection, module, module.params.get("name")) - if state == 'present': + if state == "present": create_or_update_glue_job(connection, module, glue_job) else: delete_glue_job(connection, module, glue_job) -if __name__ == '__main__': +if __name__ 
== "__main__": main() diff --git a/iam_access_key.py b/iam_access_key.py index af472fbe8c6..a8f03d7bced 100644 --- a/iam_access_key.py +++ b/iam_access_key.py @@ -149,14 +149,15 @@ def delete_access_key(access_keys, user, access_key_id): UserName=user, AccessKeyId=access_key_id, ) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): # Generally occurs when race conditions have happened and someone # deleted the key while we were checking to see if it existed. return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user) - ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)) return True @@ -171,9 +172,9 @@ def update_access_key(access_keys, user, access_key_id, enabled): access_key = access_keys.get(access_key_id) if enabled is not None: - desired_status = 'Active' if enabled else 'Inactive' - if access_key.get('status') != desired_status: - changes['Status'] = desired_status + desired_status = "Active" if enabled else "Inactive" + if access_key.get("status") != desired_status: + changes["Status"] = desired_status if not changes: return False @@ -182,15 +183,11 @@ def update_access_key(access_keys, user, access_key_id, enabled): return True try: - client.update_access_key( - aws_retry=True, - UserName=user, - AccessKeyId=access_key_id, - **changes - ) + client.update_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, changes=changes, + e, + changes=changes, msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), ) return True @@ -201,7 +198,7 @@ def create_access_key(access_keys, user, rotate_keys, enabled): oldest_key = False if len(access_keys) > 1 and rotate_keys: - sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None)) + sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get("create_date", None)) oldest_key = sorted_keys[0] changed |= delete_access_key(access_keys, user, oldest_key) @@ -215,18 +212,18 @@ def create_access_key(access_keys, user, rotate_keys, enabled): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) results = camel_dict_to_snake_dict(results) - access_key = results.get('access_key') + access_key = results.get("access_key") access_key = normalize_boto3_result(access_key) # Update settings which can't be managed on creation if enabled is False: - access_key_id = access_key['access_key_id'] + access_key_id = access_key["access_key_id"] access_keys = {access_key_id: access_key} update_access_key(access_keys, user, access_key_id, enabled) - access_key['status'] = 'Inactive' + access_key["status"] = "Inactive" if oldest_key: - access_key['deleted_access_key'] = oldest_key + access_key["deleted_access_key"] = oldest_key return access_key @@ -235,67 +232,64 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) + module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) if not results: return None results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) + access_keys = results.get("access_key_metadata", []) if not access_keys: return [] access_keys = normalize_boto3_result(access_keys) - access_keys = {k['access_key_id']: k for k in access_keys} + access_keys = {k["access_key_id"]: k for k in access_keys} return access_keys def main(): - global module global client argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), - id=dict(required=False, type='str'), - state=dict(required=False, choices=['present', 'absent'], default='present'), - active=dict(required=False, type='bool', aliases=['enabled']), - rotate_keys=dict(required=False, type='bool', default=False), + user_name=dict(required=True, type="str", aliases=["username"]), + id=dict(required=False, type="str"), + state=dict(required=False, choices=["present", "absent"], default="present"), + active=dict(required=False, type="bool", aliases=["enabled"]), + rotate_keys=dict(required=False, type="bool", default=False), ) required_if = [ - ['state', 'absent', ('id')], + ["state", "absent", ("id")], ] mutually_exclusive = [ - ['rotate_keys', 'id'], + ["rotate_keys", "id"], ] module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) changed = False - state = module.params.get('state') - user = module.params.get('user_name') - access_key_id = module.params.get('id') - rotate_keys = module.params.get('rotate_keys') - enabled = module.params.get('active') + state = module.params.get("state") + user = module.params.get("user_name") + access_key_id = module.params.get("id") + rotate_keys = module.params.get("rotate_keys") + enabled = module.params.get("active") access_keys = get_access_keys(user) results = dict() - if state == 'absent': + if state == "absent": changed |= delete_access_key(access_keys, user, access_key_id) else: # If we have an ID then we should try to update it if access_key_id: changed |= update_access_key(access_keys, user, access_key_id, enabled) access_keys = get_access_keys(user) - results['access_key'] = access_keys.get(access_key_id, None) + results["access_key"] = access_keys.get(access_key_id, None) # Otherwise we try to create a new one else: secret_key = create_access_key(access_keys, user, rotate_keys, enabled) @@ -303,15 +297,15 @@ def main(): changed |= secret_key else: changed = True - results['access_key_id'] = secret_key.get('access_key_id', None) - results['secret_access_key'] = secret_key.pop('secret_access_key', None) - results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None) + results["access_key_id"] = secret_key.get("access_key_id", None) + results["secret_access_key"] = secret_key.pop("secret_access_key", None) + results["deleted_access_key_id"] = secret_key.pop("deleted_access_key", None) if secret_key: - results['access_key'] = secret_key + results["access_key"] = secret_key results = scrub_none_parameters(results) module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/iam_access_key_info.py b/iam_access_key_info.py index 6573e657a18..22bbd564cb0 100644 --- a/iam_access_key_info.py +++ b/iam_access_key_info.py @@ -85,44 +85,38 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg='Failed to get access keys for user "{0}"'.format(user) - ) + module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) if not results: return None results = camel_dict_to_snake_dict(results) - access_keys = results.get('access_key_metadata', []) + access_keys = results.get("access_key_metadata", []) if not access_keys: return [] access_keys = normalize_boto3_result(access_keys) - access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None)) + access_keys = sorted(access_keys, key=lambda d: d.get("create_date", None)) return access_keys def main(): - global module global client argument_spec = dict( - user_name=dict(required=True, type='str', aliases=['username']), + user_name=dict(required=True, type="str", aliases=["username"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) changed = False - user = module.params.get('user_name') + user = module.params.get("user_name") access_keys = get_access_keys(user) module.exit_json(changed=changed, access_keys=access_keys) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_group.py b/iam_group.py index 9dc43ec0a94..357671dbdc6 100644 --- a/iam_group.py +++ b/iam_group.py @@ -178,14 +178,13 @@ def compare_attached_group_policies(current_attached_policies, new_attached_policies): - # If new_attached_policies is None it means we want to remove all policies if len(current_attached_policies) > 0 and new_attached_policies is None: return False current_attached_policies_arn_list = [] for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + current_attached_policies_arn_list.append(policy["PolicyArn"]) if set(current_attached_policies_arn_list) == set(new_attached_policies): return True @@ -194,7 +193,6 @@ def compare_attached_group_policies(current_attached_policies, new_attached_poli def compare_group_members(current_group_members, new_group_members): - # If new_attached_policies is None it means we want to remove all policies if len(current_group_members) > 0 and new_group_members is None: return False @@ -205,16 +203,15 @@ def compare_group_members(current_group_members, new_group_members): def convert_friendly_names_to_arns(connection, module, policy_names): - - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): + if not any(not policy.startswith("arn:") for policy in policy_names if policy is not None): return policy_names allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] + paginator = connection.get_paginator("list_policies") + policies = paginator.paginate().build_full_result()["Policies"] for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] + allpolicies[policy["PolicyName"]] = 
policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] try: return [allpolicies[policy] for policy in policy_names] except KeyError as e: @@ -222,20 +219,19 @@ def convert_friendly_names_to_arns(connection, module, policy_names): def create_or_update_group(connection, module): - params = dict() - params['GroupName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - users = module.params.get('users') - purge_users = module.params.get('purge_users') - purge_policies = module.params.get('purge_policies') + params["GroupName"] = module.params.get("name") + managed_policies = module.params.get("managed_policies") + users = module.params.get("users") + purge_users = module.params.get("purge_users") + purge_policies = module.params.get("purge_policies") changed = False if managed_policies: managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) # Get group try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get group") @@ -252,11 +248,11 @@ def create_or_update_group(connection, module): module.fail_json_aws(e, msg="Couldn't create group") # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['GroupName']) + current_attached_policies = get_attached_policy_list(connection, module, params["GroupName"]) if not compare_attached_group_policies(current_attached_policies, managed_policies): current_attached_policies_arn_list = [] for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + current_attached_policies_arn_list.append(policy["PolicyArn"]) # If managed_policies has a single empty element we want to remove all attached policies if purge_policies: @@ -265,9 +261,9 @@ def create_or_update_group(connection, module): changed = True if not module.check_mode: try: - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) + connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params["GroupName"]) # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(managed_policies) - set(current_attached_policies_arn_list): @@ -276,22 +272,21 @@ def create_or_update_group(connection, module): if managed_policies != [None] and not module.check_mode: for policy_arn in managed_policies: try: - connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn) + connection.attach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params["GroupName"]) # Manage group memberships try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] + current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) current_group_members_list = [] for member in current_group_members: - current_group_members_list.append(member['UserName']) + current_group_members_list.append(member["UserName"]) if not compare_group_members(current_group_members_list, users): - if purge_users: for user in list(set(current_group_members_list) - set(users)): # Ensure we mark things have changed if any user gets purged @@ -299,9 +294,11 @@ def create_or_update_group(connection, module): # Skip actions for check mode if not module.check_mode: try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName'])) + module.fail_json_aws( + e, msg="Couldn't remove user %s from group %s" % (user, params["GroupName"]) + ) # If there are users to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(users) - set(current_group_members_list): @@ -310,30 +307,29 @@ def create_or_update_group(connection, module): if users != [None] and not module.check_mode: for user in users: try: - connection.add_user_to_group(GroupName=params['GroupName'], UserName=user) + connection.add_user_to_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName'])) + module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params["GroupName"])) if module.check_mode: module.exit_json(changed=changed) # Get the group again try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) def destroy_group(connection, module): - params = dict() - params['GroupName'] = module.params.get('name') + params["GroupName"] = module.params.get("name") try: - group = get_group(connection, module, params['GroupName']) + group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) if group: # Check mode means we would remove this group if module.check_mode: @@ -341,29 +337,29 @@ def destroy_group(connection, module): # Remove any attached policies otherwise deletion fails try: - for policy in get_attached_policy_list(connection, module, params['GroupName']): - connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn']) + for policy in get_attached_policy_list(connection, module, params["GroupName"]): + connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy["PolicyArn"]) except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName']) + module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params["GroupName"]) # Remove any users in the group otherwise deletion fails current_group_members_list = [] try: - current_group_members = get_group(connection, module, params['GroupName'])['Users'] + current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) for member in current_group_members: - current_group_members_list.append(member['UserName']) + current_group_members_list.append(member["UserName"]) for user in current_group_members_list: try: - connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user) + connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName'])) + module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params["GroupName"])) try: connection.delete_group(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName']) + module.fail_json_aws(e, "Couldn't delete group %s" % params["GroupName"]) else: module.exit_json(changed=False) @@ -374,47 +370,45 @@ def destroy_group(connection, module): @AWSRetry.exponential_backoff() def get_group(connection, module, name): try: - paginator = connection.get_paginator('get_group') + paginator = connection.get_paginator("get_group") return paginator.paginate(GroupName=name).build_full_result() - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None @AWSRetry.exponential_backoff() def get_attached_policy_list(connection, module, name): - try: - paginator = connection.get_paginator('list_attached_group_policies') - return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies'] - except is_boto3_error_code('NoSuchEntity'): + paginator = connection.get_paginator("list_attached_group_policies") + return paginator.paginate(GroupName=name).build_full_result()["AttachedPolicies"] + except is_boto3_error_code("NoSuchEntity"): return None def main(): - argument_spec = dict( name=dict(required=True), - managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), - users=dict(default=[], type='list', elements='str'), - state=dict(choices=['present', 'absent'], required=True), - purge_users=dict(default=False, type='bool'), - purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']) + managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"), + users=dict(default=[], type="list", elements="str"), + state=dict(choices=["present", "absent"], required=True), + purge_users=dict(default=False, type="bool"), + purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('iam') + connection = module.client("iam") 
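# create_or_update_group() above reconciles group members and attached policies
# with plain set arithmetic; a minimal sketch of the pattern, using hypothetical
# values rather than anything from the module itself:
#     current, desired = {"alice", "bob"}, {"bob", "carol"}
#     to_add = desired - current       # {"carol"}
#     to_remove = current - desired    # {"alice"}, applied only when purge_users
#                                      # (or purge_policies) is set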
state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_group(connection, module) else: destroy_group(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_managed_policy.py b/iam_managed_policy.py index f590fcf9d64..0f6189ca454 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -141,8 +141,8 @@ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_policies_with_backoff(): - paginator = client.get_paginator('list_policies') - return paginator.paginate(Scope='Local').build_full_result() + paginator = client.get_paginator("list_policies") + return paginator.paginate(Scope="Local").build_full_result() def get_policy_by_name(name): @@ -150,22 +150,23 @@ def get_policy_by_name(name): response = list_policies_with_backoff() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policies") - for policy in response['Policies']: - if policy['PolicyName'] == name: + for policy in response["Policies"]: + if policy["PolicyName"] == name: return policy return None def delete_oldest_non_default_version(policy): try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] - if not v['IsDefaultVersion']] + versions = [ + v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") - versions.sort(key=lambda v: v['CreateDate'], reverse=True) + versions.sort(key=lambda v: v["CreateDate"], reverse=True) for v in versions[-1:]: try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") @@ -173,15 +174,17 @@ def delete_oldest_non_default_version(policy): # This needs to return policy_version, changed def get_or_create_policy_version(policy, policy_document): try: - versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] + document = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"])["PolicyVersion"][ + "Document" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) + module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v["VersionId"])) if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): return v, True @@ -197,23 +200,28 @@ def get_or_create_policy_version(policy, policy_document): # and if that doesn't work, delete the oldest non default policy version # and try again. 
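# IAM retains at most five versions of a managed policy, so the create call
# below can fail with LimitExceeded; the fallback frees a slot by deleting the
# oldest non-default version and then retries the create once.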
try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)["PolicyVersion"] return version, True - except is_boto3_error_code('LimitExceeded'): + except is_boto3_error_code("LimitExceeded"): delete_oldest_non_default_version(policy) try: - version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)[ + "PolicyVersion" + ] return version, True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: module.fail_json_aws(second_e, msg="Couldn't create policy version") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create policy version") def set_if_default(policy, policy_version, is_default): - if is_default and not policy_version['IsDefaultVersion']: + if is_default and not policy_version["IsDefaultVersion"]: try: - client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) + client.set_default_policy_version(PolicyArn=policy["Arn"], VersionId=policy_version["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't set default policy version") return True @@ -223,13 +231,14 @@ def set_if_default(policy, policy_version, is_default): def set_if_only(policy, policy_version, is_only): if is_only: try: - versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[ - 'Versions'] if not v['IsDefaultVersion']] + versions = [ + v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") return len(versions) > 0 @@ -238,39 +247,39 @@ def set_if_only(policy, policy_version, is_only): def detach_all_entities(policy, **kwargs): try: - entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) + entities = client.list_entities_for_policy(PolicyArn=policy["Arn"], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy['PolicyName'])) + module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy["PolicyName"])) - for g in entities['PolicyGroups']: + for g in entities["PolicyGroups"]: try: - client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) + client.detach_group_policy(PolicyArn=policy["Arn"], GroupName=g["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) - for u in 
entities['PolicyUsers']: + module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g["GroupName"])) + for u in entities["PolicyUsers"]: try: - client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) + client.detach_user_policy(PolicyArn=policy["Arn"], UserName=u["UserName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) - for r in entities['PolicyRoles']: + module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u["UserName"])) + for r in entities["PolicyRoles"]: try: - client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) + client.detach_role_policy(PolicyArn=policy["Arn"], RoleName=r["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) - if entities['IsTruncated']: - detach_all_entities(policy, marker=entities['Marker']) + module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r["RoleName"])) + if entities["IsTruncated"]: + detach_all_entities(policy, marker=entities["Marker"]) def create_or_update_policy(existing_policy): - name = module.params.get('policy_name') - description = module.params.get('policy_description') - default = module.params.get('make_default') - only = module.params.get('only_version') + name = module.params.get("policy_name") + description = module.params.get("policy_description") + default = module.params.get("make_default") + only = module.params.get("only_version") policy = None - if module.params.get('policy') is not None: - policy = json.dumps(json.loads(module.params.get('policy'))) + if module.params.get("policy") is not None: + policy = json.dumps(json.loads(module.params.get("policy"))) if existing_policy is None: if module.check_mode: @@ -278,11 +287,11 @@ def create_or_update_policy(existing_policy): # Create policy when none already exists try: - rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) + rvalue = client.create_policy(PolicyName=name, Path="/", PolicyDocument=policy, Description=description) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue["Policy"])) else: policy_version, changed = get_or_create_policy_version(existing_policy, policy) changed = set_if_default(existing_policy, policy_version, default) or changed @@ -291,7 +300,7 @@ def create_or_update_policy(existing_policy): # If anything has changed we need to refresh the policy if changed: try: - updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy'] + updated_policy = client.get_policy(PolicyArn=existing_policy["Arn"])["Policy"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Couldn't get policy") @@ -310,21 +319,20 @@ def delete_policy(existing_policy): detach_all_entities(existing_policy) # Delete Versions try: - versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=existing_policy["Arn"])["Versions"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 
module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: - if not v['IsDefaultVersion']: + if not v["IsDefaultVersion"]: try: - client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=existing_policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) + module.fail_json_aws(e, msg="Couldn't delete policy version {0}".format(v["VersionId"])) # Delete policy try: - client.delete_policy(PolicyArn=existing_policy['Arn']) + client.delete_policy(PolicyArn=existing_policy["Arn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName'])) + module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy["PolicyName"])) # This is the one case where we will return the old policy module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) @@ -338,34 +346,34 @@ def main(): argument_spec = dict( policy_name=dict(required=True), - policy_description=dict(default=''), - policy=dict(type='json'), - make_default=dict(type='bool', default=True), - only_version=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + policy_description=dict(default=""), + policy=dict(type="json"), + make_default=dict(type="bool", default=True), + only_version=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['policy']]], - supports_check_mode=True + required_if=[["state", "present", ["policy"]]], + supports_check_mode=True, ) - name = module.params.get('policy_name') - state = module.params.get('state') + name = module.params.get("policy_name") + state = module.params.get("state") try: - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") existing_policy = get_policy_by_name(name) - if state == 'present': + if state == "present": create_or_update_policy(existing_policy) else: delete_policy(existing_policy) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py index c0c434a9be9..44b38ab90f4 100644 --- a/iam_mfa_device_info.py +++ b/iam_mfa_device_info.py @@ -67,12 +67,12 @@ def list_mfa_devices(connection, module): - user_name = module.params.get('user_name') + user_name = module.params.get("user_name") changed = False args = {} if user_name is not None: - args['UserName'] = user_name + args["UserName"] = user_name try: response = connection.list_mfa_devices(**args) except ClientError as e: @@ -92,12 +92,12 @@ def main(): ) try: - connection = module.client('iam') + connection = module.client("iam") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_mfa_devices(connection, module) -if __name__ == '__main__': +if __name__ == 
"__main__": main() diff --git a/iam_password_policy.py b/iam_password_policy.py index a980511c2fa..7c93da4139f 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -109,16 +109,23 @@ class IAMConnection(object): def __init__(self, module): try: - self.connection = module.resource('iam') + self.connection = module.resource("iam") self.module = module except Exception as e: module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) def policy_to_dict(self, policy): policy_attributes = [ - 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry', - 'max_password_age', 'minimum_password_length', 'password_reuse_prevention', - 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters' + "allow_users_to_change_password", + "expire_passwords", + "hard_expiry", + "max_password_age", + "minimum_password_length", + "password_reuse_prevention", + "require_lowercase_characters", + "require_numbers", + "require_symbols", + "require_uppercase_characters", ] ret = {} for attr in policy_attributes: @@ -126,15 +133,15 @@ def policy_to_dict(self, policy): return ret def update_password_policy(self, module, policy): - min_pw_length = module.params.get('min_pw_length') - require_symbols = module.params.get('require_symbols') - require_numbers = module.params.get('require_numbers') - require_uppercase = module.params.get('require_uppercase') - require_lowercase = module.params.get('require_lowercase') - allow_pw_change = module.params.get('allow_pw_change') - pw_max_age = module.params.get('pw_max_age') - pw_reuse_prevent = module.params.get('pw_reuse_prevent') - pw_expire = module.params.get('pw_expire') + min_pw_length = module.params.get("min_pw_length") + require_symbols = module.params.get("require_symbols") + require_numbers = module.params.get("require_numbers") + require_uppercase = module.params.get("require_uppercase") + require_lowercase = module.params.get("require_lowercase") + allow_pw_change = module.params.get("allow_pw_change") + pw_max_age = module.params.get("pw_max_age") + pw_reuse_prevent = module.params.get("pw_reuse_prevent") + pw_expire = module.params.get("pw_expire") update_parameters = dict( MinimumPasswordLength=min_pw_length, @@ -143,7 +150,7 @@ def update_password_policy(self, module, policy): RequireUppercaseCharacters=require_uppercase, RequireLowercaseCharacters=require_lowercase, AllowUsersToChangePassword=allow_pw_change, - HardExpiry=pw_expire + HardExpiry=pw_expire, ) if pw_reuse_prevent: update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) @@ -162,15 +169,18 @@ def update_password_policy(self, module, policy): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") - changed = (original_policy != updated_policy) + changed = original_policy != updated_policy return (changed, updated_policy, camel_dict_to_snake_dict(results)) def delete_password_policy(self, policy): try: results = policy.delete() - except is_boto3_error_code('NoSuchEntity'): - self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchEntity"): + self.module.exit_json(changed=False, task_status={"IAM": "Couldn't find IAM Password Policy"}) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: 
disable=duplicate-except self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") return camel_dict_to_snake_dict(results) @@ -178,16 +188,16 @@ def delete_password_policy(self, policy): def main(): module = AnsibleAWSModule( argument_spec={ - 'state': dict(choices=['present', 'absent'], required=True), - 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6), - 'require_symbols': dict(type='bool', default=False), - 'require_numbers': dict(type='bool', default=False), - 'require_uppercase': dict(type='bool', default=False), - 'require_lowercase': dict(type='bool', default=False), - 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False), - 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0), - 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0), - 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False), + "state": dict(choices=["present", "absent"], required=True), + "min_pw_length": dict(type="int", aliases=["minimum_password_length"], default=6), + "require_symbols": dict(type="bool", default=False), + "require_numbers": dict(type="bool", default=False), + "require_uppercase": dict(type="bool", default=False), + "require_lowercase": dict(type="bool", default=False), + "allow_pw_change": dict(type="bool", aliases=["allow_password_change"], default=False), + "pw_max_age": dict(type="int", aliases=["password_max_age"], default=0), + "pw_reuse_prevent": dict(type="int", aliases=["password_reuse_prevent", "prevent_reuse"], default=0), + "pw_expire": dict(type="bool", aliases=["password_expire", "expire"], default=False), }, supports_check_mode=True, ) @@ -195,16 +205,16 @@ def main(): resource = IAMConnection(module) policy = resource.connection.AccountPasswordPolicy() - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": (changed, new_policy, update_result) = resource.update_password_policy(module, policy) - module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy) + module.exit_json(changed=changed, task_status={"IAM": update_result}, policy=new_policy) - if state == 'absent': + if state == "absent": delete_result = resource.delete_password_policy(policy) - module.exit_json(changed=True, task_status={'IAM': delete_result}) + module.exit_json(changed=True, task_status={"IAM": delete_result}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_role.py b/iam_role.py index 07463cd9736..3cafe85d2cb 100644 --- a/iam_role.py +++ b/iam_role.py @@ -236,44 +236,44 @@ @AWSRetry.jittered_backoff() def _list_policies(client): - paginator = client.get_paginator('list_policies') - return paginator.paginate().build_full_result()['Policies'] + paginator = client.get_paginator("list_policies") + return paginator.paginate().build_full_result()["Policies"] def wait_iam_exists(module, client): if module.check_mode: return - if not module.params.get('wait'): + if not module.params.get("wait"): return - role_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') + role_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") delay = min(wait_timeout, 5) max_attempts = wait_timeout // delay try: - waiter = client.get_waiter('role_exists') + waiter = client.get_waiter("role_exists") waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + 
WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, RoleName=role_name, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation') + module.fail_json_aws(e, msg="Timeout while waiting on IAM role creation") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM role creation') + module.fail_json_aws(e, msg="Failed while waiting on IAM role creation") def convert_friendly_names_to_arns(module, client, policy_names): - if not any(not policy.startswith('arn:') for policy in policy_names): + if not any(not policy.startswith("arn:") for policy in policy_names): return policy_names allpolicies = {} policies = _list_policies(client) for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] + allpolicies[policy["PolicyName"]] = policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] try: return [allpolicies[policy] for policy in policy_names] except KeyError as e: @@ -303,9 +303,12 @@ def remove_policies(module, client, policies_to_remove, role_name): try: client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True) changed = True - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) return changed @@ -315,25 +318,28 @@ def remove_inline_policies(module, client, role_name): for policy in current_inline_policies: try: client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) def generate_create_params(module): params = dict() - params['Path'] = module.params.get('path') - params['RoleName'] = module.params.get('name') - params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document') - if module.params.get('description') is not None: - params['Description'] = module.params.get('description') - if module.params.get('max_session_duration') is not None: - params['MaxSessionDuration'] = module.params.get('max_session_duration') - if module.params.get('boundary') is not None: - params['PermissionsBoundary'] = module.params.get('boundary') - if module.params.get('tags') is not None: - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + params["Path"] = module.params.get("path") + params["RoleName"] = module.params.get("name") + params["AssumeRolePolicyDocument"] = module.params.get("assume_role_policy_document") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("max_session_duration") is not None: + params["MaxSessionDuration"] = 
module.params.get("max_session_duration") + if module.params.get("boundary") is not None: + params["PermissionsBoundary"] = module.params.get("boundary") + if module.params.get("tags") is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) return params @@ -352,7 +358,7 @@ def create_basic_role(module, client): # 'Description' is documented as key of the role returned by create_role # but appears to be an AWS bug (the value is not returned using the AWS CLI either). # Get the role after creating it. - role = get_role_with_backoff(module, client, params['RoleName']) + role = get_role_with_backoff(module, client, params["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to create role") @@ -368,10 +374,7 @@ def update_role_assumed_policy(module, client, role_name, target_assumed_policy, return True try: - client.update_assume_role_policy( - RoleName=role_name, - PolicyDocument=target_assumed_policy, - aws_retry=True) + client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) return True @@ -407,7 +410,9 @@ def update_role_max_session_duration(module, client, role_name, target_duration, return True -def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary): +def update_role_permissions_boundary( + module, client, role_name, target_permissions_boundary, current_permissions_boundary +): # Check PermissionsBoundary if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary: return False @@ -415,14 +420,16 @@ def update_role_permissions_boundary(module, client, role_name, target_permissio if module.check_mode: return True - if target_permissions_boundary == '': + if target_permissions_boundary == "": try: client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) else: try: - client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True) + client.put_role_permissions_boundary( + RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) return True @@ -435,7 +442,7 @@ def update_managed_policies(module, client, role_name, managed_policies, purge_p # Get list of current attached managed policies current_attached_policies = get_attached_policy_list(module, client, role_name) - current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies] + current_attached_policies_arn_list = [policy["PolicyArn"] for policy in current_attached_policies] if len(managed_policies) == 1 and managed_policies[0] is None: managed_policies = [] @@ -460,18 +467,17 @@ def update_managed_policies(module, client, role_name, managed_policies, purge_p def create_or_update_role(module, client): - - role_name = module.params.get('name') - assumed_policy = 
module.params.get('assume_role_policy_document') - create_instance_profile = module.params.get('create_instance_profile') - description = module.params.get('description') - duration = module.params.get('max_session_duration') - path = module.params.get('path') - permissions_boundary = module.params.get('boundary') - purge_tags = module.params.get('purge_tags') - tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None - purge_policies = module.params.get('purge_policies') - managed_policies = module.params.get('managed_policies') + role_name = module.params.get("name") + assumed_policy = module.params.get("assume_role_policy_document") + create_instance_profile = module.params.get("create_instance_profile") + description = module.params.get("description") + duration = module.params.get("max_session_duration") + path = module.params.get("path") + permissions_boundary = module.params.get("boundary") + purge_tags = module.params.get("purge_tags") + tags = ansible_dict_to_boto3_tag_list(module.params.get("tags")) if module.params.get("tags") else None + purge_policies = module.params.get("purge_policies") + managed_policies = module.params.get("managed_policies") if managed_policies: # Attempt to list the policies early so we don't leave things behind if we can't find them. managed_policies = convert_friendly_names_to_arns(module, client, managed_policies) @@ -485,31 +491,33 @@ def create_or_update_role(module, client): if role is None: role = create_basic_role(module, client) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) changed = True else: # Role exists - get current attributes - current_assumed_policy = role.get('AssumeRolePolicyDocument') - current_description = role.get('Description') - current_duration = role.get('MaxSessionDuration') - current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '') + current_assumed_policy = role.get("AssumeRolePolicyDocument") + current_description = role.get("Description") + current_duration = role.get("MaxSessionDuration") + current_permissions_boundary = role.get("PermissionsBoundary", {}).get("PermissionsBoundaryArn", "") # Update attributes changed |= update_role_tags(module, client, role_name, tags, purge_tags) changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy) changed |= update_role_description(module, client, role_name, description, current_description) changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration) - changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary) + changed |= update_role_permissions_boundary( + module, client, role_name, permissions_boundary, current_permissions_boundary + ) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) if create_instance_profile: changed |= create_instance_profiles(module, client, role_name, path) - if not module.check_mode and module.params.get('wait'): + if not module.check_mode and module.params.get("wait"): wait_iam_exists(module, client) changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies) @@ -517,24 +525,25 @@ def create_or_update_role(module, client): # Get the role again role = get_role(module, client, role_name) 
- role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name) - role['tags'] = get_role_tags(module, client) + role["AttachedPolicies"] = get_attached_policy_list(module, client, role_name) + role["tags"] = get_role_tags(module, client) - camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + camel_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {}) module.exit_json(changed=changed, iam_role=camel_role, **camel_role) def create_instance_profiles(module, client, role_name, path): - # Fetch existing Profiles try: - instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)[ + "InstanceProfiles" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Profile already exists - if any(p['InstanceProfileName'] == role_name for p in instance_profiles): + if any(p["InstanceProfileName"] == role_name for p in instance_profiles): return False if module.check_mode: @@ -543,11 +552,14 @@ def create_instance_profiles(module, client, role_name, path): # Make sure an instance profile is created try: client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True) - except is_boto3_error_code('EntityAlreadyExists'): + except is_boto3_error_code("EntityAlreadyExists"): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) # And attach the role to the profile @@ -563,31 +575,39 @@ def remove_instance_profiles(module, client, role_name): delete_profiles = module.params.get("delete_instance_profile") try: - instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles'] + instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)[ + "InstanceProfiles" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) # Remove the role from the instance profile(s) for profile in instance_profiles: - profile_name = profile['InstanceProfileName'] + profile_name = profile["InstanceProfileName"] try: if not module.check_mode: - client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name) + client.remove_role_from_instance_profile( + aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name + ) if profile_name == role_name: if delete_profiles: try: client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + 
botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name)) + module.fail_json_aws( + e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name) + ) def destroy_role(module, client): - - role_name = module.params.get('name') + role_name = module.params.get("name") role = get_role(module, client, role_name) if role is None: @@ -603,9 +623,12 @@ def destroy_role(module, client): remove_inline_policies(module, client, role_name) try: client.delete_role(aws_retry=True, RoleName=role_name) - except is_boto3_error_code('NoSuchEntityException'): + except is_boto3_error_code("NoSuchEntityException"): module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete role") module.exit_json(changed=True) @@ -613,38 +636,43 @@ def destroy_role(module, client): def get_role_with_backoff(module, client, name): try: - return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role'] + return AWSRetry.jittered_backoff(catch_extra_error_codes=["NoSuchEntity"])(client.get_role)(RoleName=name)[ + "Role" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_role(module, client, name): try: - return client.get_role(RoleName=name, aws_retry=True)['Role'] - except is_boto3_error_code('NoSuchEntity'): + return client.get_role(RoleName=name, aws_retry=True)["Role"] + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) def get_attached_policy_list(module, client, name): try: - return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies'] + return client.list_attached_role_policies(RoleName=name, aws_retry=True)["AttachedPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) def get_inline_policy_list(module, client, name): try: - return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames'] + return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) def get_role_tags(module, client): - role_name = module.params.get('name') + role_name = module.params.get("name") try: - return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"]) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) @@ -655,7 +683,9 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): new_tags = boto3_tag_list_to_ansible_dict(new_tags) try: - existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']) + existing_tags = boto3_tag_list_to_ansible_dict( + client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} @@ -668,69 +698,76 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): if tags_to_add: client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name) + module.fail_json_aws(e, msg="Unable to set tags for role %s" % role_name) changed = bool(tags_to_add) or bool(tags_to_remove) return changed def main(): - argument_spec = dict( - name=dict(type='str', required=True), - path=dict(type='str', default="/"), - assume_role_policy_document=dict(type='json'), - managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'), - max_session_duration=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - description=dict(type='str'), - boundary=dict(type='str', aliases=['boundary_policy_arn']), - create_instance_profile=dict(type='bool', default=True), - delete_instance_profile=dict(type='bool', default=False), - purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=120, type='int'), + name=dict(type="str", required=True), + path=dict(type="str", default="/"), + assume_role_policy_document=dict(type="json"), + managed_policies=dict(type="list", aliases=["managed_policy"], elements="str"), + max_session_duration=dict(type="int"), + state=dict(type="str", choices=["present", "absent"], default="present"), + description=dict(type="str"), + boundary=dict(type="str", aliases=["boundary_policy_arn"]), + create_instance_profile=dict(type="bool", default=True), + delete_instance_profile=dict(type="bool", default=False), + purge_policies=dict(default=True, type="bool", aliases=["purge_policy", "purge_managed_policies"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=120, type="int"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['assume_role_policy_document'])], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["assume_role_policy_document"])], + supports_check_mode=True, + ) - module.deprecate("All return values other than iam_role and changed have been deprecated and " - "will be removed in a release after 2023-12-01.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "All return values other than iam_role and changed have been deprecated and " + "will be removed in a release after 2023-12-01.", + date="2023-12-01", + 
collection_name="community.aws", + ) - module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - "iam_role.assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + "iam_role.assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="community.aws", + ) - if module.params.get('boundary'): - if module.params.get('create_instance_profile'): + if module.params.get("boundary"): + if module.params.get("create_instance_profile"): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") - if not module.params.get('boundary').startswith('arn:aws:iam'): + if not module.params.get("boundary").startswith("arn:aws:iam"): module.fail_json(msg="Boundary policy must be an ARN") - if module.params.get('max_session_duration'): - max_session_duration = module.params.get('max_session_duration') + if module.params.get("max_session_duration"): + max_session_duration = module.params.get("max_session_duration") if max_session_duration < 3600 or max_session_duration > 43200: module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)") - if module.params.get('path'): - path = module.params.get('path') - if not path.endswith('/') or not path.startswith('/'): + if module.params.get("path"): + path = module.params.get("path") + if not path.endswith("/") or not path.startswith("/"): module.fail_json(msg="path must begin and end with /") - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_role(module, client) - elif state == 'absent': + elif state == "absent": destroy_role(module, client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_role_info.py b/iam_role_info.py index e3bdb7695bf..a7576a131ec 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -168,70 +168,73 @@ @AWSRetry.jittered_backoff() def list_iam_roles_with_backoff(client, **kwargs): - paginator = client.get_paginator('list_roles') + paginator = client.get_paginator("list_roles") return paginator.paginate(**kwargs).build_full_result() @AWSRetry.jittered_backoff() def list_iam_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames'] + paginator = client.get_paginator("list_role_policies") + return paginator.paginate(RoleName=role_name).build_full_result()["PolicyNames"] @AWSRetry.jittered_backoff() def list_iam_attached_role_policies_with_backoff(client, role_name): - paginator = client.get_paginator('list_attached_role_policies') - return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies'] + paginator = client.get_paginator("list_attached_role_policies") + return paginator.paginate(RoleName=role_name).build_full_result()["AttachedPolicies"] @AWSRetry.jittered_backoff() def 
list_iam_instance_profiles_for_role_with_backoff(client, role_name): - paginator = client.get_paginator('list_instance_profiles_for_role') - return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles'] + paginator = client.get_paginator("list_instance_profiles_for_role") + return paginator.paginate(RoleName=role_name).build_full_result()["InstanceProfiles"] def describe_iam_role(module, client, role): - name = role['RoleName'] + name = role["RoleName"] try: - role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name) + role["InlinePolicies"] = list_iam_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) try: - role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name) + role["ManagedPolicies"] = list_iam_attached_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) try: - role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name) + role["InstanceProfiles"] = list_iam_instance_profiles_for_role_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) try: - role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags']) - del role['Tags'] + role["tags"] = boto3_tag_list_to_ansible_dict(role["Tags"]) + del role["Tags"] except KeyError: - role['tags'] = {} + role["tags"] = {} return role def describe_iam_roles(module, client): - name = module.params['name'] - path_prefix = module.params['path_prefix'] + name = module.params["name"] + path_prefix = module.params["path_prefix"] if name: try: - roles = [client.get_role(RoleName=name, aws_retry=True)['Role']] - except is_boto3_error_code('NoSuchEntity'): + roles = [client.get_role(RoleName=name, aws_retry=True)["Role"]] + except is_boto3_error_code("NoSuchEntity"): return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) else: params = dict() if path_prefix: - if not path_prefix.startswith('/'): - path_prefix = '/' + path_prefix - if not path_prefix.endswith('/'): - path_prefix = path_prefix + '/' - params['PathPrefix'] = path_prefix + if not path_prefix.startswith("/"): + path_prefix = "/" + path_prefix + if not path_prefix.endswith("/"): + path_prefix = path_prefix + "/" + params["PathPrefix"] = path_prefix try: - roles = list_iam_roles_with_backoff(client, **params)['Roles'] + roles = list_iam_roles_with_backoff(client, **params)["Roles"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list IAM roles") return [normalize_role(describe_iam_role(module, client, role)) for role in roles] @@ -245,7 +248,7 @@ def normalize_profile(profile): def normalize_role(role): - new_role = camel_dict_to_snake_dict(role, ignore_list=['tags']) + new_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") if 
role.get("InstanceProfiles"): role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] @@ -254,27 +257,32 @@ def normalize_role(role): def main(): """ - Module action handler + Module action handler """ argument_spec = dict( - name=dict(aliases=['role_name']), + name=dict(aliases=["role_name"]), path_prefix=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['name', 'path_prefix']]) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - ".assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", collection_name="community.aws") + module.deprecate( + "In a release after 2023-12-01 the contents of assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. The " + ".assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="community.aws", + ) module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_saml_federation.py b/iam_saml_federation.py index e134588f7ef..238aa5d9a3f 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -106,7 +106,7 @@ def __init__(self, module): self.module = module try: - self.conn = module.client('iam') + self.conn = module.client("iam") except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Unknown AWS SDK error") @@ -133,10 +133,10 @@ def _delete_saml_provider(self, arn): def _get_provider_arn(self, name): providers = self._list_saml_providers() - for p in providers['SAMLProviderList']: - provider_name = p['Arn'].split('/', 1)[1] + for p in providers["SAMLProviderList"]: + provider_name = p["Arn"].split("/", 1)[1] if name == provider_name: - return p['Arn'] + return p["Arn"] return None @@ -144,7 +144,7 @@ def create_or_update_saml_provider(self, name, metadata): if not metadata: self.module.fail_json(msg="saml_metadata_document must be defined for present state") - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: @@ -156,38 +156,38 @@ def create_or_update_saml_provider(self, name, metadata): except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name)) - if metadata.strip() != resp['SAMLMetadataDocument'].strip(): + if metadata.strip() != resp["SAMLMetadataDocument"].strip(): # provider needs updating - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._update_saml_provider(arn, metadata) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Could not update the identity provider 
'{0}'".format(name)) else: - res['saml_provider'] = self._build_res(arn) + res["saml_provider"] = self._build_res(arn) else: # create - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: resp = self._create_saml_provider(metadata, name) - res['saml_provider'] = self._build_res(resp['SAMLProviderArn']) + res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name)) self.module.exit_json(**res) def delete_saml_provider(self, name): - res = {'changed': False} + res = {"changed": False} try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) if arn: # delete - res['changed'] = True + res["changed"] = True if not self.module.check_mode: try: self._delete_saml_provider(arn) @@ -202,7 +202,7 @@ def _build_res(self, arn): "arn": arn, "metadata_document": saml_provider["SAMLMetadataDocument"], "create_date": saml_provider["CreateDate"].isoformat(), - "expire_date": saml_provider["ValidUntil"].isoformat() + "expire_date": saml_provider["ValidUntil"].isoformat(), } @@ -210,26 +210,26 @@ def main(): argument_spec = dict( name=dict(required=True), saml_metadata_document=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'absent']), + state=dict(default="present", required=False, choices=["present", "absent"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['saml_metadata_document'])] + required_if=[("state", "present", ["saml_metadata_document"])], ) - name = module.params['name'] - state = module.params.get('state') - saml_metadata_document = module.params.get('saml_metadata_document') + name = module.params["name"] + state = module.params.get("state") + saml_metadata_document = module.params.get("saml_metadata_document") sp_man = SAMLProviderManager(module) - if state == 'present': + if state == "present": sp_man.create_or_update_saml_provider(name, saml_metadata_document) - elif state == 'absent': + elif state == "absent": sp_man.delete_saml_provider(name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_server_certificate.py b/iam_server_certificate.py index 3ab35fb6864..dd8427dc15b 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -109,22 +109,22 @@ @AWSRetry.jittered_backoff() def _list_server_certficates(): - paginator = client.get_paginator('list_server_certificates') - return paginator.paginate().build_full_result()['ServerCertificateMetadataList'] + paginator = client.get_paginator("list_server_certificates") + return paginator.paginate().build_full_result()["ServerCertificateMetadataList"] def check_duplicate_cert(new_cert): - orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certficates()) + orig_cert_names = list(c["ServerCertificateName"] for c in _list_server_certficates()) for cert_name in orig_cert_names: cert = get_server_certificate(cert_name) if not cert: continue - cert_body = cert.get('certificate_body', None) + cert_body = cert.get("certificate_body", None) if not _compare_cert(new_cert, cert_body): continue module.fail_json( changed=False, - msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name), + 
msg="This certificate already exists under the name {0} and dup_ok=False".format(cert_name), duplicate_cert=cert, ) @@ -137,25 +137,25 @@ def _compare_cert(cert_a, cert_b): # Trim out the whitespace before comparing the certs. While this could mean # an invalid cert 'matches' a valid cert, that's better than some stray # whitespace breaking things - cert_a.replace('\r', '') - cert_a.replace('\n', '') - cert_a.replace(' ', '') - cert_b.replace('\r', '') - cert_b.replace('\n', '') - cert_b.replace(' ', '') + cert_a.replace("\r", "") + cert_a.replace("\n", "") + cert_a.replace(" ", "") + cert_b.replace("\r", "") + cert_b.replace("\n", "") + cert_b.replace(" ", "") return cert_a == cert_b def update_server_certificate(current_cert): changed = False - cert = module.params.get('cert') - cert_chain = module.params.get('cert_chain') + cert = module.params.get("cert") + cert_chain = module.params.get("cert_chain") - if not _compare_cert(cert, current_cert.get('certificate_body', None)): - module.fail_json(msg='Modifying the certificate body is not supported by AWS') - if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)): - module.fail_json(msg='Modifying the chaining certificate is not supported by AWS') + if not _compare_cert(cert, current_cert.get("certificate_body", None)): + module.fail_json(msg="Modifying the certificate body is not supported by AWS") + if not _compare_cert(cert_chain, current_cert.get("certificate_chain", None)): + module.fail_json(msg="Modifying the chaining certificate is not supported by AWS") # We can't compare keys. if module.check_mode: @@ -168,15 +168,15 @@ def update_server_certificate(current_cert): def create_server_certificate(): - cert = module.params.get('cert') - key = module.params.get('key') - cert_chain = module.params.get('cert_chain') + cert = module.params.get("cert") + key = module.params.get("key") + cert_chain = module.params.get("cert_chain") - if not module.params.get('dup_ok'): + if not module.params.get("dup_ok"): check_duplicate_cert(cert) - path = module.params.get('path') - name = module.params.get('name') + path = module.params.get("path") + name = module.params.get("name") params = dict( ServerCertificateName=name, @@ -185,28 +185,25 @@ def create_server_certificate(): ) if cert_chain: - params['CertificateChain'] = cert_chain + params["CertificateChain"] = cert_chain if path: - params['Path'] = path + params["Path"] = path if module.check_mode: return True try: - client.upload_server_certificate( - aws_retry=True, - **params - ) + client.upload_server_certificate(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name)) + module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name)) return True def rename_server_certificate(current_cert): - name = module.params.get('name') - new_name = module.params.get('new_name') - new_path = module.params.get('new_path') + name = module.params.get("name") + new_name = module.params.get("new_name") + new_path = module.params.get("new_path") changes = dict() @@ -215,16 +212,16 @@ def rename_server_certificate(current_cert): current_cert = get_server_certificate(new_name) else: if new_name: - changes['NewServerCertificateName'] = new_name + changes["NewServerCertificateName"] = new_name - cert_metadata = current_cert.get('server_certificate_metadata', {}) + cert_metadata = current_cert.get("server_certificate_metadata", {}) if not 
current_cert: - module.fail_json(msg='Unable to find certificate {0}'.format(name)) + module.fail_json(msg="Unable to find certificate {0}".format(name)) - current_path = cert_metadata.get('path', None) + current_path = cert_metadata.get("path", None) if new_path and current_path != new_path: - changes['NewPath'] = new_path + changes["NewPath"] = new_path if not changes: return False @@ -233,14 +230,9 @@ def rename_server_certificate(current_cert): return True try: - client.update_server_certificate( - aws_retry=True, - ServerCertificateName=name, - **changes - ) + client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name), - changes=changes) + module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name), changes=changes) return True @@ -252,17 +244,20 @@ def delete_server_certificate(current_cert): if module.check_mode: return True - name = module.params.get('name') + name = module.params.get("name") try: result = client.delete_server_certificate( aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete server certificate {0}".format(name)) return True @@ -275,11 +270,14 @@ def get_server_certificate(name): aws_retry=True, ServerCertificateName=name, ) - except is_boto3_error_code('NoSuchEntity'): + except is_boto3_error_code("NoSuchEntity"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name)) - cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate'))) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get server certificate {0}".format(name)) + cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate"))) return cert @@ -289,75 +287,74 @@ def compatability_results(current_cert): if not current_cert: return compat_results - metadata = current_cert.get('server_certificate_metadata', {}) - - if current_cert.get('certificate_body', None): - compat_results['cert_body'] = current_cert.get('certificate_body') - if current_cert.get('certificate_chain', None): - compat_results['chain_cert_body'] = current_cert.get('certificate_chain') - if metadata.get('arn', None): - compat_results['arn'] = metadata.get('arn') - if metadata.get('expiration', None): - compat_results['expiration_date'] = metadata.get('expiration') - if metadata.get('path', None): - compat_results['cert_path'] = metadata.get('path') - if metadata.get('server_certificate_name', None): - compat_results['name'] = metadata.get('server_certificate_name') - if metadata.get('upload_date', None): - compat_results['upload_date'] = metadata.get('upload_date') + metadata = current_cert.get("server_certificate_metadata", {}) + + if current_cert.get("certificate_body", 
None): + compat_results["cert_body"] = current_cert.get("certificate_body") + if current_cert.get("certificate_chain", None): + compat_results["chain_cert_body"] = current_cert.get("certificate_chain") + if metadata.get("arn", None): + compat_results["arn"] = metadata.get("arn") + if metadata.get("expiration", None): + compat_results["expiration_date"] = metadata.get("expiration") + if metadata.get("path", None): + compat_results["cert_path"] = metadata.get("path") + if metadata.get("server_certificate_name", None): + compat_results["name"] = metadata.get("server_certificate_name") + if metadata.get("upload_date", None): + compat_results["upload_date"] = metadata.get("upload_date") return compat_results def main(): - global module global client argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), cert=dict(), key=dict(no_log=True), cert_chain=dict(), new_name=dict(), - path=dict(default='/'), + path=dict(default="/"), new_path=dict(), - dup_ok=dict(type='bool', default=True), + dup_ok=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['new_path', 'key'], - ['new_path', 'cert'], - ['new_path', 'cert_chain'], - ['new_name', 'key'], - ['new_name', 'cert'], - ['new_name', 'cert_chain'], + ["new_path", "key"], + ["new_path", "cert"], + ["new_path", "cert_chain"], + ["new_name", "key"], + ["new_name", "cert"], + ["new_name", "cert_chain"], ], supports_check_mode=True, ) - client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get('state') - name = module.params.get('name') - path = module.params.get('path') - new_name = module.params.get('new_name') - new_path = module.params.get('new_path') - dup_ok = module.params.get('dup_ok') + state = module.params.get("state") + name = module.params.get("name") + path = module.params.get("path") + new_name = module.params.get("new_name") + new_path = module.params.get("new_path") + dup_ok = module.params.get("dup_ok") current_cert = get_server_certificate(name) results = dict() - if state == 'absent': + if state == "absent": changed = delete_server_certificate(current_cert) if changed: - results['deleted_cert'] = name + results["deleted_cert"] = name else: - msg = 'Certificate with the name {0} already absent'.format(name) - results['msg'] = msg + msg = "Certificate with the name {0} already absent".format(name) + results["msg"] = msg else: if new_name or new_path: changed = rename_server_certificate(current_cert) @@ -371,16 +368,13 @@ def main(): changed = create_server_certificate() updated_cert = get_server_certificate(name) - results['server_certificate'] = updated_cert + results["server_certificate"] = updated_cert compat_results = compatability_results(updated_cert) if compat_results: results.update(compat_results) - module.exit_json( - changed=changed, - **results - ) + module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/iam_server_certificate_info.py b/iam_server_certificate_info.py index eb38a5f8b48..5504cb746fd 100644 --- a/iam_server_certificate_info.py +++ b/iam_server_certificate_info.py @@ -110,22 +110,24 @@ def get_server_certs(iam, name=None): results = dict() try: if name: - server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']] + 
server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]] else: - server_certs = iam.list_server_certificates()['ServerCertificateMetadataList'] + server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"] for server_cert in server_certs: if not name: - server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate'] - cert_md = server_cert['ServerCertificateMetadata'] - results[cert_md['ServerCertificateName']] = { - 'certificate_body': server_cert['CertificateBody'], - 'server_certificate_id': cert_md['ServerCertificateId'], - 'server_certificate_name': cert_md['ServerCertificateName'], - 'arn': cert_md['Arn'], - 'path': cert_md['Path'], - 'expiration': cert_md['Expiration'].isoformat(), - 'upload_date': cert_md['UploadDate'].isoformat(), + server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[ + "ServerCertificate" + ] + cert_md = server_cert["ServerCertificateMetadata"] + results[cert_md["ServerCertificateName"]] = { + "certificate_body": server_cert["CertificateBody"], + "server_certificate_id": cert_md["ServerCertificateId"], + "server_certificate_name": cert_md["ServerCertificateName"], + "arn": cert_md["Arn"], + "path": cert_md["Path"], + "expiration": cert_md["Expiration"].isoformat(), + "upload_date": cert_md["UploadDate"].isoformat(), } except botocore.exceptions.ClientError: @@ -136,7 +138,7 @@ def get_server_certs(iam, name=None): def main(): argument_spec = dict( - name=dict(type='str'), + name=dict(type="str"), ) module = AnsibleAWSModule( @@ -145,14 +147,14 @@ def main(): ) try: - iam = module.client('iam') + iam = module.client("iam") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - cert_name = module.params.get('name') + cert_name = module.params.get("name") results = get_server_certs(iam, cert_name) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/inspector_target.py b/inspector_target.py index 8891fa34a67..f9ec6d53a84 100644 --- a/inspector_target.py +++ b/inspector_target.py @@ -116,11 +116,11 @@ def main(): argument_spec = dict( name=dict(required=True), - state=dict(choices=['absent', 'present'], default='present'), - tags=dict(type='dict'), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(type="dict"), ) - required_if = [['state', 'present', ['tags']]] + required_if = [["state", "present", ["tags"]]] module = AnsibleAWSModule( argument_spec=argument_spec, @@ -128,29 +128,37 @@ def main(): required_if=required_if, ) - name = module.params.get('name') - state = module.params.get('state').lower() - tags = module.params.get('tags') + name = module.params.get("name") + state = module.params.get("state").lower() + tags = module.params.get("tags") if tags: - tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value') + tags = ansible_dict_to_boto3_tag_list(tags, "key", "value") - client = module.client('inspector') + client = module.client("inspector") try: existing_target_arn = client.list_assessment_targets( - filter={'assessmentTargetNamePattern': name}, - ).get('assessmentTargetArns')[0] + filter={"assessmentTargetNamePattern": name}, + ).get( + "assessmentTargetArns" + )[0] existing_target = camel_dict_to_snake_dict( client.describe_assessment_targets( 
assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - existing_resource_group_arn = existing_target.get('resource_group_arn') - existing_resource_group_tags = client.describe_resource_groups( - resourceGroupArns=[existing_resource_group_arn], - ).get('resourceGroups')[0].get('tags') + existing_resource_group_arn = existing_target.get("resource_group_arn") + existing_resource_group_tags = ( + client.describe_resource_groups( + resourceGroupArns=[existing_resource_group_arn], + ) + .get("resourceGroups")[0] + .get("tags") + ) target_exists = True except ( @@ -161,23 +169,18 @@ def main(): except IndexError: target_exists = False - if state == 'present' and target_exists: + if state == "present" and target_exists: ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags) - ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict( - existing_resource_group_tags - ) - tags_to_add, tags_to_remove = compare_aws_tags( - ansible_dict_tags, - ansible_dict_existing_tags - ) + ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(existing_resource_group_tags) + tags_to_add, tags_to_remove = compare_aws_tags(ansible_dict_tags, ansible_dict_existing_tags) if not (tags_to_add or tags_to_remove): - existing_target.update({'tags': ansible_dict_existing_tags}) + existing_target.update({"tags": ansible_dict_existing_tags}) module.exit_json(changed=False, **existing_target) else: try: updated_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") client.update_assessment_target( assessmentTargetArn=existing_target_arn, @@ -188,10 +191,12 @@ def main(): updated_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[existing_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - updated_target.update({'tags': ansible_dict_tags}) + updated_target.update({"tags": ansible_dict_tags}) module.exit_json(changed=True, **updated_target) except ( botocore.exceptions.BotoCoreError, @@ -199,24 +204,26 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to update target") - elif state == 'present' and not target_exists: + elif state == "present" and not target_exists: try: new_resource_group_arn = client.create_resource_group( resourceGroupTags=tags, - ).get('resourceGroupArn') + ).get("resourceGroupArn") new_target_arn = client.create_assessment_target( assessmentTargetName=name, resourceGroupArn=new_resource_group_arn, - ).get('assessmentTargetArn') + ).get("assessmentTargetArn") new_target = camel_dict_to_snake_dict( client.describe_assessment_targets( assessmentTargetArns=[new_target_arn], - ).get('assessmentTargets')[0] + ).get( + "assessmentTargets" + )[0] ) - new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)}) + new_target.update({"tags": boto3_tag_list_to_ansible_dict(tags)}) module.exit_json(changed=True, **new_target) except ( botocore.exceptions.BotoCoreError, @@ -224,7 +231,7 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to create target") - elif state == 'absent' and target_exists: + elif state == "absent" and target_exists: try: client.delete_assessment_target( assessmentTargetArn=existing_target_arn, @@ -236,9 +243,9 @@ def main(): ) as e: module.fail_json_aws(e, msg="trying to delete target") - elif state == 'absent' and not target_exists: + elif state == "absent" and not target_exists: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ 
== "__main__": main() diff --git a/kinesis_stream.py b/kinesis_stream.py index 2bcca6a4ad4..8147f60f3db 100644 --- a/kinesis_stream.py +++ b/kinesis_stream.py @@ -209,16 +209,14 @@ def get_tags(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() try: - results = ( - client.list_tags_for_stream(**params)['Tags'] - ) + results = client.list_tags_for_stream(**params)["Tags"] success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -239,28 +237,26 @@ def find_stream(client, stream_name): Returns: Tuple (bool, str, dict) """ - err_msg = '' + err_msg = "" success = False params = { - 'StreamName': stream_name, + "StreamName": stream_name, } results = dict() has_more_shards = True shards = list() try: while has_more_shards: - results = ( - client.describe_stream(**params)['StreamDescription'] - ) - shards.extend(results.pop('Shards')) - has_more_shards = results['HasMoreShards'] + results = client.describe_stream(**params)["StreamDescription"] + shards.extend(results.pop("Shards")) + has_more_shards = results["HasMoreShards"] if has_more_shards: - params['ExclusiveStartShardId'] = shards[-1]['ShardId'] - results['Shards'] = shards - num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']]) - results['OpenShardsCount'] = len(shards) - num_closed_shards - results['ClosedShardsCount'] = num_closed_shards - results['ShardsCount'] = len(shards) + params["ExclusiveStartShardId"] = shards[-1]["ShardId"] + results["Shards"] = shards + num_closed_shards = len([s for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"]]) + results["OpenShardsCount"] = len(shards) - num_closed_shards + results["ClosedShardsCount"] = num_closed_shards + results["ShardsCount"] = len(shards) success = True except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -268,8 +264,7 @@ def find_stream(client, stream_name): return success, err_msg, results -def wait_for_status(client, stream_name, status, wait_timeout=300, - check_mode=False): +def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=False): """Wait for the status to change for a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client @@ -298,16 +293,14 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, while wait_timeout > time.time(): try: - find_success, find_msg, stream = ( - find_stream(client, stream_name) - ) + find_success, find_msg, stream = find_stream(client, stream_name) if check_mode: status_achieved = True break - elif status != 'DELETING': + elif status != "DELETING": if find_success and stream: - if stream.get('StreamStatus') == status: + if stream.get("StreamStatus") == status: status_achieved = True break @@ -329,7 +322,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, return status_achieved, err_msg, stream -def tags_action(client, stream_name, tags, action='create', check_mode=False): +def tags_action(client, stream_name, tags, action="create", check_mode=False): """Create or delete multiple tags from a Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
@@ -356,26 +349,26 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False): """ success = False err_msg = "" - params = {'StreamName': stream_name} + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['Tags'] = tags + if action == "create": + params["Tags"] = tags client.add_tags_to_stream(**params) success = True - elif action == 'delete': - params['TagKeys'] = tags + elif action == "delete": + params["TagKeys"] = tags client.remove_tags_from_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -405,32 +398,25 @@ def update_tags(client, stream_name, tags, check_mode=False): """ success = False changed = False - err_msg = '' - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + err_msg = "" + tag_success, tag_msg, current_tags = get_tags(client, stream_name) tags_to_set, tags_to_delete = compare_aws_tags( - current_tags, tags, + current_tags, + tags, purge_tags=True, ) if tags_to_delete: - delete_success, delete_msg = ( - tags_action( - client, stream_name, tags_to_delete, action='delete', - check_mode=check_mode - ) + delete_success, delete_msg = tags_action( + client, stream_name, tags_to_delete, action="delete", check_mode=check_mode ) if not delete_success: return delete_success, changed, delete_msg - tag_msg = 'Tags removed' + tag_msg = "Tags removed" if tags_to_set: - create_success, create_msg = ( - tags_action( - client, stream_name, tags_to_set, action='create', - check_mode=check_mode - ) + create_success, create_msg = tags_action( + client, stream_name, tags_to_set, action="create", check_mode=check_mode ) if create_success: changed = True @@ -439,8 +425,7 @@ def update_tags(client, stream_name, tags, check_mode=False): return success, changed, err_msg -def stream_action(client, stream_name, shard_count=1, action='create', - timeout=300, check_mode=False): +def stream_action(client, stream_name, shard_count=1, action="create", timeout=300, check_mode=False): """Create or Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
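Note: update_tags() above leans on the collection's compare_aws_tags() helper to split the desired tags into a set-list and a remove-list. A plain-Python equivalent of the comparison it performs (a sketch of the semantics, not the helper's actual implementation):

    def diff_tags(current, desired, purge=True):
        # Tags whose value is missing or different must be (re)set; with
        # purge=True, keys absent from the desired set are removed --
        # matching the (tags_to_set, tags_to_delete) pair that drives the
        # two tags_action() calls above.
        to_set = {k: v for k, v in desired.items() if current.get(k) != v}
        to_remove = [k for k in current if k not in desired] if purge else []
        return to_set, to_remove

    # diff_tags({"env": "dev", "team": "a"}, {"env": "prod"})
    # -> ({"env": "prod"}, ["team"])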
@@ -464,28 +449,26 @@ def stream_action(client, stream_name, shard_count=1, action='create', List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'create': - params['ShardCount'] = shard_count + if action == "create": + params["ShardCount"] = shard_count client.create_stream(**params) success = True - elif action == 'delete': + elif action == "delete": client.delete_stream(**params) success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'create': + if action == "create": success = True - elif action == 'delete': + elif action == "delete": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -493,8 +476,9 @@ def stream_action(client, stream_name, shard_count=1, action='create', return success, err_msg -def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='', - timeout=300, check_mode=False): +def stream_encryption_action( + client, stream_name, action="start_encryption", encryption_type="", key_id="", timeout=300, check_mode=False +): """Create, Encrypt or Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -520,31 +504,29 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc List (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'start_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + if action == "start_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.start_stream_encryption(**params) success = True - elif action == 'stop_encryption': - params['EncryptionType'] = encryption_type - params['KeyId'] = key_id + elif action == "stop_encryption": + params["EncryptionType"] = encryption_type + params["KeyId"] = key_id client.stop_stream_encryption(**params) success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = "Invalid encryption action {0}".format(action) else: - if action == 'start_encryption': + if action == "start_encryption": success = True - elif action == 'stop_encryption': + elif action == "stop_encryption": success = True else: - err_msg = 'Invalid encryption action {0}'.format(action) + err_msg = "Invalid encryption action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -552,8 +534,7 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc return success, err_msg -def retention_action(client, stream_name, retention_period=24, - action='increase', check_mode=False): +def retention_action(client, stream_name, retention_period=24, action="increase", check_mode=False): """Increase or Decrease the retention of messages in the Kinesis stream. Args: client (botocore.client.EC2): Boto3 client. 
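Note: stream_action() and stream_encryption_action() share one shape: build params, short-circuit under check mode, then dispatch on an action string. A condensed sketch of that pattern, assuming a boto3 "kinesis" client (kinesis_action is a hypothetical name, not part of the module):

    import botocore.exceptions

    def kinesis_action(client, stream_name, action, check_mode=False, **extra):
        # Map action names to client calls; unknown actions fail fast, and
        # check mode reports success without touching AWS.
        calls = {"create": client.create_stream, "delete": client.delete_stream}
        if action not in calls:
            return False, "Invalid action {0}".format(action)
        if check_mode:
            return True, ""
        try:
            calls[action](StreamName=stream_name, **extra)
            return True, ""
        except botocore.exceptions.ClientError as e:
            return False, str(e)

    # kinesis_action(client, "example-stream", "create", ShardCount=2)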
@@ -578,35 +559,29 @@ def retention_action(client, stream_name, retention_period=24, Tuple (bool, str) """ success = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} try: if not check_mode: - if action == 'increase': - params['RetentionPeriodHours'] = retention_period + if action == "increase": + params["RetentionPeriodHours"] = retention_period client.increase_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period increased successfully to {0}'.format(retention_period) - ) - elif action == 'decrease': - params['RetentionPeriodHours'] = retention_period + err_msg = "Retention Period increased successfully to {0}".format(retention_period) + elif action == "decrease": + params["RetentionPeriodHours"] = retention_period client.decrease_stream_retention_period(**params) success = True - err_msg = ( - 'Retention Period decreased successfully to {0}'.format(retention_period) - ) + err_msg = "Retention Period decreased successfully to {0}".format(retention_period) else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) else: - if action == 'increase': + if action == "increase": success = True - elif action == 'decrease': + elif action == "decrease": success = True else: - err_msg = 'Invalid action {0}'.format(action) + err_msg = "Invalid action {0}".format(action) except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -636,13 +611,10 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False Tuple (bool, str) """ success = True - err_msg = '' - params = { - 'StreamName': stream_name, - 'ScalingType': 'UNIFORM_SCALING' - } + err_msg = "" + params = {"StreamName": stream_name, "ScalingType": "UNIFORM_SCALING"} if not check_mode: - params['TargetShardCount'] = number_of_shards + params["TargetShardCount"] = number_of_shards try: client.update_shard_count(**params) except botocore.exceptions.ClientError as e: @@ -651,8 +623,17 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False return success, err_msg -def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def update( + client, + current_stream, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Update an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
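Note: retention_action() exposes separate increase/decrease paths because the Kinesis API has no single "set retention" call; update() (next hunk) picks the direction by comparing against the stream's current RetentionPeriodHours. A sketch of that comparison, assuming a boto3 "kinesis" client:

    def reconcile_retention(client, stream_name, desired_hours, current_hours):
        # The API only offers Increase/DecreaseStreamRetentionPeriod, so
        # the caller must choose a direction; equal values mean no change.
        if desired_hours > current_hours:
            client.increase_stream_retention_period(
                StreamName=stream_name, RetentionPeriodHours=desired_hours
            )
            return True
        if desired_hours < current_hours:
            client.decrease_stream_retention_period(
                StreamName=stream_name, RetentionPeriodHours=desired_hours
            )
            return True
        return False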
@@ -692,43 +673,30 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe """ success = True changed = False - err_msg = '' + err_msg = "" if retention_period: if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg - if current_stream.get('StreamStatus') == 'ACTIVE': + if current_stream.get("StreamStatus") == "ACTIVE": retention_changed = False - if retention_period > current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + if retention_period > current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) - elif retention_period < current_stream['RetentionPeriodHours']: - retention_changed, retention_msg = ( - retention_action( - client, stream_name, retention_period, action='decrease', - check_mode=check_mode - ) + elif retention_period < current_stream["RetentionPeriodHours"]: + retention_changed, retention_msg = retention_action( + client, stream_name, retention_period, action="decrease", check_mode=check_mode ) - elif retention_period == current_stream['RetentionPeriodHours']: - retention_msg = ( - 'Retention {0} is the same as {1}' - .format( - retention_period, - current_stream['RetentionPeriodHours'] - ) + elif retention_period == current_stream["RetentionPeriodHours"]: + retention_msg = "Retention {0} is the same as {1}".format( + retention_period, current_stream["RetentionPeriodHours"] ) success = True @@ -738,36 +706,27 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe err_msg = retention_msg if changed and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, False, wait_msg elif changed and not wait: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Retention Period for {0} is in the process of updating' - .format(stream_name) - ) + if current_stream["StreamStatus"] != "ACTIVE": + err_msg = "Retention Period for {0} is in the process of updating".format(stream_name) return success, changed, err_msg else: err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + "StreamStatus has to be ACTIVE in order to modify the retention period. 
Current status is {0}".format( + current_stream.get("StreamStatus", "UNKNOWN") + ) ) return success, changed, err_msg - if current_stream['OpenShardsCount'] != number_of_shards: - success, err_msg = ( - update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) - ) + if current_stream["OpenShardsCount"] != number_of_shards: + success, err_msg = update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) if not success: return success, changed, err_msg @@ -775,47 +734,42 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe changed = True if wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not wait_success: return wait_success, changed, wait_msg else: - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if stream_found and current_stream['StreamStatus'] != 'ACTIVE': - err_msg = ( - 'Number of shards for {0} is in the process of updating' - .format(stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if stream_found and current_stream["StreamStatus"] != "ACTIVE": + err_msg = "Number of shards for {0} is in the process of updating".format(stream_name) return success, changed, err_msg if tags: - tag_success, tag_changed, err_msg = ( - update_tags(client, stream_name, tags, check_mode=check_mode) - ) + tag_success, tag_changed, err_msg = update_tags(client, stream_name, tags, check_mode=check_mode) changed |= tag_changed if wait: - success, err_msg, status_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, status_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if success and changed: - err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} updated successfully.".format(stream_name) elif success and not changed: - err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name) + err_msg = "Kinesis Stream {0} did not change.".format(stream_name) return success, changed, err_msg -def create_stream(client, stream_name, number_of_shards=1, retention_period=None, - tags=None, wait=False, wait_timeout=300, check_mode=False): +def create_stream( + client, + stream_name, + number_of_shards=1, + retention_period=None, + tags=None, + wait=False, + wait_timeout=300, + check_mode=False, +): """Create an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
@@ -847,99 +801,74 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait: - wait_success, wait_msg, current_stream = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + if stream_found and current_stream.get("StreamStatus") == "DELETING" and wait: + wait_success, wait_msg, current_stream = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - if stream_found and current_stream.get('StreamStatus') != 'DELETING': + if stream_found and current_stream.get("StreamStatus") != "DELETING": success, changed, err_msg = update( - client, current_stream, stream_name, number_of_shards, - retention_period, tags, wait, wait_timeout, check_mode=check_mode + client, + current_stream, + stream_name, + number_of_shards, + retention_period, + tags, + wait, + wait_timeout, + check_mode=check_mode, ) else: - create_success, create_msg = ( - stream_action( - client, stream_name, number_of_shards, action='create', - check_mode=check_mode - ) + create_success, create_msg = stream_action( + client, stream_name, number_of_shards, action="create", check_mode=check_mode ) if not create_success: changed = True - err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg) + err_msg = "Failed to create Kinesis stream: {0}".format(create_msg) return False, True, err_msg, {} else: changed = True if wait: - wait_success, wait_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) - ) - err_msg = ( - 'Kinesis Stream {0} is in the process of being created' - .format(stream_name) + wait_success, wait_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) + err_msg = "Kinesis Stream {0} is in the process of being created".format(stream_name) if not wait_success: return wait_success, True, wait_msg, results else: - err_msg = ( - 'Kinesis Stream {0} created successfully' - .format(stream_name) - ) + err_msg = "Kinesis Stream {0} created successfully".format(stream_name) if tags: - changed, err_msg = ( - tags_action( - client, stream_name, tags, action='create', - check_mode=check_mode - ) - ) + changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) if changed: success = True if not success: return success, changed, err_msg, results - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) - if retention_period and current_stream.get('StreamStatus') == 'ACTIVE': - changed, err_msg = ( - retention_action( - client, stream_name, retention_period, action='increase', - check_mode=check_mode - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) + if retention_period and current_stream.get("StreamStatus") == "ACTIVE": + changed, err_msg = retention_action( + client, stream_name, retention_period, action="increase", check_mode=check_mode ) if changed: success = True if not success: return success, changed, err_msg, results else: - err_msg = ( - 'StreamStatus has to be ACTIVE in order to modify the retention period. 
Current status is {0}' - .format(current_stream.get('StreamStatus', 'UNKNOWN')) + err_msg = "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( + current_stream.get("StreamStatus", "UNKNOWN") ) success = create_success changed = True if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if check_mode: current_tags = tags @@ -947,13 +876,12 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def delete_stream(client, stream_name, wait=False, wait_timeout=300, - check_mode=False): +def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode=False): """Delete an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. @@ -977,44 +905,33 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, """ success = False changed = False - err_msg = '' + err_msg = "" results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - success, err_msg = ( - stream_action( - client, stream_name, action='delete', check_mode=check_mode - ) - ) + success, err_msg = stream_action(client, stream_name, action="delete", check_mode=check_mode) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'DELETING', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "DELETING", wait_timeout, check_mode=check_mode ) - err_msg = 'Stream {0} deleted successfully'.format(stream_name) + err_msg = "Stream {0} deleted successfully".format(stream_name) if not success: return success, True, err_msg, results else: - err_msg = ( - 'Stream {0} is in the process of being deleted' - .format(stream_name) - ) + err_msg = "Stream {0} is in the process of being deleted".format(stream_name) else: success = True changed = False - err_msg = 'Stream {0} does not exist'.format(stream_name) + err_msg = "Stream {0} does not exist".format(stream_name) return success, changed, err_msg, results -def start_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=False, wait_timeout=300, check_mode=False): +def start_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=False, wait_timeout=300, check_mode=False +): """Start encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
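Note: before returning, these functions pass the raw API response through camel_dict_to_snake_dict() so playbooks see snake_case keys. A rough approximation of what that helper does (the real implementation has extra handling, e.g. for acronyms and reversible conversion):

    import re

    def camel_keys_to_snake(obj):
        # Recursively rewrite CamelCase dict keys as snake_case; values
        # are left untouched.
        if isinstance(obj, dict):
            return {
                re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower(): camel_keys_to_snake(value)
                for key, value in obj.items()
            }
        if isinstance(obj, list):
            return [camel_keys_to_snake(item) for item in obj]
        return obj

    # camel_keys_to_snake({"StreamStatus": "ACTIVE", "RetentionPeriodHours": 24})
    # -> {"stream_status": "ACTIVE", "retention_period_hours": 24}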
@@ -1042,65 +959,56 @@ def start_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id): + if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: changed = False success = True - err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption already configured.".format(stream_name) else: - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode - ) + success, err_msg = stream_encryption_action( + client, + stream_name, + action="start_encryption", + encryption_type=encryption_type, + key_id=key_id, + check_mode=check_mode, ) if success: changed = True if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption started successfully.".format(stream_name) if not success: return success, True, err_msg, results else: - err_msg = ( - 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name) - ) + err_msg = "Kinesis Stream {0} is in the process of starting encryption.".format(stream_name) else: success = True changed = False - err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name) + err_msg = "Kinesis Stream {0} does not exist".format(stream_name) if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results -def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', - wait=True, wait_timeout=300, check_mode=False): +def stop_stream_encryption( + client, stream_name, encryption_type="", key_id="", wait=True, wait_timeout=300, check_mode=False +): """Stop encryption on an Amazon Kinesis Stream. Args: client (botocore.client.EC2): Boto3 client. 
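Note: the hunk above keeps start_stream_encryption() idempotent: when EncryptionType and KeyId already match, it reports changed=False instead of calling the API. The same check in isolation, assuming a boto3 "kinesis" client and a hypothetical KMS key alias:

    def ensure_stream_encryption(client, stream_name, key_id,
                                 encryption_type="KMS", check_mode=False):
        # Only call StartStreamEncryption when the stream is not already
        # encrypted with the requested type and key.
        desc = client.describe_stream(StreamName=stream_name)["StreamDescription"]
        if desc.get("EncryptionType") == encryption_type and desc.get("KeyId") == key_id:
            return False  # already configured; nothing to change
        if not check_mode:
            client.start_stream_encryption(
                StreamName=stream_name, EncryptionType=encryption_type, KeyId=key_id
            )
        return True

    # ensure_stream_encryption(client, "example-stream", "alias/example-key")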
@@ -1126,57 +1034,47 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', """ success = False changed = False - err_msg = '' - params = { - 'StreamName': stream_name - } + err_msg = "" + params = {"StreamName": stream_name} results = dict() - stream_found, stream_msg, current_stream = ( - find_stream(client, stream_name) - ) + stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: - if current_stream.get('EncryptionType') == 'KMS': - success, err_msg = ( - stream_encryption_action( - client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode - ) + if current_stream.get("EncryptionType") == "KMS": + success, err_msg = stream_encryption_action( + client, + stream_name, + action="stop_encryption", + key_id=key_id, + encryption_type=encryption_type, + check_mode=check_mode, ) changed = success if wait: - success, err_msg, results = ( - wait_for_status( - client, stream_name, 'ACTIVE', wait_timeout, - check_mode=check_mode - ) + success, err_msg, results = wait_for_status( + client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if not success: return success, True, err_msg, results - err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption stopped successfully.".format(stream_name) else: - err_msg = ( - 'Stream {0} is in the process of stopping encryption.'.format(stream_name) - ) - elif current_stream.get('EncryptionType') == 'NONE': + err_msg = "Stream {0} is in the process of stopping encryption.".format(stream_name) + elif current_stream.get("EncryptionType") == "NONE": success = True - err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name) + err_msg = "Kinesis Stream {0} encryption already stopped.".format(stream_name) else: success = True changed = False - err_msg = 'Stream {0} does not exist.'.format(stream_name) + err_msg = "Stream {0} does not exist.".format(stream_name) if success: - stream_found, stream_msg, results = ( - find_stream(client, stream_name) - ) - tag_success, tag_msg, current_tags = ( - get_tags(client, stream_name) - ) + stream_found, stream_msg, results = find_stream(client, stream_name) + tag_success, tag_msg, current_tags = get_tags(client, stream_name) if not current_tags: current_tags = dict() results = camel_dict_to_snake_dict(results) - results['tags'] = current_tags + results["tags"] = current_tags return success, changed, err_msg, results @@ -1184,78 +1082,65 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='', def main(): argument_spec = dict( name=dict(required=True), - shards=dict(default=None, required=False, type='int'), - retention_period=dict(default=None, required=False, type='int'), - tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), - wait=dict(default=True, required=False, type='bool'), - wait_timeout=dict(default=300, required=False, type='int'), - state=dict(default='present', choices=['present', 'absent']), - encryption_type=dict(required=False, choices=['NONE', 'KMS']), - key_id=dict(required=False, type='str'), - encryption_state=dict(required=False, choices=['enabled', 'disabled']), + shards=dict(default=None, required=False, type="int"), + retention_period=dict(default=None, required=False, type="int"), + tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), + wait=dict(default=True, required=False, type="bool"), + 
wait_timeout=dict(default=300, required=False, type="int"), + state=dict(default="present", choices=["present", "absent"]), + encryption_type=dict(required=False, choices=["NONE", "KMS"]), + key_id=dict(required=False, type="str"), + encryption_state=dict(required=False, choices=["enabled", "disabled"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, ) - retention_period = module.params.get('retention_period') - stream_name = module.params.get('name') - shards = module.params.get('shards') - state = module.params.get('state') - tags = module.params.get('tags') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - encryption_type = module.params.get('encryption_type') - key_id = module.params.get('key_id') - encryption_state = module.params.get('encryption_state') + retention_period = module.params.get("retention_period") + stream_name = module.params.get("name") + shards = module.params.get("shards") + state = module.params.get("state") + tags = module.params.get("tags") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + encryption_type = module.params.get("encryption_type") + key_id = module.params.get("key_id") + encryption_state = module.params.get("encryption_state") - if state == 'present' and not shards: - module.fail_json(msg='Shards is required when state == present.') + if state == "present" and not shards: + module.fail_json(msg="Shards is required when state == present.") if retention_period: if retention_period < 24: - module.fail_json(msg='Retention period can not be less than 24 hours.') + module.fail_json(msg="Retention period can not be less than 24 hours.") check_mode = module.check_mode try: - client = module.client('kinesis') + client = module.client("kinesis") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': - success, changed, err_msg, results = ( - create_stream( - client, stream_name, shards, retention_period, tags, - wait, wait_timeout, check_mode - ) + if state == "present": + success, changed, err_msg, results = create_stream( + client, stream_name, shards, retention_period, tags, wait, wait_timeout, check_mode ) - if encryption_state == 'enabled': - success, changed, err_msg, results = ( - start_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + if encryption_state == "enabled": + success, changed, err_msg, results = start_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif encryption_state == 'disabled': - success, changed, err_msg, results = ( - stop_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) + elif encryption_state == "disabled": + success, changed, err_msg, results = stop_stream_encryption( + client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode ) - elif state == 'absent': - success, changed, err_msg, results = ( - delete_stream(client, stream_name, wait, wait_timeout, check_mode) - ) + elif state == "absent": + success, changed, err_msg, results = delete_stream(client, stream_name, wait, wait_timeout, check_mode) if success: - module.exit_json( - success=success, changed=changed, msg=err_msg, **results - ) + module.exit_json(success=success, changed=changed, msg=err_msg, **results) else: - 
module.fail_json( - success=success, changed=changed, msg=err_msg, result=results - ) + module.fail_json(success=success, changed=changed, msg=err_msg, result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/lightsail.py b/lightsail.py index 0739a042316..6fb83b26b1f 100644 --- a/lightsail.py +++ b/lightsail.py @@ -201,16 +201,15 @@ def find_instance_info(module, client, instance_name, fail_if_not_found=False): - try: res = client.get_instance(instanceName=instance_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['instance'] + return res["instance"] def wait_for_instance_state(module, client, instance_name, states): @@ -218,19 +217,21 @@ def wait_for_instance_state(module, client, instance_name, states): `states` is a list of instance states that we are waiting for. """ - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") wait_max = time.time() + wait_timeout while wait_max > time.time(): try: instance = find_instance_info(module, client, instance_name) - if instance['state']['name'] in states: + if instance["state"]["name"] in states: break time.sleep(5) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) else: - module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -' - ' {1}'.format(instance_name, states)) + module.fail_json( + msg='Timed out waiting for instance "{0}" to get to one of the following states -' + " {1}".format(instance_name, states) + ) def update_public_ports(module, client, instance_name): @@ -244,7 +245,6 @@ def update_public_ports(module, client, instance_name): def create_or_update_instance(module, client, instance_name): - inst = find_instance_info(module, client, instance_name) if not inst: @@ -256,18 +256,18 @@ def create_or_update_instance(module, client, instance_name): "userData": module.params.get("user_data"), } - key_pair_name = module.params.get('key_pair_name') + key_pair_name = module.params.get("key_pair_name") if key_pair_name: - create_params['keyPairName'] = key_pair_name + create_params["keyPairName"] = key_pair_name try: client.create_instances(**create_params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: - desired_states = ['running'] + desired_states = ["running"] wait_for_instance_state(module, client, instance_name, desired_states) if module.params.get("public_ports") is not None: @@ -281,7 +281,6 @@ def create_or_update_instance(module, client, instance_name): def delete_instance(module, client, instance_name): - changed = False inst = find_instance_info(module, client, instance_name) @@ -289,7 +288,7 @@ def delete_instance(module, client, instance_name): module.exit_json(changed=changed, instance={}) # Wait for instance to exit transition state before deleting - desired_states = ['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) try: @@ -330,13 +329,13 @@ def start_or_stop_instance(module, client, instance_name, state): inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) # Wait for instance to exit transition state before state change - 
desired_states = ['running', 'stopped'] + desired_states = ["running", "stopped"] wait_for_instance_state(module, client, instance_name, desired_states) # Try state change - if inst and inst['state']['name'] != state: + if inst and inst["state"]["name"] != state: try: - if state == 'running': + if state == "running": client.start_instance(instanceName=instance_name) else: client.stop_instance(instanceName=instance_name) @@ -346,7 +345,7 @@ def start_or_stop_instance(module, client, instance_name, state): # Grab current instance info inst = find_instance_info(module, client, instance_name) - wait = module.params.get('wait') + wait = module.params.get("wait") if wait: desired_states = [state] wait_for_instance_state(module, client, instance_name, desired_states) @@ -356,7 +355,6 @@ def start_or_stop_instance(module, client, instance_name, state): def main(): - argument_spec = dict( name=dict(type="str", required=True), state=dict( @@ -383,23 +381,24 @@ def main(): ), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]]) + module = AnsibleAWSModule( + argument_spec=argument_spec, required_if=[["state", "present", ("zone", "blueprint_id", "bundle_id")]] + ) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_instance(module, client, name) - elif state == 'absent': + elif state == "absent": delete_instance(module, client, name) - elif state in ('running', 'stopped'): + elif state in ("running", "stopped"): start_or_stop_instance(module, client, name, state) - elif state in ('restarted', 'rebooted'): + elif state in ("restarted", "rebooted"): restart_instance(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py index 961f451a4ee..40d10a86bb1 100644 --- a/lightsail_static_ip.py +++ b/lightsail_static_ip.py @@ -76,25 +76,23 @@ def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): - try: res = client.get_static_ip(staticIpName=static_ip_name) - except is_boto3_error_code('NotFoundException') as e: + except is_boto3_error_code("NotFoundException") as e: if fail_if_not_found: module.fail_json_aws(e) return None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - return res['staticIp'] + return res["staticIp"] def create_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst: module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst)) else: - create_params = {'staticIpName': static_ip_name} + create_params = {"staticIpName": static_ip_name} try: client.allocate_static_ip(**create_params) @@ -107,7 +105,6 @@ def create_static_ip(module, client, static_ip_name): def delete_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) if inst is None: module.exit_json(changed=False, static_ip={}) @@ -123,24 +120,23 @@ def delete_static_ip(module, client, static_ip_name): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type="str", required=True), + state=dict(type="str", default="present", 
choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client = module.client('lightsail') + client = module.client("lightsail") - name = module.params.get('name') - state = module.params.get('state') + name = module.params.get("name") + state = module.params.get("state") - if state == 'present': + if state == "present": create_static_ip(module, client, name) - elif state == 'absent': + elif state == "absent": delete_static_ip(module, client, name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/mq_broker_info.py b/mq_broker_info.py index 65a3524db41..da04596f589 100644 --- a/mq_broker_info.py +++ b/mq_broker_info.py @@ -84,7 +84,10 @@ def get_broker_info(conn, module, broker_id): def main(): argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str")) required_one_of = ( - ("broker_name", "broker_id",), + ( + "broker_name", + "broker_id", + ), ) module = AnsibleAWSModule( diff --git a/msk_cluster.py b/msk_cluster.py index 6bf143509ae..960ae115bcb 100644 --- a/msk_cluster.py +++ b/msk_cluster.py @@ -341,9 +341,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"): return if time.time() - start > timeout: module.fail_json( - msg="Timeout waiting for cluster {0} (desired state is '{1}')".format( - current_state, state - ) + msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(current_state, state) ) time.sleep(check_interval) @@ -364,7 +362,7 @@ def prepare_create_options(module): "BrokerNodeGroupInfo": { "ClientSubnets": module.params["subnets"], "InstanceType": module.params["instance_type"], - } + }, } if module.params["security_groups"] and len(module.params["security_groups"]) != 0: @@ -372,9 +370,7 @@ def prepare_create_options(module): if module.params["ebs_volume_size"]: c_params["BrokerNodeGroupInfo"]["StorageInfo"] = { - "EbsStorageInfo": { - "VolumeSize": module.params.get("ebs_volume_size") - } + "EbsStorageInfo": {"VolumeSize": module.params.get("ebs_volume_size")} } if module.params["encryption"]: @@ -385,7 +381,7 @@ def prepare_create_options(module): } c_params["EncryptionInfo"]["EncryptionInTransit"] = { "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"), - "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True) + "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True), } if module.params["authentication"]: @@ -425,12 +421,8 @@ def prepare_open_monitoring_options(module): open_monitoring = module.params["open_monitoring"] or {} m_params["OpenMonitoring"] = { "Prometheus": { - "JmxExporter": { - "EnabledInBroker": open_monitoring.get("jmx_exporter", False) - }, - "NodeExporter": { - "EnabledInBroker": open_monitoring.get("node_exporter", False) - } + "JmxExporter": {"EnabledInBroker": open_monitoring.get("jmx_exporter", False)}, + "NodeExporter": {"EnabledInBroker": open_monitoring.get("node_exporter", False)}, } } return m_params @@ -442,36 +434,26 @@ def prepare_logging_options(module): if logging.get("cloudwatch"): l_params["CloudWatchLogs"] = { "Enabled": module.params["logging"]["cloudwatch"].get("enabled"), - "LogGroup": module.params["logging"]["cloudwatch"].get("log_group") + "LogGroup": module.params["logging"]["cloudwatch"].get("log_group"), } else: - l_params["CloudWatchLogs"] = { - "Enabled": False - } + l_params["CloudWatchLogs"] = {"Enabled": False} if logging.get("firehose"): l_params["Firehose"] = { "Enabled": module.params["logging"]["firehose"].get("enabled"), - 
"DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream") + "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream"), } else: - l_params["Firehose"] = { - "Enabled": False - } + l_params["Firehose"] = {"Enabled": False} if logging.get("s3"): l_params["S3"] = { "Enabled": module.params["logging"]["s3"].get("enabled"), "Bucket": module.params["logging"]["s3"].get("bucket"), - "Prefix": module.params["logging"]["s3"].get("prefix") + "Prefix": module.params["logging"]["s3"].get("prefix"), } else: - l_params["S3"] = { - "Enabled": False - } - return { - "LoggingInfo": { - "BrokerLogs": l_params - } - } + l_params["S3"] = {"Enabled": False} + return {"LoggingInfo": {"BrokerLogs": l_params}} def create_or_update_cluster(client, module): @@ -485,7 +467,6 @@ def create_or_update_cluster(client, module): cluster = find_cluster_by_name(client, module, module.params["name"]) if not cluster: - changed = True if module.check_mode: @@ -505,7 +486,6 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE") else: - response["ClusterArn"] = cluster["ClusterArn"] response["changes"] = {} @@ -514,9 +494,7 @@ def create_or_update_cluster(client, module): "broker_count": { "current_value": cluster["NumberOfBrokerNodes"], "target_value": module.params.get("nodes"), - "update_params": { - "TargetNumberOfBrokerNodes": module.params.get("nodes") - } + "update_params": {"TargetNumberOfBrokerNodes": module.params.get("nodes")}, }, "broker_storage": { "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"], @@ -525,14 +503,12 @@ def create_or_update_cluster(client, module): "TargetBrokerEBSVolumeInfo": [ {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")} ] - } + }, }, "broker_type": { "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], "target_value": module.params.get("instance_type"), - "update_params": { - "TargetInstanceType": module.params.get("instance_type") - } + "update_params": {"TargetInstanceType": module.params.get("instance_type")}, }, "cluster_configuration": { "current_value": { @@ -546,44 +522,37 @@ def create_or_update_cluster(client, module): "update_params": { "ConfigurationInfo": { "Arn": module.params.get("configuration_arn"), - "Revision": module.params.get("configuration_revision") + "Revision": module.params.get("configuration_revision"), } - } + }, }, "cluster_kafka_version": { "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], "target_value": module.params.get("version"), - "update_params": { - "TargetKafkaVersion": module.params.get("version") - } + "update_params": {"TargetKafkaVersion": module.params.get("version")}, }, "enhanced_monitoring": { "current_value": cluster["EnhancedMonitoring"], "target_value": module.params.get("enhanced_monitoring"), "update_method": "update_monitoring", - "update_params": prepare_enhanced_monitoring_options(module) + "update_params": prepare_enhanced_monitoring_options(module), }, "open_monitoring": { - "current_value": { - "OpenMonitoring": cluster["OpenMonitoring"] - }, + "current_value": {"OpenMonitoring": cluster["OpenMonitoring"]}, "target_value": prepare_open_monitoring_options(module), "update_method": "update_monitoring", - "update_params": prepare_open_monitoring_options(module) + "update_params": prepare_open_monitoring_options(module), }, "logging": { - "current_value": { - "LoggingInfo": cluster["LoggingInfo"] - }, + 
"current_value": {"LoggingInfo": cluster["LoggingInfo"]}, "target_value": prepare_logging_options(module), "update_method": "update_monitoring", - "update_params": prepare_logging_options(module) - } + "update_params": prepare_logging_options(module), + }, } for method, options in msk_cluster_changes.items(): - - if 'botocore_version' in options: + if "botocore_version" in options: if not module.botocore_at_least(options["botocore_version"]): continue @@ -612,17 +581,13 @@ def create_or_update_cluster(client, module): ) try: response["changes"][method] = update_method( - ClusterArn=cluster["ClusterArn"], - CurrentVersion=version, - **options["update_params"] + ClusterArn=cluster["ClusterArn"], CurrentVersion=version, **options["update_params"] ) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, "Failed to update cluster via 'update_{0}'".format(method) - ) + module.fail_json_aws(e, "Failed to update cluster via 'update_{0}'".format(method)) if module.params["wait"]: wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") @@ -633,13 +598,13 @@ def create_or_update_cluster(client, module): def update_cluster_tags(client, module, arn): - new_tags = module.params.get('tags') + new_tags = module.params.get("tags") if new_tags is None: return False - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") try: - existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags'] + existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) @@ -659,7 +624,6 @@ def update_cluster_tags(client, module, arn): def delete_cluster(client, module): - cluster = find_cluster_by_name(client, module, module.params["name"]) if module.check_mode: @@ -688,7 +652,6 @@ def delete_cluster(client, module): def main(): - module_args = dict( name=dict(type="str", required=True), state=dict(type="str", choices=["present", "absent"], default="present"), @@ -717,10 +680,7 @@ def main(): type="dict", options=dict( in_cluster=dict(type="bool", default=True), - client_broker=dict( - choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], - default="TLS" - ), + client_broker=dict(choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], default="TLS"), ), ), ), @@ -780,23 +740,21 @@ def main(): ), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", default=3600), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]], - supports_check_mode=True + required_if=[["state", "present", ["version", "configuration_arn", "configuration_revision", "subnets"]]], + supports_check_mode=True, ) client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) if module.params["state"] == "present": if len(module.params["subnets"]) < 2: - module.fail_json( - msg="At least two client subnets should be provided" - ) + module.fail_json(msg="At least two client subnets should be provided") if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0: module.fail_json( msg="The number of 
broker nodes must be a multiple of availability zones in the subnets parameter" @@ -813,9 +771,7 @@ def main(): bootstrap_broker_string = {} if response.get("ClusterArn") and module.params["state"] == "present": try: - cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[ - "ClusterInfo" - ] + cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)["ClusterInfo"] if cluster_info.get("State") == "ACTIVE": brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True) if brokers.get("BootstrapBrokerString"): @@ -828,9 +784,7 @@ def main(): ) as e: module.fail_json_aws( e, - "Can not obtain information about cluster {0}".format( - response["ClusterArn"] - ), + "Can not obtain information about cluster {0}".format(response["ClusterArn"]), ) module.exit_json( diff --git a/msk_config.py b/msk_config.py index 8dce485410f..5b67cd9924f 100644 --- a/msk_config.py +++ b/msk_config.py @@ -143,19 +143,13 @@ def find_active_config(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="failed to obtain kafka configurations") - active_configs = list( - item - for item in all_configs - if item["Name"] == name and item["State"] == "ACTIVE" - ) + active_configs = list(item for item in all_configs if item["Name"] == name and item["State"] == "ACTIVE") if active_configs: if len(active_configs) == 1: return active_configs[0] else: - module.fail_json_aws( - msg="found more than one active config with name '{0}'".format(name) - ) + module.fail_json_aws(msg="found more than one active config with name '{0}'".format(name)) return None @@ -192,7 +186,6 @@ def create_config(client, module): # create new configuration if not config: - if module.check_mode: return True, {} @@ -202,7 +195,7 @@ def create_config(client, module): Description=module.params.get("description"), KafkaVersions=module.params.get("kafka_versions"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True + aws_retry=True, ) except ( botocore.exceptions.BotoCoreError, @@ -213,7 +206,9 @@ def create_config(client, module): # update existing configuration (creates new revision) else: # it's required because 'config' doesn't contain 'ServerProperties' - response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]) + response = get_configuration_revision( + client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"] + ) if not is_configuration_changed(module, response): return False, response @@ -226,7 +221,7 @@ def create_config(client, module): Arn=config["Arn"], Description=module.params.get("description"), ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True + aws_retry=True, ) except ( botocore.exceptions.BotoCoreError, @@ -267,7 +262,6 @@ def delete_config(client, module): def main(): - module_args = dict( name=dict(type="str", required=True), description=dict(type="str", default=""), diff --git a/networkfirewall.py b/networkfirewall.py index 9460701cc9a..2cab7e26dfc 100644 --- a/networkfirewall.py +++ b/networkfirewall.py @@ -274,29 +274,28 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False, aliases=['firewall_name']), - arn=dict(type='str', required=False, aliases=['firewall_arn']), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', 
required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), - subnet_change_protection=dict(type='bool', required=False), - policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']), - delete_protection=dict(type='bool', required=False), - subnets=dict(type='list', elements='str', required=False), - purge_subnets=dict(type='bool', required=False, default=True), - policy=dict(type='str', required=False, aliases=['firewall_policy_arn']), + name=dict(type="str", required=False, aliases=["firewall_name"]), + arn=dict(type="str", required=False, aliases=["firewall_arn"]), + state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), + subnet_change_protection=dict(type="bool", required=False), + policy_change_protection=dict(type="bool", required=False, aliases=["firewall_policy_change_protection"]), + delete_protection=dict(type="bool", required=False), + subnets=dict(type="list", elements="str", required=False), + purge_subnets=dict(type="bool", required=False, default=True), + policy=dict(type="str", required=False, aliases=["firewall_policy_arn"]), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -306,30 +305,30 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = NetworkFirewallManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': - manager.set_delete_protection(module.params.get('delete_protection', None)) + if state == "absent": + manager.set_delete_protection(module.params.get("delete_protection", None)) manager.delete() else: if not manager.original_resource: - if not module.params.get('subnets', None): - module.fail_json('The subnets parameter must be provided on creation.') - if not module.params.get('policy', None): - module.fail_json('The policy parameter must be provided on creation.') - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) - manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None)) - manager.set_policy_change_protection(module.params.get('policy_change_protection', None)) - manager.set_delete_protection(module.params.get('delete_protection', None)) - manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None)) - manager.set_policy(module.params.get('policy', None)) + if not module.params.get("subnets", None): + module.fail_json("The subnets parameter must be provided on creation.") + if not 
module.params.get("policy", None): + module.fail_json("The policy parameter must be provided on creation.") + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) + manager.set_subnet_change_protection(module.params.get("subnet_change_protection", None)) + manager.set_policy_change_protection(module.params.get("policy_change_protection", None)) + manager.set_delete_protection(module.params.get("delete_protection", None)) + manager.set_subnets(module.params.get("subnets", None), module.params.get("purge_subnets", None)) + manager.set_policy(module.params.get("policy", None)) manager.flush_changes() results = dict( @@ -341,9 +340,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/networkfirewall_info.py b/networkfirewall_info.py index 70395f75d9e..262a31067b8 100644 --- a/networkfirewall_info.py +++ b/networkfirewall_info.py @@ -190,24 +190,23 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + vpc_ids=dict(type="list", required=False, elements="str", aliases=["vpcs", "vpc_id"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name', 'vpc_ids',), + ["arn", "name", "vpc_ids"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') - vpcs = module.params.get('vpc_ids') + arn = module.params.get("arn") + name = module.params.get("name") + vpcs = module.params.get("vpc_ids") manager = NetworkFirewallManager(module) @@ -216,20 +215,20 @@ def main(): if name or arn: firewall = manager.get_firewall(name=name, arn=arn) if firewall: - results['firewalls'] = [firewall] + results["firewalls"] = [firewall] else: - results['firewalls'] = [] + results["firewalls"] = [] else: if vpcs: firewall_list = manager.list(vpc_ids=vpcs) else: firewall_list = manager.list() - results['firewall_list'] = firewall_list + results["firewall_list"] = firewall_list firewalls = [manager.get_firewall(arn=f) for f in firewall_list] - results['firewalls'] = firewalls + results["firewalls"] = firewalls module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py index 61affcbc9ab..a1d389fe732 100644 --- a/networkfirewall_policy.py +++ b/networkfirewall_policy.py @@ -340,40 +340,46 @@ def main(): - custom_action_options = dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), # Poorly documented, but "publishMetricAction.dimensions ... 
must have length less than or equal to 1" - publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']), + publish_metric_dimension_value=dict(type="str", required=False, aliases=["publish_metric_dimension_values"]), # NetworkFirewallPolicyManager can cope with a list for future-proofing # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']), ) argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - description=dict(type='str', required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']), - stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']), - stateful_default_actions=dict(type='list', elements='str', required=False), - stateless_default_actions=dict(type='list', elements='str', required=False), - stateless_fragment_default_actions=dict(type='list', elements='str', required=False), - stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']), - stateless_custom_actions=dict(type='list', elements='dict', required=False, - options=custom_action_options, aliases=['custom_stateless_actions']), - purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), + state=dict(type="str", required=False, default="present", choices=["present", "absent"]), + description=dict(type="str", required=False), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + stateful_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateful_groups"]), + stateless_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateless_groups"]), + stateful_default_actions=dict(type="list", elements="str", required=False), + stateless_default_actions=dict(type="list", elements="str", required=False), + stateless_fragment_default_actions=dict(type="list", elements="str", required=False), + stateful_rule_order=dict(type="str", required=False, choices=["strict", "default"], aliases=["rule_order"]), + stateless_custom_actions=dict( + type="list", + elements="dict", + required=False, + options=custom_action_options, + aliases=["custom_stateless_actions"], + ), + purge_stateless_custom_actions=dict( + type="bool", required=False, default=True, aliases=["purge_custom_stateless_actions"] + ), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) mutually_exclusive = [ - ('arn', 'name',) + ["arn", "name"], ] required_one_of = [ - ('arn', 'name',) + ["arn", "name"], ] module = AnsibleAWSModule( @@ -383,36 +389,36 @@ def main(): required_one_of=required_one_of, ) - arn = module.params.get('arn') - name = module.params.get('name') - state = module.params.get('state') + arn = module.params.get("arn") + name = module.params.get("name") + state = module.params.get("state") manager = 
NetworkFirewallPolicyManager(module, name=name, arn=arn) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - rule_order = module.params.get('stateful_rule_order') + rule_order = module.params.get("stateful_rule_order") if rule_order and rule_order != "default": - module.require_botocore_at_least('1.21.52', reason='to set the rule order') - if module.params.get('stateful_default_actions'): - module.require_botocore_at_least( - '1.21.52', reason='to set the default actions for stateful flows') + module.require_botocore_at_least("1.21.52", reason="to set the rule order") + if module.params.get("stateful_default_actions"): + module.require_botocore_at_least("1.21.52", reason="to set the default actions for stateful flows") - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description', None)) - manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None)) + manager.set_description(module.params.get("description", None)) + manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) # Actions need to be defined before potentially consuming them manager.set_custom_stateless_actions( - module.params.get('stateless_custom_actions', None), - module.params.get('purge_stateless_custom_actions', True)), - manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None)) - manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None)) - manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None)) - manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None)) - manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None)) - manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None)) + module.params.get("stateless_custom_actions", None), + module.params.get("purge_stateless_custom_actions", True), + ) + manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None)) + manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None)) + manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None)) + manager.set_stateful_default_actions(module.params.get("stateful_default_actions", None)) + manager.set_stateless_default_actions(module.params.get("stateless_default_actions", None)) + manager.set_stateless_fragment_default_actions(module.params.get("stateless_fragment_default_actions", None)) manager.flush_changes() @@ -425,9 +431,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/networkfirewall_policy_info.py b/networkfirewall_policy_info.py index 9f0de62e119..3bb92174513 100644 --- a/networkfirewall_policy_info.py +++ b/networkfirewall_policy_info.py @@ -218,22 +218,21 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - arn=dict(type='str', required=False), + name=dict(type="str", required=False), + arn=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), + ["arn",
"name"], ], ) - arn = module.params.get('arn') - name = module.params.get('name') + arn = module.params.get("arn") + name = module.params.get("name") manager = NetworkFirewallPolicyManager(module) @@ -242,17 +241,17 @@ def main(): if name or arn: policy = manager.get_policy(name=name, arn=arn) if policy: - results['policies'] = [policy] + results["policies"] = [policy] else: - results['policies'] = [] + results["policies"] = [] else: policy_list = manager.list() - results['policy_list'] = policy_list + results["policy_list"] = policy_list policies = [manager.get_policy(arn=p) for p in policy_list] - results['policies'] = policies + results["policies"] = policies module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py index 2a10b6f4e69..a7800568619 100644 --- a/networkfirewall_rule_group.py +++ b/networkfirewall_rule_group.py @@ -712,101 +712,102 @@ def main(): - domain_list_spec = dict( - domain_names=dict(type='list', elements='str', required=True), - filter_http=dict(type='bool', required=False, default=False), - filter_https=dict(type='bool', required=False, default=False), - action=dict(type='str', required=True, choices=['allow', 'deny']), - source_ips=dict(type='list', elements='str', required=False), + domain_names=dict(type="list", elements="str", required=True), + filter_http=dict(type="bool", required=False, default=False), + filter_https=dict(type="bool", required=False, default=False), + action=dict(type="str", required=True, choices=["allow", "deny"]), + source_ips=dict(type="list", elements="str", required=False), ) rule_list_spec = dict( - action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']), - protocol=dict(type='str', required=True), - source=dict(type='str', required=True), - source_port=dict(type='str', required=True), - direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']), - destination=dict(type='str', required=True), - destination_port=dict(type='str', required=True), - sid=dict(type='int', required=True), - rule_options=dict(type='dict', required=False), + action=dict(type="str", required=True, choices=["pass", "drop", "alert"]), + protocol=dict(type="str", required=True), + source=dict(type="str", required=True), + source_port=dict(type="str", required=True), + direction=dict(type="str", required=False, default="forward", choices=["forward", "any"]), + destination=dict(type="str", required=True), + destination_port=dict(type="str", required=True), + sid=dict(type="int", required=True), + rule_options=dict(type="dict", required=False), ) argument_spec = dict( - arn=dict(type='str', required=False), - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']), + arn=dict(type="str", required=False), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateful"]), # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']), - state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), - capacity=dict(type='int', required=False), - rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']), - description=dict(type='str', required=False), - ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']), - purge_ip_variables=dict(type='bool', 
required=False, aliases=['purge_ip_set_variables'], default=True), - port_variables=dict(type='dict', required=False, aliases=['port_set_variables']), - purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True), - rule_strings=dict(type='list', elements='str', required=False), - domain_list=dict(type='dict', options=domain_list_spec, required=False), - rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False), - tags=dict(type='dict', required=False, aliases=['resource_tags']), - purge_tags=dict(type='bool', required=False, default=True), - wait=dict(type='bool', required=False, default=True), - wait_timeout=dict(type='int', required=False), + state=dict(type="str", required=False, choices=["present", "absent"], default="present"), + capacity=dict(type="int", required=False), + rule_order=dict(type="str", required=False, aliases=["stateful_rule_order"], choices=["default", "strict"]), + description=dict(type="str", required=False), + ip_variables=dict(type="dict", required=False, aliases=["ip_set_variables"]), + purge_ip_variables=dict(type="bool", required=False, aliases=["purge_ip_set_variables"], default=True), + port_variables=dict(type="dict", required=False, aliases=["port_set_variables"]), + purge_port_variables=dict(type="bool", required=False, aliases=["purge_port_set_variables"], default=True), + rule_strings=dict(type="list", elements="str", required=False), + domain_list=dict(type="dict", options=domain_list_spec, required=False), + rule_list=dict( + type="list", elements="dict", aliases=["stateful_rule_list"], options=rule_list_spec, required=False + ), + tags=dict(type="dict", required=False, aliases=["resource_tags"]), + purge_tags=dict(type="bool", required=False, default=True), + wait=dict(type="bool", required=False, default=True), + wait_timeout=dict(type="int", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('name', 'arn'), - ('rule_strings', 'domain_list', 'rule_list'), - ('domain_list', 'ip_variables'), + ["name", "arn"], + ["rule_strings", "domain_list", "rule_list"], + ["domain_list", "ip_variables"], ], required_together=[ - ('name', 'rule_type'), + ["name", "rule_type"], ], required_one_of=[ - ('name', 'arn'), + ["name", "arn"], ], ) - module.require_botocore_at_least('1.19.20') + module.require_botocore_at_least("1.19.20") - state = module.params.get('state') - name = module.params.get('name') - arn = module.params.get('arn') - rule_type = module.params.get('rule_type') + state = module.params.get("state") + name = module.params.get("name") + arn = module.params.get("arn") + rule_type = module.params.get("rule_type") - if rule_type == 'stateless': - if module.params.get('rule_order'): - module.fail_json('rule_order can not be set for stateless rule groups') - if module.params.get('rule_strings'): - module.fail_json('rule_strings can only be used for stateful rule groups') - if module.params.get('rule_list'): - module.fail_json('rule_list can only be used for stateful rule groups') - if module.params.get('domain_list'): - module.fail_json('domain_list can only be used for stateful rule groups') + if rule_type == "stateless": + if module.params.get("rule_order"): + module.fail_json("rule_order can not be set for stateless rule groups") + if module.params.get("rule_strings"): + module.fail_json("rule_strings can only be used for stateful rule groups") + if 
module.params.get("rule_list"): + module.fail_json("rule_list can only be used for stateful rule groups") + if module.params.get("domain_list"): + module.fail_json("domain_list can only be used for stateful rule groups") - if module.params.get('rule_order'): - module.require_botocore_at_least('1.23.23', reason='to set the rule order') + if module.params.get("rule_order"): + module.require_botocore_at_least("1.23.23", reason="to set the rule order") manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type) - manager.set_wait(module.params.get('wait', None)) - manager.set_wait_timeout(module.params.get('wait_timeout', None)) + manager.set_wait(module.params.get("wait", None)) + manager.set_wait_timeout(module.params.get("wait_timeout", None)) - if state == 'absent': + if state == "absent": manager.delete() else: - manager.set_description(module.params.get('description')) - manager.set_capacity(module.params.get('capacity')) - manager.set_rule_order(module.params.get('rule_order')) - manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables')) - manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables')) - manager.set_rule_string(module.params.get('rule_strings')) - manager.set_domain_list(module.params.get('domain_list')) - manager.set_rule_list(module.params.get('rule_list')) - manager.set_tags(module.params.get('tags'), module.params.get('purge_tags')) + manager.set_description(module.params.get("description")) + manager.set_capacity(module.params.get("capacity")) + manager.set_rule_order(module.params.get("rule_order")) + manager.set_ip_variables(module.params.get("ip_variables"), module.params.get("purge_ip_variables")) + manager.set_port_variables(module.params.get("port_variables"), module.params.get("purge_port_variables")) + manager.set_rule_string(module.params.get("rule_strings")) + manager.set_domain_list(module.params.get("domain_list")) + manager.set_rule_list(module.params.get("rule_list")) + manager.set_tags(module.params.get("tags"), module.params.get("purge_tags")) manager.flush_changes() @@ -819,9 +820,9 @@ def main(): before=manager.original_resource, after=manager.updated_resource, ) - results['diff'] = diff + results["diff"] = diff module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py index bcd83070b42..6d2dabe31c5 100644 --- a/networkfirewall_rule_group_info.py +++ b/networkfirewall_rule_group_info.py @@ -393,35 +393,34 @@ def main(): - argument_spec = dict( - name=dict(type='str', required=False), - rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']), - arn=dict(type='str', required=False), - scope=dict(type='str', required=False, choices=['managed', 'account']), + name=dict(type="str", required=False), + rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateless", "stateful"]), + arn=dict(type="str", required=False), + scope=dict(type="str", required=False, choices=["managed", "account"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('arn', 'name',), - ('arn', 'rule_type'), + ["arn", "name"], + ["arn", "rule_type"], ], required_together=[ - ('name', 'rule_type'), - ] + ["name", "rule_type"], + ], ) - module.require_botocore_at_least('1.19.20') + module.require_botocore_at_least("1.19.20") - arn = 
module.params.get('arn') - name = module.params.get('name') - rule_type = module.params.get('rule_type') - scope = module.params.get('scope') + arn = module.params.get("arn") + name = module.params.get("name") + rule_type = module.params.get("rule_type") + scope = module.params.get("scope") - if module.params.get('scope') == 'managed': - module.require_botocore_at_least('1.23.23', reason='to list managed rules') + if module.params.get("scope") == "managed": + module.require_botocore_at_least("1.23.23", reason="to list managed rules") manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type) @@ -430,18 +429,18 @@ def main(): if name or arn: rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn) if rule: - results['rule_groups'] = [rule] + results["rule_groups"] = [rule] else: - results['rule_groups'] = [] + results["rule_groups"] = [] else: rule_list = manager.list(scope=scope) - results['rule_list'] = rule_list - if scope != 'managed': + results["rule_list"] = rule_list + if scope != "managed": rules = [manager.get_rule_group(arn=r) for r in rule_list] - results['rule_groups'] = rules + results["rule_groups"] = rules module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/opensearch.py b/opensearch.py index e6635da499b..88055d1a6dd 100644 --- a/opensearch.py +++ b/opensearch.py @@ -514,16 +514,17 @@ def ensure_domain_absent(client, module): domain = get_domain_status(client, module, domain_name) if module.check_mode: - module.exit_json( - changed=True, msg="Would have deleted domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have deleted domain if not in check mode") try: client.delete_domain(DomainName=domain_name) changed = True except is_boto3_error_code("ResourceNotFoundException"): # The resource does not exist, or it has already been deleted return dict(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="trying to delete domain") # If we're not waiting for a delete to complete then we're all done @@ -535,7 +536,10 @@ def ensure_domain_absent(client, module): return dict(changed=changed) except is_boto3_error_code("ResourceNotFoundException"): return dict(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "awaiting domain deletion") @@ -560,8 +564,11 @@ def upgrade_domain(client, module, source_version, target_engine_version): # It's not possible to upgrade directly to the target version. # Check the module parameters to determine if this is allowed or not. if not module.params.get("allow_intermediate_upgrades"): - module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( - source_version, target_engine_version, next_version)) + module.fail_json( + msg="Cannot upgrade from {0} to version {1}. 
The highest compatible version is {2}".format( + source_version, target_engine_version, next_version + ) + ) parameters = { "DomainName": domain_name, @@ -584,9 +591,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): # raised if it's not possible to upgrade to the target version. module.fail_json_aws( e, - msg="Couldn't upgrade domain {0} from {1} to {2}".format( - domain_name, current_version, next_version - ), + msg="Couldn't upgrade domain {0} from {1} to {2}".format(domain_name, current_version, next_version), ) if module.check_mode: @@ -602,9 +607,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): wait_for_domain_status(client, module, domain_name, "domain_available") -def set_cluster_config( - module, current_domain_config, desired_domain_config, change_set -): +def set_cluster_config(module, current_domain_config, desired_domain_config, change_set): changed = False cluster_config = desired_domain_config["ClusterConfig"] @@ -619,24 +622,16 @@ def set_cluster_config( if cluster_config["ZoneAwarenessEnabled"]: if cluster_opts.get("availability_zone_count") is not None: cluster_config["ZoneAwarenessConfig"] = { - "AvailabilityZoneCount": cluster_opts.get( - "availability_zone_count" - ), + "AvailabilityZoneCount": cluster_opts.get("availability_zone_count"), } if cluster_opts.get("dedicated_master") is not None: - cluster_config["DedicatedMasterEnabled"] = cluster_opts.get( - "dedicated_master" - ) + cluster_config["DedicatedMasterEnabled"] = cluster_opts.get("dedicated_master") if cluster_config["DedicatedMasterEnabled"]: if cluster_opts.get("dedicated_master_instance_type") is not None: - cluster_config["DedicatedMasterType"] = cluster_opts.get( - "dedicated_master_instance_type" - ) + cluster_config["DedicatedMasterType"] = cluster_opts.get("dedicated_master_instance_type") if cluster_opts.get("dedicated_master_instance_count") is not None: - cluster_config["DedicatedMasterCount"] = cluster_opts.get( - "dedicated_master_instance_count" - ) + cluster_config["DedicatedMasterCount"] = cluster_opts.get("dedicated_master_instance_count") if cluster_opts.get("warm_enabled") is not None: cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled") @@ -657,31 +652,20 @@ def set_cluster_config( if cold_storage_opts is not None and cold_storage_opts.get("enabled"): module.fail_json(msg="Cold Storage is not supported") cluster_config.pop("ColdStorageOptions", None) - if ( - current_domain_config is not None - and "ClusterConfig" in current_domain_config - ): + if current_domain_config is not None and "ClusterConfig" in current_domain_config: # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff # will indicate a change must be done. current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None) else: # Elasticsearch 7.9 and above support ColdStorageOptions. 
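# A minimal aside on the change-detection pattern used around this point (a
# sketch with hypothetical values, not part of the patched file): the module's
# "changed" logic relies on plain dict equality between the current and desired
# config blocks, which is why unsupported keys such as ColdStorageOptions are
# popped from the current config before the comparison below.
current = {"InstanceType": "m5.large.search", "ColdStorageOptions": {"Enabled": False}}
desired = {"InstanceType": "m5.large.search"}
assert current != desired  # the stale key alone would flag a spurious change
current.pop("ColdStorageOptions", None)
assert current == desired  # after the pop the comparison stays idempotent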
- if ( - cold_storage_opts is not None - and cold_storage_opts.get("enabled") is not None - ): + if cold_storage_opts is not None and cold_storage_opts.get("enabled") is not None: cluster_config["ColdStorageOptions"] = { "Enabled": cold_storage_opts.get("enabled"), } - if ( - current_domain_config is not None - and current_domain_config["ClusterConfig"] != cluster_config - ): + if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: change_set.append( - "ClusterConfig changed from {0} to {1}".format( - current_domain_config["ClusterConfig"], cluster_config - ) + "ClusterConfig changed from {0} to {1}".format(current_domain_config["ClusterConfig"], cluster_config) ) changed = True return changed @@ -708,22 +692,13 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change if ebs_opts.get("iops") is not None: ebs_config["Iops"] = ebs_opts.get("iops") - if ( - current_domain_config is not None - and current_domain_config["EBSOptions"] != ebs_config - ): - change_set.append( - "EBSOptions changed from {0} to {1}".format( - current_domain_config["EBSOptions"], ebs_config - ) - ) + if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: + change_set.append("EBSOptions changed from {0} to {1}".format(current_domain_config["EBSOptions"], ebs_config)) changed = True return changed -def set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set): changed = False encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"] encryption_at_rest_opts = module.params.get("encryption_at_rest_options") @@ -737,14 +712,11 @@ def set_encryption_at_rest_options( } else: if encryption_at_rest_opts.get("kms_key_id") is not None: - encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get( - "kms_key_id" - ) + encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get("kms_key_id") if ( current_domain_config is not None - and current_domain_config["EncryptionAtRestOptions"] - != encryption_at_rest_config + and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config ): change_set.append( "EncryptionAtRestOptions changed from {0} to {1}".format( @@ -756,25 +728,18 @@ def set_encryption_at_rest_options( return changed -def set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set): changed = False - node_to_node_encryption_config = desired_domain_config[ - "NodeToNodeEncryptionOptions" - ] + node_to_node_encryption_config = desired_domain_config["NodeToNodeEncryptionOptions"] node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options") if node_to_node_encryption_opts is None: return changed if node_to_node_encryption_opts.get("enabled") is not None: - node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get( - "enabled" - ) + node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get("enabled") if ( current_domain_config is not None - and current_domain_config["NodeToNodeEncryptionOptions"] - != node_to_node_encryption_config + and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config ): change_set.append( "NodeToNodeEncryptionOptions changed from {0} to {1}".format( @@ 
-838,9 +803,7 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change pass else: # Note the subnets may be the same but be listed in a different order. - if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set( - vpc_config["SubnetIds"] - ): + if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): change_set.append( "SubnetIds changed from {0} to {1}".format( current_domain_config["VPCOptions"]["SubnetIds"], @@ -848,9 +811,7 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change ) ) changed = True - if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set( - vpc_config["SecurityGroupIds"] - ): + if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): change_set.append( "SecurityGroup changed from {0} to {1}".format( current_domain_config["VPCOptions"]["SecurityGroupIds"], @@ -861,30 +822,21 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change return changed -def set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_snapshot_options(module, current_domain_config, desired_domain_config, change_set): changed = False snapshot_config = desired_domain_config["SnapshotOptions"] snapshot_opts = module.params.get("snapshot_options") if snapshot_opts is None: return changed if snapshot_opts.get("automated_snapshot_start_hour") is not None: - snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get( - "automated_snapshot_start_hour" - ) - if ( - current_domain_config is not None - and current_domain_config["SnapshotOptions"] != snapshot_config - ): + snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get("automated_snapshot_start_hour") + if current_domain_config is not None and current_domain_config["SnapshotOptions"] != snapshot_config: change_set.append("SnapshotOptions changed") changed = True return changed -def set_cognito_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_cognito_options(module, current_domain_config, desired_domain_config, change_set): changed = False cognito_config = desired_domain_config["CognitoOptions"] cognito_opts = module.params.get("cognito_options") @@ -900,28 +852,19 @@ def set_cognito_options( if cognito_opts.get("cognito_user_pool_id") is not None: cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id") if cognito_opts.get("cognito_identity_pool_id") is not None: - cognito_config["IdentityPoolId"] = cognito_opts.get( - "cognito_identity_pool_id" - ) + cognito_config["IdentityPoolId"] = cognito_opts.get("cognito_identity_pool_id") if cognito_opts.get("cognito_role_arn") is not None: cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") - if ( - current_domain_config is not None - and current_domain_config["CognitoOptions"] != cognito_config - ): + if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config: change_set.append( - "CognitoOptions changed from {0} to {1}".format( - current_domain_config["CognitoOptions"], cognito_config - ) + "CognitoOptions changed from {0} to {1}".format(current_domain_config["CognitoOptions"], cognito_config) ) changed = True return changed -def set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set): changed = False 
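# For orientation, a sketch of the request structure set_advanced_security_options
# assembles from the module's snake_case options (keys as used in this function;
# the concrete values here are hypothetical):
advanced_security_example = {
    "Enabled": True,
    "InternalUserDatabaseEnabled": True,
    "MasterUserOptions": {
        "MasterUserName": "admin",  # MasterUserARN is used instead for an IAM master user
        "MasterUserPassword": "change-me",
    },
    "SamlOptions": {"Enabled": False},
}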
advanced_security_config = desired_domain_config["AdvancedSecurityOptions"] advanced_security_opts = module.params.get("advanced_security_options") @@ -935,60 +878,44 @@ def set_advanced_security_options( } else: if advanced_security_opts.get("internal_user_database_enabled") is not None: - advanced_security_config[ - "InternalUserDatabaseEnabled" - ] = advanced_security_opts.get("internal_user_database_enabled") + advanced_security_config["InternalUserDatabaseEnabled"] = advanced_security_opts.get( + "internal_user_database_enabled" + ) master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserARN" - ] = master_user_opts.get("master_user_arn") + advanced_security_config["MasterUserOptions"]["MasterUserARN"] = master_user_opts.get("master_user_arn") if master_user_opts.get("master_user_name") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserName" - ] = master_user_opts.get("master_user_name") + advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts.get( + "master_user_name" + ) if master_user_opts.get("master_user_password") is not None: - advanced_security_config["MasterUserOptions"][ - "MasterUserPassword" - ] = master_user_opts.get("master_user_password") + advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts.get( + "master_user_password" + ) saml_opts = advanced_security_opts.get("saml_options") if saml_opts is not None: if saml_opts.get("enabled") is not None: - advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get( - "enabled" - ) + advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get("enabled") idp_opts = saml_opts.get("idp") if idp_opts is not None: if idp_opts.get("metadata_content") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "MetadataContent" - ] = idp_opts.get("metadata_content") + advanced_security_config["SamlOptions"]["Idp"]["MetadataContent"] = idp_opts.get("metadata_content") if idp_opts.get("entity_id") is not None: - advanced_security_config["SamlOptions"]["Idp"][ - "EntityId" - ] = idp_opts.get("entity_id") + advanced_security_config["SamlOptions"]["Idp"]["EntityId"] = idp_opts.get("entity_id") if saml_opts.get("master_user_name") is not None: - advanced_security_config["SamlOptions"][ - "MasterUserName" - ] = saml_opts.get("master_user_name") + advanced_security_config["SamlOptions"]["MasterUserName"] = saml_opts.get("master_user_name") if saml_opts.get("master_backend_role") is not None: - advanced_security_config["SamlOptions"][ - "MasterBackendRole" - ] = saml_opts.get("master_backend_role") + advanced_security_config["SamlOptions"]["MasterBackendRole"] = saml_opts.get("master_backend_role") if saml_opts.get("subject_key") is not None: - advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get( - "subject_key" - ) + advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get("subject_key") if saml_opts.get("roles_key") is not None: - advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get( - "roles_key" - ) + advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get("roles_key") if saml_opts.get("session_timeout_minutes") is not None: - advanced_security_config["SamlOptions"][ - "SessionTimeoutMinutes" - ] = saml_opts.get("session_timeout_minutes") + 
advanced_security_config["SamlOptions"]["SessionTimeoutMinutes"] = saml_opts.get( + "session_timeout_minutes" + ) if ( current_domain_config is not None @@ -1004,40 +931,27 @@ def set_advanced_security_options( return changed -def set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set): changed = False domain_endpoint_config = desired_domain_config["DomainEndpointOptions"] domain_endpoint_opts = module.params.get("domain_endpoint_options") if domain_endpoint_opts is None: return changed if domain_endpoint_opts.get("enforce_https") is not None: - domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get( - "enforce_https" - ) + domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get("enforce_https") if domain_endpoint_opts.get("tls_security_policy") is not None: - domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get( - "tls_security_policy" - ) + domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get("tls_security_policy") if domain_endpoint_opts.get("custom_endpoint_enabled") is not None: - domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get( - "custom_endpoint_enabled" - ) + domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get("custom_endpoint_enabled") if domain_endpoint_config["CustomEndpointEnabled"]: if domain_endpoint_opts.get("custom_endpoint") is not None: - domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get( - "custom_endpoint" - ) + domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get("custom_endpoint") if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None: - domain_endpoint_config[ - "CustomEndpointCertificateArn" - ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn") + domain_endpoint_config["CustomEndpointCertificateArn"] = domain_endpoint_opts.get( + "custom_endpoint_certificate_arn" + ) - if ( - current_domain_config is not None - and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config - ): + if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: change_set.append( "DomainEndpointOptions changed from {0} to {1}".format( current_domain_config["DomainEndpointOptions"], domain_endpoint_config @@ -1047,9 +961,7 @@ def set_domain_endpoint_options( return changed -def set_auto_tune_options( - module, current_domain_config, desired_domain_config, change_set -): +def set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set): changed = False auto_tune_config = desired_domain_config["AutoTuneOptions"] auto_tune_opts = module.params.get("auto_tune_options") @@ -1080,15 +992,10 @@ def set_auto_tune_options( if duration_opt.get("unit") is not None: schedule_entry["Duration"]["Unit"] = duration_opt.get("unit") if s.get("cron_expression_for_recurrence") is not None: - schedule_entry["CronExpressionForRecurrence"] = s.get( - "cron_expression_for_recurrence" - ) + schedule_entry["CronExpressionForRecurrence"] = s.get("cron_expression_for_recurrence") auto_tune_config["MaintenanceSchedules"].append(schedule_entry) if current_domain_config is not None: - if ( - current_domain_config["AutoTuneOptions"]["DesiredState"] - != auto_tune_config["DesiredState"] - ): + if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: change_set.append( 
"AutoTuneOptions.DesiredState changed from {0} to {1}".format( current_domain_config["AutoTuneOptions"]["DesiredState"], @@ -1096,10 +1003,7 @@ def set_auto_tune_options( ) ) changed = True - if ( - auto_tune_config["MaintenanceSchedules"] - != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"] - ): + if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: change_set.append( "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], @@ -1119,18 +1023,12 @@ def set_access_policy(module, current_domain_config, desired_domain_config, chan try: access_policy_config = json.dumps(access_policy_opt) except Exception as e: - module.fail_json( - msg="Failed to convert the policy into valid JSON: %s" % str(e) - ) + module.fail_json(msg="Failed to convert the policy into valid JSON: %s" % str(e)) if current_domain_config is not None: # Updating existing domain current_access_policy = json.loads(current_domain_config["AccessPolicies"]) if not compare_policies(current_access_policy, access_policy_opt): - change_set.append( - "AccessPolicy changed from {0} to {1}".format( - current_access_policy, access_policy_opt - ) - ) + change_set.append("AccessPolicy changed from {0} to {1}".format(current_access_policy, access_policy_opt)) changed = True desired_domain_config["AccessPolicies"] = access_policy_config else: @@ -1193,53 +1091,26 @@ def ensure_domain_present(client, module): # Validate the engine_version v = parse_version(module.params.get("engine_version")) if v is None: - module.fail_json( - "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y" - ) + module.fail_json("Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y") desired_domain_config["EngineVersion"] = module.params.get("engine_version") changed = False change_set = [] # For check mode purpose - changed |= set_cluster_config( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_ebs_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_encryption_at_rest_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_node_to_node_encryption_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_vpc_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_snapshot_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_cognito_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_advanced_security_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_domain_endpoint_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_auto_tune_options( - module, current_domain_config, desired_domain_config, change_set - ) - changed |= set_access_policy( - module, current_domain_config, desired_domain_config, change_set - ) + changed |= set_cluster_config(module, current_domain_config, desired_domain_config, change_set) + changed |= set_ebs_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, 
change_set) + changed |= set_vpc_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_snapshot_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_cognito_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set) + changed |= set_access_policy(module, current_domain_config, desired_domain_config, change_set) if current_domain_config is not None: - if ( - desired_domain_config["EngineVersion"] - != current_domain_config["EngineVersion"] - ): + if desired_domain_config["EngineVersion"] != current_domain_config["EngineVersion"]: changed = True change_set.append("EngineVersion changed") upgrade_domain( @@ -1263,22 +1134,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) else: # Create new OpenSearch cluster if module.params.get("access_policies") is None: - module.fail_json( - "state is present but the following is missing: access_policies" - ) + module.fail_json("state is present but the following is missing: access_policies") changed = True if module.check_mode: - module.exit_json( - changed=True, msg="Would have created a domain if not in check mode" - ) + module.exit_json(changed=True, msg="Would have created a domain if not in check mode") try: response = client.create_domain(**desired_domain_config) domain = response["DomainStatus"] @@ -1287,22 +1152,16 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws( - e, msg="Couldn't update domain {0}".format(domain_name) - ) + module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) try: - existing_tags = boto3_tag_list_to_ansible_dict( - client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - ) + existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) desired_tags = module.params["tags"] purge_tags = module.params["purge_tags"] - changed |= ensure_tags( - client, module, domain_arn, existing_tags, desired_tags, purge_tags - ) + changed |= ensure_tags(client, module, domain_arn, existing_tags, desired_tags, purge_tags) if module.params.get("wait") and not module.check_mode: wait_for_domain_status(client, module, domain_name, "domain_available") @@ -1313,7 +1172,6 @@ def ensure_domain_present(client, module): def main(): - module = AnsibleAWSModule( argument_spec=dict( state=dict(choices=["present", "absent"], default="present"), diff --git a/opensearch_info.py b/opensearch_info.py index 7d6d8bb94ac..ef49637f5cc 100644 --- a/opensearch_info.py +++ b/opensearch_info.py @@ -456,49 +456,52 @@ def domain_info(client, module): - domain_name = module.params.get('domain_name') - filter_tags = module.params.get('tags') + domain_name = module.params.get("domain_name") + filter_tags = 
module.params.get("tags") domain_list = [] if domain_name: domain_status = get_domain_status(client, module, domain_name) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) else: - domain_summary_list = client.list_domain_names()['DomainNames'] + domain_summary_list = client.list_domain_names()["DomainNames"] for d in domain_summary_list: - domain_status = get_domain_status(client, module, d['DomainName']) + domain_status = get_domain_status(client, module, d["DomainName"]) if domain_status: - domain_list.append({'DomainStatus': domain_status}) + domain_list.append({"DomainStatus": domain_status}) # Get the domain tags for domain in domain_list: current_domain_tags = None - domain_arn = domain['DomainStatus']['ARN'] + domain_arn = domain["DomainStatus"]["ARN"] try: current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags) + domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # This could potentially happen if a domain is deleted between the time # its domain status was queried and the tags were queried. - domain['Tags'] = {} + domain["Tags"] = {} # Filter by tags if filter_tags: for tag_key in filter_tags: try: - domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])] + domain_list = [ + c + for c in domain_list + if ("Tags" in c) and (tag_key in c["Tags"]) and (c["Tags"][tag_key] == filter_tags[tag_key]) + ] except (TypeError, AttributeError) as e: module.fail_json(msg="OpenSearch tag filtering error", exception=e) # Get the domain config for idx, domain in enumerate(domain_list): - domain_name = domain['DomainStatus']['DomainName'] + domain_name = domain["DomainStatus"]["DomainName"] (domain_config, arn) = get_domain_config(client, module, domain_name) if domain_config: - domain['DomainConfig'] = domain_config - domain_list[idx] = camel_dict_to_snake_dict(domain, - ignore_list=['AdvancedOptions', 'Endpoints', 'Tags']) + domain["DomainConfig"] = domain_config + domain_list[idx] = camel_dict_to_snake_dict(domain, ignore_list=["AdvancedOptions", "Endpoints", "Tags"]) return dict(changed=False, domains=domain_list) @@ -507,7 +510,7 @@ def main(): module = AnsibleAWSModule( argument_spec=dict( domain_name=dict(required=False), - tags=dict(type='dict', required=False), + tags=dict(type="dict", required=False), ), supports_check_mode=True, ) @@ -521,5 +524,5 @@ def main(): module.exit_json(**domain_info(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/redshift.py b/redshift.py index e0efbefa02a..61b9e3aeb4a 100644 --- a/redshift.py +++ b/redshift.py @@ -276,10 +276,10 @@ def _ensure_tags(redshift, identifier, existing_tags, module): """Compares and update resource tags""" account_id = get_aws_account_id(module) - region = module.params.get('region') - resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier) - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + region = module.params.get("region") + resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}".format(region, account_id, identifier) + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") tags_to_add, tags_to_remove = 
compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags) @@ -302,78 +302,77 @@ def _ensure_tags(redshift, identifier, existing_tags, module): def _collect_facts(resource): """Transform cluster information to dict.""" facts = { - 'identifier': resource['ClusterIdentifier'], - 'status': resource['ClusterStatus'], - 'username': resource['MasterUsername'], - 'db_name': resource['DBName'], - 'maintenance_window': resource['PreferredMaintenanceWindow'], - 'enhanced_vpc_routing': resource['EnhancedVpcRouting'] - + "identifier": resource["ClusterIdentifier"], + "status": resource["ClusterStatus"], + "username": resource["MasterUsername"], + "db_name": resource["DBName"], + "maintenance_window": resource["PreferredMaintenanceWindow"], + "enhanced_vpc_routing": resource["EnhancedVpcRouting"], } - for node in resource['ClusterNodes']: - if node['NodeRole'] in ('SHARED', 'LEADER'): - facts['private_ip_address'] = node['PrivateIPAddress'] - if facts['enhanced_vpc_routing'] is False: - facts['public_ip_address'] = node['PublicIPAddress'] + for node in resource["ClusterNodes"]: + if node["NodeRole"] in ("SHARED", "LEADER"): + facts["private_ip_address"] = node["PrivateIPAddress"] + if facts["enhanced_vpc_routing"] is False: + facts["public_ip_address"] = node["PublicIPAddress"] else: - facts['public_ip_address'] = None + facts["public_ip_address"] = None break # Some parameters are not ready instantly if you don't wait for available # cluster status - facts['create_time'] = None - facts['url'] = None - facts['port'] = None - facts['availability_zone'] = None - facts['tags'] = {} - - if resource['ClusterStatus'] != "creating": - facts['create_time'] = resource['ClusterCreateTime'] - facts['url'] = resource['Endpoint']['Address'] - facts['port'] = resource['Endpoint']['Port'] - facts['availability_zone'] = resource['AvailabilityZone'] - facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags']) + facts["create_time"] = None + facts["url"] = None + facts["port"] = None + facts["availability_zone"] = None + facts["tags"] = {} + + if resource["ClusterStatus"] != "creating": + facts["create_time"] = resource["ClusterCreateTime"] + facts["url"] = resource["Endpoint"]["Address"] + facts["port"] = resource["Endpoint"]["Port"] + facts["availability_zone"] = resource["AvailabilityZone"] + facts["tags"] = boto3_tag_list_to_ansible_dict(resource["Tags"]) return facts @AWSRetry.jittered_backoff() def _describe_cluster(redshift, identifier): - ''' + """ Basic wrapper around describe_clusters with a retry applied - ''' - return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + """ + return redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] @AWSRetry.jittered_backoff() def _create_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around create_cluster with a retry applied - ''' + """ return redshift.create_cluster(**kwargs) # Simple wrapper around delete, try to avoid throwing an error if some other # operation is in progress -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _delete_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around delete_cluster with a retry applied. Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that we can still delete a cluster if some kind of change operation was in progress. 
- ''' + """ return redshift.delete_cluster(**kwargs) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState']) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) def _modify_cluster(redshift, **kwargs): - ''' + """ Basic wrapper around modify_cluster with a retry applied. Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases where another modification is still in progress - ''' + """ return redshift.modify_cluster(**kwargs) @@ -387,59 +386,71 @@ def create_cluster(module, redshift): Returns: """ - identifier = module.params.get('identifier') - node_type = module.params.get('node_type') - username = module.params.get('username') - password = module.params.get('password') - d_b_name = module.params.get('db_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - tags = module.params.get('tags') + identifier = module.params.get("identifier") + node_type = module.params.get("node_type") + username = module.params.get("username") + password = module.params.get("password") + d_b_name = module.params.get("db_name") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + tags = module.params.get("tags") changed = True # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', - 'cluster_version', 'allow_version_upgrade', - 'number_of_nodes', 'publicly_accessible', 'encrypted', - 'elastic_ip', 'enhanced_vpc_routing'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + "cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "publicly_accessible", + "encrypted", + "elastic_ip", + "enhanced_vpc_routing", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) if d_b_name: - params['d_b_name'] = d_b_name + params["d_b_name"] = d_b_name if tags: tags = ansible_dict_to_boto3_tag_list(tags) - params['tags'] = tags + params["tags"] = tags try: _describe_cluster(redshift, identifier) changed = False - except is_boto3_error_code('ClusterNotFound'): + except is_boto3_error_code("ClusterNotFound"): try: - _create_cluster(redshift, - ClusterIdentifier=identifier, - NodeType=node_type, - MasterUsername=username, - MasterUserPassword=password, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + _create_cluster( + redshift, + ClusterIdentifier=identifier, + NodeType=node_type, + MasterUsername=username, + MasterUserPassword=password, + **snake_dict_to_camel_dict(params, capitalize_first=True), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create cluster") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = 
redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for the cluster creation") try: @@ -448,7 +459,7 @@ def create_cluster(module, redshift): module.fail_json_aws(e, msg="Failed to describe cluster") if tags: - if _ensure_tags(redshift, identifier, resource['Tags'], module): + if _ensure_tags(redshift, identifier, resource["Tags"], module): changed = True resource = _describe_cluster(redshift, identifier) @@ -462,7 +473,7 @@ def describe_cluster(module, redshift): module: Ansible module object redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') + identifier = module.params.get("identifier") try: resource = _describe_cluster(redshift, identifier) @@ -480,13 +491,12 @@ def delete_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") params = {} - for p in ('skip_final_cluster_snapshot', - 'final_cluster_snapshot_identifier'): + for p in ("skip_final_cluster_snapshot", "final_cluster_snapshot_identifier"): if p in module.params: # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: @@ -494,22 +504,21 @@ def delete_cluster(module, redshift): try: _delete_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) - except is_boto3_error_code('ClusterNotFound'): + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) + except is_boto3_error_code("ClusterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete cluster") if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_deleted') + waiter = redshift.get_waiter("cluster_deleted") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout deleting the cluster") @@ -524,62 +533,63 @@ def modify_cluster(module, redshift): redshift: authenticated redshift connection object """ - identifier = module.params.get('identifier') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') + identifier = module.params.get("identifier") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") # Package up the optional parameters params = {} - for p in ('cluster_type', 'cluster_security_groups', - 'vpc_security_group_ids', 'cluster_subnet_group_name', - 'availability_zone', 'preferred_maintenance_window', - 'cluster_parameter_group_name', - 'automated_snapshot_retention_period', 'port', 'cluster_version', - 
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'): + for p in ( + "cluster_type", + "cluster_security_groups", + "vpc_security_group_ids", + "cluster_subnet_group_name", + "availability_zone", + "preferred_maintenance_window", + "cluster_parameter_group_name", + "automated_snapshot_retention_period", + "port", + "cluster_version", + "allow_version_upgrade", + "number_of_nodes", + "new_cluster_identifier", + ): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) # enhanced_vpc_routing parameter change needs an exclusive request - if module.params.get('enhanced_vpc_routing') is not None: + if module.params.get("enhanced_vpc_routing") is not None: try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - EnhancedVpcRouting=module.params.get('enhanced_vpc_routing')) + redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) if wait: attempts = wait_timeout // 60 - waiter = redshift.get_waiter('cluster_available') + waiter = redshift.get_waiter("cluster_available") try: - waiter.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts)) + waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, - msg="Timeout waiting for cluster enhanced vpc routing modification") + module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification") # change the rest try: _modify_cluster( - redshift, - ClusterIdentifier=identifier, - **snake_dict_to_camel_dict(params, capitalize_first=True)) + redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) - if module.params.get('new_cluster_identifier'): - identifier = module.params.get('new_cluster_identifier') + if module.params.get("new_cluster_identifier"): + identifier = module.params.get("new_cluster_identifier") if wait: attempts = wait_timeout // 60 - waiter2 = redshift.get_waiter('cluster_available') + waiter2 = redshift.get_waiter("cluster_available") try: - waiter2.wait( - ClusterIdentifier=identifier, - WaiterConfig=dict(MaxAttempts=attempts) - ) + waiter2.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for cluster modification") try: @@ -587,85 +597,96 @@ def modify_cluster(module, redshift): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) - if _ensure_tags(redshift, identifier, resource['Tags'], module): - resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] + if _ensure_tags(redshift, identifier, resource["Tags"], module): + resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] return True, _collect_facts(resource) def main(): argument_spec = dict( - command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True), + 
command=dict(choices=["create", "facts", "delete", "modify"], required=True), identifier=dict(required=True), - node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', - 'ds2.8xlarge', 'dc1.large', 'dc2.large', - 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', - 'dw2.large', 'dw2.8xlarge'], required=False), + node_type=dict( + choices=[ + "ds1.xlarge", + "ds1.8xlarge", + "ds2.xlarge", + "ds2.8xlarge", + "dc1.large", + "dc2.large", + "dc1.8xlarge", + "dw1.xlarge", + "dw1.8xlarge", + "dw2.large", + "dw2.8xlarge", + ], + required=False, + ), username=dict(required=False), password=dict(no_log=True, required=False), db_name=dict(required=False), - cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'), - cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'), - vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'), - skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], - type='bool', default=False), - final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False), - cluster_subnet_group_name=dict(aliases=['subnet']), - availability_zone=dict(aliases=['aws_zone', 'zone']), - preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']), - cluster_parameter_group_name=dict(aliases=['param_group_name']), - automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'), - port=dict(type='int'), - cluster_version=dict(aliases=['version'], choices=['1.0']), - allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True), - number_of_nodes=dict(type='int'), - publicly_accessible=dict(type='bool', default=False), - encrypted=dict(type='bool', default=False), + cluster_type=dict(choices=["multi-node", "single-node"], default="single-node"), + cluster_security_groups=dict(aliases=["security_groups"], type="list", elements="str"), + vpc_security_group_ids=dict(aliases=["vpc_security_groups"], type="list", elements="str"), + skip_final_cluster_snapshot=dict(aliases=["skip_final_snapshot"], type="bool", default=False), + final_cluster_snapshot_identifier=dict(aliases=["final_snapshot_id"], required=False), + cluster_subnet_group_name=dict(aliases=["subnet"]), + availability_zone=dict(aliases=["aws_zone", "zone"]), + preferred_maintenance_window=dict(aliases=["maintance_window", "maint_window"]), + cluster_parameter_group_name=dict(aliases=["param_group_name"]), + automated_snapshot_retention_period=dict(aliases=["retention_period"], type="int"), + port=dict(type="int"), + cluster_version=dict(aliases=["version"], choices=["1.0"]), + allow_version_upgrade=dict(aliases=["version_upgrade"], type="bool", default=True), + number_of_nodes=dict(type="int"), + publicly_accessible=dict(type="bool", default=False), + encrypted=dict(type="bool", default=False), elastic_ip=dict(required=False), - new_cluster_identifier=dict(aliases=['new_identifier']), - enhanced_vpc_routing=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True) + new_cluster_identifier=dict(aliases=["new_identifier"]), + enhanced_vpc_routing=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) required_if = [ - ('command', 'delete', 
['skip_final_cluster_snapshot']), - ('command', 'create', ['node_type', - 'username', - 'password']) + ("command", "delete", ["skip_final_cluster_snapshot"]), + ("command", "create", ["node_type", "username", "password"]), ] module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=required_if + required_if=required_if, ) - command = module.params.get('command') - skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot') - final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier') + command = module.params.get("command") + skip_final_cluster_snapshot = module.params.get("skip_final_cluster_snapshot") + final_cluster_snapshot_identifier = module.params.get("final_cluster_snapshot_identifier") # can't use module basic required_if check for this case - if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: - module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False") + if command == "delete" and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: + module.fail_json( + msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False" + ) - conn = module.client('redshift') + conn = module.client("redshift") changed = True - if command == 'create': + if command == "create": (changed, cluster) = create_cluster(module, conn) - elif command == 'facts': + elif command == "facts": (changed, cluster) = describe_cluster(module, conn) - elif command == 'delete': + elif command == "delete": (changed, cluster) = delete_cluster(module, conn) - elif command == 'modify': + elif command == "modify": (changed, cluster) = modify_cluster(module, conn) module.exit_json(changed=changed, cluster=cluster) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index f8e0970f65c..f4d895cb1cb 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -89,16 +89,13 @@ class SnapshotController(object): - def __init__(self, client, cluster_name): self.client = client self.cluster_name = cluster_name def get_cluster_snapshot_copy_status(self): - response = self.client.describe_clusters( - ClusterIdentifier=self.cluster_name - ) - return response['Clusters'][0].get('ClusterSnapshotCopyStatus') + response = self.client.describe_clusters(ClusterIdentifier=self.cluster_name) + return response["Clusters"][0].get("ClusterSnapshotCopyStatus") def enable_snapshot_copy(self, destination_region, grant_name, retention_period): if grant_name: @@ -116,78 +113,80 @@ def enable_snapshot_copy(self, destination_region, grant_name, retention_period) ) def disable_snapshot_copy(self): - self.client.disable_snapshot_copy( - ClusterIdentifier=self.cluster_name - ) + self.client.disable_snapshot_copy(ClusterIdentifier=self.cluster_name) def modify_snapshot_copy_retention_period(self, retention_period): self.client.modify_snapshot_copy_retention_period( - ClusterIdentifier=self.cluster_name, - RetentionPeriod=retention_period + ClusterIdentifier=self.cluster_name, RetentionPeriod=retention_period ) def requesting_unsupported_modifications(actual, requested): - if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or - actual['DestinationRegion'] != requested['destination_region']): + if ( + actual["SnapshotCopyGrantName"] != requested["snapshot_copy_grant"] + or 
actual["DestinationRegion"] != requested["destination_region"] + ): return True return False def needs_update(actual, requested): - if actual['RetentionPeriod'] != requested['snapshot_retention_period']: + if actual["RetentionPeriod"] != requested["snapshot_retention_period"]: return True return False def run_module(): argument_spec = dict( - cluster_name=dict(type='str', required=True, aliases=['cluster']), - state=dict(type='str', choices=['present', 'absent'], default='present'), - region=dict(type='str', required=True, aliases=['source']), - destination_region=dict(type='str', required=True, aliases=['destination']), - snapshot_copy_grant=dict(type='str', aliases=['copy_grant']), - snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']), + cluster_name=dict(type="str", required=True, aliases=["cluster"]), + state=dict(type="str", choices=["present", "absent"], default="present"), + region=dict(type="str", required=True, aliases=["source"]), + destination_region=dict(type="str", required=True, aliases=["destination"]), + snapshot_copy_grant=dict(type="str", aliases=["copy_grant"]), + snapshot_retention_period=dict(type="int", required=True, aliases=["retention_period"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) result = dict( changed=False, - message='' + message="", ) - connection = module.client('redshift') + connection = module.client("redshift") - snapshot_controller = SnapshotController(client=connection, - cluster_name=module.params.get('cluster_name')) + snapshot_controller = SnapshotController(client=connection, cluster_name=module.params.get("cluster_name")) current_config = snapshot_controller.get_cluster_snapshot_copy_status() if current_config is not None: - if module.params.get('state') == 'present': + if module.params.get("state") == "present": if requesting_unsupported_modifications(current_config, module.params): - message = 'Cannot modify destination_region or grant_name. ' \ - 'Please disable cross-region snapshots, and re-run.' + message = ( + "Cannot modify destination_region or grant_name. " + "Please disable cross-region snapshots, and re-run." 
+ ) module.fail_json(msg=message, **result) if needs_update(current_config, module.params): - result['changed'] = True + result["changed"] = True if not module.check_mode: snapshot_controller.modify_snapshot_copy_retention_period( - module.params.get('snapshot_retention_period') + module.params.get("snapshot_retention_period") ) else: - result['changed'] = True + result["changed"] = True if not module.check_mode: snapshot_controller.disable_snapshot_copy() else: - if module.params.get('state') == 'present': - result['changed'] = True + if module.params.get("state") == "present": + result["changed"] = True if not module.check_mode: - snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'), - module.params.get('snapshot_copy_grant'), - module.params.get('snapshot_retention_period')) + snapshot_controller.enable_snapshot_copy( + module.params.get("destination_region"), + module.params.get("snapshot_copy_grant"), + module.params.get("snapshot_retention_period"), + ) module.exit_json(**result) @@ -195,5 +194,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/redshift_info.py b/redshift_info.py index 2093dd38ad5..2a346167e24 100644 --- a/redshift_info.py +++ b/redshift_info.py @@ -287,31 +287,29 @@ def match_tags(tags_to_match, cluster): for key, value in tags_to_match.items(): - for tag in cluster['Tags']: - if key == tag['Key'] and value == tag['Value']: + for tag in cluster["Tags"]: + if key == tag["Key"] and value == tag["Value"]: return True return False def find_clusters(conn, module, identifier=None, tags=None): - try: - cluster_paginator = conn.get_paginator('describe_clusters') + cluster_paginator = conn.get_paginator("describe_clusters") clusters = cluster_paginator.paginate().build_full_result() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to fetch clusters.') + module.fail_json_aws(e, msg="Failed to fetch clusters.") matched_clusters = [] if identifier is not None: - identifier_prog = re.compile('^' + identifier) - - for cluster in clusters['Clusters']: + identifier_prog = re.compile("^" + identifier) + for cluster in clusters["Clusters"]: matched_identifier = True if identifier: - matched_identifier = identifier_prog.search(cluster['ClusterIdentifier']) + matched_identifier = identifier_prog.search(cluster["ClusterIdentifier"]) matched_tags = True if tags: @@ -324,24 +322,23 @@ def find_clusters(conn, module, identifier=None, tags=None): def main(): - argument_spec = dict( - cluster_identifier=dict(type='str', aliases=['identifier', 'name']), - tags=dict(type='dict') + cluster_identifier=dict(type="str", aliases=["identifier", "name"]), + tags=dict(type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) - cluster_identifier = module.params.get('cluster_identifier') - cluster_tags = module.params.get('tags') + cluster_identifier = module.params.get("cluster_identifier") + cluster_tags = module.params.get("tags") - redshift = module.client('redshift') + redshift = module.client("redshift") results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py index 724c064cbe9..3d693cc23ac 100644 --- a/redshift_subnet_group.py +++ b/redshift_subnet_group.py @@ -110,10 +110,13 @@ def get_subnet_group(name): groups = 
client.describe_cluster_subnet_groups( aws_retry=True, ClusterSubnetGroupName=name, - )['ClusterSubnetGroups'] - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + )["ClusterSubnetGroups"] + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe subnet group") if not groups: @@ -127,23 +130,22 @@ def get_subnet_group(name): # No support for managing tags yet, but expose them now so that the return # value structure won't need to change once tag management lands in a release. - tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags']) + tags = boto3_tag_list_to_ansible_dict(groups[0]["Tags"]) subnet_group = camel_dict_to_snake_dict(groups[0]) - subnet_group['tags'] = tags - subnet_group['name'] = subnet_group['cluster_subnet_group_name'] + subnet_group["tags"] = tags + subnet_group["name"] = subnet_group["cluster_subnet_group_name"] - subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets']) - subnet_group['subnet_ids'] = subnet_ids + subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"]) + subnet_group["subnet_ids"] = subnet_ids return subnet_group def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg='At least one subnet must be provided when creating a subnet group') + module.fail_json(msg="At least one subnet must be provided when creating a subnet group") if module.check_mode: return True @@ -164,13 +166,13 @@ def create_subnet_group(name, description, subnets): def update_subnet_group(subnet_group, name, description, subnets): update_params = dict() - if description and subnet_group['description'] != description: - update_params['Description'] = description + if description and subnet_group["description"] != description: + update_params["Description"] = description if subnets: - old_subnets = set(subnet_group['subnet_ids']) + old_subnets = set(subnet_group["subnet_ids"]) new_subnets = set(subnets) if old_subnets != new_subnets: - update_params['SubnetIds'] = list(subnets) + update_params["SubnetIds"] = list(subnets) if not update_params: return False @@ -179,8 +181,8 @@ def update_subnet_group(subnet_group, name, description, subnets): return True # Description is optional, SubnetIds is not - if 'SubnetIds' not in update_params: - update_params['SubnetIds'] = subnet_group['subnet_ids'] + if "SubnetIds" not in update_params: + update_params["SubnetIds"] = subnet_group["subnet_ids"] try: client.modify_cluster_subnet_group( @@ -195,7 +197,6 @@ def update_subnet_group(subnet_group, name, description, subnets): def delete_subnet_group(name): - if module.check_mode: return True @@ -205,20 +206,23 @@ def delete_subnet_group(name): ClusterSubnetGroupName=name, ) return True - except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'): + except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): # AWS is "eventually consistent", cope with the race conditions where # deletion hadn't completed when we ran describe return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e,
msg="Failed to delete subnet group") def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, aliases=['group_name']), - description=dict(required=False, aliases=['group_description']), - subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, aliases=["group_name"]), + description=dict(required=False, aliases=["group_description"]), + subnets=dict(required=False, aliases=["group_subnets"], type="list", elements="str"), ) global module @@ -229,17 +233,17 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name') - description = module.params.get('description') - subnets = module.params.get('subnets') + state = module.params.get("state") + name = module.params.get("name") + description = module.params.get("description") + subnets = module.params.get("subnets") - client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("redshift", retry_decorator=AWSRetry.jittered_backoff()) subnet_group = get_subnet_group(name) changed = False - if state == 'present': + if state == "present": if not subnet_group: result = create_subnet_group(name, description, subnets) changed |= result @@ -255,9 +259,9 @@ def main(): compat_results = dict() if subnet_group: - compat_results['group'] = dict( - name=subnet_group['name'], - vpc_id=subnet_group['vpc_id'], + compat_results["group"] = dict( + name=subnet_group["name"], + vpc_id=subnet_group["vpc_id"], ) module.exit_json( @@ -267,5 +271,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_bucket_info.py b/s3_bucket_info.py index 20995539c45..ee4c0e2dd3f 100644 --- a/s3_bucket_info.py +++ b/s3_bucket_info.py @@ -424,18 +424,18 @@ def get_bucket_list(module, connection, name="", name_filter=""): # Get all buckets try: - buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets'] + buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: module.fail_json_aws(err_code, msg="Failed to list buckets") # Filter buckets if requested if name_filter: for bucket in buckets: - if name_filter in bucket['name']: + if name_filter in bucket["name"]: filtered_buckets.append(bucket) elif name: for bucket in buckets: - if name == bucket['name']: + if name == bucket["name"]: filtered_buckets.append(bucket) # Return proper list (filtered or all) @@ -453,7 +453,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location): full_bucket_list = [] # Iterate over all buckets and append retrived facts to bucket for bucket in buckets: - bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location)) + bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location)) full_bucket_list.append(bucket) return full_bucket_list @@ -467,14 +467,14 @@ def get_bucket_details(connection, name, requested_facts, transform_location): for key in requested_facts: if requested_facts[key]: - if key == 'bucket_location': + if key == "bucket_location": all_facts[key] = {} try: all_facts[key] = get_bucket_location(name, connection, transform_location) # we just pass on error - error means that resources is undefined except botocore.exceptions.ClientError: pass - elif key == 
'bucket_tagging': + elif key == "bucket_tagging": all_facts[key] = {} try: all_facts[key] = get_bucket_tagging(name, connection) @@ -492,7 +492,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location): return all_facts -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_location(name, connection, transform_location=False): """ Get bucket location and optionally transform 'null' to 'us-east-1' @@ -502,16 +502,16 @@ def get_bucket_location(name, connection, transform_location=False): # Replace 'null' with 'us-east-1'? if transform_location: try: - if not data['LocationConstraint']: - data['LocationConstraint'] = 'us-east-1' + if not data["LocationConstraint"]: + data["LocationConstraint"] = "us-east-1" except KeyError: pass # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_tagging(name, connection): """ Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function @@ -519,15 +519,15 @@ def get_bucket_tagging(name, connection): data = connection.get_bucket_tagging(Bucket=name) try: - bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet']) + bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"]) return bucket_tags except KeyError: # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_property(name, connection, get_api_name): """ Get bucket property @@ -537,7 +537,7 @@ def get_bucket_property(name, connection, get_api_name): data = api_function(Bucket=name) # Strip response metadata (not needed) - data.pop('ResponseMetadata', None) + data.pop("ResponseMetadata", None) return data @@ -547,27 +547,30 @@ def main(): :return: """ argument_spec = dict( - name=dict(type='str', default=""), - name_filter=dict(type='str', default=""), - bucket_facts=dict(type='dict', options=dict( - bucket_accelerate_configuration=dict(type='bool', default=False), - bucket_acl=dict(type='bool', default=False), - bucket_cors=dict(type='bool', default=False), - bucket_encryption=dict(type='bool', default=False), - bucket_lifecycle_configuration=dict(type='bool', default=False), - bucket_location=dict(type='bool', default=False), - bucket_logging=dict(type='bool', default=False), - bucket_notification_configuration=dict(type='bool', default=False), - bucket_ownership_controls=dict(type='bool', default=False), - bucket_policy=dict(type='bool', default=False), - bucket_policy_status=dict(type='bool', default=False), - bucket_replication=dict(type='bool', default=False), - bucket_request_payment=dict(type='bool', default=False), - bucket_tagging=dict(type='bool', default=False), - bucket_website=dict(type='bool', default=False), - public_access_block=dict(type='bool', default=False), - )), - transform_location=dict(type='bool', default=False) + name=dict(type="str", default=""), + name_filter=dict(type="str", 
default=""), + bucket_facts=dict( + type="dict", + options=dict( + bucket_accelerate_configuration=dict(type="bool", default=False), + bucket_acl=dict(type="bool", default=False), + bucket_cors=dict(type="bool", default=False), + bucket_encryption=dict(type="bool", default=False), + bucket_lifecycle_configuration=dict(type="bool", default=False), + bucket_location=dict(type="bool", default=False), + bucket_logging=dict(type="bool", default=False), + bucket_notification_configuration=dict(type="bool", default=False), + bucket_ownership_controls=dict(type="bool", default=False), + bucket_policy=dict(type="bool", default=False), + bucket_policy_status=dict(type="bool", default=False), + bucket_replication=dict(type="bool", default=False), + bucket_request_payment=dict(type="bool", default=False), + bucket_tagging=dict(type="bool", default=False), + bucket_website=dict(type="bool", default=False), + public_access_block=dict(type="bool", default=False), + ), + ), + transform_location=dict(type="bool", default=False), ) # Ensure we have an empty dict @@ -575,11 +578,15 @@ def main(): # Define mutually exclusive options mutually_exclusive = [ - ['name', 'name_filter'] + ["name", "name_filter"], ] # Including ec2 argument spec - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) # Get parameters name = module.params.get("name") @@ -590,29 +597,29 @@ def main(): # Set up connection connection = {} try: - connection = module.client('s3') + connection = module.client("s3") except (connection.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: - module.fail_json_aws(err_code, msg='Failed to connect to AWS') + module.fail_json_aws(err_code, msg="Failed to connect to AWS") # Get basic bucket list (name + creation date) bucket_list = get_bucket_list(module, connection, name, name_filter) # Add information about name/name_filter to result if name: - result['bucket_name'] = name + result["bucket_name"] = name elif name_filter: - result['bucket_name_filter'] = name_filter + result["bucket_name_filter"] = name_filter # Gather detailed information about buckets if requested bucket_facts = module.params.get("bucket_facts") if bucket_facts: - result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) + result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) else: - result['buckets'] = bucket_list + result["buckets"] = bucket_list module.exit_json(msg="Retrieved s3 info.", **result) # MAIN -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 0a8109b2adb..9ba6e5e6799 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -171,36 +171,33 @@ class AmazonBucket: def __init__(self, module, client): self.module = module self.client = client - self.bucket_name = module.params['bucket_name'] + self.bucket_name = module.params["bucket_name"] self.check_mode = module.check_mode self._full_config_cache = None def full_config(self): if self._full_config_cache is None: self._full_config_cache = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] + QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[] ) try: - config_lookup = 
self.client.get_bucket_notification_configuration( - Bucket=self.bucket_name) + config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg="{0}".format(e)) # Handle different event targets - if config_lookup.get('QueueConfigurations'): - for queue_config in config_lookup.get('QueueConfigurations'): - self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config)) + if config_lookup.get("QueueConfigurations"): + for queue_config in config_lookup.get("QueueConfigurations"): + self._full_config_cache["QueueConfigurations"].append(Config.from_api(queue_config)) - if config_lookup.get('TopicConfigurations'): - for topic_config in config_lookup.get('TopicConfigurations'): - self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config)) + if config_lookup.get("TopicConfigurations"): + for topic_config in config_lookup.get("TopicConfigurations"): + self._full_config_cache["TopicConfigurations"].append(Config.from_api(topic_config)) - if config_lookup.get('LambdaFunctionConfigurations'): - for function_config in config_lookup.get('LambdaFunctionConfigurations'): - self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config)) + if config_lookup.get("LambdaFunctionConfigurations"): + for function_config in config_lookup.get("LambdaFunctionConfigurations"): + self._full_config_cache["LambdaFunctionConfigurations"].append(Config.from_api(function_config)) return self._full_config_cache @@ -208,70 +205,59 @@ def current_config(self, config_name): # Iterate through configs and get current event config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.raw['Id'] == config_name: + if config.raw["Id"] == config_name: return config def apply_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs then add the desired config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != desired.raw["Id"]: configs[target_configs].append(config.raw) - if self.module.params.get('queue_arn'): - configs['QueueConfigurations'].append(desired.raw) - if self.module.params.get('topic_arn'): - configs['TopicConfigurations'].append(desired.raw) - if self.module.params.get('lambda_function_arn'): - configs['LambdaFunctionConfigurations'].append(desired.raw) + if self.module.params.get("queue_arn"): + configs["QueueConfigurations"].append(desired.raw) + if self.module.params.get("topic_arn"): + configs["TopicConfigurations"].append(desired.raw) + if self.module.params.get("lambda_function_arn"): + configs["LambdaFunctionConfigurations"].append(desired.raw) self._upload_bucket_config(configs) return configs def delete_config(self, desired): - configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) # Iterate through existing configs omitting specified config for target_configs in self.full_config(): for config in self.full_config()[target_configs]: - if config.name != desired.raw['Id']: + if config.name != 
desired.raw["Id"]: configs[target_configs].append(config.raw) self._upload_bucket_config(configs) return configs def _upload_bucket_config(self, configs): - api_params = dict( - Bucket=self.bucket_name, - NotificationConfiguration=dict() - ) + api_params = dict(Bucket=self.bucket_name, NotificationConfiguration=dict()) # Iterate through available configs for target_configs in configs: if len(configs[target_configs]) > 0: - api_params['NotificationConfiguration'][target_configs] = configs[target_configs] + api_params["NotificationConfiguration"][target_configs] = configs[target_configs] if not self.check_mode: try: self.client.put_bucket_notification_configuration(**api_params) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg='{0}'.format(e)) + self.module.fail_json(msg="{0}".format(e)) class Config: def __init__(self, content): self._content = content - self.name = content.get('Id') + self.name = content.get("Id") @property def raw(self): @@ -287,41 +273,35 @@ def from_params(cls, **params): """Generate bucket notification params for target""" bucket_event_params = dict( - Id=params['event_name'], - Events=sorted(params['events']), + Id=params["event_name"], + Events=sorted(params["events"]), Filter=dict( Key=dict( FilterRules=[ - dict( - Name='Prefix', - Value=params['prefix'] - ), - dict( - Name='Suffix', - Value=params['suffix'] - ) + dict(Name="Prefix", Value=params["prefix"]), + dict(Name="Suffix", Value=params["suffix"]), ] ) - ) + ), ) # Handle different event targets - if params.get('queue_arn'): - bucket_event_params['QueueArn'] = params['queue_arn'] - if params.get('topic_arn'): - bucket_event_params['TopicArn'] = params['topic_arn'] - if params.get('lambda_function_arn'): - function_arn = params['lambda_function_arn'] + if params.get("queue_arn"): + bucket_event_params["QueueArn"] = params["queue_arn"] + if params.get("topic_arn"): + bucket_event_params["TopicArn"] = params["topic_arn"] + if params.get("lambda_function_arn"): + function_arn = params["lambda_function_arn"] qualifier = None - if params['lambda_version'] > 0: - qualifier = str(params['lambda_version']) - elif params['lambda_alias']: - qualifier = str(params['lambda_alias']) + if params["lambda_version"] > 0: + qualifier = str(params["lambda_version"]) + elif params["lambda_alias"]: + qualifier = str(params["lambda_alias"]) if qualifier: - params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + params["lambda_function_arn"] = "{0}:{1}".format(function_arn, qualifier) - bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn'] + bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] return cls(bucket_event_params) @@ -331,66 +311,70 @@ def from_api(cls, config): def setup_module_object(): - event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', - 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', - 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', - 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', - 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] + event_types = [ + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:ReducedRedundancyLostObject", + ] argument_spec = dict( - state=dict(default='present', 
choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), event_name=dict(required=True), - lambda_function_arn=dict(aliases=['function_arn']), - queue_arn=dict(type='str'), - topic_arn=dict(type='str'), + lambda_function_arn=dict(aliases=["function_arn"]), + queue_arn=dict(type="str"), + topic_arn=dict(type="str"), bucket_name=dict(required=True), - events=dict(type='list', default=[], choices=event_types, elements='str'), - prefix=dict(default=''), - suffix=dict(default=''), + events=dict(type="list", default=[], choices=event_types, elements="str"), + prefix=dict(default=""), + suffix=dict(default=""), lambda_alias=dict(), - lambda_version=dict(type='int', default=0), + lambda_version=dict(type="int", default=0), ) mutually_exclusive = [ - ['queue_arn', 'topic_arn', 'lambda_function_arn'], - ['lambda_alias', 'lambda_version'] + ["queue_arn", "topic_arn", "lambda_function_arn"], + ["lambda_alias", "lambda_version"], ] return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive, - required_if=[['state', 'present', ['events']]] + required_if=[["state", "present", ["events"]]], ) def main(): module = setup_module_object() - client = module.client('s3') + client = module.client("s3") bucket = AmazonBucket(module, client) - current = bucket.current_config(module.params['event_name']) + current = bucket.current_config(module.params["event_name"]) desired = Config.from_params(**module.params) - notification_configs = dict( - QueueConfigurations=[], - TopicConfigurations=[], - LambdaFunctionConfigurations=[] - ) + notification_configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) for target_configs in bucket.full_config(): for cfg in bucket.full_config()[target_configs]: notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw)) - state = module.params['state'] + state = module.params["state"] updated_configuration = dict() changed = False - if state == 'present': + if state == "present": if current != desired: updated_configuration = bucket.apply_config(desired) changed = True - elif state == 'absent': + elif state == "absent": if current: updated_configuration = bucket.delete_config(desired) changed = True @@ -400,9 +384,8 @@ def main(): for cfg in updated_configuration.get(target_configs, list()): notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg)) - module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict( - notification_configs)) + module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(notification_configs)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_cors.py b/s3_cors.py index 797c8cc5050..0d92ba56eac 100644 --- a/s3_cors.py +++ b/s3_cors.py @@ -109,13 +109,12 @@ def create_or_update_bucket_cors(connection, module): - name = module.params.get("name") rules = module.params.get("rules", []) changed = False try: - current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules'] + current_camel_rules = connection.get_bucket_cors(Bucket=name)["CORSRules"] except ClientError: current_camel_rules = [] @@ -126,7 +125,7 @@ def create_or_update_bucket_cors(connection, module): if changed: try: - cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules}) + cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, 
ClientError) as e: module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) @@ -134,7 +133,6 @@ def create_or_update_bucket_cors(connection, module): def destroy_bucket_cors(connection, module): - name = module.params.get("name") changed = False @@ -148,24 +146,23 @@ def destroy_bucket_cors(connection, module): def main(): - argument_spec = dict( - name=dict(required=True, type='str'), - rules=dict(type='list', elements='dict'), - state=dict(type='str', choices=['present', 'absent'], required=True) + name=dict(required=True, type="str"), + rules=dict(type="list", elements="dict"), + state=dict(type="str", choices=["present", "absent"], required=True), ) module = AnsibleAWSModule(argument_spec=argument_spec) - client = module.client('s3') + client = module.client("s3") state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_bucket_cors(client, module) - elif state == 'absent': + elif state == "absent": destroy_bucket_cors(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_lifecycle.py b/s3_lifecycle.py index b01402ebdb5..9d5ea3b8ff6 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -262,10 +262,13 @@ def fetch_rules(client, module, name): # Get the bucket's current lifecycle rules try: current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name) - current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules']) - except is_boto3_error_code('NoSuchLifecycleConfiguration'): + current_lifecycle_rules = normalize_boto3_result(current_lifecycle["Rules"]) + except is_boto3_error_code("NoSuchLifecycleConfiguration"): current_lifecycle_rules = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) return current_lifecycle_rules @@ -292,58 +295,63 @@ def build_rule(client, module): rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) if rule_id is not None: - rule['ID'] = rule_id + rule["ID"] = rule_id if abort_incomplete_multipart_upload_days: - rule['AbortIncompleteMultipartUpload'] = { - 'DaysAfterInitiation': abort_incomplete_multipart_upload_days - } + rule["AbortIncompleteMultipartUpload"] = {"DaysAfterInitiation": abort_incomplete_multipart_upload_days} # Create expiration if expiration_days is not None: - rule['Expiration'] = dict(Days=expiration_days) + rule["Expiration"] = dict(Days=expiration_days) elif expiration_date is not None: - rule['Expiration'] = dict(Date=expiration_date.isoformat()) + rule["Expiration"] = dict(Date=expiration_date.isoformat()) elif expire_object_delete_marker is not None: - rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) + rule["Expiration"] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) if noncurrent_version_expiration_days or noncurrent_version_keep_newer: - rule['NoncurrentVersionExpiration'] = dict() + rule["NoncurrentVersionExpiration"] = dict() if noncurrent_version_expiration_days is not None: - rule['NoncurrentVersionExpiration']['NoncurrentDays'] = noncurrent_version_expiration_days + rule["NoncurrentVersionExpiration"]["NoncurrentDays"] = noncurrent_version_expiration_days if noncurrent_version_keep_newer is not None: - rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer + 
rule["NoncurrentVersionExpiration"]["NewerNoncurrentVersions"] = noncurrent_version_keep_newer if transition_days is not None: - rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Days=transition_days, StorageClass=storage_class.upper()), + ] elif transition_date is not None: - rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ] + rule["Transitions"] = [ + dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), + ] if transitions is not None: - if not rule.get('Transitions'): - rule['Transitions'] = [] + if not rule.get("Transitions"): + rule["Transitions"] = [] for transition in transitions: t_out = dict() - if transition.get('transition_date'): - t_out['Date'] = transition['transition_date'] - elif transition.get('transition_days') is not None: - t_out['Days'] = transition['transition_days'] - if transition.get('storage_class'): - t_out['StorageClass'] = transition['storage_class'].upper() - rule['Transitions'].append(t_out) + if transition.get("transition_date"): + t_out["Date"] = transition["transition_date"] + elif transition.get("transition_days") is not None: + t_out["Days"] = transition["transition_days"] + if transition.get("storage_class"): + t_out["StorageClass"] = transition["storage_class"].upper() + rule["Transitions"].append(t_out) if noncurrent_version_transition_days is not None: - rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days, - StorageClass=noncurrent_version_storage_class.upper()), ] + rule["NoncurrentVersionTransitions"] = [ + dict( + NoncurrentDays=noncurrent_version_transition_days, StorageClass=noncurrent_version_storage_class.upper() + ), + ] if noncurrent_version_transitions is not None: - if not rule.get('NoncurrentVersionTransitions'): - rule['NoncurrentVersionTransitions'] = [] + if not rule.get("NoncurrentVersionTransitions"): + rule["NoncurrentVersionTransitions"] = [] for noncurrent_version_transition in noncurrent_version_transitions: t_out = dict() - t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days'] - if noncurrent_version_transition.get('storage_class'): - t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper() - rule['NoncurrentVersionTransitions'].append(t_out) + t_out["NoncurrentDays"] = noncurrent_version_transition["transition_days"] + if noncurrent_version_transition.get("storage_class"): + t_out["StorageClass"] = noncurrent_version_transition["storage_class"].upper() + rule["NoncurrentVersionTransitions"].append(t_out) return rule @@ -360,23 +368,29 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru if current_lifecycle_rules: # If rule ID exists, use that for comparison otherwise compare based on prefix for existing_rule in current_lifecycle_rules: - if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''): - existing_rule.pop('ID') - if rule.get('ID') == existing_rule.get('ID'): - changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration) + if rule.get("ID") == existing_rule.get("ID") and rule["Filter"].get("Prefix", "") != existing_rule.get( + "Filter", {} + ).get("Prefix", ""): + existing_rule.pop("ID") + elif 
rule_id is None and rule["Filter"].get("Prefix", "") == existing_rule.get("Filter", {}).get( + "Prefix", "" + ): + existing_rule.pop("ID") + if rule.get("ID") == existing_rule.get("ID"): + changed_, appended_ = update_or_append_rule( + rule, existing_rule, purge_transitions, lifecycle_configuration + ) changed = changed_ or changed appended = appended_ or appended else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) # If nothing appended then append now as the rule must not exist if not appended: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True else: - lifecycle_configuration['Rules'].append(rule) + lifecycle_configuration["Rules"].append(rule) changed = True return changed, lifecycle_configuration @@ -384,24 +398,24 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): changed = False - if existing_rule['Status'] != new_rule['Status']: - if not new_rule.get('Transitions') and existing_rule.get('Transitions'): - new_rule['Transitions'] = existing_rule['Transitions'] - if not new_rule.get('Expiration') and existing_rule.get('Expiration'): - new_rule['Expiration'] = existing_rule['Expiration'] - if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'): - new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration'] - lifecycle_obj['Rules'].append(new_rule) + if existing_rule["Status"] != new_rule["Status"]: + if not new_rule.get("Transitions") and existing_rule.get("Transitions"): + new_rule["Transitions"] = existing_rule["Transitions"] + if not new_rule.get("Expiration") and existing_rule.get("Expiration"): + new_rule["Expiration"] = existing_rule["Expiration"] + if not new_rule.get("NoncurrentVersionExpiration") and existing_rule.get("NoncurrentVersionExpiration"): + new_rule["NoncurrentVersionExpiration"] = existing_rule["NoncurrentVersionExpiration"] + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True else: if not purge_transitions: merge_transitions(new_rule, existing_rule) if compare_rule(new_rule, existing_rule, purge_transitions): - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) appended = True else: - lifecycle_obj['Rules'].append(new_rule) + lifecycle_obj["Rules"].append(new_rule) changed = True appended = True return changed, appended @@ -415,24 +429,23 @@ def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): # If an ID exists, use that otherwise compare based on prefix if rule_id is not None: for existing_rule in current_lifecycle_rules: - if rule_id == existing_rule['ID']: + if rule_id == existing_rule["ID"]: # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) else: for existing_rule in current_lifecycle_rules: - if prefix == existing_rule['Filter'].get('Prefix', ''): + if prefix == existing_rule["Filter"].get("Prefix", ""): # We're not keeping the rule (i.e. 
deleting) so mark as changed changed = True else: - lifecycle_configuration['Rules'].append(existing_rule) + lifecycle_configuration["Rules"].append(existing_rule) return changed, lifecycle_configuration def compare_rule(new_rule, old_rule, purge_transitions): - # Copy objects rule1 = deepcopy(new_rule) rule2 = deepcopy(old_rule) @@ -440,10 +453,10 @@ def compare_rule(new_rule, old_rule, purge_transitions): if purge_transitions: return rule1 == rule2 else: - transitions1 = rule1.pop('Transitions', []) - transitions2 = rule2.pop('Transitions', []) - noncurrent_transtions1 = rule1.pop('NoncurrentVersionTransitions', []) - noncurrent_transtions2 = rule2.pop('NoncurrentVersionTransitions', []) + transitions1 = rule1.pop("Transitions", []) + transitions2 = rule2.pop("Transitions", []) + noncurrent_transtions1 = rule1.pop("NoncurrentVersionTransitions", []) + noncurrent_transtions2 = rule2.pop("NoncurrentVersionTransitions", []) if rule1 != rule2: return False for transition in transitions1: @@ -461,38 +474,39 @@ def merge_transitions(updated_rule, updating_rule): # in updating_rule to updated_rule updated_transitions = {} updating_transitions = {} - for transition in updated_rule.get('Transitions', []): - updated_transitions[transition['StorageClass']] = transition - for transition in updating_rule.get('Transitions', []): - updating_transitions[transition['StorageClass']] = transition + for transition in updated_rule.get("Transitions", []): + updated_transitions[transition["StorageClass"]] = transition + for transition in updating_rule.get("Transitions", []): + updating_transitions[transition["StorageClass"]] = transition for storage_class, transition in updating_transitions.items(): if updated_transitions.get(storage_class) is None: - updated_rule['Transitions'].append(transition) + updated_rule["Transitions"].append(transition) def create_lifecycle_rule(client, module): - name = module.params.get("name") wait = module.params.get("wait") changed = False old_lifecycle_rules = fetch_rules(client, module, name) new_rule = build_rule(client, module) - (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - old_lifecycle_rules, - new_rule) + (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) if changed: # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration + ) + except is_boto3_error_message("At least one action needs to be specified in a rule"): # Amazon interpreted this as not changing anything changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules + ) _changed = changed _retries = 10 @@ -505,9 +519,7 @@ def create_lifecycle_rule(client, module): time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = 
compare_and_update_configuration(client, module, - new_rules, - new_rule) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) if not _changed: _not_changed_cnt -= 1 _changed = True @@ -518,13 +530,17 @@ def create_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, - old_rules=old_lifecycle_rules, _retries=_retries, - _config=lifecycle_configuration) + module.exit_json( + changed=changed, + new_rule=new_rule, + rules=new_rules, + old_rules=old_lifecycle_rules, + _retries=_retries, + _config=lifecycle_configuration, + ) def destroy_lifecycle_rule(client, module): - name = module.params.get("name") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") @@ -540,11 +556,10 @@ def destroy_lifecycle_rule(client, module): if changed: # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: - if lifecycle_obj['Rules']: + if lifecycle_obj["Rules"]: client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) + aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_obj + ) elif current_lifecycle_rules: changed = True client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) @@ -573,33 +588,32 @@ def destroy_lifecycle_rule(client, module): new_rules = fetch_rules(client, module, name) - module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, - _retries=_retries) + module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, _retries=_retries) def main(): - s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] + s3_storage_class = ["glacier", "onezone_ia", "standard_ia", "intelligent_tiering", "deep_archive"] argument_spec = dict( - name=dict(required=True, type='str'), - abort_incomplete_multipart_upload_days=dict(type='int'), - expiration_days=dict(type='int'), + name=dict(required=True, type="str"), + abort_incomplete_multipart_upload_days=dict(type="int"), + expiration_days=dict(type="int"), expiration_date=dict(), - expire_object_delete_marker=dict(type='bool'), - noncurrent_version_expiration_days=dict(type='int'), - noncurrent_version_keep_newer=dict(type='int'), - noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - noncurrent_version_transition_days=dict(type='int'), - noncurrent_version_transitions=dict(type='list', elements='dict'), + expire_object_delete_marker=dict(type="bool"), + noncurrent_version_expiration_days=dict(type="int"), + noncurrent_version_keep_newer=dict(type="int"), + noncurrent_version_storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + noncurrent_version_transition_days=dict(type="int"), + noncurrent_version_transitions=dict(type="list", elements="dict"), prefix=dict(), rule_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - status=dict(default='enabled', choices=['enabled', 'disabled']), - storage_class=dict(default='glacier', type='str', choices=s3_storage_class), - transition_days=dict(type='int'), + state=dict(default="present", choices=["present", "absent"]), + status=dict(default="enabled", choices=["enabled", "disabled"]), + storage_class=dict(default="glacier", type="str", choices=s3_storage_class), + transition_days=dict(type="int"), transition_date=dict(), - transitions=dict(type='list', 
elements='dict'), - purge_transitions=dict(default=True, type='bool'), - wait=dict(type='bool', default=False) + transitions=dict(type="list", elements="dict"), + purge_transitions=dict(default=True, type="bool"), + wait=dict(type="bool", default=False), ) module = AnsibleAWSModule( @@ -618,7 +632,7 @@ def main(): }, ) - client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) expiration_date = module.params.get("expiration_date") transition_date = module.params.get("transition_date") @@ -626,43 +640,51 @@ def main(): if module.params.get("noncurrent_version_keep_newer"): module.require_botocore_at_least( - "1.23.12", - reason="to set number of versions to keep with noncurrent_version_keep_newer" + "1.23.12", reason="to set number of versions to keep with noncurrent_version_keep_newer" ) - if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix - - required_when_present = ('abort_incomplete_multipart_upload_days', - 'expiration_date', 'expiration_days', 'expire_object_delete_marker', - 'transition_date', 'transition_days', 'transitions', - 'noncurrent_version_expiration_days', - 'noncurrent_version_keep_newer', - 'noncurrent_version_transition_days', - 'noncurrent_version_transitions') + if state == "present" and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix + required_when_present = ( + "abort_incomplete_multipart_upload_days", + "expiration_date", + "expiration_days", + "expire_object_delete_marker", + "transition_date", + "transition_days", + "transitions", + "noncurrent_version_expiration_days", + "noncurrent_version_keep_newer", + "noncurrent_version_transition_days", + "noncurrent_version_transitions", + ) for param in required_when_present: if module.params.get(param) is None: break else: - msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present) + msg = "one of the following is required when 'state' is 'present': %s" % ", ".join(required_when_present) module.fail_json(msg=msg) # If dates have been set, make sure they're in a valid format if expiration_date: expiration_date = parse_date(expiration_date) if expiration_date is None: - module.fail_json(msg="expiration_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="expiration_date is not a valid ISO-8601 format." + " The time must be midnight and a timezone of GMT must be included" + ) if transition_date: transition_date = parse_date(transition_date) if transition_date is None: - module.fail_json(msg="transition_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included") + module.fail_json( + msg="transition_date is not a valid ISO-8601 format." 
+ " The time must be midnight and a timezone of GMT must be included" + ) - if state == 'present': + if state == "present": create_lifecycle_rule(client, module) - elif state == 'absent': + elif state == "absent": destroy_lifecycle_rule(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_logging.py b/s3_logging.py index 3db5fbf61e7..b2eda67d135 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -73,16 +73,15 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - - if not bucket_logging.get('LoggingEnabled', False): + if not bucket_logging.get("LoggingEnabled", False): if target_bucket: return True return False - logging = bucket_logging['LoggingEnabled'] - if logging['TargetBucket'] != target_bucket: + logging = bucket_logging["LoggingEnabled"] + if logging["TargetBucket"] != target_bucket: return True - if logging['TargetPrefix'] != target_prefix: + if logging["TargetPrefix"] != target_prefix: return True return False @@ -90,18 +89,18 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): def verify_acls(connection, module, target_bucket): try: current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) - current_grants = current_acl['Grants'] - except is_boto3_error_code('NoSuchBucket'): + current_grants = current_acl["Grants"] + except is_boto3_error_code("NoSuchBucket"): module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch target bucket ACL") required_grant = { - 'Grantee': { - 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery", - 'Type': 'Group' - }, - 'Permission': 'FULL_CONTROL' + "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"}, + "Permission": "FULL_CONTROL", } for grant in current_grants: @@ -114,8 +113,8 @@ def verify_acls(connection, module, target_bucket): updated_acl = dict(current_acl) updated_grants = list(current_grants) updated_grants.append(required_grant) - updated_acl['Grants'] = updated_grants - del updated_acl['ResponseMetadata'] + updated_acl["Grants"] = updated_grants + del updated_acl["ResponseMetadata"] try: connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -125,7 +124,6 @@ def verify_acls(connection, module, target_bucket): def enable_bucket_logging(connection, module): - bucket_name = module.params.get("name") target_bucket = module.params.get("target_bucket") target_prefix = module.params.get("target_prefix") @@ -133,9 +131,12 @@ def enable_bucket_logging(connection, module): try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) - except is_boto3_error_code('NoSuchBucket'): + except is_boto3_error_code("NoSuchBucket"): module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to fetch current logging status") try: @@ -152,11 +153,12 @@ def 
enable_bucket_logging(connection, module): aws_retry=True, Bucket=bucket_name, BucketLoggingStatus={ - 'LoggingEnabled': { - 'TargetBucket': target_bucket, - 'TargetPrefix': target_prefix, + "LoggingEnabled": { + "TargetBucket": target_bucket, + "TargetPrefix": target_prefix, } - }) + }, + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to enable bucket logging") @@ -166,7 +168,6 @@ def enable_bucket_logging(connection, module): def disable_bucket_logging(connection, module): - bucket_name = module.params.get("name") changed = False @@ -182,11 +183,9 @@ def disable_bucket_logging(connection, module): module.exit_json(changed=True) try: - response = AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidTargetBucketForLogging'] - )(connection.put_bucket_logging)( - Bucket=bucket_name, BucketLoggingStatus={} - ) + response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( + connection.put_bucket_logging + )(Bucket=bucket_name, BucketLoggingStatus={}) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to disable bucket logging") @@ -194,24 +193,23 @@ def disable_bucket_logging(connection, module): def main(): - argument_spec = dict( name=dict(required=True), target_bucket=dict(required=False, default=None), target_prefix=dict(required=False, default=""), - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": enable_bucket_logging(connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_logging(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index 333bb98cb67..90429ca64b4 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -110,41 +110,31 @@ def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): - payload = { - 'Id': mc_id - } + payload = {"Id": mc_id} # Just a filter_prefix or just a single tag filter is a special case if filter_prefix and not filter_tags: - payload['Filter'] = { - 'Prefix': filter_prefix - } + payload["Filter"] = {"Prefix": filter_prefix} elif not filter_prefix and len(filter_tags) == 1: - payload['Filter'] = { - 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0] - } + payload["Filter"] = {"Tag": ansible_dict_to_boto3_tag_list(filter_tags)[0]} # Otherwise we need to use 'And' elif filter_tags: - payload['Filter'] = { - 'And': { - 'Tags': ansible_dict_to_boto3_tag_list(filter_tags) - } - } + payload["Filter"] = {"And": {"Tags": ansible_dict_to_boto3_tag_list(filter_tags)}} if filter_prefix: - payload['Filter']['And']['Prefix'] = filter_prefix + payload["Filter"]["And"]["Prefix"] = filter_prefix return payload def create_or_update_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') - filter_prefix = module.params.get('filter_prefix') - filter_tags = module.params.get('filter_tags') + bucket_name = 
module.params.get("bucket_name") + mc_id = module.params.get("id") + filter_prefix = module.params.get("filter_prefix") + filter_tags = module.params.get("filter_tags") try: response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - metrics_configuration = response['MetricsConfiguration'] - except is_boto3_error_code('NoSuchConfiguration'): + metrics_configuration = response["MetricsConfiguration"] + except is_boto3_error_code("NoSuchConfiguration"): metrics_configuration = None except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -160,10 +150,7 @@ def create_or_update_metrics_configuration(client, module): try: client.put_bucket_metrics_configuration( - aws_retry=True, - Bucket=bucket_name, - Id=mc_id, - MetricsConfiguration=new_configuration + aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration ) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) @@ -172,12 +159,12 @@ def create_or_update_metrics_configuration(client, module): def delete_metrics_configuration(client, module): - bucket_name = module.params.get('bucket_name') - mc_id = module.params.get('id') + bucket_name = module.params.get("bucket_name") + mc_id = module.params.get("id") try: client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") @@ -187,7 +174,7 @@ def delete_metrics_configuration(client, module): try: client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code('NoSuchConfiguration'): + except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) @@ -197,29 +184,26 @@ def delete_metrics_configuration(client, module): def main(): argument_spec = dict( - bucket_name=dict(type='str', required=True), - id=dict(type='str', required=True), - filter_prefix=dict(type='str', required=False), - filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + bucket_name=dict(type="str", required=True), + id=dict(type="str", required=True), + filter_prefix=dict(type="str", required=False), + filter_tags=dict(default={}, type="dict", required=False, aliases=["filter_tag"]), + state=dict(default="present", type="str", choices=["present", "absent"]), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") try: - client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) + client = module.client("s3", retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + 
module.fail_json_aws(e, msg="Failed to connect to AWS") - if state == 'present': + if state == "present": create_or_update_metrics_configuration(client, module) - elif state == 'absent': + elif state == "absent": delete_metrics_configuration(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_sync.py b/s3_sync.py index 30a2e675f33..efc07efb150 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -261,25 +261,27 @@ def gather_files(fileroot, include=None, exclude=None): if os.path.isfile(fileroot): fullpath = fileroot fstat = os.stat(fullpath) - path_array = fileroot.split('/') + path_array = fileroot.split("/") chopped_path = path_array[-1] f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) else: - for (dirpath, dirnames, filenames) in os.walk(fileroot): + for dirpath, dirnames, filenames in os.walk(fileroot): for fn in filenames: fullpath = os.path.join(dirpath, fn) # include/exclude if include: found = False - for x in include.split(','): + for x in include.split(","): if fnmatch.fnmatch(fn, x): found = True if not found: @@ -288,7 +290,7 @@ def gather_files(fileroot, include=None, exclude=None): if exclude: found = False - for x in exclude.split(','): + for x in exclude.split(","): if fnmatch.fnmatch(fn, x): found = True if found: @@ -299,36 +301,38 @@ def gather_files(fileroot, include=None, exclude=None): fstat = os.stat(fullpath) f_size = fstat[osstat.ST_SIZE] f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append({ - 'fullpath': fullpath, - 'chopped_path': chopped_path, - 'modified_epoch': f_modified_epoch, - 'bytes': f_size, - }) + ret.append( + { + "fullpath": fullpath, + "chopped_path": chopped_path, + "modified_epoch": f_modified_epoch, + "bytes": f_size, + } + ) # dirpath = path *to* the directory # dirnames = subdirs *in* our directory # filenames return ret -def calculate_s3_path(filelist, key_prefix=''): +def calculate_s3_path(filelist, key_prefix=""): ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path']) + retentry["s3_path"] = os.path.join(key_prefix, fileentry["chopped_path"]) ret.append(retentry) return ret -def calculate_local_etag(filelist, key_prefix=''): - '''Really, "calculate md5", but since AWS uses their own format, we'll just call - it a "local etag". TODO optimization: only calculate if remote key exists.''' +def calculate_local_etag(filelist, key_prefix=""): + """Really, "calculate md5", but since AWS uses their own format, we'll just call + it a "local etag". TODO optimization: only calculate if remote key exists.""" ret = [] for fileentry in filelist: # don't modify the input dict retentry = fileentry.copy() - retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath']) + retentry["local_etag"] = calculate_multipart_etag(fileentry["fullpath"]) ret.append(retentry) return ret @@ -337,20 +341,20 @@ def determine_mimetypes(filelist, override_map): ret = [] for fileentry in filelist: retentry = fileentry.copy() - localfile = fileentry['fullpath'] + localfile = fileentry["fullpath"] # reminder: file extension is '.txt', not 'txt'. 
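# A quick sketch of the lookup the next lines rely on (paths and map values here
# are illustrative assumptions, not taken from the module): os.path.splitext
# keeps the leading dot on the extension, so override_map keys must include it.
#   os.path.splitext("/tmp/report.txt")        # -> ("/tmp/report", ".txt")
#   {".txt": "text/plain"}.get(".txt")         # -> "text/plain"
#   {".txt": "text/plain"}.get("txt")          # -> None: a dotless key misses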
file_extension = os.path.splitext(localfile)[1] if override_map and override_map.get(file_extension): # override? use it. - retentry['mime_type'] = override_map[file_extension] + retentry["mime_type"] = override_map[file_extension] else: # else sniff it - retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False) + retentry["mime_type"], retentry["encoding"] = mimetypes.guess_type(localfile, strict=False) # might be None or '' from one of the above. Not a great type but better than nothing. - if not retentry['mime_type']: - retentry['mime_type'] = 'application/octet-stream' + if not retentry["mime_type"]: + retentry["mime_type"] = "application/octet-stream" ret.append(retentry) @@ -362,10 +366,10 @@ def head_s3(s3, bucket, s3keys): for entry in s3keys: retentry = entry.copy() try: - retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) + retentry["s3_head"] = s3.head_object(Bucket=bucket, Key=entry["s3_path"]) # 404 (Missing) - File doesn't exist, we'll need to upload # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload - except is_boto3_error_code(['404', '403']): + except is_boto3_error_code(["404", "403"]): pass retkeys.append(retentry) return retkeys @@ -375,106 +379,127 @@ def filter_list(s3, bucket, s3filelist, strategy): keeplist = list(s3filelist) for e in keeplist: - e['_strategy'] = strategy + e["_strategy"] = strategy # init/fetch info from S3 if we're going to use it for comparisons - if not strategy == 'force': + if not strategy == "force": keeplist = head_s3(s3, bucket, s3filelist) # now actually run the strategies - if strategy == 'checksum': + if strategy == "checksum": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # since we have a remote s3 object, compare the values. - if entry['s3_head']['ETag'] == entry['local_etag']: + if entry["s3_head"]["ETag"] == entry["local_etag"]: # files match, so remove the entry - entry['skip_flag'] = True + entry["skip_flag"] = True else: # file etags don't match, keep the entry. pass else: # we don't have an etag, so we'll keep it. pass - elif strategy == 'date_size': + elif strategy == "date_size": for entry in keeplist: - if entry.get('s3_head'): + if entry.get("s3_head"): # fstat = entry['stat'] - local_modified_epoch = entry['modified_epoch'] - local_size = entry['bytes'] + local_modified_epoch = entry["modified_epoch"] + local_size = entry["bytes"] # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward. 
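# A worked example of the epoch arithmetic used below (the date is an assumed
# illustration; tz comes from the dateutil import already used by this module):
#   dt = datetime.datetime(2020, 3, 2, tzinfo=tz.tzutc())
#   delta = dt - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())
#   delta.seconds + (delta.days * 86400)   # 1583107200, == int(dt.timestamp()) on py3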
# remote_modified_epoch = entry['s3_head']['LastModified'].timestamp() - remote_modified_datetime = entry['s3_head']['LastModified'] - delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())) + remote_modified_datetime = entry["s3_head"]["LastModified"] + delta = remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()) remote_modified_epoch = delta.seconds + (delta.days * 86400) - remote_size = entry['s3_head']['ContentLength'] + remote_size = entry["s3_head"]["ContentLength"] - entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch) - entry['whysize'] = '{0} / {1}'.format(local_size, remote_size) + entry["whytime"] = "{0} / {1}".format(local_modified_epoch, remote_modified_epoch) + entry["whysize"] = "{0} / {1}".format(local_size, remote_size) if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: - entry['skip_flag'] = True + entry["skip_flag"] = True else: - entry['why'] = "no s3_head" + entry["why"] = "no s3_head" # else: probably 'force'. Basically we don't skip with any with other strategies. else: pass # prune 'please skip' entries, if any. - return [x for x in keeplist if not x.get('skip_flag')] + return [x for x in keeplist if not x.get("skip_flag")] def upload_files(s3, bucket, filelist, params): ret = [] for entry in filelist: - args = { - 'ContentType': entry['mime_type'] - } - if params.get('permission'): - args['ACL'] = params['permission'] - if params.get('cache_control'): - args['CacheControl'] = params['cache_control'] - if params.get('storage_class'): - args['StorageClass'] = params['storage_class'] + args = {"ContentType": entry["mime_type"]} + if params.get("permission"): + args["ACL"] = params["permission"] + if params.get("cache_control"): + args["CacheControl"] = params["cache_control"] + if params.get("storage_class"): + args["StorageClass"] = params["storage_class"] # if this fails exception is caught in main() - s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None) + s3.upload_file(entry["fullpath"], bucket, entry["s3_path"], ExtraArgs=args, Callback=None, Config=None) ret.append(entry) return ret def remove_files(s3, sourcelist, params): - bucket = params.get('bucket') - key_prefix = params.get('key_prefix') - paginator = s3.get_paginator('list_objects_v2') - current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', [])) - keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist) + bucket = params.get("bucket") + key_prefix = params.get("key_prefix") + paginator = s3.get_paginator("list_objects_v2") + current_keys = set( + x["Key"] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get("Contents", []) + ) + keep_keys = set(to_text(source_file["s3_path"]) for source_file in sourcelist) delete_keys = list(current_keys - keep_keys) # can delete 1000 objects at a time - groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] + groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] # fmt:skip for keys in groups_of_keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]}) + s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": key} for key in keys]}) return delete_keys def main(): argument_spec = dict( - mode=dict(choices=['push'], default='push'), - file_change_strategy=dict(choices=['force', 
'date_size', 'checksum'], default='date_size'), + mode=dict(choices=["push"], default="push"), + file_change_strategy=dict(choices=["force", "date_size", "checksum"], default="date_size"), bucket=dict(required=True), - key_prefix=dict(required=False, default='', no_log=False), - file_root=dict(required=True, type='path'), - permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', - 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), - mime_map=dict(required=False, type='dict'), + key_prefix=dict(required=False, default="", no_log=False), + file_root=dict(required=True, type="path"), + permission=dict( + required=False, + choices=[ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + ], + ), + mime_map=dict(required=False, type="dict"), exclude=dict(required=False, default=".*"), include=dict(required=False, default="*"), - cache_control=dict(required=False, default=''), - delete=dict(required=False, type='bool', default=False), - storage_class=dict(required=False, default='STANDARD', - choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', - 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']), + cache_control=dict(required=False, default=""), + delete=dict(required=False, type="bool", default=False), + storage_class=dict( + required=False, + default="STANDARD", + choices=[ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + ], + ), # future options: encoding, metadata, retries ) @@ -483,36 +508,43 @@ def main(): ) if not HAS_DATEUTIL: - module.fail_json(msg='dateutil required for this module') + module.fail_json(msg="dateutil required for this module") result = {} - mode = module.params['mode'] + mode = module.params["mode"] try: - s3 = module.client('s3') + s3 = module.client("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - if mode == 'push': + if mode == "push": try: - result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include']) - result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map')) - result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix']) + result["filelist_initial"] = gather_files( + module.params["file_root"], exclude=module.params["exclude"], include=module.params["include"] + ) + result["filelist_typed"] = determine_mimetypes(result["filelist_initial"], module.params.get("mime_map")) + result["filelist_s3"] = calculate_s3_path(result["filelist_typed"], module.params["key_prefix"]) try: - result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3']) + result["filelist_local_etag"] = calculate_local_etag(result["filelist_s3"]) except ValueError as e: - if module.params['file_change_strategy'] == 'checksum': - module.fail_json_aws(e, 'Unable to calculate checksum. 
If running in FIPS mode, you may need to use another file_change_strategy') - result['filelist_local_etag'] = result['filelist_s3'].copy() - result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy']) - result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params) - - if module.params['delete']: - result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params) + if module.params["file_change_strategy"] == "checksum": + module.fail_json_aws( + e, + "Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy", + ) + result["filelist_local_etag"] = result["filelist_s3"].copy() + result["filelist_actionable"] = filter_list( + s3, module.params["bucket"], result["filelist_local_etag"], module.params["file_change_strategy"] + ) + result["uploads"] = upload_files(s3, module.params["bucket"], result["filelist_actionable"], module.params) + + if module.params["delete"]: + result["removed"] = remove_files(s3, result["filelist_local_etag"], module.params) # mark changed if we actually upload something. - if result.get('uploads') or result.get('removed'): - result['changed'] = True + if result.get("uploads") or result.get("removed"): + result["changed"] = True # result.update(filelist=actionable_filelist) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to push file") @@ -520,5 +552,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/s3_website.py b/s3_website.py index b73da51a68c..38c411b1fe2 100644 --- a/s3_website.py +++ b/s3_website.py @@ -172,40 +172,37 @@ def _create_redirect_dict(url): - redirect_dict = {} - url_split = url.split(':') + url_split = url.split(":") # Did we split anything? 
if len(url_split) == 2: - redirect_dict[u'Protocol'] = url_split[0] - redirect_dict[u'HostName'] = url_split[1].replace('//', '') + redirect_dict["Protocol"] = url_split[0] + redirect_dict["HostName"] = url_split[1].replace("//", "") elif len(url_split) == 1: - redirect_dict[u'HostName'] = url_split[0] + redirect_dict["HostName"] = url_split[0] else: - raise ValueError('Redirect URL appears invalid') + raise ValueError("Redirect URL appears invalid") return redirect_dict def _create_website_configuration(suffix, error_key, redirect_all_requests): - website_configuration = {} if error_key is not None: - website_configuration['ErrorDocument'] = {'Key': error_key} + website_configuration["ErrorDocument"] = {"Key": error_key} if suffix is not None: - website_configuration['IndexDocument'] = {'Suffix': suffix} + website_configuration["IndexDocument"] = {"Suffix": suffix} if redirect_all_requests is not None: - website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) + website_configuration["RedirectAllRequestsTo"] = _create_redirect_dict(redirect_all_requests) return website_configuration def enable_or_update_bucket_as_website(client_connection, resource_connection, module): - bucket_name = module.params.get("name") redirect_all_requests = module.params.get("redirect_all_requests") # If redirect_all_requests is set then don't use the default suffix that has been set @@ -223,14 +220,19 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): website_config = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get website configuration") if website_config is None: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to set bucket website configuration") @@ -238,18 +240,26 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m module.fail_json(msg=str(e)) else: try: - if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ - (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ - (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): - + if ( + (suffix is not None and website_config["IndexDocument"]["Suffix"] != suffix) + or (error_key is not None and website_config["ErrorDocument"]["Key"] != error_key) + or ( + redirect_all_requests is not None + and website_config["RedirectAllRequestsTo"] != _create_redirect_dict(redirect_all_requests) + ) + ): try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") except KeyError as e: try: - bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + bucket_website.put( + WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") @@ -264,15 +274,17 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m def disable_bucket_as_website(client_connection, module): - changed = False bucket_name = module.params.get("name") try: client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code('NoSuchWebsiteConfiguration'): + except is_boto3_error_code("NoSuchWebsiteConfiguration"): module.exit_json(changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket website") try: @@ -285,36 +297,35 @@ def disable_bucket_as_website(client_connection, module): def main(): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['present', 'absent']), - suffix=dict(type='str', required=False, default='index.html'), - error_key=dict(type='str', required=False, no_log=False), - redirect_all_requests=dict(type='str', required=False), + name=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["present", "absent"]), + suffix=dict(type="str", required=False, default="index.html"), + error_key=dict(type="str", required=False, no_log=False), + redirect_all_requests=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['redirect_all_requests', 'suffix'], - ['redirect_all_requests', 'error_key'] + ["redirect_all_requests", "suffix"], + ["redirect_all_requests", "error_key"], ], ) try: - client_connection = module.client('s3') - resource_connection = module.resource('s3') + client_connection = module.client("s3") + resource_connection = module.resource("s3") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") state = module.params.get("state") - if state == 'present': + if state == "present": enable_or_update_bucket_as_website(client_connection, resource_connection, module) - elif state == 'absent': + elif state == "absent": disable_bucket_as_website(client_connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index 4aea26ebfc2..f611d600967 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -235,9 +235,19 @@ class Secret(object): """An object representation of the Secret described by the self.module args""" + def __init__( - self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, - tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None, + self, + name, + secret_type, + secret, + resource_policy=None, + description="", 
+ kms_key_id=None, + tags=None, + lambda_arn=None, + rotation_interval=None, + replica_regions=None, ): self.name = name self.description = description @@ -258,9 +268,7 @@ def __init__( @property def create_args(self): - args = { - "Name": self.name - } + args = {"Name": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -269,10 +277,9 @@ def create_args(self): add_replica_regions = [] for replica in self.replica_regions: if replica["kms_key_id"]: - add_replica_regions.append({'Region': replica["region"], - 'KmsKeyId': replica["kms_key_id"]}) + add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - add_replica_regions.append({'Region': replica["region"]}) + add_replica_regions.append({"Region": replica["region"]}) args["AddReplicaRegions"] = add_replica_regions if self.tags: args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) @@ -281,9 +288,7 @@ def create_args(self): @property def update_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.description: args["Description"] = self.description if self.kms_key_id: @@ -293,9 +298,7 @@ def update_args(self): @property def secret_resource_policy_args(self): - args = { - "SecretId": self.name - } + args = {"SecretId": self.name} if self.resource_policy: args["ResourcePolicy"] = self.resource_policy return args @@ -315,7 +318,7 @@ class SecretsManagerInterface(object): def __init__(self, module): self.module = module - self.client = self.module.client('secretsmanager') + self.client = self.module.client("secretsmanager") def get_secret(self, name): try: @@ -376,9 +379,7 @@ def remove_replication(self, name, regions): self.module.exit_json(changed=True) try: replica_regions = [] - response = self.client.remove_regions_from_replication( - SecretId=name, - RemoveReplicaRegions=regions) + response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -390,12 +391,10 @@ def replicate_secret(self, name, regions): replica_regions = [] for replica in regions: if replica["kms_key_id"]: - replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]}) + replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) else: - replica_regions.append({'Region': replica["region"]}) - response = self.client.replicate_secret_to_regions( - SecretId=name, - AddReplicaRegions=replica_regions) + replica_regions.append({"Region": replica["region"]}) + response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") return response @@ -436,7 +435,8 @@ def update_rotation(self, secret): response = self.client.rotate_secret( SecretId=secret.name, RotationLambdaARN=secret.rotation_lambda_arn, - RotationRules=secret.rotation_rules) + RotationRules=secret.rotation_rules, + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to rotate secret") else: @@ -476,7 +476,7 @@ def secrets_match(self, desired_secret, current_secret): if desired_secret.kms_key_id != current_secret.get("KmsKeyId"): return False current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name")) - if desired_secret.secret_type == 'SecretBinary': + if
desired_secret.secret_type == "SecretBinary": desired_value = to_bytes(desired_secret.secret) else: desired_value = desired_secret.secret @@ -537,65 +537,69 @@ def compare_regions(desired_secret, current_secret): def main(): replica_args = dict( - region=dict(type='str', required=True), - kms_key_id=dict(type='str', required=False), + region=dict(type="str", required=True), + kms_key_id=dict(type="str", required=False), ) module = AnsibleAWSModule( argument_spec={ - 'name': dict(required=True), - 'state': dict(choices=['present', 'absent'], default='present'), - 'overwrite': dict(type='bool', default=True), - 'description': dict(default=""), - 'replica': dict(type='list', elements='dict', options=replica_args), - 'kms_key_id': dict(), - 'secret_type': dict(choices=['binary', 'string'], default="string"), - 'secret': dict(default="", no_log=True), - 'json_secret': dict(type='json', no_log=True), - 'resource_policy': dict(type='json', default=None), - 'tags': dict(type='dict', default=None, aliases=['resource_tags']), - 'purge_tags': dict(type='bool', default=True), - 'rotation_lambda': dict(), - 'rotation_interval': dict(type='int', default=30), - 'recovery_window': dict(type='int', default=30), + "name": dict(required=True), + "state": dict(choices=["present", "absent"], default="present"), + "overwrite": dict(type="bool", default=True), + "description": dict(default=""), + "replica": dict(type="list", elements="dict", options=replica_args), + "kms_key_id": dict(), + "secret_type": dict(choices=["binary", "string"], default="string"), + "secret": dict(default="", no_log=True), + "json_secret": dict(type="json", no_log=True), + "resource_policy": dict(type="json", default=None), + "tags": dict(type="dict", default=None, aliases=["resource_tags"]), + "purge_tags": dict(type="bool", default=True), + "rotation_lambda": dict(), + "rotation_interval": dict(type="int", default=30), + "recovery_window": dict(type="int", default=30), }, - mutually_exclusive=[['secret', 'json_secret']], + mutually_exclusive=[["secret", "json_secret"]], supports_check_mode=True, ) changed = False - state = module.params.get('state') + state = module.params.get("state") secrets_mgr = SecretsManagerInterface(module) - recovery_window = module.params.get('recovery_window') + recovery_window = module.params.get("recovery_window") secret = Secret( - module.params.get('name'), - module.params.get('secret_type'), - module.params.get('secret') or module.params.get('json_secret'), - description=module.params.get('description'), - replica_regions=module.params.get('replica'), - kms_key_id=module.params.get('kms_key_id'), - resource_policy=module.params.get('resource_policy'), - tags=module.params.get('tags'), - lambda_arn=module.params.get('rotation_lambda'), - rotation_interval=module.params.get('rotation_interval') + module.params.get("name"), + module.params.get("secret_type"), + module.params.get("secret") or module.params.get("json_secret"), + description=module.params.get("description"), + replica_regions=module.params.get("replica"), + kms_key_id=module.params.get("kms_key_id"), + resource_policy=module.params.get("resource_policy"), + tags=module.params.get("tags"), + lambda_arn=module.params.get("rotation_lambda"), + rotation_interval=module.params.get("rotation_interval"), ) - purge_tags = module.params.get('purge_tags') + purge_tags = module.params.get("purge_tags") current_secret = secrets_mgr.get_secret(secret.name) - if state == 'absent': + if state == "absent": if current_secret: if not 
current_secret.get("DeletedDate"): - result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True elif current_secret.get("DeletedDate") and recovery_window == 0: - result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)) + result = camel_dict_to_snake_dict( + secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) + ) changed = True else: result = "secret already scheduled for deletion" else: result = "secret does not exist" - if state == 'present': + if state == "present": if current_secret is None: result = secrets_mgr.create_secret(secret) if secret.resource_policy and result.get("ARN"): @@ -607,7 +611,7 @@ def main(): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - overwrite = module.params.get('overwrite') + overwrite = module.params.get("overwrite") if overwrite: result = secrets_mgr.update_secret(secret) changed = True @@ -624,8 +628,8 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True - if module.params.get('tags') is not None: - current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) + if module.params.get("tags") is not None: + current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", [])) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) if tags_to_add: secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) @@ -643,12 +647,12 @@ def main(): changed = True result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) - if result.get('tags', None) is not None: - result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', [])) + if result.get("tags", None) is not None: + result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", [])) result.pop("response_metadata") module.exit_json(changed=changed, secret=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ses_identity.py b/ses_identity.py index df80b736b91..7a966da4a48 100644 --- a/ses_identity.py +++ b/ses_identity.py @@ -242,8 +242,10 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe try: response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity)) - identity_verification = response['VerificationAttributes'] + module.fail_json_aws( + e, msg="Failed to retrieve identity verification attributes for {identity}".format(identity=identity) + ) + identity_verification = response["VerificationAttributes"] if identity in identity_verification: break time.sleep(retryDelay) @@ -263,8 +265,10 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel try: response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity)) - notification_attributes = response['NotificationAttributes'] + module.fail_json_aws( + e, msg="Failed to retrieve identity notification attributes for 
{identity}".format(identity=identity) + ) + notification_attributes = response["NotificationAttributes"] # No clear AWS docs on when this happens, but it appears sometimes identities are not included in # in the notification attributes when the identity is first registered. Suspect that this is caused by @@ -280,7 +284,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel # something has gone very wrong. if len(notification_attributes) != 0: module.fail_json( - msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format( + msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( identity, notification_attributes.keys(), ) @@ -292,9 +296,9 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel def desired_topic(module, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') + arg_dict = module.params.get(notification_type.lower() + "_notifications") if arg_dict: - return arg_dict.get('topic', None) + return arg_dict.get("topic", None) else: return None @@ -304,7 +308,7 @@ def update_notification_topic(connection, module, identity, identity_notificatio if module.params.get(f"{notification_type.lower()}_notifications") is None: return False - topic_key = notification_type + 'Topic' + topic_key = notification_type + "Topic" if identity_notifications is None: # If there is no configuration for notifications cannot be being sent to topics # hence assume None as the current state. @@ -335,17 +339,20 @@ def update_notification_topic(connection, module, identity, identity_notificatio connection.set_identity_notification_topic(**request_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, + msg="Failed to set identity notification topic for {identity} {notification_type}".format( + identity=identity, + notification_type=notification_type, + ), + ) return True return False def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' + arg_dict = module.params.get(notification_type.lower() + "_notifications") + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" if identity_notifications is None: # If there is no configuration for topic notifications, headers cannot be being # forwarded, hence assume false. @@ -358,21 +365,25 @@ def update_notification_topic_headers(connection, module, identity, identity_not # headers are not included since most API consumers would interpret absence as false. 
current = False - if arg_dict is not None and 'include_headers' in arg_dict: - required = arg_dict['include_headers'] + if arg_dict is not None and "include_headers" in arg_dict: + required = arg_dict["include_headers"] else: required = False if current != required: try: if not module.check_mode: - connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required, - aws_retry=True) + connection.set_identity_headers_in_notifications_enabled( + Identity=identity, NotificationType=notification_type, Enabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format( - identity=identity, - notification_type=notification_type, - )) + module.fail_json_aws( + e, + msg="Failed to set identity headers in notification for {identity} {notification_type}".format( + identity=identity, + notification_type=notification_type, + ), + ) return True return False @@ -383,51 +394,57 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati # are being handled by SNS topics. So in the absence of identity_notifications # information existing feedback forwarding must be on. current = True - elif 'ForwardingEnabled' in identity_notifications: - current = identity_notifications['ForwardingEnabled'] + elif "ForwardingEnabled" in identity_notifications: + current = identity_notifications["ForwardingEnabled"] else: # If there is information on the notifications setup but no information on the # forwarding state it's pretty safe to assume forwarding is off. AWS API docs # suggest this information will always be included but best to be defensive current = False - required = module.params.get('feedback_forwarding') + required = module.params.get("feedback_forwarding") if current != required: try: if not module.check_mode: - connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True) + connection.set_identity_feedback_forwarding_enabled( + Identity=identity, ForwardingEnabled=required, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity)) + module.fail_json_aws( + e, msg="Failed to set identity feedback forwarding for {identity}".format(identity=identity) + ) return True return False def create_mock_notifications_response(module): resp = { - "ForwardingEnabled": module.params.get('feedback_forwarding'), + "ForwardingEnabled": module.params.get("feedback_forwarding"), } - for notification_type in ('Bounce', 'Complaint', 'Delivery'): - arg_dict = module.params.get(notification_type.lower() + '_notifications') - if arg_dict is not None and 'topic' in arg_dict: - resp[notification_type + 'Topic'] = arg_dict['topic'] - - header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled' - if arg_dict is not None and 'include_headers' in arg_dict: - resp[header_key] = arg_dict['include_headers'] + for notification_type in ("Bounce", "Complaint", "Delivery"): + arg_dict = module.params.get(notification_type.lower() + "_notifications") + if arg_dict is not None and "topic" in arg_dict: + resp[notification_type + "Topic"] = arg_dict["topic"] + + header_key = "HeadersIn" + notification_type + "NotificationsEnabled" + if arg_dict is not None and "include_headers" in arg_dict: + resp[header_key] = arg_dict["include_headers"] else: 
resp[header_key] = False return resp def update_identity_notifications(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False identity_notifications = get_identity_notifications(connection, module, identity) - for notification_type in ('Bounce', 'Complaint', 'Delivery'): + for notification_type in ("Bounce", "Complaint", "Delivery"): changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type) - changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type) + changed |= update_notification_topic_headers( + connection, module, identity, identity_notifications, notification_type + ) changed |= update_feedback_forwarding(connection, module, identity, identity_notifications) @@ -440,25 +457,27 @@ def update_identity_notifications(connection, module): def validate_params_for_identity_present(module): - if module.params.get('feedback_forwarding') is False: - if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')): - module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics") + if module.params.get("feedback_forwarding") is False: + if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): + module.fail_json( + msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + ) def create_or_update_identity(connection, module, region, account_id): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is None: try: if not module.check_mode: - if '@' in identity: + if "@" in identity: connection.verify_email_identity(EmailAddress=identity, aws_retry=True) else: connection.verify_domain_identity(Domain=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg="Failed to verify identity {identity}".format(identity=identity)) if module.check_mode: verification_attributes = { "VerificationStatus": "Pending", @@ -466,20 +485,22 @@ def create_or_update_identity(connection, module, region, account_id): else: verification_attributes = get_verification_attributes(connection, module, identity, retries=4) changed = True - elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'): - module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'], - verification_attributes=camel_dict_to_snake_dict(verification_attributes)) + elif verification_attributes["VerificationStatus"] not in ("Pending", "Success"): + module.fail_json( + msg="Identity " + identity + " in bad status " + verification_attributes["VerificationStatus"], + verification_attributes=camel_dict_to_snake_dict(verification_attributes), + ) if verification_attributes is None: - module.fail_json(msg='Unable to load identity verification attributes after registering identity.') + module.fail_json(msg="Unable to load identity verification attributes after registering identity.") notifications_changed, notification_attributes = 
update_identity_notifications(connection, module) changed |= notifications_changed if notification_attributes is None: - module.fail_json(msg='Unable to load identity notification attributes.') + module.fail_json(msg="Unable to load identity notification attributes.") - identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity + identity_arn = "arn:aws:ses:" + region + ":" + account_id + ":identity/" + identity module.exit_json( changed=changed, @@ -491,7 +512,7 @@ def create_or_update_identity(connection, module, region, account_id): def destroy_identity(connection, module): - identity = module.params.get('identity') + identity = module.params.get("identity") changed = False verification_attributes = get_verification_attributes(connection, module, identity) if verification_attributes is not None: @@ -499,7 +520,7 @@ def destroy_identity(connection, module): if not module.check_mode: connection.delete_identity(Identity=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity)) + module.fail_json_aws(e, msg="Failed to delete identity {identity}".format(identity=identity)) changed = True module.exit_json( @@ -509,44 +530,50 @@ def destroy_identity(connection, module): def get_account_id(module): - sts = module.client('sts') + sts = module.client("sts") try: caller_identity = sts.get_caller_identity() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve caller identity') - return caller_identity['Account'] + module.fail_json_aws(e, msg="Failed to retrieve caller identity") + return caller_identity["Account"] def main(): module = AnsibleAWSModule( argument_spec={ - "identity": dict(required=True, type='str'), - "state": dict(default='present', choices=['present', 'absent']), - "bounce_notifications": dict(type='dict'), - "complaint_notifications": dict(type='dict'), - "delivery_notifications": dict(type='dict'), - "feedback_forwarding": dict(default=True, type='bool'), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "bounce_notifications": dict(type="dict"), + "complaint_notifications": dict(type="dict"), + "delivery_notifications": dict(type="dict"), + "feedback_forwarding": dict(default=True, type="bool"), }, supports_check_mode=True, ) - for notification_type in ('bounce', 'complaint', 'delivery'): - param_name = notification_type + '_notifications' + for notification_type in ("bounce", "complaint", "delivery"): + param_name = notification_type + "_notifications" arg_dict = module.params.get(param_name) if arg_dict: - extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')] + extra_keys = [x for x in arg_dict.keys() if x not in ("topic", "include_headers")] if extra_keys: - module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers') + module.fail_json( + msg="Unexpected keys " + + str(extra_keys) + + " in " + + param_name + + " valid keys are topic or include_headers" + ) # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel that's caused throttling # failures so apply a jittered backoff to call SES calls. 
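# A minimal sketch of the retry pattern this comment motivates, pieced together
# from the calls visible in this module (the identity value is an assumed example):
#   decorator = AWSRetry.jittered_backoff()
#   connection = module.client("ses", retry_decorator=decorator)
#   # Calls that opt in via aws_retry=True are retried with jittered,
#   # exponentially growing sleeps when AWS throttles them:
#   connection.get_identity_notification_attributes(Identities=["user@example.com"], aws_retry=True)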
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": region = module.region account_id = get_account_id(module) validate_params_for_identity_present(module) @@ -555,5 +582,5 @@ def main(): destroy_identity(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ses_identity_policy.py b/ses_identity_policy.py index ed558307df5..a28d027549a 100644 --- a/ses_identity_policy.py +++ b/ses_identity_policy.py @@ -101,17 +101,17 @@ def get_identity_policy(connection, module, identity, policy_name): try: response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name)) - policies = response['Policies'] + module.fail_json_aws(e, msg="Failed to retrieve identity policy {policy}".format(policy=policy_name)) + policies = response["Policies"] if policy_name in policies: return policies[policy_name] return None def create_or_update_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') - required_policy = module.params.get('policy') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") + required_policy = module.params.get("policy") required_policy_dict = json.loads(required_policy) changed = False @@ -121,9 +121,11 @@ def create_or_update_identity_policy(connection, module): changed = True try: if not module.check_mode: - connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True) + connection.put_identity_policy( + Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg="Failed to put identity policy {policy}".format(policy=policy_name)) # Load the list of applied policies to include in the response. 
# In principle we should be able to just return the response, but given @@ -134,9 +136,9 @@ def create_or_update_identity_policy(connection, module): # # As a nice side benefit this also means the return is correct in check mode try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name is not None and policy_name not in policies_present: policies_present = list(policies_present) policies_present.append(policy_name) @@ -147,20 +149,20 @@ def create_or_update_identity_policy(connection, module): def delete_identity_policy(connection, module): - identity = module.params.get('identity') - policy_name = module.params.get('policy_name') + identity = module.params.get("identity") + policy_name = module.params.get("policy_name") changed = False try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames'] + policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to list identity policies') + module.fail_json_aws(e, msg="Failed to list identity policies") if policy_name in policies_present: try: if not module.check_mode: connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name)) + module.fail_json_aws(e, msg="Failed to delete identity policy {policy}".format(policy=policy_name)) changed = True policies_present = list(policies_present) policies_present.remove(policy_name) @@ -174,12 +176,12 @@ def delete_identity_policy(connection, module): def main(): module = AnsibleAWSModule( argument_spec={ - 'identity': dict(required=True, type='str'), - 'state': dict(default='present', choices=['present', 'absent']), - 'policy_name': dict(required=True, type='str'), - 'policy': dict(type='json', default=None), + "identity": dict(required=True, type="str"), + "state": dict(default="present", choices=["present", "absent"]), + "policy_name": dict(required=True, type="str"), + "policy": dict(type="json", default=None), }, - required_if=[['state', 'present', ['policy']]], + required_if=[["state", "present", ["policy"]]], supports_check_mode=True, ) @@ -187,15 +189,15 @@ def main(): # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel, which has caused throttling # failures, so apply a jittered backoff to SES calls.
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) state = module.params.get("state") - if state == 'present': + if state == "present": create_or_update_identity_policy(connection, module) else: delete_identity_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ses_rule_set.py b/ses_rule_set.py index 72730b1b28f..9915622ed7d 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -116,11 +116,11 @@ def list_rule_sets(client, module): response = client.list_receipt_rule_sets(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't list rule sets.") - return response['RuleSets'] + return response["RuleSets"] def rule_set_in(name, rule_sets): - return any(s for s in rule_sets if s['Name'] == name) + return any(s for s in rule_sets if s["Name"] == name) def ruleset_active(client, module, name): @@ -128,8 +128,8 @@ def ruleset_active(client, module, name): active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't get the active rule set.") - if active_rule_set is not None and 'Metadata' in active_rule_set: - return name == active_rule_set['Metadata']['Name'] + if active_rule_set is not None and "Metadata" in active_rule_set: + return name == active_rule_set["Metadata"]["Name"] else: # Metadata was not set meaning there is no active rule set return False @@ -167,7 +167,7 @@ def update_active_rule_set(client, module, name, desired_active): def create_or_update_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False @@ -180,11 +180,13 @@ def create_or_update_rule_set(client, module): module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) changed = True rule_sets = list(rule_sets) - rule_sets.append({ - 'Name': name, - }) + rule_sets.append( + { + "Name": name, + } + ) - (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active')) + (active_changed, active) = update_active_rule_set(client, module, name, module.params.get("active")) changed |= active_changed module.exit_json( @@ -195,30 +197,32 @@ def create_or_update_rule_set(client, module): def remove_rule_set(client, module): - name = module.params.get('name') + name = module.params.get("name") check_mode = module.check_mode changed = False rule_sets = list_rule_sets(client, module) if rule_set_in(name, rule_sets): active = ruleset_active(client, module, name) - if active and not module.params.get('force'): + if active and not module.params.get("force"): module.fail_json( - msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name), + msg="Couldn't delete rule set {0} because it is currently active. 
Set force=true to delete an active ruleset.".format( + name + ), error={ "code": "CannotDelete", "message": "Cannot delete active rule set: {0}".format(name), - } + }, ) if not check_mode: - if active and module.params.get('force'): + if active and module.params.get("force"): deactivate_rule_set(client, module) try: client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) changed = True - rule_sets = [x for x in rule_sets if x['Name'] != name] + rule_sets = [x for x in rule_sets if x["Name"] != name] module.exit_json( changed=changed, @@ -228,27 +232,27 @@ def main(): argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - active=dict(type='bool'), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + active=dict(type="bool"), + force=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') + state = module.params.get("state") # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but # the ansible build runs multiple instances of the test in parallel, which has caused throttling # failures, so apply a jittered backoff to SES calls. - client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - if state == 'absent': + if state == "absent": remove_rule_set(client, module) else: create_or_update_rule_set(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sns.py b/sns.py index 96f5b72e70e..53c63a05645 100644 --- a/sns.py +++ b/sns.py @@ -158,7 +158,7 @@ from botocore.exceptions import BotoCoreError from botocore.exceptions import ClientError except ImportError: - pass # Handled by AnsibleAWSModule + pass  # Handled by AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup @@ -167,22 +167,22 @@ def main(): protocols = [ - 'http', - 'https', - 'email', - 'email_json', - 'sms', - 'sqs', - 'application', - 'lambda', + "http", + "https", + "email", + "email_json", + "sms", + "sqs", + "application", + "lambda", ] argument_spec = dict( - msg=dict(required=True, aliases=['default']), + msg=dict(required=True, aliases=["default"]), subject=dict(), topic=dict(required=True), - message_attributes=dict(type='dict'), - message_structure=dict(choices=['json', 'string'], default='json'), + message_attributes=dict(type="dict"), + message_structure=dict(choices=["json", "string"], default="json"), message_group_id=dict(), message_deduplication_id=dict(), ) @@ -193,50 +193,48 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) sns_kwargs = dict( - Message=module.params['msg'], - Subject=module.params['subject'], - MessageStructure=module.params['message_structure'], + Message=module.params["msg"], + Subject=module.params["subject"], + MessageStructure=module.params["message_structure"], ) - if module.params['message_attributes']: - if module.params['message_structure'] != 'string': + if module.params["message_attributes"]: + if
module.params["message_structure"] != "string": module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') - sns_kwargs['MessageAttributes'] = module.params['message_attributes'] + sns_kwargs["MessageAttributes"] = module.params["message_attributes"] if module.params["message_group_id"]: sns_kwargs["MessageGroupId"] = module.params["message_group_id"] if module.params["message_deduplication_id"]: sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"] - dict_msg = { - 'default': sns_kwargs['Message'] - } + dict_msg = {"default": sns_kwargs["Message"]} for p in protocols: if module.params[p]: - if sns_kwargs['MessageStructure'] != 'json': + if sns_kwargs["MessageStructure"] != "json": module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".') - dict_msg[p.replace('_', '-')] = module.params[p] + dict_msg[p.replace("_", "-")] = module.params[p] - client = module.client('sns') + client = module.client("sns") - topic = module.params['topic'] - if ':' in topic: + topic = module.params["topic"] + if ":" in topic: # Short names can't contain ':' so we'll assume this is the full ARN - sns_kwargs['TopicArn'] = topic + sns_kwargs["TopicArn"] = topic else: - sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic) + sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) - if not sns_kwargs['TopicArn']: - module.fail_json(msg='Could not find topic: {0}'.format(topic)) + if not sns_kwargs["TopicArn"]: + module.fail_json(msg="Could not find topic: {0}".format(topic)) - if sns_kwargs['MessageStructure'] == 'json': - sns_kwargs['Message'] = json.dumps(dict_msg) + if sns_kwargs["MessageStructure"] == "json": + sns_kwargs["Message"] = json.dumps(dict_msg) try: result = client.publish(**sns_kwargs) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to publish message') + module.fail_json_aws(e, msg="Failed to publish message") sns_result = dict(msg="OK", message_id=result["MessageId"]) @@ -246,5 +244,5 @@ def main(): module.exit_json(**sns_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sns_topic.py b/sns_topic.py index 03b3338350c..90929a476ea 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -353,24 +353,25 @@ class SnsTopicManager(object): - """ Handles SNS Topic creation and destruction """ - - def __init__(self, - module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode): - - self.connection = module.client('sns') + """Handles SNS Topic creation and destruction""" + + def __init__( + self, + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ): + self.connection = module.client("sns") self.module = module self.name = name self.topic_type = topic_type @@ -400,73 +401,80 @@ def _create_topic(self): # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) # don't support the attribute being set, even to False. 
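# A minimal sketch of the create_topic call shape that note implies, assuming plain
# boto3 (topic name and type are illustrative values): the FifoTopic attribute is
# only ever added for FIFO topics and omitted entirely otherwise, never set to "false".
import boto3

sns = boto3.client("sns")
name = "example-topic"
topic_type = "fifo"  # illustrative value
attributes = {}
if topic_type == "fifo":
    attributes["FifoTopic"] = "true"  # omit the key for standard topics
    if not name.endswith(".fifo"):
        name += ".fifo"
response = sns.create_topic(Name=name, Attributes=attributes)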
- if self.topic_type == 'fifo': - attributes['FifoTopic'] = 'true' - if not self.name.endswith('.fifo'): - self.name = self.name + '.fifo' + if self.topic_type == "fifo": + attributes["FifoTopic"] = "true" + if not self.name.endswith(".fifo"): + self.name = self.name + ".fifo" if self.tags: tags = ansible_dict_to_boto3_tag_list(self.tags) if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, - Attributes=attributes, - Tags=tags) + response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) - self.topic_arn = response['TopicArn'] + self.topic_arn = response["TopicArn"] return True def _set_topic_attrs(self): changed = False try: - topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'] + topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) - if self.display_name and self.display_name != topic_attributes['DisplayName']: + if self.display_name and self.display_name != topic_attributes["DisplayName"]: changed = True - self.attributes_set.append('display_name') + self.attributes_set.append("display_name") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName', - AttributeValue=self.display_name) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="DisplayName", AttributeValue=self.display_name + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set display name") - if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])): + if self.policy and compare_policies(self.policy, json.loads(topic_attributes["Policy"])): changed = True - self.attributes_set.append('policy') + self.attributes_set.append("policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy', - AttributeValue=json.dumps(self.policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="Policy", AttributeValue=json.dumps(self.policy) + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic policy") # Set content-based deduplication attribute. Ignore if topic_type is not fifo. 
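# For illustration, the underlying boto3 call this block issues when the topic is
# FIFO and the requested value differs (the ARN below is a placeholder). SNS
# attribute values are strings, so the boolean-ish setting is passed as "true"/"false".
import boto3

sns = boto3.client("sns")
sns.set_topic_attributes(
    TopicArn="arn:aws:sns:us-east-1:123456789012:example-topic.fifo",
    AttributeName="ContentBasedDeduplication",
    AttributeValue="true",
)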
- if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \ - self.content_based_deduplication: - enabled = "true" if self.content_based_deduplication in 'enabled' else "false" - if enabled != topic_attributes['ContentBasedDeduplication']: + if ( + "FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true" + ) and self.content_based_deduplication: + enabled = "true" if self.content_based_deduplication in "enabled" else "false" + if enabled != topic_attributes["ContentBasedDeduplication"]: changed = True - self.attributes_set.append('content_based_deduplication') + self.attributes_set.append("content_based_deduplication") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication', - AttributeValue=enabled) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, AttributeName="ContentBasedDeduplication", AttributeValue=enabled + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication") - if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or - compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + if self.delivery_policy and ( + "DeliveryPolicy" not in topic_attributes + or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes["DeliveryPolicy"])) + ): changed = True - self.attributes_set.append('delivery_policy') + self.attributes_set.append("delivery_policy") if not self.check_mode: try: - self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy', - AttributeValue=json.dumps(self.delivery_policy)) + self.connection.set_topic_attributes( + TopicArn=self.topic_arn, + AttributeName="DeliveryPolicy", + AttributeValue=json.dumps(self.delivery_policy), + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") return changed @@ -474,20 +482,23 @@ def _set_topic_attrs(self): def _set_topic_subs(self): changed = False subscriptions_existing_list = set() - desired_subscriptions = [(sub['protocol'], - canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in - self.subscriptions] + desired_subscriptions = [ + (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) for sub in self.subscriptions + ] for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) + sub_key = (sub["Protocol"], sub["Endpoint"]) subscriptions_existing_list.add(sub_key) - if (self.purge_subscriptions and sub_key not in desired_subscriptions and - sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')): + if ( + self.purge_subscriptions + and sub_key not in desired_subscriptions + and sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted") + ): changed = True self.subscriptions_deleted.append(sub_key) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") @@ -503,8 +514,8 @@ def _set_topic_subs(self): def _init_desired_subscription_attributes(self): for sub in self.subscriptions: - sub_key = (sub['protocol'], 
canonicalize_endpoint(sub['protocol'], sub['endpoint'])) - tmp_dict = sub.get('attributes', {}) + sub_key = (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) + tmp_dict = sub.get("attributes", {}) # aws sdk expects values to be strings # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes for k, v in tmp_dict.items(): @@ -515,26 +526,28 @@ def _init_desired_subscription_attributes(self): def _set_topic_subs_attributes(self): changed = False for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub['Protocol'], sub['Endpoint']) - sub_arn = sub['SubscriptionArn'] + sub_key = (sub["Protocol"], sub["Endpoint"]) + sub_arn = sub["SubscriptionArn"] if not self.desired_subscription_attributes.get(sub_key): # subscription attributes aren't defined in desired, skipping continue try: - sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes'] + sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)[ + "Attributes" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) - raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery') - if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes: - if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower(): + raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") + if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: + if sub_current_attributes["RawMessageDelivery"].lower() != raw_message.lower(): changed = True if not self.check_mode: try: - self.connection.set_subscription_attributes(SubscriptionArn=sub_arn, - AttributeName='RawMessageDelivery', - AttributeValue=raw_message) + self.connection.set_subscription_attributes( + SubscriptionArn=sub_arn, AttributeName="RawMessageDelivery", AttributeValue=raw_message + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute") @@ -547,11 +560,11 @@ def _delete_subscriptions(self): if not subscriptions: return False for sub in subscriptions: - if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'): - self.subscriptions_deleted.append(sub['SubscriptionArn']) + if sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted"): + self.subscriptions_deleted.append(sub["SubscriptionArn"]) if not self.check_mode: try: - self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn']) + self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") return True @@ -566,7 +579,7 @@ def _delete_topic(self): return True def _name_is_arn(self): - return self.name.startswith('arn:') + return self.name.startswith("arn:") def ensure_ok(self): changed = False @@ -576,7 +589,9 @@ def ensure_ok(self): if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_attrs() elif self.display_name or self.policy or self.delivery_policy: - self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") + 
self.module.fail_json( + msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account" + ) changed |= self._set_topic_subs() self._init_desired_subscription_attributes() if self.topic_arn in list_topics(self.connection, self.module): @@ -593,7 +608,9 @@ def ensure_gone(self): self.populate_topic_arn() if self.topic_arn: if self.topic_arn not in list_topics(self.connection, self.module): - self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe") + self.module.fail_json( + msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe" + ) changed = self._delete_subscriptions() changed |= self._delete_topic() return changed @@ -604,7 +621,7 @@ def populate_topic_arn(self): return name = self.name - if self.topic_type == 'fifo' and not name.endswith('.fifo'): + if self.topic_type == "fifo" and not name.endswith(".fifo"): name += ".fifo" self.topic_arn = topic_arn_lookup(self.connection, self.module, name) @@ -613,83 +630,87 @@ def main(): # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict( - minDelayTarget=dict(type='int', required=True), - maxDelayTarget=dict(type='int', required=True), - numRetries=dict(type='int', required=True), - numMaxDelayRetries=dict(type='int', required=True), - numMinDelayRetries=dict(type='int', required=True), - numNoDelayRetries=dict(type='int', required=True), - backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']), + minDelayTarget=dict(type="int", required=True), + maxDelayTarget=dict(type="int", required=True), + numRetries=dict(type="int", required=True), + numMaxDelayRetries=dict(type="int", required=True), + numMinDelayRetries=dict(type="int", required=True), + numNoDelayRetries=dict(type="int", required=True), + backoffFunction=dict(type="str", required=True, choices=["arithmetic", "exponential", "geometric", "linear"]), ) http_delivery_args = dict( - defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args), - disableSubscriptionOverrides=dict(type='bool', required=False), + defaultHealthyRetryPolicy=dict(type="dict", required=True, options=http_retry_args), + disableSubscriptionOverrides=dict(type="bool", required=False), defaultThrottlePolicy=dict( - type='dict', required=False, + type="dict", + required=False, options=dict( - maxReceivesPerSecond=dict(type='int', required=True), + maxReceivesPerSecond=dict(type="int", required=True), ), ), ) delivery_args = dict( - http=dict(type='dict', required=False, options=http_delivery_args), + http=dict(type="dict", required=False, options=http_delivery_args), ) argument_spec = dict( name=dict(required=True), - topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - state=dict(default='present', choices=['present', 'absent']), + topic_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + state=dict(default="present", choices=["present", "absent"]), display_name=dict(), - policy=dict(type='dict'), - delivery_policy=dict(type='dict', options=delivery_args), - subscriptions=dict(default=[], type='list', elements='dict'), - purge_subscriptions=dict(type='bool', default=True), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - content_based_deduplication=dict(choices=['enabled', 'disabled']) + policy=dict(type="dict"), + 
delivery_policy=dict(type="dict", options=delivery_args), + subscriptions=dict(default=[], type="list", elements="dict"), + purge_subscriptions=dict(type="bool", default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + content_based_deduplication=dict(choices=["enabled", "disabled"]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) - - name = module.params.get('name') - topic_type = module.params.get('topic_type') - state = module.params.get('state') - display_name = module.params.get('display_name') - policy = module.params.get('policy') - delivery_policy = module.params.get('delivery_policy') - subscriptions = module.params.get('subscriptions') - purge_subscriptions = module.params.get('purge_subscriptions') - content_based_deduplication = module.params.get('content_based_deduplication') + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + name = module.params.get("name") + topic_type = module.params.get("topic_type") + state = module.params.get("state") + display_name = module.params.get("display_name") + policy = module.params.get("policy") + delivery_policy = module.params.get("delivery_policy") + subscriptions = module.params.get("subscriptions") + purge_subscriptions = module.params.get("purge_subscriptions") + content_based_deduplication = module.params.get("content_based_deduplication") check_mode = module.check_mode - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - sns_topic = SnsTopicManager(module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode) - - if state == 'present': + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + sns_topic = SnsTopicManager( + module, + name, + topic_type, + state, + display_name, + policy, + delivery_policy, + subscriptions, + purge_subscriptions, + tags, + purge_tags, + content_based_deduplication, + check_mode, + ) + + if state == "present": changed = sns_topic.ensure_ok() - elif state == 'absent': + elif state == "absent": changed = sns_topic.ensure_gone() - sns_facts = dict(changed=changed, - sns_arn=sns_topic.topic_arn, - sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn)) + sns_facts = dict( + changed=changed, + sns_arn=sns_topic.topic_arn, + sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn), + ) module.exit_json(**sns_facts) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sns_topic_info.py b/sns_topic_info.py index 2fcde33e94c..51ec8372eac 100644 --- a/sns_topic_info.py +++ b/sns_topic_info.py @@ -146,18 +146,17 @@ def main(): argument_spec = dict( - topic_arn=dict(type='str', required=False), + topic_arn=dict(type="str", required=False), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - topic_arn = module.params.get('topic_arn') + topic_arn = module.params.get("topic_arn") try: - connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("sns", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to 
AWS.") if topic_arn: results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn)) @@ -167,5 +166,5 @@ def main(): module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sqs_queue.py b/sqs_queue.py index 4a23f18871b..ad3ce68a7ce 100644 --- a/sqs_queue.py +++ b/sqs_queue.py @@ -277,18 +277,18 @@ def get_queue_name(module, is_fifo=False): - name = module.params.get('name') - if not is_fifo or name.endswith('.fifo'): + name = module.params.get("name") + if not is_fifo or name.endswith(".fifo"): return name - return name + '.fifo' + return name + ".fifo" # NonExistentQueue is explicitly expected when a queue doesn't exist @AWSRetry.jittered_backoff() def get_queue_url(client, name): try: - return client.get_queue_url(QueueName=name)['QueueUrl'] - except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'): + return client.get_queue_url(QueueName=name)["QueueUrl"] + except is_boto3_error_code("AWS.SimpleQueueService.NonExistentQueue"): return None @@ -296,13 +296,13 @@ def describe_queue(client, queue_url): """ Describe a queue in snake format """ - attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)["Attributes"] description = dict(attributes) - description.pop('Policy', None) - description.pop('RedrivePolicy', None) + description.pop("Policy", None) + description.pop("RedrivePolicy", None) description = camel_dict_to_snake_dict(description) - description['policy'] = attributes.get('Policy', None) - description['redrive_policy'] = attributes.get('RedrivePolicy', None) + description["policy"] = attributes.get("Policy", None) + description["redrive_policy"] = attributes.get("RedrivePolicy", None) # Boto3 returns everything as a string, convert them back to integers/dicts if # that's what we expected.
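# A small sketch of that string-to-native conversion against plain boto3 (the queue
# URL is a placeholder): get_queue_attributes returns every value as a string, so
# numeric and JSON attributes need explicit coercion.
import json

import boto3

sqs = boto3.client("sqs")
attrs = sqs.get_queue_attributes(
    QueueUrl="https://sqs.us-east-1.amazonaws.com/123456789012/example",
    AttributeNames=["All"],
)["Attributes"]

visibility_timeout = int(attrs["VisibilityTimeout"])  # "30" -> 30
policy = json.loads(attrs["Policy"]) if "Policy" in attrs else None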
@@ -310,12 +310,12 @@ def describe_queue(client, queue_url): if value is None: continue - if key in ['policy', 'redrive_policy']: + if key in ["policy", "redrive_policy"]: policy = json.loads(value) description[key] = policy continue - if key == 'content_based_deduplication': + if key == "content_based_deduplication": try: description[key] = bool(value) except (TypeError, ValueError): @@ -331,49 +331,48 @@ def describe_queue(client, queue_url): def create_or_update_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') - kms_master_key_id = module.params.get('kms_master_key_id') + is_fifo = module.params.get("queue_type") == "fifo" + kms_master_key_id = module.params.get("kms_master_key_id") queue_name = get_queue_name(module, is_fifo) result = dict( name=queue_name, - region=module.params.get('region'), + region=module.params.get("region"), changed=False, ) queue_url = get_queue_url(client, queue_name) - result['queue_url'] = queue_url + result["queue_url"] = queue_url # Create a dict() to hold attributes that will be passed to boto3 create_attributes = {} if not queue_url: if is_fifo: - create_attributes['FifoQueue'] = "True" + create_attributes["FifoQueue"] = "True" if kms_master_key_id: - create_attributes['KmsMasterKeyId'] = kms_master_key_id - result['changed'] = True + create_attributes["KmsMasterKeyId"] = kms_master_key_id + result["changed"] = True if module.check_mode: return result - queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl'] + queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)["QueueUrl"] changed, arn = update_sqs_queue(module, client, queue_url) - result['changed'] |= changed - result['queue_arn'] = arn + result["changed"] |= changed + result["queue_arn"] = arn changed, tags = update_tags(client, queue_url, module) - result['changed'] |= changed - result['tags'] = tags + result["changed"] |= changed + result["tags"] = tags result.update(describe_queue(client, queue_url)) COMPATABILITY_KEYS = dict( - delay_seconds='delivery_delay', - receive_message_wait_time_seconds='receive_message_wait_time', - visibility_timeout='default_visibility_timeout', - kms_data_key_reuse_period_seconds='kms_data_key_reuse_period', + delay_seconds="delivery_delay", + receive_message_wait_time_seconds="receive_message_wait_time", + visibility_timeout="default_visibility_timeout", + kms_data_key_reuse_period_seconds="kms_data_key_reuse_period", ) for key in list(result.keys()): - # The return values changed between boto and boto3, add the old keys too # for backwards compatibility return_name = COMPATABILITY_KEYS.get(key) @@ -386,30 +385,32 @@ def create_or_update_sqs_queue(client, module): def update_sqs_queue(module, client, queue_url): check_mode = module.check_mode changed = False - existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes'] + existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)[ + "Attributes" + ] new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True) attributes_to_set = dict() # Boto3 SQS deals with policies as strings, we want to deal with them as # dicts - if module.params.get('policy') is not None: - policy = module.params.get('policy') - current_value = existing_attributes.get('Policy', '{}') + if module.params.get("policy") is not None: + policy = module.params.get("policy") + 
current_value = existing_attributes.get("Policy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['Policy'] = json.dumps(policy) + attributes_to_set["Policy"] = json.dumps(policy) changed = True - if module.params.get('redrive_policy') is not None: - policy = module.params.get('redrive_policy') - current_value = existing_attributes.get('RedrivePolicy', '{}') + if module.params.get("redrive_policy") is not None: + policy = module.params.get("redrive_policy") + current_value = existing_attributes.get("RedrivePolicy", "{}") current_policy = json.loads(current_value) if compare_policies(current_policy, policy): - attributes_to_set['RedrivePolicy'] = json.dumps(policy) + attributes_to_set["RedrivePolicy"] = json.dumps(policy) changed = True for attribute, value in existing_attributes.items(): # We handle these as a special case because they're IAM policies - if attribute in ['Policy', 'RedrivePolicy']: + if attribute in ["Policy", "RedrivePolicy"]: continue if attribute not in new_attributes.keys(): @@ -434,23 +435,19 @@ def update_sqs_queue(module, client, queue_url): if changed and not check_mode: client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True) - return changed, existing_attributes.get('queue_arn') + return changed, existing_attributes.get("queue_arn") def delete_sqs_queue(client, module): - is_fifo = (module.params.get('queue_type') == 'fifo') + is_fifo = module.params.get("queue_type") == "fifo" queue_name = get_queue_name(module, is_fifo) - result = dict( - name=queue_name, - region=module.params.get('region'), - changed=False - ) + result = dict(name=queue_name, region=module.params.get("region"), changed=False) queue_url = get_queue_url(client, queue_name) if not queue_url: return result - result['changed'] = bool(queue_url) + result["changed"] = bool(queue_url) if not module.check_mode: AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url) @@ -458,13 +455,13 @@ def delete_sqs_queue(client, module): def update_tags(client, queue_url, module): - new_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') + new_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") if new_tags is None: return False, {} try: - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags'] + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: existing_tags = {} @@ -475,7 +472,7 @@ def update_tags(client, queue_url, module): client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True) if tags_to_add: client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add) - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {}) + existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get("Tags", {}) else: existing_tags = new_tags @@ -484,41 +481,40 @@ def update_tags(client, queue_url, module): def main(): - argument_spec = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']), - delay_seconds=dict(type='int', aliases=['delivery_delay']), - maximum_message_size=dict(type='int'), - message_retention_period=dict(type='int'), - policy=dict(type='dict'), - 
receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']), - redrive_policy=dict(type='dict'), - visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']), - kms_master_key_id=dict(type='str'), - fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]), - deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']), - kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False), - content_based_deduplication=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + queue_type=dict(type="str", default="standard", choices=["standard", "fifo"]), + delay_seconds=dict(type="int", aliases=["delivery_delay"]), + maximum_message_size=dict(type="int"), + message_retention_period=dict(type="int"), + policy=dict(type="dict"), + receive_message_wait_time_seconds=dict(type="int", aliases=["receive_message_wait_time"]), + redrive_policy=dict(type="dict"), + visibility_timeout=dict(type="int", aliases=["default_visibility_timeout"]), + kms_master_key_id=dict(type="str"), + fifo_throughput_limit=dict(type="str", choices=["perQueue", "perMessageGroupId"]), + deduplication_scope=dict(type="str", choices=["queue", "messageGroup"]), + kms_data_key_reuse_period_seconds=dict(type="int", aliases=["kms_data_key_reuse_period"], no_log=False), + content_based_deduplication=dict(type="bool"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params.get('state') - retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue']) + state = module.params.get("state") + retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["AWS.SimpleQueueService.NonExistentQueue"]) try: - client = module.client('sqs', retry_decorator=retry_decorator) - if state == 'present': + client = module.client("sqs", retry_decorator=retry_decorator) + if state == "present": result = create_or_update_sqs_queue(client, module) - elif state == 'absent': + elif state == "absent": result = delete_sqs_queue(client, module) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to control sqs queue') + module.fail_json_aws(e, msg="Failed to control sqs queue") else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ssm_inventory_info.py b/ssm_inventory_info.py index 4242596f128..c5b84909738 100644 --- a/ssm_inventory_info.py +++ b/ssm_inventory_info.py @@ -80,15 +80,9 @@ def get_ssm_inventory(connection, filters): def execute_module(module, connection): - instance_id = module.params.get("instance_id") try: - filters = [ - { - "Key": "AWS:InstanceInformation.InstanceId", - "Values": [instance_id] - } - ] + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [instance_id]}] response = get_ssm_inventory(connection, filters) entities = response.get("Entities", []) diff --git a/ssm_parameter.py b/ssm_parameter.py index 493d2b294c4..aefafca009c 100644 --- a/ssm_parameter.py +++ b/ssm_parameter.py @@ -264,7 +264,7 @@ class ParameterWaiterFactory(BaseWaiterFactory): def __init__(self, module): - client = 
module.client('ssm') + client = module.client("ssm") super(ParameterWaiterFactory, self).__init__(module, client) @property @@ -272,22 +272,24 @@ def _waiter_model_data(self): data = super(ParameterWaiterFactory, self)._waiter_model_data ssm_data = dict( parameter_exists=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='error', expected='ParameterNotFound'), - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - ] + dict(state="retry", matcher="error", expected="ParameterNotFound"), + dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) == `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), + ], ), parameter_deleted=dict( - operation='DescribeParameters', - delay=1, maxAttempts=20, + operation="DescribeParameters", + delay=1, + maxAttempts=20, acceptors=[ - dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'), - dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'), - dict(state='success', matcher='error', expected='ParameterNotFound'), - ] + dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), + dict(state="success", matcher="path", expected=True, argument="length(Parameters[]) == `0`"), + dict(state="success", matcher="error", expected="ParameterNotFound"), + ], ), ) data.update(ssm_data) @@ -298,10 +300,10 @@ def _wait_exists(client, module, name): if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_exists') + waiter = wf.get_waiter("parameter_exists") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to exist") @@ -316,7 +318,7 @@ def _wait_updated(client, module, name, version): for x in range(1, 10): try: parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}]) - if parameter.get('Version', 0) > version: + if parameter.get("Version", 0) > version: return except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update") @@ -327,10 +329,10 @@ def _wait_deleted(client, module, name): if module.check_mode: return wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter('parameter_deleted') + waiter = wf.get_waiter("parameter_deleted") try: waiter.wait( - ParameterFilters=[{'Key': 'Name', "Values": [name]}], + ParameterFilters=[{"Key": "Name", "Values": [name]}], ) except botocore.exceptions.WaiterError: module.warn("Timeout waiting for parameter to be deleted") @@ -340,24 +342,27 @@ def _wait_deleted(client, module, name): def tag_parameter(client, module, parameter_name, tags): try: - return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, Tags=tags) + return client.add_tags_to_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, Tags=tags + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to add tag(s) to parameter") def untag_parameter(client, module,
parameter_name, tag_keys): try: - return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name, TagKeys=tag_keys) + return client.remove_tags_from_resource( + aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, TagKeys=tag_keys + ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter") def get_parameter_tags(client, module, parameter_name): try: - tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter', - ResourceId=parameter_name)['TagList'] + tags = client.list_tags_for_resource(aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name)[ + "TagList" + ] tags_dict = boto3_tag_list_to_ansible_dict(tags) return tags_dict except (BotoCoreError, ClientError) as e: @@ -372,14 +377,12 @@ def update_parameter_tags(client, module, parameter_name, supplied_tags): return False, response current_tags = get_parameter_tags(client, module, parameter_name) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, - module.params.get('purge_tags')) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, module.params.get("purge_tags")) if tags_to_add: if module.check_mode: return True, response - response = tag_parameter(client, module, parameter_name, - ansible_dict_to_boto3_tag_list(tags_to_add)) + response = tag_parameter(client, module, parameter_name, ansible_dict_to_boto3_tag_list(tags_to_add)) changed = True if tags_to_remove: if module.check_mode: @@ -407,16 +410,16 @@ def update_parameter(client, module, **args): @AWSRetry.jittered_backoff() def describe_parameter(client, module, **args): - paginator = client.get_paginator('describe_parameters') + paginator = client.get_paginator("describe_parameters") existing_parameter = paginator.paginate(**args).build_full_result() - if not existing_parameter['Parameters']: + if not existing_parameter["Parameters"]: return None - tags_dict = get_parameter_tags(client, module, module.params.get('name')) - existing_parameter['Parameters'][0]['tags'] = tags_dict + tags_dict = get_parameter_tags(client, module, module.params.get("name")) + existing_parameter["Parameters"][0]["tags"] = tags_dict - return existing_parameter['Parameters'][0] + return existing_parameter["Parameters"][0] def create_update_parameter(client, module): @@ -424,82 +427,78 @@ def create_update_parameter(client, module): existing_parameter = None response = {} - args = dict( - Name=module.params.get('name'), - Type=module.params.get('string_type'), - Tier=module.params.get('tier') - ) + args = dict(Name=module.params.get("name"), Type=module.params.get("string_type"), Tier=module.params.get("tier")) - if (module.params.get('overwrite_value') in ("always", "changed")): + if module.params.get("overwrite_value") in ("always", "changed"): args.update(Overwrite=True) else: args.update(Overwrite=False) - if module.params.get('value') is not None: - args.update(Value=module.params.get('value')) + if module.params.get("value") is not None: + args.update(Value=module.params.get("value")) - if module.params.get('description'): - args.update(Description=module.params.get('description')) + if module.params.get("description"): + args.update(Description=module.params.get("description")) - if module.params.get('string_type') == 'SecureString': - args.update(KeyId=module.params.get('key_id')) + if module.params.get("string_type") == "SecureString": + args.update(KeyId=module.params.get("key_id")) try: - 
existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True) + existing_parameter = client.get_parameter(aws_retry=True, Name=args["Name"], WithDecryption=True) except botocore.exceptions.ClientError: pass except botocore.exceptions.BotoCoreError as e: module.fail_json_aws(e, msg="fetching parameter") if existing_parameter: - original_version = existing_parameter['Parameter']['Version'] - if 'Value' not in args: - args['Value'] = existing_parameter['Parameter']['Value'] + original_version = existing_parameter["Parameter"]["Version"] + if "Value" not in args: + args["Value"] = existing_parameter["Parameter"]["Value"] - if (module.params.get('overwrite_value') == 'always'): + if module.params.get("overwrite_value") == "always": (changed, response) = update_parameter(client, module, **args) - elif (module.params.get('overwrite_value') == 'changed'): - if existing_parameter['Parameter']['Type'] != args['Type']: + elif module.params.get("overwrite_value") == "changed": + if existing_parameter["Parameter"]["Type"] != args["Type"]: (changed, response) = update_parameter(client, module, **args) - elif existing_parameter['Parameter']['Value'] != args['Value']: + elif existing_parameter["Parameter"]["Value"] != args["Value"]: (changed, response) = update_parameter(client, module, **args) - elif args.get('Description'): + elif args.get("Description"): # Description field not available from get_parameter function so get it from describe_parameters try: describe_existing_parameter = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}]) + client, module, ParameterFilters=[{"Key": "Name", "Values": [args["Name"]]}] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter.get('Description') != args['Description']: + if describe_existing_parameter.get("Description") != args["Description"]: (changed, response) = update_parameter(client, module, **args) if changed: - _wait_updated(client, module, module.params.get('name'), original_version) + _wait_updated(client, module, module.params.get("name"), original_version) # Handle tag updates for existing parameters - if module.params.get('overwrite_value') != 'never': + if module.params.get("overwrite_value") != "never": tags_changed, tags_response = update_parameter_tags( - client, module, existing_parameter['Parameter']['Name'], - module.params.get('tags')) + client, module, existing_parameter["Parameter"]["Name"], module.params.get("tags") + ) changed = changed or tags_changed if tags_response: - response['tag_updates'] = tags_response + response["tag_updates"] = tags_response else: # Add tags in initial creation request - if module.params.get('tags'): - args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags'))) + if module.params.get("tags"): + args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get("tags"))) # Overwrite=True conflicts with tags and is not needed for new param args.update(Overwrite=False) (changed, response) = update_parameter(client, module, **args) - _wait_exists(client, module, module.params.get('name')) + _wait_exists(client, module, module.params.get("name")) return changed, response @@ -508,8 +507,8 @@ def delete_parameter(client, module): response = {} try: - existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True) - except 
is_boto3_error_code('ParameterNotFound'): + existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get("name"), WithDecryption=True) + except is_boto3_error_code("ParameterNotFound"): return False, {} except botocore.exceptions.ClientError: # If we can't describe the parameter we may still be able to delete it @@ -523,23 +522,23 @@ def delete_parameter(client, module): return True, {} try: - response = client.delete_parameter( - aws_retry=True, - Name=module.params.get('name') - ) - except is_boto3_error_code('ParameterNotFound'): + response = client.delete_parameter(aws_retry=True, Name=module.params.get("name")) + except is_boto3_error_code("ParameterNotFound"): return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="deleting parameter") - _wait_deleted(client, module, module.params.get('name')) + _wait_deleted(client, module, module.params.get("name")) return True, response def setup_client(module): retry_decorator = AWSRetry.jittered_backoff() - connection = module.client('ssm', retry_decorator=retry_decorator) + connection = module.client("ssm", retry_decorator=retry_decorator) return connection @@ -548,14 +547,14 @@ def setup_module_object(): name=dict(required=True), description=dict(), value=dict(required=False, no_log=True), - state=dict(default='present', choices=['present', 'absent']), - string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']), - decryption=dict(default=True, type='bool'), + state=dict(default="present", choices=["present", "absent"]), + string_type=dict(default="String", choices=["String", "StringList", "SecureString"], aliases=["type"]), + decryption=dict(default=True, type="bool"), key_id=dict(default="alias/aws/ssm"), - overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']), - tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + overwrite_value=dict(default="changed", choices=["never", "changed", "always"]), + tier=dict(default="Standard", choices=["Standard", "Advanced", "Intelligent-Tiering"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) return AnsibleAWSModule( @@ -566,7 +565,7 @@ def main(): module = setup_module_object() - state = module.params.get('state') + state = module.params.get("state") client = setup_client(module) invocations = { @@ -579,18 +578,17 @@ try: parameter_metadata = describe_parameter( - client, module, - ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}]) - except is_boto3_error_code('ParameterNotFound'): + client, module, ParameterFilters=[{"Key": "Name", "Values": [module.params.get("name")]}] + ) + except is_boto3_error_code("ParameterNotFound"): return False, {} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe parameter") if parameter_metadata: - result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata, - ignore_list=['tags']) + result["parameter_metadata"] = camel_dict_to_snake_dict(parameter_metadata, ignore_list=["tags"]) module.exit_json(changed=changed,
**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py index 0f4b3ec1397..4bbd1503ab8 100644 --- a/stepfunctions_state_machine.py +++ b/stepfunctions_state_machine.py @@ -101,36 +101,36 @@ def manage_state_machine(state, sfn_client, module): state_machine_arn = get_state_machine_arn(sfn_client, module) - if state == 'present': + if state == "present": if state_machine_arn is None: create(sfn_client, module) else: update(state_machine_arn, sfn_client, module) - elif state == 'absent': + elif state == "absent": if state_machine_arn is not None: remove(state_machine_arn, sfn_client, module) - check_mode(module, msg='State is up-to-date.') + check_mode(module, msg="State is up-to-date.") module.exit_json(changed=False, state_machine_arn=state_machine_arn) def create(sfn_client, module): - check_mode(module, msg='State machine would be created.', changed=True) + check_mode(module, msg="State machine would be created.", changed=True) - tags = module.params.get('tags') - sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else [] + tags = module.params.get("tags") + sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="key", tag_value_key_name="value") if tags else [] state_machine = sfn_client.create_state_machine( - name=module.params.get('name'), - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn'), - tags=sfn_tags + name=module.params.get("name"), + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), + tags=sfn_tags, ) - module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn')) + module.exit_json(changed=True, state_machine_arn=state_machine.get("stateMachineArn")) def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg="State machine would be deleted: {0}".format(state_machine_arn), changed=True) sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) module.exit_json(changed=True, state_machine_arn=state_machine_arn) @@ -140,29 +140,28 @@ def update(state_machine_arn, sfn_client, module): tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True) + check_mode(module, msg="State machine would be updated: {0}".format(state_machine_arn), changed=True) sfn_client.update_state_machine( stateMachineArn=state_machine_arn, - definition=module.params.get('definition'), - roleArn=module.params.get('role_arn') - ) - sfn_client.untag_resource( - resourceArn=state_machine_arn, - tagKeys=tags_to_remove + definition=module.params.get("definition"), + roleArn=module.params.get("role_arn"), ) + sfn_client.untag_resource(resourceArn=state_machine_arn, tagKeys=tags_to_remove) sfn_client.tag_resource( resourceArn=state_machine_arn, - tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value') + tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name="key", tag_value_key_name="value"), ) module.exit_json(changed=True, state_machine_arn=state_machine_arn) def compare_tags(state_machine_arn, sfn_client, module): - new_tags = module.params.get('tags') - 
current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags') - return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags')) + new_tags = module.params.get("tags") + current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get("tags") + return compare_aws_tags( + boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get("purge_tags") + ) def params_changed(state_machine_arn, sfn_client, module): @@ -171,7 +170,9 @@ def params_changed(state_machine_arn, sfn_client, module): from the existing state machine parameters. """ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn) - return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn') + return current.get("definition") != module.params.get("definition") or current.get("roleArn") != module.params.get( + "role_arn" + ) def get_state_machine_arn(sfn_client, module): @@ -179,42 +180,42 @@ def get_state_machine_arn(sfn_client, module): Finds the state machine ARN based on the name parameter. Returns None if there is no state machine with this name. """ - target_name = module.params.get('name') - all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines') + target_name = module.params.get("name") + all_state_machines = sfn_client.list_state_machines(aws_retry=True).get("stateMachines") for state_machine in all_state_machines: - if state_machine.get('name') == target_name: - return state_machine.get('stateMachineArn') + if state_machine.get("name") == target_name: + return state_machine.get("stateMachineArn") -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - name=dict(type='str', required=True), - definition=dict(type='json'), - role_arn=dict(type='str'), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(default=None, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + name=dict(type="str", required=True), + definition=dict(type="json"), + role_arn=dict(type="str"), + state=dict(choices=["present", "absent"], default="present"), + tags=dict(default=None, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])], - supports_check_mode=True + required_if=[("state", "present", ["role_arn"]), ("state", "present", ["definition"])], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5)) - state = module.params.get('state') + sfn_client = module.client("stepfunctions", retry_decorator=AWSRetry.jittered_backoff(retries=5)) + state = module.params.get("state") try: manage_state_machine(state, sfn_client, module) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to manage state machine') + module.fail_json_aws(e, msg="Failed to manage state machine") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py index 0b6858fbf42..b7a9f7efba8 100644 --- a/stepfunctions_state_machine_execution.py +++ 
b/stepfunctions_state_machine_execution.py @@ -100,95 +100,90 @@ def start_execution(module, sfn_client): - ''' + """ start_execution uses execution name to determine if a previous execution already exists. If an execution by the provided name exists, call client.start_execution will not be called. - ''' + """ - state_machine_arn = module.params.get('state_machine_arn') - name = module.params.get('name') - execution_input = module.params.get('execution_input') + state_machine_arn = module.params.get("state_machine_arn") + name = module.params.get("name") + execution_input = module.params.get("execution_input") try: # list_executions is eventually consistent - page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn) + page_iterators = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=state_machine_arn) - for execution in page_iterators.build_full_result()['executions']: - if name == execution['name']: - check_mode(module, msg='State machine execution already exists.', changed=False) + for execution in page_iterators.build_full_result()["executions"]: + if name == execution["name"]: + check_mode(module, msg="State machine execution already exists.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be started.', changed=True) - res_execution = sfn_client.start_execution( - stateMachineArn=state_machine_arn, - name=name, - input=execution_input - ) - except is_boto3_error_code('ExecutionAlreadyExists'): + check_mode(module, msg="State machine execution would be started.", changed=True) + res_execution = sfn_client.start_execution(stateMachineArn=state_machine_arn, name=name, input=execution_input) + except is_boto3_error_code("ExecutionAlreadyExists"): # this will never be executed anymore module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to start execution.") module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution)) def stop_execution(module, sfn_client): - - cause = module.params.get('cause') - error = module.params.get('error') - execution_arn = module.params.get('execution_arn') + cause = module.params.get("cause") + error = module.params.get("error") + execution_arn = module.params.get("execution_arn") try: # describe_execution is eventually consistent - execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status'] - if execution_status != 'RUNNING': - check_mode(module, msg='State machine execution is not running.', changed=False) + execution_status = sfn_client.describe_execution(executionArn=execution_arn)["status"] + if execution_status != "RUNNING": + check_mode(module, msg="State machine execution is not running.", changed=False) module.exit_json(changed=False) - check_mode(module, msg='State machine execution would be stopped.', changed=True) - res = sfn_client.stop_execution( - executionArn=execution_arn, - cause=cause, - error=error - ) + check_mode(module, msg="State machine execution would be stopped.", changed=True) + res = sfn_client.stop_execution(executionArn=execution_arn, cause=cause, error=error) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to stop execution.") 
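    # An illustrative polling sketch (assumes the sfn_client and execution_arn
    # names already bound in this function): stop_execution only requests the
    # abort, and describe_execution is eventually consistent, so a caller that
    # must observe the terminal state can poll until the status leaves RUNNING.
    #
    #     import time
    #     while sfn_client.describe_execution(executionArn=execution_arn)["status"] == "RUNNING":
    #         time.sleep(5)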
module.exit_json(changed=True, **camel_dict_to_snake_dict(res)) -def check_mode(module, msg='', changed=False): +def check_mode(module, msg="", changed=False): if module.check_mode: module.exit_json(changed=changed, output=msg) def main(): module_args = dict( - action=dict(choices=['start', 'stop'], default='start'), - name=dict(type='str'), - execution_input=dict(type='json', default={}), - state_machine_arn=dict(type='str'), - cause=dict(type='str', default=''), - error=dict(type='str', default=''), - execution_arn=dict(type='str') + action=dict(choices=["start", "stop"], default="start"), + name=dict(type="str"), + execution_input=dict(type="json", default={}), + state_machine_arn=dict(type="str"), + cause=dict(type="str", default=""), + error=dict(type="str", default=""), + execution_arn=dict(type="str"), ) module = AnsibleAWSModule( argument_spec=module_args, - required_if=[('action', 'start', ['name', 'state_machine_arn']), - ('action', 'stop', ['execution_arn']), - ], - supports_check_mode=True + required_if=[ + ("action", "start", ["name", "state_machine_arn"]), + ("action", "stop", ["execution_arn"]), + ], + supports_check_mode=True, ) - sfn_client = module.client('stepfunctions') + sfn_client = module.client("stepfunctions") - action = module.params.get('action') + action = module.params.get("action") if action == "start": start_execution(module, sfn_client) else: stop_execution(module, sfn_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/storagegateway_info.py b/storagegateway_info.py index 854d1cbb0d8..5ff72399786 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -187,21 +187,21 @@ class SGWInformationManager(object): def __init__(self, client, module): self.client = client self.module = module - self.name = self.module.params.get('name') + self.name = self.module.params.get("name") def fetch(self): gateways = self.list_gateways() for gateway in gateways: - if self.module.params.get('gather_local_disks'): + if self.module.params.get("gather_local_disks"): self.list_local_disks(gateway) # File share gateway - if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'): + if gateway["gateway_type"] == "FILE_S3" and self.module.params.get("gather_file_shares"): self.list_gateway_file_shares(gateway) # Volume tape gateway - elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'): + elif gateway["gateway_type"] == "VTL" and self.module.params.get("gather_tapes"): self.list_gateway_vtl(gateway) # iSCSI gateway - elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'): + elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get("gather_volumes"): self.list_gateway_volumes(gateway) self.module.exit_json(gateways=gateways) @@ -209,12 +209,13 @@ def fetch(self): """ List all storage gateways for the AWS endpoint. """ + def list_gateways(self): try: - paginator = self.client.get_paginator('list_gateways') + paginator = self.client.get_paginator("list_gateways") response = paginator.paginate( PaginationConfig={ - 'PageSize': 100, + "PageSize": 100, } ).build_full_result() @@ -231,6 +232,7 @@ def list_gateways(self): Read file share objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. 
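    Returns the pagination marker from the AWS response when more file shares
    remain, or None after the last page, so the caller can loop until all
    shares have been collected.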
""" + @staticmethod def _read_gateway_fileshare_response(fileshares, aws_reponse): for share in aws_reponse["FileShareInfoList"]: @@ -244,22 +246,16 @@ def _read_gateway_fileshare_response(fileshares, aws_reponse): """ List file shares attached to AWS storage gateway when in S3 mode. """ + def list_gateway_file_shares(self, gateway): try: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100) gateway["file_shares"] = [] marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) while marker is not None: - response = self.client.list_file_shares( - GatewayARN=gateway["gateway_arn"], - Marker=marker, - Limit=100 - ) + response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100) marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) except (BotoCoreError, ClientError) as e: @@ -268,10 +264,13 @@ def list_gateway_file_shares(self, gateway): """ List storage gateway local disks """ + def list_local_disks(self, gateway): try: - gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in - self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']] + gateway["local_disks"] = [ + camel_dict_to_snake_dict(disk) + for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])["Disks"] + ] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") @@ -279,6 +278,7 @@ def list_local_disks(self, gateway): Read tape objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. """ + @staticmethod def _read_gateway_tape_response(tapes, aws_response): for tape in aws_response["TapeInfos"]: @@ -292,20 +292,16 @@ def _read_gateway_tape_response(tapes, aws_response): """ List VTL & VTS attached to AWS storage gateway in VTL mode """ + def list_gateway_vtl(self, gateway): try: - response = self.client.list_tapes( - Limit=100 - ) + response = self.client.list_tapes(Limit=100) gateway["tapes"] = [] marker = self._read_gateway_tape_response(gateway["tapes"], response) while marker is not None: - response = self.client.list_tapes( - Marker=marker, - Limit=100 - ) + response = self.client.list_tapes(Marker=marker, Limit=100) marker = self._read_gateway_tape_response(gateway["tapes"], response) except (BotoCoreError, ClientError) as e: @@ -314,14 +310,15 @@ def list_gateway_vtl(self, gateway): """ List volumes attached to AWS storage gateway in CACHED or STORAGE mode """ + def list_gateway_volumes(self, gateway): try: - paginator = self.client.get_paginator('list_volumes') + paginator = self.client.get_paginator("list_volumes") response = paginator.paginate( GatewayARN=gateway["gateway_arn"], PaginationConfig={ - 'PageSize': 100, - } + "PageSize": 100, + }, ).build_full_result() gateway["volumes"] = [] @@ -339,10 +336,10 @@ def list_gateway_volumes(self, gateway): def main(): argument_spec = dict( - gather_local_disks=dict(type='bool', default=True), - gather_tapes=dict(type='bool', default=True), - gather_file_shares=dict(type='bool', default=True), - gather_volumes=dict(type='bool', default=True) + gather_local_disks=dict(type="bool", default=True), + gather_tapes=dict(type="bool", default=True), + gather_file_shares=dict(type="bool", default=True), + gather_volumes=dict(type="bool", default=True), ) module = AnsibleAWSModule( @@ -350,13 +347,13 
@@ def main(): supports_check_mode=True, ) - client = module.client('storagegateway') + client = module.client("storagegateway") if client is None: # this should never happen - module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.') + module.fail_json(msg="Unknown error, failed to create storagegateway client, no information available.") SGWInformationManager(client, module).fetch() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sts_assume_role.py b/sts_assume_role.py index c53bfa9c978..4a4860657cf 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -112,15 +112,14 @@ def _parse_response(response): - credentials = response.get('Credentials', {}) - user = response.get('AssumedRoleUser', {}) + credentials = response.get("Credentials", {}) + user = response.get("AssumedRoleUser", {}) sts_cred = { - 'access_key': credentials.get('AccessKeyId'), - 'secret_key': credentials.get('SecretAccessKey'), - 'session_token': credentials.get('SessionToken'), - 'expiration': credentials.get('Expiration') - + "access_key": credentials.get("AccessKeyId"), + "secret_key": credentials.get("SecretAccessKey"), + "session_token": credentials.get("SessionToken"), + "expiration": credentials.get("Expiration"), } sts_user = camel_dict_to_snake_dict(user) return sts_cred, sts_user @@ -128,13 +127,13 @@ def _parse_response(response): def assume_role_policy(connection, module): params = { - 'RoleArn': module.params.get('role_arn'), - 'RoleSessionName': module.params.get('role_session_name'), - 'Policy': module.params.get('policy'), - 'DurationSeconds': module.params.get('duration_seconds'), - 'ExternalId': module.params.get('external_id'), - 'SerialNumber': module.params.get('mfa_serial_number'), - 'TokenCode': module.params.get('mfa_token') + "RoleArn": module.params.get("role_arn"), + "RoleSessionName": module.params.get("role_session_name"), + "Policy": module.params.get("policy"), + "DurationSeconds": module.params.get("duration_seconds"), + "ExternalId": module.params.get("external_id"), + "SerialNumber": module.params.get("mfa_serial_number"), + "TokenCode": module.params.get("mfa_token"), } changed = False @@ -154,19 +153,19 @@ def main(): argument_spec = dict( role_arn=dict(required=True), role_session_name=dict(required=True), - duration_seconds=dict(required=False, default=None, type='int'), + duration_seconds=dict(required=False, default=None, type="int"), external_id=dict(required=False, default=None), policy=dict(required=False, default=None), mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None, no_log=True) + mfa_token=dict(required=False, default=None, no_log=True), ) module = AnsibleAWSModule(argument_spec=argument_spec) - connection = module.client('sts') + connection = module.client("sts") assume_role_policy(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sts_session_token.py b/sts_session_token.py index c780097be61..8656a96fc3c 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -84,31 +84,31 @@ def normalize_credentials(credentials): - access_key = credentials.get('AccessKeyId', None) - secret_key = credentials.get('SecretAccessKey', None) - session_token = credentials.get('SessionToken', None) - expiration = credentials.get('Expiration', None) + access_key = credentials.get("AccessKeyId", None) + secret_key = credentials.get("SecretAccessKey", None) + session_token = credentials.get("SessionToken", None) + 
expiration = credentials.get("Expiration", None) return { - 'access_key': access_key, - 'secret_key': secret_key, - 'session_token': session_token, - 'expiration': expiration + "access_key": access_key, + "secret_key": secret_key, + "session_token": session_token, + "expiration": expiration, } def get_session_token(connection, module): - duration_seconds = module.params.get('duration_seconds') - mfa_serial_number = module.params.get('mfa_serial_number') - mfa_token = module.params.get('mfa_token') + duration_seconds = module.params.get("duration_seconds") + mfa_serial_number = module.params.get("mfa_serial_number") + mfa_token = module.params.get("mfa_token") changed = False args = {} if duration_seconds is not None: - args['DurationSeconds'] = duration_seconds + args["DurationSeconds"] = duration_seconds if mfa_serial_number is not None: - args['SerialNumber'] = mfa_serial_number + args["SerialNumber"] = mfa_serial_number if mfa_token is not None: - args['TokenCode'] = mfa_token + args["TokenCode"] = mfa_token try: response = connection.get_session_token(**args) @@ -116,13 +116,13 @@ def get_session_token(connection, module): except ClientError as e: module.fail_json(msg=e) - credentials = normalize_credentials(response.get('Credentials', {})) + credentials = normalize_credentials(response.get("Credentials", {})) module.exit_json(changed=changed, sts_creds=credentials) def main(): argument_spec = dict( - duration_seconds=dict(required=False, default=None, type='int'), + duration_seconds=dict(required=False, default=None, type="int"), mfa_serial_number=dict(required=False, default=None), mfa_token=dict(required=False, default=None, no_log=True), ) @@ -130,12 +130,12 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec) try: - connection = module.client('sts') + connection = module.client("sts") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") get_session_token(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/waf_condition.py b/waf_condition.py index 6e1911323c9..efbb17e2cf8 100644 --- a/waf_condition.py +++ b/waf_condition.py @@ -418,73 +418,79 @@ class Condition(object): - def __init__(self, client, module): self.client = client self.module = module - self.type = module.params['type'] - self.method_suffix = MATCH_LOOKUP[self.type]['method'] - self.conditionset = MATCH_LOOKUP[self.type]['conditionset'] - self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's' - self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id' - self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple'] - self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's' - self.conditiontype = MATCH_LOOKUP[self.type]['type'] + self.type = module.params["type"] + self.method_suffix = MATCH_LOOKUP[self.type]["method"] + self.conditionset = MATCH_LOOKUP[self.type]["conditionset"] + self.conditionsets = MATCH_LOOKUP[self.type]["conditionset"] + "s" + self.conditionsetid = MATCH_LOOKUP[self.type]["conditionset"] + "Id" + self.conditiontuple = MATCH_LOOKUP[self.type]["conditiontuple"] + self.conditiontuples = MATCH_LOOKUP[self.type]["conditiontuple"] + "s" + self.conditiontype = MATCH_LOOKUP[self.type]["type"] def format_for_update(self, condition_set_id): # Prep kwargs kwargs = dict() - kwargs['Updates'] = list() + kwargs["Updates"] = list() - for filtr in 
self.module.params.get('filters'): + for filtr in self.module.params.get("filters"): # Only for ip_set - if self.type == 'ip': + if self.type == "ip": # there might be a better way of detecting an IPv6 address - if ':' in filtr.get('ip_address'): - ip_type = 'IPV6' + if ":" in filtr.get("ip_address"): + ip_type = "IPV6" else: - ip_type = 'IPV4' - condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')} + ip_type = "IPV4" + condition_insert = {"Type": ip_type, "Value": filtr.get("ip_address")} # Specific for geo_match_set - if self.type == 'geo': - condition_insert = dict(Type='Country', Value=filtr.get('country')) + if self.type == "geo": + condition_insert = dict(Type="Country", Value=filtr.get("country")) # Common For everything but ip_set and geo_match_set - if self.type not in ('ip', 'geo'): - - condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()), - TextTransformation=filtr.get('transformation', 'none').upper()) - - if filtr.get('field_to_match').upper() == "HEADER": - if filtr.get('header'): - condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower() + if self.type not in ("ip", "geo"): + condition_insert = dict( + FieldToMatch=dict(Type=filtr.get("field_to_match").upper()), + TextTransformation=filtr.get("transformation", "none").upper(), + ) + + if filtr.get("field_to_match").upper() == "HEADER": + if filtr.get("header"): + condition_insert["FieldToMatch"]["Data"] = filtr.get("header").lower() else: self.module.fail_json(msg=str("DATA required when HEADER requested")) # Specific for byte_match_set - if self.type == 'byte': - condition_insert['TargetString'] = filtr.get('target_string') - condition_insert['PositionalConstraint'] = filtr.get('position') + if self.type == "byte": + condition_insert["TargetString"] = filtr.get("target_string") + condition_insert["PositionalConstraint"] = filtr.get("position") # Specific for size_constraint_set - if self.type == 'size': - condition_insert['ComparisonOperator'] = filtr.get('comparison') - condition_insert['Size'] = filtr.get('size') + if self.type == "size": + condition_insert["ComparisonOperator"] = filtr.get("comparison") + condition_insert["Size"] = filtr.get("size") # Specific for regex_match_set - if self.type == 'regex': - condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId'] + if self.type == "regex": + condition_insert["RegexPatternSetId"] = self.ensure_regex_pattern_present(filtr.get("regex_pattern"))[ + "RegexPatternSetId" + ] - kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert}) + kwargs["Updates"].append({"Action": "INSERT", self.conditiontuple: condition_insert}) kwargs[self.conditionsetid] = condition_set_id return kwargs def format_for_deletion(self, condition): - return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple} - for current_condition_tuple in condition[self.conditiontuples]], - self.conditionsetid: condition[self.conditionsetid]} + return { + "Updates": [ + {"Action": "DELETE", self.conditiontuple: current_condition_tuple} + for current_condition_tuple in condition[self.conditiontuples] + ], + self.conditionsetid: condition[self.conditionsetid], + } @AWSRetry.exponential_backoff() def list_regex_patterns_with_backoff(self, **params): @@ -502,60 +508,77 @@ def list_regex_patterns(self): try: response = self.list_regex_patterns_with_backoff(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as 
e: - self.module.fail_json_aws(e, msg='Could not list regex patterns') - regex_patterns.extend(response['RegexPatternSets']) - if 'NextMarker' in response: - params['NextMarker'] = response['NextMarker'] + self.module.fail_json_aws(e, msg="Could not list regex patterns") + regex_patterns.extend(response["RegexPatternSets"]) + if "NextMarker" in response: + params["NextMarker"] = response["NextMarker"] else: break return regex_patterns def get_regex_pattern_by_name(self, name): existing_regex_patterns = self.list_regex_patterns() - regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns) + regex_lookup = dict((item["Name"], item["RegexPatternSetId"]) for item in existing_regex_patterns) if name in regex_lookup: - return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet'] + return self.get_regex_pattern_set_with_backoff(regex_lookup[name])["RegexPatternSet"] else: return None def ensure_regex_pattern_present(self, regex_pattern): - name = regex_pattern['name'] + name = regex_pattern["name"] pattern_set = self.get_regex_pattern_by_name(name) if not pattern_set: - pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name}, - self.client.create_regex_pattern_set)['RegexPatternSet'] - missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings']) - extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings']) + pattern_set = run_func_with_change_token_backoff( + self.client, self.module, {"Name": name}, self.client.create_regex_pattern_set + )["RegexPatternSet"] + missing = set(regex_pattern["regex_strings"]) - set(pattern_set["RegexPatternStrings"]) + extra = set(pattern_set["RegexPatternStrings"]) - set(regex_pattern["regex_strings"]) if not missing and not extra: return pattern_set - updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing] - updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra]) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates}, - self.client.update_regex_pattern_set, wait=True) - return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet'] + updates = [{"Action": "INSERT", "RegexPatternString": pattern} for pattern in missing] + updates.extend([{"Action": "DELETE", "RegexPatternString": pattern} for pattern in extra]) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": pattern_set["RegexPatternSetId"], "Updates": updates}, + self.client.update_regex_pattern_set, + wait=True, + ) + return self.get_regex_pattern_set_with_backoff(pattern_set["RegexPatternSetId"])["RegexPatternSet"] def delete_unused_regex_pattern(self, regex_pattern_set_id): try: - regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet'] + regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)[ + "RegexPatternSet" + ] updates = list() - for regex_pattern_string in regex_pattern_set['RegexPatternStrings']: - updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string}) - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates}, - self.client.update_regex_pattern_set) - - run_func_with_change_token_backoff(self.client, self.module, - {'RegexPatternSetId': 
regex_pattern_set_id}, - self.client.delete_regex_pattern_set, wait=True) - except is_boto3_error_code('WAFNonexistentItemException'): + for regex_pattern_string in regex_pattern_set["RegexPatternStrings"]: + updates.append({"Action": "DELETE", "RegexPatternString": regex_pattern_string}) + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id, "Updates": updates}, + self.client.update_regex_pattern_set, + ) + + run_func_with_change_token_backoff( + self.client, + self.module, + {"RegexPatternSetId": regex_pattern_set_id}, + self.client.delete_regex_pattern_set, + wait=True, + ) + except is_boto3_error_code("WAFNonexistentItemException"): return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg='Could not delete regex pattern') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Could not delete regex pattern") def get_condition_by_name(self, name): - all_conditions = [d for d in self.list_conditions() if d['Name'] == name] + all_conditions = [d for d in self.list_conditions() if d["Name"] == name] if all_conditions: return all_conditions[0][self.conditionsetid] @@ -563,17 +586,17 @@ def get_condition_by_name(self, name): def get_condition_by_id_with_backoff(self, condition_set_id): params = dict() params[self.conditionsetid] = condition_set_id - func = getattr(self.client, 'get_' + self.method_suffix) + func = getattr(self.client, "get_" + self.method_suffix) return func(**params)[self.conditionset] def get_condition_by_id(self, condition_set_id): try: return self.get_condition_by_id_with_backoff(condition_set_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get condition') + self.module.fail_json_aws(e, msg="Could not get condition") def list_conditions(self): - method = 'list_' + self.method_suffix + 's' + method = "list_" + self.method_suffix + "s" try: paginator = self.client.get_paginator(method) func = paginator.paginate().build_full_result @@ -583,66 +606,68 @@ def list_conditions(self): try: return func()[self.conditionsets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type) + self.module.fail_json_aws(e, msg="Could not list %s conditions" % self.type) def tidy_up_regex_patterns(self, regex_match_set): all_regex_match_sets = self.list_conditions() all_match_set_patterns = list() for rms in all_regex_match_sets: - all_match_set_patterns.extend(conditiontuple['RegexPatternSetId'] - for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples]) + all_match_set_patterns.extend( + conditiontuple["RegexPatternSetId"] + for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples] + ) for filtr in regex_match_set[self.conditiontuples]: - if filtr['RegexPatternSetId'] not in all_match_set_patterns: - self.delete_unused_regex_pattern(filtr['RegexPatternSetId']) + if filtr["RegexPatternSetId"] not in all_match_set_patterns: + self.delete_unused_regex_pattern(filtr["RegexPatternSetId"]) def find_condition_in_rules(self, condition_set_id): rules_in_use = [] try: - if self.client.__class__.__name__ == 'WAF': + if self.client.__class__.__name__ == "WAF": all_rules = 
list_rules_with_backoff(self.client) - elif self.client.__class__.__name__ == 'WAFRegional': + elif self.client.__class__.__name__ == "WAFRegional": all_rules = list_regional_rules_with_backoff(self.client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not list rules') + self.module.fail_json_aws(e, msg="Could not list rules") for rule in all_rules: try: - rule_details = get_rule_with_backoff(self.client, rule['RuleId']) + rule_details = get_rule_with_backoff(self.client, rule["RuleId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not get rule details') - if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]: - rules_in_use.append(rule_details['Name']) + self.module.fail_json_aws(e, msg="Could not get rule details") + if condition_set_id in [predicate["DataId"] for predicate in rule_details["Predicates"]]: + rules_in_use.append(rule_details["Name"]) return rules_in_use def find_and_delete_condition(self, condition_set_id): current_condition = self.get_condition_by_id(condition_set_id) in_use_rules = self.find_condition_in_rules(condition_set_id) if in_use_rules: - rulenames = ', '.join(in_use_rules) - self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames)) + rulenames = ", ".join(in_use_rules) + self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition["Name"], rulenames)) if current_condition[self.conditiontuples]: # Filters are deleted using update with the DELETE action - func = getattr(self.client, 'update_' + self.method_suffix) + func = getattr(self.client, "update_" + self.method_suffix) params = self.format_for_deletion(current_condition) try: # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete filters from condition') - func = getattr(self.client, 'delete_' + self.method_suffix) + self.module.fail_json_aws(e, msg="Could not delete filters from condition") + func = getattr(self.client, "delete_" + self.method_suffix) params = dict() params[self.conditionsetid] = condition_set_id try: run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not delete condition') + self.module.fail_json_aws(e, msg="Could not delete condition") # tidy up regex patterns - if self.type == 'regex': + if self.type == "regex": self.tidy_up_regex_patterns(current_condition) return True, {} def find_missing(self, update, current_condition): missing = [] - for desired in update['Updates']: + for desired in update["Updates"]: found = False desired_condition = desired[self.conditiontuple] current_conditions = current_condition[self.conditiontuples] @@ -657,39 +682,41 @@ def find_and_update_condition(self, condition_set_id): current_condition = self.get_condition_by_id(condition_set_id) update = self.format_for_update(condition_set_id) missing = self.find_missing(update, current_condition) - if self.module.params.get('purge_filters'): - extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple} - for current_tuple in 
current_condition[self.conditiontuples] - if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]] + if self.module.params.get("purge_filters"): + extra = [ + {"Action": "DELETE", self.conditiontuple: current_tuple} + for current_tuple in current_condition[self.conditiontuples] + if current_tuple not in [desired[self.conditiontuple] for desired in update["Updates"]] + ] else: extra = [] changed = bool(missing or extra) if changed: - update['Updates'] = missing + extra - func = getattr(self.client, 'update_' + self.method_suffix) + update["Updates"] = missing + extra + func = getattr(self.client, "update_" + self.method_suffix) try: result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not update condition') + self.module.fail_json_aws(e, msg="Could not update condition") return changed, self.get_condition_by_id(condition_set_id) def ensure_condition_present(self): - name = self.module.params['name'] + name = self.module.params["name"] condition_set_id = self.get_condition_by_name(name) if condition_set_id: return self.find_and_update_condition(condition_set_id) else: params = dict() - params['Name'] = name - func = getattr(self.client, 'create_' + self.method_suffix) + params["Name"] = name + func = getattr(self.client, "create_" + self.method_suffix) try: condition = run_func_with_change_token_backoff(self.client, self.module, params, func) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Could not create condition') + self.module.fail_json_aws(e, msg="Could not create condition") return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid]) def ensure_condition_absent(self): - condition_set_id = self.get_condition_by_name(self.module.params['name']) + condition_set_id = self.get_condition_by_name(self.module.params["name"]) if condition_set_id: return self.find_and_delete_condition(condition_set_id) return False, {} @@ -698,45 +725,46 @@ def ensure_condition_absent(self): def main(): filters_subspec = dict( country=dict(), - field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']), + field_to_match=dict(choices=["uri", "query_string", "header", "method", "body"]), header=dict(), - transformation=dict(choices=['none', 'compress_white_space', - 'html_entity_decode', 'lowercase', - 'cmd_line', 'url_decode']), - position=dict(choices=['exactly', 'starts_with', 'ends_with', - 'contains', 'contains_word']), - comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']), + transformation=dict( + choices=["none", "compress_white_space", "html_entity_decode", "lowercase", "cmd_line", "url_decode"] + ), + position=dict(choices=["exactly", "starts_with", "ends_with", "contains", "contains_word"]), + comparison=dict(choices=["EQ", "NE", "LE", "LT", "GE", "GT"]), target_string=dict(), # Bytes - size=dict(type='int'), + size=dict(type="int"), ip_address=dict(), regex_pattern=dict(), ) argument_spec = dict( name=dict(required=True), - type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']), - filters=dict(type='list', elements='dict'), - purge_filters=dict(type='bool', default=False), - waf_regional=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + type=dict(required=True, choices=["byte", "geo", "ip", "regex", 
"size", "sql", "xss"]), + filters=dict(type="list", elements="dict"), + purge_filters=dict(type="bool", default=False), + waf_regional=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[["state", "present", ["filters"]]], ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['state', 'present', ['filters']]]) - state = module.params.get('state') + state = module.params.get("state") - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) condition = Condition(client, module) - if state == 'present': + if state == "present": (changed, results) = condition.ensure_condition_present() # return a condition agnostic ID for use by waf_rule - results['ConditionId'] = results[condition.conditionsetid] + results["ConditionId"] = results[condition.conditionsetid] else: (changed, results) = condition.ensure_condition_absent() module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/waf_info.py b/waf_info.py index 37a8c2bd025..ea294c92ed4 100644 --- a/waf_info.py +++ b/waf_info.py @@ -123,22 +123,20 @@ def main(): argument_spec = dict( name=dict(required=False), - waf_regional=dict(type='bool', default=False) + waf_regional=dict(type="bool", default=False), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = 'waf' if not module.params['waf_regional'] else 'waf-regional' + resource = "waf" if not module.params["waf_regional"] else "waf-regional" client = module.client(resource) web_acls = list_web_acls(client, module) - name = module.params['name'] + name = module.params["name"] if name: - web_acls = [web_acl for web_acl in web_acls if - web_acl['Name'] == name] + web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name] if not web_acls: module.fail_json(msg="WAF named %s not found" % name) - module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId']) - for web_acl in web_acls]) + module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/waf_rule.py b/waf_rule.py index 54129dafc12..98064dd8ca4 100644 --- a/waf_rule.py +++ b/waf_rule.py @@ -157,48 +157,48 @@ def get_rule_by_name(client, module, name): - rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name] + rules = [d["RuleId"] for d in list_rules(client, module) if d["Name"] == name] if rules: return rules[0] def get_rule(client, module, rule_id): try: - return client.get_rule(RuleId=rule_id)['Rule'] + return client.get_rule(RuleId=rule_id)["Rule"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not get WAF rule') + module.fail_json_aws(e, msg="Could not get WAF rule") def list_rules(client, module): - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": try: return list_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') - elif client.__class__.__name__ == 'WAFRegional': + module.fail_json_aws(e, msg="Could not list WAF rules") + elif client.__class__.__name__ == 
"WAFRegional": try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF Regional rules') + module.fail_json_aws(e, msg="Could not list WAF Regional rules") def list_regional_rules(client, module): try: return list_regional_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list WAF rules') + module.fail_json_aws(e, msg="Could not list WAF rules") def find_and_update_rule(client, module, rule_id): rule = get_rule(client, module, rule_id) - rule_id = rule['RuleId'] + rule_id = rule["RuleId"] existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) all_conditions = dict() for condition_type in MATCH_LOOKUP: - method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's' + method = "list_" + MATCH_LOOKUP[condition_type]["method"] + "s" all_conditions[condition_type] = dict() try: paginator = client.get_paginator(method) @@ -208,125 +208,133 @@ def find_and_update_rule(client, module, rule_id): # and throw different exceptions func = getattr(client, method) try: - pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's'] + pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type) + module.fail_json_aws(e, msg="Could not list %s conditions" % condition_type) for pred in pred_results: - pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id'] - all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred) - all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred) + pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] + all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) + all_conditions[condition_type][pred["DataId"]] = camel_dict_to_snake_dict(pred) - for condition in module.params['conditions']: - desired_conditions[condition['type']][condition['name']] = condition + for condition in module.params["conditions"]: + desired_conditions[condition["type"]][condition["name"]] = condition - reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items()) - for condition in rule['Predicates']: - existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition) + reverse_condition_types = dict((v["type"], k) for (k, v) in MATCH_LOOKUP.items()) + for condition in rule["Predicates"]: + existing_conditions[reverse_condition_types[condition["Type"]]][condition["DataId"]] = camel_dict_to_snake_dict( + condition + ) insertions = list() deletions = list() for condition_type in desired_conditions: - for (condition_name, condition) in desired_conditions[condition_type].items(): + for condition_name, condition in desired_conditions[condition_type].items(): if condition_name not in all_conditions[condition_type]: module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type)) - condition['data_id'] = all_conditions[condition_type][condition_name]['data_id'] - if condition['data_id'] not in existing_conditions[condition_type]: + condition["data_id"] = 
all_conditions[condition_type][condition_name]["data_id"] + if condition["data_id"] not in existing_conditions[condition_type]: insertions.append(format_for_insertion(condition)) - if module.params['purge_conditions']: + if module.params["purge_conditions"]: for condition_type in existing_conditions: - deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values() - if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]]) + deletions.extend( + [ + format_for_deletion(condition) + for condition in existing_conditions[condition_type].values() + if not all_conditions[condition_type][condition["data_id"]]["name"] + in desired_conditions[condition_type] + ] + ) changed = bool(insertions or deletions) - update = { - 'RuleId': rule_id, - 'Updates': insertions + deletions - } + update = {"RuleId": rule_id, "Updates": insertions + deletions} if changed: try: run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not update rule conditions') + module.fail_json_aws(e, msg="Could not update rule conditions") return changed, get_rule(client, module, rule_id) def format_for_insertion(condition): - return dict(Action='INSERT', - Predicate=dict(Negated=condition['negated'], - Type=MATCH_LOOKUP[condition['type']]['type'], - DataId=condition['data_id'])) + return dict( + Action="INSERT", + Predicate=dict( + Negated=condition["negated"], Type=MATCH_LOOKUP[condition["type"]]["type"], DataId=condition["data_id"] + ), + ) def format_for_deletion(condition): - return dict(Action='DELETE', - Predicate=dict(Negated=condition['negated'], - Type=condition['type'], - DataId=condition['data_id'])) + return dict( + Action="DELETE", + Predicate=dict(Negated=condition["negated"], Type=condition["type"], DataId=condition["data_id"]), + ) def remove_rule_conditions(client, module, rule_id): - conditions = get_rule(client, module, rule_id)['Predicates'] + conditions = get_rule(client, module, rule_id)["Predicates"] updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions] try: - run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule) + run_func_with_change_token_backoff(client, module, {"RuleId": rule_id, "Updates": updates}, client.update_rule) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Could not remove rule conditions') + module.fail_json_aws(e, msg="Could not remove rule conditions") def ensure_rule_present(client, module): - name = module.params['name'] + name = module.params["name"] rule_id = get_rule_by_name(client, module, name) params = dict() if rule_id: return find_and_update_rule(client, module, rule_id) else: - params['Name'] = module.params['name'] - metric_name = module.params['metric_name'] + params["Name"] = module.params["name"] + metric_name = module.params["metric_name"] if not metric_name: - metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name']) - params['MetricName'] = metric_name + metric_name = re.sub(r"[^a-zA-Z0-9]", "", module.params["name"]) + params["MetricName"] = metric_name try: - new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule'] + new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)["Rule"] 
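            # Worked example of the fallback above: a rule named "my-waf-rule"
            # yields MetricName "mywafrule", since WAF metric names must be
            # strictly alphanumeric and the regex strips every other character.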
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not create rule')
-        return find_and_update_rule(client, module, new_rule['RuleId'])
+            module.fail_json_aws(e, msg="Could not create rule")
+        return find_and_update_rule(client, module, new_rule["RuleId"])


 def find_rule_in_web_acls(client, module, rule_id):
     web_acls_in_use = []
     try:
-        if client.__class__.__name__ == 'WAF':
+        if client.__class__.__name__ == "WAF":
             all_web_acls = list_web_acls_with_backoff(client)
-        elif client.__class__.__name__ == 'WAFRegional':
+        elif client.__class__.__name__ == "WAFRegional":
             all_web_acls = list_regional_web_acls_with_backoff(client)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Could not list Web ACLs')
+        module.fail_json_aws(e, msg="Could not list Web ACLs")
     for web_acl in all_web_acls:
         try:
-            web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
+            web_acl_details = get_web_acl_with_backoff(client, web_acl["WebACLId"])
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not get Web ACL details')
-        if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
-            web_acls_in_use.append(web_acl_details['Name'])
+            module.fail_json_aws(e, msg="Could not get Web ACL details")
+        if rule_id in [rule["RuleId"] for rule in web_acl_details["Rules"]]:
+            web_acls_in_use.append(web_acl_details["Name"])
     return web_acls_in_use


 def ensure_rule_absent(client, module):
-    rule_id = get_rule_by_name(client, module, module.params['name'])
+    rule_id = get_rule_by_name(client, module, module.params["name"])
     in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
     if in_use_web_acls:
-        web_acl_names = ', '.join(in_use_web_acls)
-        module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
-                         (module.params['name'], web_acl_names))
+        web_acl_names = ", ".join(in_use_web_acls)
+        module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % (module.params["name"], web_acl_names))
     if rule_id:
         remove_rule_conditions(client, module, rule_id)
         try:
-            return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
+            return True, run_func_with_change_token_backoff(
+                client, module, {"RuleId": rule_id}, client.delete_rule, wait=True
+            )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not delete rule')
+            module.fail_json_aws(e, msg="Could not delete rule")
     return False, {}


@@ -334,17 +342,17 @@ def main():
     argument_spec = dict(
         name=dict(required=True),
         metric_name=dict(),
-        state=dict(default='present', choices=['present', 'absent']),
-        conditions=dict(type='list', elements='dict'),
-        purge_conditions=dict(type='bool', default=False),
-        waf_regional=dict(type='bool', default=False),
+        state=dict(default="present", choices=["present", "absent"]),
+        conditions=dict(type="list", elements="dict"),
+        purge_conditions=dict(type="bool", default=False),
+        waf_regional=dict(type="bool", default=False),
     )
     module = AnsibleAWSModule(argument_spec=argument_spec)
-    state = module.params.get('state')
+    state = module.params.get("state")

-    resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+    resource = "waf" if not module.params["waf_regional"] else "waf-regional"
     client = module.client(resource)
-    if state == 'present':
+    if state == "present":
         (changed, results) = ensure_rule_present(client, module)
     else:
         (changed, results) = ensure_rule_absent(client, module)

@@ -352,5 +360,5 @@ def main():
     module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/waf_web_acl.py b/waf_web_acl.py
index dc35308e833..4b71231aec9 100644
--- a/waf_web_acl.py
+++ b/waf_web_acl.py
@@ -179,7 +179,7 @@ def get_web_acl_by_name(client, module, name):
-    acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name]
+    acls = [d["WebACLId"] for d in list_web_acls(client, module) if d["Name"] == name]
     if acls:
         return acls[0]
     else:
@@ -187,91 +187,93 @@ def create_rule_lookup(client, module):
-    if client.__class__.__name__ == 'WAF':
+    if client.__class__.__name__ == "WAF":
         try:
             rules = list_rules_with_backoff(client)
-            return dict((rule['Name'], rule) for rule in rules)
+            return dict((rule["Name"], rule) for rule in rules)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not list rules')
-    elif client.__class__.__name__ == 'WAFRegional':
+            module.fail_json_aws(e, msg="Could not list rules")
+    elif client.__class__.__name__ == "WAFRegional":
         try:
             rules = list_regional_rules_with_backoff(client)
-            return dict((rule['Name'], rule) for rule in rules)
+            return dict((rule["Name"], rule) for rule in rules)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not list regional rules')
+            module.fail_json_aws(e, msg="Could not list regional rules")


 def get_web_acl(client, module, web_acl_id):
     try:
-        return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+        return client.get_web_acl(WebACLId=web_acl_id)["WebACL"]
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
+        module.fail_json_aws(e, msg="Could not get Web ACL with id %s" % web_acl_id)


-def list_web_acls(client, module,):
-    if client.__class__.__name__ == 'WAF':
+def list_web_acls(
+    client,
+    module,
+):
+    if client.__class__.__name__ == "WAF":
         try:
             return list_web_acls_with_backoff(client)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not get Web ACLs')
-    elif client.__class__.__name__ == 'WAFRegional':
+            module.fail_json_aws(e, msg="Could not get Web ACLs")
+    elif client.__class__.__name__ == "WAFRegional":
         try:
             return list_regional_web_acls_with_backoff(client)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not get Web ACLs')
+            module.fail_json_aws(e, msg="Could not get Web ACLs")


 def find_and_update_web_acl(client, module, web_acl_id):
     acl = get_web_acl(client, module, web_acl_id)
     rule_lookup = create_rule_lookup(client, module)
-    existing_rules = acl['Rules']
-    desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
-                      'Priority': rule['priority'],
-                      'Action': {'Type': rule['action'].upper()},
-                      'Type': rule.get('type', 'regular').upper()}
-                     for rule in module.params['rules']]
+    existing_rules = acl["Rules"]
+    desired_rules = [
+        {
+            "RuleId": rule_lookup[rule["name"]]["RuleId"],
+            "Priority": rule["priority"],
+            "Action": {"Type": rule["action"].upper()},
+            "Type": rule.get("type", "regular").upper(),
+        }
+        for rule in module.params["rules"]
+    ]
     missing = [rule for rule in desired_rules if rule not in existing_rules]
     extras = []
-    if module.params['purge_rules']:
+    if module.params["purge_rules"]:
         extras = [rule for rule in existing_rules if rule not in desired_rules]

-    insertions = [format_for_update(rule, 'INSERT') for rule in missing]
-    deletions = [format_for_update(rule, 'DELETE') for rule in extras]
+    insertions = [format_for_update(rule, "INSERT") for rule in missing]
+    deletions = [format_for_update(rule, "DELETE") for rule in extras]
     changed = bool(insertions + deletions)

     # Purge rules before adding new ones in case a deletion shares the same
     # priority as an insertion.
-    params = {
-        'WebACLId': acl['WebACLId'],
-        'DefaultAction': acl['DefaultAction']
-    }
+    params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]}
     change_tokens = []
     if deletions:
         try:
-            params['Updates'] = deletions
+            params["Updates"] = deletions
             result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
-            change_tokens.append(result['ChangeToken'])
+            change_tokens.append(result["ChangeToken"])
             get_waiter(
-                client, 'change_token_in_sync',
-            ).wait(
-                ChangeToken=result['ChangeToken']
-            )
+                client,
+                "change_token_in_sync",
+            ).wait(ChangeToken=result["ChangeToken"])
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not update Web ACL')
+            module.fail_json_aws(e, msg="Could not update Web ACL")
     if insertions:
         try:
-            params['Updates'] = insertions
+            params["Updates"] = insertions
             result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
-            change_tokens.append(result['ChangeToken'])
+            change_tokens.append(result["ChangeToken"])
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not update Web ACL')
+            module.fail_json_aws(e, msg="Could not update Web ACL")
     if change_tokens:
         for token in change_tokens:
             get_waiter(
-                client, 'change_token_in_sync',
-            ).wait(
-                ChangeToken=token
-            )
+                client,
+                "change_token_in_sync",
+            ).wait(ChangeToken=token)
     if changed:
         acl = get_web_acl(client, module, web_acl_id)
     return changed, acl

@@ -281,77 +283,79 @@ def format_for_update(rule, action):
     return dict(
         Action=action,
         ActivatedRule=dict(
-            Priority=rule['Priority'],
-            RuleId=rule['RuleId'],
-            Action=dict(
-                Type=rule['Action']['Type']
-            )
-        )
+            Priority=rule["Priority"],
+            RuleId=rule["RuleId"],
+            Action=dict(Type=rule["Action"]["Type"]),
+        ),
     )


 def remove_rules_from_web_acl(client, module, web_acl_id):
     acl = get_web_acl(client, module, web_acl_id)
-    deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
+    deletions = [format_for_update(rule, "DELETE") for rule in acl["Rules"]]
     try:
-        params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions}
+        params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"], "Updates": deletions}
         run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Could not remove rule')
+        module.fail_json_aws(e, msg="Could not remove rule")


 def ensure_web_acl_present(client, module):
     changed = False
     result = None
-    name = module.params['name']
+    name = module.params["name"]
     web_acl_id = get_web_acl_by_name(client, module, name)
     if web_acl_id:
         (changed, result) = find_and_update_web_acl(client, module, web_acl_id)
     else:
-        metric_name = module.params['metric_name']
+        metric_name = module.params["metric_name"]
         if not metric_name:
-            metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
-        default_action = module.params['default_action'].upper()
+            metric_name = re.sub(r"[^A-Za-z0-9]", "", module.params["name"])
+        default_action = module.params["default_action"].upper()
         try:
-            params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
+            params = {"Name": name, "MetricName": metric_name, "DefaultAction": {"Type": default_action}}
             new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not create Web ACL')
-        (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
+            module.fail_json_aws(e, msg="Could not create Web ACL")
+        (changed, result) = find_and_update_web_acl(client, module, new_web_acl["WebACL"]["WebACLId"])
     return changed, result


 def ensure_web_acl_absent(client, module):
-    web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
+    web_acl_id = get_web_acl_by_name(client, module, module.params["name"])
     if web_acl_id:
         web_acl = get_web_acl(client, module, web_acl_id)
-        if web_acl['Rules']:
+        if web_acl["Rules"]:
             remove_rules_from_web_acl(client, module, web_acl_id)
         try:
-            run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
+            run_func_with_change_token_backoff(
+                client, module, {"WebACLId": web_acl_id}, client.delete_web_acl, wait=True
+            )
             return True, {}
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Could not delete Web ACL')
+            module.fail_json_aws(e, msg="Could not delete Web ACL")
     return False, {}


 def main():
     argument_spec = dict(
         name=dict(required=True),
-        default_action=dict(choices=['block', 'allow', 'count']),
+        default_action=dict(choices=["block", "allow", "count"]),
         metric_name=dict(),
-        state=dict(default='present', choices=['present', 'absent']),
-        rules=dict(type='list', elements='dict'),
-        purge_rules=dict(type='bool', default=False),
-        waf_regional=dict(type='bool', default=False)
+        state=dict(default="present", choices=["present", "absent"]),
+        rules=dict(type="list", elements="dict"),
+        purge_rules=dict(type="bool", default=False),
+        waf_regional=dict(type="bool", default=False),
+    )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=[["state", "present", ["default_action", "rules"]]],
     )
-    module = AnsibleAWSModule(argument_spec=argument_spec,
-                              required_if=[['state', 'present', ['default_action', 'rules']]])
-    state = module.params.get('state')
+    state = module.params.get("state")

-    resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+    resource = "waf" if not module.params["waf_regional"] else "waf-regional"
     client = module.client(resource)
-    if state == 'present':
+    if state == "present":
         (changed, results) = ensure_web_acl_present(client, module)
     else:
         (changed, results) = ensure_web_acl_absent(client, module)

@@ -359,5 +363,5 @@ def main():
     module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py
index 961c9325b31..b96ba0cb1c1 100644
--- a/wafv2_ip_set.py
+++ b/wafv2_ip_set.py
@@ -138,41 +138,36 @@ def __init__(self, wafv2, name, scope, fail_json_aws):
         self.existing_set, self.id, self.locktoken, self.arn = self.get_set()

     def description(self):
-        return self.existing_set.get('Description')
+        return self.existing_set.get("Description")

     def _format_set(self, ip_set):
         if ip_set is None:
             return None
-        return camel_dict_to_snake_dict(self.existing_set, ignore_list=['tags'])
+        return camel_dict_to_snake_dict(self.existing_set, ignore_list=["tags"])

     def get(self):
         return self._format_set(self.existing_set)

     def remove(self):
         try:
-            response = self.wafv2.delete_ip_set(
-                Name=self.name,
-                Scope=self.scope,
-                Id=self.id,
-                LockToken=self.locktoken
-            )
+            response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken)
         except (BotoCoreError, ClientError) as e:
             self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.")
         return {}

     def create(self, description, ip_address_version, addresses, tags):
         req_obj = {
-            'Name': self.name,
-            'Scope': self.scope,
-            'IPAddressVersion': ip_address_version,
-            'Addresses': addresses,
+            "Name": self.name,
+            "Scope": self.scope,
+            "IPAddressVersion": ip_address_version,
+            "Addresses": addresses,
         }

         if description:
-            req_obj['Description'] = description
+            req_obj["Description"] = description

         if tags:
-            req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+            req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags)

         try:
             response = self.wafv2.create_ip_set(**req_obj)
@@ -184,15 +179,15 @@ def create(self, description, ip_address_version, addresses, tags):

     def update(self, description, addresses):
         req_obj = {
-            'Name': self.name,
-            'Scope': self.scope,
-            'Id': self.id,
-            'Addresses': addresses,
-            'LockToken': self.locktoken
+            "Name": self.name,
+            "Scope": self.scope,
+            "Id": self.id,
+            "Addresses": addresses,
+            "LockToken": self.locktoken,
         }

         if description:
-            req_obj['Description'] = description
+            req_obj["Description"] = description

         try:
             response = self.wafv2.update_ip_set(**req_obj)
@@ -208,38 +203,31 @@ def get_set(self):
         id = None
         arn = None
         locktoken = None
-        for item in response.get('IPSets'):
-            if item.get('Name') == self.name:
-                id = item.get('Id')
-                locktoken = item.get('LockToken')
-                arn = item.get('ARN')
+        for item in response.get("IPSets"):
+            if item.get("Name") == self.name:
+                id = item.get("Id")
+                locktoken = item.get("LockToken")
+                arn = item.get("ARN")
         if id:
             try:
-                existing_set = self.wafv2.get_ip_set(
-                    Name=self.name,
-                    Scope=self.scope,
-                    Id=id
-                ).get('IPSet')
+                existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet")
             except (BotoCoreError, ClientError) as e:
                 self.fail_json_aws(e, msg="Failed to get wafv2 ip set.")
             tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
-            existing_set['tags'] = tags
+            existing_set["tags"] = tags

         return existing_set, id, locktoken, arn

     def list(self, Nextmarker=None):
         # there is currently no paginator for wafv2
-        req_obj = {
-            'Scope': self.scope,
-            'Limit': 100
-        }
+        req_obj = {"Scope": self.scope, "Limit": 100}
         if Nextmarker:
-            req_obj['NextMarker'] = Nextmarker
+            req_obj["NextMarker"] = Nextmarker

         try:
             response = self.wafv2.list_ip_sets(**req_obj)
-            if response.get('NextMarker'):
-                response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets')
+            if response.get("NextMarker"):
+                response["IPSets"] += self.list(Nextmarker=response.get("NextMarker")).get("IPSets")
         except (BotoCoreError, ClientError) as e:
             self.fail_json_aws(e, msg="Failed to list wafv2 ip set.")

@@ -249,11 +237,11 @@ def list(self, Nextmarker=None):
 def compare(existing_set, addresses, purge_addresses, state):
     diff = False
     new_rules = []
-    existing_rules = existing_set.get('addresses')
-    if state == 'present':
+    existing_rules = existing_set.get("addresses")
+    if state == "present":
         if purge_addresses:
             new_rules = addresses
-            if sorted(addresses) != sorted(existing_set.get('addresses')):
+            if sorted(addresses) != sorted(existing_set.get("addresses")):
                 diff = True

         else:
@@ -275,23 +263,22 @@ def compare(existing_set, addresses, purge_addresses, state):


 def main():
-
     arg_spec = dict(
-        state=dict(type='str', required=True, choices=['present', 'absent']),
-        name=dict(type='str', required=True),
-        scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
-        description=dict(type='str'),
-        ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']),
-        addresses=dict(type='list', elements='str'),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
-        purge_addresses=dict(type='bool', default=True),
+        state=dict(type="str", required=True, choices=["present", "absent"]),
+        name=dict(type="str", required=True),
+        scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
+        description=dict(type="str"),
+        ip_address_version=dict(type="str", choices=["IPV4", "IPV6"]),
+        addresses=dict(type="list", elements="str"),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
+        purge_addresses=dict(type="bool", default=True),
     )

     module = AnsibleAWSModule(
         argument_spec=arg_spec,
         supports_check_mode=True,
-        required_if=[['state', 'present', ['ip_address_version', 'addresses']]]
+        required_if=[["state", "present", ["ip_address_version", "addresses"]]],
     )

     state = module.params.get("state")
@@ -305,17 +292,18 @@ def main():
     purge_addresses = module.params.get("purge_addresses")
     check_mode = module.check_mode

-    wafv2 = module.client('wafv2')
+    wafv2 = module.client("wafv2")

     change = False
     retval = {}

     ip_set = IpSet(wafv2, name, scope, module.fail_json_aws)

-    if state == 'present':
-
+    if state == "present":
         if ip_set.get():
-            tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode)
+            tags_updated = ensure_wafv2_tags(
+                wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode
+            )
             ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
             description_updated = bool(description) and ip_set.description() != description
             change = ips_updated or description_updated or tags_updated
@@ -323,32 +311,23 @@ def main():
             if module.check_mode:
                 pass
             elif ips_updated or description_updated:
-                retval = ip_set.update(
-                    description=description,
-                    addresses=addresses
-                )
+                retval = ip_set.update(description=description, addresses=addresses)
             elif tags_updated:
                 retval, id, locktoken, arn = ip_set.get_set()
         else:
             if not check_mode:
                 retval = ip_set.create(
-                    description=description,
-                    ip_address_version=ip_address_version,
-                    addresses=addresses,
-                    tags=tags
+                    description=description, ip_address_version=ip_address_version, addresses=addresses, tags=tags
                 )
             change = True

-    if state == 'absent':
+    if state == "absent":
         if ip_set.get():
             if addresses:
                 if len(addresses) > 0:
                     change, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
                     if change and not check_mode:
-                        retval = ip_set.update(
-                            description=description,
-                            addresses=addresses
-                        )
+                        retval = ip_set.update(description=description, addresses=addresses)
             else:
                 if not check_mode:
                     retval = ip_set.remove()
@@ -357,5 +336,5 @@ def main():
     module.exit_json(changed=change, **retval)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py
index 4e0d4feb538..caca5cd7081 100644
--- a/wafv2_ip_set_info.py
+++ b/wafv2_ip_set_info.py
@@ -83,17 +83,16 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None):
     # there is currently no paginator for wafv2
-    req_obj = {
-        'Scope': scope,
-        'Limit': 100
-    }
+    req_obj = {"Scope": scope, "Limit": 100}
     if Nextmarker:
-        req_obj['NextMarker'] = Nextmarker
+        req_obj["NextMarker"] = Nextmarker

     try:
         response = wafv2.list_ip_sets(**req_obj)
-        if response.get('NextMarker'):
-            response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets')
+        if response.get("NextMarker"):
+            response["IPSets"] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get("NextMarker")).get(
+                "IPSets"
+            )
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to list wafv2 ip set")
     return response
@@ -101,21 +100,15 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None):

 def get_ip_set(wafv2, name, scope, id, fail_json_aws):
     try:
-        response = wafv2.get_ip_set(
-            Name=name,
-            Scope=scope,
-            Id=id
-        )
+        response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to get wafv2 ip set")
     return response


 def main():
-
     arg_spec = dict(
-        name=dict(type='str', required=True),
-        scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+        name=dict(type="str", required=True), scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"])
     )

     module = AnsibleAWSModule(
@@ -126,26 +119,26 @@ def main():
     name = module.params.get("name")
     scope = module.params.get("scope")

-    wafv2 = module.client('wafv2')
+    wafv2 = module.client("wafv2")

     # check if ip set exist
     response = list_ip_sets(wafv2, scope, module.fail_json_aws)

     id = None
-    for item in response.get('IPSets'):
-        if item.get('Name') == name:
-            id = item.get('Id')
-            arn = item.get('ARN')
+    for item in response.get("IPSets"):
+        if item.get("Name") == name:
+            id = item.get("Id")
+            arn = item.get("ARN")

     retval = {}
     existing_set = None
     if id:
         existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws)
-        retval = camel_dict_to_snake_dict(existing_set.get('IPSet'))
-        retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {}
+        retval = camel_dict_to_snake_dict(existing_set.get("IPSet"))
+        retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {}

     module.exit_json(**retval)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/wafv2_resources.py b/wafv2_resources.py
index 552a2de03bd..b36f517120b 100644
--- a/wafv2_resources.py
+++ b/wafv2_resources.py
@@ -73,11 +73,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws):
     try:
-        response = wafv2.get_web_acl(
-            Name=name,
-            Scope=scope,
-            Id=id
-        )
+        response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to get wafv2 web acl.")
     return response
@@ -85,9 +81,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws):

 def list_wafv2_resources(wafv2, arn, fail_json_aws):
     try:
-        response = wafv2.list_resources_for_web_acl(
-            WebACLArn=arn
-        )
+        response = wafv2.list_resources_for_web_acl(WebACLArn=arn)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to list wafv2 web acl.")
     return response
@@ -95,10 +89,7 @@ def list_wafv2_resources(wafv2, arn, fail_json_aws):

 def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws):
     try:
-        response = wafv2.associate_web_acl(
-            WebACLArn=waf_arn,
-            ResourceArn=arn
-        )
+        response = wafv2.associate_web_acl(WebACLArn=waf_arn, ResourceArn=arn)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to add wafv2 web acl.")
     return response
@@ -106,27 +97,24 @@ def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws):

 def remove_resources(wafv2, arn, fail_json_aws):
     try:
-        response = wafv2.disassociate_web_acl(
-            ResourceArn=arn
-        )
+        response = wafv2.disassociate_web_acl(ResourceArn=arn)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
     return response


 def main():
-
     arg_spec = dict(
-        state=dict(type='str', required=True, choices=['present', 'absent']),
-        name=dict(type='str'),
-        scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']),
-        arn=dict(type='str', required=True)
+        state=dict(type="str", required=True, choices=["present", "absent"]),
+        name=dict(type="str"),
+        scope=dict(type="str", choices=["CLOUDFRONT", "REGIONAL"]),
+        arn=dict(type="str", required=True),
     )

     module = AnsibleAWSModule(
         argument_spec=arg_spec,
         supports_check_mode=True,
-        required_if=[['state', 'present', ['name', 'scope']]]
+        required_if=[["state", "present", ["name", "scope"]]],
     )

     state = module.params.get("state")
@@ -135,7 +123,7 @@ def main():
     arn = module.params.get("arn")
     check_mode = module.check_mode

-    wafv2 = module.client('wafv2')
+    wafv2 = module.client("wafv2")

     # check if web acl exists
@@ -145,26 +133,26 @@ def main():
     retval = {}
     change = False

-    for item in response.get('WebACLs'):
-        if item.get('Name') == name:
-            id = item.get('Id')
+    for item in response.get("WebACLs"):
+        if item.get("Name") == name:
+            id = item.get("Id")

     if id:
         existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
-        waf_arn = existing_acl.get('WebACL').get('ARN')
+        waf_arn = existing_acl.get("WebACL").get("ARN")

         retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws)

-    if state == 'present':
+    if state == "present":
         if retval:
-            if arn not in retval.get('ResourceArns'):
+            if arn not in retval.get("ResourceArns"):
                 change = True
                 if not check_mode:
                     retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws)

-    elif state == 'absent':
+    elif state == "absent":
         if retval:
-            if arn in retval.get('ResourceArns'):
+            if arn in retval.get("ResourceArns"):
                 change = True
                 if not check_mode:
                     retval = remove_resources(wafv2, arn, module.fail_json_aws)
@@ -172,5 +160,5 @@ def main():
     module.exit_json(changed=change, **camel_dict_to_snake_dict(retval))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py
index f9c5c3c08c2..5cafee1f67d 100644
--- a/wafv2_resources_info.py
+++ b/wafv2_resources_info.py
@@ -62,11 +62,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws):
     try:
-        response = wafv2.get_web_acl(
-            Name=name,
-            Scope=scope,
-            Id=id
-        )
+        response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to get wafv2 web acl.")
     return response
@@ -78,19 +74,16 @@ def list_web_acls(wafv2, scope, fail_json_aws):

 def list_wafv2_resources(wafv2, arn, fail_json_aws):
     try:
-        response = wafv2.list_resources_for_web_acl(
-            WebACLArn=arn
-        )
+        response = wafv2.list_resources_for_web_acl(WebACLArn=arn)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to list wafv2 resources.")
list wafv2 resources.") return response def main(): - arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( @@ -101,25 +94,25 @@ def main(): name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if web acl exists response = list_web_acls(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('WebACLs'): - if item.get('Name') == name: - id = item.get('Id') + for item in response.get("WebACLs"): + if item.get("Name") == name: + id = item.get("Id") if id: existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - arn = existing_acl.get('WebACL').get('ARN') + arn = existing_acl.get("WebACL").get("ARN") retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/wafv2_rule_group.py b/wafv2_rule_group.py index 60cc60a131f..e2a7fd1d438 100644 --- a/wafv2_rule_group.py +++ b/wafv2_rule_group.py @@ -227,20 +227,20 @@ def __init__(self, wafv2, name, scope, fail_json_aws): def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Id': self.id, - 'Rules': rules, - 'LockToken': self.locktoken, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Id": self.id, + "Rules": rules, + "LockToken": self.locktoken, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description try: response = self.wafv2.update_rule_group(**req_obj) @@ -252,11 +252,11 @@ def get_group(self): if self.id is None: response = self.list() - for item in response.get('RuleGroups'): - if item.get('Name') == self.name: - self.id = item.get('Id') - self.locktoken = item.get('LockToken') - self.arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == self.name: + self.id = item.get("Id") + self.locktoken = item.get("LockToken") + self.arn = item.get("ARN") return self.refresh_group() @@ -264,18 +264,14 @@ def refresh_group(self): existing_group = None if self.id: try: - response = self.wafv2.get_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id - ) - existing_group = response.get('RuleGroup') - self.locktoken = response.get('LockToken') + response = self.wafv2.get_rule_group(Name=self.name, Scope=self.scope, Id=self.id) + existing_group = response.get("RuleGroup") + self.locktoken = response.get("LockToken") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 rule group.") tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws) - existing_group['tags'] = tags or {} + existing_group["tags"] = tags or {} return existing_group @@ -290,10 +286,7 @@ def get(self): def remove(self): try: response = self.wafv2.delete_rule_group( - Name=self.name, - Scope=self.scope, - Id=self.id, - LockToken=self.locktoken + Name=self.name, Scope=self.scope, Id=self.id, 
LockToken=self.locktoken ) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.") @@ -301,22 +294,22 @@ def remove(self): def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags): req_obj = { - 'Name': self.name, - 'Scope': self.scope, - 'Capacity': capacity, - 'Rules': rules, - 'VisibilityConfig': { - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - } + "Name": self.name, + "Scope": self.scope, + "Capacity": capacity, + "Rules": rules, + "VisibilityConfig": { + "SampledRequestsEnabled": sampled_requests, + "CloudWatchMetricsEnabled": cloudwatch_metrics, + "MetricName": metric_name, + }, } if description: - req_obj['Description'] = description + req_obj["Description"] = description if tags: - req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags) + req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: response = self.wafv2.create_rule_group(**req_obj) @@ -329,26 +322,25 @@ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metr def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']), - capacity=dict(type='int'), - description=dict(type='str'), - rules=dict(type='list', elements='dict'), - sampled_requests=dict(type='bool', default=False), - cloudwatch_metrics=dict(type='bool', default=True), - metric_name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - purge_rules=dict(default=True, type='bool'), + state=dict(type="str", required=True, choices=["present", "absent"]), + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), + capacity=dict(type="int"), + description=dict(type="str"), + rules=dict(type="list", elements="dict"), + sampled_requests=dict(type="bool", default=False), + cloudwatch_metrics=dict(type="bool", default=True), + metric_name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + purge_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[['state', 'present', ['capacity', 'rules']]] + required_if=[["state", "present", ["capacity", "rules"]]], ) state = module.params.get("state") @@ -373,31 +365,26 @@ def main(): if not metric_name: metric_name = name - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws) change = False retval = {} - if state == 'present': + if state == "present": if rule_group.get(): - tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags, - module.fail_json_aws, module.check_mode) - rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) - description_change = bool(description) and (rule_group.get().get('Description') != description) + tagging_change = ensure_wafv2_tags( + wafv2, rule_group.arn, tags, purge_tags, module.fail_json_aws, module.check_mode + ) + rules_change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) + description_change = bool(description) and (rule_group.get().get("Description") != description) change = tagging_change or 
rules_change or description_change retval = rule_group.get() if module.check_mode: # In check mode nothing changes... pass elif rules_change or description_change: - retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name - ) + retval = rule_group.update(description, rules, sampled_requests, cloudwatch_metrics, metric_name) elif tagging_change: retval = rule_group.refresh_group() @@ -405,35 +392,25 @@ def main(): change = True if not check_mode: retval = rule_group.create( - capacity, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - tags + capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags ) - elif state == 'absent': + elif state == "absent": if rule_group.get(): if rules: if len(rules) > 0: - change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state) + change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) if change and not check_mode: retval = rule_group.update( - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name + description, rules, sampled_requests, cloudwatch_metrics, metric_name ) else: change = True if not check_mode: retval = rule_group.remove() - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags'])) + module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=["tags"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py index c95b74f81c7..58862a9a5f2 100644 --- a/wafv2_rule_group_info.py +++ b/wafv2_rule_group_info.py @@ -101,11 +101,7 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): try: - response = wafv2.get_rule_group( - Name=name, - Scope=scope, - Id=id - ) + response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 rule group.") return response @@ -113,39 +109,39 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - name=dict(type='str', required=True), - scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) + name=dict(type="str", required=True), + scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), ) module = AnsibleAWSModule( argument_spec=arg_spec, - supports_check_mode=True + supports_check_mode=True, ) name = module.params.get("name") scope = module.params.get("scope") - wafv2 = module.client('wafv2') + wafv2 = module.client("wafv2") # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None retval = {} - for item in response.get('RuleGroups'): - if item.get('Name') == name: - id = item.get('Id') - arn = item.get('ARN') + for item in response.get("RuleGroups"): + if item.get("Name") == name: + id = item.get("Id") + arn = item.get("ARN") existing_group = None if id: existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup')) + retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval['tags'] = tags or {} + retval["tags"] = tags or {} module.exit_json(**retval) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py index 3b7fd8daa16..23e8f9c6b09 100644 --- 
+++ b/wafv2_web_acl.py
@@ -339,26 +339,35 @@ def __init__(self, wafv2, name, scope, fail_json_aws):
         self.fail_json_aws = fail_json_aws
         self.existing_acl, self.id, self.locktoken = self.get_web_acl()

-    def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies):
+    def update(
+        self,
+        default_action,
+        description,
+        rules,
+        sampled_requests,
+        cloudwatch_metrics,
+        metric_name,
+        custom_response_bodies,
+    ):
         req_obj = {
-            'Name': self.name,
-            'Scope': self.scope,
-            'Id': self.id,
-            'DefaultAction': default_action,
-            'Rules': rules,
-            'VisibilityConfig': {
-                'SampledRequestsEnabled': sampled_requests,
-                'CloudWatchMetricsEnabled': cloudwatch_metrics,
-                'MetricName': metric_name
+            "Name": self.name,
+            "Scope": self.scope,
+            "Id": self.id,
+            "DefaultAction": default_action,
+            "Rules": rules,
+            "VisibilityConfig": {
+                "SampledRequestsEnabled": sampled_requests,
+                "CloudWatchMetricsEnabled": cloudwatch_metrics,
+                "MetricName": metric_name,
             },
-            'LockToken': self.locktoken
+            "LockToken": self.locktoken,
         }

         if description:
-            req_obj['Description'] = description
+            req_obj["Description"] = description

         if custom_response_bodies:
-            req_obj['CustomResponseBodies'] = custom_response_bodies
+            req_obj["CustomResponseBodies"] = custom_response_bodies

         try:
             response = self.wafv2.update_web_acl(**req_obj)
@@ -370,12 +379,7 @@ def update(self, default_action, description, rules, sampled_requests, cloudwatc

     def remove(self):
         try:
-            response = self.wafv2.delete_web_acl(
-                Name=self.name,
-                Scope=self.scope,
-                Id=self.id,
-                LockToken=self.locktoken
-            )
+            response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken)
         except (BotoCoreError, ClientError) as e:
             self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
         return response
@@ -392,47 +396,53 @@ def get_web_acl(self):
         existing_acl = None
         response = self.list()

-        for item in response.get('WebACLs'):
-            if item.get('Name') == self.name:
-                id = item.get('Id')
-                locktoken = item.get('LockToken')
-                arn = item.get('ARN')
+        for item in response.get("WebACLs"):
+            if item.get("Name") == self.name:
+                id = item.get("Id")
+                locktoken = item.get("LockToken")
+                arn = item.get("ARN")
         if id:
             try:
-                existing_acl = self.wafv2.get_web_acl(
-                    Name=self.name,
-                    Scope=self.scope,
-                    Id=id
-                )
+                existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id)
             except (BotoCoreError, ClientError) as e:
                 self.fail_json_aws(e, msg="Failed to get wafv2 web acl.")
             tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
-            existing_acl['tags'] = tags
+            existing_acl["tags"] = tags
         return existing_acl, id, locktoken

     def list(self):
         return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws)

-    def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies):
+    def create(
+        self,
+        default_action,
+        rules,
+        sampled_requests,
+        cloudwatch_metrics,
+        metric_name,
+        tags,
+        description,
+        custom_response_bodies,
+    ):
         req_obj = {
-            'Name': self.name,
-            'Scope': self.scope,
-            'DefaultAction': default_action,
-            'Rules': rules,
-            'VisibilityConfig': {
-                'SampledRequestsEnabled': sampled_requests,
-                'CloudWatchMetricsEnabled': cloudwatch_metrics,
-                'MetricName': metric_name
-            }
+            "Name": self.name,
+            "Scope": self.scope,
+            "DefaultAction": default_action,
+            "Rules": rules,
+            "VisibilityConfig": {
+                "SampledRequestsEnabled": sampled_requests,
+                "CloudWatchMetricsEnabled": cloudwatch_metrics,
+                "MetricName": metric_name,
+            },
         }

         if custom_response_bodies:
-            req_obj['CustomResponseBodies'] = custom_response_bodies
+            req_obj["CustomResponseBodies"] = custom_response_bodies
         if description:
-            req_obj['Description'] = description
+            req_obj["Description"] = description
         if tags:
-            req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+            req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags)

         try:
             response = self.wafv2.create_web_acl(**req_obj)
@@ -444,7 +454,6 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me

 def format_result(result):
-
     # We were returning details of the Web ACL inside a "web_acl" parameter on
     # creation, keep returning it to avoid breaking existing playbooks, but also
     # return what the docs said we return (and returned when no change happened)
@@ -452,31 +461,30 @@ def format_result(result):
     if "WebACL" in retval:
         retval.update(retval["WebACL"])

-    return camel_dict_to_snake_dict(retval, ignore_list=['tags'])
+    return camel_dict_to_snake_dict(retval, ignore_list=["tags"])


 def main():
-
     arg_spec = dict(
-        state=dict(type='str', required=True, choices=['present', 'absent']),
-        name=dict(type='str', required=True),
-        scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
-        description=dict(type='str'),
-        default_action=dict(type='str', choices=['Block', 'Allow']),
-        rules=dict(type='list', elements='dict'),
-        sampled_requests=dict(type='bool', default=False),
-        cloudwatch_metrics=dict(type='bool', default=True),
-        metric_name=dict(type='str'),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(default=True, type='bool'),
-        custom_response_bodies=dict(type='dict'),
-        purge_rules=dict(default=True, type='bool'),
+        state=dict(type="str", required=True, choices=["present", "absent"]),
+        name=dict(type="str", required=True),
+        scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
+        description=dict(type="str"),
+        default_action=dict(type="str", choices=["Block", "Allow"]),
+        rules=dict(type="list", elements="dict"),
+        sampled_requests=dict(type="bool", default=False),
+        cloudwatch_metrics=dict(type="bool", default=True),
+        metric_name=dict(type="str"),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(default=True, type="bool"),
+        custom_response_bodies=dict(type="dict"),
+        purge_rules=dict(default=True, type="bool"),
     )

     module = AnsibleAWSModule(
         argument_spec=arg_spec,
         supports_check_mode=True,
-        required_if=[['state', 'present', ['default_action', 'rules']]]
+        required_if=[["state", "present", ["default_action", "rules"]]],
     )

     state = module.params.get("state")
@@ -495,16 +503,16 @@ def main():
     custom_response_bodies = module.params.get("custom_response_bodies")

     if custom_response_bodies:
-        module.require_botocore_at_least('1.20.40', reason='to set custom response bodies')
+        module.require_botocore_at_least("1.20.40", reason="to set custom response bodies")
         custom_response_bodies = {}

         for custom_name, body in module.params.get("custom_response_bodies").items():
             custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True)

-    if default_action == 'Block':
-        default_action = {'Block': {}}
-    elif default_action == 'Allow':
-        default_action = {'Allow': {}}
+    if default_action == "Block":
+        default_action = {"Block": {}}
+    elif default_action == "Allow":
+        default_action = {"Allow": {}}

     if rules:
         rules = []
@@ -514,17 +522,19 @@ def main():
     if not metric_name:
         metric_name = name

-    wafv2 = module.client('wafv2')
+    wafv2 = module.client("wafv2")
     web_acl = WebACL(wafv2, name, scope, module.fail_json_aws)
     change = False
     retval = {}

-    if state == 'present':
+    if state == "present":
         if web_acl.get():
-            tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode)
-            change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
-            change = change or (description and web_acl.get().get('WebACL').get('Description') != description)
-            change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action)
+            tags_changed = ensure_wafv2_tags(
+                wafv2, web_acl.get().get("WebACL").get("ARN"), tags, purge_tags, module.fail_json_aws, module.check_mode
+            )
+            change, rules = compare_priority_rules(web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state)
+            change = change or (description and web_acl.get().get("WebACL").get("Description") != description)
+            change = change or (default_action and web_acl.get().get("WebACL").get("DefaultAction") != default_action)

             if change and not check_mode:
                 retval = web_acl.update(
@@ -534,7 +544,7 @@ def main():
                     sampled_requests,
                     cloudwatch_metrics,
                     metric_name,
-                    custom_response_bodies
+                    custom_response_bodies,
                 )
             elif tags_changed:
                 retval, id, locktoken = web_acl.get_web_acl()
@@ -554,14 +564,16 @@ def main():
                     metric_name,
                     tags,
                     description,
-                    custom_response_bodies
+                    custom_response_bodies,
                 )

-    elif state == 'absent':
+    elif state == "absent":
         if web_acl.get():
             if rules:
                 if len(rules) > 0:
-                    change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
+                    change, rules = compare_priority_rules(
+                        web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state
+                    )
                     if change and not check_mode:
                         retval = web_acl.update(
                             default_action,
@@ -570,7 +582,7 @@ def main():
                             sampled_requests,
                             cloudwatch_metrics,
                             metric_name,
-                            custom_response_bodies
+                            custom_response_bodies,
                         )
             else:
                 change = True
@@ -580,5 +592,5 @@ def main():
     module.exit_json(changed=change, **format_result(retval))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py
index 8fe00f66a1a..e3cdc46e330 100644
--- a/wafv2_web_acl_info.py
+++ b/wafv2_web_acl_info.py
@@ -103,21 +103,16 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws):
     try:
-        response = wafv2.get_web_acl(
-            Name=name,
-            Scope=scope,
-            Id=id
-        )
+        response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
     except (BotoCoreError, ClientError) as e:
         fail_json_aws(e, msg="Failed to get wafv2 web acl.")
     return response


 def main():
-
     arg_spec = dict(
-        name=dict(type='str', required=True),
-        scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+        name=dict(type="str", required=True),
+        scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
     )

     module = AnsibleAWSModule(
@@ -129,7 +124,7 @@ def main():
     name = module.params.get("name")
     scope = module.params.get("scope")

-    wafv2 = module.client('wafv2')
+    wafv2 = module.client("wafv2")

     # check if web acl exists
     response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws)
@@ -137,19 +132,19 @@ def main():
     arn = None
     retval = {}

-    for item in response.get('WebACLs'):
-        if item.get('Name') == name:
-            id = item.get('Id')
-            arn = item.get('ARN')
+    for item in response.get("WebACLs"):
+        if item.get("Name") == name:
+            id = item.get("Id")
+            arn = item.get("ARN")

     if id:
         existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
-        retval = camel_dict_to_snake_dict(existing_acl.get('WebACL'))
+        retval = camel_dict_to_snake_dict(existing_acl.get("WebACL"))
         tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
-        retval['tags'] = tags
+        retval["tags"] = tags

     module.exit_json(**retval)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

From 05be386567031e98aee2b463d6af023a51ea575d Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Fri, 5 May 2023 09:34:16 +0200
Subject: [PATCH 654/683] cloudfront_distribution - fix version added for
 not-backported features (#1803)

cloudfront_distribution - fix version added for not-backported features

SUMMARY
A couple of features didn't get backported to stable-5. Update the version_added.
ISSUE TYPE

Docs Pull Request

COMPONENT NAME
cloudfront_distribution
ADDITIONAL INFORMATION

Reviewed-by: Markus Bergholz
---
 cloudfront_distribution.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py
index ac43cada3ad..7b841c7f925 100644
--- a/cloudfront_distribution.py
+++ b/cloudfront_distribution.py
@@ -121,7 +121,7 @@
       origin_shield_region:
         description: Specify which AWS region will be used for Origin Shield. Required if Origin Shield is enabled.
         type: str
-        version_added: 5.1.0
+        version_added: 6.0.0
       custom_headers:
         description:
           - Custom headers you wish to add to the request before passing it to the origin.
@@ -177,11 +177,13 @@
           The minimum number is C(1), the maximum is C(3).
         type: int
         default: 3
+        version_added: 6.0.0
       connection_timeout:
         description: The number of seconds that CloudFront waits when trying to establish a connection to the origin.
           The minimum timeout is C(1) second, the maximum is C(10) seconds.
         type: int
         default: 10
+        version_added: 6.0.0
   purge_origins:
     description: Whether to remove any origins that aren't listed in I(origins).
     default: false
@@ -1320,7 +1322,7 @@
       returned: when enabled is true
       type: str
      sample: us-east-1
-      version_added: 5.1.0
+      version_added: 6.0.0
     s3_origin_config:
       description: Origin access identity configuration for S3 Origin.
       returned: when s3_origin_access_identity_enabled is true

From f1fce7db0b91c5ff0a8d4106bac1d173846e9f81 Mon Sep 17 00:00:00 2001
From: rmahroua
Date: Fri, 5 May 2023 08:21:00 +0000
Subject: [PATCH 655/683] s3_lifecycle - fix invalid value type for
 transitions list (#1788)

s3_lifecycle - fix invalid value type for transitions list

Depends-On: #1792
SUMMARY
Fixes #1774
ISSUE TYPE

Bugfix Pull Request

COMPONENT NAME
s3_lifecycle
ADDITIONAL INFORMATION
Forces casting to integer for the transition_days parameter of a transitions list.
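As a rough illustration of the failure mode (a hypothetical snippet, not part of the patch -- the bucket rule and values are invented): a playbook that templates transition_days through Jinja2 hands the module a string, and botocore's parameter validation rejects a string where an integer is expected, so the module now casts explicitly before building the rule:

    # transition_days arrives as the string "30" when templated, e.g. transition_days: "{{ days }}"
    transition = {"transition_days": "30", "storage_class": "glacier"}
    t_out = {}
    if transition.get("transition_days") is not None:
        # Before the fix this assigned the raw value, and put_bucket_lifecycle_configuration
        # then failed botocore parameter validation ("Invalid type for parameter ... Days").
        t_out["Days"] = int(transition["transition_days"])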
Reviewed-by: Mark Chappell
---
 s3_lifecycle.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/s3_lifecycle.py b/s3_lifecycle.py
index 9d5ea3b8ff6..24517b1e372 100644
--- a/s3_lifecycle.py
+++ b/s3_lifecycle.py
@@ -331,7 +331,7 @@ def build_rule(client, module):
             if transition.get("transition_date"):
                 t_out["Date"] = transition["transition_date"]
             elif transition.get("transition_days") is not None:
-                t_out["Days"] = transition["transition_days"]
+                t_out["Days"] = int(transition["transition_days"])
             if transition.get("storage_class"):
                 t_out["StorageClass"] = transition["storage_class"].upper()
             rule["Transitions"].append(t_out)
@@ -498,7 +498,7 @@ def create_lifecycle_rule(client, module):
             aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration
         )
     except is_boto3_error_message("At least one action needs to be specified in a rule"):
-        # Amazon interpretted this as not changing anything
+        # Amazon interpreted this as not changing anything
         changed = False
     except (
         botocore.exceptions.ClientError,

From 24de0af43ac2f568617451c00c14631b2fa6bf6d Mon Sep 17 00:00:00 2001
From: rmahroua
Date: Fri, 5 May 2023 08:21:05 +0000
Subject: [PATCH 656/683] cloudformation_stack_set - Add a waiter to ensure
 that running operations against existing stacksets complete (#1790)

cloudformation_stack_set - Add a waiter to ensure that running operations against existing stacksets complete

SUMMARY
Add a waiter to ensure that running operations against existing stacksets complete. Current code would fail in cases where new instances need to be added, since the previous update_stack_set(module, stack_params, cfn) would still be running.
Fixes #1608
ISSUE TYPE

Bugfix Pull Request

COMPONENT NAME
cloudformation_stack_set
ADDITIONAL INFORMATION
I initially thought that the module was not idempotent, since new instances wouldn't be added to my existing stack sets. Upon closer examination, the issue had to do with the fact that we had prior calls being made before adding new instances to existing stack sets:

raise error_class(parsed_response, operation_name)\nbotocore.errorfactory.OperationInProgressException: An error occurred (OperationInProgressException) when calling the UpdateStackInstances operation: Another Operation on StackSet arn:aws:cloudformation:us-east-1:XXXXXX:stackset/aws-config-stackset:2bcb419a-f263-48ca-9fe0-cdef11fb59de is in progress

The error got triggered because of a missing waiter after this operation:

changed |= update_stack_set(module, stack_params, cfn)

This change adds a waiter function after the update operation, which, in turn, ensures that the subsequent call to add stack instances to the stack set runs properly.
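For reference, a simplified sketch of the waiter pattern the fix relies on (the real module's await_stack_set_operation() also reports failed operations and honors the configured delay; this trimmed version only shows the polling idea):

    import time

    def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
        # CloudFormation has no built-in boto3 waiter for stack set operations,
        # so poll DescribeStackSetOperation until the operation leaves RUNNING.
        for _ in range(max_wait // 15):
            operation = cfn.describe_stack_set_operation(
                StackSetName=stack_set_name, OperationId=operation_id
            )["StackSetOperation"]
            if operation["Status"] not in ("RUNNING", "STOPPING"):
                return
            time.sleep(15)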
Reviewed-by: Mark Chappell --- cloudformation_stack_set.py | 41 ++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index 1a8673a909c..f825f2a63bf 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -182,9 +182,11 @@ description: Test stack in two accounts state: present template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: on subsequent calls, templates are optional but parameters and tags can be altered community.aws.cloudformation_stack_set: @@ -195,9 +197,11 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: The same type of update, but wait for the update to complete in all stacks community.aws.cloudformation_stack_set: @@ -209,7 +213,26 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 + regions: + - us-east-1 + +- name: Register new accounts (create new stack instances) with an existing stack set. + community.aws.cloudformation_stack_set: + name: my-stack + state: present + wait: true + parameters: + InstanceName: my_restacked_instance + tags: + foo: bar + test: stack + accounts: + - 123456789012 + - 234567890123 + - 345678901234 regions: - us-east-1 """ @@ -655,6 +678,14 @@ def main(): stack_params["OperationPreferences"] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) + await_stack_set_operation( + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), + ) + # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn, From 11f3ebefa878db522fc87ac3a3f59adc97dece25 Mon Sep 17 00:00:00 2001 From: Marco Braga Date: Fri, 5 May 2023 09:24:05 -0300 Subject: [PATCH 657/683] fix/elbv2-tg: KeyError when getting ProtocolVersion (#1800) fix/elbv2-tg: KeyError when getting ProtocolVersion SUMMARY Fix KeyError on module elb_target_group. 
Given the task: - name: Target | AWS | Create {{ tg.name }} community.aws.elb_target_group: state: present region: "{{ tg.region | d(omit) }}" name: "{{ tg.name }}" vpc_id: "{{ vpc_id }}" tags: "{{ tg.tags | d(omit) }}" purge_tags: "{{ tg.purge_tags | d('no') }}" protocol: "{{ tg.protocol }}" port: "{{ tg.port }}" # Health Check health_check_protocol: "{{ tg.health_check_protocol }}" health_check_path: "{{ tg.health_check_path | d(omit) }}" health_check_port: "{{ tg.health_check_port | d(omit) }}" successful_response_codes: "{{ tg.successful_response_codes | d(omit) }}" health_check_interval: "{{ tg.health_check_interval | d(omit) }}" health_check_timeout: "{{ tg.health_check_timeout | d(omit) }}" healthy_threshold_count: "{{ tg.healthy_threshold_count | d(omit) }}" unhealthy_threshold_count: "{{ tg.unhealthy_threshold_count | d(omit) }}" # Targets target_type: "{{ tg.target_type | d(omit) }}" modify_targets: "{{ tg.modify_targets | d(omit) }}" tagets: "{{ tg.tagets | d(omit) }}" # Config stickiness_enabled: "{{ tg.stickiness_enabled | d(omit) }}" stickiness_app_cookie_duration: "{{ tg.stickiness_app_cookie_duration | d(omit) }}" stickiness_app_cookie_name: "{{ tg.stickiness_app_cookie_name | d(omit) }}" stickiness_lb_cookie_duration: "{{ tg.stickiness_lb_cookie_duration | d(omit) }}" stickiness_type: "{{ tg.stickiness_type | d(omit) }}" proxy_protocol_v2_enabled: "{{ tg.proxy_protocol_v2_enabled | d(omit) }}" preserve_client_ip_enabled: "{{ tg.preserve_client_ip_enabled | d(omit) }}" deregistration_delay_timeout: "{{ tg.deregistration_delay_timeout | d(omit) }}" register: tg_out until: "tg_out is not failed" retries: 10 delay: 5 The following error when running against existing resources (after creation/idempotency) that do not use ProtocolVersion (optional*, E.g: TCP): The full traceback is: Traceback (most recent call last): File "/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py", line 107, in _ansiballz_main() File "/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py", line 99, in _ansiballz_main invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS) File "/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py", line 47, in invoke_module runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.elb_target_group', init_globals=dict(_module_fqn='ansible_collections.community.aws.plugins.modules.elb_target_group', _modlib_path=modlib_path), File "/usr/lib64/python3.9/runpy.py", line 225, in run_module return _run_module_code(code, init_globals, run_name, mod_spec) File "/usr/lib64/python3.9/runpy.py", line 97, in _run_module_code _run_code(code, mod_globals, init_globals, File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py", line 989, in File "/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py", line 983, in main File "/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py", line 685, in create_or_update_target_group KeyError: 
'ProtocolVersion' fatal: [localhost]: FAILED! => { "attempts": 10, "changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py\", line 107, in \n _ansiballz_main()\n File \"/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py\", line 99, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/home/myuser/.ansible/tmp/ansible-tmp-1683068584.628099-3322568-261537723315094/AnsiballZ_elb_target_group.py\", line 47, in invoke_module\n runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.elb_target_group', init_globals=dict(_module_fqn='ansible_collections.community.aws.plugins.modules.elb_target_group', _modlib_path=modlib_path),\n File \"/usr/lib64/python3.9/runpy.py\", line 225, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/lib64/python3.9/runpy.py\", line 97, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/usr/lib64/python3.9/runpy.py\", line 87, in _run_code\n exec(code, run_globals)\n File \"/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py\", line 989, in \n File \"/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py\", line 983, in main\n File \"/tmp/ansible_community.aws.elb_target_group_payload_y5gefe7e/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py\", line 685, in create_or_update_target_group\nKeyError: 'ProtocolVersion'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } ProtocolVersion is not returned a valid attribute from the resource using TCP Protocol $ aws elbv2 describe-target-groups --target-group-arns arn:aws:elasticloadbalancing:us-east-1:[redacted:AWS_ACCOUNT_ID]:targetgroup/okipr3601-lfmwl-aext/513dc1925ba2791f { "TargetGroups": [ { "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:[redacted:AWS_ACCOUNT_ID]:targetgroup/okipr3601-lfmwl-aext/513dc1925ba2791f", "TargetGroupName": "okipr3601-lfmwl-aext", "Protocol": "TCP", "Port": 6443, "VpcId": "vpc-0eef74291b5613c03", "HealthCheckProtocol": "HTTPS", "HealthCheckPort": "6443", "HealthCheckEnabled": true, "HealthCheckIntervalSeconds": 10, "HealthCheckTimeoutSeconds": 10, "HealthyThresholdCount": 2, "UnhealthyThresholdCount": 2, "HealthCheckPath": "/readyz", "Matcher": { "HttpCode": "200-399" }, "LoadBalancerArns": [ "arn:aws:elasticloadbalancing:us-east-1:[redacted:AWS_ACCOUNT_ID]:loadbalancer/net/okipr3601-lfmwl-ext/93bc672ccecb2bd7" ], "TargetType": "ip", "IpAddressType": "ipv4" } ] } Versions $ ansible-galaxy collection list Collection Version -------------------- ------------ amazon.aws 5.4.0 community.aws 5.4.0 $ python -V Python 3.9.13 ansible --version ansible [core 2.14.5] config file = /home/ansible.cfg configured module search path = ['/home/myuser/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/myuser/.venvs/vpy3.9/lib64/python3.9/site-packages/ansible ansible collection location = /home/collections executable location = /home/myuser/.venvs/vpy3.9/bin/ansible python 
ISSUE TYPE

Bugfix Pull Request

COMPONENT NAME

elb_target_group

ADDITIONAL INFORMATION

Reviewed-by: Mark Chappell
Reviewed-by: Alina Buzachis
---
 elb_target_group.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/elb_target_group.py b/elb_target_group.py
index bbab1507d2d..93a3f333df6 100644
--- a/elb_target_group.py
+++ b/elb_target_group.py
@@ -719,7 +719,7 @@ def create_or_update_target_group(connection, module):
     # TODO: required and here?
     if "Matcher" in params:
         code_key = "HttpCode"
-        if target_group["ProtocolVersion"] == "GRPC":
+        if target_group.get("ProtocolVersion") == "GRPC":
             code_key = "GrpcCode"
         current_matcher_list = target_group["Matcher"][code_key].split(",")
         requested_matcher_list = params["Matcher"][code_key].split(",")

From e1652025fb33d37dc5adc6802acff7cbf04118fa Mon Sep 17 00:00:00 2001
From: rmahroua
Date: Sat, 6 May 2023 20:16:43 +0000
Subject: [PATCH 658/683] aws_config_delivery_channel - Add support for KMS encryption (#1786)

aws_config_delivery_channel - Add support for KMS encryption

SUMMARY

Add support for KMS keys when creating an AWS Config delivery channel.

ISSUE TYPE

Feature Pull Request

COMPONENT NAME

aws_config_delivery_channel

ADDITIONAL INFORMATION

AWS added support for KMS encryption of objects stored in S3. This adds that option via a new kms_key_arn module option.
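For reference, a rough boto3 sketch of the PutDeliveryChannel call the new option feeds into; the values are the placeholder ARNs from the module examples, and the field mapping follows the params assembly shown in the last hunk below:

import boto3

client = boto3.client("config")
client.put_delivery_channel(
    DeliveryChannel={
        "name": "test_delivery_channel",
        "s3BucketName": "test_aws_config_bucket",
        # s3KmsKeyArn is the DeliveryChannel field the kms_key_arn option maps to.
        "s3KmsKeyArn": "arn:aws:kms:us-east-1:123456789012:key/160f41cb-e660-4fa0-8bf6-976f53bf7851",
        "snsTopicARN": "arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no",
        "configSnapshotDeliveryProperties": {"deliveryFrequency": "Twelve_Hours"},
    }
)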
Reviewed-by: Mark Chappell
---
 config_delivery_channel.py | 18 +++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/config_delivery_channel.py b/config_delivery_channel.py
index aae8799de20..dc03a95f719 100644
--- a/config_delivery_channel.py
+++ b/config_delivery_channel.py
@@ -36,6 +36,10 @@
     description:
       - The prefix for the specified Amazon S3 bucket.
     type: str
+  kms_key_arn:
+    description:
+      - The ARN of a KMS key used to encrypt objects delivered by Config. The key must belong to the same region as the destination S3 bucket.
+    type: str
   sns_topic_arn:
     description:
       - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
@@ -52,11 +56,20 @@
 """

 EXAMPLES = r"""
-- name: Create Delivery Channel for AWS Config
+- name: Create a delivery channel for AWS Config
+  community.aws.config_delivery_channel:
+    name: test_delivery_channel
+    state: present
+    s3_bucket: 'test_aws_config_bucket'
+    sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
+    delivery_frequency: 'Twelve_Hours'
+
+- name: Create a delivery channel with encrypted objects
   community.aws.config_delivery_channel:
     name: test_delivery_channel
     state: present
     s3_bucket: 'test_aws_config_bucket'
+    kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/160f41cb-e660-4fa0-8bf6-976f53bf7851'
     sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
     delivery_frequency: 'Twelve_Hours'
 """
@@ -177,6 +190,7 @@ def main():
         "state": dict(type="str", choices=["present", "absent"], default="present"),
         "s3_bucket": dict(type="str", required=True),
         "s3_prefix": dict(type="str"),
+        "kms_key_arn": dict(type="str", no_log=True),
         "sns_topic_arn": dict(type="str"),
         "delivery_frequency": dict(
             type="str",
@@ -204,6 +218,8 @@ def main():
         params["s3BucketName"] = module.params.get("s3_bucket")
     if module.params.get("s3_prefix"):
         params["s3KeyPrefix"] = module.params.get("s3_prefix")
+    if module.params.get("kms_key_arn"):
+        params["s3KmsKeyArn"] = module.params.get("kms_key_arn")
     if module.params.get("sns_topic_arn"):
         params["snsTopicARN"] = module.params.get("sns_topic_arn")
     if module.params.get("delivery_frequency"):

From 5885832c93095a941208a893a45db0cc84d19b00 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Mon, 8 May 2023 19:21:22 +0200
Subject: [PATCH 659/683] Bulk migration to Python 3.6 f-strings (#1810)

Bulk migration to Python 3.6 f-strings

SUMMARY

We've dropped support for Python <3.6, so bulk-migrate to f-strings and perform some general string cleanup: a combination of black --preview, flynt, and some manual cleanup.

ISSUE TYPE

Feature Pull Request

COMPONENT NAME

plugins/
tests/

ADDITIONAL INFORMATION
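For reviewers, a representative before/after of the conversion this commit applies throughout (an illustrative snippet, not taken from any one hunk):

resource_arn = "arn:aws:acm:us-east-1:123456789012:certificate/example"

# Before: the two formatting styles used across the collection.
old_percent = "Couldn't add tags to certificate %s" % resource_arn
old_format = "Couldn't add tags to certificate {0}".format(resource_arn)

# After: a single Python 3.6+ f-string, as produced by flynt and
# black --preview.
new_fstring = f"Couldn't add tags to certificate {resource_arn}"

assert old_percent == old_format == new_fstring

# The manual cleanup also fixes implicit string concatenations that were
# missing a separating space, e.g.:
#   "policy_type is StepScaling" "and state is present"
# silently renders as "...is StepScalingand state is present".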
Reviewed-by: Alina Buzachis
---
 acm_certificate.py                      | 12 ++--
 acm_certificate_info.py                 |  2 +-
 api_gateway.py                          |  8 +--
 autoscaling_instance_refresh.py         |  2 +-
 autoscaling_policy.py                   | 13 ++---
 batch_compute_environment.py            |  7 +--
 cloudformation_stack_set.py             | 55 +++++++++--------
 cloudfront_distribution.py              | 43 ++++++--------
 codebuild_project.py                    |  3 +-
 codepipeline.py                         |  6 +-
 config_delivery_channel.py              |  3 +-
 data_pipeline.py                        | 18 +++---
 directconnect_confirm_connection.py     |  4 +-
 directconnect_connection.py             |  4 +-
 directconnect_link_aggregation_group.py | 32 +++++-----
 directconnect_virtual_interface.py      | 14 ++---
 dynamodb_table.py                       | 16 +++--
 ec2_ami_copy.py                         |  4 +-
 ec2_launch_template.py                  | 45 ++++++--------
 ec2_placement_group.py                  | 10 ++--
 ec2_placement_group_info.py             |  2 +-
 ec2_transit_gateway.py                  |  6 +-
 ec2_vpc_egress_igw.py                   | 12 ++--
 ec2_vpc_nacl_info.py                    |  2 +-
 ec2_vpc_vgw.py                          |  2 +-
 ec2_vpc_vpn.py                          | 37 ++++++------
 ec2_win_password.py                     |  4 +-
 ecs_attribute.py                        | 10 ++--
 ecs_cluster.py                          |  2 +-
 ecs_ecr.py                              | 11 ++--
 ecs_service.py                          |  8 +--
 ecs_tag.py                              | 10 ++--
 ecs_taskdefinition.py                   | 16 +++--
 efs.py                                  |  4 +-
 efs_info.py                             |  6 +-
 efs_tag.py                              |  8 +--
 eks_cluster.py                          | 12 ++--
 eks_fargate_profile.py                  | 10 ++--
 eks_nodegroup.py                        | 20 +++----
 elasticache.py                          | 38 ++++++------
 elasticache_info.py                     |  4 +-
 elasticache_parameter_group.py          | 22 +++----
 elasticache_snapshot.py                 |  6 +-
 elb_instance.py                         |  2 +-
 elb_target.py                           | 12 ++--
 elb_target_group.py                     |  2 +-
 elb_target_info.py                      |  6 +-
 glue_connection.py                      |  8 ++-
 glue_crawler.py                         |  8 +--
 glue_job.py                             |  8 +--
 iam_access_key.py                       | 12 ++--
 iam_access_key_info.py                  |  2 +-
 iam_group.py                            | 24 ++++----
 iam_managed_policy.py                   | 16 ++---
 iam_password_policy.py                  |  2 +-
 iam_role.py                             | 42 +++++++------
 iam_role_info.py                        |  8 +--
 iam_saml_federation.py                  | 12 ++--
 iam_server_certificate.py               | 14 ++---
 kinesis_stream.py                       | 72 +++++++++++------------
 lightsail.py                            |  3 +-
 msk_cluster.py                          | 22 +++----
 msk_config.py                           |  4 +-
 opensearch.py                           | 78 +++++++++----------------
 redshift.py                             |  8 +--
 redshift_cross_region_snapshots.py      |  3 +-
 s3_bucket_notification.py               |  6 +-
 s3_cors.py                              |  4 +-
 s3_lifecycle.py                         |  2 +-
 s3_logging.py                           |  4 +-
 s3_metrics_configuration.py             |  4 +-
 s3_sync.py                              |  4 +-
 secretsmanager_secret.py                |  2 +-
 ses_identity.py                         | 33 ++++-------
 ses_identity_policy.py                  |  6 +-
 ses_rule_set.py                         | 13 +++--
 sns.py                                  |  2 +-
 sns_topic.py                            | 10 ++--
 stepfunctions_state_machine.py          |  4 +-
 waf_condition.py                        |  4 +-
 waf_info.py                             |  2 +-
 waf_rule.py                             |  6 +-
 waf_web_acl.py                          |  2 +-
 83 files changed, 482 insertions(+), 547 deletions(-)

diff --git a/acm_certificate.py b/acm_certificate.py
index 197124fb59e..4bf07f0321a 100644
--- a/acm_certificate.py
+++ b/acm_certificate.py
@@ -276,7 +276,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
         botocore.exceptions.ClientError,
         botocore.exceptions.BotoCoreError,
     ) as e:
-        module.fail_json_aws(e, "Couldn't add tags to certificate {0}".format(resource_arn))
+        module.fail_json_aws(e, f"Couldn't add tags to certificate {resource_arn}")
     if tags_to_remove and not module.check_mode:
         # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys.
         tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove]
@@ -289,7 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
         botocore.exceptions.ClientError,
         botocore.exceptions.BotoCoreError,
     ) as e:
-        module.fail_json_aws(e, "Couldn't remove tags from certificate {0}".format(resource_arn))
+        module.fail_json_aws(e, f"Couldn't remove tags from certificate {resource_arn}")
     new_tags = deepcopy(existing_tags)
     for key, value in tags_to_add.items():
         new_tags[key] = value
@@ -441,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags,
     cert_arn = None
     changed = False
     if len(certificates) > 1:
-        msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params["name_tag"]
+        msg = f"More than one certificate with Name={module.params['name_tag']} exists in ACM in this region"
         module.fail_json(msg=msg, certificates=certificates)
     elif len(certificates) == 1:
         # Update existing certificate that was previously imported to ACM.
@@ -496,7 +496,7 @@ def main(): absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) < 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) + module.debug(f"{a} is {module.params[a]}") module.fail_json( msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" ) @@ -505,7 +505,7 @@ def main(): absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) + module.debug(f"{a} is {module.params[a]}") module.fail_json( msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" ) @@ -543,7 +543,7 @@ def main(): only_tags=filter_tags, ) - module.debug("Found %d corresponding certificates in ACM" % len(certificates)) + module.debug(f"Found {len(certificates)} corresponding certificates in ACM") if module.params["state"] == "present": ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 287e7006aef..420cd0e0f92 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -296,7 +296,7 @@ def main(): ) if module.params["certificate_arn"] and len(certificates) != 1: - module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params["certificate_arn"]) + module.fail_json(msg=f"No certificate exists in this region with ARN {module.params['certificate_arn']}") module.exit_json(certificates=certificates) diff --git a/api_gateway.py b/api_gateway.py index 176404f644d..c63ad5f1582 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -248,7 +248,7 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te with open(swagger_file) as f: apidata = f.read() except OSError as e: - msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e)) + msg = f"Failed trying to read swagger file {str(swagger_file)}: {str(e)}" module.fail_json(msg=msg, exception=traceback.format_exc()) if swagger_dict is not None: apidata = json.dumps(swagger_dict) @@ -281,7 +281,7 @@ def delete_rest_api(module, client, api_id): try: delete_response = delete_api(client, api_id) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="deleting API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"deleting API {api_id}") return delete_response @@ -299,7 +299,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): try: configure_response = configure_api(client, api_id, api_data=api_data) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="configuring API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"configuring API {api_id}") deploy_response = None @@ -308,7 +308,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): try: deploy_response = create_deployment(client, api_id, **module.params) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - msg = "deploying api {0} to stage {1}".format(api_id, stage) + msg = f"deploying api {api_id} to stage {stage}" module.fail_json_aws(e, msg) return configure_response, deploy_response diff --git a/autoscaling_instance_refresh.py 
b/autoscaling_instance_refresh.py index 5b9855d135d..86546fac21e 100644 --- a/autoscaling_instance_refresh.py +++ b/autoscaling_instance_refresh.py @@ -229,7 +229,7 @@ def start_or_cancel_instance_refresh(conn, module): result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) return module.exit_json(**result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to {0} InstanceRefresh".format(asg_state.replace("ed", ""))) + module.fail_json_aws(e, msg=f"Failed to {asg_state.replace('ed', '')} InstanceRefresh") def main(): diff --git a/autoscaling_policy.py b/autoscaling_policy.py index f76ce74ceb1..67f7ccbd54b 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -447,7 +447,7 @@ def create_scaling_policy(connection, module): # it's only required if policy is SimpleScaling and state is present if not module.params["scaling_adjustment"]: module.fail_json( - msg="scaling_adjustment is required when policy_type is SimpleScaling " "and state is present" + msg="scaling_adjustment is required when policy_type is SimpleScaling and state is present" ) params["ScalingAdjustment"] = module.params["scaling_adjustment"] if module.params["cooldown"]: @@ -455,7 +455,7 @@ def create_scaling_policy(connection, module): elif policy_type == "StepScaling": if not module.params["step_adjustments"]: - module.fail_json(msg="step_adjustments is required when policy_type is StepScaling" "and state is present") + module.fail_json(msg="step_adjustments is required when policy_type is StepScaling and state is present") params["StepAdjustments"] = [] for step_adjustment in module.params["step_adjustments"]: step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) @@ -472,8 +472,7 @@ def create_scaling_policy(connection, module): elif policy_type == "TargetTrackingScaling": if not module.params["target_tracking_config"]: module.fail_json( - msg="target_tracking_config is required when policy_type is " - "TargetTrackingScaling and state is present" + msg="target_tracking_config is required when policy_type is TargetTrackingScaling and state is present" ) else: params["TargetTrackingConfiguration"] = build_target_specification( @@ -488,7 +487,7 @@ def create_scaling_policy(connection, module): aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") before = after = {} if not policies: @@ -512,7 +511,7 @@ def create_scaling_policy(connection, module): aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values @@ -532,7 +531,7 @@ def delete_scaling_policy(connection, module): try: policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to 
obtain autoscaling policy {policy_name}") if policy["ScalingPolicies"]: try: diff --git a/batch_compute_environment.py b/batch_compute_environment.py index ffc1f19b003..6bb2541e161 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -268,14 +268,11 @@ def validate_params(module): # validate compute environment name if not re.search(r"^[\w\_:]+$", compute_environment_name): module.fail_json( - msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters " - "and underscores.".format(compute_environment_name) + msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores." ) if not compute_environment_name.startswith("arn:aws:batch:"): if len(compute_environment_name) > 128: - module.fail_json( - msg='compute_environment_name "{0}" exceeds 128 character limit'.format(compute_environment_name) - ) + module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit') return diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py index f825f2a63bf..17e888b4f1b 100644 --- a/cloudformation_stack_set.py +++ b/cloudformation_stack_set.py @@ -346,7 +346,7 @@ def create_stack_set(module, stack_params, cfn): cfn.create_stack_set(aws_retry=True, **stack_params) return await_stack_set_exists(cfn, stack_params["StackSetName"]) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get("StackSetName"))) + module.fail_json_aws(err, msg=f"Failed to create stack set {stack_params.get('StackSetName')}.") def update_stack_set(module, stack_params, cfn): @@ -360,14 +360,19 @@ def update_stack_set(module, stack_params, cfn): except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws( err, - msg="One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters.", + msg=( + "One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters." + ), ) except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except module.fail_json_aws( err, - msg="Another operation is already in progress on this stack set - please try again later. When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.", + msg=( + "Another operation is already in progress on this stack set - please try again later. When making" + " multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op" + " errors." + ), ) except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") @@ -436,9 +441,8 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai pass else: module.warn( - "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format( - operation_id, stack_set_name, max_wait - ) + f"Timed out waiting for operation {operation_id} on stack set {stack_set_name} after {max_wait} seconds." 
+ " Returning unfinished operation" ) @@ -456,9 +460,8 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): time.sleep(15) module.warn( - "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format( - stack_set_name, ", ".join(s["StackId"] for s in to_await), max_wait - ) + f"Timed out waiting for stack set {stack_set_name} instances {', '.join(s['StackId'] for s in to_await)} to" + f" complete after {max_wait} seconds. Returning unfinished operation" ) @@ -583,8 +586,10 @@ def main(): state = module.params["state"] if state == "present" and not module.params["accounts"]: module.fail_json( - msg="Can't create a stack set without choosing at least one account. " - "To get the ID of the current account, use the aws_caller_info module." + msg=( + "Can't create a stack set without choosing at least one account. " + "To get the ID of the current account, use the aws_caller_info module." + ) ) module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] @@ -609,8 +614,10 @@ def main(): stack_params["UsePreviousTemplate"] = True else: module.fail_json( - msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, " - "`template_body`, or `template_url`".format(module.params["name"]) + msg=( + f"The Stack Set {module.params['name']} does not exist, and no template was provided. Provide one" + " of `template`, `template_body`, or `template_url`" + ) ) stack_params["Parameters"] = [] @@ -668,11 +675,11 @@ def main(): if state == "present": if not existing_stack_set: # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params["ClientRequestToken"] = "Ansible-StackSet-Create-{0}".format(operation_uuid) + stack_params["ClientRequestToken"] = f"Ansible-StackSet-Create-{operation_uuid}" changed = True create_stack_set(module, stack_params, cfn) else: - stack_params["OperationId"] = "Ansible-StackSet-Update-{0}".format(operation_uuid) + stack_params["OperationId"] = f"Ansible-StackSet-Update-{operation_uuid}" operation_ids.append(stack_params["OperationId"]) if module.params.get("regions"): stack_params["OperationPreferences"] = get_operation_preferences(module) @@ -694,7 +701,7 @@ def main(): module.params["regions"], ) if new_stack_instances: - operation_ids.append("Ansible-StackInstance-Create-{0}".format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Create-{operation_uuid}") changed = True cfn.create_stack_instances( StackSetName=module.params["name"], @@ -704,7 +711,7 @@ def main(): OperationId=operation_ids[-1], ) else: - operation_ids.append("Ansible-StackInstance-Update-{0}".format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Update-{operation_uuid}") cfn.update_stack_instances( StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in existing_stack_instances)), @@ -723,20 +730,20 @@ def main(): elif state == "absent": if not existing_stack_set: - module.exit_json(msg="Stack set {0} does not exist".format(module.params["name"])) + module.exit_json(msg=f"Stack set {module.params['name']} does not exist") if module.params.get("purge_stack_instances") is False: pass try: cfn.delete_stack_set( StackSetName=module.params["name"], ) - module.exit_json(msg="Stack set {0} deleted".format(module.params["name"])) + module.exit_json(msg=f"Stack set {module.params['name']} deleted") except is_boto3_error_code("OperationInProgressException") as e: # pylint: 
disable=duplicate-except module.fail_json_aws( - e, msg="Cannot delete stack {0} while there is an operation in progress".format(module.params["name"]) + e, msg=f"Cannot delete stack {module.params['name']} while there is an operation in progress" ) except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except - delete_instances_op = "Ansible-StackInstance-Delete-{0}".format(operation_uuid) + delete_instances_op = f"Ansible-StackInstance-Delete-{operation_uuid}" cfn.delete_stack_instances( StackSetName=module.params["name"], Accounts=module.params["accounts"], @@ -768,7 +775,7 @@ def main(): msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: " + stack_states, ) - module.exit_json(changed=True, msg="Stack set {0} deleted".format(module.params["name"])) + module.exit_json(changed=True, msg=f"Stack set {module.params['name']} deleted") result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids)) if any(o["status"] == "FAILED" for o in result["operations"]): diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 7b841c7f925..40bc15dac35 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1463,7 +1463,7 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): - raise ValueError("Expected a list, got a {0} with value {1}".format(type(list_items).__name__, str(list_items))) + raise ValueError(f"Expected a list, got a {type(list_items).__name__} with value {str(list_items)}") result = {} if include_quantity: result["quantity"] = len(list_items) @@ -1491,7 +1491,7 @@ def delete_distribution(client, module, distribution): aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution["Distribution"])) + module.fail_json_aws(e, msg=f"Error deleting distribution {to_native(distribution['Distribution'])}") def update_distribution(client, module, config, distribution_id, e_tag): @@ -1500,7 +1500,7 @@ def update_distribution(client, module, config, distribution_id, e_tag): "Distribution" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) + module.fail_json_aws(e, msg=f"Error updating distribution to {to_native(config)}") def tag_resource(client, module, arn, tags): @@ -1721,13 +1721,11 @@ def validate_logging(self, logging): def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): - self.module.fail_json( - msg="%s is of type %s. Must be a list." % (list_name, type(list_to_validate).__name__) - ) + self.module.fail_json(msg=f"{list_name} is of type {type(list_to_validate).__name__}. Must be a list.") def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: - self.module.fail_json(msg="%s must be specified." 
% full_key_name) + self.module.fail_json(msg=f"{full_key_name} must be specified.") def validate_origins( self, @@ -1781,8 +1779,8 @@ def validate_s3_origin_configuration(self, client, existing_config, origin): return existing_config["s3_origin_config"]["origin_access_identity"] try: - comment = "access-identity-by-ansible-%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) - caller_reference = "%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + comment = f"access-identity-by-ansible-{origin.get('domain_name')}-{self.__default_datetime_string}" + caller_reference = f"{origin.get('domain_name')}-{self.__default_datetime_string}" cfoai_config = dict( CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) ) @@ -1790,8 +1788,8 @@ def validate_s3_origin_configuration(self, client, existing_config, origin): "Id" ] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin["id"]) - return "origin-access-identity/cloudfront/%s" % oai + self.module.fail_json_aws(e, msg=f"Couldn't create Origin Access Identity for id {origin['id']}") + return f"origin-access-identity/cloudfront/{oai}" def validate_origin(self, client, existing_config, origin, default_origin_path): try: @@ -1948,9 +1946,9 @@ def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid if is_default_cache: cache_behavior_name = "Default cache behavior" else: - cache_behavior_name = "Cache behavior for path %s" % cache_behavior["path_pattern"] + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" self.module.fail_json( - msg="%s has target_origin_id pointing to an origin that does not exist." % cache_behavior_name + msg=f"{cache_behavior_name} has target_origin_id pointing to an origin that does not exist." ) cache_behavior["target_origin_id"] = target_origin_id cache_behavior = self.add_key_else_validate( @@ -2262,21 +2260,15 @@ def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_li or isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list) ): - self.module.fail_json( - msg="The attribute list {0} must be one of [{1}]".format( - attribute_list_name, " ".join(str(a) for a in allowed_list) - ) - ) + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute list {attribute_list_name} must be one of [{attribute_list}]") except Exception as e: self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): if attribute is not None and attribute not in allowed_list: - self.module.fail_json( - msg="The attribute {0} must be one of [{1}]".format( - attribute_name, " ".join(str(a) for a in allowed_list) - ) - ) + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute {attribute_name} must be one of [{attribute_list}]") def validate_distribution_from_caller_reference(self, caller_reference): try: @@ -2333,12 +2325,11 @@ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_ref except botocore.exceptions.WaiterError as e: self.module.fail_json_aws( e, - msg="Timeout waiting for CloudFront action." - " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)), + msg=f"Timeout waiting for CloudFront action. 
Waited for {to_text(wait_timeout)} seconds before timeout.", ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) + self.module.fail_json_aws(e, msg=f"Error getting distribution {distribution_id}") def main(): diff --git a/codebuild_project.py b/codebuild_project.py index 6a910799d88..69fd2e463b5 100644 --- a/codebuild_project.py +++ b/codebuild_project.py @@ -310,8 +310,7 @@ class CodeBuildAnsibleAWSError(AnsibleAWSError): def do_create_project(client, params, formatted_params): if params["source"] is None or params["artifacts"] is None: raise CodeBuildAnsibleAWSError( - message="The source and artifacts parameters must be provided " - "when creating a new project. No existing project was found." + message="The source and artifacts parameters must be provided when creating a new project. No existing project was found." ) if params["tags"] is not None: diff --git a/codepipeline.py b/codepipeline.py index 7e0baf3fd65..9fb42643df4 100644 --- a/codepipeline.py +++ b/codepipeline.py @@ -216,7 +216,7 @@ def create_pipeline(client, name, role_arn, artifact_store, stages, version, mod resp = client.create_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict["name"])) + module.fail_json_aws(e, msg=f"Unable create pipeline {pipeline_dict['name']}") def update_pipeline(client, pipeline_dict, module): @@ -224,7 +224,7 @@ def update_pipeline(client, pipeline_dict, module): resp = client.update_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict["name"])) + module.fail_json_aws(e, msg=f"Unable update pipeline {pipeline_dict['name']}") def delete_pipeline(client, name, module): @@ -232,7 +232,7 @@ def delete_pipeline(client, name, module): resp = client.delete_pipeline(name=name) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable delete pipeline {name}") def describe_pipeline(client, name, version, module): diff --git a/config_delivery_channel.py b/config_delivery_channel.py index dc03a95f719..c54fb36c05c 100644 --- a/config_delivery_channel.py +++ b/config_delivery_channel.py @@ -164,8 +164,7 @@ def update_resource(client, module, params, result): except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available", + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
Make sure the bucket exists and is available", ) except ( botocore.exceptions.ClientError, diff --git a/data_pipeline.py b/data_pipeline.py index d30be5c847d..4b602708163 100644 --- a/data_pipeline.py +++ b/data_pipeline.py @@ -271,7 +271,7 @@ def pipeline_field(client, dp_id, field): for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: if field_key["key"] == field: return field_key["stringValue"] - raise KeyError("Field key {0} not found!".format(field)) + raise KeyError(f"Field key {field} not found!") def run_with_timeout(timeout, func, *func_args, **func_kwargs): @@ -350,7 +350,7 @@ def activate_pipeline(client, module): try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) + module.fail_json(msg=f"Data Pipeline {dp_name} not found") if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: changed = False @@ -388,7 +388,7 @@ def deactivate_pipeline(client, module): try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) + module.fail_json(msg=f"Data Pipeline {dp_name} not found") if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES: changed = False @@ -527,7 +527,7 @@ def define_pipeline(client, module, objects, dp_id): dp_name = module.params.get("name") if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = "Data Pipeline {0} is unable to be updated while in state FINISHED.".format(dp_name) + msg = f"Data Pipeline {dp_name} is unable to be updated while in state FINISHED." changed = False elif objects: @@ -538,14 +538,16 @@ def define_pipeline(client, module, objects, dp_id): client.put_pipeline_definition( pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values ) - msg = "Data Pipeline {0} has been updated.".format(dp_name) + msg = f"Data Pipeline {dp_name} has been updated." changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, - msg=f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields" - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" - "objects", + msg=( + f"Failed to put the definition for pipeline {dp_name}. 
Check that string/reference fields" + "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" + "objects" + ), ) else: changed = False diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py index e8e0f2c6b08..870e459327d 100644 --- a/directconnect_confirm_connection.py +++ b/directconnect_confirm_connection.py @@ -87,7 +87,7 @@ def find_connection_id(client, connection_id=None, connection_name=None): response = describe_connections(client, params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" raise DirectConnectError( @@ -117,7 +117,7 @@ def get_connection_state(client, connection_id): return response["connections"][0]["connectionState"] except (BotoCoreError, ClientError, IndexError) as e: raise DirectConnectError( - msg="Failed to describe DirectConnect connection {0} state".format(connection_id), + msg=f"Failed to describe DirectConnect connection {connection_id} state", last_traceback=traceback.format_exc(), exception=e, ) diff --git a/directconnect_connection.py b/directconnect_connection.py index 176d83392d4..fd55a3b5291 100644 --- a/directconnect_connection.py +++ b/directconnect_connection.py @@ -187,7 +187,7 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) @@ -233,7 +233,7 @@ def create_connection(client, location, bandwidth, name, lag_id): connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: raise DirectConnectError( - msg="Failed to create DirectConnect connection {0}".format(name), + msg=f"Failed to create DirectConnect connection {name}", last_traceback=traceback.format_exc(), exception=e, ) diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py index 9a532c63298..57907c93bb9 100644 --- a/directconnect_link_aggregation_group.py +++ b/directconnect_link_aggregation_group.py @@ -250,7 +250,7 @@ def create_lag(client, num_connections, location, bandwidth, name, connection_id lag = client.create_lag(**parameters) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to create DirectConnect link aggregation group {0}".format(name), + msg=f"Failed to create DirectConnect link aggregation group {name}", last_traceback=traceback.format_exc(), exception=e, ) @@ -263,7 +263,7 @@ def delete_lag(client, lag_id): client.delete_lag(lagId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), + msg=f"Failed to delete Direct Connect link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -285,8 +285,7 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if min_links and min_links > num_connections: raise DirectConnectError( - msg="The number 
of connections {0} must be greater than the minimum number of links " - "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + msg=f"The number of connections {num_connections} must be greater than the minimum number of links {min_links} to update the LAG {lag_id}", last_traceback=None, exception=None, ) @@ -297,13 +296,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ except botocore.exceptions.ClientError as e: if wait and time.time() - start <= wait_timeout: continue - msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) + msg = f"Failed to update Direct Connect link aggregation group {lag_id}." if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: - msg += ( - "Unable to set the min number of links to {0} while the LAG connections are being requested".format( - min_links - ) - ) + msg += f"Unable to set the min number of links to {min_links} while the LAG connections are being requested" raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) else: break @@ -320,7 +315,7 @@ def ensure_present( exists = lag_exists(client, lag_id, lag_name) if not exists and lag_id: raise DirectConnectError( - msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), + msg=f"The Direct Connect link aggregation group {lag_id} does not exist.", last_traceback=None, exception="", ) @@ -346,7 +341,7 @@ def describe_virtual_interfaces(client, lag_id): response = client.describe_virtual_interfaces(connectionId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), + msg=f"Failed to describe any virtual interfaces associated with LAG: {lag_id}", last_traceback=traceback.format_exc(), exception=e, ) @@ -366,7 +361,7 @@ def disassociate_vis(client, lag_id, virtual_interfaces): response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), + msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -385,10 +380,13 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete: raise DirectConnectError( - msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " - "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " - "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " - "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), + msg=( + "There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG" + f" {lag_id}. To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces" + " they will be deleted). 
Optionally, to ensure hosted connections are deleted after disassociation use" + " delete_with_disassociation: True and wait: True (as Virtual Interfaces may take a few moments to" + " delete)" + ), last_traceback=None, exception=None, ) diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py index ab6ee9d4ea4..ec0c87099a4 100644 --- a/directconnect_virtual_interface.py +++ b/directconnect_virtual_interface.py @@ -361,7 +361,7 @@ def vi_state(client, virtual_interface_id): """ Returns the state of the virtual interface. """ - err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) + err_msg = f"Failed to describe virtual interface: {virtual_interface_id}" vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( virtualInterfaceId=virtual_interface_id ) @@ -435,7 +435,7 @@ def modify_vi(client, virtual_interface_id, connection_id): """ Associate a new connection ID """ - err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) + err_msg = f"Unable to associate {connection_id} with virtual interface {virtual_interface_id}" try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( virtualInterfaceId=virtual_interface_id, connectionId=connection_id ) @@ -460,15 +460,15 @@ def ensure_state(connection, module): if virtual_interface_id is False: module.fail_json( - msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match." + msg=( + "Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match." + ) ) if state == "present": if not virtual_interface_id and module.params["virtual_interface_id"]: - module.fail_json( - msg="The virtual interface {0} does not exist.".format(module.params["virtual_interface_id"]) - ) + module.fail_json(msg=f"The virtual interface {module.params['virtual_interface_id']} does not exist.") elif not virtual_interface_id: assembled_params = assemble_params_for_creating_vi(module.params) diff --git a/dynamodb_table.py b/dynamodb_table.py index a9503735557..5be7a4b9c43 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -660,7 +660,7 @@ def _generate_global_indexes(billing_mode): continue name = index.get("name") if name in index_exists: - module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") # Convert the type name to upper case and remove the global_ index["type"] = index["type"].upper()[7:] index = _generate_index(index, include_throughput) @@ -680,7 +680,7 @@ def _generate_local_indexes(): continue name = index.get("name") if name in index_exists: - module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") index["type"] = index["type"].upper() index = _generate_index(index, False) index_exists[name] = True @@ -697,7 +697,7 @@ def _generate_global_index_map(current_table): continue name = index.get("name") if name in global_index_map: - module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case and remove the global_ idx["type"] = 
idx["type"].upper()[7:] @@ -713,7 +713,7 @@ def _generate_local_index_map(current_table): continue name = index.get("name") if name in local_index_map: - module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case idx["type"] = idx["type"].upper() @@ -734,8 +734,8 @@ def _generate_index(index, include_throughput=True): else: if non_key_attributes: module.fail_json( - "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Index name: {0}".format(index["name"]) + "DynamoDB does not support specifying non-key-attributes ('includes') for indexes of type 'all'. Index" + f" name: {index['name']}" ) idx = dict( @@ -919,9 +919,7 @@ def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: module.fail_json( - "DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format( - primary_index_changes - ) + f"DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {primary_index_changes}" ) changed = False diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py index 5d7e49bde90..170a564e15d 100644 --- a/ec2_ami_copy.py +++ b/ec2_ami_copy.py @@ -171,7 +171,7 @@ def copy_image(module, ec2): try: if module.params.get("tag_equality"): - filters = [{"Name": "tag:%s" % k, "Values": [v]} for (k, v) in module.params.get("tags").items()] + filters = [{"Name": f"tag:{k}", "Values": [v]} for (k, v) in module.params.get("tags").items()] filters.append(dict(Name="state", Values=["available", "pending"])) images = ec2.describe_images(Filters=filters) if len(images["Images"]) > 0: @@ -197,7 +197,7 @@ def copy_image(module, ec2): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not copy AMI") except Exception as e: - module.fail_json(msg="Unhandled exception. (%s)" % to_native(e)) + module.fail_json(msg=f"Unhandled exception. ({to_native(e)})") def main(): diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 8e1240d285f..01d36ccc57c 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -453,13 +453,11 @@ def determine_iam_role(module, name_or_arn): role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return {"arn": role["InstanceProfile"]["Arn"]} except is_boto3_error_code("NoSuchEntity") as e: - module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) + module.fail_json_aws(e, msg=f"Could not find instance_role {name_or_arn}") except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format( - name_or_arn - ), + msg=f"An error occurred while searching for instance_role {name_or_arn}. Please try supplying the full ARN.", ) @@ -481,15 +479,18 @@ def existing_templates(module): except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="Launch template with ID {0} is not a valid ID. It should start with `lt-....`".format( - module.params.get("launch_template_id") + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} is not a valid ID. 
It should start" + " with `lt-....`" ), ) except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="Launch template with ID {0} could not be found, please supply a name " - "instead so that a new template can be created".format(module.params.get("launch_template_id")), + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} could not be found, please supply a" + " name instead so that a new template can be created" + ), ) except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not check existing launch templates. This may be an IAM permission problem.") @@ -510,9 +511,7 @@ def existing_templates(module): except (ClientError, BotoCoreError, WaiterError) as e: module.fail_json_aws( e, - msg="Could not find launch template versions for {0} (ID: {1}).".format( - template["LaunchTemplateName"], template_id - ), + msg=f"Could not find launch template versions for {template['LaunchTemplateName']} (ID: {template_id}).", ) @@ -547,10 +546,8 @@ def delete_template(module): ) if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: module.warn( - "Failed to delete template versions {0} on launch template {1}".format( - v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"], - template["LaunchTemplateId"], - ) + f"Failed to delete template versions {v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']} on" + f" launch template {template['LaunchTemplateId']}" ) deleted_versions = [ camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"] @@ -558,9 +555,7 @@ def delete_template(module): except (ClientError, BotoCoreError) as e: module.fail_json_aws( e, - msg="Could not delete existing versions of the launch template {0}".format( - template["LaunchTemplateId"] - ), + msg=f"Could not delete existing versions of the launch template {template['LaunchTemplateId']}", ) try: resp = ec2.delete_launch_template( @@ -568,7 +563,7 @@ def delete_template(module): aws_retry=True, ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template["LaunchTemplateId"])) + module.fail_json_aws(e, msg=f"Could not delete launch template {template['LaunchTemplateId']}") return { "deleted_versions": deleted_versions, "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), @@ -647,9 +642,7 @@ def create_or_update(module, template_options): int(module.params.get("source_version")) except ValueError: module.fail_json( - msg='source_version param was not a valid integer, got "{0}"'.format( - module.params.get("source_version") - ) + msg=f"source_version param was not a valid integer, got \"{module.params.get('source_version')}\"" ) # get source template version source_version = next( @@ -658,7 +651,7 @@ def create_or_update(module, template_options): ) if source_version is None: module.fail_json( - msg='source_version does not exist, got "{0}"'.format(module.params.get("source_version")) + msg=f"source_version does not exist, got \"{module.params.get('source_version')}\"" ) resp = ec2.create_launch_template_version( LaunchTemplateId=template["LaunchTemplateId"], @@ -684,9 +677,7 @@ def create_or_update(module, template_options): int(module.params.get("default_version")) except ValueError: module.fail_json( - msg='default_version param was not a valid integer, got "{0}"'.format( - module.params.get("default_version") - ) + msg=f"default_version 
param was not a valid integer, got \"{module.params.get('default_version')}\"" ) set_default = ec2.modify_launch_template( LaunchTemplateId=template["LaunchTemplateId"], @@ -863,7 +854,7 @@ def main(): elif module.params.get("state") == "absent": out = delete_template(module) else: - module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get("state"))) + module.fail_json(msg=f"Unsupported value \"{module.params.get('state')}\" for `state` parameter") module.exit_json(**out) diff --git a/ec2_placement_group.py b/ec2_placement_group.py index 4e1967c846d..ccdd7d54785 100644 --- a/ec2_placement_group.py +++ b/ec2_placement_group.py @@ -120,7 +120,7 @@ def search_placement_group(connection, module): try: response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find placement group named [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't find placement group named [{name}]") if len(response["PlacementGroups"]) != 1: return None @@ -178,7 +178,7 @@ def create_placement_group(connection, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create placement group [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't create placement group [{name}]") module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) @@ -190,7 +190,7 @@ def delete_placement_group(connection, module): try: connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete placement group [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't delete placement group [{name}]") module.exit_json(changed=True) @@ -220,9 +220,7 @@ def main(): else: name = module.params.get("name") module.fail_json( - msg=("Placement group '{}' exists, can't change strategy" + " from '{}' to '{}'").format( - name, placement_group["strategy"], strategy - ) + msg=f"Placement group '{name}' exists, can't change strategy from '{placement_group['strategy']}' to '{strategy}'" ) elif state == "absent": diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py index 970cd302636..75cbc72585c 100644 --- a/ec2_placement_group_info.py +++ b/ec2_placement_group_info.py @@ -95,7 +95,7 @@ def get_placement_groups_details(connection, module): else: response = connection.describe_placement_groups() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't find placement groups named [%s]" % names) + module.fail_json_aws(e, msg=f"Couldn't find placement groups named [{names}]") results = [] for placement_group in response["PlacementGroups"]: diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py index 8c6282d0b0f..9b50cb21b9c 100644 --- a/ec2_transit_gateway.py +++ b/ec2_transit_gateway.py @@ -327,7 +327,7 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): if len(tgws) > 1: self._module.fail_json( - msg="EC2 returned more than one transit Gateway for description {0}, aborting".format(description) + msg=f"EC2 returned more than one transit Gateway for description {description}, aborting" ) elif tgws: tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) @@ -375,7 +375,7 @@ def create_tgw(self, description): else: 
result = self.get_matching_tgw(tgw_id=tgw_id) - self._results["msg"] = " Transit gateway {0} created".format(result["transit_gateway_id"]) + self._results["msg"] = f" Transit gateway {result['transit_gateway_id']} created" return result @@ -401,7 +401,7 @@ def delete_tgw(self, tgw_id): else: result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - self._results["msg"] = " Transit gateway {0} deleted".format(tgw_id) + self._results["msg"] = f" Transit gateway {tgw_id} deleted" return result diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py index b15bec20f06..0a309b4863c 100644 --- a/ec2_vpc_egress_igw.py +++ b/ec2_vpc_egress_igw.py @@ -89,9 +89,7 @@ def delete_eigw(module, connection, eigw_id): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id) - ) + module.fail_json_aws(e, msg=f"Could not delete Egress-Only Internet Gateway {eigw_id} from VPC {module.vpc_id}") if not module.check_mode: changed = response.get("ReturnCode", False) @@ -119,12 +117,12 @@ def create_eigw(module, connection, vpc_id): # We need to catch the error and return something valid changed = True except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) + module.fail_json_aws(e, msg=f"invalid vpc ID '{vpc_id}' provided") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"Could not create Egress-Only Internet Gateway for vpc ID {vpc_id}") if not module.check_mode: gateway = response.get("EgressOnlyInternetGateway", {}) @@ -136,9 +134,7 @@ def create_eigw(module, connection, vpc_id): else: # EIGW gave back a bad attachment state or an invalid response so we error out module.fail_json( - msg="Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response".format( - vpc_id - ), + msg=f"Unable to create and attach Egress Only Internet Gateway to VPCId: {vpc_id}. 
Bad or no state in response", **camel_dict_to_snake_dict(response), ) diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py index ecf530a9d74..40e0398b974 100644 --- a/ec2_vpc_nacl_info.py +++ b/ec2_vpc_nacl_info.py @@ -137,7 +137,7 @@ def list_ec2_vpc_nacls(connection, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) + module.fail_json_aws(e, msg=f"Unable to describe network ACLs {nacl_ids}") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_nacls = [] diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py index e59fe25839c..3ca4d8013e3 100644 --- a/ec2_vpc_vgw.py +++ b/ec2_vpc_vgw.py @@ -274,7 +274,7 @@ def create_vgw(client, module): get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) except botocore.exceptions.WaiterError as e: module.fail_json_aws( - e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response["VpnGateway"]["VpnGatewayId"]) + e, msg=f"Failed to wait for Vpn Gateway {response['VpnGateway']['VpnGatewayId']} to be available" ) except is_boto3_error_code("VpnGatewayLimitExceeded") as e: module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 8d8dc1467e1..0efce4a7470 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -384,7 +384,7 @@ def add_routes(connection, vpn_connection_id, routes_to_add): ) except (BotoCoreError, ClientError) as e: raise VPNConnectionException( - msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), + msg=f"Failed while adding route {route} to the VPN connection {vpn_connection_id}.", exception=e, ) @@ -397,7 +397,7 @@ def remove_routes(connection, vpn_connection_id, routes_to_remove): ) except (BotoCoreError, ClientError) as e: raise VPNConnectionException( - msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), + msg=f"Failed to remove route {route} from the VPN connection {vpn_connection_id}.", exception=e, ) @@ -435,7 +435,7 @@ def create_filter(module_params, provided_filters): elif raw_param in list(boto3ify_filter.items()): param = raw_param else: - raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param)) + raise VPNConnectionException(msg=f"{raw_param} is not a valid filter.") # reformat filters with special formats if param == "tag": @@ -487,8 +487,10 @@ def find_connection_response(connections=None): return None else: raise VPNConnectionException( - msg="More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters." + msg=( + "More than one matching VPN connection was found. " + "To modify or delete a VPN please specify vpn_connection_id or add filters." + ) ) # Found unique match @@ -524,8 +526,10 @@ def create_connection( if not (customer_gateway_id and vpn_gateway_id): raise VPNConnectionException( - msg="No matching connection was found. To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id." + msg=( + "No matching connection was found. To create a new connection you must provide " + "both vpn_gateway_id and customer_gateway_id." 
+ ) ) try: vpn = connection.create_vpn_connection( @@ -537,7 +541,7 @@ def create_connection( ) except WaiterError as e: raise VPNConnectionException( - msg="Failed to wait for VPN connection {0} to be available".format(vpn["VpnConnection"]["VpnConnectionId"]), + msg=f"Failed to wait for VPN connection {vpn['VpnConnection']['VpnConnectionId']} to be available", exception=e, ) except (BotoCoreError, ClientError) as e: @@ -555,19 +559,17 @@ def delete_connection(connection, vpn_connection_id, delay, max_attempts): ) except WaiterError as e: raise VPNConnectionException( - msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), exception=e + msg=f"Failed to wait for VPN connection {vpn_connection_id} to be removed", exception=e ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException( - msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), exception=e - ) + raise VPNConnectionException(msg=f"Failed to delete the VPN connection: {vpn_connection_id}", exception=e) def add_tags(connection, vpn_connection_id, add): try: connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), exception=e) + raise VPNConnectionException(msg=f"Failed to add the tags: {add}.", exception=e) def remove_tags(connection, vpn_connection_id, remove): @@ -576,7 +578,7 @@ def remove_tags(connection, vpn_connection_id, remove): try: connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), exception=e) + raise VPNConnectionException(msg=f"Failed to remove the tags: {remove}.", exception=e) def check_for_update(connection, module_params, vpn_connection_id): @@ -624,9 +626,10 @@ def check_for_update(connection, module_params, vpn_connection_id): if will_be is not None and to_text(will_be) != to_text(is_now): raise VPNConnectionException( - msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " - "connection attributes are tags and routes. The value you tried to change it to " - "is {2}.".format(attribute, is_now, will_be) + msg=( + f"You cannot modify {attribute}, the current value of which is {is_now}. Modifiable VPN connection" + f" attributes are tags and routes. The value you tried to change it to is {will_be}." 
+ ) ) return changes diff --git a/ec2_win_password.py b/ec2_win_password.py index d1553c91aae..a9ca8e94ca1 100644 --- a/ec2_win_password.py +++ b/ec2_win_password.py @@ -174,7 +174,7 @@ def ec2_win_password(module): decoded = b64decode(data) if wait and datetime.datetime.now() >= end: - module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout) + module.fail_json(msg=f"wait for password timeout after {int(wait_timeout)} seconds") if key_file is not None and b_key_data is None: try: @@ -182,7 +182,7 @@ def ec2_win_password(module): key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) except IOError as e: # Handle bad files - module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror)) + module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}") except (ValueError, TypeError) as e: # Handle issues loading key module.fail_json(msg="unable to parse key file") diff --git a/ecs_attribute.py b/ecs_attribute.py index 085761b19c3..682014675a1 100644 --- a/ecs_attribute.py +++ b/ecs_attribute.py @@ -142,13 +142,13 @@ def _parse_attrs(self, attrs): for attr in attrs: if isinstance(attr, dict): if len(attr) != 1: - self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) + self.module.fail_json(msg=f"Incorrect attribute format - {str(attr)}") name, value = list(attr.items())[0] attrs_parsed.append({"name": name, "value": value}) elif isinstance(attr, str): attrs_parsed.append({"name": attr, "value": None}) else: - self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) + self.module.fail_json(msg=f"Incorrect attributes format - {str(attrs)}") return attrs_parsed @@ -197,14 +197,14 @@ def _get_ecs_arn(self): cluster=self.cluster, containerInstances=ecs_instances_arns )["containerInstances"] except (ClientError, EndpointConnectionError) as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") try: ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ "containerInstanceArn" ] except StopIteration: - self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster)) + self.module.fail_json(msg=f"EC2 instance Id not found in ECS cluster - {str(self.cluster)}") return ecs_arn @@ -238,7 +238,7 @@ def attrs_get_by_name(self, attrs): for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] ] except ClientError as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] diff --git a/ecs_cluster.py b/ecs_cluster.py index e627cd98f1b..fca35331f69 100644 --- a/ecs_cluster.py +++ b/ecs_cluster.py @@ -203,7 +203,7 @@ def describe_cluster(self, cluster_name): c = self.find_in_array(response["clusters"], cluster_name) if c: return c - raise Exception("Unknown problem describing cluster %s." 
% cluster_name) + raise Exception(f"Unknown problem describing cluster {cluster_name}.") def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(clusterName=cluster_name) diff --git a/ecs_ecr.py b/ecs_ecr.py index 1e6efd7b331..fb812ca0a45 100644 --- a/ecs_ecr.py +++ b/ecs_ecr.py @@ -272,8 +272,7 @@ def create_repository(self, registry_id, name, image_tag_mutability, encryption_ default_registry_id = self.sts.get_caller_identity().get("Account") if registry_id != default_registry_id: raise Exception( - "Cannot create repository in registry {0}." - "Would be created in {1} instead.".format(registry_id, default_registry_id) + f"Cannot create repository in registry {registry_id}. Would be created in {default_registry_id} instead." ) if encryption_configuration is None: @@ -303,8 +302,8 @@ def set_repository_policy(self, registry_id, name, policy_text, force): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = "{0}:{1}".format(registry_id, name) - raise Exception("could not find repository {0}".format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def delete_repository(self, registry_id, name, force): @@ -367,8 +366,8 @@ def put_lifecycle_policy(self, registry_id, name, policy_text): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = "{0}:{1}".format(registry_id, name) - raise Exception("could not find repository {0}".format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def purge_lifecycle_policy(self, registry_id, name): diff --git a/ecs_service.py b/ecs_service.py index af5ad567dc8..8115b3b34fd 100644 --- a/ecs_service.py +++ b/ecs_service.py @@ -761,7 +761,7 @@ def describe_service(self, cluster_name, service_name): c = self.find_in_array(response["services"], service_name) if c: return c - raise Exception("Unknown problem describing service %s." 
% service_name) + raise Exception(f"Unknown problem describing service {service_name}.") def is_matching_service(self, expected, existing): # aws returns the arn of the task definition @@ -1065,9 +1065,7 @@ def main(): except Exception as e: module.fail_json_aws( e, - msg="Exception describing service '{0}' in cluster '{1}'".format( - module.params["name"], module.params["cluster"] - ), + msg=f"Exception describing service '{module.params['name']}' in cluster '{module.params['cluster']}'", ) results = dict(changed=False) @@ -1265,7 +1263,7 @@ def main(): break time.sleep(delay) if i is repeat - 1: - module.fail_json(msg="Service still not deleted after {0} tries of {1} seconds each.".format(repeat, delay)) + module.fail_json(msg=f"Service still not deleted after {repeat} tries of {delay} seconds each.") return module.exit_json(**results) diff --git a/ecs_tag.py b/ecs_tag.py index f11fc1f33ac..109b974eea6 100644 --- a/ecs_tag.py +++ b/ecs_tag.py @@ -123,7 +123,7 @@ def get_tags(ecs, module, resource): try: return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch tags for resource {0}".format(resource)) + module.fail_json_aws(e, msg=f"Failed to fetch tags for resource {resource}") def get_arn(ecs, module, cluster_name, resource_type, resource): @@ -144,9 +144,9 @@ def get_arn(ecs, module, cluster_name, resource_type, resource): description = ecs.describe_container_instances(clusters=[resource]) resource_arn = description["containerInstances"][0]["containerInstanceArn"] except (IndexError, KeyError): - module.fail_json(msg="Failed to find {0} {1}".format(resource_type, resource)) + module.fail_json(msg=f"Failed to find {resource_type} {resource}") except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to find {0} {1}".format(resource_type, resource)) + module.fail_json_aws(e, msg=f"Failed to find {resource_type} {resource}") return resource_arn @@ -200,7 +200,7 @@ def main(): try: ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to remove tags {remove_tags} from resource {resource}") if state == "present" and add_tags: result["changed"] = True @@ -211,7 +211,7 @@ def main(): tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value") ecs.tag_resource(resourceArn=resource_arn, tags=tags) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to set tags {add_tags} on resource {resource}") result["tags"] = get_tags(ecs, module, resource_arn) module.exit_json(**result) diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py index 0a8e413dbcd..f150255fb89 100644 --- a/ecs_taskdefinition.py +++ b/ecs_taskdefinition.py @@ -830,8 +830,10 @@ def register_task( if network_mode == "awsvpc" and "hostPort" in port_mapping: if port_mapping["hostPort"] != port_mapping.get("containerPort"): self.module.fail_json( - msg="In awsvpc network mode, host port must be set to the same as " - "container port or not be set" + msg=( + "In awsvpc network mode, host port must be set to the same as " + "container port or not be set" + ) ) if "linuxParameters" in container: @@ -1017,17 
+1019,19 @@ def main(): if existing and existing["status"] != "ACTIVE": # We cannot reactivate an inactive revision module.fail_json( - msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision) + msg=f"A task in family '{family}' already exists for revision {int(revision)}, but it is inactive" ) elif not existing: if not existing_definitions_in_family and revision != 1: module.fail_json( - msg="You have specified a revision of %d but a created revision would be 1" % revision + msg=f"You have specified a revision of {int(revision)} but a created revision would be 1" ) elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: module.fail_json( - msg="You have specified a revision of %d but a created revision would be %d" - % (revision, existing_definitions_in_family[-1]["revision"] + 1) + msg=( + f"You have specified a revision of {int(revision)} but a created revision would be" + f" {int(existing_definitions_in_family[-1]['revision'] + 1)}" + ) ) else: existing = None diff --git a/efs.py b/efs.py index c1d9f247b34..df79babc92c 100644 --- a/efs.py +++ b/efs.py @@ -302,8 +302,8 @@ def get_file_systems(self, **kwargs): AWS documentation is available here: https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html """ - item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" if "Timestamp" in item["SizeInBytes"]: item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) if item["LifeCycleState"] == self.STATE_AVAILABLE: diff --git a/efs_info.py b/efs_info.py index 533af10d84d..76952337b97 100644 --- a/efs_info.py +++ b/efs_info.py @@ -195,7 +195,7 @@ def __init__(self, module): self.connection = module.client("efs") self.module = module except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to AWS: {to_native(e)}") self.region = module.region @@ -280,8 +280,8 @@ def get_file_systems(self, file_system_id=None, creation_token=None): AWS documentation is available here: U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) """ - item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" if "Timestamp" in item["SizeInBytes"]: item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) diff --git a/efs_tag.py b/efs_tag.py index 80eb5cc7b9c..3a4c5c8ced6 100644 --- a/efs_tag.py +++ b/efs_tag.py @@ -118,7 +118,7 @@ def get_tags(efs, module, resource): try: return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg="Failed to fetch tags for resource {0}".format(resource)) + module.fail_json_aws(get_tags_error, msg=f"Failed to fetch tags for resource {resource}") def main(): @@ -164,7 +164,7 @@ def main(): 
efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as remove_tag_error: module.fail_json_aws( - remove_tag_error, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource) + remove_tag_error, msg=f"Failed to remove tags {remove_tags} from resource {resource}" ) if state == "present" and add_tags: @@ -176,9 +176,7 @@ def main(): tags = ansible_dict_to_boto3_tag_list(add_tags) efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws( - set_tag_error, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource) - ) + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {add_tags} on resource {resource}") result["tags"] = get_tags(efs, module, resource) module.exit_json(**result) diff --git a/eks_cluster.py b/eks_cluster.py index 13ea5997d4d..a445def55c3 100644 --- a/eks_cluster.py +++ b/eks_cluster.py @@ -219,9 +219,9 @@ def ensure_present(client, module): params["tags"] = module.params["tags"] cluster = client.create_cluster(**params)["cluster"] except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create cluster {name}") if wait: wait_until(client, module, "cluster_active") @@ -242,9 +242,9 @@ def ensure_absent(client, module): try: client.delete_cluster(name=module.params["name"]) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}") if wait: wait_until(client, module, "cluster_deleted") @@ -259,12 +259,12 @@ def get_cluster(client, module): except is_boto3_error_code("ResourceNotFoundException"): return None except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get cluster {name}") def wait_until(client, module, waiter_name="cluster_active"): diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py index 71a632a2223..131f0651bd3 100644 --- a/eks_fargate_profile.py +++ b/eks_fargate_profile.py @@ -188,7 +188,7 @@ def validate_tags(client, module, fargate_profile): existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list or compare tags for Fargate 
Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to list or compare tags for Fargate Profile {module.params.get('name')}") if tags_to_remove: changed = True @@ -196,7 +196,7 @@ def validate_tags(client, module, fargate_profile): try: client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") if tags_to_add: changed = True @@ -204,7 +204,7 @@ def validate_tags(client, module, fargate_profile): try: client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") return changed @@ -252,7 +252,7 @@ def create_or_update_fargate_profile(client, module): ) fargate_profile = client.create_fargate_profile(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create fargate profile {name}") if wait: wait_until(client, module, "fargate_profile_active", name, cluster_name) @@ -274,7 +274,7 @@ def delete_fargate_profile(client, module): try: client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete fargate profile {name}") if wait: wait_until(client, module, "fargate_profile_deleted", name, cluster_name) diff --git a/eks_nodegroup.py b/eks_nodegroup.py index 6704af1af09..f146328f098 100644 --- a/eks_nodegroup.py +++ b/eks_nodegroup.py @@ -370,21 +370,21 @@ def validate_tags(client, module, nodegroup): existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list or compare tags for Nodegroup %s." % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to list or compare tags for Nodegroup {module.params.get('name')}.") if tags_to_remove: if not module.check_mode: changed = True try: client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") if tags_to_add: if not module.check_mode: changed = True try: client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." 
% module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") return changed @@ -422,7 +422,7 @@ def validate_taints(client, module, nodegroup, param_taints): try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set taints for Nodegroup %s." % params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Unable to set taints for Nodegroup {params['nodegroupName']}.") return changed @@ -458,7 +458,7 @@ def validate_labels(client, module, nodegroup, param_labels): try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set labels for Nodegroup %s." % params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Unable to set labels for Nodegroup {params['nodegroupName']}.") return changed @@ -467,7 +467,7 @@ def compare_params(module, params, nodegroup): for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiTypes", "remoteAccess", "capacityType"]: if (param in nodegroup) and (param in params): if nodegroup[param] != params[param]: - module.fail_json(msg="Cannot modify parameter %s." % param) + module.fail_json(msg=f"Cannot modify parameter {param}.") if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params): module.fail_json(msg="Cannot add Launch Template in this Nodegroup.") if nodegroup["updateConfig"] != params["updateConfig"]: @@ -485,7 +485,7 @@ def compare_params_launch_template(module, params, nodegroup): if (key in params["launchTemplate"]) and ( params["launchTemplate"][key] != nodegroup["launchTemplate"][key] ): - module.fail_json(msg="Cannot modify Launch Template %s." % key) + module.fail_json(msg=f"Cannot modify Launch Template {key}.") if ("version" in params["launchTemplate"]) and ( params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] ): @@ -593,7 +593,7 @@ def create_or_update_nodegroups(client, module): try: nodegroup = client.create_nodegroup(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Couldn't create Nodegroup {params['nodegroupName']}.") if wait: wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) @@ -613,7 +613,7 @@ def delete_nodegroups(client, module): try: client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name) + module.fail_json_aws(e, msg=f"Couldn't delete Nodegroup {name}.") if wait: wait_until(client, module, "nodegroup_deleted", name, clusterName) @@ -630,7 +630,7 @@ def get_nodegroup(client, module, nodegroup_name, cluster_name): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." 
% nodegroup_name) + module.fail_json_aws(e, msg=f"Couldn't get Nodegroup {nodegroup_name}.") def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): diff --git a/elasticache.py b/elasticache.py index ac6ea78b69f..e7a9b1808ff 100644 --- a/elasticache.py +++ b/elasticache.py @@ -218,8 +218,7 @@ def create(self): if self.wait: self._wait_for_status("gone") else: - msg = "'%s' is currently deleting. Cannot create." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json(msg=f"'{self.name}' is currently deleting. Cannot create.") kwargs = dict( CacheClusterId=self.name, @@ -262,8 +261,7 @@ def delete(self): if self.wait: self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot delete." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.") try: response = self.conn.delete_cache_cluster(CacheClusterId=self.name) @@ -280,8 +278,7 @@ def delete(self): def sync(self): """Sync settings to cluster if required""" if not self.exists(): - msg = "'%s' is %s. Cannot sync." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.") if self.status in ["creating", "rebooting", "modifying"]: if self.wait: @@ -293,11 +290,13 @@ def sync(self): if self._requires_destroy_and_create(): if not self.hard_modify: - msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed." + ) if not self.wait: - msg = "'%s' requires destructive modification. 'wait' must be set to true." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed." + ) self.delete() self.create() return @@ -331,16 +330,14 @@ def modify(self): def reboot(self): """Reboot the cache cluster""" if not self.exists(): - msg = "'%s' is %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot reboot.") if self.status == "rebooting": return if self.status in ["creating", "modifying"]: if self.wait: self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.") # Collect ALL nodes for reboot cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] @@ -369,12 +366,12 @@ def _wait_for_status(self, awaited_status): # No need to wait, we're already done return if status_map[self.status] != awaited_status: - msg = "Invalid awaited status. '%s' cannot transition to '%s'" - self.module.fail_json(msg=msg % (self.status, awaited_status)) + self.module.fail_json( + msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'" + ) if awaited_status not in set(status_map.values()): - msg = "'%s' is not a valid awaited status." - self.module.fail_json(msg=msg % awaited_status) + self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.") while True: sleep(1) @@ -470,8 +467,9 @@ def _get_nodes_to_remove(self): return [] if not self.hard_modify: - msg = "'%s' requires removal of cache nodes. 
'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." + ) cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] return cache_node_ids[-num_nodes_to_remove:] diff --git a/elasticache_info.py b/elasticache_info.py index 28b31f76a7f..021d3a0270e 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -467,14 +467,14 @@ def get_elasticache_clusters(client, module): results = [] for cluster in clusters: cluster = camel_dict_to_snake_dict(cluster) - arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster["cache_cluster_id"]) + arn = f"arn:aws:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}" try: tags = get_elasticache_tags_with_backoff(client, arn) except is_boto3_error_code("CacheClusterNotFound"): # e.g: Cluster was listed but is in deleting state continue except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get tags for cluster %s") + module.fail_json_aws(e, msg=f"Couldn't get tags for cluster {cluster['cache_cluster_id']}") cluster["tags"] = boto3_tag_list_to_ansible_dict(tags) diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 1e5a1c63b6f..20f5ed9838b 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -145,7 +145,7 @@ def make_current_modifiable_param_dict(module, conn, name): """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" current_info = get_info(conn, name) if current_info is False: - module.fail_json(msg="Could not connect to the cache parameter group %s." % name) + module.fail_json(msg=f"Could not connect to the cache parameter group {name}.") parameters = current_info["Parameters"] modifiable_params = {} @@ -168,8 +168,7 @@ def check_valid_modification(module, values, modifiable_params): # check valid modifiable parameters if parameter not in modifiable_params: module.fail_json( - msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." - % (parameter, modifiable_params.keys()) + msg=f"{parameter} is not a modifiable parameter. Valid parameters to modify are: {modifiable_params.keys()}." ) # check allowed datatype for modified parameters @@ -186,13 +185,17 @@ def check_valid_modification(module, values, modifiable_params): values[parameter] = 1 if new_value else 0 else: module.fail_json( - msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." - % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + msg=( + f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter" + f" {parameter}. Expected a type {modifiable_params[parameter][1]}." + ) ) else: module.fail_json( - msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." - % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + msg=( + f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter {parameter}." + f" Expected a type {modifiable_params[parameter][1]}." 
+ ) ) # check allowed values for modifiable parameters @@ -200,8 +203,7 @@ def check_valid_modification(module, values, modifiable_params): if choices: if not (to_text(new_value) in choices or isinstance(new_value, int)): module.fail_json( - msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." - % (new_value, parameter, choices) + msg=f"{new_value} is not an allowed value for the parameter {parameter}. Valid parameters are: {choices}." ) # check if a new value is different from current value @@ -327,7 +329,7 @@ def main(): module.fail_json(msg="Creating a group requires a family group.") elif state == "reset" and not exists: module.fail_json( - msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name + msg=f"No group {parameter_group_name} to reset. Please create the group before using the state 'reset'." ) # Taking action diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index 66c9cb9da57..b6b6f55069c 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -156,8 +156,10 @@ def delete(module, connection, name): changed = False except is_boto3_error_code("InvalidSnapshotState"): # pylint: disable=duplicate-except module.fail_json( - msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." - "You may need to wait a few minutes." + msg=( + "Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow" + " deletion. You may need to wait a few minutes." + ) ) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete the snapshot.") diff --git a/elb_instance.py b/elb_instance.py index 2d6ca291968..6489a86bcf9 100644 --- a/elb_instance.py +++ b/elb_instance.py @@ -386,7 +386,7 @@ def main(): if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): - module.fail_json(msg="ELB {0} does not exist".format(elb)) + module.fail_json(msg=f"ELB {elb} does not exist") if module.params["state"] == "present": elb_man.register(wait, enable_availability_zone, timeout) diff --git a/elb_target.py b/elb_target.py index cab7b10aef8..d7dfaf824cb 100644 --- a/elb_target.py +++ b/elb_target.py @@ -136,7 +136,7 @@ def convert_tg_name_to_arn(connection, module, tg_name): try: response = describe_target_groups_with_backoff(connection, tg_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) + module.fail_json_aws(e, msg=f"Unable to describe target group {tg_name}") tg_arn = response["TargetGroups"][0]["TargetGroupArn"] @@ -170,7 +170,7 @@ def describe_targets(connection, module, tg_arn, target=None): return {} return targets[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target)) + module.fail_json_aws(e, msg=f"Unable to describe target health for target {target}") @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -216,7 +216,7 @@ def register_target(connection, module): connection, module, target_group_arn, target, target_status, target_status_timeout ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target)) + module.fail_json_aws(e, msg=f"Unable to register target {target}") # Get all targets for
the target group target_descriptions = describe_targets(connection, module, target_group_arn) @@ -274,7 +274,7 @@ def deregister_target(connection, module): deregister_target_with_backoff(connection, target_group_arn, target) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Unable to deregister target {0}".format(target)) + module.fail_json(msg=f"Unable to deregister target {target}") else: if current_target_reason != "Target.NotRegistered" and current_target_state != "draining": module.warn( @@ -306,9 +306,7 @@ def target_status_check(connection, module, target_group_arn, target, target_sta sleep(1) if not reached_state: module.fail_json( - msg="Status check timeout of {0} exceeded, last status was {1}: ".format( target_status_timeout, health_state ) + msg=f"Status check timeout of {target_status_timeout} exceeded, last status was {health_state}: " ) diff --git a/elb_target_group.py b/elb_target_group.py index 93a3f333df6..4eb38f4c2d4 100644 --- a/elb_target_group.py +++ b/elb_target_group.py @@ -667,7 +667,7 @@ def create_or_update_target_group(connection, module): if target_group: diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)] if diffs: - module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % ", ".join(diffs)) + module.fail_json(msg=f"Cannot modify {', '.join(diffs)} parameter(s) for a target group") # Target group exists so check health check parameters match what has been passed health_check_params = dict() diff --git a/elb_target_info.py b/elb_target_info.py index e318f6c5b65..add122416d9 100644 --- a/elb_target_info.py +++ b/elb_target_info.py @@ -279,11 +279,11 @@ def _get_instance_ips(self): # typically this will happen if the instance doesn't exist self.module.fail_json_aws( e, - msg="Could not get instance info for instance '%s'" % (self.instance_id), + msg=f"Could not get instance info for instance '{self.instance_id}'", ) if len(reservations) < 1: - self.module.fail_json(msg="Instance ID %s could not be found" % self.instance_id) + self.module.fail_json(msg=f"Instance ID {self.instance_id} could not be found") instance = reservations[0]["Instances"][0] @@ -340,7 +340,7 @@ def _get_target_descriptions(self, target_groups): response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws( - e, msg="Could not describe target " + "health for target group %s" % tg.target_group_arn + e, msg=f"Could not describe target health for target group {tg.target_group_arn}" ) for t in response["TargetHealthDescriptions"]: diff --git a/glue_connection.py b/glue_connection.py index b1c935929f8..18039a8616d 100644 --- a/glue_connection.py +++ b/glue_connection.py @@ -269,7 +269,7 @@ def _await_glue_connection(connection, module): return glue_connection time.sleep(check_interval) - module.fail_json(msg="Timeout waiting for Glue connection %s" % module.params.get("name")) + module.fail_json(msg=f"Timeout waiting for Glue connection {module.params.get('name')}") def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): @@ -335,8 +335,10 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co if glue_connection: module.deprecate( - "The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. 
Both values are returned for now.", + ( + "The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. Both values are returned for now." + ), date="2024-06-01", collection_name="community.aws", ) diff --git a/glue_crawler.py b/glue_crawler.py index 0a8598b6c7a..5d92219df8b 100644 --- a/glue_crawler.py +++ b/glue_crawler.py @@ -305,7 +305,7 @@ def ensure_tags(connection, module, glue_crawler): return False account_id, partition = get_aws_account_info(module) - arn = "arn:{0}:glue:{1}:{2}:crawler/{3}".format(partition, module.region, account_id, module.params.get("name")) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:crawler/{module.params.get('name')}" try: existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) @@ -313,7 +313,7 @@ def ensure_tags(connection, module, glue_crawler): if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg="Unable to get tags for Glue crawler %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue crawler {module.params.get('name')}") tags_to_add, tags_to_remove = compare_aws_tags( existing_tags, module.params.get("tags"), module.params.get("purge_tags") @@ -325,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") if tags_to_add: changed = True @@ -333,7 +333,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") return changed diff --git a/glue_job.py b/glue_job.py index 4740deed3c9..2567799757e 100644 --- a/glue_job.py +++ b/glue_job.py @@ -320,7 +320,7 @@ def ensure_tags(connection, module, glue_job): return False account_id, partition = get_aws_account_info(module) - arn = "arn:{0}:glue:{1}:{2}:job/{3}".format(partition, module.region, account_id, module.params.get("name")) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:job/{module.params.get('name')}" try: existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) @@ -328,7 +328,7 @@ def ensure_tags(connection, module, glue_job): if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg="Unable to get tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue job {module.params.get('name')}") tags_to_add, tags_to_remove = compare_aws_tags( existing_tags, module.params.get("tags"), module.params.get("purge_tags") @@ -340,7 +340,7 @@ def ensure_tags(connection, module, glue_job): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job 
{module.params.get('name')}") if tags_to_add: changed = True @@ -348,7 +348,7 @@ def ensure_tags(connection, module, glue_job): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") return changed diff --git a/iam_access_key.py b/iam_access_key.py index a8f03d7bced..ae3e9e7dd11 100644 --- a/iam_access_key.py +++ b/iam_access_key.py @@ -157,7 +157,7 @@ def delete_access_key(access_keys, user, access_key_id): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)) + module.fail_json_aws(e, msg=f'Failed to delete access key "{access_key_id}" for user "{user}"') return True @@ -165,7 +165,7 @@ def delete_access_key(access_keys, user, access_key_id): def update_access_key(access_keys, user, access_key_id, enabled): if access_key_id not in access_keys: module.fail_json( - msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user), + msg=f'Access key "{access_key_id}" not found attached to User "{user}"', ) changes = dict() @@ -188,7 +188,7 @@ def update_access_key(access_keys, user, access_key_id, enabled): module.fail_json_aws( e, changes=changes, - msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), + msg=f'Failed to update access key "{access_key_id}" for user "{user}"', ) return True @@ -210,7 +210,7 @@ def create_access_key(access_keys, user, rotate_keys, enabled): try: results = client.create_access_key(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to create access key for user "{user}"') results = camel_dict_to_snake_dict(results) access_key = results.get("access_key") access_key = normalize_boto3_result(access_key) @@ -232,7 +232,7 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') if not results: return None @@ -259,7 +259,7 @@ def main(): ) required_if = [ - ["state", "absent", ("id")], + ["state", "absent", ("id",)], ] mutually_exclusive = [ ["rotate_keys", "id"], diff --git a/iam_access_key_info.py b/iam_access_key_info.py index 22bbd564cb0..0ea8b514122 100644 --- a/iam_access_key_info.py +++ b/iam_access_key_info.py @@ -85,7 +85,7 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') if not results: return None diff --git a/iam_group.py b/iam_group.py index 357671dbdc6..c4f77fde772 100644 --- a/iam_group.py +++ b/iam_group.py @@ -263,7 +263,7 @@ def create_or_update_group(connection, module): try: 
connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't detach policy from group {params['GroupName']}") # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(managed_policies) - set(current_attached_policies_arn_list): @@ -274,13 +274,13 @@ def create_or_update_group(connection, module): try: connection.attach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't attach policy to group {params['GroupName']}") # Manage group memberships try: current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") current_group_members_list = [] for member in current_group_members: @@ -296,9 +296,7 @@ def create_or_update_group(connection, module): try: connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't remove user %s from group %s" % (user, params["GroupName"]) - ) + module.fail_json_aws(e, msg=f"Couldn't remove user {user} from group {params['GroupName']}") # If there are users to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(users) - set(current_group_members_list): @@ -309,7 +307,7 @@ def create_or_update_group(connection, module): try: connection.add_user_to_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params["GroupName"])) + module.fail_json_aws(e, msg=f"Couldn't add user {user} to group {params['GroupName']}") if module.check_mode: module.exit_json(changed=changed) @@ -317,7 +315,7 @@ def create_or_update_group(connection, module): try: group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) @@ -329,7 +327,7 @@ def destroy_group(connection, module): try: group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") if group: # Check mode means we would remove this group if module.check_mode: @@ -340,26 +338,26 @@ def destroy_group(connection, module): for policy in get_attached_policy_list(connection, module, params["GroupName"]): connection.detach_group_policy(GroupName=params["GroupName"], 
PolicyArn=policy["PolicyArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't remove policy from group {params['GroupName']}") # Remove any users in the group otherwise deletion fails current_group_members_list = [] try: current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") for member in current_group_members: current_group_members_list.append(member["UserName"]) for user in current_group_members_list: try: connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params["GroupName"])) + module.fail_json_aws(e, f"Couldn't remove user {user} from group {params['GroupName']}") try: connection.delete_group(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't delete group {params['GroupName']}") else: module.exit_json(changed=False) diff --git a/iam_managed_policy.py b/iam_managed_policy.py index 0f6189ca454..cc7fd8450e5 100644 --- a/iam_managed_policy.py +++ b/iam_managed_policy.py @@ -184,7 +184,7 @@ def get_or_create_policy_version(policy, policy_document): "Document" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v["VersionId"])) + module.fail_json_aws(e, msg=f"Couldn't get policy version {v['VersionId']}") if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): return v, True @@ -249,23 +249,23 @@ def detach_all_entities(policy, **kwargs): try: entities = client.list_entities_for_policy(PolicyArn=policy["Arn"], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy["PolicyName"])) + module.fail_json_aws(e, msg=f"Couldn't list entities for policy {policy['PolicyName']}") for g in entities["PolicyGroups"]: try: client.detach_group_policy(PolicyArn=policy["Arn"], GroupName=g["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g["GroupName"])) + module.fail_json_aws(e, msg=f"Couldn't detach group policy {g['GroupName']}") for u in entities["PolicyUsers"]: try: client.detach_user_policy(PolicyArn=policy["Arn"], UserName=u["UserName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u["UserName"])) + module.fail_json_aws(e, msg=f"Couldn't detach user policy {u['UserName']}") for r in entities["PolicyRoles"]: try: client.detach_role_policy(PolicyArn=policy["Arn"], RoleName=r["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r["RoleName"])) + 
module.fail_json_aws(e, msg=f"Couldn't detach role policy {r['RoleName']}") if entities["IsTruncated"]: detach_all_entities(policy, marker=entities["Marker"]) @@ -289,7 +289,7 @@ def create_or_update_policy(existing_policy): try: rvalue = client.create_policy(PolicyName=name, Path="/", PolicyDocument=policy, Description=description) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) + module.fail_json_aws(e, msg=f"Couldn't create policy {name}") module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue["Policy"])) else: @@ -327,12 +327,12 @@ def delete_policy(existing_policy): try: client.delete_policy_version(PolicyArn=existing_policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version {0}".format(v["VersionId"])) + module.fail_json_aws(e, msg=f"Couldn't delete policy version {v['VersionId']}") # Delete policy try: client.delete_policy(PolicyArn=existing_policy["Arn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy["PolicyName"])) + module.fail_json_aws(e, msg=f"Couldn't delete policy {existing_policy['PolicyName']}") # This is the one case where we will return the old policy module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) diff --git a/iam_password_policy.py b/iam_password_policy.py index 7c93da4139f..5c65f7ebaec 100644 --- a/iam_password_policy.py +++ b/iam_password_policy.py @@ -112,7 +112,7 @@ def __init__(self, module): self.connection = module.resource("iam") self.module = module except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) + module.fail_json(msg=f"Failed to connect to AWS: {str(e)}") def policy_to_dict(self, policy): policy_attributes = [ diff --git a/iam_role.py b/iam_role.py index 3cafe85d2cb..be05707238a 100644 --- a/iam_role.py +++ b/iam_role.py @@ -290,7 +290,7 @@ def attach_policies(module, client, policies_to_attach, role_name): client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name)) + module.fail_json_aws(e, msg=f"Unable to attach policy {policy_arn} to role {role_name}") return changed @@ -309,7 +309,7 @@ def remove_policies(module, client, policies_to_remove, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) + module.fail_json_aws(e, msg=f"Unable to detach policy {policy} from {role_name}") return changed @@ -324,7 +324,7 @@ def remove_inline_policies(module, client, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) + module.fail_json_aws(e, msg=f"Unable to delete policy {policy} embedded in {role_name}") def generate_create_params(module): @@ -376,7 +376,7 @@ def update_role_assumed_policy(module, client, role_name, target_assumed_policy, try: 
client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update assume role policy for role {role_name}") return True @@ -391,7 +391,7 @@ def update_role_description(module, client, role_name, target_description, curre try: client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update description for role {role_name}") return True @@ -406,7 +406,7 @@ def update_role_max_session_duration(module, client, role_name, target_duration, try: client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update maximum session duration for role {role_name}") return True @@ -424,14 +424,14 @@ def update_role_permissions_boundary( try: client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to remove permission boundary for role {role_name}") else: try: client.put_role_permissions_boundary( RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update permission boundary for role {role_name}") return True @@ -540,7 +540,7 @@ def create_instance_profiles(module, client, role_name, path): "InstanceProfiles" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {role_name}") # Profile already exists if any(p["InstanceProfileName"] == role_name for p in instance_profiles): @@ -560,13 +560,13 @@ def create_instance_profiles(module, client, role_name, path): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to create instance profile for role {role_name}") # And attach the role to the profile try: client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to attach role {role_name} to instance profile {role_name}") return True @@ -579,7 +579,7 @@ def remove_instance_profiles(module, client, 
role_name): "InstanceProfiles" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {role_name}") # Remove the role from the instance profile(s) for profile in instance_profiles: @@ -599,11 +599,9 @@ def remove_instance_profiles(module, client, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) + module.fail_json_aws(e, msg=f"Unable to remove instance profile {profile_name}") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name) - ) + module.fail_json_aws(e, msg=f"Unable to remove role {role_name} from instance profile {profile_name}") def destroy_role(module, client): @@ -640,7 +638,7 @@ def get_role_with_backoff(module, client, name): "Role" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get role {name}") def get_role(module, client, name): @@ -652,21 +650,21 @@ def get_role(module, client, name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get role {name}") def get_attached_policy_list(module, client, name): try: return client.list_attached_role_policies(RoleName=name, aws_retry=True)["AttachedPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to list attached policies for role {name}") def get_inline_policy_list(module, client, name): try: return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to list attached policies for role {name}") def get_role_tags(module, client): @@ -674,7 +672,7 @@ def get_role_tags(module, client): try: return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list tags for role {role_name}") def update_role_tags(module, client, role_name, new_tags, purge_tags): @@ -698,7 +696,7 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): if tags_to_add: client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for role %s" % role_name) + module.fail_json_aws(e, msg=f"Unable to set tags for role {role_name}") changed = bool(tags_to_add) or bool(tags_to_remove) return changed diff --git 
a/iam_role_info.py b/iam_role_info.py index a7576a131ec..d23754d90a0 100644 --- a/iam_role_info.py +++ b/iam_role_info.py @@ -195,15 +195,15 @@ def describe_iam_role(module, client, role): try: role["InlinePolicies"] = list_iam_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get inline policies for role {name}") try: role["ManagedPolicies"] = list_iam_attached_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get managed policies for role {name}") try: role["InstanceProfiles"] = list_iam_instance_profiles_for_role_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get instance profiles for role {name}") try: role["tags"] = boto3_tag_list_to_ansible_dict(role["Tags"]) del role["Tags"] @@ -224,7 +224,7 @@ def describe_iam_roles(module, client): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get IAM role {name}") else: params = dict() if path_prefix: diff --git a/iam_saml_federation.py b/iam_saml_federation.py index 238aa5d9a3f..acaaa38fc37 100644 --- a/iam_saml_federation.py +++ b/iam_saml_federation.py @@ -148,13 +148,13 @@ def create_or_update_saml_provider(self, name, metadata): try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # see if metadata needs updating try: resp = self._get_saml_provider(arn) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not retrieve the identity provider '{name}'") if metadata.strip() != resp["SAMLMetadataDocument"].strip(): # provider needs updating @@ -164,7 +164,7 @@ def create_or_update_saml_provider(self, name, metadata): resp = self._update_saml_provider(arn, metadata) res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not update the identity provider '{name}'") else: res["saml_provider"] = self._build_res(arn) @@ -175,7 +175,7 @@ def create_or_update_saml_provider(self, name, metadata): resp = self._create_saml_provider(metadata, name) res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not create the identity provider '{name}'") self.module.exit_json(**res) @@ -184,7 +184,7 @@ def 
delete_saml_provider(self, name): try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # delete res["changed"] = True @@ -192,7 +192,7 @@ def delete_saml_provider(self, name): try: self._delete_saml_provider(arn) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not delete the identity provider '{name}'") self.module.exit_json(**res) diff --git a/iam_server_certificate.py b/iam_server_certificate.py index dd8427dc15b..6a7734acacb 100644 --- a/iam_server_certificate.py +++ b/iam_server_certificate.py @@ -124,7 +124,7 @@ def check_duplicate_cert(new_cert): continue module.fail_json( changed=False, - msg="This certificate already exists under the name {0} and dup_ok=False".format(cert_name), + msg=f"This certificate already exists under the name {cert_name} and dup_ok=False", duplicate_cert=cert, ) @@ -195,7 +195,7 @@ def create_server_certificate(): try: client.upload_server_certificate(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to update server certificate {name}") return True @@ -217,7 +217,7 @@ def rename_server_certificate(current_cert): cert_metadata = current_cert.get("server_certificate_metadata", {}) if not current_cert: - module.fail_json(msg="Unable to find certificate {0}".format(name)) + module.fail_json(msg=f"Unable to find certificate {name}") current_path = cert_metadata.get("path", None) if new_path and current_path != new_path: @@ -232,7 +232,7 @@ def rename_server_certificate(current_cert): try: client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name), changes=changes) + module.fail_json_aws(e, msg=f"Failed to update server certificate {name}", changes=changes) return True @@ -257,7 +257,7 @@ def delete_server_certificate(current_cert): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to delete server certificate {name}") return True @@ -276,7 +276,7 @@ def get_server_certificate(name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to get server certificate {name}") cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate"))) return cert @@ -353,7 +353,7 @@ def main(): if changed: results["deleted_cert"] = name else: - msg = "Certificate with the name {0} already absent".format(name) + msg = f"Certificate with the name {name} already absent" results["msg"] = msg else: if new_name or new_path: diff --git a/kinesis_stream.py b/kinesis_stream.py index 8147f60f3db..d1ba65c86b2 100644 --- a/kinesis_stream.py +++ 
b/kinesis_stream.py @@ -317,7 +317,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=Fa if not status_achieved: err_msg = "Wait time out reached, while waiting for results" else: - err_msg = "Status {0} achieved successfully".format(status) + err_msg = f"Status {status} achieved successfully" return status_achieved, err_msg, stream @@ -361,14 +361,14 @@ def tags_action(client, stream_name, tags, action="create", check_mode=False): client.remove_tags_from_stream(**params) success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "create": success = True elif action == "delete": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -461,14 +461,14 @@ def stream_action(client, stream_name, shard_count=1, action="create", timeout=3 client.delete_stream(**params) success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "create": success = True elif action == "delete": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -519,14 +519,14 @@ def stream_encryption_action( client.stop_stream_encryption(**params) success = True else: - err_msg = "Invalid encryption action {0}".format(action) + err_msg = f"Invalid encryption action {action}" else: if action == "start_encryption": success = True elif action == "stop_encryption": success = True else: - err_msg = "Invalid encryption action {0}".format(action) + err_msg = f"Invalid encryption action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -567,21 +567,21 @@ def retention_action(client, stream_name, retention_period=24, action="increase" params["RetentionPeriodHours"] = retention_period client.increase_stream_retention_period(**params) success = True - err_msg = "Retention Period increased successfully to {0}".format(retention_period) + err_msg = f"Retention Period increased successfully to {retention_period}" elif action == "decrease": params["RetentionPeriodHours"] = retention_period client.decrease_stream_retention_period(**params) success = True - err_msg = "Retention Period decreased successfully to {0}".format(retention_period) + err_msg = f"Retention Period decreased successfully to {retention_period}" else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "increase": success = True elif action == "decrease": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -695,9 +695,7 @@ def update( ) elif retention_period == current_stream["RetentionPeriodHours"]: - retention_msg = "Retention {0} is the same as {1}".format( - retention_period, current_stream["RetentionPeriodHours"] - ) + retention_msg = f"Retention {retention_period} is the same as {current_stream['RetentionPeriodHours']}" success = True if retention_changed: @@ -715,13 +713,12 @@ def update( stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: if current_stream["StreamStatus"] != "ACTIVE": - err_msg = "Retention Period for {0} is in the process of updating".format(stream_name) + err_msg = f"Retention Period for 
{stream_name} is in the process of updating" return success, changed, err_msg else: err_msg = ( - "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( - current_stream.get("StreamStatus", "UNKNOWN") - ) + "StreamStatus has to be ACTIVE in order to modify the retention period." + f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) return success, changed, err_msg @@ -742,7 +739,7 @@ def update( else: stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found and current_stream["StreamStatus"] != "ACTIVE": - err_msg = "Number of shards for {0} is in the process of updating".format(stream_name) + err_msg = f"Number of shards for {stream_name} is in the process of updating" return success, changed, err_msg if tags: @@ -753,9 +750,9 @@ def update( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if success and changed: - err_msg = "Kinesis Stream {0} updated successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} updated successfully." elif success and not changed: - err_msg = "Kinesis Stream {0} did not change.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} did not change." return success, changed, err_msg @@ -829,7 +826,7 @@ def create_stream( ) if not create_success: changed = True - err_msg = "Failed to create Kinesis stream: {0}".format(create_msg) + err_msg = f"Failed to create Kinesis stream: {create_msg}" return False, True, err_msg, {} else: changed = True @@ -837,11 +834,11 @@ def create_stream( wait_success, wait_msg, results = wait_for_status( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = "Kinesis Stream {0} is in the process of being created".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} is in the process of being created" if not wait_success: return wait_success, True, wait_msg, results else: - err_msg = "Kinesis Stream {0} created successfully".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} created successfully" if tags: changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) @@ -860,8 +857,9 @@ def create_stream( if not success: return success, changed, err_msg, results else: - err_msg = "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( - current_stream.get("StreamStatus", "UNKNOWN") + err_msg = ( + "StreamStatus has to be ACTIVE in order to modify the retention period." 
+ f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) success = create_success changed = True @@ -916,15 +914,15 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode= success, err_msg, results = wait_for_status( client, stream_name, "DELETING", wait_timeout, check_mode=check_mode ) - err_msg = "Stream {0} deleted successfully".format(stream_name) + err_msg = f"Stream {stream_name} deleted successfully" if not success: return success, True, err_msg, results else: - err_msg = "Stream {0} is in the process of being deleted".format(stream_name) + err_msg = f"Stream {stream_name} is in the process of being deleted" else: success = True changed = False - err_msg = "Stream {0} does not exist".format(stream_name) + err_msg = f"Stream {stream_name} does not exist" return success, changed, err_msg, results @@ -968,7 +966,7 @@ def start_stream_encryption( if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: changed = False success = True - err_msg = "Kinesis Stream {0} encryption already configured.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already configured." else: success, err_msg = stream_encryption_action( client, @@ -984,15 +982,15 @@ def start_stream_encryption( success, err_msg, results = wait_for_status( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = "Kinesis Stream {0} encryption started successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption started successfully." if not success: return success, True, err_msg, results else: - err_msg = "Kinesis Stream {0} is in the process of starting encryption.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} is in the process of starting encryption." else: success = True changed = False - err_msg = "Kinesis Stream {0} does not exist".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} does not exist" if success: stream_found, stream_msg, results = find_stream(client, stream_name) @@ -1056,16 +1054,16 @@ def stop_stream_encryption( ) if not success: return success, True, err_msg, results - err_msg = "Kinesis Stream {0} encryption stopped successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption stopped successfully." else: - err_msg = "Stream {0} is in the process of stopping encryption.".format(stream_name) + err_msg = f"Stream {stream_name} is in the process of stopping encryption." elif current_stream.get("EncryptionType") == "NONE": success = True - err_msg = "Kinesis Stream {0} encryption already stopped.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already stopped." else: success = True changed = False - err_msg = "Stream {0} does not exist.".format(stream_name) + err_msg = f"Stream {stream_name} does not exist." 
if success: stream_found, stream_msg, results = find_stream(client, stream_name) diff --git a/lightsail.py b/lightsail.py index 6fb83b26b1f..16b4338e7dc 100644 --- a/lightsail.py +++ b/lightsail.py @@ -229,8 +229,7 @@ def wait_for_instance_state(module, client, instance_name, states): module.fail_json_aws(e) else: module.fail_json( - msg='Timed out waiting for instance "{0}" to get to one of the following states -' - " {1}".format(instance_name, states) + msg=f'Timed out waiting for instance "{instance_name}" to get to one of the following states - {states}' ) diff --git a/msk_cluster.py b/msk_cluster.py index 960ae115bcb..aa0383294b2 100644 --- a/msk_cluster.py +++ b/msk_cluster.py @@ -301,7 +301,7 @@ def find_cluster_by_name(client, module, cluster_name): module.fail_json_aws(e, "Failed to find kafka cluster by name") if cluster_list: if len(cluster_list) != 1: - module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name)) + module.fail_json(msg=f"Found more than one cluster with name '{cluster_name}'") return cluster_list[0] return {} @@ -340,9 +340,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"): if current_state == state: return if time.time() - start > timeout: - module.fail_json( - msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(current_state, state) - ) + module.fail_json(msg=f"Timeout waiting for cluster {current_state} (desired state is '{state}')") time.sleep(check_interval) @@ -559,7 +557,7 @@ def create_or_update_cluster(client, module): try: update_method = getattr(client, options.get("update_method", "update_" + method)) except AttributeError as e: - module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method)) + module.fail_json_aws(e, f"There is no update method 'update_{method}'") if options["current_value"] != options["target_value"]: changed = True @@ -575,9 +573,7 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") else: module.fail_json( - msg="Cluster can be updated only in active state, current state is '{0}'. check cluster state or use wait option".format( - state - ) + msg=f"Cluster can be updated only in active state, current state is '{state}'. 
check cluster state or use wait option" ) try: response["changes"][method] = update_method( @@ -587,7 +583,7 @@ def create_or_update_cluster(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, "Failed to update cluster via 'update_{0}'".format(method)) + module.fail_json_aws(e, f"Failed to update cluster via 'update_{method}'") if module.params["wait"]: wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") @@ -606,7 +602,7 @@ def update_cluster_tags(client, module, arn): try: existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to retrieve tags for cluster '{arn}'") tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) @@ -617,7 +613,7 @@ def update_cluster_tags(client, module, arn): if tags_to_add: client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to set tags for cluster '{arn}'") changed = bool(tags_to_add) or bool(tags_to_remove) return changed @@ -761,7 +757,7 @@ def main(): ) if len(module.params["name"]) > 64: module.fail_json( - module.fail_json(msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"])) + msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit" ) changed, response = create_or_update_cluster(client, module) elif module.params["state"] == "absent": @@ -784,7 +780,7 @@ def main(): ) as e: module.fail_json_aws( e, - "Can not obtain information about cluster {0}".format(response["ClusterArn"]), + f"Can not obtain information about cluster {response['ClusterArn']}", ) module.exit_json( diff --git a/msk_config.py b/msk_config.py index 5b67cd9924f..864827eb610 100644 --- a/msk_config.py +++ b/msk_config.py @@ -107,7 +107,7 @@ def dict_to_prop(d): """convert dictionary to multi-line properties""" if len(d) == 0: return "" - return "\n".join("{0}={1}".format(k, v) for k, v in d.items()) + return "\n".join(f"{k}={v}" for k, v in d.items()) def prop_to_dict(p): @@ -149,7 +149,7 @@ def find_active_config(client, module): if len(active_configs) == 1: return active_configs[0] else: - module.fail_json_aws(msg="found more than one active config with name '{0}'".format(name)) + module.fail_json(msg=f"found more than one active config with name '{name}'") return None diff --git a/opensearch.py b/opensearch.py index 88055d1a6dd..967f0c98d01 100644 --- a/opensearch.py +++ b/opensearch.py @@ -565,9 +565,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): # Check the module parameters to determine if this is allowed or not. if not module.params.get("allow_intermediate_upgrades"): module.fail_json( - msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( - source_version, target_engine_version, next_version - ) + msg=f"Cannot upgrade from {source_version} to version {target_engine_version}. 
The highest compatible version is {next_version}" ) parameters = { @@ -591,15 +589,13 @@ def upgrade_domain(client, module, source_version, target_engine_version): # raised if it's not possible to upgrade to the target version. module.fail_json_aws( e, - msg="Couldn't upgrade domain {0} from {1} to {2}".format(domain_name, current_version, next_version), + msg=f"Couldn't upgrade domain {domain_name} from {current_version} to {next_version}", ) if module.check_mode: module.exit_json( changed=True, - msg="Would have upgraded domain from {0} to {1} if not in check mode".format( - current_version, next_version - ), + msg=f"Would have upgraded domain from {current_version} to {next_version} if not in check mode", ) current_version = next_version @@ -664,9 +660,7 @@ def set_cluster_config(module, current_domain_config, desired_domain_config, cha } if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: - change_set.append( - "ClusterConfig changed from {0} to {1}".format(current_domain_config["ClusterConfig"], cluster_config) - ) + change_set.append(f"ClusterConfig changed from {current_domain_config['ClusterConfig']} to {cluster_config}") changed = True return changed @@ -693,7 +687,7 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change ebs_config["Iops"] = ebs_opts.get("iops") if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: - change_set.append("EBSOptions changed from {0} to {1}".format(current_domain_config["EBSOptions"], ebs_config)) + change_set.append(f"EBSOptions changed from {current_domain_config['EBSOptions']} to {ebs_config}") changed = True return changed @@ -719,10 +713,8 @@ def set_encryption_at_rest_options(module, current_domain_config, desired_domain and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config ): change_set.append( - "EncryptionAtRestOptions changed from {0} to {1}".format( - current_domain_config["EncryptionAtRestOptions"], - encryption_at_rest_config, - ) + f"EncryptionAtRestOptions changed from {current_domain_config['EncryptionAtRestOptions']} to" + f" {encryption_at_rest_config}" ) changed = True return changed @@ -742,10 +734,8 @@ def set_node_to_node_encryption_options(module, current_domain_config, desired_d and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config ): change_set.append( - "NodeToNodeEncryptionOptions changed from {0} to {1}".format( - current_domain_config["NodeToNodeEncryptionOptions"], - node_to_node_encryption_config, - ) + f"NodeToNodeEncryptionOptions changed from {current_domain_config['NodeToNodeEncryptionOptions']} to" + f" {node_to_node_encryption_config}" ) changed = True return changed @@ -805,18 +795,14 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change # Note the subnets may be the same but be listed in a different order. 
if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): change_set.append( - "SubnetIds changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SubnetIds"], - vpc_config["SubnetIds"], - ) + f"SubnetIds changed from {current_domain_config['VPCOptions']['SubnetIds']} to" + f" {vpc_config['SubnetIds']}" ) changed = True if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): change_set.append( - "SecurityGroup changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SecurityGroupIds"], - vpc_config["SecurityGroupIds"], - ) + f"SecurityGroup changed from {current_domain_config['VPCOptions']['SecurityGroupIds']} to" + f" {vpc_config['SecurityGroupIds']}" ) changed = True return changed @@ -857,9 +843,7 @@ def set_cognito_options(module, current_domain_config, desired_domain_config, ch cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config: - change_set.append( - "CognitoOptions changed from {0} to {1}".format(current_domain_config["CognitoOptions"], cognito_config) - ) + change_set.append(f"CognitoOptions changed from {current_domain_config['CognitoOptions']} to {cognito_config}") changed = True return changed @@ -922,10 +906,8 @@ def set_advanced_security_options(module, current_domain_config, desired_domain_ and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config ): change_set.append( - "AdvancedSecurityOptions changed from {0} to {1}".format( - current_domain_config["AdvancedSecurityOptions"], - advanced_security_config, - ) + f"AdvancedSecurityOptions changed from {current_domain_config['AdvancedSecurityOptions']} to" + f" {advanced_security_config}" ) changed = True return changed @@ -953,9 +935,8 @@ def set_domain_endpoint_options(module, current_domain_config, desired_domain_co if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: change_set.append( - "DomainEndpointOptions changed from {0} to {1}".format( - current_domain_config["DomainEndpointOptions"], domain_endpoint_config - ) + f"DomainEndpointOptions changed from {current_domain_config['DomainEndpointOptions']} to" + f" {domain_endpoint_config}" ) changed = True return changed @@ -997,18 +978,15 @@ def set_auto_tune_options(module, current_domain_config, desired_domain_config, if current_domain_config is not None: if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: change_set.append( - "AutoTuneOptions.DesiredState changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["DesiredState"], - auto_tune_config["DesiredState"], - ) + "AutoTuneOptions.DesiredState changed from" + f" {current_domain_config['AutoTuneOptions']['DesiredState']} to {auto_tune_config['DesiredState']}" ) changed = True if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: change_set.append( - "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], - auto_tune_config["MaintenanceSchedules"], - ) + "AutoTuneOptions.MaintenanceSchedules changed from" + f" {current_domain_config['AutoTuneOptions']['MaintenanceSchedules']} to" + f" {auto_tune_config['MaintenanceSchedules']}" ) changed = True return changed @@ -1023,12 +1001,12 @@ def set_access_policy(module, 
current_domain_config, desired_domain_config, chan try: access_policy_config = json.dumps(access_policy_opt) except Exception as e: - module.fail_json(msg="Failed to convert the policy into valid JSON: %s" % str(e)) + module.fail_json(msg=f"Failed to convert the policy into valid JSON: {str(e)}") if current_domain_config is not None: # Updating existing domain current_access_policy = json.loads(current_domain_config["AccessPolicies"]) if not compare_policies(current_access_policy, access_policy_opt): - change_set.append("AccessPolicy changed from {0} to {1}".format(current_access_policy, access_policy_opt)) + change_set.append(f"AccessPolicy changed from {current_access_policy} to {access_policy_opt}") changed = True desired_domain_config["AccessPolicies"] = access_policy_config else: @@ -1134,7 +1112,7 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}") else: # Create new OpenSearch cluster @@ -1152,12 +1130,12 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}") try: existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) + module.fail_json_aws(e, f"Couldn't get tags for domain {domain_name}") desired_tags = module.params["tags"] purge_tags = module.params["purge_tags"] diff --git a/redshift.py b/redshift.py index 61b9e3aeb4a..91993648de0 100644 --- a/redshift.py +++ b/redshift.py @@ -277,7 +277,7 @@ def _ensure_tags(redshift, identifier, existing_tags, module): account_id = get_aws_account_id(module) region = module.params.get("region") - resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}".format(region, account_id, identifier) + resource_arn = f"arn:aws:redshift:{region}:{account_id}:cluster:{identifier}" tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") @@ -565,7 +565,7 @@ def modify_cluster(module, redshift): redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if wait: attempts = wait_timeout // 60 waiter = redshift.get_waiter("cluster_available") @@ -580,7 +580,7 @@ def modify_cluster(module, redshift): redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if module.params.get("new_cluster_identifier"): identifier = module.params.get("new_cluster_identifier") @@ -595,7 +595,7 @@ def modify_cluster(module, redshift): try: resource = _describe_cluster(redshift, identifier) except (botocore.exceptions.BotoCoreError, 
botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if _ensure_tags(redshift, identifier, resource["Tags"], module): resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py index f4d895cb1cb..d2894dfcba8 100644 --- a/redshift_cross_region_snapshots.py +++ b/redshift_cross_region_snapshots.py @@ -164,8 +164,7 @@ def run_module(): if module.params.get("state") == "present": if requesting_unsupported_modifications(current_config, module.params): message = ( - "Cannot modify destination_region or grant_name. " - "Please disable cross-region snapshots, and re-run." + "Cannot modify destination_region or grant_name. Please disable cross-region snapshots, and re-run." ) module.fail_json(msg=message, **result) if needs_update(current_config, module.params): diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py index 9ba6e5e6799..1045164dce3 100644 --- a/s3_bucket_notification.py +++ b/s3_bucket_notification.py @@ -184,7 +184,7 @@ def full_config(self): try: config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg="{0}".format(e)) + self.module.fail_json(msg=f"{e}") # Handle different event targets if config_lookup.get("QueueConfigurations"): @@ -251,7 +251,7 @@ def _upload_bucket_config(self, configs): try: self.client.put_bucket_notification_configuration(**api_params) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg="{0}".format(e)) + self.module.fail_json(msg=f"{e}") class Config: @@ -299,7 +299,7 @@ def from_params(cls, **params): elif params["lambda_alias"]: qualifier = str(params["lambda_alias"]) if qualifier: - params["lambda_function_arn"] = "{0}:{1}".format(function_arn, qualifier) + params["lambda_function_arn"] = f"{function_arn}:{qualifier}" bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] diff --git a/s3_cors.py b/s3_cors.py index 0d92ba56eac..d153c7df823 100644 --- a/s3_cors.py +++ b/s3_cors.py @@ -127,7 +127,7 @@ def create_or_update_bucket_cors(connection, module): try: cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}") module.exit_json(changed=changed, name=name, rules=rules) @@ -140,7 +140,7 @@ def destroy_bucket_cors(connection, module): cors = connection.delete_bucket_cors(Bucket=name) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}") module.exit_json(changed=changed) diff --git a/s3_lifecycle.py b/s3_lifecycle.py index 24517b1e372..27f1179688d 100644 --- a/s3_lifecycle.py +++ b/s3_lifecycle.py @@ -661,7 +661,7 @@ def main(): if module.params.get(param) is None: break else: - msg = "one of the following is required when 'state' is 'present': %s" % ", ".join(required_when_present) + msg = f"one of the following is required when 'state' is 'present': {', '.join(required_when_present)}" module.fail_json(msg=msg) # If dates have been set, make sure they're in a 
valid format diff --git a/s3_logging.py b/s3_logging.py index b2eda67d135..193455a4be2 100644 --- a/s3_logging.py +++ b/s3_logging.py @@ -91,7 +91,7 @@ def verify_acls(connection, module, target_bucket): current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) current_grants = current_acl["Grants"] except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) + module.fail_json(msg=f"Target Bucket '{target_bucket}' not found") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, @@ -132,7 +132,7 @@ def enable_bucket_logging(connection, module): try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) + module.fail_json(msg=f"Bucket '{bucket_name}' not found") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py index 90429ca64b4..d90e7d0e603 100644 --- a/s3_metrics_configuration.py +++ b/s3_metrics_configuration.py @@ -153,7 +153,7 @@ def create_or_update_metrics_configuration(client, module): aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration ) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to put bucket metrics configuration '{mc_id}'") module.exit_json(changed=True) @@ -177,7 +177,7 @@ def delete_metrics_configuration(client, module): except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to delete bucket metrics configuration '{mc_id}'") module.exit_json(changed=True) diff --git a/s3_sync.py b/s3_sync.py index efc07efb150..36809ed2f75 100644 --- a/s3_sync.py +++ b/s3_sync.py @@ -413,8 +413,8 @@ def filter_list(s3, bucket, s3filelist, strategy): remote_size = entry["s3_head"]["ContentLength"] - entry["whytime"] = "{0} / {1}".format(local_modified_epoch, remote_modified_epoch) - entry["whysize"] = "{0} / {1}".format(local_size, remote_size) + entry["whytime"] = f"{local_modified_epoch} / {remote_modified_epoch}" + entry["whysize"] = f"{local_size} / {remote_size}" if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: entry["skip_flag"] = True diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py index f611d600967..1a1340df723 100644 --- a/secretsmanager_secret.py +++ b/secretsmanager_secret.py @@ -366,7 +366,7 @@ def put_resource_policy(self, secret): try: json.loads(secret.secret_resource_policy_args.get("ResourcePolicy")) except (TypeError, ValueError) as e: - self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc()) + self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc()) try: response = self.client.put_resource_policy(**secret.secret_resource_policy_args) diff --git a/ses_identity.py b/ses_identity.py index 7a966da4a48..e324a7e12f7 100644 --- a/ses_identity.py +++ b/ses_identity.py @@ -242,9 +242,7 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe try: 
response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to retrieve identity verification attributes for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to retrieve identity verification attributes for {identity}") identity_verification = response["VerificationAttributes"] if identity in identity_verification: break @@ -265,9 +263,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel try: response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to retrieve identity notification attributes for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to retrieve identity notification attributes for {identity}") notification_attributes = response["NotificationAttributes"] # No clear AWS docs on when this happens, but it appears sometimes identities are not included in @@ -341,10 +337,7 @@ def update_notification_topic(connection, module, identity, identity_notificatio except (BotoCoreError, ClientError) as e: module.fail_json_aws( e, - msg="Failed to set identity notification topic for {identity} {notification_type}".format( - identity=identity, - notification_type=notification_type, - ), + msg=f"Failed to set identity notification topic for {identity} {notification_type}", ) return True return False @@ -378,11 +371,7 @@ def update_notification_topic_headers(connection, module, identity, identity_not ) except (BotoCoreError, ClientError) as e: module.fail_json_aws( - e, - msg="Failed to set identity headers in notification for {identity} {notification_type}".format( - identity=identity, - notification_type=notification_type, - ), + e, msg=f"Failed to set identity headers in notification for {identity} {notification_type}" ) return True return False @@ -411,9 +400,7 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati Identity=identity, ForwardingEnabled=required, aws_retry=True ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to set identity feedback forwarding for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to set identity feedback forwarding for {identity}") return True return False @@ -460,8 +447,10 @@ def validate_params_for_identity_present(module): if module.params.get("feedback_forwarding") is False: if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): module.fail_json( - msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + msg=( + "Invalid Parameter Value 'False' for 'feedback_forwarding'. 
AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + ) ) @@ -477,7 +466,7 @@ def create_or_update_identity(connection, module, region, account_id): else: connection.verify_domain_identity(Domain=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to verify identity {identity}".format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to verify identity {identity}") if module.check_mode: verification_attributes = { "VerificationStatus": "Pending", @@ -520,7 +509,7 @@ def destroy_identity(connection, module): if not module.check_mode: connection.delete_identity(Identity=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete identity {identity}".format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to delete identity {identity}") changed = True module.exit_json( diff --git a/ses_identity_policy.py b/ses_identity_policy.py index a28d027549a..9b7a3d6b6fa 100644 --- a/ses_identity_policy.py +++ b/ses_identity_policy.py @@ -101,7 +101,7 @@ def get_identity_policy(connection, module, identity, policy_name): try: response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to retrieve identity policy {policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to retrieve identity policy {policy_name}") policies = response["Policies"] if policy_name in policies: return policies[policy_name] @@ -125,7 +125,7 @@ def create_or_update_identity_policy(connection, module): Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to put identity policy {policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to put identity policy {policy_name}") # Load the list of applied policies to include in the response. 
# In principle we should be able to just return the response, but given @@ -162,7 +162,7 @@ def delete_identity_policy(connection, module): if not module.check_mode: connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete identity policy {policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to delete identity policy {policy_name}") changed = True policies_present = list(policies_present) policies_present.remove(policy_name) diff --git a/ses_rule_set.py b/ses_rule_set.py index 9915622ed7d..8d09965774f 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -155,7 +155,7 @@ def update_active_rule_set(client, module, name, desired_active): try: client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't set active rule set to {name}.") changed = True active = True elif not desired_active and active: @@ -177,7 +177,7 @@ def create_or_update_rule_set(client, module): try: client.create_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't create rule set {name}.") changed = True rule_sets = list(rule_sets) rule_sets.append( @@ -206,12 +206,13 @@ def remove_rule_set(client, module): active = ruleset_active(client, module, name) if active and not module.params.get("force"): module.fail_json( - msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format( - name + msg=( + f"Couldn't delete rule set {name} because it is currently active. Set force=true to delete an" + " active ruleset." 
), error={ "code": "CannotDelete", - "message": "Cannot delete active rule set: {0}".format(name), + "message": f"Cannot delete active rule set: {name}", }, ) if not check_mode: @@ -220,7 +221,7 @@ def remove_rule_set(client, module): try: client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't delete rule set {name}.") changed = True rule_sets = [x for x in rule_sets if x["Name"] != name] diff --git a/sns.py b/sns.py index 53c63a05645..493855b76e0 100644 --- a/sns.py +++ b/sns.py @@ -226,7 +226,7 @@ def main(): sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) if not sns_kwargs["TopicArn"]: - module.fail_json(msg="Could not find topic: {0}".format(topic)) + module.fail_json(msg=f"Could not find topic: {topic}") if sns_kwargs["MessageStructure"] == "json": sns_kwargs["Message"] = json.dumps(dict_msg) diff --git a/sns_topic.py b/sns_topic.py index 90929a476ea..c99b7580663 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -413,7 +413,7 @@ def _create_topic(self): try: response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) + self.module.fail_json_aws(e, msg=f"Couldn't create topic {self.name}") self.topic_arn = response["TopicArn"] return True @@ -422,7 +422,7 @@ def _set_topic_attrs(self): try: topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't get topic attributes for topic {self.topic_arn}") if self.display_name and self.display_name != topic_attributes["DisplayName"]: changed = True @@ -509,7 +509,7 @@ def _set_topic_subs(self): try: self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't subscribe to topic {self.topic_arn}") return changed def _init_desired_subscription_attributes(self): @@ -537,7 +537,7 @@ def _set_topic_subs_attributes(self): "Attributes" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) + self.module.fail_json_aws(e, f"Couldn't get subscription attributes for subscription {sub_arn}") raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: @@ -575,7 +575,7 @@ def _delete_topic(self): try: self.connection.delete_topic(TopicArn=self.topic_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't delete topic {self.topic_arn}") return True def _name_is_arn(self): diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py index 4bbd1503ab8..a2558c8085c 100644 --- 
a/stepfunctions_state_machine.py +++ b/stepfunctions_state_machine.py @@ -130,7 +130,7 @@ def create(sfn_client, module): def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg="State machine would be deleted: {0}".format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be deleted: {state_machine_arn}", changed=True) sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) module.exit_json(changed=True, state_machine_arn=state_machine_arn) @@ -140,7 +140,7 @@ def update(state_machine_arn, sfn_client, module): tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg="State machine would be updated: {0}".format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be updated: {state_machine_arn}", changed=True) sfn_client.update_state_machine( stateMachineArn=state_machine_arn, diff --git a/waf_condition.py b/waf_condition.py index efbb17e2cf8..b1baae378e8 100644 --- a/waf_condition.py +++ b/waf_condition.py @@ -606,7 +606,7 @@ def list_conditions(self): try: return func()[self.conditionsets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not list %s conditions" % self.type) + self.module.fail_json_aws(e, msg=f"Could not list {self.type} conditions") def tidy_up_regex_patterns(self, regex_match_set): all_regex_match_sets = self.list_conditions() @@ -643,7 +643,7 @@ def find_and_delete_condition(self, condition_set_id): in_use_rules = self.find_condition_in_rules(condition_set_id) if in_use_rules: rulenames = ", ".join(in_use_rules) - self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition["Name"], rulenames)) + self.module.fail_json(msg=f"Condition {current_condition['Name']} is in use by {rulenames}") if current_condition[self.conditiontuples]: # Filters are deleted using update with the DELETE action func = getattr(self.client, "update_" + self.method_suffix) diff --git a/waf_info.py b/waf_info.py index ea294c92ed4..711d1d8de74 100644 --- a/waf_info.py +++ b/waf_info.py @@ -134,7 +134,7 @@ def main(): if name: web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name] if not web_acls: - module.fail_json(msg="WAF named %s not found" % name) + module.fail_json(msg=f"WAF named {name} not found") module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls]) diff --git a/waf_rule.py b/waf_rule.py index 98064dd8ca4..28ff981623d 100644 --- a/waf_rule.py +++ b/waf_rule.py @@ -210,7 +210,7 @@ def find_and_update_rule(client, module, rule_id): try: pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list %s conditions" % condition_type) + module.fail_json_aws(e, msg=f"Could not list {condition_type} conditions") for pred in pred_results: pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) @@ -231,7 +231,7 @@ def find_and_update_rule(client, module, rule_id): for condition_type in desired_conditions: for condition_name, condition in desired_conditions[condition_type].items(): if condition_name not in all_conditions[condition_type]: - module.fail_json(msg="Condition %s of type %s does not 
exist" % (condition_name, condition_type)) + module.fail_json(msg=f"Condition {condition_name} of type {condition_type} does not exist") condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"] if condition["data_id"] not in existing_conditions[condition_type]: insertions.append(format_for_insertion(condition)) @@ -326,7 +326,7 @@ def ensure_rule_absent(client, module): in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) if in_use_web_acls: web_acl_names = ", ".join(in_use_web_acls) - module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % (module.params["name"], web_acl_names)) + module.fail_json(msg=f"Rule {module.params['name']} is in use by Web ACL(s) {web_acl_names}") if rule_id: remove_rule_conditions(client, module, rule_id) try: diff --git a/waf_web_acl.py b/waf_web_acl.py index 4b71231aec9..dd78a2778a5 100644 --- a/waf_web_acl.py +++ b/waf_web_acl.py @@ -205,7 +205,7 @@ def get_web_acl(client, module, web_acl_id): try: return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get Web ACL with id %s" % web_acl_id) + module.fail_json_aws(e, msg=f"Could not get Web ACL with id {web_acl_id}") def list_web_acls( From 17f24e2b7a4104d236fa5a3112c81a87179b5d1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Tue, 9 May 2023 14:41:27 -0400 Subject: [PATCH 660/683] opensearch_info - Fix the name of the domain_name key in the example (#1811) opensearch_info - Fix the name of the domain_name key in the example Reviewed-by: Mark Chappell --- opensearch_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opensearch_info.py b/opensearch_info.py index ef49637f5cc..976ea4279f7 100644 --- a/opensearch_info.py +++ b/opensearch_info.py @@ -35,7 +35,7 @@ EXAMPLES = r""" - name: Get information about an OpenSearch domain instance community.aws.opensearch_info: - domain-name: my-search-cluster + domain_name: my-search-cluster register: new_cluster_info - name: Get all OpenSearch instances From eb327c732e76506c9956f662595f504b177c68bc Mon Sep 17 00:00:00 2001 From: Gabor Simon Date: Thu, 1 Jun 2023 14:46:58 +0400 Subject: [PATCH 661/683] cloudfront_distribution: now honours the enabled setting (#1824) cloudfront_distribution: now honours the enabled setting SUMMARY Fixes: #1823 The enabled: false setting was ignored, because here we were falling back to the default True not only when the setting was None, but also when it was False. 
ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- cloudfront_distribution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 40bc15dac35..26cdc02336d 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -2218,7 +2218,7 @@ def validate_common_distribution_parameters( config["aliases"] = ansible_list_to_cloudfront_list(aliases) if logging is not None: config["logging"] = self.validate_logging(logging) - config["enabled"] = enabled or config.get("enabled", self.__default_distribution_enabled) + config["enabled"] = enabled if enabled is not None else config.get("enabled", self.__default_distribution_enabled) if price_class is not None: self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes) config["price_class"] = price_class From c64ab85ac4d344db22d6e250ce74769d4fbcf19e Mon Sep 17 00:00:00 2001 From: Gabor Simon Date: Thu, 1 Jun 2023 14:48:41 +0400 Subject: [PATCH 662/683] cloudfront_distribution: no longer crashes when waiting for completion of creation (#1822) cloudfront_distribution: no longer crashes when waiting for completion of creation SUMMARY Fixes #255 Here we were referring to the ["Id"] member of a queried distribution, but there is a level of embedding missing: ["Distribution"]["Id"] (just as it is already accessed elsewhere in the module) ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- cloudfront_distribution.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 26cdc02336d..db6375ca1fb 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -2316,7 +2316,8 @@ def validate_distribution_id_from_alias(self, aliases): def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)["Id"] + distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) + distribution_id = distribution["Distribution"]["Id"] try: waiter = client.get_waiter("distribution_deployed") From 61797a443763a09a892251ea766277ba2a47e9b0 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 1 Jun 2023 12:48:45 +0200 Subject: [PATCH 663/683] Fix FQDN in EXAMPLES block for mq_* modules. (#1833) Fix FQDN in EXAMPLES block for mq_* modules. SUMMARY Fix FQDN in EXAMPLES block for mq_* modules. 
ISSUE TYPE Docs Pull Request COMPONENT NAME mq_* Reviewed-by: Markus Bergholz --- mq_broker.py | 15 ++++++++++----- mq_broker_config.py | 10 ++++++---- mq_broker_info.py | 5 +++-- mq_user.py | 8 +++++--- mq_user_info.py | 11 +++++++---- 5 files changed, 31 insertions(+), 18 deletions(-) diff --git a/mq_broker.py b/mq_broker.py index 2cc5b8e375a..5fda006b8b0 100644 --- a/mq_broker.py +++ b/mq_broker.py @@ -134,7 +134,7 @@ EXAMPLES = r""" - name: create broker (if missing) with minimal required parameters - amazon.aws.mq_broker: + community.aws.mq_broker: broker_name: "{{ broker_name }}" security_groups: - sg_xxxxxxx @@ -142,17 +142,20 @@ - subnet_xxx - subnet_yyy register: result + - set_fact: broker_id: "{{ result.broker['BrokerId'] }}" + - name: use mq_broker_info to wait until broker is ready - amazon.aws.mq_broker_info: + community.aws.mq_broker_info: broker_id: "{{ broker_id }}" register: result until: "result.broker['BrokerState'] == 'RUNNING'" retries: 15 delay: 60 + - name: create or update broker with almost all parameter set including credentials - amazon.aws.mq_broker: + community.aws.mq_broker: broker_name: "my_broker_2" state: present deployment_mode: 'ACTIVE_STANDBY_MULTI_AZ' @@ -183,12 +186,14 @@ host_instance_type: 'mq.t3.micro' enable_audit_log: true enable_general_log: true + - name: reboot a broker - amazon.aws.mq_broker: + community.aws.mq_broker: broker_name: "my_broker_2" state: restarted + - name: delete a broker - amazon.aws.mq_broker: + community.aws.mq_broker: broker_name: "my_broker_2" state: absent """ diff --git a/mq_broker_config.py b/mq_broker_config.py index e530af47384..84f1b4dff3b 100644 --- a/mq_broker_config.py +++ b/mq_broker_config.py @@ -44,17 +44,19 @@ EXAMPLES = r""" - name: send new XML config to broker relying on credentials from environment - amazon.aws.mq_broker_config: + community.aws.mq_broker_config: broker_id: "aws-mq-broker-id" config_xml: "{{ lookup('file', 'activemq.xml' )}}" region: "{{ aws_region }}" + - name: send new XML config to broker and reboot if necessary - amazon.aws.mq_broker_config: + community.aws.mq_broker_config: broker_id: "aws-mq-broker-id" config_xml: "{{ lookup('file', 'activemq2.xml' )}}" reboot: true + - name: send new broker config and set all credentials explicitly - amazon.aws.mq_broker_config: + community.aws.mq_broker_config: broker_id: "{{ broker_id }}" config_xml: "{{ lookup('file', 'activemq3.xml')}}" config_description: "custom description for configuration object" @@ -129,7 +131,7 @@ def create_and_assign_config(conn, module, broker_id, cfg_id, cfg_xml_encoded): if "config_description" in module.params and module.params["config_description"]: kwargs["Description"] = module.params["config_description"] else: - kwargs["Description"] = "Updated through amazon.aws.mq_broker_config ansible module" + kwargs["Description"] = "Updated through community.aws.mq_broker_config ansible module" # try: c_response = conn.update_configuration(**kwargs) diff --git a/mq_broker_info.py b/mq_broker_info.py index da04596f589..c96e327cd02 100644 --- a/mq_broker_info.py +++ b/mq_broker_info.py @@ -31,11 +31,12 @@ EXAMPLES = r""" - name: get current broker settings by id - amazon.aws.mq_broker_info: + community.aws.mq_broker_info: broker_id: "aws-mq-broker-id" register: broker_info + - name: get current broker settings by name setting all credential parameters explicitly - amazon.aws.mq_broker_info: + community.aws.mq_broker_info: broker_name: "aws-mq-broker-name" register: broker_info """ diff --git a/mq_user.py b/mq_user.py index 
00d8adfd58f..898212cbcba 100644 --- a/mq_user.py +++ b/mq_user.py @@ -62,22 +62,24 @@ EXAMPLES = r""" - name: create/update user - set provided password if user doesn't exist, yet - amazon.aws.mq_user: + community.aws.mq_user: state: present broker_id: "aws-mq-broker-id" username: "sample_user1" console_access: false groups: [ "g1", "g2" ] password: "plain-text-password" + - name: allow console access and update group list - relying on default state - amazon.aws.mq_user: + community.aws.mq_user: broker_id: "aws-mq-broker-id" username: "sample_user1" region: "{{ aws_region }}" console_access: true groups: [ "g1", "g2", "g3" ] + - name: remove user - setting all credentials explicitly - amazon.aws.mq_user: + community.aws.mq_user: state: absent broker_id: "aws-mq-broker-id" username: "other_user" diff --git a/mq_user_info.py b/mq_user_info.py index bd6b09c4eb0..8c63f829188 100644 --- a/mq_user_info.py +++ b/mq_user_info.py @@ -50,20 +50,23 @@ EXAMPLES = r""" - name: get all users as list - relying on environment for API credentials - amazon.aws.mq_user_info: + community.aws.mq_user_info: broker_id: "aws-mq-broker-id" max_results: 50 register: result + - name: get users as dict - explicitly specifying all credentials - amazon.aws.mq_user_info: + community.aws.mq_user_info: broker_id: "aws-mq-broker-id" register: result + - name: get list of users to decide which may need to be deleted - amazon.aws.mq_user_info: + community.aws.mq_user_info: broker_id: "aws-mq-broker-id" skip_pending_delete: true + - name: get list of users to decide which may need to be created - amazon.aws.mq_user_info: + community.aws.mq_user_info: broker_id: "aws-mq-broker-id" skip_pending_create: true """ From 6cbe8c6b5c95d5529dc3d9a9c8537af9d6f343cd Mon Sep 17 00:00:00 2001 From: Gabor Simon Date: Thu, 8 Jun 2023 13:26:45 +0400 Subject: [PATCH 664/683] dynamodb_table: secondary indexes are now created (#1826) dynamodb_table: secondary indexes are now created SUMMARY Fixes: #1825 Possibly due to a typo, the index definition being checked was overwritten with an empty dict here. Without that stray line, the index processing proceeds fine. 
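A minimal standalone sketch of the clobbering described above (the index data is hypothetical, not the module's real parameters):

indexes = [{"name": "idx1", "type": "all", "hash_key_name": "id"}]

for index in indexes:
    index = dict()  # the stray line: every index is replaced with an empty dict
    # {}.get("type") returns None, which is never a valid index type...
    if index.get("type") not in ["all", "include", "keys_only"]:
        continue  # ...so every index is skipped and none get created
    print("processing index:", index["name"])

Dropping the stray assignment lets the loop see the real definitions again, so the indexes are actually processed.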
ISSUE TYPE Bugfix Pull Request COMPONENT NAME dynamodb_table Reviewed-by: Mark Chappell Reviewed-by: Markus Bergholz --- dynamodb_table.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dynamodb_table.py b/dynamodb_table.py index 5be7a4b9c43..b71663c9eba 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -675,7 +675,6 @@ def _generate_local_indexes(): indexes = list() for index in module.params.get("indexes"): - index = dict() if index.get("type") not in ["all", "include", "keys_only"]: continue name = index.get("name") From 49089644e82baf4968f48bed467cce69fe562a8f Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 21 Jun 2023 23:28:06 +0200 Subject: [PATCH 665/683] Add sanity, linters, changelog, black and units workflows (#1799) * Add sanity, linters, changelog and units workflows Signed-off-by: Alina Buzachis * Delete black.yml Signed-off-by: Alina Buzachis * Update references Signed-off-by: Alina Buzachis * Remove linters job Signed-off-by: Alina Buzachis * Add missing tox.ini file Signed-off-by: Alina Buzachis * Apply black on plugins/modules/cloudfront_distribution.py Signed-off-by: Alina Buzachis * Update tox.ini Signed-off-by: Alina Buzachis * Fix units Signed-off-by: Alina Buzachis --------- Signed-off-by: Alina Buzachis --- cloudfront_distribution.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index db6375ca1fb..64681d11106 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -2218,7 +2218,9 @@ def validate_common_distribution_parameters( config["aliases"] = ansible_list_to_cloudfront_list(aliases) if logging is not None: config["logging"] = self.validate_logging(logging) - config["enabled"] = enabled if enabled is not None else config.get("enabled", self.__default_distribution_enabled) + config["enabled"] = ( + enabled if enabled is not None else config.get("enabled", self.__default_distribution_enabled) + ) if price_class is not None: self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes) config["price_class"] = price_class From fb75a4047fbc7d207597cf9b5061d35c13a47f95 Mon Sep 17 00:00:00 2001 From: Gabor Simon Date: Thu, 22 Jun 2023 22:32:12 +0400 Subject: [PATCH 666/683] Issue 1819 cloudfront distribution origin s3 domain (#1821) Issue 1819 cloudfront distribution origin s3 domain SUMMARY Fixes #1819 As per the Origin Domain Name spec, S3 domain names now take the form {name}.s3.{region}.amazonaws.com, so the string fragment .s3.amazonaws.com no longer occurs in them, and therefore they aren't recognised as S3 origin domains. Consequently, the origin is treated as a custom one, so a custom_origin_config member is generated for it, which collides with the s3_origin_config and produces an error: botocore.errorfactory.InvalidOrigin: An error occurred (InvalidOrigin) when calling the CreateDistribution operation: You must specify either a CustomOrigin or an S3Origin. You cannot specify both. The backward-compatible way is to recognise both {name}.s3.amazonaws.com and {name}.s3.{region}.amazonaws.com, but for this a regular expression is the most effective solution. 
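A quick standalone check of the proposed pattern against both domain forms (the bucket and origin names here are illustrative):

import re

# the pattern introduced by the fix below
s3_domain = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$")

assert s3_domain.search("my-bucket.s3.amazonaws.com")            # legacy global form
assert s3_domain.search("my-bucket.s3.eu-west-1.amazonaws.com")  # newer regional form
assert not s3_domain.search("origin.example.com")                # custom origin: no match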
ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution ADDITIONAL INFORMATION The breakdown of the regex I used: \.s3(?:\.[^.]+)?\.amazonaws\.com$ \.s3 matches ".s3" \.[^.]+ would match a dot followed by at least one, possibly more non-dot characters (\.[^.]+) would match the same, just grouped, so we could treat it as an atom (?:\.[^.]+) would match the same, just grouped in a non-capturing fashion (we don't want to extract the matched characters) (?:\.[^.]+)? matches the same, occurring 0 or 1 times \.amazonaws\.com matches ".amazonaws.com" $ matches the end of the input string Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- cloudfront_distribution.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 64681d11106..7aa312cecd1 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1417,6 +1417,7 @@ from collections import OrderedDict import datetime +import re try: import botocore @@ -1676,7 +1677,7 @@ def __init__(self, module): "http2and3", ] ) - self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" + self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$") def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: @@ -1818,7 +1819,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): ) else: origin_shield_region = origin_shield_region.lower() - if self.__s3_bucket_domain_identifier in origin.get("domain_name").lower(): + if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()): if origin.get("s3_origin_access_identity_enabled") is not None: if origin["s3_origin_access_identity_enabled"]: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) @@ -1834,10 +1835,10 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin["s3_origin_config"] = dict(origin_access_identity=oai) - if "custom_origin_config" in origin: - self.module.fail_json( - msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive" - ) + if "custom_origin_config" in origin: + self.module.fail_json( + msg="s3 origin domains and custom_origin_config are mutually exclusive" + ) else: origin = self.add_missing_key( origin, "custom_origin_config", existing_config.get("custom_origin_config", {}) From e067c352e67c804c4d369b4f0430e31caec1103d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 26 Jun 2023 10:16:00 +0200 Subject: [PATCH 667/683] lint - cloudfront_distribution - apply black formatting (#1851) lint - cloudfront_distribution - apply black formatting SUMMARY #1821 introduced some bad formatting ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution ADDITIONAL INFORMATION --- cloudfront_distribution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py index 7aa312cecd1..37fd914dbb4 100644 --- a/cloudfront_distribution.py +++ b/cloudfront_distribution.py @@ -1837,7 +1837,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): if "custom_origin_config" in origin: self.module.fail_json( - msg="s3 origin domains and custom_origin_config are mutually exclusive" + msg="s3 origin domains and custom_origin_config are mutually exclusive", ) else: origin = self.add_missing_key( origin, "custom_origin_config", existing_config.get("custom_origin_config", {}) From ae97a42eb07d307e7d7b1704b7ab40204489b519 Mon Sep 17 
00:00:00 2001 From: Mark Chappell Date: Mon, 26 Jun 2023 23:23:50 +0200 Subject: [PATCH 668/683] Various ARN handling fixes (#1848) Various ARN handling fixes Depends-On: ansible-collections/amazon.aws#1619 SUMMARY fixes: #1846 Various modules had hard-coded ARN handling which assumed the use of the main partition. This causes problems for folks using Gov Cloud (and aws-cn) ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/batch_compute_environment.py plugins/modules/ec2_launch_template.py plugins/modules/elasticache_info.py plugins/modules/iam_group.py plugins/modules/iam_role.py plugins/modules/msk_config.py plugins/modules/redshift.py plugins/modules/sns_topic.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- batch_compute_environment.py | 3 ++- ec2_launch_template.py | 4 ++-- elasticache_info.py | 17 +++-------------- iam_group.py | 5 +++-- iam_role.py | 7 ++++--- msk_config.py | 4 +++- redshift.py | 8 ++++---- sns_topic.py | 5 +++-- 8 files changed, 24 insertions(+), 29 deletions(-) diff --git a/batch_compute_environment.py b/batch_compute_environment.py index 6bb2541e161..cbe1184b2fe 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -233,6 +233,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @@ -270,7 +271,7 @@ def validate_params(module): module.fail_json( msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores." 
) - if not compute_environment_name.startswith("arn:aws:batch:"): + if not validate_aws_arn(compute_environment_name, service="batch"): if len(compute_environment_name) > 128: module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit') diff --git a/ec2_launch_template.py b/ec2_launch_template.py index 01d36ccc57c..6cd1de3fb0d 100644 --- a/ec2_launch_template.py +++ b/ec2_launch_template.py @@ -422,7 +422,6 @@ type: int """ -import re from uuid import uuid4 try: @@ -436,6 +435,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list @@ -446,7 +446,7 @@ def determine_iam_role(module, name_or_arn): - if re.match(r"^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$", name_or_arn): + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): return {"arn": name_or_arn} iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: diff --git a/elasticache_info.py b/elasticache_info.py index 021d3a0270e..50a8cb5ff0d 100644 --- a/elasticache_info.py +++ b/elasticache_info.py @@ -410,6 +410,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict @@ -444,18 +445,6 @@ def get_elasticache_tags_with_backoff(client, cluster_id): return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"] -def get_aws_account_id(module): - try: - client = module.client("sts") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Can't authorize connection") - - try: - return client.get_caller_identity()["Account"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't obtain AWS account id") - - def get_elasticache_clusters(client, module): region = module.region try: @@ -463,11 +452,11 @@ def get_elasticache_clusters(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain cache cluster info") - account_id = get_aws_account_id(module) + account_id, partition = get_aws_account_info(module) results = [] for cluster in clusters: cluster = camel_dict_to_snake_dict(cluster) - arn = f"arn:aws:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}" + arn = f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}" try: tags = get_elasticache_tags_with_backoff(client, arn) except is_boto3_error_code("CacheClusterNotFound"): diff --git a/iam_group.py b/iam_group.py index c4f77fde772..f88ebac120d 100644 --- a/iam_group.py +++ b/iam_group.py @@ -171,6 +171,7 @@ from 
ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @@ -203,7 +204,7 @@ def compare_group_members(current_group_members, new_group_members): def convert_friendly_names_to_arns(connection, module, policy_names): - if not any(not policy.startswith("arn:") for policy in policy_names if policy is not None): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): return policy_names allpolicies = {} paginator = connection.get_paginator("list_policies") @@ -213,7 +214,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): allpolicies[policy["PolicyName"]] = policy["Arn"] allpolicies[policy["Arn"]] = policy["Arn"] try: - return [allpolicies[policy] for policy in policy_names] + return [allpolicies[policy] for policy in policy_names if policy is not None] except KeyError as e: module.fail_json(msg="Couldn't find policy: " + str(e)) diff --git a/iam_role.py b/iam_role.py index be05707238a..b39281e17b9 100644 --- a/iam_role.py +++ b/iam_role.py @@ -224,6 +224,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @@ -265,7 +266,7 @@ def wait_iam_exists(module, client): def convert_friendly_names_to_arns(module, client, policy_names): - if not any(not policy.startswith("arn:") for policy in policy_names): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): return policy_names allpolicies = {} @@ -275,7 +276,7 @@ def convert_friendly_names_to_arns(module, client, policy_names): allpolicies[policy["PolicyName"]] = policy["Arn"] allpolicies[policy["Arn"]] = policy["Arn"] try: - return [allpolicies[policy] for policy in policy_names] + return [allpolicies[policy] for policy in policy_names if policy is not None] except KeyError as e: module.fail_json_aws(e, msg="Couldn't find policy") @@ -746,7 +747,7 @@ def main(): if module.params.get("boundary"): if module.params.get("create_instance_profile"): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") - if not module.params.get("boundary").startswith("arn:aws:iam"): + if not validate_aws_arn(module.params.get("boundary"), service="iam"): module.fail_json(msg="Boundary policy must be an ARN") if module.params.get("max_session_duration"): max_session_duration = module.params.get("max_session_duration") diff --git a/msk_config.py b/msk_config.py index 864827eb610..2469f95984b 100644 --- a/msk_config.py +++ b/msk_config.py @@ -98,6 +98,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule @@ -283,7 +284,8 @@ def 
main(): # return some useless staff in check mode if configuration doesn't exists # can be useful when these options are referenced by other modules during check mode run if module.check_mode and not response.get("Arn"): - arn = "arn:aws:kafka:region:account:configuration/name/id" + account_id, partition = get_aws_account_info(module) + arn = f"arn:{partition}:kafka:{module.region}:{account_id}:configuration/{module.params['name']}/id" revision = 1 server_properties = "" else: diff --git a/redshift.py b/redshift.py index 91993648de0..4463722e59e 100644 --- a/redshift.py +++ b/redshift.py @@ -263,7 +263,7 @@ from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict @@ -275,9 +275,9 @@ def _ensure_tags(redshift, identifier, existing_tags, module): """Compares and update resource tags""" - account_id = get_aws_account_id(module) - region = module.params.get("region") - resource_arn = f"arn:aws:redshift:{region}:{account_id}:cluster:{identifier}" + account_id, partition = get_aws_account_info(module) + region = module.region + resource_arn = f"arn:{partition}:redshift:{region}:{account_id}:cluster:{identifier}" tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") diff --git a/sns_topic.py b/sns_topic.py index c99b7580663..22a2c82c216 100644 --- a/sns_topic.py +++ b/sns_topic.py @@ -338,9 +338,10 @@ except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.sns import list_topics @@ -579,7 +580,7 @@ def _delete_topic(self): return True def _name_is_arn(self): - return self.name.startswith("arn:") + return bool(parse_aws_arn(self.name)) def ensure_ok(self): changed = False From 98c819a9b2b6d79286f5413383c2ec9aefdbc240 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 4 Jul 2023 14:59:21 +0200 Subject: [PATCH 669/683] DynamoDB - add waiter for secondary indexes (#1866) DynamoDB - add waiter for secondary indexes SUMMARY Integration tests keep failing due to concurrency issues (the service doesn't like making updates while index updates are in progress) Adds a waiter for the indexes ISSUE TYPE Feature Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- dynamodb_table.py | 72 ++++++++++++++++++----------------------------- 1 file changed, 28 insertions(+), 44 deletions(-) diff 
--git a/dynamodb_table.py b/dynamodb_table.py index b71663c9eba..66470c2b9c3 100644 --- a/dynamodb_table.py +++ b/dynamodb_table.py @@ -130,13 +130,15 @@ wait_timeout: description: - How long (in seconds) to wait for creation / update / deletion to complete. + - AWS only allows secondary indexies to be updated one at a time, this module will automatically update them + in serial, and the timeout will be separately applied for each index. aliases: ['wait_for_active_timeout'] - default: 300 + default: 900 type: int wait: description: - When I(wait=True) the module will wait for up to I(wait_timeout) seconds - for table creation or deletion to complete before returning. + for index updates, table creation or deletion to complete before returning. default: True type: bool extends_documentation_fragment: @@ -256,9 +258,11 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_indexes_active +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_exists +from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_not_exists from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - DYNAMO_TYPE_DEFAULT = "STRING" INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"] INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [ @@ -283,7 +287,7 @@ retries=45, delay=5, max_delay=30, - catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], + catch_extra_error_codes=["ResourceInUseException", "ResourceNotFoundException"], ) def _update_table_with_long_retry(**changes): return client.update_table(TableName=module.params.get("name"), **changes) @@ -296,47 +300,27 @@ def _describe_table(**params): def wait_exists(): - table_name = module.params.get("name") - wait_timeout = module.params.get("wait_timeout") - - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - - try: - waiter = client.get_waiter("table_exists") - waiter.wait( - WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, - TableName=table_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="Timeout while waiting on table creation") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while waiting on table creation") + wait_table_exists( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) def wait_not_exists(): - table_name = module.params.get("name") - wait_timeout = module.params.get("wait_timeout") + wait_table_not_exists( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - try: - waiter = client.get_waiter("table_not_exists") - waiter.wait( - WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, - TableName=table_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="Timeout while waiting on table deletion") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while waiting on table deletion") +def wait_indexes(): + 
wait_indexes_active( + module, + module.params.get("wait_timeout"), + module.params.get("name"), + ) def _short_type_to_long(short_key): @@ -858,6 +842,7 @@ def _update_table(current_table): if additional_global_index_changes: for index in additional_global_index_changes: + wait_indexes() try: _update_table_with_long_retry( GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"] @@ -870,9 +855,6 @@ def _update_table(current_table): additional_global_index_changes=additional_global_index_changes, ) - if module.params.get("wait"): - wait_exists() - return True @@ -927,6 +909,7 @@ def update_table(current_table): if module.params.get("wait"): wait_exists() + wait_indexes() return changed @@ -983,6 +966,7 @@ def create_table(): if module.params.get("wait"): wait_exists() + wait_indexes() return True @@ -1058,7 +1042,7 @@ def main(): tags=dict(type="dict", aliases=["resource_tags"]), purge_tags=dict(type="bool", default=True), wait=dict(type="bool", default=True), - wait_timeout=dict(default=300, type="int", aliases=["wait_for_active_timeout"]), + wait_timeout=dict(default=900, type="int", aliases=["wait_for_active_timeout"]), ) module = AnsibleAWSModule( From aa224a1e1936e5275ed47f90c571f91d5ea2b2d4 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Thu, 13 Jul 2023 10:31:56 +0530 Subject: [PATCH 670/683] Add transit-gateway-id parameter to ec2_vpc_vpn module (#1877) Add transit-gateway-id parameter to ec2_vpc_vpn module SUMMARY This PR adds transit_gateway_id parameter to ec2_vpc_vpn module. It is needed for the validated content role that manages the creation of transit gateway and attaches VPN to the created transit gateway. ISSUE TYPE Feature Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis Reviewed-by: GomathiselviS Reviewed-by: Mark Chappell --- ec2_vpc_vpn.py | 50 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 0efce4a7470..3e4987d9725 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -35,6 +35,7 @@ vpn_gateway_id: description: - The ID of the virtual private gateway. + - Mutually exclusive with I(transit_gateway_id). type: str vpn_connection_id: description: @@ -46,6 +47,12 @@ default: False type: bool required: false + transit_gateway_id: + description: + - The ID of the transit gateway. + - Mutually exclusive with I(vpn_gateway_id). + type: str + version_added: 6.2.0 tunnel_options: description: - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr) @@ -139,12 +146,18 @@ # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. -- name: create a VPN connection +- name: create a VPN connection with vpn_gateway_id community.aws.ec2_vpc_vpn: state: present vpn_gateway_id: vgw-XXXXXXXX customer_gateway_id: cgw-XXXXXXXX +- name: Attach a vpn connection to transit gateway + community.aws.ec2_vpc_vpn: + state: present + transit_gateway_id: tgw-XXXXXXXX + customer_gateway_id: cgw-XXXXXXXX + - name: modify VPN connection tags community.aws.ec2_vpc_vpn: state: present @@ -231,6 +244,12 @@ returned: I(state=present) sample: vpn_gateway_id: vgw-cb0ae2a2 +transit_gateway_id: + description: The transit gateway id to which the vpn connection can be attached. 
+ type: str + returned: I(state=present) + sample: + transit_gateway_id: tgw-cb0ae2a2 options: description: The VPN connection options (currently only containing static_routes_only). type: complex @@ -421,6 +440,7 @@ def create_filter(module_params, provided_filters): param_to_filter = { "customer_gateway_id": "customer-gateway-id", "vpn_gateway_id": "vpn-gateway-id", + "transit_gateway_id": "transit-gateway-id", "vpn_connection_id": "vpn-connection-id", } @@ -505,6 +525,7 @@ def create_connection( customer_gateway_id, static_only, vpn_gateway_id, + transit_gateway_id, connection_type, max_attempts, delay, @@ -524,17 +545,21 @@ def create_connection( if t_opt: options["TunnelOptions"] = t_opt - if not (customer_gateway_id and vpn_gateway_id): + if not (customer_gateway_id and (vpn_gateway_id or transit_gateway_id)): raise VPNConnectionException( msg=( "No matching connection was found. To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id." + "customer_gateway_id and one of either transit_gateway_id or vpn_gateway_id." ) ) + vpn_connection_params = {"Type": connection_type, "CustomerGatewayId": customer_gateway_id, "Options": options} + if vpn_gateway_id: + vpn_connection_params["VpnGatewayId"] = vpn_gateway_id + if transit_gateway_id: + vpn_connection_params["TransitGatewayId"] = transit_gateway_id + try: - vpn = connection.create_vpn_connection( - Type=connection_type, CustomerGatewayId=customer_gateway_id, VpnGatewayId=vpn_gateway_id, Options=options - ) + vpn = connection.create_vpn_connection(**vpn_connection_params) connection.get_waiter("vpn_connection_available").wait( VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, @@ -674,6 +699,7 @@ def get_check_mode_results(connection, module_params, vpn_connection_id=None, cu "customer_gateway_configuration": "", "customer_gateway_id": module_params.get("customer_gateway_id"), "vpn_gateway_id": module_params.get("vpn_gateway_id"), + "transit_gateway_id": module_params.get("transit_gateway_id"), "options": {"static_routes_only": module_params.get("static_only")}, "routes": [module_params.get("routes")], } @@ -752,6 +778,7 @@ def ensure_present(connection, module_params, check_mode=False): customer_gateway_id=module_params.get("customer_gateway_id"), static_only=module_params.get("static_only"), vpn_gateway_id=module_params.get("vpn_gateway_id"), + transit_gateway_id=module_params.get("transit_gateway_id"), connection_type=module_params.get("connection_type"), tunnel_options=module_params.get("tunnel_options"), max_attempts=max_attempts, @@ -797,6 +824,7 @@ def main(): vpn_gateway_id=dict(type="str"), tags=dict(type="dict", aliases=["resource_tags"]), connection_type=dict(default="ipsec.1", type="str"), + transit_gateway_id=dict(type="str"), tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"), static_only=dict(default=False, type="bool"), customer_gateway_id=dict(type="str"), @@ -807,7 +835,15 @@ def main(): wait_timeout=dict(type="int", default=600), delay=dict(type="int", default=15), ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + mutually_exclusive = [ + ["vpn_gateway_id", "transit_gateway_id"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10)) state = module.params.get("state") From 
016395858a8c16edffdaa4f11b2534c9263dc118 Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Tue, 1 Aug 2023 15:36:07 +0200 Subject: [PATCH 671/683] api_gateway - add parameter name (#1845) api_gateway - add parameter name SUMMARY api_gateway - Add parameter name to define the name of the API gateway to create/update, defaulting to ansible-temp-api for backward compatibility ISSUE TYPE Feature Pull Request COMPONENT NAME api_gateway ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Bikouo Aubin Reviewed-by: Mark Chappell --- api_gateway.py | 173 ++++++++++++++++++++++++++++++++++++++++---- api_gateway_info.py | 156 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 313 insertions(+), 16 deletions(-) create mode 100644 api_gateway_info.py diff --git a/api_gateway.py b/api_gateway.py index c63ad5f1582..0339bf7a329 100644 --- a/api_gateway.py +++ b/api_gateway.py @@ -98,17 +98,31 @@ choices: ['EDGE', 'REGIONAL', 'PRIVATE'] type: str default: EDGE + name: + description: + - The name of the RestApi. + type: str + version_added: 6.2.0 + lookup: + description: + - Look up API gateway by either I(tags) (and I(name) if supplied) or by I(api_id). + - If I(lookup=tag) and I(tags) is not specified then no lookup for an existing API gateway + is performed and a new API gateway will be created. + - When using I(lookup=tag), multiple matches being found will result in a failure and no changes will be made. + - To change the tags of a API gateway use I(lookup=id). + default: tag + choices: [ 'tag', 'id' ] + type: str + version_added: 6.2.0 author: - 'Michael De La Rue (@mikedlr)' notes: - - A future version of this module will probably use tags or another - ID so that an API can be created only once. - - As an early work around an intermediate version will probably do - the same using a tag embedded in the API name. + - 'Tags are used to uniquely identify API gateway when the I(api_id) is not supplied. 
version_added=6.2.0' extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules - amazon.aws.boto3 + - amazon.aws.tags """ EXAMPLES = r""" @@ -140,6 +154,14 @@ cache_size: '6.1' canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True } state: present + +- name: Delete API gateway + amazon.aws.api_gateway: + name: ansible-rest-api + tags: + automation: ansible + lookup: tags + state: absent """ RETURN = r""" @@ -178,6 +200,8 @@ from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def main(): @@ -195,13 +219,17 @@ def main(): stage_canary_settings=dict(type="dict", default={}), tracing_enabled=dict(type="bool", default=False), endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), + name=dict(type="str"), + lookup=dict(type="str", choices=["tag", "id"], default="tag"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841 module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=False, + supports_check_mode=True, mutually_exclusive=mutually_exclusive, ) @@ -211,6 +239,9 @@ def main(): swagger_dict = module.params.get("swagger_dict") swagger_text = module.params.get("swagger_text") endpoint_type = module.params.get("endpoint_type") + name = module.params.get("name") + tags = module.params.get("tags") + lookup = module.params.get("lookup") client = module.client("apigateway") @@ -221,12 +252,47 @@ def main(): if state == "present": if api_id is None: - api_id = create_empty_api(module, client, endpoint_type) - api_data = get_api_definitions( - module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text - ) - conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) + # lookup API gateway using tags + if tags and lookup == "tag": + rest_api = get_api_by_tags(client, module, name, tags) + if rest_api: + api_id = rest_api["id"] + if module.check_mode: + module.exit_json(changed=True, msg="Create/update operation skipped - running in check mode.") + if api_id is None: + api_data = get_api_definitions( + module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text + ) + # create new API gateway as non were provided and/or found using lookup=tag + api_id = create_empty_api(module, client, name, endpoint_type, tags) + conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + if tags: + if not conf_res: + conf_res = get_rest_api(module, client, api_id=api_id) + tag_changed, tag_result = ensure_apigateway_tags( + module, client, api_id=api_id, current_tags=conf_res.get("tags"), new_tags=tags, purge_tags=purge_tags + ) + if tag_changed: + changed |= tag_changed + conf_res = tag_result if state == "absent": + if api_id is None: + if lookup != "tag" or not tags: + module.fail_json( + msg="API gateway id must be supplied to delete API gateway or provided tag with lookup=tag to identify API gateway id." 
+ ) + rest_api = get_api_by_tags(client, module, name, tags) + if not rest_api: + module.exit_json(changed=False, msg="No API gateway identified with tags provided") + api_id = rest_api["id"] + elif not describe_api(client, module, api_id): + module.exit_json(changed=False, msg="API gateway id '{0}' does not exist.".format(api_id)) + + if module.check_mode: + module.exit_json(changed=True, msg="Delete operation skipped - running in check mode.", api_id=api_id) + del_res = delete_rest_api(module, client, api_id) exit_args = {"changed": changed, "api_id": api_id} @@ -241,6 +307,24 @@ def main(): module.exit_json(**exit_args) +def ensure_apigateway_tags(module, client, api_id, current_tags, new_tags, purge_tags): + changed = False + tag_result = {} + tags_to_set, tags_to_delete = compare_aws_tags(current_tags, new_tags, purge_tags) + if tags_to_set or tags_to_delete: + changed = True + apigateway_arn = f"arn:aws:apigateway:{module.region}::/restapis/{api_id}" + # Remove tags from Resource + if tags_to_delete: + client.untag_resource(resourceArn=apigateway_arn, tagKeys=tags_to_delete) + # add new tags to resource + if tags_to_set: + client.tag_resource(resourceArn=apigateway_arn, tags=tags_to_set) + # Describe API gateway + tag_result = get_rest_api(module, client, api_id=api_id) + return changed, tag_result + + def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None): apidata = None if swagger_file is not None: @@ -260,7 +344,16 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te return apidata -def create_empty_api(module, client, endpoint_type): +def get_rest_api(module, client, api_id): + try: + response = client.get_rest_api(restApiId=api_id) + response.pop("ResponseMetadata", None) + return response + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"failed to get REST API with api_id={api_id}") + + +def create_empty_api(module, client, name, endpoint_type, tags): """ creates a new empty API ready to be configured. 
The description is temporarily set to show the API as incomplete but should be @@ -268,7 +361,8 @@ def create_empty_api(module, client, endpoint_type): """ desc = "Incomplete API creation by ansible api_gateway module" try: - awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type) + rest_api_name = name or "ansible-temp-api" + awsret = create_api(client, name=rest_api_name, description=desc, endpoint_type=endpoint_type, tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: module.fail_json_aws(e, msg="creating API") return awsret["id"] @@ -298,6 +392,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): configure_response = None try: configure_response = configure_api(client, api_id, api_data=api_data) + configure_response.pop("ResponseMetadata", None) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: module.fail_json_aws(e, msg=f"configuring API {api_id}") @@ -307,6 +402,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): if stage: try: deploy_response = create_deployment(client, api_id, **module.params) + deploy_response.pop("ResponseMetadata", None) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: msg = f"deploying api {api_id} to stage {stage}" module.fail_json_aws(e, msg) @@ -314,14 +410,38 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): return configure_response, deploy_response +def get_api_by_tags(client, module, name, tags): + count = 0 + result = None + for api in list_apis(client): + if name and api["name"] != name: + continue + api_tags = api.get("tags", {}) + if all((tag_key in api_tags and api_tags[tag_key] == tag_value for tag_key, tag_value in tags.items())): + result = api + count += 1 + + if count > 1: + args = "Tags" + if name: + args += " and name" + module.fail_json(msg="{0} provided do not identify a unique API gateway".format(args)) + return result + + retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]} @AWSRetry.jittered_backoff(**retry_params) -def create_api(client, name=None, description=None, endpoint_type=None): - return client.create_rest_api( - name="ansible-temp-api", description=description, endpointConfiguration={"types": [endpoint_type]} - ) +def create_api(client, name, description=None, endpoint_type=None, tags=None): + params = {"name": name} + if description: + params["description"] = description + if endpoint_type: + params["endpointConfiguration"] = {"types": [endpoint_type]} + if tags: + params["tags"] = tags + return client.create_rest_api(**params) @AWSRetry.jittered_backoff(**retry_params) @@ -363,5 +483,26 @@ def create_deployment(client, rest_api_id, **params): return result +@AWSRetry.jittered_backoff(**retry_params) +def list_apis(client): + paginator = client.get_paginator("get_rest_apis") + return paginator.paginate().build_full_result().get("items", []) + + +@AWSRetry.jittered_backoff(**retry_params) +def describe_api(client, module, rest_api_id): + try: + response = client.get_rest_api(restApiId=rest_api_id) + response.pop("ResponseMetadata") + except is_boto3_error_code("ResourceNotFoundException"): + response = {} + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get Rest API '{0}'.".format(rest_api_id)) + return response + + if __name__ 
== "__main__": main() diff --git a/api_gateway_info.py b/api_gateway_info.py new file mode 100644 index 00000000000..5c904544b9c --- /dev/null +++ b/api_gateway_info.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: api_gateway_info +version_added: 6.1.0 +short_description: Gather information about ec2 instances in AWS +description: + - Gather information about ec2 instances in AWS +options: + ids: + description: + - The list of the string identifiers of the associated RestApis. + type: list + elements: str +author: + - Aubin Bikouo (@abikouo) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +--- +# List all API gateway +- name: List all for a specific function + community.aws.api_gateway_info: + +# Get information for a specific API gateway +- name: List all for a specific function + community.aws.api_gateway_info: + ids: + - 012345678a + - abcdefghij +""" + +RETURN = r""" +--- +rest_apis: + description: A list of API gateway. + returned: always + type: complex + contains: + name: + description: The name of the API. + returned: success + type: str + sample: 'ansible-tmp-api' + id: + description: The identifier of the API. + returned: success + type: str + sample: 'abcdefgh' + api_key_source: + description: The source of the API key for metering requests according to a usage plan. + returned: success + type: str + sample: 'HEADER' + created_date: + description: The timestamp when the API was created. + returned: success + type: str + sample: "2020-01-01T11:37:59+00:00" + description: + description: The description of the API. + returned: success + type: str + sample: "Automatic deployment by Ansible." + disable_execute_api_endpoint: + description: Specifies whether clients can invoke your API by using the default execute-api endpoint. + returned: success + type: bool + sample: False + endpoint_configuration: + description: The endpoint configuration of this RestApi showing the endpoint types of the API. + returned: success + type: dict + sample: {"types": ["REGIONAL"]} + tags: + description: The collection of tags. 
+ returned: success + type: dict + sample: {"key": "value"} +""" + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@AWSRetry.jittered_backoff() +def _list_rest_apis(connection, **params): + paginator = connection.get_paginator("get_rest_apis") + return paginator.paginate(**params).build_full_result().get("items", []) + + +@AWSRetry.jittered_backoff() +def _describe_rest_api(connection, module, rest_api_id): + try: + response = connection.get_rest_api(restApiId=rest_api_id) + response.pop("ResponseMetadata") + except is_boto3_error_code("ResourceNotFoundException"): + response = {} + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get Rest API '{0}'.".format(rest_api_id)) + return response + + +def main(): + argument_spec = dict( + ids=dict(type="list", elements="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + connection = module.client("apigateway") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + ids = module.params.get("ids") + if ids: + rest_apis = [] + for rest_api_id in ids: + result = _describe_rest_api(connection, module, rest_api_id) + if result: + rest_apis.append(result) + else: + rest_apis = _list_rest_apis(connection) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_rest_apis = [camel_dict_to_snake_dict(item) for item in rest_apis] + module.exit_json(changed=False, rest_apis=snaked_rest_apis) + + +if __name__ == "__main__": + main() From f971946505d58ed15a24d233371722b0805ef4eb Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 31 Aug 2023 17:58:59 +0200 Subject: [PATCH 672/683] Mass update of docs and tests (credentials/session tokens) (#1921) Mass update of docs and tests (credentials/session tokens) SUMMARY We had a cleanup of credentials/session parameters which included a batch of deprecations and renames. 
Ensure that all of our tests and docs are using the 'canonical' names ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/batch_compute_environment.py plugins/modules/cloudformation_exports_info.py plugins/modules/ec2_vpc_vpn.py plugins/modules/elasticache.py plugins/modules/elasticache_parameter_group.py plugins/modules/elasticache_snapshot.py plugins/modules/ses_rule_set.py plugins/modules/sts_assume_role.py plugins/modules/sts_session_token.py tests/integration ADDITIONAL INFORMATION See also ansible-collections/amazon.aws#1172 ansible-collections/amazon.aws#1714 Reviewed-by: Alina Buzachis --- batch_compute_environment.py | 6 +++--- cloudformation_exports_info.py | 5 ++--- ec2_vpc_vpn.py | 3 +-- elasticache.py | 3 +-- elasticache_parameter_group.py | 5 ++--- elasticache_snapshot.py | 3 +-- ses_rule_set.py | 5 ++--- sts_assume_role.py | 13 +++++++------ sts_session_token.py | 15 +++++++++------ 9 files changed, 28 insertions(+), 30 deletions(-) diff --git a/batch_compute_environment.py b/batch_compute_environment.py index cbe1184b2fe..d7ee4ebc1f5 100644 --- a/batch_compute_environment.py +++ b/batch_compute_environment.py @@ -165,15 +165,15 @@ changed: false invocation: module_args: - aws_access_key: ~ - aws_secret_key: ~ + access_key: ~ + secret_key: ~ bid_percentage: ~ compute_environment_name: compute_environment_state: ENABLED compute_resource_type: EC2 desiredv_cpus: 0 ec2_key_pair: ~ - ec2_url: ~ + endpoint_url: ~ image_id: ~ instance_role: "arn:aws:iam::..." instance_types: diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py index 3c93c6a3459..cf769606d47 100644 --- a/cloudformation_exports_info.py +++ b/cloudformation_exports_info.py @@ -19,10 +19,9 @@ """ EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get Exports - community.aws.cloudformation_exports_info: - profile: 'my_aws_profile' - region: 'my_region' + community.aws.cloudformation_exports_info: {} register: cf_exports - ansible.builtin.debug: msg: "{{ cf_exports }}" diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py index 3e4987d9725..abc97f796b7 100644 --- a/ec2_vpc_vpn.py +++ b/ec2_vpc_vpn.py @@ -143,8 +143,7 @@ """ EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. - name: create a VPN connection with vpn_gateway_id community.aws.ec2_vpc_vpn: diff --git a/elasticache.py b/elasticache.py index e7a9b1808ff..d45509cb606 100644 --- a/elasticache.py +++ b/elasticache.py @@ -103,8 +103,7 @@ RETURN = r""" # """ EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. - name: Basic example community.aws.elasticache: diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py index 20f5ed9838b..00f2af19a08 100644 --- a/elasticache_parameter_group.py +++ b/elasticache_parameter_group.py @@ -50,9 +50,8 @@ """ EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. ---- +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ - hosts: localhost connection: local tasks: diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py index b6b6f55069c..0816527fb4f 100644 --- a/elasticache_snapshot.py +++ b/elasticache_snapshot.py @@ -49,8 +49,7 @@ """ EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. - name: 'Create a snapshot' community.aws.elasticache_snapshot: diff --git a/ses_rule_set.py b/ses_rule_set.py index 8d09965774f..cf478c0f90a 100644 --- a/ses_rule_set.py +++ b/ses_rule_set.py @@ -50,9 +50,8 @@ """ EXAMPLES = r""" -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. ---- +# Note: These examples do not set authentication details, see the AWS Guide for details. + - name: Create default rule set and activate it if not already community.aws.ses_rule_set: name: default-rule-set diff --git a/sts_assume_role.py b/sts_assume_role.py index 4a4860657cf..4d934c2d5cd 100644 --- a/sts_assume_role.py +++ b/sts_assume_role.py @@ -49,7 +49,8 @@ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. type: str notes: - - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token. + - In order to use the assumed role in a following playbook task you must pass the I(access_key), + I(secret_key) and I(session_token) parameters to modules that should use the assumed credentials. extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules @@ -80,19 +81,19 @@ """ EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - # Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) - community.aws.sts_assume_role: + access_key: AKIA1EXAMPLE1EXAMPLE + secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE role_arn: "arn:aws:iam::123456789012:role/someRole" role_session_name: "someRoleSession" register: assumed_role # Use the assumed role above to tag an instance in account 123456789012 - amazon.aws.ec2_tag: - aws_access_key: "{{ assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" - security_token: "{{ assumed_role.sts_creds.session_token }}" + access_key: "{{ assumed_role.sts_creds.access_key }}" + secret_key: "{{ assumed_role.sts_creds.secret_key }}" + session_token: "{{ assumed_role.sts_creds.session_token }}" resource: i-xyzxyz01 state: present tags: diff --git a/sts_session_token.py b/sts_session_token.py index 8656a96fc3c..044a6367b58 100644 --- a/sts_session_token.py +++ b/sts_session_token.py @@ -8,7 +8,7 @@ --- module: sts_session_token version_added: 1.0.0 -short_description: Obtain a session token from the AWS Security Token Service +short_description: obtain a session token from the AWS Security Token Service description: - Obtain a session token from the AWS Security Token Service. author: @@ -29,7 +29,8 @@ - The value provided by the MFA device, if the trust policy of the user requires MFA. type: str notes: - - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token). 
+  - In order to use the session token in a following playbook task you must pass the I(access_key),
+    I(secret_key) and I(session_token) parameters to modules that should use the session credentials.
 extends_documentation_fragment:
   - amazon.aws.common.modules
   - amazon.aws.region.modules
@@ -42,7 +43,7 @@
   returned: always
   type: list
   sample:
-    access_key: ASXXXXXXXXXXXXXXXXXX
+    access_key: ASIAXXXXXXXXXXXXXXXX
     expiration: "2016-04-08T11:59:47+00:00"
     secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -59,14 +60,16 @@
 # (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
 - name: Get a session token
   community.aws.sts_session_token:
+    access_key: AKIA1EXAMPLE1EXAMPLE
+    secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE
     duration_seconds: 3600
   register: session_credentials

 - name: Use the session token obtained above to tag an instance in account 123456789012
   amazon.aws.ec2_tag:
-    aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
-    aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
-    security_token: "{{ session_credentials.sts_creds.session_token }}"
+    access_key: "{{ session_credentials.sts_creds.access_key }}"
+    secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+    session_token: "{{ session_credentials.sts_creds.session_token }}"
     resource: i-xyzxyz01
     state: present
     tags:

From 93e675ac1cdfc2497751806de9929cbe520b3e19 Mon Sep 17 00:00:00 2001
From: Felix Fontein
Date: Tue, 5 Sep 2023 22:12:45 +0200
Subject: [PATCH 673/683] Add route53_wait module. (#1904)

Add route53_wait module

SUMMARY
Add a route53_wait module. This allows waiting for updated/added Route 53 DNS
entries to propagate when the route53 module was called with wait=false.
Depends on ansible-collections/amazon.aws#1683, thus the tests shouldn't
really do anything right now.
ISSUE TYPE
New Module Pull Request
COMPONENT NAME
route53
Reviewed-by: Markus Bergholz
Reviewed-by: Alina Buzachis
---
 route53_wait.py | 185 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 185 insertions(+)
 create mode 100644 route53_wait.py

diff --git a/route53_wait.py b/route53_wait.py
new file mode 100644
index 00000000000..45b199887fd
--- /dev/null
+++ b/route53_wait.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: route53_wait
+version_added: 6.2.0
+short_description: wait for changes in Amazon's Route 53 DNS service to propagate
+description:
+  - When using M(amazon.aws.route53) with I(wait=false), this module allows you to wait for the
+    module's changes to propagate at a later point in time.
+options:
+  result:
+    aliases:
+      - results
+    description:
+      - The registered result of one or multiple M(amazon.aws.route53) invocations.
+    required: true
+    type: dict
+  wait_timeout:
+    description:
+      - How long to wait for the changes to be replicated, in seconds.
+      - This timeout will be used for every changed result in I(result).
+    default: 300
+    type: int
+  region:
+    description:
+      - This setting is ignored by the module. It is only present to make it possible to
+        have I(region) present in the module default group.
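+      # Accepting (and silently ignoring) region here lets playbooks that set region
+      # for all AWS modules through a module_defaults group include this module
+      # without a per-task override.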
+ type: str +author: + - Felix Fontein (@felixfontein) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +# +""" + +EXAMPLES = r""" +# Example when using a single route53 invocation: + +- name: Add new.foo.com as an A record with 3 IPs + amazon.aws.route53: + state: present + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: + - 1.1.1.1 + - 2.2.2.2 + - 3.3.3.3 + register: module_result + +# do something else + +- name: Wait for the changes of the above route53 invocation to propagate + community.aws.route53_wait: + result: "{{ module_result }}" + +######################################################################### +# Example when using a loop over amazon.aws.route53: + +- name: Add various A records + amazon.aws.route53: + state: present + zone: foo.com + record: "{{ item.record }}" + type: A + ttl: 300 + value: "{{ item.value }}" + loop: + - record: new.foo.com + value: 1.1.1.1 + - record: foo.foo.com + value: 2.2.2.2 + - record: bar.foo.com + value: + - 3.3.3.3 + - 4.4.4.4 + register: module_results + +# do something else + +- name: Wait for the changes of the above three route53 invocations to propagate + community.aws.route53_wait: + results: "{{ module_results }}" +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + +WAIT_RETRY = 5 # how many seconds to wait between propagation status polls + + +def detect_task_results(results): + if "results" in results: + # This must be the registered result of a loop of route53 tasks + for key in ("changed", "msg", "skipped"): + if key not in results: + raise ValueError(f"missing {key} key") + if not isinstance(results["results"], list): + raise ValueError("results is present, but not a list") + for index, result in enumerate(results["results"]): + if not isinstance(result, dict): + raise ValueError(f"result {index + 1} is not a dictionary") + for key in ("changed", "failed", "ansible_loop_var", "invocation"): + if key not in result: + raise ValueError(f"missing {key} key for result {index + 1}") + yield f" for result #{index + 1}", result + return + # This must be a single route53 task + for key in ("changed", "failed"): + if key not in results: + raise ValueError(f"missing {key} key") + yield "", results + + +def main(): + argument_spec = dict( + result=dict(type="dict", required=True, aliases=["results"]), + wait_timeout=dict(type="int", default=300), + region=dict(type="str"), # ignored + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + result_in = module.params["result"] + wait_timeout_in = module.params.get("wait_timeout") + + changed_results = [] + try: + for id, result in detect_task_results(result_in): + if result.get("wait_id"): + changed_results.append((id, result["wait_id"])) + except ValueError as exc: + module.fail_json( + msg=f"The value passed as result does not seem to be a registered route53 result: {to_native(exc)}" + ) + + # connect to the route53 endpoint + try: + route53 = module.client("route53") + except botocore.exceptions.HTTPClientError as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + for what, wait_id in changed_results: + try: + waiter = get_waiter(route53, "resource_record_sets_changed") 
+            waiter.wait(
+                Id=wait_id,
+                WaiterConfig=dict(
+                    Delay=WAIT_RETRY,
+                    MaxAttempts=wait_timeout_in // WAIT_RETRY,
+                ),
+            )
+        except botocore.exceptions.WaiterError as e:
+            module.fail_json_aws(e, msg=f"Timeout waiting for resource records changes{what} to be applied")
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to update records")
+        except Exception as e:
+            module.fail_json(msg=f"Unhandled exception. ({to_native(e)})")
+
+    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()

From d2e5dd74bde6ff497f14c11217992eaf0f581bf1 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Wed, 6 Sep 2023 13:10:19 +0200
Subject: [PATCH 674/683] Fix version_added for route53_wait (#1928)

Fix version_added for route53_wait

SUMMARY
We accidentally set version_added to 6.2 instead of 6.3
ISSUE TYPE
Docs Pull Request
COMPONENT NAME
route53_wait
ADDITIONAL INFORMATION
---
 route53_wait.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/route53_wait.py b/route53_wait.py
index 45b199887fd..6b72681d4c1 100644
--- a/route53_wait.py
+++ b/route53_wait.py
@@ -7,7 +7,7 @@
 DOCUMENTATION = r"""
 ---
 module: route53_wait
-version_added: 6.2.0
+version_added: 6.3.0
 short_description: wait for changes in Amazon's Route 53 DNS service to propagate
 description:
   - When using M(amazon.aws.route53) with I(wait=false), this module allows you to wait for the

From 61ded0ab0e5d0760ab1c14b22f7d1dd213e701e6 Mon Sep 17 00:00:00 2001
From: Thomas Bruckmann
Date: Thu, 7 Sep 2023 12:56:20 +0200
Subject: [PATCH 675/683] feat: adds platform config to task definition to support ARM (#1892)

feat: adds platform config to task definition to support ARM

SUMMARY
Fixes #1891
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
ecs_taskdefinition
ADDITIONAL INFORMATION
I've added the parameter the same way the other parameters are included, and
tested it against our AWS account.
Reviewed-by: Alina Buzachis
Reviewed-by: Markus Bergholz
Reviewed-by: Thomas Bruckmann
---
 ecs_taskdefinition.py | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py
index f150255fb89..4c4aefc2032 100644
--- a/ecs_taskdefinition.py
+++ b/ecs_taskdefinition.py
@@ -627,6 +627,27 @@
     expression:
       description: A cluster query language expression to apply to the constraint.
       type: str
+  runtime_platform:
+    version_added: 6.4.0
+    description:
+      - Runtime platform configuration for the task.
+    required: false
+    type: dict
+    default: {
+      "operatingSystemFamily": "LINUX",
+      "cpuArchitecture": "X86_64"
+    }
+    suboptions:
+      cpuArchitecture:
+        description: The CPU architecture to be used by the task.
+        type: str
+        required: false
+        choices: ['X86_64', 'ARM64']
+      operatingSystemFamily:
+        description: The operating system family to be used by the task.
+        type: str
+        required: false
+        choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE']
 extends_documentation_fragment:
   - amazon.aws.common.modules
   - amazon.aws.region.modules
@@ -813,6 +834,7 @@ def register_task(
         cpu,
         memory,
         placement_constraints,
+        runtime_platform,
     ):
         validated_containers = []
@@ -873,6 +895,8 @@ def register_task(
             params["executionRoleArn"] = execution_role_arn
         if placement_constraints:
             params["placementConstraints"] = placement_constraints
+        if runtime_platform:
+            params["runtimePlatform"] = runtime_platform

         try:
             response = self.ecs.register_task_definition(aws_retry=True, **params)
@@ -939,6 +963,24 @@ def main():
             elements="dict",
             options=dict(type=dict(type="str"), expression=dict(type="str")),
         ),
+        runtime_platform=dict(
+            required=False,
+            default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"},
+            type="dict",
+            options=dict(
+                cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]),
+                operatingSystemFamily=dict(
+                    required=False,
+                    choices=[
+                        "LINUX",
+                        "WINDOWS_SERVER_2019_FULL",
+                        "WINDOWS_SERVER_2019_CORE",
+                        "WINDOWS_SERVER_2022_FULL",
+                        "WINDOWS_SERVER_2022_CORE",
+                    ],
+                ),
+            ),
+        ),
     )

     module = AnsibleAWSModule(
@@ -1157,6 +1199,7 @@ def _task_definition_matches(
             module.params["cpu"],
             module.params["memory"],
             module.params["placement_constraints"],
+            module.params["runtime_platform"],
         )
         results["changed"] = True

From 33cf61d6db1445065d7c5d5af47c8d3af2185492 Mon Sep 17 00:00:00 2001
From: Mark Chappell
Date: Tue, 12 Sep 2023 07:37:30 +0200
Subject: [PATCH 676/683] Cleanup after renamed plugins (#1940)

Cleanup after renamed plugins

SUMMARY
Prior to 5.0.0 we renamed various modules. Cleanup the integration tests
(and some docs).
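As a practical footnote to the runtime platform change in the previous patch:
the module's runtime_platform option is passed through unchanged as the
runtimePlatform field of the ECS RegisterTaskDefinition API. A minimal boto3
sketch of that call, with illustrative family, image, and memory values (not
taken from the patch itself):

    import boto3

    ecs = boto3.client("ecs")
    ecs.register_task_definition(
        family="arm-demo",  # illustrative family name
        containerDefinitions=[
            {"name": "app", "image": "public.ecr.aws/docker/library/nginx:latest", "memory": 256},
        ],
        # Same shape as the module's runtime_platform option: ARM64 on Linux.
        runtimePlatform={"cpuArchitecture": "ARM64", "operatingSystemFamily": "LINUX"},
    )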
ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/modules/acm_certificate.py plugins/modules/acm_certificate_info.py plugins/modules/autoscaling_policy.py plugins/modules/codepipeline.py plugins/modules/storagegateway_info.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- acm_certificate.py | 12 ++++++------ acm_certificate_info.py | 10 +++++----- autoscaling_policy.py | 6 +++--- codepipeline.py | 2 +- storagegateway_info.py | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/acm_certificate.py b/acm_certificate.py index 4bf07f0321a..204dbecb03d 100644 --- a/acm_certificate.py +++ b/acm_certificate.py @@ -167,14 +167,14 @@ EXAMPLES = r""" - name: upload a self-signed certificate - community.aws.aws_acm: + community.aws.acm_certificate: certificate: "{{ lookup('file', 'cert.pem' ) }}" privateKey: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert" region: ap-southeast-2 # AWS region - name: create/update a certificate with a chain - community.aws.aws_acm: + community.aws.acm_certificate: certificate: "{{ lookup('file', 'cert.pem' ) }}" private_key: "{{ lookup('file', 'key.pem' ) }}" name_tag: my_cert @@ -188,25 +188,25 @@ var: cert_create.certificate.arn - name: delete the cert we just created - community.aws.aws_acm: + community.aws.acm_certificate: name_tag: my_cert state: absent region: ap-southeast-2 - name: delete a certificate with a particular ARN - community.aws.aws_acm: + community.aws.acm_certificate: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" state: absent region: ap-southeast-2 - name: delete all certificates with a particular domain name - community.aws.aws_acm: + community.aws.acm_certificate: domain_name: acm.ansible.com state: absent region: ap-southeast-2 - name: add tags to an existing certificate with a particular ARN - community.aws.aws_acm: + community.aws.acm_certificate: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" tags: Name: my_certificate diff --git a/acm_certificate_info.py b/acm_certificate_info.py index 420cd0e0f92..26d00e7e319 100644 --- a/acm_certificate_info.py +++ b/acm_certificate_info.py @@ -48,19 +48,19 @@ EXAMPLES = r""" - name: obtain all ACM certificates - community.aws.aws_acm_info: + community.aws.acm_certificate_info: - name: obtain all information for a single ACM certificate - community.aws.aws_acm_info: + community.aws.acm_certificate_info: domain_name: "*.example_com" - name: obtain all certificates pending validation - community.aws.aws_acm_info: + community.aws.acm_certificate_info: statuses: - PENDING_VALIDATION - name: obtain all certificates with tag Name=foo and myTag=bar - community.aws.aws_acm_info: + community.aws.acm_certificate_info: tags: Name: foo myTag: bar @@ -68,7 +68,7 @@ # The output is still a list of certificates, just one item long. 
- name: obtain information about a certificate with a particular ARN - community.aws.aws_acm_info: + community.aws.acm_certificate_info: certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" """ diff --git a/autoscaling_policy.py b/autoscaling_policy.py index 67f7ccbd54b..6d69d849226 100644 --- a/autoscaling_policy.py +++ b/autoscaling_policy.py @@ -223,7 +223,7 @@ asg_name: "application-asg" - name: create TargetTracking predefined policy - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "predefined-policy-1" policy_type: TargetTrackingScaling target_tracking_config: @@ -234,7 +234,7 @@ register: result - name: create TargetTracking predefined policy with resource_label - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "predefined-policy-1" policy_type: TargetTrackingScaling target_tracking_config: @@ -246,7 +246,7 @@ register: result - name: create TargetTrackingScaling custom policy - ec2_scaling_policy: + community.aws.autoscaling_policy: name: "custom-policy-1" policy_type: TargetTrackingScaling target_tracking_config: diff --git a/codepipeline.py b/codepipeline.py index 9fb42643df4..b1fe604768f 100644 --- a/codepipeline.py +++ b/codepipeline.py @@ -81,7 +81,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. # Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container) -- community.aws.aws_codepipeline: +- community.aws.codepipeline: name: my_deploy_pipeline role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service artifact_store: diff --git a/storagegateway_info.py b/storagegateway_info.py index 5ff72399786..55b7c4685d4 100644 --- a/storagegateway_info.py +++ b/storagegateway_info.py @@ -165,10 +165,10 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: "Get AWS storage gateway information" - community.aws.aws_sgw_info: + community.aws.storagegateway_info: - name: "Get AWS storage gateway information for region eu-west-3" - community.aws.aws_sgw_info: + community.aws.storagegateway_info: region: eu-west-3 """ From 3458b56bcba2426ff63c03981d8a25655bc9e186 Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 18 Sep 2023 18:31:07 +0200 Subject: [PATCH 677/683] Promote iam_role and iam_role_info modules --- iam_role.py => plugins/modules/IAMRole/iam_role.py | 0 iam_role_info.py => plugins/modules/IAMRole/iam_role_info.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename iam_role.py => plugins/modules/IAMRole/iam_role.py (100%) rename iam_role_info.py => plugins/modules/IAMRole/iam_role_info.py (100%) diff --git a/iam_role.py b/plugins/modules/IAMRole/iam_role.py similarity index 100% rename from iam_role.py rename to plugins/modules/IAMRole/iam_role.py diff --git a/iam_role_info.py b/plugins/modules/IAMRole/iam_role_info.py similarity index 100% rename from iam_role_info.py rename to plugins/modules/IAMRole/iam_role_info.py From eaeb0700a2a0e770c64888232cefc83cc2ef2d23 Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 18 Sep 2023 18:31:59 +0200 Subject: [PATCH 678/683] Moving to the final location --- plugins/modules/{IAMRole => }/iam_role.py | 0 plugins/modules/{IAMRole => }/iam_role_info.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename plugins/modules/{IAMRole => }/iam_role.py (100%) rename plugins/modules/{IAMRole => }/iam_role_info.py (100%) diff --git a/plugins/modules/IAMRole/iam_role.py b/plugins/modules/iam_role.py similarity index 100% rename from plugins/modules/IAMRole/iam_role.py rename to plugins/modules/iam_role.py diff --git a/plugins/modules/IAMRole/iam_role_info.py b/plugins/modules/iam_role_info.py similarity index 100% rename from plugins/modules/IAMRole/iam_role_info.py rename to plugins/modules/iam_role_info.py From bb0728ea93ff259c0d4d530de72cb9b7adc079bf Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 18 Sep 2023 18:37:52 +0200 Subject: [PATCH 679/683] Remove unwanted --- __init__.py | 0 accessanalyzer_validate_policy_info.py | 235 -- acm_certificate.py | 556 ----- acm_certificate_info.py | 305 --- api_gateway.py | 508 ---- api_gateway_domain.py | 344 --- api_gateway_info.py | 156 -- application_autoscaling_policy.py | 551 ----- autoscaling_complete_lifecycle_action.py | 101 - autoscaling_instance_refresh.py | 267 --- autoscaling_instance_refresh_info.py | 218 -- autoscaling_launch_config.py | 734 ------ autoscaling_launch_config_find.py | 208 -- autoscaling_launch_config_info.py | 230 -- autoscaling_lifecycle_hook.py | 308 --- autoscaling_policy.py | 610 ----- autoscaling_scheduled_action.py | 323 --- aws_region_info.py | 98 - batch_compute_environment.py | 497 ---- batch_job_definition.py | 470 ---- batch_job_queue.py | 306 --- cloudformation_exports_info.py | 79 - cloudformation_stack_set.py | 787 ------ cloudfront_distribution.py | 2530 -------------------- cloudfront_distribution_info.py | 408 ---- cloudfront_invalidation.py | 271 --- cloudfront_origin_access_identity.py | 291 --- cloudfront_response_headers_policy.py | 296 --- codebuild_project.py | 491 ---- codecommit_repository.py | 243 -- codepipeline.py | 312 --- config_aggregation_authorization.py | 156 -- config_aggregator.py | 232 -- config_delivery_channel.py | 245 -- config_recorder.py | 196 -- config_rule.py | 284 --- data_pipeline.py | 636 ----- directconnect_confirm_connection.py | 157 -- directconnect_connection.py | 354 
--- directconnect_gateway.py | 372 --- directconnect_link_aggregation_group.py | 485 ---- directconnect_virtual_interface.py | 538 ----- dms_endpoint.py | 691 ------ dms_replication_subnet_group.py | 218 -- dynamodb_table.py | 1092 --------- dynamodb_ttl.py | 158 -- ec2_ami_copy.py | 223 -- ec2_carrier_gateway.py | 259 -- ec2_carrier_gateway_info.py | 159 -- ec2_customer_gateway.py | 245 -- ec2_customer_gateway_info.py | 140 -- ec2_launch_template.py | 863 ------- ec2_placement_group.py | 235 -- ec2_placement_group_info.py | 129 - ec2_snapshot_copy.py | 183 -- ec2_transit_gateway.py | 516 ---- ec2_transit_gateway_info.py | 253 -- ec2_transit_gateway_vpc_attachment.py | 344 --- ec2_transit_gateway_vpc_attachment_info.py | 203 -- ec2_vpc_egress_igw.py | 192 -- ec2_vpc_nacl.py | 607 ----- ec2_vpc_nacl_info.py | 227 -- ec2_vpc_peer.py | 611 ----- ec2_vpc_peering_info.py | 258 -- ec2_vpc_vgw.py | 534 ----- ec2_vpc_vgw_info.py | 190 -- ec2_vpc_vpn.py | 866 ------- ec2_vpc_vpn_info.py | 218 -- ec2_win_password.py | 220 -- ecs_attribute.py | 305 --- ecs_cluster.py | 370 --- ecs_ecr.py | 597 ----- ecs_service.py | 1273 ---------- ecs_service_info.py | 244 -- ecs_tag.py | 221 -- ecs_task.py | 480 ---- ecs_taskdefinition.py | 1239 ---------- ecs_taskdefinition_info.py | 378 --- efs.py | 806 ------- efs_info.py | 393 --- efs_tag.py | 186 -- eks_cluster.py | 307 --- eks_fargate_profile.py | 365 --- eks_nodegroup.py | 753 ------ elasticache.py | 552 ----- elasticache_info.py | 496 ---- elasticache_parameter_group.py | 372 --- elasticache_snapshot.py | 212 -- elasticache_subnet_group.py | 259 -- elasticbeanstalk_app.py | 225 -- elb_classic_lb_info.py | 239 -- elb_instance.py | 403 ---- elb_network_lb.py | 501 ---- elb_target.py | 347 --- elb_target_group.py | 1058 -------- elb_target_group_info.py | 323 --- elb_target_info.py | 395 --- glue_connection.py | 416 ---- glue_crawler.py | 447 ---- glue_job.py | 491 ---- iam_access_key.py | 311 --- iam_access_key_info.py | 122 - iam_group.py | 413 ---- iam_managed_policy.py | 379 --- iam_mfa_device_info.py | 103 - iam_password_policy.py | 220 -- iam_saml_federation.py | 235 -- iam_server_certificate.py | 380 --- iam_server_certificate_info.py | 160 -- inspector_target.py | 251 -- kinesis_stream.py | 1144 --------- lightsail.py | 403 ---- lightsail_snapshot.py | 205 -- lightsail_static_ip.py | 142 -- mq_broker.py | 547 ----- mq_broker_config.py | 224 -- mq_broker_info.py | 120 - mq_user.py | 271 --- mq_user_info.py | 153 -- msk_cluster.py | 795 ------ msk_config.py | 306 --- networkfirewall.py | 348 --- networkfirewall_info.py | 234 -- networkfirewall_policy.py | 439 ---- networkfirewall_policy_info.py | 257 -- networkfirewall_rule_group.py | 828 ------- networkfirewall_rule_group_info.py | 446 ---- opensearch.py | 1329 ---------- opensearch_info.py | 528 ---- redshift.py | 692 ------ redshift_cross_region_snapshots.py | 197 -- redshift_info.py | 344 --- redshift_subnet_group.py | 275 --- route53_wait.py | 185 -- s3_bucket_info.py | 625 ----- s3_bucket_notification.py | 391 --- s3_cors.py | 168 -- s3_lifecycle.py | 690 ------ s3_logging.py | 215 -- s3_metrics_configuration.py | 209 -- s3_sync.py | 556 ----- s3_website.py | 331 --- secretsmanager_secret.py | 658 ----- ses_identity.py | 575 ----- ses_identity_policy.py | 203 -- ses_rule_set.py | 258 -- sns.py | 248 -- sns_topic.py | 717 ------ sns_topic_info.py | 170 -- sqs_queue.py | 520 ---- ssm_inventory_info.py | 114 - ssm_parameter.py | 594 ----- stepfunctions_state_machine.py | 221 -- 
stepfunctions_state_machine_execution.py | 189 -- storagegateway_info.py | 359 --- sts_assume_role.py | 172 -- sts_session_token.py | 144 -- waf_condition.py | 770 ------ waf_info.py | 142 -- waf_rule.py | 364 --- waf_web_acl.py | 367 --- wafv2_ip_set.py | 340 --- wafv2_ip_set_info.py | 144 -- wafv2_resources.py | 164 -- wafv2_resources_info.py | 118 - wafv2_rule_group.py | 416 ---- wafv2_rule_group_info.py | 147 -- wafv2_web_acl.py | 596 ----- wafv2_web_acl_info.py | 150 -- 169 files changed, 65906 deletions(-) delete mode 100644 __init__.py delete mode 100644 accessanalyzer_validate_policy_info.py delete mode 100644 acm_certificate.py delete mode 100644 acm_certificate_info.py delete mode 100644 api_gateway.py delete mode 100644 api_gateway_domain.py delete mode 100644 api_gateway_info.py delete mode 100644 application_autoscaling_policy.py delete mode 100644 autoscaling_complete_lifecycle_action.py delete mode 100644 autoscaling_instance_refresh.py delete mode 100644 autoscaling_instance_refresh_info.py delete mode 100644 autoscaling_launch_config.py delete mode 100644 autoscaling_launch_config_find.py delete mode 100644 autoscaling_launch_config_info.py delete mode 100644 autoscaling_lifecycle_hook.py delete mode 100644 autoscaling_policy.py delete mode 100644 autoscaling_scheduled_action.py delete mode 100644 aws_region_info.py delete mode 100644 batch_compute_environment.py delete mode 100644 batch_job_definition.py delete mode 100644 batch_job_queue.py delete mode 100644 cloudformation_exports_info.py delete mode 100644 cloudformation_stack_set.py delete mode 100644 cloudfront_distribution.py delete mode 100644 cloudfront_distribution_info.py delete mode 100644 cloudfront_invalidation.py delete mode 100644 cloudfront_origin_access_identity.py delete mode 100644 cloudfront_response_headers_policy.py delete mode 100644 codebuild_project.py delete mode 100644 codecommit_repository.py delete mode 100644 codepipeline.py delete mode 100644 config_aggregation_authorization.py delete mode 100644 config_aggregator.py delete mode 100644 config_delivery_channel.py delete mode 100644 config_recorder.py delete mode 100644 config_rule.py delete mode 100644 data_pipeline.py delete mode 100644 directconnect_confirm_connection.py delete mode 100644 directconnect_connection.py delete mode 100644 directconnect_gateway.py delete mode 100644 directconnect_link_aggregation_group.py delete mode 100644 directconnect_virtual_interface.py delete mode 100644 dms_endpoint.py delete mode 100644 dms_replication_subnet_group.py delete mode 100644 dynamodb_table.py delete mode 100644 dynamodb_ttl.py delete mode 100644 ec2_ami_copy.py delete mode 100644 ec2_carrier_gateway.py delete mode 100644 ec2_carrier_gateway_info.py delete mode 100644 ec2_customer_gateway.py delete mode 100644 ec2_customer_gateway_info.py delete mode 100644 ec2_launch_template.py delete mode 100644 ec2_placement_group.py delete mode 100644 ec2_placement_group_info.py delete mode 100644 ec2_snapshot_copy.py delete mode 100644 ec2_transit_gateway.py delete mode 100644 ec2_transit_gateway_info.py delete mode 100644 ec2_transit_gateway_vpc_attachment.py delete mode 100644 ec2_transit_gateway_vpc_attachment_info.py delete mode 100644 ec2_vpc_egress_igw.py delete mode 100644 ec2_vpc_nacl.py delete mode 100644 ec2_vpc_nacl_info.py delete mode 100644 ec2_vpc_peer.py delete mode 100644 ec2_vpc_peering_info.py delete mode 100644 ec2_vpc_vgw.py delete mode 100644 ec2_vpc_vgw_info.py delete mode 100644 ec2_vpc_vpn.py delete mode 100644 
ec2_vpc_vpn_info.py delete mode 100644 ec2_win_password.py delete mode 100644 ecs_attribute.py delete mode 100644 ecs_cluster.py delete mode 100644 ecs_ecr.py delete mode 100644 ecs_service.py delete mode 100644 ecs_service_info.py delete mode 100644 ecs_tag.py delete mode 100644 ecs_task.py delete mode 100644 ecs_taskdefinition.py delete mode 100644 ecs_taskdefinition_info.py delete mode 100644 efs.py delete mode 100644 efs_info.py delete mode 100644 efs_tag.py delete mode 100644 eks_cluster.py delete mode 100644 eks_fargate_profile.py delete mode 100644 eks_nodegroup.py delete mode 100644 elasticache.py delete mode 100644 elasticache_info.py delete mode 100644 elasticache_parameter_group.py delete mode 100644 elasticache_snapshot.py delete mode 100644 elasticache_subnet_group.py delete mode 100644 elasticbeanstalk_app.py delete mode 100644 elb_classic_lb_info.py delete mode 100644 elb_instance.py delete mode 100644 elb_network_lb.py delete mode 100644 elb_target.py delete mode 100644 elb_target_group.py delete mode 100644 elb_target_group_info.py delete mode 100644 elb_target_info.py delete mode 100644 glue_connection.py delete mode 100644 glue_crawler.py delete mode 100644 glue_job.py delete mode 100644 iam_access_key.py delete mode 100644 iam_access_key_info.py delete mode 100644 iam_group.py delete mode 100644 iam_managed_policy.py delete mode 100644 iam_mfa_device_info.py delete mode 100644 iam_password_policy.py delete mode 100644 iam_saml_federation.py delete mode 100644 iam_server_certificate.py delete mode 100644 iam_server_certificate_info.py delete mode 100644 inspector_target.py delete mode 100644 kinesis_stream.py delete mode 100644 lightsail.py delete mode 100644 lightsail_snapshot.py delete mode 100644 lightsail_static_ip.py delete mode 100644 mq_broker.py delete mode 100644 mq_broker_config.py delete mode 100644 mq_broker_info.py delete mode 100644 mq_user.py delete mode 100644 mq_user_info.py delete mode 100644 msk_cluster.py delete mode 100644 msk_config.py delete mode 100644 networkfirewall.py delete mode 100644 networkfirewall_info.py delete mode 100644 networkfirewall_policy.py delete mode 100644 networkfirewall_policy_info.py delete mode 100644 networkfirewall_rule_group.py delete mode 100644 networkfirewall_rule_group_info.py delete mode 100644 opensearch.py delete mode 100644 opensearch_info.py delete mode 100644 redshift.py delete mode 100644 redshift_cross_region_snapshots.py delete mode 100644 redshift_info.py delete mode 100644 redshift_subnet_group.py delete mode 100644 route53_wait.py delete mode 100644 s3_bucket_info.py delete mode 100644 s3_bucket_notification.py delete mode 100644 s3_cors.py delete mode 100644 s3_lifecycle.py delete mode 100644 s3_logging.py delete mode 100644 s3_metrics_configuration.py delete mode 100644 s3_sync.py delete mode 100644 s3_website.py delete mode 100644 secretsmanager_secret.py delete mode 100644 ses_identity.py delete mode 100644 ses_identity_policy.py delete mode 100644 ses_rule_set.py delete mode 100644 sns.py delete mode 100644 sns_topic.py delete mode 100644 sns_topic_info.py delete mode 100644 sqs_queue.py delete mode 100644 ssm_inventory_info.py delete mode 100644 ssm_parameter.py delete mode 100644 stepfunctions_state_machine.py delete mode 100644 stepfunctions_state_machine_execution.py delete mode 100644 storagegateway_info.py delete mode 100644 sts_assume_role.py delete mode 100644 sts_session_token.py delete mode 100644 waf_condition.py delete mode 100644 waf_info.py delete mode 100644 waf_rule.py delete mode 
100644 waf_web_acl.py
 delete mode 100644 wafv2_ip_set.py
 delete mode 100644 wafv2_ip_set_info.py
 delete mode 100644 wafv2_resources.py
 delete mode 100644 wafv2_resources_info.py
 delete mode 100644 wafv2_rule_group.py
 delete mode 100644 wafv2_rule_group_info.py
 delete mode 100644 wafv2_web_acl.py
 delete mode 100644 wafv2_web_acl_info.py

diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/accessanalyzer_validate_policy_info.py b/accessanalyzer_validate_policy_info.py
deleted file mode 100644
index fab777175e7..00000000000
--- a/accessanalyzer_validate_policy_info.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: accessanalyzer_validate_policy_info
-version_added: 5.0.0
-short_description: Performs validation of IAM policies
-description:
-  - Requests the validation of a policy and returns a list of findings.
-options:
-  policy:
-    description:
-      - A properly JSON-formatted policy.
-    type: json
-    aliases: ['policy_document']
-    required: true
-  locale:
-    description:
-      - The locale to use for localizing the findings.
-      - Supported locales include C(DE), C(EN), C(ES), C(FR), C(IT), C(JA), C(KO), C(PT_BR),
-        C(ZH_CN) and C(ZH_TW).
-      - For more information about supported locales see the AWS Documentation
-        C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html)
-    type: str
-    required: false
-    default: 'EN'
-  policy_type:
-    description:
-      - The type of policy to validate.
-      - C(identity) policies grant permissions to IAM principals, including both managed and inline
-        policies for IAM roles, users, and groups.
-      - C(resource) policies grant permissions on AWS resources, including trust policies
-        for IAM roles and bucket policies for S3 buckets.
-    type: str
-    choices: ['identity', 'resource', 'service_control']
-    default: 'identity'
-    required: false
-  resource_type:
-    description:
-      - The type of resource to attach to your resource policy.
-      - Ignored unless I(policy_type=resource).
-      - Supported resource types include C(AWS::S3::Bucket), C(AWS::S3::AccessPoint),
-        C(AWS::S3::MultiRegionAccessPoint) and C(AWS::S3ObjectLambda::AccessPoint).
-      - For resource types not supported as valid values, IAM Access Analyzer runs policy checks
-        that apply to all resource policies.
-      - For more information about supported resource types see the AWS Documentation
-        C(https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_ValidatePolicy.html)
-    type: str
-    required: false
-  results_filter:
-    description:
-      - Filter the findings and limit them to specific finding types.
-    type: list
-    elements: str
-    choices: ['error', 'security', 'suggestion', 'warning']
-    required: false
-author:
-  - Mark Chappell (@tremble)
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Validate a policy
-- name: Validate a simple IAM policy
-  community.aws.accessanalyzer_validate_policy_info:
-    policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
-"""
-
-RETURN = r"""
-findings:
-  description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks.
-  returned: success
-  type: list
-  elements: dict
-  contains:
-    finding_details:
-      description:
-        - A localized message describing the finding.
-      type: str
-      returned: success
-      sample: 'Resource ARN does not match the expected ARN format. Update the resource portion of the ARN.'
-    finding_type:
-      description:
-        - The severity of the finding.
-      type: str
-      returned: success
-      sample: 'ERROR'
-    issue_code:
-      description:
-        - An identifier for the type of issue found.
-      type: str
-      returned: success
-      sample: 'INVALID_ARN_RESOURCE'
-    learn_more_link:
-      description:
-        - A link to additional information about the finding type.
-      type: str
-      returned: success
-      sample: 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-policy-checks.html'
-    locations:
-      description:
-        - The location of the item resulting in the recommendations.
-      type: list
-      returned: success
-      elements: dict
-      contains:
-        path:
-          description: A path in a policy, represented as a sequence of path elements.
-          type: list
-          elements: dict
-          returned: success
-          sample: [{"value": "Statement"}, {"index": 0}, {"value": "Resource"}, {"index": 0}]
-        span:
-          description:
-            - The part of the policy that the finding refers to.
-            - Note - when using lookups or passing dictionaries to I(policy) the policy string may be
-              converted to a single line of JSON, changing the column, line and offset values.
-          type: dict
-          contains:
-            start:
-              description: The start position of the span.
-              type: dict
-              returned: success
-              contains:
-                column:
-                  description: The column of the position, starting from C(0).
-                  type: int
-                  returned: success
-                line:
-                  description: The line of the position, starting from C(1).
-                  type: int
-                  returned: success
-                offset:
-                  description: The offset within the policy that corresponds to the position, starting from C(0).
-                  type: int
-                  returned: success
-            end:
-              description: The end position of the span.
-              type: dict
-              returned: success
-              contains:
-                column:
-                  description: The column of the position, starting from C(0).
-                  type: int
-                  returned: success
-                line:
-                  description: The line of the position, starting from C(1).
-                  type: int
-                  returned: success
-                offset:
-                  description: The offset within the policy that corresponds to the position, starting from C(0).
-                  type: int
-                  returned: success
-"""
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def filter_findings(findings, type_filter):
-    if not type_filter:
-        return findings
-
-    # Convert type_filter to the findingType strings returned by the API
-    filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING")
-    allowed_types = [filter_map[t] for t in type_filter]
-
-    filtered_results = [f for f in findings if f.get("findingType", None) in allowed_types]
-    return filtered_results
-
-
-def main():
-    # Botocore only supports specific values for locale and resource_type, however the supported
-    # values are likely to be expanded; let's avoid hard-coding limits which might not hold true in
-    # the long term...
-    argument_spec = dict(
-        policy=dict(required=True, type="json", aliases=["policy_document"]),
-        locale=dict(required=False, type="str", default="EN"),
-        policy_type=dict(
-            required=False, type="str", default="identity", choices=["identity", "resource", "service_control"]
-        ),
-        resource_type=dict(required=False, type="str"),
-        results_filter=dict(
-            required=False, type="list", elements="str", choices=["error", "security", "suggestion", "warning"]
-        ),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    policy_type_map = dict(
-        identity="IDENTITY_POLICY", resource="RESOURCE_POLICY", service_control="SERVICE_CONTROL_POLICY"
-    )
-
-    policy = module.params.get("policy")
-    policy_type = policy_type_map[module.params.get("policy_type")]
-    locale = module.params.get("locale").upper()
-    resource_type = module.params.get("resource_type")
-    results_filter = module.params.get("results_filter")
-
-    try:
-        client = module.client("accessanalyzer", retry_decorator=AWSRetry.jittered_backoff())
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to connect to AWS")
-
-    params = dict(locale=locale, policyDocument=policy, policyType=policy_type)
-    if policy_type == "RESOURCE_POLICY" and resource_type:
-        params["policyType"] = resource_type
-
-    results = client.validate_policy(aws_retry=True, **params)
-
-    findings = filter_findings(results.get("findings", []), results_filter)
-    results["findings"] = findings
-
-    results = camel_dict_to_snake_dict(results)
-
-    module.exit_json(changed=False, **results)
-
-
-if __name__ == "__main__":
-    main()

diff --git a/acm_certificate.py b/acm_certificate.py
deleted file mode 100644
index 204dbecb03d..00000000000
--- a/acm_certificate.py
+++ /dev/null
@@ -1,556 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2019 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Author:
-#   - Matthew Davis
-#     on behalf of Telstra Corporation Limited
-
-DOCUMENTATION = r"""
----
-module: acm_certificate
-short_description: Upload and delete certificates in the AWS Certificate Manager service
-version_added: 1.0.0
-description:
-  - >
-    Import and delete certificates in Amazon Web Service's Certificate
-    Manager (AWS ACM).
-  - >
-    This module does not currently interact with AWS-provided certificates.
-    It currently only manages certificates provided to AWS by the user.
-  - The ACM API allows users to upload multiple certificates for the same domain
-    name, and even multiple identical certificates. This module attempts to
-    restrict such freedoms, to be idempotent, as per the Ansible philosophy.
-    It does this by applying AWS resource "Name" tags to ACM certificates.
-  - >
-    When I(state=present),
-    if there is one certificate in ACM
-    with a C(Name) tag equal to the I(name_tag) parameter,
-    and an identical body and chain,
-    this task will succeed without effect.
-  - >
-    When I(state=present),
-    if there is one certificate in ACM
-    with a I(Name) tag equal to the I(name_tag) parameter,
-    and a different body,
-    this task will overwrite that certificate.
-  - >
-    When I(state=present),
-    if there are multiple certificates in ACM
-    with a I(Name) tag equal to the I(name_tag) parameter,
-    this task will fail.
-  - >
-    When I(state=absent) and I(certificate_arn) is defined,
-    this module will delete the ACM resource with that ARN if it exists in this
-    region, and succeed without effect if it doesn't exist.
-  - >
-    When I(state=absent) and I(domain_name) is defined, this module will delete
-    all ACM resources in this AWS region with a corresponding domain name.
-    If there are none, it will succeed without effect.
-  - >
-    When I(state=absent) and I(certificate_arn) is not defined,
-    and I(domain_name) is not defined, this module will delete all ACM resources
-    in this AWS region with a corresponding I(Name) tag.
-    If there are none, it will succeed without effect.
-  - >
-    Note that this may not work properly with keys of size 4096 bits, due to a
-    limitation of the ACM API.
-  - Prior to release 5.0.0 this module was called C(community.aws.aws_acm).
-    The usage did not change.
-options:
-  certificate:
-    description:
-      - The body of the PEM encoded public certificate.
-      - Required when I(state) is not C(absent) and the certificate does not exist.
-      - >
-        If your certificate is in a file,
-        use C(lookup('file', 'path/to/cert.pem')).
-    type: str
-  certificate_arn:
-    description:
-      - The ARN of a certificate in ACM to modify or delete.
-      - >
-        If I(state=present), the certificate with the specified ARN can be updated.
-        For example, this can be used to add/remove tags to an existing certificate.
-      - >
-        If I(state=absent), you must provide one of
-        I(certificate_arn), I(domain_name) or I(name_tag).
-      - >
-        If I(state=absent) and no resource exists with this ARN in this region,
-        the task will succeed with no effect.
-      - >
-        If I(state=absent) and the corresponding resource exists in a different
-        region, this task may report success without deleting that resource.
-    type: str
-    aliases: [arn]
-  certificate_chain:
-    description:
-      - The body of the PEM encoded chain for your certificate.
-      - >
-        If your certificate chain is in a file,
-        use C(lookup('file', 'path/to/chain.pem')).
-      - Ignored when I(state=absent).
-    type: str
-  domain_name:
-    description:
-      - The domain name of the certificate.
-      - >
-        If I(state=absent) and I(domain_name) is specified,
-        this task will delete all ACM certificates with this domain.
-      - >
-        Exactly one of I(domain_name), I(name_tag) and I(certificate_arn)
-        must be provided.
-      - >
-        If I(state=present) this must not be specified.
-        (Since the domain name is encoded within the public certificate's body.)
-    type: str
-    aliases: [domain]
-  name_tag:
-    description:
-      - >
-        The unique identifier for tagging resources using AWS tags,
-        with key I(Name).
-      - This can be any set of characters accepted by AWS for tag values.
-      - >
-        This is to ensure Ansible can treat certificates idempotently,
-        even though the ACM API allows duplicate certificates.
-      - If I(state=present), this must be specified.
-      - >
-        If I(state=absent) and I(name_tag) is specified,
-        this task will delete all ACM certificates with this Name tag.
-      - >
-        If I(state=absent), you must provide exactly one of
-        I(certificate_arn), I(domain_name) or I(name_tag).
-      - >
-        If both I(name_tag) and the 'Name' tag in I(tags) are set,
-        the values must be the same.
-      - >
-        If the 'Name' tag in I(tags) is not set and I(name_tag) is set,
-        the I(name_tag) value is copied to I(tags).
-    type: str
-    aliases: [name]
-  private_key:
-    description:
-      - The body of the PEM encoded private key.
-      - Required when I(state=present) and the certificate does not exist.
-      - Ignored when I(state=absent).
-      - >
-        If your private key is in a file,
- - > - If your private key is in a file, - use C(lookup('file', 'path/to/key.pem')). - type: str - state: - description: - - > - If I(state=present), the specified public certificate and private key - will be uploaded, with I(Name) tag equal to I(name_tag). - - > - If I(state=absent), any certificates in this region - with a corresponding I(domain_name), I(name_tag) or I(certificate_arn) - will be deleted. - choices: [present, absent] - default: present - type: str - -notes: - - Support for I(tags) and I(purge_tags) was added in release 3.2.0 -author: - - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -- name: upload a self-signed certificate - community.aws.acm_certificate: - certificate: "{{ lookup('file', 'cert.pem' ) }}" - privateKey: "{{ lookup('file', 'key.pem' ) }}" - name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert" - region: ap-southeast-2 # AWS region - -- name: create/update a certificate with a chain - community.aws.acm_certificate: - certificate: "{{ lookup('file', 'cert.pem' ) }}" - private_key: "{{ lookup('file', 'key.pem' ) }}" - name_tag: my_cert - certificate_chain: "{{ lookup('file', 'chain.pem' ) }}" - state: present - region: ap-southeast-2 - register: cert_create - -- name: print ARN of cert we just created - ansible.builtin.debug: - var: cert_create.certificate.arn - -- name: delete the cert we just created - community.aws.acm_certificate: - name_tag: my_cert - state: absent - region: ap-southeast-2 - -- name: delete a certificate with a particular ARN - community.aws.acm_certificate: - certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" - state: absent - region: ap-southeast-2 - -- name: delete all certificates with a particular domain name - community.aws.acm_certificate: - domain_name: acm.ansible.com - state: absent - region: ap-southeast-2 - -- name: add tags to an existing certificate with a particular ARN - community.aws.acm_certificate: - certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" - tags: - Name: my_certificate - Application: search - Environment: development - purge_tags: true -""" - -RETURN = r""" -certificate: - description: Information about the certificate which was uploaded - type: complex - returned: when I(state=present) - contains: - arn: - description: The ARN of the certificate in ACM - type: str - returned: when I(state=present) and not in check mode - sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" - domain_name: - description: The domain name encoded within the public certificate - type: str - returned: when I(state=present) - sample: acm.ansible.com -arns: - description: A list of the ARNs of the certificates in ACM which were deleted - type: list - elements: str - returned: when I(state=absent) - sample: - - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901" -""" - - -import base64 -from copy import deepcopy -import re # regex library - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils._text import to_text - -from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.tagging import 
ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
-    if tags is None:
-        return (False, existing_tags)
-
-    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
-    changed = bool(tags_to_add or tags_to_remove)
-    if tags_to_add and not module.check_mode:
-        try:
-            client.add_tags_to_certificate(
-                CertificateArn=resource_arn,
-                Tags=ansible_dict_to_boto3_tag_list(tags_to_add),
-            )
-        except (
-            botocore.exceptions.ClientError,
-            botocore.exceptions.BotoCoreError,
-        ) as e:
-            module.fail_json_aws(e, f"Couldn't add tags to certificate {resource_arn}")
-    if tags_to_remove and not module.check_mode:
-        # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys.
-        tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove]
-        try:
-            client.remove_tags_from_certificate(
-                CertificateArn=resource_arn,
-                Tags=tags_list,
-            )
-        except (
-            botocore.exceptions.ClientError,
-            botocore.exceptions.BotoCoreError,
-        ) as e:
-            module.fail_json_aws(e, f"Couldn't remove tags from certificate {resource_arn}")
-    new_tags = deepcopy(existing_tags)
-    for key, value in tags_to_add.items():
-        new_tags[key] = value
-    for key in tags_to_remove:
-        new_tags.pop(key, None)
-    return (changed, new_tags)
-
-
-# Takes in two text arguments
-# Each a PEM encoded certificate
-# Or a chain of PEM encoded certificates
-# May include some lines between each chain in the cert, e.g. "Subject: ..."
-# Returns True iff the chains/certs are functionally identical (including chain order)
-def chain_compare(module, a, b):
-    chain_a_pem = pem_chain_split(module, a)
-    chain_b_pem = pem_chain_split(module, b)
-
-    if len(chain_a_pem) != len(chain_b_pem):
-        return False
-
-    # Chain length is the same
-    for ca, cb in zip(chain_a_pem, chain_b_pem):
-        der_a = PEM_body_to_DER(module, ca)
-        der_b = PEM_body_to_DER(module, cb)
-        if der_a != der_b:
-            return False
-
-    return True
-
-
-# Takes in PEM encoded data with no headers
-# Returns the equivalent DER as a byte array
-def PEM_body_to_DER(module, pem):
-    try:
-        der = base64.b64decode(to_text(pem))
-    except (ValueError, TypeError) as e:
-        module.fail_json_aws(e, msg="Unable to decode certificate chain")
-    return der
-
-
-# Store this globally to avoid repeated recompilation
-pem_chain_split_regex = re.compile(
-    r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?"
-)
-
-
-# Use regex to split up a chain or single cert into an array of base64 encoded data
-# Using "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----"
-# Noting that some chains have non-PEM data in between each cert
-# This function returns only what's between the headers, excluding the headers
-def pem_chain_split(module, pem):
-    pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
-
-    if len(pem_arr) == 0:
-        # This happens if the regex doesn't match at all
-        module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
-
-    return pem_arr
-
-
-def update_imported_certificate(client, module, acm, old_cert, desired_tags):
-    """
-    Update the existing certificate that was previously imported in ACM.
- """ - module.debug("Existing certificate found in ACM") - if ("tags" not in old_cert) or ("Name" not in old_cert["tags"]): - # shouldn't happen - module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert) - if module.params.get("name_tag") is not None and (old_cert["tags"]["Name"] != module.params.get("name_tag")): - # This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name', - # and the 'Name' tag in the AWS API does not match the ansible 'name_tag'. - module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert) - if "certificate" not in old_cert: - # shouldn't happen - module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert) - - cert_arn = None - # Are the existing certificate in ACM and the local certificate the same? - same = True - if module.params.get("certificate") is not None: - same &= chain_compare(module, old_cert["certificate"], module.params["certificate"]) - if module.params["certificate_chain"]: - # Need to test this - # not sure if Amazon appends the cert itself to the chain when self-signed - same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate_chain"]) - else: - # When there is no chain with a cert - # it seems Amazon returns the cert itself as the chain - same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate"]) - - if same: - module.debug("Existing certificate in ACM is the same") - cert_arn = old_cert["certificate_arn"] - changed = False - else: - absent_args = ["certificate", "name_tag", "private_key"] - if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json( - msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" - ) - module.debug("Existing certificate in ACM is different, overwriting") - changed = True - if module.check_mode: - cert_arn = old_cert["certificate_arn"] - # note: returned domain will be the domain of the previous cert - else: - # update cert in ACM - cert_arn = acm.import_certificate( - client, - module, - certificate=module.params["certificate"], - private_key=module.params["private_key"], - certificate_chain=module.params["certificate_chain"], - arn=old_cert["certificate_arn"], - tags=desired_tags, - ) - return (changed, cert_arn) - - -def import_certificate(client, module, acm, desired_tags): - """ - Import a certificate to ACM. - """ - # Validate argument requirements - absent_args = ["certificate", "name_tag", "private_key"] - cert_arn = None - if sum([(module.params[a] is not None) for a in absent_args]) < 3: - module.fail_json( - msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified" - ) - module.debug("No certificate in ACM. 
Creating new one.") - changed = True - if module.check_mode: - domain = "example.com" - module.exit_json(certificate=dict(domain_name=domain), changed=True) - else: - cert_arn = acm.import_certificate( - client, - module, - certificate=module.params["certificate"], - private_key=module.params["private_key"], - certificate_chain=module.params["certificate_chain"], - tags=desired_tags, - ) - return (changed, cert_arn) - - -def ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags): - cert_arn = None - changed = False - if len(certificates) > 1: - msg = f"More than one certificate with Name={module.params['name_tag']} exists in ACM in this region" - module.fail_json(msg=msg, certificates=certificates) - elif len(certificates) == 1: - # Update existing certificate that was previously imported to ACM. - (changed, cert_arn) = update_imported_certificate(client, module, acm, certificates[0], desired_tags) - else: # len(certificates) == 0 - # Import new certificate to ACM. - (changed, cert_arn) = import_certificate(client, module, acm, desired_tags) - - # Add/remove tags to/from certificate - try: - existing_tags = boto3_tag_list_to_ansible_dict( - client.list_tags_for_certificate(CertificateArn=cert_arn)["Tags"] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for certificate") - - purge_tags = module.params.get("purge_tags") - (c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags) - changed |= c - domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn) - module.exit_json(certificate=dict(domain_name=domain, arn=cert_arn, tags=new_tags), changed=changed) - - -def ensure_certificates_absent(client, module, acm, certificates): - for cert in certificates: - if not module.check_mode: - acm.delete_certificate(client, module, cert["certificate_arn"]) - module.exit_json(arns=[cert["certificate_arn"] for cert in certificates], changed=(len(certificates) > 0)) - - -def main(): - argument_spec = dict( - certificate=dict(), - certificate_arn=dict(aliases=["arn"]), - certificate_chain=dict(), - domain_name=dict(aliases=["domain"]), - name_tag=dict(aliases=["name"]), - private_key=dict(no_log=True), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - state=dict(default="present", choices=["present", "absent"]), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - acm = ACMServiceManager(module) - - # Check argument requirements - if module.params["state"] == "present": - # at least one of these should be specified. 
- absent_args = ["certificate_arn", "domain_name", "name_tag"] - if sum([(module.params[a] is not None) for a in absent_args]) < 1: - for a in absent_args: - module.debug(f"{a} is {module.params[a]}") - module.fail_json( - msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" - ) - else: # absent - # exactly one of these should be specified - absent_args = ["certificate_arn", "domain_name", "name_tag"] - if sum([(module.params[a] is not None) for a in absent_args]) != 1: - for a in absent_args: - module.debug(f"{a} is {module.params[a]}") - module.fail_json( - msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" - ) - - filter_tags = None - desired_tags = None - if module.params.get("tags") is not None: - desired_tags = module.params["tags"] - else: - # Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed - module.params["purge_tags"] = False - if module.params.get("name_tag") is not None: - # The module was originally implemented to filter certificates based on the 'Name' tag. - # Other tags are not used to filter certificates. - # It would make sense to replace the existing name_tag, domain, certificate_arn attributes - # with a 'filter' attribute, but that would break backwards-compatibility. - filter_tags = dict(Name=module.params["name_tag"]) - if desired_tags is not None: - if "Name" in desired_tags: - if desired_tags["Name"] != module.params["name_tag"]: - module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'") - else: - desired_tags["Name"] = module.params["name_tag"] - else: - desired_tags = deepcopy(filter_tags) - - client = module.client("acm") - - # fetch the list of certificates currently in ACM - certificates = acm.get_certificates( - client=client, - module=module, - domain_name=module.params["domain_name"], - arn=module.params["certificate_arn"], - only_tags=filter_tags, - ) - - module.debug(f"Found {len(certificates)} corresponding certificates in ACM") - if module.params["state"] == "present": - ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) - - else: # state == absent - ensure_certificates_absent(client, module, acm, certificates) - - -if __name__ == "__main__": - # tests() - main() diff --git a/acm_certificate_info.py b/acm_certificate_info.py deleted file mode 100644 index 26d00e7e319..00000000000 --- a/acm_certificate_info.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: acm_certificate_info -short_description: Retrieve certificate information from AWS Certificate Manager service -version_added: 1.0.0 -description: - - Retrieve information for ACM certificates. - - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API. - - Prior to release 5.0.0 this module was called C(community.aws.aws_acm_info). - The usage did not change. -options: - certificate_arn: - description: - - If provided, the results will be filtered to show only the certificate with this ARN. - - If no certificate with this ARN exists, this task will fail. - - If a certificate with this ARN exists in a different region, this task will fail. 
- aliases: - - arn - type: str - domain_name: - description: - - The domain name of an ACM certificate to limit the search to. - aliases: - - name - type: str - statuses: - description: - - Status to filter the certificate results. - choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED'] - type: list - elements: str - tags: - description: - - Filter results to show only certificates with tags that match all the tags specified here. - type: dict -author: - - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: obtain all ACM certificates - community.aws.acm_certificate_info: - -- name: obtain all information for a single ACM certificate - community.aws.acm_certificate_info: - domain_name: "*.example_com" - -- name: obtain all certificates pending validation - community.aws.acm_certificate_info: - statuses: - - PENDING_VALIDATION - -- name: obtain all certificates with tag Name=foo and myTag=bar - community.aws.acm_certificate_info: - tags: - Name: foo - myTag: bar - - -# The output is still a list of certificates, just one item long. -- name: obtain information about a certificate with a particular ARN - community.aws.acm_certificate_info: - certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12" - -""" - -RETURN = r""" -certificates: - description: A list of certificates - returned: always - type: complex - contains: - certificate: - description: The ACM Certificate body - returned: when certificate creation is complete - sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n' - type: str - certificate_arn: - description: Certificate ARN - returned: always - sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc - type: str - certificate_chain: - description: Full certificate chain for the certificate - returned: when certificate creation is complete - sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...' - type: str - created_at: - description: Date certificate was created - returned: always - sample: '2017-08-15T10:31:19+10:00' - type: str - domain_name: - description: Domain name for the certificate - returned: always - sample: '*.example.com' - type: str - domain_validation_options: - description: Options used by ACM to validate the certificate - returned: when certificate type is AMAZON_ISSUED - type: complex - contains: - domain_name: - description: Fully qualified domain name of the certificate - returned: always - sample: example.com - type: str - validation_domain: - description: The domain name ACM used to send validation emails - returned: always - sample: example.com - type: str - validation_emails: - description: A list of email addresses that ACM used to send domain validation emails - returned: always - sample: - - admin@example.com - - postmaster@example.com - type: list - elements: str - validation_status: - description: Validation status of the domain - returned: always - sample: SUCCESS - type: str - failure_reason: - description: Reason certificate request failed - returned: only when certificate issuing failed - type: str - sample: NO_AVAILABLE_CONTACTS - in_use_by: - description: A list of ARNs for the AWS resources that are using the certificate. 
- returned: always - sample: [] - type: list - elements: str - issued_at: - description: Date certificate was issued - returned: always - sample: '2017-01-01T00:00:00+10:00' - type: str - issuer: - description: Issuer of the certificate - returned: always - sample: Amazon - type: str - key_algorithm: - description: Algorithm used to generate the certificate - returned: always - sample: RSA-2048 - type: str - not_after: - description: Date after which the certificate is not valid - returned: always - sample: '2019-01-01T00:00:00+10:00' - type: str - not_before: - description: Date before which the certificate is not valid - returned: always - sample: '2017-01-01T00:00:00+10:00' - type: str - renewal_summary: - description: Information about managed renewal process - returned: when certificate is issued by Amazon and a renewal has been started - type: complex - contains: - domain_validation_options: - description: Options used by ACM to validate the certificate - returned: when certificate type is AMAZON_ISSUED - type: complex - contains: - domain_name: - description: Fully qualified domain name of the certificate - returned: always - sample: example.com - type: str - validation_domain: - description: The domain name ACM used to send validation emails - returned: always - sample: example.com - type: str - validation_emails: - description: A list of email addresses that ACM used to send domain validation emails - returned: always - sample: - - admin@example.com - - postmaster@example.com - type: list - elements: str - validation_status: - description: Validation status of the domain - returned: always - sample: SUCCESS - type: str - renewal_status: - description: Status of the domain renewal - returned: always - sample: PENDING_AUTO_RENEWAL - type: str - revocation_reason: - description: Reason for certificate revocation - returned: when the certificate has been revoked - sample: SUPERCEDED - type: str - revoked_at: - description: Date certificate was revoked - returned: when the certificate has been revoked - sample: '2017-09-01T10:00:00+10:00' - type: str - serial: - description: The serial number of the certificate - returned: always - sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f - type: str - signature_algorithm: - description: Algorithm used to sign the certificate - returned: always - sample: SHA256WITHRSA - type: str - status: - description: Status of the certificate in ACM - returned: always - sample: ISSUED - type: str - subject: - description: The name of the entity that is associated with the public key contained in the certificate - returned: always - sample: CN=*.example.com - type: str - subject_alternative_names: - description: Subject Alternative Names for the certificate - returned: always - sample: - - '*.example.com' - type: list - elements: str - tags: - description: Tags associated with the certificate - returned: always - type: dict - sample: - Application: helloworld - Environment: test - type: - description: The source of the certificate - returned: always - sample: AMAZON_ISSUED - type: str -""" - -from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - certificate_arn=dict(aliases=["arn"]), - domain_name=dict(aliases=["name"]), - statuses=dict( - type="list", - elements="str", - choices=[ - "PENDING_VALIDATION", - "ISSUED", - "INACTIVE", - "EXPIRED", - 
"VALIDATION_TIMED_OUT", - "REVOKED", - "FAILED", - ], - ), - tags=dict(type="dict"), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - acm_info = ACMServiceManager(module) - - client = module.client("acm") - - certificates = acm_info.get_certificates( - client, - module, - domain_name=module.params["domain_name"], - statuses=module.params["statuses"], - arn=module.params["certificate_arn"], - only_tags=module.params["tags"], - ) - - if module.params["certificate_arn"] and len(certificates) != 1: - module.fail_json(msg=f"No certificate exists in this region with ARN {module.params['certificate_arn']}") - - module.exit_json(certificates=certificates) - - -if __name__ == "__main__": - main() diff --git a/api_gateway.py b/api_gateway.py deleted file mode 100644 index 0339bf7a329..00000000000 --- a/api_gateway.py +++ /dev/null @@ -1,508 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: api_gateway -version_added: 1.0.0 -short_description: Manage AWS API Gateway APIs -description: - - Allows for the management of API Gateway APIs. - - Normally you should give the api_id since there is no other - stable guaranteed unique identifier for the API. If you do - not give api_id then a new API will be created each time - this is run. - - swagger_file and swagger_text are passed directly on to AWS - transparently whilst swagger_dict is an ansible dict which is - converted to JSON before the API definitions are uploaded. - - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway). - The usage did not change. -options: - api_id: - description: - - The ID of the API you want to manage. - type: str - state: - description: Create or delete API Gateway. - default: present - choices: [ 'present', 'absent' ] - type: str - swagger_file: - description: - - JSON or YAML file containing swagger definitions for API. - Exactly one of I(swagger_file), I(swagger_text) or I(swagger_dict) must - be present. - type: path - aliases: ['src', 'api_file'] - swagger_text: - description: - - Swagger definitions for API in JSON or YAML as a string direct - from playbook. - type: str - swagger_dict: - description: - - Swagger definitions API ansible dictionary which will be - converted to JSON and uploaded. - type: json - stage: - description: - - The name of the stage the API should be deployed to. - type: str - deploy_desc: - description: - - Description of the deployment. - - Recorded and visible in the AWS console. - default: Automatic deployment by Ansible. - type: str - cache_enabled: - description: - - Enable API GW caching of backend responses. - type: bool - default: false - cache_size: - description: - - Size in GB of the API GW cache, becomes effective when cache_enabled is true. - choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237'] - type: str - default: '0.5' - stage_variables: - description: - - ENV variables for the stage. Define a dict of key values pairs for variables. - type: dict - default: {} - stage_canary_settings: - description: - - Canary settings for the deployment of the stage. - - 'Dict with following settings:' - - 'C(percentTraffic): The percent (0-100) of traffic diverted to a canary deployment.' - - 'C(deploymentId): The ID of the canary deployment.' - - 'C(stageVariableOverrides): Stage variables overridden for a canary release deployment.' 
- - 'C(useStageCache): A Boolean flag to indicate whether the canary deployment uses the stage cache or not.' - - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage) - type: dict - default: {} - tracing_enabled: - description: - - Specifies whether active tracing with X-ray is enabled for the API GW stage. - type: bool - default: false - endpoint_type: - description: - - Type of endpoint configuration. - - Use C(EDGE) for an edge optimized API endpoint, - C(REGIONAL) for just a regional deploy or C(PRIVATE) for a private API. - - This flag will only be used when creating a new API Gateway setup, not for updates. - choices: ['EDGE', 'REGIONAL', 'PRIVATE'] - type: str - default: EDGE - name: - description: - - The name of the RestApi. - type: str - version_added: 6.2.0 - lookup: - description: - - Look up API gateway by either I(tags) (and I(name) if supplied) or by I(api_id). - - If I(lookup=tag) and I(tags) is not specified then no lookup for an existing API gateway - is performed and a new API gateway will be created. - - When using I(lookup=tag), multiple matches being found will result in a failure and no changes will be made. - - To change the tags of a API gateway use I(lookup=id). - default: tag - choices: [ 'tag', 'id' ] - type: str - version_added: 6.2.0 -author: - - 'Michael De La Rue (@mikedlr)' -notes: - - 'Tags are used to uniquely identify API gateway when the I(api_id) is not supplied. version_added=6.2.0' -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" -- name: Setup AWS API Gateway setup on AWS and deploy API definition - community.aws.api_gateway: - swagger_file: my_api.yml - stage: production - cache_enabled: true - cache_size: '1.6' - tracing_enabled: true - endpoint_type: EDGE - state: present - -- name: Update API definition to deploy new version - community.aws.api_gateway: - api_id: 'abc123321cba' - swagger_file: my_api.yml - deploy_desc: Make auth fix available. - cache_enabled: true - cache_size: '1.6' - endpoint_type: EDGE - state: present - -- name: Update API definitions and settings and deploy as canary - community.aws.api_gateway: - api_id: 'abc123321cba' - swagger_file: my_api.yml - cache_enabled: true - cache_size: '6.1' - canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True } - state: present - -- name: Delete API gateway - amazon.aws.api_gateway: - name: ansible-rest-api - tags: - automation: ansible - lookup: tags - state: absent -""" - -RETURN = r""" -api_id: - description: API id of the API endpoint created - returned: success - type: str - sample: '0ln4zq7p86' -configure_response: - description: AWS response from the API configure call - returned: success - type: dict - sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" } -deploy_response: - description: AWS response from the API deploy call - returned: success - type: dict - sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." 
} -resource_actions: - description: Actions performed against AWS API - returned: always - type: list - sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"] -""" - -import json -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - - -def main(): - argument_spec = dict( - api_id=dict(type="str", required=False), - state=dict(type="str", default="present", choices=["present", "absent"]), - swagger_file=dict(type="path", default=None, aliases=["src", "api_file"]), - swagger_dict=dict(type="json", default=None), - swagger_text=dict(type="str", default=None), - stage=dict(type="str", default=None), - deploy_desc=dict(type="str", default="Automatic deployment by Ansible."), - cache_enabled=dict(type="bool", default=False), - cache_size=dict(type="str", default="0.5", choices=["0.5", "1.6", "6.1", "13.5", "28.4", "58.2", "118", "237"]), - stage_variables=dict(type="dict", default={}), - stage_canary_settings=dict(type="dict", default={}), - tracing_enabled=dict(type="bool", default=False), - endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), - name=dict(type="str"), - lookup=dict(type="str", choices=["tag", "id"], default="tag"), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - ) - - mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841 - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - ) - - api_id = module.params.get("api_id") - state = module.params.get("state") # noqa: F841 - swagger_file = module.params.get("swagger_file") - swagger_dict = module.params.get("swagger_dict") - swagger_text = module.params.get("swagger_text") - endpoint_type = module.params.get("endpoint_type") - name = module.params.get("name") - tags = module.params.get("tags") - lookup = module.params.get("lookup") - - client = module.client("apigateway") - - changed = True # for now it will stay that way until we can sometimes avoid change - conf_res = None - dep_res = None - del_res = None - - if state == "present": - if api_id is None: - # lookup API gateway using tags - if tags and lookup == "tag": - rest_api = get_api_by_tags(client, module, name, tags) - if rest_api: - api_id = rest_api["id"] - if module.check_mode: - module.exit_json(changed=True, msg="Create/update operation skipped - running in check mode.") - if api_id is None: - api_data = get_api_definitions( - module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text - ) - # create new API gateway as non were provided and/or found using lookup=tag - api_id = create_empty_api(module, client, name, endpoint_type, tags) - conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data) - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - if tags: - if not conf_res: - conf_res = get_rest_api(module, client, api_id=api_id) - 
tag_changed, tag_result = ensure_apigateway_tags( - module, client, api_id=api_id, current_tags=conf_res.get("tags"), new_tags=tags, purge_tags=purge_tags - ) - if tag_changed: - changed |= tag_changed - conf_res = tag_result - if state == "absent": - if api_id is None: - if lookup != "tag" or not tags: - module.fail_json( - msg="API gateway id must be supplied to delete API gateway or provided tag with lookup=tag to identify API gateway id." - ) - rest_api = get_api_by_tags(client, module, name, tags) - if not rest_api: - module.exit_json(changed=False, msg="No API gateway identified with tags provided") - api_id = rest_api["id"] - elif not describe_api(client, module, api_id): - module.exit_json(changed=False, msg="API gateway id '{0}' does not exist.".format(api_id)) - - if module.check_mode: - module.exit_json(changed=True, msg="Delete operation skipped - running in check mode.", api_id=api_id) - - del_res = delete_rest_api(module, client, api_id) - - exit_args = {"changed": changed, "api_id": api_id} - - if conf_res is not None: - exit_args["configure_response"] = camel_dict_to_snake_dict(conf_res) - if dep_res is not None: - exit_args["deploy_response"] = camel_dict_to_snake_dict(dep_res) - if del_res is not None: - exit_args["delete_response"] = camel_dict_to_snake_dict(del_res) - - module.exit_json(**exit_args) - - -def ensure_apigateway_tags(module, client, api_id, current_tags, new_tags, purge_tags): - changed = False - tag_result = {} - tags_to_set, tags_to_delete = compare_aws_tags(current_tags, new_tags, purge_tags) - if tags_to_set or tags_to_delete: - changed = True - apigateway_arn = f"arn:aws:apigateway:{module.region}::/restapis/{api_id}" - # Remove tags from Resource - if tags_to_delete: - client.untag_resource(resourceArn=apigateway_arn, tagKeys=tags_to_delete) - # add new tags to resource - if tags_to_set: - client.tag_resource(resourceArn=apigateway_arn, tags=tags_to_set) - # Describe API gateway - tag_result = get_rest_api(module, client, api_id=api_id) - return changed, tag_result - - -def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None): - apidata = None - if swagger_file is not None: - try: - with open(swagger_file) as f: - apidata = f.read() - except OSError as e: - msg = f"Failed trying to read swagger file {str(swagger_file)}: {str(e)}" - module.fail_json(msg=msg, exception=traceback.format_exc()) - if swagger_dict is not None: - apidata = json.dumps(swagger_dict) - if swagger_text is not None: - apidata = swagger_text - - if apidata is None: - module.fail_json(msg="module error - no swagger info provided") - return apidata - - -def get_rest_api(module, client, api_id): - try: - response = client.get_rest_api(restApiId=api_id) - response.pop("ResponseMetadata", None) - return response - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"failed to get REST API with api_id={api_id}") - - -def create_empty_api(module, client, name, endpoint_type, tags): - """ - creates a new empty API ready to be configured. The description is - temporarily set to show the API as incomplete but should be - updated when the API is configured. 
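    Illustrative sketch only (placeholder names and values; region and
    credentials are assumed to come from the environment) of the underlying
    boto3 call this wraps:

        import boto3

        client = boto3.client("apigateway")
        response = client.create_rest_api(
            name="ansible-temp-api",
            description="Incomplete API creation by ansible api_gateway module",
            endpointConfiguration={"types": ["EDGE"]},
            tags={"team": "example"},  # hypothetical tag
        )
        api_id = response["id"]  # the id later passed to put_rest_api()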
- """ - desc = "Incomplete API creation by ansible api_gateway module" - try: - rest_api_name = name or "ansible-temp-api" - awsret = create_api(client, name=rest_api_name, description=desc, endpoint_type=endpoint_type, tags=tags) - except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="creating API") - return awsret["id"] - - -def delete_rest_api(module, client, api_id): - """ - Deletes entire REST API setup - """ - try: - delete_response = delete_api(client, api_id) - except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg=f"deleting API {api_id}") - return delete_response - - -def ensure_api_in_correct_state(module, client, api_id, api_data): - """Make sure that we have the API configured and deployed as instructed. - - This function first configures the API correctly uploading the - swagger definitions and then deploys those. Configuration and - deployment should be closely tied because there is only one set of - definitions so if we stop, they may be updated by someone else and - then we deploy the wrong configuration. - """ - - configure_response = None - try: - configure_response = configure_api(client, api_id, api_data=api_data) - configure_response.pop("ResponseMetadata", None) - except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg=f"configuring API {api_id}") - - deploy_response = None - - stage = module.params.get("stage") - if stage: - try: - deploy_response = create_deployment(client, api_id, **module.params) - deploy_response.pop("ResponseMetadata", None) - except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - msg = f"deploying api {api_id} to stage {stage}" - module.fail_json_aws(e, msg) - - return configure_response, deploy_response - - -def get_api_by_tags(client, module, name, tags): - count = 0 - result = None - for api in list_apis(client): - if name and api["name"] != name: - continue - api_tags = api.get("tags", {}) - if all((tag_key in api_tags and api_tags[tag_key] == tag_value for tag_key, tag_value in tags.items())): - result = api - count += 1 - - if count > 1: - args = "Tags" - if name: - args += " and name" - module.fail_json(msg="{0} provided do not identify a unique API gateway".format(args)) - return result - - -retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]} - - -@AWSRetry.jittered_backoff(**retry_params) -def create_api(client, name, description=None, endpoint_type=None, tags=None): - params = {"name": name} - if description: - params["description"] = description - if endpoint_type: - params["endpointConfiguration"] = {"types": [endpoint_type]} - if tags: - params["tags"] = tags - return client.create_rest_api(**params) - - -@AWSRetry.jittered_backoff(**retry_params) -def delete_api(client, api_id): - return client.delete_rest_api(restApiId=api_id) - - -@AWSRetry.jittered_backoff(**retry_params) -def configure_api(client, api_id, api_data=None, mode="overwrite"): - return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data) - - -@AWSRetry.jittered_backoff(**retry_params) -def create_deployment(client, rest_api_id, **params): - canary_settings = params.get("stage_canary_settings") - - if canary_settings and len(canary_settings) > 0: - result = client.create_deployment( - restApiId=rest_api_id, - stageName=params.get("stage"), - 
description=params.get("deploy_desc"), - cacheClusterEnabled=params.get("cache_enabled"), - cacheClusterSize=params.get("cache_size"), - variables=params.get("stage_variables"), - canarySettings=canary_settings, - tracingEnabled=params.get("tracing_enabled"), - ) - else: - result = client.create_deployment( - restApiId=rest_api_id, - stageName=params.get("stage"), - description=params.get("deploy_desc"), - cacheClusterEnabled=params.get("cache_enabled"), - cacheClusterSize=params.get("cache_size"), - variables=params.get("stage_variables"), - tracingEnabled=params.get("tracing_enabled"), - ) - - return result - - -@AWSRetry.jittered_backoff(**retry_params) -def list_apis(client): - paginator = client.get_paginator("get_rest_apis") - return paginator.paginate().build_full_result().get("items", []) - - -@AWSRetry.jittered_backoff(**retry_params) -def describe_api(client, module, rest_api_id): - try: - response = client.get_rest_api(restApiId=rest_api_id) - response.pop("ResponseMetadata") - except is_boto3_error_code("ResourceNotFoundException"): - response = {} - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get Rest API '{0}'.".format(rest_api_id)) - return response - - -if __name__ == "__main__": - main() diff --git a/api_gateway_domain.py b/api_gateway_domain.py deleted file mode 100644 index 10a1ca1f2f7..00000000000 --- a/api_gateway_domain.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: api_gateway_domain -short_description: Manage AWS API Gateway custom domains -description: - - Manages API Gateway custom domains for API GW Rest APIs. - - AWS API Gateway custom domain setups use CloudFront behind the scenes. - So you will get a CloudFront distribution as a result, configured to be aliased with your domain. - - Prior to release 5.0.0 this module was called C(community.aws.aws_api_gateway_domain). - The usage did not change. -version_added: '3.3.0' -author: - - 'Stefan Horning (@stefanhorning)' -options: - domain_name: - description: - - Domain name you want to use for your API GW deployment. - required: true - type: str - certificate_arn: - description: - - AWS Certificate Manger (ACM) TLS certificate ARN. - type: str - required: true - security_policy: - description: - - Set allowed TLS versions through AWS defined policies. Currently only C(TLS_1_0) and C(TLS_1_2) are available. - default: TLS_1_2 - choices: ['TLS_1_0', 'TLS_1_2'] - type: str - endpoint_type: - description: - - API endpoint configuration for domain. Use EDGE for edge-optimized endpoint, or use C(REGIONAL) or C(PRIVATE). - default: EDGE - choices: ['EDGE', 'REGIONAL', 'PRIVATE'] - type: str - domain_mappings: - description: - - Map your domain base paths to your API GW REST APIs, that you previously created. Use provided ID of the API setup and the release stage. - - "domain_mappings should be a list of dictionaries containing three keys: base_path, rest_api_id and stage." - - "Example: I([{ base_path: v1, rest_api_id: abc123, stage: production }])" - - if you want base path to be just I(/) omit the param completely or set it to empty string. - required: true - type: list - elements: dict - state: - description: - - Create or delete custom domain setup. 
- default: present - choices: [ 'present', 'absent' ] - type: str -notes: - - Does not create a DNS entry on Route53, for that use the M(community.aws.route53) module. - - Only supports TLS certificates from AWS ACM that can just be referenced by the ARN, while the AWS API still offers (deprecated) - options to add own Certificates. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Setup endpoint for a custom domain for your API Gateway HTTP API - community.aws.api_gateway_domain: - domain_name: myapi.foobar.com - certificate_arn: 'arn:aws:acm:us-east-1:1231123123:certificate/8bd89412-abc123-xxxxx' - security_policy: TLS_1_2 - endpoint_type: EDGE - domain_mappings: - - { rest_api_id: abc123, stage: production } - state: present - register: api_gw_domain_result - -- name: Create a DNS record for your custom domain on route 53 (using route53 module) - community.aws.route53: - record: myapi.foobar.com - value: "{{ api_gw_domain_result.response.domain.distribution_domain_name }}" - type: A - alias: true - zone: foobar.com - alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}" - command: create -""" - -RETURN = r""" -response: - description: The data returned by create_domain_name (or update and delete) and create_base_path_mapping methods by boto3. - returned: success - type: dict - sample: - domain: - { - domain_name: mydomain.com, - certificate_arn: 'arn:aws:acm:xxxxxx', - distribution_domain_name: xxxx.cloudfront.net, - distribution_hosted_zone_id: ABC123123, - endpoint_configuration: { types: ['EDGE'] }, - domain_name_status: 'AVAILABLE', - security_policy: TLS_1_2, - tags: {} - } - path_mappings: [ - { base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' } - ] -""" - -import copy - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError - from botocore.exceptions import EndpointConnectionError -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_domain(module, client): - domain_name = module.params.get("domain_name") - result = {} - try: - result["domain"] = get_domain_name(client, domain_name) - result["path_mappings"] = get_domain_mappings(client, domain_name) - except is_boto3_error_code("NotFoundException"): - return None - except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="getting API GW domain") - return camel_dict_to_snake_dict(result) - - -def create_domain(module, client): - path_mappings = module.params.get("domain_mappings", []) - domain_name = module.params.get("domain_name") - result = {"domain": {}, "path_mappings": []} - - try: - result["domain"] = create_domain_name( - module, - client, - domain_name, - module.params.get("certificate_arn"), - module.params.get("endpoint_type"), - module.params.get("security_policy"), - ) - - for mapping in path_mappings: - base_path = mapping.get("base_path", "") - 
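            # Editorial note (values hypothetical): each mapping entry is a dict
            # like {"base_path": "v1", "rest_api_id": "abc123", "stage": "production"};
            # per the module documentation, an omitted or empty base_path maps the
            # domain root ("/"), which is why "" is used as the default above.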
rest_api_id = mapping.get("rest_api_id") - stage = mapping.get("stage") - if rest_api_id is None or stage is None: - module.fail_json("Every domain mapping needs a rest_api_id and stage name") - - result["path_mappings"].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage)) - - except (ClientError, BotoCoreError, EndpointConnectionError) as e: - module.fail_json_aws(e, msg="creating API GW domain") - return camel_dict_to_snake_dict(result) - - -def update_domain(module, client, existing_domain): - domain_name = module.params.get("domain_name") - result = existing_domain - result["updated"] = False - - domain = existing_domain.get("domain") - # Compare only the relevant set of domain arguments, - # as get_domain_name gathers all kinds of state information that can't be set anyway. - # Also, this module doesn't support the custom TLS cert setup params, as they are already deprecated and would add complexity. - existing_domain_settings = { - "certificate_arn": domain.get("certificate_arn"), - "security_policy": domain.get("security_policy"), - "endpoint_type": domain.get("endpoint_configuration").get("types")[0], - } - specified_domain_settings = { - "certificate_arn": module.params.get("certificate_arn"), - "security_policy": module.params.get("security_policy"), - "endpoint_type": module.params.get("endpoint_type"), - } - - if specified_domain_settings != existing_domain_settings: - try: - result["domain"] = update_domain_name( - client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings) - ) - result["updated"] = True - except (ClientError, BotoCoreError, EndpointConnectionError) as e: - module.fail_json_aws(e, msg="updating API GW domain") - - existing_mappings = copy.deepcopy(existing_domain.get("path_mappings", [])) - # Clean out `base_path: "(none)"` elements from dicts, as those won't match the specified mappings - for mapping in existing_mappings: - if mapping.get("base_path", "missing") == "(none)": - mapping.pop("base_path") - - specified_mappings = copy.deepcopy(module.params.get("domain_mappings", [])) - # Clean out `base_path: ""` elements from dicts, as those won't match the existing mappings - for mapping in specified_mappings: - if mapping.get("base_path", "missing") == "": - mapping.pop("base_path") - - if specified_mappings != existing_mappings: - try: - # When the lists mismatch, delete all existing mappings before adding the new ones as specified - for mapping in existing_domain.get("path_mappings", []): - delete_domain_mapping(client, domain_name, mapping["base_path"]) - for mapping in module.params.get("domain_mappings", []): - result["path_mappings"] = add_domain_mapping( - client, domain_name, mapping.get("base_path", ""), mapping.get("rest_api_id"), mapping.get("stage") - ) - result["updated"] = True - except (ClientError, BotoCoreError, EndpointConnectionError) as e: - module.fail_json_aws(e, msg="updating API GW domain mapping") - - return camel_dict_to_snake_dict(result) - - -def delete_domain(module, client): - domain_name = module.params.get("domain_name") - try: - result = delete_domain_name(client, domain_name) - except (ClientError, BotoCoreError, EndpointConnectionError) as e: - module.fail_json_aws(e, msg="deleting API GW domain") - return camel_dict_to_snake_dict(result) - - -retry_params = {"delay": 5, "backoff": 1.2} - - -@AWSRetry.jittered_backoff(**retry_params) -def get_domain_name(client, domain_name): - return client.get_domain_name(domainName=domain_name) - - -@AWSRetry.jittered_backoff(**retry_params) -def
get_domain_mappings(client, domain_name): - return client.get_base_path_mappings(domainName=domain_name, limit=200).get("items", []) - - -@AWSRetry.jittered_backoff(**retry_params) -def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy): - endpoint_configuration = {"types": [endpoint_type]} - - if endpoint_type == "EDGE": - return client.create_domain_name( - domainName=domain_name, - certificateArn=certificate_arn, - endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy, - ) - else: - # Use regionalCertificateArn for regional domain deploys - return client.create_domain_name( - domainName=domain_name, - regionalCertificateArn=certificate_arn, - endpointConfiguration=endpoint_configuration, - securityPolicy=security_policy, - ) - - -@AWSRetry.jittered_backoff(**retry_params) -def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage): - return client.create_base_path_mapping( - domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage - ) - - -@AWSRetry.jittered_backoff(**retry_params) -def update_domain_name(client, domain_name, **kwargs): - patch_operations = [] - - for key, value in kwargs.items(): - path = "/" + key - if key == "endpointType": - continue - patch_operations.append({"op": "replace", "path": path, "value": value}) - - return client.update_domain_name(domainName=domain_name, patchOperations=patch_operations) - - -@AWSRetry.jittered_backoff(**retry_params) -def delete_domain_name(client, domain_name): - return client.delete_domain_name(domainName=domain_name) - - -@AWSRetry.jittered_backoff(**retry_params) -def delete_domain_mapping(client, domain_name, base_path): - return client.delete_base_path_mapping(domainName=domain_name, basePath=base_path) - - -def main(): - argument_spec = dict( - domain_name=dict(type="str", required=True), - certificate_arn=dict(type="str", required=True), - security_policy=dict(type="str", default="TLS_1_2", choices=["TLS_1_0", "TLS_1_2"]), - endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]), - domain_mappings=dict(type="list", required=True, elements="dict"), - state=dict(type="str", default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=False, - ) - - client = module.client("apigateway") - - state = module.params.get("state") - changed = False - - if state == "present": - existing_domain = get_domain(module, client) - if existing_domain is not None: - result = update_domain(module, client, existing_domain) - changed = result["updated"] - else: - result = create_domain(module, client) - changed = True - if state == "absent": - result = delete_domain(module, client) - changed = True - - exit_args = {"changed": changed} - - if result is not None: - exit_args["response"] = result - - module.exit_json(**exit_args) - - -if __name__ == "__main__": - main() diff --git a/api_gateway_info.py b/api_gateway_info.py deleted file mode 100644 index 5c904544b9c..00000000000 --- a/api_gateway_info.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: api_gateway_info -version_added: 6.1.0 -short_description: Gather information about ec2 instances in AWS -description: - - Gather information about ec2 instances in AWS -options: - 
ids: - description: - - The list of the string identifiers of the associated RestApis. - type: list - elements: str -author: - - Aubin Bikouo (@abikouo) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" ---- -# List all API gateway -- name: List all for a specific function - community.aws.api_gateway_info: - -# Get information for a specific API gateway -- name: List all for a specific function - community.aws.api_gateway_info: - ids: - - 012345678a - - abcdefghij -""" - -RETURN = r""" ---- -rest_apis: - description: A list of API gateway. - returned: always - type: complex - contains: - name: - description: The name of the API. - returned: success - type: str - sample: 'ansible-tmp-api' - id: - description: The identifier of the API. - returned: success - type: str - sample: 'abcdefgh' - api_key_source: - description: The source of the API key for metering requests according to a usage plan. - returned: success - type: str - sample: 'HEADER' - created_date: - description: The timestamp when the API was created. - returned: success - type: str - sample: "2020-01-01T11:37:59+00:00" - description: - description: The description of the API. - returned: success - type: str - sample: "Automatic deployment by Ansible." - disable_execute_api_endpoint: - description: Specifies whether clients can invoke your API by using the default execute-api endpoint. - returned: success - type: bool - sample: False - endpoint_configuration: - description: The endpoint configuration of this RestApi showing the endpoint types of the API. - returned: success - type: dict - sample: {"types": ["REGIONAL"]} - tags: - description: The collection of tags. - returned: success - type: dict - sample: {"key": "value"} -""" - - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - - -@AWSRetry.jittered_backoff() -def _list_rest_apis(connection, **params): - paginator = connection.get_paginator("get_rest_apis") - return paginator.paginate(**params).build_full_result().get("items", []) - - -@AWSRetry.jittered_backoff() -def _describe_rest_api(connection, module, rest_api_id): - try: - response = connection.get_rest_api(restApiId=rest_api_id) - response.pop("ResponseMetadata") - except is_boto3_error_code("ResourceNotFoundException"): - response = {} - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get Rest API '{0}'.".format(rest_api_id)) - return response - - -def main(): - argument_spec = dict( - ids=dict(type="list", elements="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - connection = module.client("apigateway") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - ids = module.params.get("ids") - if ids: - rest_apis = [] - for rest_api_id in ids: - result = _describe_rest_api(connection, module, rest_api_id) - if result: - rest_apis.append(result) - else: - rest_apis = 
_list_rest_apis(connection) - - # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_rest_apis = [camel_dict_to_snake_dict(item) for item in rest_apis] - module.exit_json(changed=False, rest_apis=snaked_rest_apis) - - -if __name__ == "__main__": - main() diff --git a/application_autoscaling_policy.py b/application_autoscaling_policy.py deleted file mode 100644 index 8bbd91728a6..00000000000 --- a/application_autoscaling_policy.py +++ /dev/null @@ -1,551 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: application_autoscaling_policy -version_added: 1.0.0 -short_description: Manage Application Auto Scaling Scaling Policies -notes: - - For more details of the parameters and returns see - U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy) -description: - - Creates, updates or removes a Scaling Policy. - - Prior to release 5.0.0 this module was called C(community.aws.aws_application_scaling_policy). - The usage did not change. -author: - - Gustavo Maia (@gurumaia) - - Chen Leibovich (@chenl87) -options: - state: - description: Whether a policy should be C(present) or C(absent). - required: true - choices: ['absent', 'present'] - type: str - policy_name: - description: The name of the scaling policy. - required: true - type: str - service_namespace: - description: The namespace of the AWS service. - required: true - choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb'] - type: str - resource_id: - description: The identifier of the resource associated with the scalable target. - required: true - type: str - scalable_dimension: - description: The scalable dimension associated with the scalable target. - required: true - choices: [ 'ecs:service:DesiredCount', - 'ec2:spot-fleet-request:TargetCapacity', - 'elasticmapreduce:instancegroup:InstanceCount', - 'appstream:fleet:DesiredCapacity', - 'dynamodb:table:ReadCapacityUnits', - 'dynamodb:table:WriteCapacityUnits', - 'dynamodb:index:ReadCapacityUnits', - 'dynamodb:index:WriteCapacityUnits'] - type: str - policy_type: - description: The policy type. - required: true - choices: ['StepScaling', 'TargetTrackingScaling'] - type: str - step_scaling_policy_configuration: - description: A step scaling policy. This parameter is required if you are creating a policy and I(policy_type=StepScaling). - required: false - type: dict - target_tracking_scaling_policy_configuration: - description: - - A target tracking policy. This parameter is required if you are creating a new policy and I(policy_type=TargetTrackingScaling). - - 'Full documentation of the suboptions can be found in the API documentation:' - - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)' - required: false - type: dict - suboptions: - CustomizedMetricSpecification: - description: The metric to use if using a customized metric. - type: dict - DisableScaleIn: - description: Whether scaling-in should be disabled. - type: bool - PredefinedMetricSpecification: - description: The metric to use if using a predefined metric. - type: dict - ScaleInCooldown: - description: The time (in seconds) to wait after scaling-in before another scaling action can occur. 
- type: int - ScaleOutCooldown: - description: The time (in seconds) to wait after scaling-out before another scaling action can occur. - type: int - TargetValue: - description: The target value for the metric. - type: float - minimum_tasks: - description: The minimum value to scale to in response to a scale in event. - This parameter is required if you are creating a first new policy for the specified service. - required: false - type: int - maximum_tasks: - description: The maximum value to scale to in response to a scale out event. - This parameter is required if you are creating a first new policy for the specified service. - required: false - type: int - override_task_capacity: - description: - - Whether or not to override values of minimum and/or maximum tasks if it's already set. - - Defaults to C(false). - required: false - type: bool -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Create step scaling policy for ECS Service -- name: scaling_policy - community.aws.application_autoscaling_policy: - state: present - policy_name: test_policy - service_namespace: ecs - resource_id: service/poc-pricing/test-as - scalable_dimension: ecs:service:DesiredCount - policy_type: StepScaling - minimum_tasks: 1 - maximum_tasks: 6 - step_scaling_policy_configuration: - AdjustmentType: ChangeInCapacity - StepAdjustments: - - MetricIntervalUpperBound: 123 - ScalingAdjustment: 2 - - MetricIntervalLowerBound: 123 - ScalingAdjustment: -2 - Cooldown: 123 - MetricAggregationType: Average - -# Create target tracking scaling policy for ECS Service -- name: scaling_policy - community.aws.application_autoscaling_policy: - state: present - policy_name: test_policy - service_namespace: ecs - resource_id: service/poc-pricing/test-as - scalable_dimension: ecs:service:DesiredCount - policy_type: TargetTrackingScaling - minimum_tasks: 1 - maximum_tasks: 6 - target_tracking_scaling_policy_configuration: - TargetValue: 60 - PredefinedMetricSpecification: - PredefinedMetricType: ECSServiceAverageCPUUtilization - ScaleOutCooldown: 60 - ScaleInCooldown: 60 - -# Remove scalable target for ECS Service -- name: scaling_policy - community.aws.application_autoscaling_policy: - state: absent - policy_name: test_policy - policy_type: StepScaling - service_namespace: ecs - resource_id: service/cluster-name/service-name - scalable_dimension: ecs:service:DesiredCount -""" - -RETURN = r""" -alarms: - description: List of the CloudWatch alarms associated with the scaling policy - returned: when state present - type: complex - contains: - alarm_arn: - description: The Amazon Resource Name (ARN) of the alarm - returned: when state present - type: str - alarm_name: - description: The name of the alarm - returned: when state present - type: str -service_namespace: - description: The namespace of the AWS service. - returned: when state present - type: str - sample: ecs -resource_id: - description: The identifier of the resource associated with the scalable target. - returned: when state present - type: str - sample: service/cluster-name/service-name -scalable_dimension: - description: The scalable dimension associated with the scalable target. - returned: when state present - type: str - sample: ecs:service:DesiredCount -policy_arn: - description: The Amazon Resource Name (ARN) of the scaling policy.. 
- returned: when state present - type: str -policy_name: - description: The name of the scaling policy. - returned: when state present - type: str -policy_type: - description: The policy type. - returned: when state present - type: str -min_capacity: - description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present). - returned: when state present - type: int - sample: 1 -max_capacity: - description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present). - returned: when state present - type: int - sample: 2 -role_arn: - description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present). - returned: when state present - type: str - sample: arn:aws:iam::123456789012:role/roleName -step_scaling_policy_configuration: - description: The step scaling policy. - returned: when state present and the policy type is StepScaling - type: complex - contains: - adjustment_type: - description: The adjustment type - returned: when state present and the policy type is StepScaling - type: str - sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity" - cooldown: - description: The amount of time, in seconds, after a scaling activity completes - where previous trigger-related scaling activities can influence future scaling events - returned: when state present and the policy type is StepScaling - type: int - sample: 60 - metric_aggregation_type: - description: The aggregation type for the CloudWatch metrics - returned: when state present and the policy type is StepScaling - type: str - sample: "Average, Minimum, Maximum" - step_adjustments: - description: A set of adjustments that enable you to scale based on the size of the alarm breach - returned: when state present and the policy type is StepScaling - type: list - elements: dict -target_tracking_scaling_policy_configuration: - description: The target tracking policy. - returned: when state present and the policy type is TargetTrackingScaling - type: complex - contains: - predefined_metric_specification: - description: A predefined metric - returned: when state present and the policy type is TargetTrackingScaling - type: complex - contains: - predefined_metric_type: - description: The metric type - returned: when state present and the policy type is TargetTrackingScaling - type: str - sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization" - resource_label: - description: Identifies the resource associated with the metric type - returned: when metric type is ALBRequestCountPerTarget - type: str - scale_in_cooldown: - description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start - returned: when state present and the policy type is TargetTrackingScaling - type: int - sample: 60 - scale_out_cooldown: - description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start - returned: when state present and the policy type is TargetTrackingScaling - type: int - sample: 60 - target_value: - description: The target value for the metric - returned: when state present and the policy type is TargetTrackingScaling - type: int - sample: 70 -creation_time: - description: The Unix timestamp for when the scalable target was created. 
- returned: when state present - type: str - sample: '2017-09-28T08:22:51.881000-03:00' -""" - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import _camel_to_snake - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# Merge the results of the scalable target creation and policy deletion/creation -# There's no risk in overriding values since mutual keys have the same values in our case -def merge_results(scalable_target_result, policy_result): - if scalable_target_result["changed"] or policy_result["changed"]: - changed = True - else: - changed = False - - merged_response = scalable_target_result["response"].copy() - merged_response.update(policy_result["response"]) - - return {"changed": changed, "response": merged_response} - - -def delete_scaling_policy(connection, module): - changed = False - try: - scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get("service_namespace"), - ResourceId=module.params.get("resource_id"), - ScalableDimension=module.params.get("scalable_dimension"), - PolicyNames=[module.params.get("policy_name")], - MaxResults=1, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe scaling policies") - - if scaling_policy["ScalingPolicies"]: - try: - connection.delete_scaling_policy( - ServiceNamespace=module.params.get("service_namespace"), - ResourceId=module.params.get("resource_id"), - ScalableDimension=module.params.get("scalable_dimension"), - PolicyName=module.params.get("policy_name"), - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete scaling policy") - - return {"changed": changed} - - -def create_scalable_target(connection, module): - changed = False - - try: - scalable_targets = connection.describe_scalable_targets( - ServiceNamespace=module.params.get("service_namespace"), - ResourceIds=[ - module.params.get("resource_id"), - ], - ScalableDimension=module.params.get("scalable_dimension"), - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe scalable targets") - - # Scalable target registration will occur if: - # 1. There is no scalable target registered for this service - # 2. 
A scalable target exists, different min/max values are defined and override is set to "yes" - if not scalable_targets["ScalableTargets"] or ( - module.params.get("override_task_capacity") - and ( - scalable_targets["ScalableTargets"][0]["MinCapacity"] != module.params.get("minimum_tasks") - or scalable_targets["ScalableTargets"][0]["MaxCapacity"] != module.params.get("maximum_tasks") - ) - ): - changed = True - try: - connection.register_scalable_target( - ServiceNamespace=module.params.get("service_namespace"), - ResourceId=module.params.get("resource_id"), - ScalableDimension=module.params.get("scalable_dimension"), - MinCapacity=module.params.get("minimum_tasks"), - MaxCapacity=module.params.get("maximum_tasks"), - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to register scalable target") - - try: - response = connection.describe_scalable_targets( - ServiceNamespace=module.params.get("service_namespace"), - ResourceIds=[ - module.params.get("resource_id"), - ], - ScalableDimension=module.params.get("scalable_dimension"), - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe scalable targets") - - if response["ScalableTargets"]: - snaked_response = camel_dict_to_snake_dict(response["ScalableTargets"][0]) - else: - snaked_response = {} - - return {"changed": changed, "response": snaked_response} - - -def create_scaling_policy(connection, module): - try: - scaling_policy = connection.describe_scaling_policies( - ServiceNamespace=module.params.get("service_namespace"), - ResourceId=module.params.get("resource_id"), - ScalableDimension=module.params.get("scalable_dimension"), - PolicyNames=[module.params.get("policy_name")], - MaxResults=1, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe scaling policies") - - changed = False - - if scaling_policy["ScalingPolicies"]: - scaling_policy = scaling_policy["ScalingPolicies"][0] - # check if the input parameters are equal to what's already configured - for attr in ( - "PolicyName", - "ServiceNamespace", - "ResourceId", - "ScalableDimension", - "PolicyType", - "StepScalingPolicyConfiguration", - "TargetTrackingScalingPolicyConfiguration", - ): - if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)): - changed = True - scaling_policy[attr] = module.params.get(_camel_to_snake(attr)) - else: - changed = True - scaling_policy = { - "PolicyName": module.params.get("policy_name"), - "ServiceNamespace": module.params.get("service_namespace"), - "ResourceId": module.params.get("resource_id"), - "ScalableDimension": module.params.get("scalable_dimension"), - "PolicyType": module.params.get("policy_type"), - "StepScalingPolicyConfiguration": module.params.get("step_scaling_policy_configuration"), - "TargetTrackingScalingPolicyConfiguration": module.params.get( - "target_tracking_scaling_policy_configuration" - ), - } - - if changed: - try: - if module.params.get("step_scaling_policy_configuration"): - connection.put_scaling_policy( - PolicyName=scaling_policy["PolicyName"], - ServiceNamespace=scaling_policy["ServiceNamespace"], - ResourceId=scaling_policy["ResourceId"], - ScalableDimension=scaling_policy["ScalableDimension"], - PolicyType=scaling_policy["PolicyType"], - StepScalingPolicyConfiguration=scaling_policy["StepScalingPolicyConfiguration"], - ) - elif 
module.params.get("target_tracking_scaling_policy_configuration"): - connection.put_scaling_policy( - PolicyName=scaling_policy["PolicyName"], - ServiceNamespace=scaling_policy["ServiceNamespace"], - ResourceId=scaling_policy["ResourceId"], - ScalableDimension=scaling_policy["ScalableDimension"], - PolicyType=scaling_policy["PolicyType"], - TargetTrackingScalingPolicyConfiguration=scaling_policy["TargetTrackingScalingPolicyConfiguration"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create scaling policy") - - try: - response = connection.describe_scaling_policies( - ServiceNamespace=module.params.get("service_namespace"), - ResourceId=module.params.get("resource_id"), - ScalableDimension=module.params.get("scalable_dimension"), - PolicyNames=[module.params.get("policy_name")], - MaxResults=1, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe scaling policies") - - if response["ScalingPolicies"]: - snaked_response = camel_dict_to_snake_dict(response["ScalingPolicies"][0]) - else: - snaked_response = {} - - return {"changed": changed, "response": snaked_response} - - -def main(): - argument_spec = dict( - state=dict(type="str", required=True, choices=["present", "absent"]), - policy_name=dict(type="str", required=True), - service_namespace=dict( - type="str", required=True, choices=["appstream", "dynamodb", "ec2", "ecs", "elasticmapreduce"] - ), - resource_id=dict(type="str", required=True), - scalable_dimension=dict( - type="str", - required=True, - choices=[ - "ecs:service:DesiredCount", - "ec2:spot-fleet-request:TargetCapacity", - "elasticmapreduce:instancegroup:InstanceCount", - "appstream:fleet:DesiredCapacity", - "dynamodb:table:ReadCapacityUnits", - "dynamodb:table:WriteCapacityUnits", - "dynamodb:index:ReadCapacityUnits", - "dynamodb:index:WriteCapacityUnits", - ], - ), - policy_type=dict(type="str", required=True, choices=["StepScaling", "TargetTrackingScaling"]), - step_scaling_policy_configuration=dict(type="dict"), - target_tracking_scaling_policy_configuration=dict( - type="dict", - options=dict( - CustomizedMetricSpecification=dict(type="dict"), - DisableScaleIn=dict(type="bool"), - PredefinedMetricSpecification=dict(type="dict"), - ScaleInCooldown=dict(type="int"), - ScaleOutCooldown=dict(type="int"), - TargetValue=dict(type="float"), - ), - ), - minimum_tasks=dict(type="int"), - maximum_tasks=dict(type="int"), - override_task_capacity=dict(type="bool"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client("application-autoscaling") - - # Remove any target_tracking_scaling_policy_configuration suboptions that are None - policy_config_options = [ - "CustomizedMetricSpecification", - "DisableScaleIn", - "PredefinedMetricSpecification", - "ScaleInCooldown", - "ScaleOutCooldown", - "TargetValue", - ] - if isinstance(module.params["target_tracking_scaling_policy_configuration"], dict): - for option in policy_config_options: - if module.params["target_tracking_scaling_policy_configuration"][option] is None: - module.params["target_tracking_scaling_policy_configuration"].pop(option) - - if module.params.get("state") == "present": - # A scalable target must be registered prior to creating a scaling policy - scalable_target_result = create_scalable_target(connection, module) - policy_result = create_scaling_policy(connection, module) - # Merge the 
results of the scalable target creation and policy deletion/creation - # There's no risk in overriding values since mutual keys have the same values in our case - merged_result = merge_results(scalable_target_result, policy_result) - module.exit_json(**merged_result) - else: - policy_result = delete_scaling_policy(connection, module) - module.exit_json(**policy_result) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_complete_lifecycle_action.py b/autoscaling_complete_lifecycle_action.py deleted file mode 100644 index 94a8d031fdd..00000000000 --- a/autoscaling_complete_lifecycle_action.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_complete_lifecycle_action -short_description: Completes the lifecycle action of an instance -description: - - Used to complete the lifecycle action for the specified instance with the specified result. -version_added: "4.1.0" -author: - - Saleh Abbas (@salehabbas) -options: - asg_name: - description: - - The name of the Auto Scaling Group which the instance belongs to. - type: str - required: true - lifecycle_hook_name: - description: - - The name of the lifecycle hook to complete. - type: str - required: true - lifecycle_action_result: - description: - - The action for the lifecycle hook to take. - choices: ['CONTINUE', 'ABANDON'] - type: str - required: true - instance_id: - description: - - The ID of the instance. - type: str - required: true -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
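- -# Illustrative counterpart to the example below, reusing the same placeholder values: -# ABANDON tells Auto Scaling to abandon the action (for a launch hook the instance is terminated). -- community.aws.autoscaling_complete_lifecycle_action: - asg_name: my-auto-scaling-group - lifecycle_hook_name: my-lifecycle-hook - lifecycle_action_result: ABANDON - instance_id: i-123knm1l2312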
-# Complete the lifecycle action -- community.aws.autoscaling_complete_lifecycle_action: - asg_name: my-auto-scaling-group - lifecycle_hook_name: my-lifecycle-hook - lifecycle_action_result: CONTINUE - instance_id: i-123knm1l2312 -""" - -RETURN = r""" ---- -status: - description: The result of the request to complete the lifecycle action. - returned: success - type: str - sample: "OK" -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - asg_name=dict(required=True, type="str"), - lifecycle_hook_name=dict(required=True, type="str"), - lifecycle_action_result=dict(required=True, type="str", choices=["CONTINUE", "ABANDON"]), - instance_id=dict(required=True, type="str"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - asg_name = module.params.get("asg_name") - lifecycle_hook_name = module.params.get("lifecycle_hook_name") - lifecycle_action_result = module.params.get("lifecycle_action_result") - instance_id = module.params.get("instance_id") - - autoscaling = module.client("autoscaling") - try: - results = autoscaling.complete_lifecycle_action( - LifecycleHookName=lifecycle_hook_name, - AutoScalingGroupName=asg_name, - LifecycleActionResult=lifecycle_action_result, - InstanceId=instance_id, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to complete the lifecycle action") - - module.exit_json(results=results) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_instance_refresh.py b/autoscaling_instance_refresh.py deleted file mode 100644 index 86546fac21e..00000000000 --- a/autoscaling_instance_refresh.py +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_instance_refresh -version_added: 3.2.0 -short_description: Start or cancel an EC2 Auto Scaling Group (ASG) instance refresh in AWS -description: - - Start or cancel an EC2 Auto Scaling Group instance refresh in AWS. - - Can be used with M(community.aws.autoscaling_instance_refresh_info) to track the subsequent progress. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh). - The usage did not change. -author: - - "Dan Khersonsky (@danquixote)" -options: - state: - description: - - Desired state of the instance refresh. - type: str - required: true - choices: [ 'started', 'cancelled' ] - name: - description: - - The name of the Auto Scaling group to act on. - type: str - required: true - strategy: - description: - - The strategy to use for the instance refresh. The only valid value is C(Rolling). - - A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated. - - A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in. - - If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration. - type: str - default: 'Rolling' - preferences: - description: - - Set of preferences associated with the instance refresh request. - - If not provided, the default values are used. - - For I(min_healthy_percentage), the default value is C(90).
- - For I(instance_warmup), the default is to use the value specified for the health check grace period for the Auto Scaling group. - - Cannot be specified when I(state=cancelled). - required: false - suboptions: - min_healthy_percentage: - description: - - Total percent of capacity in ASG that must remain healthy during instance refresh to allow operation to continue. - - It is rounded up to the nearest integer. - type: int - default: 90 - instance_warmup: - description: - - The number of seconds until a newly launched instance is configured and ready to use. - - During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement. - - The default is to use the value for the health check grace period defined for the group. - type: int - type: dict -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Start a refresh - community.aws.autoscaling_instance_refresh: - name: some-asg - state: started - -- name: Cancel a refresh - community.aws.autoscaling_instance_refresh: - name: some-asg - state: cancelled - -- name: Start a refresh and pass preferences - community.aws.autoscaling_instance_refresh: - name: some-asg - state: started - preferences: - min_healthy_percentage: 91 - instance_warmup: 60 - -""" - -RETURN = r""" ---- -instance_refresh_id: - description: The instance refresh ID. - returned: success - type: str - sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b" -auto_scaling_group_name: - description: The name of the Auto Scaling group. - returned: success - type: str - sample: "public-webapp-production-1" -status: - description: - - The current status of the instance refresh operation. - - The following are the possible statuses - - Pending -- The request was created, but the operation has not started. - - InProgress -- The operation is in progress. - - Successful -- The operation completed successfully. - - Failed -- The operation failed to complete. You can troubleshoot using the status reason and the scaling activities. - - Cancelling -- An ongoing operation is being cancelled. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started. - - Cancelled -- The operation is cancelled. - returned: success - type: str - sample: "Pending" -start_time: - description: The date and time at which the instance refresh started, in ISO 8601 format. - returned: success - type: str - sample: "2015-11-25T00:05:36.309Z" -end_time: - description: The date and time at which the instance refresh ended, in ISO 8601 format. - returned: success - type: str - sample: "2015-11-25T00:05:36.309Z" -percentage_complete: - description: The percentage of the instance refresh that is complete. - returned: success - type: int - sample: 100 -instances_to_update: - description: The number of instances remaining to update. - returned: success - type: int - sample: 5 -"""
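- -# For reference, a rough sketch of the boto3 calls this module wraps (the ASG name and -# preference values are illustrative, not defaults enforced here): -# client = boto3.client("autoscaling") -# client.start_instance_refresh(AutoScalingGroupName="some-asg", Strategy="Rolling", -#     Preferences={"MinHealthyPercentage": 90, "InstanceWarmup": 60}) -# client.cancel_instance_refresh(AutoScalingGroupName="some-asg")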
- -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def start_or_cancel_instance_refresh(conn, module): - """ - Args: - conn (boto3.AutoScaling.Client): Valid Boto3 ASG client. - module: AnsibleAWSModule object - - Returns: - { - "instance_refreshes": [ - { - 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg', - 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09', - 'instances_to_update': 1, - 'percentage_complete': 0, - "preferences": { - "instance_warmup": 60, - "min_healthy_percentage": 90, - "skip_matching": false - }, - 'start_time': '2021-02-04T03:39:40+00:00', - 'status': 'Cancelling', - 'status_reason': 'Replacing instances before cancelling.', - } - ] - } - """ - - asg_state = module.params.get("state") - asg_name = module.params.get("name") - preferences = module.params.get("preferences") - - args = {} - args["AutoScalingGroupName"] = asg_name - if asg_state == "started": - args["Strategy"] = module.params.get("strategy") - if preferences: - if asg_state == "cancelled": - module.fail_json(msg="can not pass preferences dict when canceling a refresh") - _prefs = scrub_none_parameters(preferences) - args["Preferences"] = snake_dict_to_camel_dict(_prefs, capitalize_first=True) - cmd_invocations = { - "cancelled": conn.cancel_instance_refresh, - "started": conn.start_instance_refresh, - } - try: - if module.check_mode: - if asg_state == "started": - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( - "InstanceRefreshes", [] - ) - if ongoing_refresh: - module.exit_json( - changed=False, - msg="In check_mode - Instance Refresh is already in progress, can not start new instance refresh.", - ) - else: - module.exit_json(changed=True, msg="Would have started instance refresh if not in check mode.") - elif asg_state == "cancelled": - ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get( - "InstanceRefreshes", [] - ) - # Check for an empty list before indexing into it - if not ongoing_refresh: - module.exit_json(changed=False, msg="In check_mode - No active refresh found, nothing to cancel.") - elif ongoing_refresh[0].get("Status", "") in ["Cancelling", "Cancelled"]: - module.exit_json( - changed=False, - msg="In check_mode - Instance Refresh already cancelled or is pending cancellation.", - ) - else: - module.exit_json(changed=True, msg="Would have cancelled instance refresh if not in check mode.") - result = cmd_invocations[asg_state](aws_retry=True, **args) - instance_refreshes = conn.describe_instance_refreshes( - AutoScalingGroupName=asg_name, InstanceRefreshIds=[result["InstanceRefreshId"]] - ) - result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) - return module.exit_json(**result) - except (BotoCoreError, ClientError) as e: - action = "start" if asg_state == "started" else "cancel" - module.fail_json_aws(e, msg=f"Failed to {action} InstanceRefresh") - - -def main(): -
argument_spec = dict( - state=dict( - type="str", - required=True, - choices=["started", "cancelled"], - ), - name=dict(required=True), - strategy=dict(type="str", default="Rolling", required=False), - preferences=dict( - type="dict", - required=False, - options=dict( - min_healthy_percentage=dict(type="int", default=90), - instance_warmup=dict(type="int"), - ), - ), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - autoscaling = module.client( - "autoscaling", - retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InstanceRefreshInProgress"]), - ) - - start_or_cancel_instance_refresh(autoscaling, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_instance_refresh_info.py b/autoscaling_instance_refresh_info.py deleted file mode 100644 index 639940b1b77..00000000000 --- a/autoscaling_instance_refresh_info.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_instance_refresh_info -version_added: 3.2.0 -short_description: Gather information about EC2 Auto Scaling Group (ASG) Instance Refreshes in AWS -description: - - Describes one or more instance refreshes. - - You can determine the status of a request by looking at the I(status) return value. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info). - The usage did not change. -author: - - "Dan Khersonsky (@danquixote)" -options: - name: - description: - - The name of the Auto Scaling group. - type: str - required: true - ids: - description: - - One or more instance refresh IDs. - type: list - elements: str - default: [] - next_token: - description: - - The token for the next set of items to return. (You received this token from a previous call.) - type: str - max_records: - description: - - The maximum number of items to return with this call. The default value is 50 and the maximum value is 100. - type: int - required: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Find a refresh by ASG name - community.aws.autoscaling_instance_refresh_info: - name: somename-asg - -- name: Find a refresh by ASG name and one or more refresh IDs - community.aws.autoscaling_instance_refresh_info: - name: somename-asg - ids: ['some-id-123'] - register: asgs - -- name: Find a refresh by ASG name and set max_records - community.aws.autoscaling_instance_refresh_info: - name: somename-asg - max_records: 4 - register: asgs - -- name: Find a refresh by ASG name and NextToken, if received from a previous call - community.aws.autoscaling_instance_refresh_info: - name: somename-asg - next_token: 'some-token-123' - register: asgs -"""
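- -# For reference, the describe call behind these examples looks roughly like the -# following sketch (the ASG name, refresh ID and token reuse the placeholders above): -# conn.describe_instance_refreshes(AutoScalingGroupName="somename-asg", -#     InstanceRefreshIds=["some-id-123"], MaxRecords=4, NextToken="some-token-123")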
- -RETURN = r""" ---- -instance_refresh_id: - description: The instance refresh ID. - returned: success - type: str - sample: "08b91cf7-8fa6-48af-b6a6-d227f40f1b9b" -auto_scaling_group_name: - description: The name of the Auto Scaling group. - returned: success - type: str - sample: "public-webapp-production-1" -status: - description: - - The current status of the instance refresh operation. - - The following are the possible statuses - - C(Pending) - The request was created, but the operation has not started. - - C(InProgress) - The operation is in progress. - - C(Successful) - The operation completed successfully. - - C(Failed) - The operation failed to complete. - You can troubleshoot using the status reason and the scaling activities. - - C(Cancelling) - An ongoing operation is being cancelled. - Cancellation does not roll back any replacements that have already been - completed, but it prevents new replacements from being started. - - C(Cancelled) - The operation is cancelled. - returned: success - type: str - sample: "Pending" -start_time: - description: The date and time at which the instance refresh started, in ISO 8601 format. - returned: success - type: str - sample: "2015-11-25T00:05:36.309Z" -end_time: - description: The date and time at which the instance refresh ended, in ISO 8601 format. - returned: success - type: str - sample: "2015-11-25T00:05:36.309Z" -percentage_complete: - description: The percentage of the instance refresh that is complete. - returned: success - type: int - sample: 100 -instances_to_update: - description: The number of instances remaining to update. - returned: success - type: int - sample: 5 -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def find_asg_instance_refreshes(conn, module): - """ - Args: - conn (boto3.AutoScaling.Client): Valid Boto3 ASG client. - module: AnsibleAWSModule object - - Returns: - { - "instance_refreshes": [ - { - 'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg', - 'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09', - 'instances_to_update': 1, - 'percentage_complete': 0, - "preferences": { - "instance_warmup": 60, - "min_healthy_percentage": 90, - "skip_matching": false - }, - 'start_time': '2021-02-04T03:39:40+00:00', - 'status': 'Cancelled', - 'status_reason': 'Cancelled due to user request.', - } - ], - 'next_token': 'string' - } - """ - - asg_name = module.params.get("name") - asg_ids = module.params.get("ids") - asg_next_token = module.params.get("next_token") - asg_max_records = module.params.get("max_records") - - args = {} - args["AutoScalingGroupName"] = asg_name - if asg_ids: - args["InstanceRefreshIds"] = asg_ids - if asg_next_token: - args["NextToken"] = asg_next_token - if asg_max_records: - args["MaxRecords"] = asg_max_records - - try: - response = conn.describe_instance_refreshes(**args) - instance_refreshes = response.get("InstanceRefreshes", []) - - # Follow NextToken until the last page, accumulating results instead of - # overwriting the previous page - while "NextToken" in response: - args["NextToken"] = response["NextToken"] - response = conn.describe_instance_refreshes(**args) - instance_refreshes.extend(response.get("InstanceRefreshes", [])) - - instance_refreshes_result = camel_dict_to_snake_dict( - dict(instance_refreshes=instance_refreshes, next_token=response.get("NextToken", "")) - ) - - return module.exit_json(**instance_refreshes_result) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe InstanceRefreshes")
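- - -# Shape of one describe_instance_refreshes page for reference (keys per the AWS API, -# values illustrative): {"InstanceRefreshes": [{"InstanceRefreshId": "...", -# "Status": "InProgress", ...}], "NextToken": "opaque-token"}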
- - -def main(): - argument_spec = dict( - name=dict(required=True, type="str"), - ids=dict(required=False, default=[], elements="str", type="list"), - next_token=dict(required=False, default=None, type="str", no_log=True), - max_records=dict(required=False, type="int"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - autoscaling = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - find_asg_instance_refreshes(autoscaling, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_launch_config.py b/autoscaling_launch_config.py deleted file mode 100644 index a3cd600fa70..00000000000 --- a/autoscaling_launch_config.py +++ /dev/null @@ -1,734 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_launch_config -version_added: 1.0.0 - -short_description: Create or delete AWS Autoscaling Launch Configurations - -description: - - Can create or delete AWS Autoscaling Launch Configurations. - - Works with the M(community.aws.autoscaling_group) module to manage Autoscaling Groups. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc). - The usage did not change. - -notes: - - Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration after it is created will not update the - launch configuration on AWS. You must create a new config and assign it to the ASG instead. - -author: - - "Gareth Rushgrove (@garethr)" - - "Willem van Ketwich (@wilvk)" - -options: - state: - description: - - Create or delete the launch configuration. - default: present - choices: ['present', 'absent'] - type: str - name: - description: - - Unique name for configuration. - required: true - type: str - instance_type: - description: - - Instance type to use for the instance. - - Required when creating a new Launch Configuration. - type: str - image_id: - description: - - The AMI unique identifier to be used for the group. - type: str - key_name: - description: - - The SSH key name to be used for access to managed instances. - type: str - security_groups: - description: - - A list of security groups to apply to the instances. - - You can specify either security group names or IDs or a mix. - type: list - elements: str - default: [] - volumes: - description: - - A list of dictionaries defining the volumes to create. - - For any volume, a volume size less than C(1) will be interpreted as a request not to create the volume. - type: list - elements: dict - suboptions: - device_name: - type: str - description: - - The name for the volume (For example C(/dev/sda)). - required: true - no_device: - type: bool - description: - - When I(no_device=true) the device will not be created. - snapshot: - type: str - description: - - The ID of an EBS snapshot to copy when creating the volume. - - Mutually exclusive with the I(ephemeral) parameter. - ephemeral: - type: str - description: - - Whether the volume should be ephemeral. - - Data on ephemeral volumes is lost when the instance is stopped. - - Mutually exclusive with the I(snapshot) parameter. - volume_size: - type: int - description: - - The size of the volume (in GiB). - - Required unless one of I(ephemeral), I(snapshot) or I(no_device) is set.
- volume_type: - type: str - description: - - The type of volume to create. - - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types. - delete_on_termination: - type: bool - default: false - description: - - Whether the volume should be automatically deleted when the instance - is terminated. - iops: - type: int - description: - - The number of I/O operations per second (IOPS) to provision for the volume. - - Required when I(volume_type=io1). - throughput: - type: int - description: - - The throughput to provision for a gp3 volume. - - Valid range is a minimum value of 125 and a maximum value of 1000. - version_added: 3.1.0 - encrypted: - type: bool - default: false - description: - - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK. - user_data: - description: - - Opaque blob of data which is made available to the EC2 instance. Mutually exclusive with I(user_data_path). - type: str - user_data_path: - description: - - Path to the file that contains userdata for the EC2 instances. Mutually exclusive with I(user_data). - type: path - kernel_id: - description: - - Kernel ID for the EC2 instance. - type: str - spot_price: - description: - - The spot price you are bidding. Only applies for an autoscaling group with spot instances. - type: float - instance_monitoring: - description: - - Specifies whether instances are launched with detailed monitoring. - type: bool - default: false - assign_public_ip: - description: - - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address - to each instance launched in an Amazon VPC. - type: bool - ramdisk_id: - description: - - A RAM disk ID for the instances. - type: str - instance_profile_name: - description: - - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances. - type: str - ebs_optimized: - description: - - Specifies whether the instance is optimized for EBS I/O (true) or not (false). - default: false - type: bool - classic_link_vpc_id: - description: - - ID of a ClassicLink-enabled VPC. - type: str - classic_link_vpc_security_groups: - description: - - A list of security group IDs with which to associate the ClassicLink VPC instances. - type: list - elements: str - vpc_id: - description: - - VPC ID, used when resolving security group names to IDs. - type: str - instance_id: - description: - - The ID of a running instance to use as a basis for a launch configuration. Can be used in place of I(image_id) and I(instance_type). - type: str - placement_tenancy: - description: - - Determines whether the instance runs on single-tenant hardware or not. - - When not set AWS will default to C(default).
- type: str - choices: ['default', 'dedicated'] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -- name: create a launch configuration with an encrypted volume - community.aws.autoscaling_launch_config: - name: special - image_id: ami-XXX - key_name: default - security_groups: ['group', 'group2' ] - instance_type: t1.micro - volumes: - - device_name: /dev/sda1 - volume_size: 100 - volume_type: io1 - iops: 3000 - delete_on_termination: true - encrypted: true - - device_name: /dev/sdb - ephemeral: ephemeral0 - -- name: create a launch configuration using a running instance id as a basis - community.aws.autoscaling_launch_config: - name: special - instance_id: i-00a48b207ec59e948 - key_name: default - security_groups: ['launch-wizard-2' ] - volumes: - - device_name: /dev/sda1 - volume_size: 120 - volume_type: io1 - iops: 3000 - delete_on_termination: true - -- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image - community.aws.autoscaling_launch_config: - name: special - image_id: ami-XXX - key_name: default - security_groups: ['group', 'group2' ] - instance_type: t1.micro - volumes: - - device_name: /dev/sdf - no_device: true - -- name: Use EBS snapshot ID for volume - block: - - name: Set Volume Facts - ansible.builtin.set_fact: - volumes: - - device_name: /dev/sda1 - volume_size: 20 - ebs: - snapshot: snap-XXXX - volume_type: gp2 - delete_on_termination: true - encrypted: false - - - name: Create launch configuration - community.aws.autoscaling_launch_config: - name: lc1 - image_id: ami-xxxx - assign_public_ip: true - instance_type: t2.medium - key_name: my-key - security_groups: "['sg-xxxx']" - volumes: "{{ volumes }}" - register: lc_info -""" - -RETURN = r""" -arn: - description: The Amazon Resource Name of the launch configuration. - returned: when I(state=present) - type: str - sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name -changed: - description: Whether the state of the launch configuration has changed. - returned: always - type: bool - sample: false -created_time: - description: The creation date and time for the launch configuration. - returned: when I(state=present) - type: str - sample: '2017-11-03 23:46:44.841000' -image_id: - description: The ID of the Amazon Machine Image used by the launch configuration. - returned: when I(state=present) - type: str - sample: ami-9be6f38c -instance_type: - description: The instance type for the instances. - returned: when I(state=present) - type: str - sample: t1.micro -name: - description: The name of the launch configuration. - returned: when I(state=present) - type: str - sample: launch_config_name -result: - description: The specification details for the launch configuration. - returned: when I(state=present) - type: complex - contains: - PlacementTenancy: - description: The tenancy of the instances, either default or dedicated. - returned: when I(state=present) - type: str - sample: default - associate_public_ip_address: - description: (EC2-VPC) Indicates whether to assign a public IP address to each instance. - returned: when I(state=present) - type: bool - sample: false - block_device_mappings: - description: A block device mapping, which specifies the block devices. 
- returned: when I(state=present) - type: complex - contains: - device_name: - description: The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh). - returned: when I(state=present) - type: str - sample: /dev/sda1 - ebs: - description: The information about the Amazon EBS volume. - returned: when I(state=present) - type: complex - contains: - snapshot_id: - description: The ID of the snapshot. - returned: when I(state=present) - type: str - volume_size: - description: The volume size, in GiB. - returned: when I(state=present) - type: str - sample: '100' - virtual_name: - description: The name of the virtual device (for example, ephemeral0). - returned: when I(state=present) - type: str - sample: ephemeral0 - classic_link_vpc_id: - description: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. - returned: when I(state=present) - type: str - classic_link_vpc_security_groups: - description: The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. - returned: when I(state=present) - type: list - sample: [] - created_time: - description: The creation date and time for the launch configuration. - returned: when I(state=present) - type: str - sample: '2017-11-03 23:46:44.841000' - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: when I(state=present) - type: bool - sample: true - ebs_optimized: - description: Indicates whether the instance is optimized for EBS I/O C(true) or not C(false). - returned: when I(state=present) - type: bool - sample: false - image_id: - description: The ID of the Amazon Machine Image used by the launch configuration. - returned: when I(state=present) - type: str - sample: ami-9be6f38c - instance_monitoring: - description: Indicates whether instances in this group are launched with detailed C(true) or basic C(false) monitoring. - returned: when I(state=present) - type: bool - sample: true - instance_profile_name: - description: The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. - returned: when I(state=present) - type: str - sample: null - instance_type: - description: The instance type for the instances. - returned: when I(state=present) - type: str - sample: t1.micro - iops: - description: The number of I/O operations per second (IOPS) to provision for the volume. - returned: when I(state=present) - type: int - kernel_id: - description: The ID of the kernel associated with the AMI. - returned: when I(state=present) - type: str - sample: '' - key_name: - description: The name of the key pair. - returned: when I(state=present) - type: str - sample: testkey - launch_configuration_arn: - description: The Amazon Resource Name (ARN) of the launch configuration. - returned: when I(state=present) - type: str - sample: arn:aws:autoscaling:us-east-1:123456789012:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name - member: - description: "" - returned: when I(state=present) - type: str - sample: "\n " - name: - description: The name of the launch configuration. - returned: when I(state=present) - type: str - sample: launch_config_name - ramdisk_id: - description: The ID of the RAM disk associated with the AMI. - returned: when I(state=present) - type: str - sample: '' - security_groups: - description: The security groups to associate with the instances. 
- returned: when I(state=present) - type: list - sample: - - sg-5e27db2f - spot_price: - description: The price to bid when launching Spot Instances. - returned: when I(state=present) - type: float - use_block_device_types: - description: Indicates whether to suppress a device mapping. - returned: when I(state=present) - type: bool - sample: false - user_data: - description: The user data available to the instances. - returned: when I(state=present) - type: str - sample: '' - volume_type: - description: The volume type (one of standard, io1, gp2). - returned: when I(state=present) - type: str - sample: io1 -security_groups: - description: The security groups to associate with the instances. - returned: when I(state=present) - type: list - sample: - - sg-5e27db2f -""" - - -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_block_device_meta(module, volume): - if "snapshot" not in volume and "ephemeral" not in volume and "no_device" not in volume: - if "volume_size" not in volume: - module.fail_json(msg="Size must be specified when creating a new volume or modifying the root volume") - if "snapshot" in volume: - if volume.get("volume_type") == "io1" and "iops" not in volume: - module.fail_json(msg="io1 volumes must have an iops value set") - if "ephemeral" in volume: - if "snapshot" in volume: - module.fail_json(msg="Cannot set both ephemeral and snapshot") - - return_object = {} - - if "ephemeral" in volume: - return_object["VirtualName"] = volume.get("ephemeral") - - if "device_name" in volume: - return_object["DeviceName"] = volume.get("device_name") - - if "no_device" in volume: - return_object["NoDevice"] = volume.get("no_device") - - if any( - key in volume - for key in [ - "snapshot", - "volume_size", - "volume_type", - "delete_on_termination", - "iops", - "throughput", - "encrypted", - ] - ): - return_object["Ebs"] = {} - - if "snapshot" in volume: - return_object["Ebs"]["SnapshotId"] = volume.get("snapshot") - - if "volume_size" in volume: - return_object["Ebs"]["VolumeSize"] = int(volume.get("volume_size", 0)) - - if "volume_type" in volume: - return_object["Ebs"]["VolumeType"] = volume.get("volume_type") - - if "delete_on_termination" in volume: - return_object["Ebs"]["DeleteOnTermination"] = volume.get("delete_on_termination", False) - - if "iops" in volume: - return_object["Ebs"]["Iops"] = volume.get("iops") - - if "throughput" in volume: - if volume.get("volume_type") != "gp3": - module.fail_json(msg="The throughput parameter is supported only for GP3 volumes.") - return_object["Ebs"]["Throughput"] = volume.get("throughput") - - if "encrypted" in volume: - return_object["Ebs"]["Encrypted"] = volume.get("encrypted") - - return return_object - - -def create_launch_config(connection, module): - name = module.params.get("name") - vpc_id = module.params.get("vpc_id") - try: - ec2_connection = module.client("ec2") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - try: - 
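# Resolution step below: names in security_groups (for example "web") are looked up - # in the VPC given by vpc_id and converted to sg-xxxxxxxx IDs; entries that are - # already IDs pass through unchanged, per the option documentation above. -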
security_groups = get_ec2_security_group_ids_from_names( - module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get Security Group IDs") - except ValueError as e: - module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) - user_data = module.params.get("user_data") - user_data_path = module.params.get("user_data_path") - volumes = module.params["volumes"] - instance_monitoring = module.params.get("instance_monitoring") - assign_public_ip = module.params.get("assign_public_ip") - instance_profile_name = module.params.get("instance_profile_name") - ebs_optimized = module.params.get("ebs_optimized") - classic_link_vpc_id = module.params.get("classic_link_vpc_id") - classic_link_vpc_security_groups = module.params.get("classic_link_vpc_security_groups") - - block_device_mapping = [] - - convert_list = [ - "image_id", - "instance_type", - "instance_id", - "placement_tenancy", - "key_name", - "kernel_id", - "ramdisk_id", - "spot_price", - ] - - launch_config = snake_dict_to_camel_dict( - dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list) - ) - - if user_data_path: - try: - with open(user_data_path, "r") as user_data_file: - user_data = user_data_file.read() - except IOError as e: - module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc()) - - if volumes: - for volume in volumes: - if "device_name" not in volume: - module.fail_json(msg="Device name must be set for volume") - # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 to be a signal not to create this volume - if "volume_size" not in volume or int(volume["volume_size"]) > 0: - block_device_mapping.append(create_block_device_meta(module, volume)) - - try: - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( - "LaunchConfigurations" - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe launch configuration by name") - - changed = False - result = {} - - launch_config["LaunchConfigurationName"] = name - - if security_groups is not None: - launch_config["SecurityGroups"] = security_groups - - if classic_link_vpc_id is not None: - launch_config["ClassicLinkVPCId"] = classic_link_vpc_id - - if instance_monitoring is not None: - launch_config["InstanceMonitoring"] = {"Enabled": instance_monitoring} - - if classic_link_vpc_security_groups is not None: - launch_config["ClassicLinkVPCSecurityGroups"] = classic_link_vpc_security_groups - - if block_device_mapping: - launch_config["BlockDeviceMappings"] = block_device_mapping - - if instance_profile_name is not None: - launch_config["IamInstanceProfile"] = instance_profile_name - - if assign_public_ip is not None: - launch_config["AssociatePublicIpAddress"] = assign_public_ip - - if user_data is not None: - launch_config["UserData"] = user_data - - if ebs_optimized is not None: - launch_config["EbsOptimized"] = ebs_optimized - - if len(launch_configs) == 0: - try: - connection.create_launch_configuration(**launch_config) - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( - "LaunchConfigurations" - ) - changed = True - if launch_configs: - launch_config = launch_configs[0] - except (botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create launch configuration") - - result = dict( - (k, v) - for k, v in launch_config.items() - if k not in ["Connection", "CreatedTime", "InstanceMonitoring", "BlockDeviceMappings"] - ) - - result["CreatedTime"] = to_text(launch_config.get("CreatedTime")) - - try: - result["InstanceMonitoring"] = module.boolean(launch_config.get("InstanceMonitoring").get("Enabled")) - except AttributeError: - result["InstanceMonitoring"] = False - - result["BlockDeviceMappings"] = [] - - for block_device_mapping in launch_config.get("BlockDeviceMappings", []): - result["BlockDeviceMappings"].append( - dict( - device_name=block_device_mapping.get("DeviceName"), virtual_name=block_device_mapping.get("VirtualName") - ) - ) - if block_device_mapping.get("Ebs") is not None: - result["BlockDeviceMappings"][-1]["ebs"] = dict( - snapshot_id=block_device_mapping.get("Ebs").get("SnapshotId"), - volume_size=block_device_mapping.get("Ebs").get("VolumeSize"), - ) - - if user_data_path: - result["UserData"] = "hidden" # Otherwise, we dump binary to the user's terminal - - return_object = { - "Name": result.get("LaunchConfigurationName"), - "CreatedTime": result.get("CreatedTime"), - "ImageId": result.get("ImageId"), - "Arn": result.get("LaunchConfigurationARN"), - "SecurityGroups": result.get("SecurityGroups"), - "InstanceType": result.get("InstanceType"), - "Result": result, - } - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object)) - - -def delete_launch_config(connection, module): - try: - name = module.params.get("name") - launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get( - "LaunchConfigurations" - ) - if launch_configs: - connection.delete_launch_configuration( - LaunchConfigurationName=launch_configs[0].get("LaunchConfigurationName") - ) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete launch configuration") - - -def main(): - argument_spec = dict( - name=dict(required=True), - image_id=dict(), - instance_id=dict(), - key_name=dict(), - security_groups=dict(default=[], type="list", elements="str"), - user_data=dict(), - user_data_path=dict(type="path"), - kernel_id=dict(), - volumes=dict(type="list", elements="dict"), - instance_type=dict(), - state=dict(default="present", choices=["present", "absent"]), - spot_price=dict(type="float"), - ramdisk_id=dict(), - instance_profile_name=dict(), - ebs_optimized=dict(default=False, type="bool"), - instance_monitoring=dict(default=False, type="bool"), - assign_public_ip=dict(type="bool"), - classic_link_vpc_security_groups=dict(type="list", elements="str"), - classic_link_vpc_id=dict(), - vpc_id=dict(), - placement_tenancy=dict(choices=["default", "dedicated"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[["user_data", "user_data_path"]], - ) - - try: - connection = module.client("autoscaling") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="unable to establish connection") - - state = module.params.get("state") - - if state == "present": - create_launch_config(connection, module) - elif state == "absent": - delete_launch_config(connection, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_launch_config_find.py 
b/autoscaling_launch_config_find.py deleted file mode 100644 index 037c21ed9f9..00000000000 --- a/autoscaling_launch_config_find.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Jose Armesto -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_launch_config_find -version_added: 1.0.0 -short_description: Find AWS Autoscaling Launch Configurations -description: - - Returns list of matching Launch Configurations for a given name, along with other useful information. - - Results can be sorted and sliced. - - Based on the work by Tom Bamford U(https://github.com/tombamford). - - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_find). - The usage did not change. -author: - - "Jose Armesto (@fiunchinho)" -options: - name_regex: - description: - - A Launch Configuration to match. - - It will be compiled as a regular expression. - required: True - type: str - sort_order: - description: - - Order in which to sort results. - choices: ['ascending', 'descending'] - default: 'ascending' - type: str - limit: - description: - - How many results to show. - - Corresponds to Python slice notation like list[:limit]. - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Search for the Launch Configurations that start with "app" - community.aws.autoscaling_launch_config_find: - name_regex: app.* - sort_order: descending - limit: 2 -""" - -RETURN = r""" -image_id: - description: AMI ID - returned: when Launch Configuration was found - type: str - sample: "ami-0d75df7e" -user_data: - description: User data used to start instance - returned: when Launch Configuration was found - type: str - sample: "ZXhwb3J0IENMT1VE" -name: - description: Name of the Launch Configuration - returned: when Launch Configuration was found - type: str - sample: "myapp-v123" -arn: - description: The Amazon Resource Name (ARN) of the Launch Configuration - returned: when Launch Configuration was found - type: str - sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject" -instance_type: - description: Type of EC2 instance - returned: when Launch Configuration was found - type: str - sample: "t2.small" -created_time: - description: When the Launch Configuration was created - returned: when Launch Configuration was found - type: str - sample: "2016-06-29T14:59:22.222000+00:00" -ebs_optimized: - description: Launch Configuration EBS optimized property - returned: when Launch Configuration was found - type: bool - sample: False -instance_monitoring: - description: Launch Configuration instance monitoring property - returned: when Launch Configuration was found - type: dict - sample: {"Enabled": false} -classic_link_vpc_security_groups: - description: Launch Configuration classic link vpc security groups property - returned: when Launch Configuration was found - type: list - sample: [] -block_device_mappings: - description: Launch Configuration block device mappings property - returned: when Launch Configuration was found - type: list - sample: [] -keyname: - description: Launch Configuration SSH key - returned: when Launch Configuration was found - type: str - sample: mykey -security_groups: - description: Launch Configuration security groups - returned: when Launch Configuration was found - type: list - sample: []
-kernel_id: - description: Launch Configuration kernel to use - returned: when Launch Configuration was found - type: str - sample: '' -ram_disk_id: - description: Launch Configuration ram disk property - returned: when Launch Configuration was found - type: str - sample: '' -associate_public_address: - description: Assign public address or not - returned: when Launch Configuration was found - type: bool - sample: True -... -""" - -import re - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def find_launch_configs(client, module): - name_regex = module.params.get("name_regex") - sort_order = module.params.get("sort_order") - limit = module.params.get("limit") - - # Compile the pattern once instead of once per launch configuration - pattern = re.compile(name_regex) - - paginator = client.get_paginator("describe_launch_configurations") - - response_iterator = paginator.paginate(PaginationConfig={"MaxItems": 1000, "PageSize": 100}) - - results = [] - - for response in response_iterator: - response["LaunchConfigurations"] = filter( - lambda lc: pattern.match(lc["LaunchConfigurationName"]), response["LaunchConfigurations"] - ) - - for lc in response["LaunchConfigurations"]: - data = { - "name": lc["LaunchConfigurationName"], - "arn": lc["LaunchConfigurationARN"], - "created_time": lc["CreatedTime"], - "user_data": lc["UserData"], - "instance_type": lc["InstanceType"], - "image_id": lc["ImageId"], - "ebs_optimized": lc["EbsOptimized"], - "instance_monitoring": lc["InstanceMonitoring"], - "classic_link_vpc_security_groups": lc["ClassicLinkVPCSecurityGroups"], - "block_device_mappings": lc["BlockDeviceMappings"], - "keyname": lc["KeyName"], - "security_groups": lc["SecurityGroups"], - "kernel_id": lc["KernelId"], - "ram_disk_id": lc["RamdiskId"], - "associate_public_address": lc.get("AssociatePublicIpAddress", False), - } - - results.append(data) - - results.sort(key=lambda e: e["name"], reverse=(sort_order == "descending")) - - if limit: - results = results[:int(limit)] # fmt: skip - - module.exit_json(changed=False, results=results) - - -def main(): - argument_spec = dict( - name_regex=dict(required=True), - sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), - limit=dict(required=False, type="int"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - ) - - try: - client = module.client("autoscaling") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - find_launch_configs(client, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_launch_config_info.py b/autoscaling_launch_config_info.py deleted file mode 100644 index f5123c2ef00..00000000000 --- a/autoscaling_launch_config_info.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_launch_config_info -version_added: 1.0.0 -short_description: Gather information about AWS Autoscaling Launch Configurations -description: - - Gather information about AWS Autoscaling Launch Configurations. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_lc_info). - The usage did not change. -author: - - "Loïc Latreille (@psykotox)" -options: - name: - description: - - A name or a list of names to match.
- default: [] - type: list - elements: str - sort: - description: - - Optional attribute with which to sort the results. - choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name'] - type: str - sort_order: - description: - - Order in which to sort results. - - Only used when the 'sort' parameter is specified. - choices: ['ascending', 'descending'] - default: 'ascending' - type: str - sort_start: - description: - - Which result to start with (when sorting). - - Corresponds to Python slice notation. - type: int - sort_end: - description: - - Which result to end with (when sorting). - - Corresponds to Python slice notation. - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all launch configurations - community.aws.autoscaling_launch_config_info: - -- name: Gather information about launch configuration with name "example" - community.aws.autoscaling_launch_config_info: - name: example - -- name: Gather information sorted by created_time from most recent to least recent - community.aws.autoscaling_launch_config_info: - sort: created_time - sort_order: descending -""" - -RETURN = r""" -block_device_mapping: - description: Block device mapping for the instances of the launch configuration. - type: list - returned: always - sample: "[{ - 'device_name': '/dev/xvda', - 'ebs': { - 'delete_on_termination': true, - 'volume_size': 8, - 'volume_type': 'gp2' - } - }]" -classic_link_vpc_security_groups: - description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id. - type: list - returned: always - sample: -created_time: - description: The creation date and time for the launch configuration. - type: str - returned: always - sample: "2016-05-27T13:47:44.216000+00:00" -ebs_optimized: - description: Whether the instance is EBS I/O optimized (C(true)) or not (C(false)). - type: bool - returned: always - sample: true -image_id: - description: ID of the Amazon Machine Image (AMI). - type: str - returned: always - sample: "ami-12345678" -instance_monitoring: - description: Launched with detailed monitoring or not. - type: dict - returned: always - sample: "{ - 'enabled': true - }" -instance_type: - description: Instance type. - type: str - returned: always - sample: "t2.micro" -kernel_id: - description: ID of the kernel associated with the AMI. - type: str - returned: always - sample: -key_name: - description: Name of the key pair. - type: str - returned: always - sample: "user_app" -launch_configuration_arn: - description: Amazon Resource Name (ARN) of the launch configuration. - type: str - returned: always - sample: "arn:aws:autoscaling:us-east-1:123456798012:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app" -launch_configuration_name: - description: Name of the launch configuration. - type: str - returned: always - sample: "lc-app" -ramdisk_id: - description: ID of the RAM disk associated with the AMI. - type: str - returned: always - sample: -security_groups: - description: Security groups associated with the launch configuration. - type: list - returned: always - sample: "[ - 'web' - ]" -user_data: - description: User data available.
- type: str - returned: always -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def list_launch_configs(connection, module): - launch_config_name = module.params.get("name") - sort = module.params.get("sort") - sort_order = module.params.get("sort_order") - sort_start = module.params.get("sort_start") - sort_end = module.params.get("sort_end") - - try: - pg = connection.get_paginator("describe_launch_configurations") - launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result() - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Failed to list launch configs") - - snaked_launch_configs = [] - for launch_config in launch_configs["LaunchConfigurations"]: - snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config)) - - for launch_config in snaked_launch_configs: - # keys were converted to snake_case above, so the timestamp lives under created_time - if "created_time" in launch_config: - launch_config["created_time"] = str(launch_config["created_time"]) - - if sort: - snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending")) - - if sort and sort_start and sort_end: - snaked_launch_configs = snaked_launch_configs[sort_start:sort_end] - elif sort and sort_start: - snaked_launch_configs = snaked_launch_configs[sort_start:] - elif sort and sort_end: - snaked_launch_configs = snaked_launch_configs[:sort_end] - - module.exit_json(launch_configurations=snaked_launch_configs) - - -def main(): - argument_spec = dict( - name=dict(required=False, default=[], type="list", elements="str"), - sort=dict( - required=False, - default=None, - choices=[ - "launch_configuration_name", - "image_id", - "created_time", - "instance_type", - "kernel_id", - "ramdisk_id", - "key_name", - ], - ), - sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]), - sort_start=dict(required=False, type="int"), - sort_end=dict(required=False, type="int"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - connection = module.client("autoscaling") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - list_launch_configs(connection, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_lifecycle_hook.py b/autoscaling_lifecycle_hook.py deleted file mode 100644 index a77fcce0ad0..00000000000 --- a/autoscaling_lifecycle_hook.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: autoscaling_lifecycle_hook -version_added: 1.0.0 -short_description: Create, delete or update AWS ASG Lifecycle Hooks -description: - - Will create a new hook when I(state=present) and the given hook is not found. - - Will update an existing hook when I(state=present) and a hook is found, but current and provided parameters differ. - - Will delete the hook when I(state=absent) and a hook is found. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_lifecycle_hook). The usage did not change.
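The sort and slice handling in list_launch_configs() above maps sort_start and sort_end directly onto Python slice notation. A minimal sketch with hypothetical records:

configs = [
    {"created_time": "2016-01-01"},
    {"created_time": "2016-03-01"},
    {"created_time": "2016-02-01"},
]
sort, sort_order = "created_time", "descending"
sort_start, sort_end = 0, 2

# Sort first, then slice; both are plain Python list operations.
configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending"))
subset = configs[sort_start:sort_end]
assert [c["created_time"] for c in subset] == ["2016-03-01", "2016-02-01"]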
-author: - - Igor 'Tsigankov' Eyrich (@tsiganenok) -options: - state: - description: - - Create or delete Lifecycle Hook. - - When I(state=present) updates existing hook or creates a new hook if not found. - choices: ['present', 'absent'] - default: present - type: str - lifecycle_hook_name: - description: - - The name of the lifecycle hook. - required: true - type: str - autoscaling_group_name: - description: - - The name of the Auto Scaling group to which you want to assign the lifecycle hook. - required: true - type: str - transition: - description: - - The instance state to which you want to attach the lifecycle hook. - - Required when I(state=present). - choices: ['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING'] - type: str - role_arn: - description: - - The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. - type: str - notification_target_arn: - description: - - The ARN of the notification target that Auto Scaling will use to notify you when an - instance is in the transition state for the lifecycle hook. - - This target can be either an SQS queue or an SNS topic. - - If you specify an empty string, this overrides the current ARN. - type: str - notification_meta_data: - description: - - Contains additional information that you want to include any time Auto Scaling sends a message to the notification target. - type: str - heartbeat_timeout: - description: - - The amount of time, in seconds, that can elapse before the lifecycle hook times out. - When the lifecycle hook times out, Auto Scaling performs the default action. - You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. - - By default Amazon AWS will use C(3600) (1 hour). - type: int - default_result: - description: - - Defines the action the Auto Scaling group should take when the lifecycle hook timeout - elapses or if an unexpected failure occurs. - choices: ['ABANDON', 'CONTINUE'] - default: ABANDON - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create / Update lifecycle hook - community.aws.autoscaling_lifecycle_hook: - region: eu-central-1 - state: present - autoscaling_group_name: example - lifecycle_hook_name: example - transition: autoscaling:EC2_INSTANCE_LAUNCHING - heartbeat_timeout: 7000 - default_result: ABANDON - -- name: Delete lifecycle hook - community.aws.autoscaling_lifecycle_hook: - region: eu-central-1 - state: absent - autoscaling_group_name: example - lifecycle_hook_name: example -""" - -RETURN = r""" ---- -auto_scaling_group_name: - description: The unique name of the auto scaling group. - returned: success - type: str - sample: "myasg" -default_result: - description: Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. - returned: success - type: str - sample: CONTINUE -global_timeout: - description: The maximum time, in seconds, that an instance can remain in a C(Pending:Wait) or C(Terminating:Wait) state. - returned: success - type: int - sample: 172800 -heartbeat_timeout: - description: The maximum time, in seconds, that can elapse before the lifecycle hook times out. - returned: success - type: int - sample: 3600 -lifecycle_hook_name: - description: The name of the lifecycle hook. 
- returned: success - type: str - sample: "mylifecyclehook" -lifecycle_transition: - description: The instance state to which lifecycle hook should be attached. - returned: success - type: str - sample: "autoscaling:EC2_INSTANCE_LAUNCHING" -""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_lifecycle_hook(connection, module): - lch_name = module.params.get("lifecycle_hook_name") - asg_name = module.params.get("autoscaling_group_name") - transition = module.params.get("transition") - role_arn = module.params.get("role_arn") - notification_target_arn = module.params.get("notification_target_arn") - notification_meta_data = module.params.get("notification_meta_data") - heartbeat_timeout = module.params.get("heartbeat_timeout") - default_result = module.params.get("default_result") - - return_object = {} - return_object["changed"] = False - - lch_params = { - "LifecycleHookName": lch_name, - "AutoScalingGroupName": asg_name, - "LifecycleTransition": transition, - } - - if role_arn: - lch_params["RoleARN"] = role_arn - - if notification_target_arn: - lch_params["NotificationTargetARN"] = notification_target_arn - - if notification_meta_data: - lch_params["NotificationMetadata"] = notification_meta_data - - if heartbeat_timeout: - lch_params["HeartbeatTimeout"] = heartbeat_timeout - - if default_result: - lch_params["DefaultResult"] = default_result - - try: - existing_hook = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, - LifecycleHookNames=[lch_name], - )["LifecycleHooks"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get Lifecycle Hook") - - if not existing_hook: - try: - if module.check_mode: - module.exit_json( - changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode." - ) - return_object["changed"] = True - connection.put_lifecycle_hook(**lch_params) - return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] - )["LifecycleHooks"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create LifecycleHook") - - else: - added, removed, modified, same = dict_compare(lch_params, existing_hook[0]) - if modified: - try: - if module.check_mode: - module.exit_json( - changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode." 
- ) - return_object["changed"] = True - connection.put_lifecycle_hook(**lch_params) - return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name] - )["LifecycleHooks"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create LifecycleHook") - - module.exit_json(**camel_dict_to_snake_dict(return_object)) - - -def dict_compare(d1, d2): - d1_keys = set(d1.keys()) - d2_keys = set(d2.keys()) - intersect_keys = d1_keys.intersection(d2_keys) - added = d1_keys - d2_keys - removed = d2_keys - d1_keys - modified = False - for key in d1: - if d1[key] != d2[key]: - modified = True - break - - same = set(o for o in intersect_keys if d1[o] == d2[o]) - return added, removed, modified, same - - -def delete_lifecycle_hook(connection, module): - lch_name = module.params.get("lifecycle_hook_name") - asg_name = module.params.get("autoscaling_group_name") - - return_object = {} - return_object["changed"] = False - - try: - all_hooks = connection.describe_lifecycle_hooks( - AutoScalingGroupName=asg_name, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks") - - for hook in all_hooks["LifecycleHooks"]: - if hook["LifecycleHookName"] == lch_name: - lch_params = { - "LifecycleHookName": lch_name, - "AutoScalingGroupName": asg_name, - } - - try: - if module.check_mode: - module.exit_json( - changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode." - ) - connection.delete_lifecycle_hook(**lch_params) - return_object["changed"] = True - return_object["lifecycle_hook_removed"] = { - "LifecycleHookName": lch_name, - "AutoScalingGroupName": asg_name, - } - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete LifecycleHook") - else: - pass - - module.exit_json(**camel_dict_to_snake_dict(return_object)) - - -def main(): - argument_spec = dict( - autoscaling_group_name=dict(required=True, type="str"), - lifecycle_hook_name=dict(required=True, type="str"), - transition=dict( - type="str", choices=["autoscaling:EC2_INSTANCE_TERMINATING", "autoscaling:EC2_INSTANCE_LAUNCHING"] - ), - role_arn=dict(type="str"), - notification_target_arn=dict(type="str"), - notification_meta_data=dict(type="str"), - heartbeat_timeout=dict(type="int"), - default_result=dict(default="ABANDON", choices=["ABANDON", "CONTINUE"]), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[["state", "present", ["transition"]]], - ) - - state = module.params.get("state") - - connection = module.client("autoscaling") - - changed = False - - if state == "present": - create_lifecycle_hook(connection, module) - elif state == "absent": - delete_lifecycle_hook(connection, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_policy.py b/autoscaling_policy.py deleted file mode 100644 index 6d69d849226..00000000000 --- a/autoscaling_policy.py +++ /dev/null @@ -1,610 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: autoscaling_policy -short_description: Create or delete AWS scaling policies for Autoscaling groups 
-version_added: 1.0.0 -description: - - Can create or delete scaling policies for autoscaling groups. - - Referenced autoscaling groups must already exist. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_scaling_policy). - The usage did not change. -author: - - Zacharie Eakin (@zeekin) - - Will Thames (@willthames) -options: - state: - type: str - description: - - Register or deregister the policy. - choices: ['present', 'absent'] - default: 'present' - name: - type: str - description: - - Unique name for the scaling policy. - required: true - asg_name: - type: str - description: - - Name of the associated autoscaling group. - - Required if I(state) is C(present). - adjustment_type: - type: str - description: - - The type of change in capacity of the autoscaling group. - - Required if I(state) is C(present). - choices: - - ChangeInCapacity - - ExactCapacity - - PercentChangeInCapacity - scaling_adjustment: - type: int - description: - - The amount by which the autoscaling group is adjusted by the policy. - - A negative number has the effect of scaling down the ASG. - - Units are numbers of instances for C(ExactCapacity) or C(ChangeInCapacity) or percent - of existing instances for C(PercentChangeInCapacity). - - Required when I(policy_type) is C(SimpleScaling). - min_adjustment_step: - type: int - description: - - Minimum amount of adjustment when policy is triggered. - - Only used when I(adjustment_type) is C(PercentChangeInCapacity). - cooldown: - type: int - description: - - The minimum period of time (in seconds) between which autoscaling actions can take place. - - Only used when I(policy_type) is C(SimpleScaling). - policy_type: - type: str - description: - - Auto scaling adjustment policy. - choices: - - StepScaling - - SimpleScaling - - TargetTrackingScaling - default: SimpleScaling - metric_aggregation: - type: str - description: - - The aggregation type for the CloudWatch metrics. - - Only used when I(policy_type) is not C(SimpleScaling). - choices: - - Minimum - - Maximum - - Average - default: Average - step_adjustments: - type: list - description: - - List of dicts containing I(lower_bound), I(upper_bound) and I(scaling_adjustment). - - Intervals must not overlap or have a gap between them. - - At most, one item can have an undefined I(lower_bound). - If any item has a negative lower_bound, then there must be a step adjustment with an undefined I(lower_bound). - - At most, one item can have an undefined I(upper_bound). - If any item has a positive upper_bound, then there must be a step adjustment with an undefined I(upper_bound). - - The bounds are the amount over the alarm threshold at which the adjustment will trigger. - This means that for an alarm threshold of 50, triggering at 75 requires a lower bound of 25. - See U(http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_StepAdjustment.html). - elements: dict - suboptions: - lower_bound: - type: int - description: - - The lower bound for the difference between the alarm threshold and - the CloudWatch metric. - upper_bound: - type: int - description: - - The upper bound for the difference between the alarm threshold and - the CloudWatch metric. - scaling_adjustment: - type: int - description: - - The amount by which to scale. - required: true - target_tracking_config: - type: dict - description: - - Allows you to specify a I(target_tracking_config) for autoscaling policies in AWS. - - I(target_tracking_config) can accept nested dicts for I(customized_metric_spec) or I(predefined_metric_spec). 
Each specification aligns with its boto3 equivalent. - - Required when I(TargetTrackingScaling) policy is specified. - version_added: 4.1.0 - suboptions: - customized_metric_spec: - type: dict - description: - - A dict passed in as the C(CustomizedMetricSpecification) of the C(TargetTrackingConfiguration) call. - suboptions: - metric_name: - type: str - description: - - The name of the metric. - required: true - namespace: - type: str - description: - - The namespace of the metric. - required: true - statistic: - type: str - description: - - The statistic of the metric. - required: true - choices: - - Average - - Minimum - - Maximum - - SampleCount - - Sum - dimensions: - type: list - description: - - The dimensions of the metric. Each element of the list should be a dict. - elements: dict - unit: - type: str - description: - - The unit of the metric. See the Amazon CloudWatch API for valid units. - predefined_metric_spec: - type: dict - description: - - A dict passed in as the C(PredefinedMetricSpecification) of the C(TargetTrackingConfiguration) call. - suboptions: - predefined_metric_type: - type: str - required: true - description: - - The predefined metric type. Required if C(predefined_metric_spec) is used. - choices: - - ASGAverageCPUUtilization - - ASGAverageNetworkIn - - ASGAverageNetworkOut - - ALBRequestCountPerTarget - resource_label: - type: str - description: - - Uniquely identifies a specific ALB target group from which to determine the average request count served by your Auto Scaling group. - - You can't specify a resource label unless the target group is attached to the Auto Scaling group. - target_value: - type: float - description: - - The target utilization, as a float. - - Required when I(target_tracking_config) is specified. - required: true - disable_scalein: - type: bool - description: - - Indicates whether scaling in by the target tracking scaling policy is disabled. - estimated_instance_warmup: - type: int - description: - - The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.
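The step bounds documented above are offsets from the alarm threshold, not absolute metric values. A worked sketch with the documentation's own numbers (alarm threshold of 50, trigger at 75):

alarm_threshold = 50
# A lower_bound of 25 means the step triggers once the metric is 25 over
# the threshold, i.e. at an absolute metric value of 75 or more.
step = {"lower_bound": 25, "scaling_adjustment": 100}

metric_value = 80
if metric_value - alarm_threshold >= step["lower_bound"]:
    print(f"scale by {step['scaling_adjustment']}")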
-extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Simple Scale Down policy - community.aws.autoscaling_policy: - state: present - region: US-XXX - name: "scaledown-policy" - adjustment_type: "ChangeInCapacity" - asg_name: "application-asg" - scaling_adjustment: -1 - min_adjustment_step: 1 - cooldown: 300 - -# For an alarm with a breach threshold of 20, the -# following creates a stepped policy: -# From 20-40 (0-20 above threshold), increase by 50% of existing capacity -# From 41-infinity, increase by 100% of existing capacity -- community.aws.autoscaling_policy: - state: present - region: US-XXX - name: "step-scale-up-policy" - policy_type: StepScaling - metric_aggregation: Maximum - step_adjustments: - - upper_bound: 20 - scaling_adjustment: 50 - - lower_bound: 20 - scaling_adjustment: 100 - adjustment_type: "PercentChangeInCapacity" - asg_name: "application-asg" - -- name: create TargetTracking predefined policy - community.aws.autoscaling_policy: - name: "predefined-policy-1" - policy_type: TargetTrackingScaling - target_tracking_config: - predefined_metric_spec: - predefined_metric_type: ASGAverageCPUUtilization - target_value: 98.0 - asg_name: "asg-test-1" - register: result - -- name: create TargetTracking predefined policy with resource_label - community.aws.autoscaling_policy: - name: "predefined-policy-1" - policy_type: TargetTrackingScaling - target_tracking_config: - predefined_metric_spec: - predefined_metric_type: ALBRequestCountPerTarget - resource_label: app/my-alb/778d41231d141a0f/targetgroup/my-alb-target-group/942f017f100becff - target_value: 98.0 - asg_name: "asg-test-1" - register: result - -- name: create TargetTrackingScaling custom policy - community.aws.autoscaling_policy: - name: "custom-policy-1" - policy_type: TargetTrackingScaling - target_tracking_config: - customized_metric_spec: - metric_name: metric_1 - namespace: namespace_1 - statistic: Minimum - unit: Gigabits - dimensions: [{'Name': 'dimension1', 'Value': 'value1'}] - disable_scalein: true - target_value: 98.0 - asg_name: asg-test-1 - register: result -""" - -RETURN = r""" -adjustment_type: - description: Scaling policy adjustment type. - returned: always - type: str - sample: PercentChangeInCapacity -alarms: - description: Cloudwatch alarms related to the policy. - returned: always - type: complex - contains: - alarm_name: - description: Name of the Cloudwatch alarm. - returned: always - type: str - sample: cpu-very-high - alarm_arn: - description: ARN of the Cloudwatch alarm. - returned: always - type: str - sample: arn:aws:cloudwatch:us-east-2:1234567890:alarm:cpu-very-high -arn: - description: ARN of the scaling policy. Provided for backward compatibility, value is the same as I(policy_arn). - returned: always - type: str - sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy -as_name: - description: Auto Scaling Group name. Provided for backward compatibility, value is the same as I(auto_scaling_group_name). - returned: always - type: str - sample: app-asg -auto_scaling_group_name: - description: Name of Auto Scaling Group. - returned: always - type: str - sample: app-asg -metric_aggregation_type: - description: Method used to aggregate metrics. - returned: when I(policy_type) is C(StepScaling) - type: str - sample: Maximum -name: - description: Name of the scaling policy. 
Provided for backward compatibility, value is the same as I(policy_name). - returned: always - type: str - sample: app-policy -policy_arn: - description: ARN of scaling policy. - returned: always - type: str - sample: arn:aws:autoscaling:us-east-2:123456789012:scalingPolicy:59e37526-bd27-42cf-adca-5cd3d90bc3b9:autoScalingGroupName/app-asg:policyName/app-policy -policy_name: - description: Name of scaling policy. - returned: always - type: str - sample: app-policy -policy_type: - description: Type of auto scaling policy. - returned: always - type: str - sample: StepScaling -scaling_adjustment: - description: Adjustment to make when alarm is triggered. - returned: When I(policy_type) is C(SimpleScaling) - type: int - sample: 1 -step_adjustments: - description: List of step adjustments. - returned: always - type: complex - contains: - metric_interval_lower_bound: - description: Lower bound for metric interval. - returned: if step has a lower bound - type: float - sample: 20.0 - metric_interval_upper_bound: - description: Upper bound for metric interval. - returned: if step has an upper bound - type: float - sample: 40.0 - scaling_adjustment: - description: Adjustment to make if this step is reached. - returned: always - type: int - sample: 50 -""" - -try: - import botocore -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def build_target_specification(target_tracking_config): - # Initialize an empty dict() for building TargetTrackingConfiguration policies, - # which will be returned - targetTrackingConfig = dict() - - if target_tracking_config.get("target_value"): - targetTrackingConfig["TargetValue"] = target_tracking_config["target_value"] - - if target_tracking_config.get("disable_scalein"): - targetTrackingConfig["DisableScaleIn"] = target_tracking_config["disable_scalein"] - else: - # Accounting for boto3 response - targetTrackingConfig["DisableScaleIn"] = False - - if target_tracking_config["predefined_metric_spec"] is not None: - # Build spec for predefined_metric_spec - targetTrackingConfig["PredefinedMetricSpecification"] = dict() - if target_tracking_config["predefined_metric_spec"].get("predefined_metric_type"): - targetTrackingConfig["PredefinedMetricSpecification"]["PredefinedMetricType"] = target_tracking_config[ - "predefined_metric_spec" - ]["predefined_metric_type"] - - if target_tracking_config["predefined_metric_spec"].get("resource_label"): - targetTrackingConfig["PredefinedMetricSpecification"]["ResourceLabel"] = target_tracking_config[ - "predefined_metric_spec" - ]["resource_label"] - - elif target_tracking_config["customized_metric_spec"] is not None: - # Build spec for customized_metric_spec - targetTrackingConfig["CustomizedMetricSpecification"] = dict() - if target_tracking_config["customized_metric_spec"].get("metric_name"): - targetTrackingConfig["CustomizedMetricSpecification"]["MetricName"] = target_tracking_config[ - "customized_metric_spec" - ]["metric_name"] - - if target_tracking_config["customized_metric_spec"].get("namespace"): - targetTrackingConfig["CustomizedMetricSpecification"]["Namespace"] = target_tracking_config[ - "customized_metric_spec" - ]["namespace"] - - if target_tracking_config["customized_metric_spec"].get("dimensions"): - 
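# The dimensions list is passed through to boto3 unchanged, so each - # element must already be a {'Name': ..., 'Value': ...} dict as shown - # in the EXAMPLES above. -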
targetTrackingConfig["CustomizedMetricSpecification"]["Dimensions"] = target_tracking_config[ - "customized_metric_spec" - ]["dimensions"] - - if target_tracking_config["customized_metric_spec"].get("statistic"): - targetTrackingConfig["CustomizedMetricSpecification"]["Statistic"] = target_tracking_config[ - "customized_metric_spec" - ]["statistic"] - - if target_tracking_config["customized_metric_spec"].get("unit"): - targetTrackingConfig["CustomizedMetricSpecification"]["Unit"] = target_tracking_config[ - "customized_metric_spec" - ]["unit"] - - return targetTrackingConfig - - -def create_scaling_policy(connection, module): - changed = False - asg_name = module.params["asg_name"] - policy_type = module.params["policy_type"] - policy_name = module.params["name"] - - if policy_type == "TargetTrackingScaling": - params = dict(PolicyName=policy_name, PolicyType=policy_type, AutoScalingGroupName=asg_name) - else: - params = dict( - PolicyName=policy_name, - PolicyType=policy_type, - AutoScalingGroupName=asg_name, - AdjustmentType=module.params["adjustment_type"], - ) - - # min_adjustment_step attribute is only relevant if the adjustment_type - # is set to percentage change in capacity, so it is a special case - if module.params["adjustment_type"] == "PercentChangeInCapacity": - if module.params["min_adjustment_step"]: - params["MinAdjustmentMagnitude"] = module.params["min_adjustment_step"] - - if policy_type == "SimpleScaling": - # can't use required_if because it doesn't allow multiple criteria - - # it's only required if policy is SimpleScaling and state is present - if module.params["scaling_adjustment"] is None: - module.fail_json( - msg="scaling_adjustment is required when policy_type is SimpleScaling and state is present" - ) - params["ScalingAdjustment"] = module.params["scaling_adjustment"] - if module.params["cooldown"]: - params["Cooldown"] = module.params["cooldown"] - - elif policy_type == "StepScaling": - if not module.params["step_adjustments"]: - module.fail_json(msg="step_adjustments is required when policy_type is StepScaling and state is present") - params["StepAdjustments"] = [] - for step_adjustment in module.params["step_adjustments"]: - step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) - # a bound of 0 is a valid offset, so compare against None rather than truthiness - if step_adjustment.get("lower_bound") is not None: - step_adjust_params["MetricIntervalLowerBound"] = step_adjustment["lower_bound"] - if step_adjustment.get("upper_bound") is not None: - step_adjust_params["MetricIntervalUpperBound"] = step_adjustment["upper_bound"] - params["StepAdjustments"].append(step_adjust_params) - if module.params["metric_aggregation"]: - params["MetricAggregationType"] = module.params["metric_aggregation"] - if module.params["estimated_instance_warmup"]: - params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] - - elif policy_type == "TargetTrackingScaling": - if not module.params["target_tracking_config"]: - module.fail_json( - msg="target_tracking_config is required when policy_type is TargetTrackingScaling and state is present" - ) - else: - params["TargetTrackingConfiguration"] = build_target_specification( - module.params.get("target_tracking_config") - ) - if module.params["estimated_instance_warmup"]: - params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"] - - # Ensure idempotency with policies - try: - policies = connection.describe_policies( - aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] - )["ScalingPolicies"] - except (botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") - - # use two separate dicts so the diff's before/after do not alias each other - before = {} - after = {} - if not policies: - changed = True - else: - policy = policies[0] - for key in params: - if params[key] != policy.get(key): - changed = True - # before holds the current value in AWS, after holds the requested value - before[key] = policy.get(key) - after[key] = params[key] - - if changed: - try: - connection.put_scaling_policy(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create autoscaling policy") - - try: - policies = connection.describe_policies( - aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] - )["ScalingPolicies"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") - - policy = camel_dict_to_snake_dict(policies[0]) - # Backward compatible return values - policy["arn"] = policy["policy_arn"] - policy["as_name"] = policy["auto_scaling_group_name"] - policy["name"] = policy["policy_name"] - - if before and after: - module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy) - else: - module.exit_json(changed=changed, **policy) - - -def delete_scaling_policy(connection, module): - policy_name = module.params.get("name") - - try: - policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") - - if policy["ScalingPolicies"]: - try: - connection.delete_policy( - aws_retry=True, - AutoScalingGroupName=policy["ScalingPolicies"][0]["AutoScalingGroupName"], - PolicyName=policy_name, - ) - module.exit_json(changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete autoscaling policy") - - module.exit_json(changed=False) - - -def main(): - step_adjustment_spec = dict( - lower_bound=dict(type="int"), upper_bound=dict(type="int"), scaling_adjustment=dict(type="int", required=True) - ) - - predefined_metric_spec = dict( - predefined_metric_type=dict( - type="str", - choices=[ - "ASGAverageCPUUtilization", - "ASGAverageNetworkIn", - "ASGAverageNetworkOut", - "ALBRequestCountPerTarget", - ], - required=True, - ), - resource_label=dict(type="str"), - ) - customized_metric_spec = dict( - metric_name=dict(type="str", required=True), - namespace=dict(type="str", required=True), - statistic=dict(type="str", required=True, choices=["Average", "Minimum", "Maximum", "SampleCount", "Sum"]), - dimensions=dict(type="list", elements="dict"), - unit=dict(type="str"), - ) - - target_tracking_spec = dict( - disable_scalein=dict(type="bool"), - target_value=dict(type="float", required=True), - predefined_metric_spec=dict(type="dict", options=predefined_metric_spec), - customized_metric_spec=dict(type="dict", options=customized_metric_spec), - ) - - argument_spec = dict( - name=dict(required=True), - adjustment_type=dict(choices=["ChangeInCapacity", "ExactCapacity", "PercentChangeInCapacity"]), - asg_name=dict(), - scaling_adjustment=dict(type="int"), - min_adjustment_step=dict(type="int"), - cooldown=dict(type="int"), - state=dict(default="present", choices=["present", "absent"]), - metric_aggregation=dict(default="Average", choices=["Minimum", "Maximum", "Average"]), -
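# Note: only the asg_name requirement can be expressed via required_if below; - # SimpleScaling, StepScaling and TargetTrackingScaling each validate their own - # required option inside create_scaling_policy(), since required_if does not - # allow multiple criteria. -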
policy_type=dict(default="SimpleScaling", choices=["SimpleScaling", "StepScaling", "TargetTrackingScaling"]), - target_tracking_config=dict(type="dict", options=target_tracking_spec), - step_adjustments=dict(type="list", options=step_adjustment_spec, elements="dict"), - estimated_instance_warmup=dict(type="int"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["asg_name"]]]) - - connection = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get("state") - - if state == "present": - create_scaling_policy(connection, module) - elif state == "absent": - delete_scaling_policy(connection, module) - - -if __name__ == "__main__": - main() diff --git a/autoscaling_scheduled_action.py b/autoscaling_scheduled_action.py deleted file mode 100644 index 9bfb70b8330..00000000000 --- a/autoscaling_scheduled_action.py +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py -# (c) 2016, Mike Mochan <@mmochan> - -DOCUMENTATION = r""" ---- -module: autoscaling_scheduled_action -version_added: 2.2.0 -short_description: Create, modify and delete ASG scheduled scaling actions -description: - - The module will create a new scheduled action when I(state=present) and the given action is not found. - - The module will update an existing scheduled action when I(state=present) and the given action is found. - - The module will delete an existing scheduled action when I(state=absent) and the given action is found. - - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_scheduled_action). The usage did not change. -options: - autoscaling_group_name: - description: - - The name of the autoscaling group to add a scheduled action to. - type: str - required: true - scheduled_action_name: - description: - - The name of the scheduled action. - type: str - required: true - start_time: - description: - - Start time for the action. - type: str - end_time: - description: - - End time for the action. - type: str - time_zone: - description: - - Time zone to run against. - type: str - recurrence: - description: - - Cron style schedule to repeat the action on. - - Required when I(state=present). - type: str - min_size: - description: - - ASG min capacity. - type: int - max_size: - description: - - ASG max capacity. - type: int - desired_capacity: - description: - - ASG desired capacity. - type: int - state: - description: - - Create / update or delete scheduled action. - type: str - required: false - default: present - choices: ['present', 'absent'] -author: - - Mark Woolley (@marknet15) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Create a scheduled action for an autoscaling group.
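-# recurrence is only required when state=present (the module enforces this -# through its required_if rule), so deletion tasks can omit it.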
-- name: Create a minimal scheduled action for an autoscaling group - community.aws.autoscaling_scheduled_action: - region: eu-west-1 - autoscaling_group_name: test_asg - scheduled_action_name: test_scheduled_action - start_time: 2021 October 25 08:00 UTC - recurrence: 40 22 * * 1-5 - desired_capacity: 10 - state: present - register: scheduled_action - -- name: Create a scheduled action for an autoscaling group - community.aws.autoscaling_scheduled_action: - region: eu-west-1 - autoscaling_group_name: test_asg - scheduled_action_name: test_scheduled_action - start_time: 2021 October 25 08:00 UTC - end_time: 2021 October 25 08:00 UTC - time_zone: Europe/London - recurrence: 40 22 * * 1-5 - min_size: 10 - max_size: 15 - desired_capacity: 10 - state: present - register: scheduled_action - -- name: Delete scheduled action - community.aws.autoscaling_scheduled_action: - region: eu-west-1 - autoscaling_group_name: test_asg - scheduled_action_name: test_scheduled_action - state: absent -""" - -RETURN = r""" -scheduled_action_name: - description: The name of the scheduled action. - returned: when I(state=present) - type: str - sample: test_scheduled_action -start_time: - description: Start time for the action. - returned: when I(state=present) - type: str - sample: '2021 October 25 08:00 UTC' -end_time: - description: End time for the action. - returned: when I(state=present) - type: str - sample: '2021 October 25 08:00 UTC' -time_zone: - description: The time zone for the scheduled action. - returned: when I(state=present) - type: str - sample: Europe/London -recurrence: - description: Cron style schedule to repeat the action on. - returned: when I(state=present) - type: str - sample: '40 22 * * 1-5' -min_size: - description: ASG min capacity. - returned: when I(state=present) - type: int - sample: 1 -max_size: - description: ASG max capacity. - returned: when I(state=present) - type: int - sample: 2 -desired_capacity: - description: ASG desired capacity.
- returned: when I(state=present) - type: int - sample: 1 -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -try: - from dateutil.parser import parse as timedate_parse - - HAS_DATEUTIL = True -except ImportError: - HAS_DATEUTIL = False - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def format_request(): - params = dict( - AutoScalingGroupName=module.params.get("autoscaling_group_name"), - ScheduledActionName=module.params.get("scheduled_action_name"), - Recurrence=module.params.get("recurrence"), - ) - - # Some of these params are optional - if module.params.get("desired_capacity") is not None: - params["DesiredCapacity"] = module.params.get("desired_capacity") - - if module.params.get("min_size") is not None: - params["MinSize"] = module.params.get("min_size") - - if module.params.get("max_size") is not None: - params["MaxSize"] = module.params.get("max_size") - - if module.params.get("time_zone") is not None: - params["TimeZone"] = module.params.get("time_zone") - - if module.params.get("start_time") is not None: - params["StartTime"] = module.params.get("start_time") - - if module.params.get("end_time") is not None: - params["EndTime"] = module.params.get("end_time") - - return params - - -def delete_scheduled_action(current_actions): - if current_actions == []: - return False - - if module.check_mode: - return True - - params = dict( - AutoScalingGroupName=module.params.get("autoscaling_group_name"), - ScheduledActionName=module.params.get("scheduled_action_name"), - ) - - try: - client.delete_scheduled_action(aws_retry=True, **params) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - return True - - -def get_scheduled_actions(): - params = dict( - AutoScalingGroupName=module.params.get("autoscaling_group_name"), - ScheduledActionNames=[module.params.get("scheduled_action_name")], - ) - - try: - actions = client.describe_scheduled_actions(aws_retry=True, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - current_actions = actions.get("ScheduledUpdateGroupActions") - - return current_actions - - -def put_scheduled_update_group_action(current_actions): - changed = False - changes = dict() - params = format_request() - - if len(current_actions) < 1: - changed = True - else: - # To correctly detect changes convert the start_time & end_time to datetime object - if "StartTime" in params: - params["StartTime"] = timedate_parse(params["StartTime"]) - if "EndTime" in params: - params["EndTime"] = timedate_parse(params["EndTime"]) - - for k, v in params.items(): - if current_actions[0].get(k) != v: - changes[k] = v - - if changes: - changed = True - - if module.check_mode: - return changed - - try: - client.put_scheduled_update_group_action(aws_retry=True, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - return changed - - -def main(): - global module - global client - - argument_spec = dict( - autoscaling_group_name=dict(required=True, type="str"), - scheduled_action_name=dict(required=True, type="str"), - start_time=dict(default=None, type="str"), - end_time=dict(default=None, type="str"), - time_zone=dict(default=None, type="str"), - recurrence=dict(type="str"), - min_size=dict(default=None, type="int"), - max_size=dict(default=None, type="int"), - desired_capacity=dict(default=None, 
type="int"), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, required_if=[["state", "present", ["recurrence"]]], supports_check_mode=True - ) - - if not HAS_DATEUTIL: - module.fail_json(msg="dateutil is required for this module") - - if not module.botocore_at_least("1.20.24"): - module.fail_json(msg="botocore version >= 1.20.24 is required for this module") - - client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) - current_actions = get_scheduled_actions() - state = module.params.get("state") - results = dict() - - if state == "present": - changed = put_scheduled_update_group_action(current_actions) - if not module.check_mode: - updated_action = get_scheduled_actions()[0] - results = dict( - scheduled_action_name=updated_action.get("ScheduledActionName"), - start_time=updated_action.get("StartTime"), - end_time=updated_action.get("EndTime"), - time_zone=updated_action.get("TimeZone"), - recurrence=updated_action.get("Recurrence"), - min_size=updated_action.get("MinSize"), - max_size=updated_action.get("MaxSize"), - desired_capacity=updated_action.get("DesiredCapacity"), - ) - else: - changed = delete_scheduled_action(current_actions) - - results["changed"] = changed - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/aws_region_info.py b/aws_region_info.py deleted file mode 100644 index a268c13b3c8..00000000000 --- a/aws_region_info.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: aws_region_info -short_description: Gather information about AWS regions -version_added: 1.0.0 -description: - - Gather information about AWS regions. -author: - - 'Henrique Rodrigues (@Sodki)' -options: - filters: - description: - - A dict of filters to apply. - - Each dict item consists of a filter key and a filter value. - - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters. - - Filter names and values are case sensitive. - - You can use underscores instead of dashes (-) in the filter keys. - - Filter keys with underscores will take precedence in case of conflict. - default: {} - type: dict -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Gather information about all regions -- community.aws.aws_region_info: - -# Gather information about a single region -- community.aws.aws_region_info: - filters: - region-name: eu-west-1 -""" - -RETURN = r""" -regions: - returned: on success - description: > - Regions that match the provided filters. Each element consists of a dict with all the information related - to that region. 
- type: list - sample: "[{ - 'endpoint': 'ec2.us-west-1.amazonaws.com', - 'region_name': 'us-west-1' - }]" -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - filters=dict(default={}, type="dict"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - - # Replace filter key underscores with dashes, for compatibility - sanitized_filters = dict(module.params.get("filters")) - for k in module.params.get("filters").keys(): - if "_" in k: - sanitized_filters[k.replace("_", "-")] = sanitized_filters[k] - del sanitized_filters[k] - - try: - regions = connection.describe_regions( - aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe regions.") - - module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions["Regions"]]) - - -if __name__ == "__main__": - main() diff --git a/batch_compute_environment.py b/batch_compute_environment.py deleted file mode 100644 index d7ee4ebc1f5..00000000000 --- a/batch_compute_environment.py +++ /dev/null @@ -1,497 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Jon Meran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: batch_compute_environment -version_added: 1.0.0 -short_description: Manage AWS Batch Compute Environments -description: - - This module allows the management of AWS Batch Compute Environments. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.batch_compute_environment) to manage the compute - environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions. - - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_compute_environment). - The usage did not change. -author: - - Jon Meran (@jonmer85) -options: - compute_environment_name: - description: - - The name for your compute environment. - - Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed. - required: true - type: str - type: - description: - - The type of the compute environment. - required: true - choices: ["MANAGED", "UNMANAGED"] - type: str - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - compute_environment_state: - description: - - The state of the compute environment. - - If the state is C(ENABLED), then the compute environment accepts jobs - from a queue and can scale out automatically based on queues. - default: "ENABLED" - choices: ["ENABLED", "DISABLED"] - type: str - service_role: - description: - - The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS - services on your behalf. 
- required: true - type: str - compute_resource_type: - description: - - The type of compute resource. - required: true - choices: ["EC2", "SPOT"] - type: str - minv_cpus: - description: - - The minimum number of EC2 vCPUs that an environment should maintain. - required: true - type: int - maxv_cpus: - description: - - The maximum number of EC2 vCPUs that an environment can reach. - required: true - type: int - desiredv_cpus: - description: - - The desired number of EC2 vCPUS in the compute environment. - type: int - instance_types: - description: - - The instance types that may be launched. - required: true - type: list - elements: str - image_id: - description: - - The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. - type: str - subnets: - description: - - The VPC subnets into which the compute resources are launched. - required: true - type: list - elements: str - security_group_ids: - description: - - The EC2 security groups that are associated with instances launched in the compute environment. - required: true - type: list - elements: str - ec2_key_pair: - description: - - The EC2 key pair that is used for instances launched in the compute environment. - type: str - instance_role: - description: - - The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment. - required: true - type: str - tags: - description: - - Key-value pair tags to be applied to resources that are launched in the compute environment. - type: dict - bid_percentage: - description: - - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that - instance type before instances are launched. - - For example, if your bid percentage is 20%, then the Spot price - must be below 20% of the current On-Demand price for that EC2 instance. - type: int - spot_iam_fleet_role: - description: - - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: My Batch Compute Environment - community.aws.batch_compute_environment: - compute_environment_name: computeEnvironmentName - state: present - region: us-east-1 - compute_environment_state: ENABLED - type: MANAGED - compute_resource_type: EC2 - minv_cpus: 0 - maxv_cpus: 2 - desiredv_cpus: 1 - instance_types: - - optimal - subnets: - - my-subnet1 - - my-subnet2 - security_group_ids: - - my-sg1 - - my-sg2 - instance_role: arn:aws:iam:::instance-profile/ - tags: - tag1: value1 - tag2: value2 - service_role: arn:aws:iam:::role/service-role/ - register: aws_batch_compute_environment_action - -- name: show results - ansible.builtin.debug: - var: aws_batch_compute_environment_action -""" - -RETURN = r""" ---- -output: - description: "returns what action was taken, whether something was changed, invocation and response" - returned: always - sample: - batch_compute_environment_action: none - changed: false - invocation: - module_args: - access_key: ~ - secret_key: ~ - bid_percentage: ~ - compute_environment_name: - compute_environment_state: ENABLED - compute_resource_type: EC2 - desiredv_cpus: 0 - ec2_key_pair: ~ - endpoint_url: ~ - image_id: ~ - instance_role: "arn:aws:iam::..." - instance_types: - - optimal - maxv_cpus: 8 - minv_cpus: 0 - profile: ~ - region: us-east-1 - security_group_ids: - - "*******" - security_token: ~ - service_role: "arn:aws:iam::...." 
- spot_iam_fleet_role: ~ - state: present - subnets: - - "******" - tags: - Environment: - Name: - type: MANAGED - validate_certs: true - response: - computeEnvironmentArn: "arn:aws:batch:...." - computeEnvironmentName: - computeResources: - desiredvCpus: 0 - instanceRole: "arn:aws:iam::..." - instanceTypes: - - optimal - maxvCpus: 8 - minvCpus: 0 - securityGroupIds: - - "******" - subnets: - - "*******" - tags: - Environment: - Name: - type: EC2 - ecsClusterArn: "arn:aws:ecs:....." - serviceRole: "arn:aws:iam::..." - state: ENABLED - status: VALID - statusReason: "ComputeEnvironment Healthy" - type: MANAGED - type: dict -""" - -import re - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# --------------------------------------------------------------------------------------------------- -# -# Helper Functions & classes -# -# --------------------------------------------------------------------------------------------------- - - -def set_api_params(module, module_params): - """ - Sets module parameters to those expected by the boto3 API. - - :param module: - :param module_params: - :return: - """ - api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None) - return snake_dict_to_camel_dict(api_params) - - -def validate_params(module): - """ - Performs basic parameter validation. - - :param module: - :return: - """ - - compute_environment_name = module.params["compute_environment_name"] - - # validate compute environment name - if not re.search(r"^[\w\_:]+$", compute_environment_name): - module.fail_json( - msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores." 
- ) - if not validate_aws_arn(compute_environment_name, service="batch"): - if len(compute_environment_name) > 128: - module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit') - - return - - -# --------------------------------------------------------------------------------------------------- -# -# Batch Compute Environment functions -# -# --------------------------------------------------------------------------------------------------- - - -def get_current_compute_environment(module, client): - try: - environments = client.describe_compute_environments( - computeEnvironments=[module.params["compute_environment_name"]] - ) - if len(environments["computeEnvironments"]) > 0: - return environments["computeEnvironments"][0] - else: - return None - except ClientError: - return None - - -def create_compute_environment(module, client): - """ - Adds a Batch compute environment - - :param module: - :param client: - :return: - """ - - changed = False - - # set API parameters - params = ("compute_environment_name", "type", "service_role") - api_params = set_api_params(module, params) - - if module.params["compute_environment_state"] is not None: - api_params["state"] = module.params["compute_environment_state"] - - compute_resources_param_list = ( - "minv_cpus", - "maxv_cpus", - "desiredv_cpus", - "instance_types", - "image_id", - "subnets", - "security_group_ids", - "ec2_key_pair", - "instance_role", - "tags", - "bid_percentage", - "spot_iam_fleet_role", - ) - compute_resources_params = set_api_params(module, compute_resources_param_list) - - if module.params["compute_resource_type"] is not None: - compute_resources_params["type"] = module.params["compute_resource_type"] - - # if module.params['minv_cpus'] is not None: - # compute_resources_params['minvCpus'] = module.params['minv_cpus'] - - api_params["computeResources"] = compute_resources_params - - try: - if not module.check_mode: - client.create_compute_environment(**api_params) - changed = True - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Error creating compute environment") - - return changed - - -def remove_compute_environment(module, client): - """ - Remove a Batch compute environment - - :param module: - :param client: - :return: - """ - - changed = False - - # set API parameters - api_params = {"computeEnvironment": module.params["compute_environment_name"]} - - try: - if not module.check_mode: - client.delete_compute_environment(**api_params) - changed = True - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Error removing compute environment") - return changed - - -def manage_state(module, client): - changed = False - current_state = "absent" - state = module.params["state"] - compute_environment_state = module.params["compute_environment_state"] - compute_environment_name = module.params["compute_environment_name"] - service_role = module.params["service_role"] - minv_cpus = module.params["minv_cpus"] - maxv_cpus = module.params["maxv_cpus"] - desiredv_cpus = module.params["desiredv_cpus"] - action_taken = "none" - update_env_response = "" - - check_mode = module.check_mode - - # check if the compute environment exists - current_compute_environment = get_current_compute_environment(module, client) - response = current_compute_environment - if current_compute_environment: - current_state = "present" - - if state == "present": - if current_state == "present": - updates = False - # Update Batch Compute Environment configuration - 
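            # Only a few settings are changed in place here: the environment
            # state, the service role, and the min/max/desired vCPU counts.
            # Other compute resource settings (instance types, subnets, and so
            # on) are left untouched by this module. A rough sketch of the
            # boto3 call this block builds up (example values assumed):
            #
            #   client.update_compute_environment(
            #       computeEnvironment="computeEnvironmentName",
            #       state="ENABLED",
            #       computeResources={"minvCpus": 0, "maxvCpus": 4, "desiredvCpus": 2},
            #   )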
compute_kwargs = {"computeEnvironment": compute_environment_name} - - # Update configuration if needed - compute_resources = {} - if compute_environment_state and current_compute_environment["state"] != compute_environment_state: - compute_kwargs.update({"state": compute_environment_state}) - updates = True - if service_role and current_compute_environment["serviceRole"] != service_role: - compute_kwargs.update({"serviceRole": service_role}) - updates = True - if minv_cpus is not None and current_compute_environment["computeResources"]["minvCpus"] != minv_cpus: - compute_resources["minvCpus"] = minv_cpus - if maxv_cpus is not None and current_compute_environment["computeResources"]["maxvCpus"] != maxv_cpus: - compute_resources["maxvCpus"] = maxv_cpus - if ( - desiredv_cpus is not None - and current_compute_environment["computeResources"]["desiredvCpus"] != desiredv_cpus - ): - compute_resources["desiredvCpus"] = desiredv_cpus - if len(compute_resources) > 0: - compute_kwargs["computeResources"] = compute_resources - updates = True - if updates: - try: - if not check_mode: - update_env_response = client.update_compute_environment(**compute_kwargs) - if not update_env_response: - module.fail_json(msg="Unable to get compute environment information after creating") - changed = True - action_taken = "updated" - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to update environment.") - - else: - # Create Batch Compute Environment - changed = create_compute_environment(module, client) - # Describe compute environment - action_taken = "added" - response = get_current_compute_environment(module, client) - if not response: - module.fail_json(msg="Unable to get compute environment information after creating") - else: - if current_state == "present": - # remove the compute environment - changed = remove_compute_environment(module, client) - action_taken = "deleted" - return dict(changed=changed, batch_compute_environment_action=action_taken, response=response) - - -# --------------------------------------------------------------------------------------------------- -# -# MAIN -# -# --------------------------------------------------------------------------------------------------- - - -def main(): - """ - Main entry point. 
- - :return dict: changed, batch_compute_environment_action, response - """ - - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - compute_environment_name=dict(required=True), - type=dict(required=True, choices=["MANAGED", "UNMANAGED"]), - compute_environment_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), - service_role=dict(required=True), - compute_resource_type=dict(required=True, choices=["EC2", "SPOT"]), - minv_cpus=dict(type="int", required=True), - maxv_cpus=dict(type="int", required=True), - desiredv_cpus=dict(type="int"), - instance_types=dict(type="list", required=True, elements="str"), - image_id=dict(), - subnets=dict(type="list", required=True, elements="str"), - security_group_ids=dict(type="list", required=True, elements="str"), - ec2_key_pair=dict(no_log=False), - instance_role=dict(required=True), - tags=dict(type="dict"), - bid_percentage=dict(type="int"), - spot_iam_fleet_role=dict(), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - client = module.client("batch") - - validate_params(module) - - results = manage_state(module, client) - - module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=["Tags"])) - - -if __name__ == "__main__": - main() diff --git a/batch_job_definition.py b/batch_job_definition.py deleted file mode 100644 index 9ea5dc8cefa..00000000000 --- a/batch_job_definition.py +++ /dev/null @@ -1,470 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Jon Meran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: batch_job_definition -version_added: 1.0.0 -short_description: Manage AWS Batch Job Definitions -description: - - This module allows the management of AWS Batch Job Definitions. - - It is idempotent and supports "Check" mode. - - Use module M(community.aws.batch_compute_environment) to manage the compute - environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions. - - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_definition). - The usage did not change. -author: - - Jon Meran (@jonmer85) -options: - job_definition_arn: - description: - - The ARN for the job definition. - type: str - job_definition_name: - description: - - The name for the job definition. - required: true - type: str - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - type: - description: - - The type of job definition. - required: true - type: str - parameters: - description: - - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a - key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from - the job definition. - type: dict - image: - description: - - > - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker - Hub registry are available by default. Other repositories are specified with C(repository-url/image-name:tag). - Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, - and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker - Remote API and the IMAGE parameter of docker run. 
- required: true - type: str - vcpus: - description: - - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container - section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to - 1,024 CPU shares. - required: true - type: int - memory: - description: - - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory - specified here, the container is killed. This parameter maps to Memory in the Create a container section of the - Docker Remote API and the --memory option to docker run. - required: true - type: int - command: - description: - - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of - the Docker Remote API and the COMMAND parameter to docker run. For more information, - see U(https://docs.docker.com/engine/reference/builder/#cmd). - type: list - elements: str - default: [] - job_role_arn: - description: - - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. - type: str - volumes: - description: - - A list of data volumes used in a job. - suboptions: - host: - description: - - The contents of the host parameter determine whether your data volume persists on the host container - instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host - path for your data volume, but the data is not guaranteed to persist after the containers associated with - it stop running. - This is a dictionary with one property, sourcePath - The path on the host container - instance that is presented to the container. If this parameter is empty,then the Docker daemon has assigned - a host path for you. If the host parameter contains a sourcePath file location, then the data volume - persists at the specified location on the host container instance until you delete it manually. If the - sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the - location does exist, the contents of the source path folder are exported. - name: - description: - - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are - allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. - type: list - elements: dict - default: [] - environment: - description: - - The environment variables to pass to a container. This parameter maps to Env in the Create a container section - of the Docker Remote API and the --env option to docker run. - suboptions: - name: - description: - - The name of the key value pair. For environment variables, this is the name of the environment variable. - value: - description: - - The value of the key value pair. For environment variables, this is the value of the environment variable. - type: list - elements: dict - default: [] - mount_points: - description: - - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container - section of the Docker Remote API and the --volume option to docker run. - suboptions: - containerPath: - description: - - The path on the container at which to mount the host volume. - readOnly: - description: - - If this value is true , the container has read-only access to the volume; otherwise, the container can write - to the volume. The default value is C(false). 
- sourceVolume: - description: - - The name of the volume to mount. - type: list - elements: dict - default: [] - readonly_root_filesystem: - description: - - When this parameter is true, the container is given read-only access to its root file system. This parameter - maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option - to docker run. - type: str - privileged: - description: - - When this parameter is true, the container is given elevated privileges on the host container instance - (similar to the root user). This parameter maps to Privileged in the Create a container section of the - Docker Remote API and the --privileged option to docker run. - type: str - ulimits: - description: - - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section - of the Docker Remote API and the --ulimit option to docker run. - suboptions: - hardLimit: - description: - - The hard limit for the ulimit type. - name: - description: - - The type of the ulimit. - softLimit: - description: - - The soft limit for the ulimit type. - type: list - elements: dict - default: [] - user: - description: - - The user name to use inside the container. This parameter maps to User in the Create a container section of - the Docker Remote API and the --user option to docker run. - type: str - attempts: - description: - - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 - attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that - many times. - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" ---- -- name: My Batch Job Definition - community.aws.batch_job_definition: - job_definition_name: My Batch Job Definition - state: present - type: container - parameters: - Param1: Val1 - Param2: Val2 - image: - vcpus: 1 - memory: 512 - command: - - python - - run_my_script.py - - arg1 - job_role_arn: - attempts: 3 - register: job_definition_create_result - -- name: show results - ansible.builtin.debug: var=job_definition_create_result -""" - -RETURN = r""" ---- -output: - description: "returns what action was taken, whether something was changed, invocation and response" - returned: always - sample: - aws_batch_job_definition_action: none - changed: false - response: - job_definition_arn: "arn:aws:batch:...." 
- job_definition_name: - status: INACTIVE - type: container - type: dict -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.batch import cc -from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# --------------------------------------------------------------------------------------------------- -# -# Helper Functions & classes -# -# --------------------------------------------------------------------------------------------------- - -# logger = logging.getLogger() -# logging.basicConfig(filename='ansible_debug.log') -# logger.setLevel(logging.DEBUG) - - -def validate_params(module, batch_client): - """ - Performs basic parameter validation. - - :param module: - :param batch_client: - :return: - """ - return - - -# --------------------------------------------------------------------------------------------------- -# -# Batch Job Definition functions -# -# --------------------------------------------------------------------------------------------------- - - -def get_current_job_definition(module, batch_client): - try: - environments = batch_client.describe_job_definitions(jobDefinitionName=module.params["job_definition_name"]) - if len(environments["jobDefinitions"]) > 0: - latest_revision = max(map(lambda d: d["revision"], environments["jobDefinitions"])) - latest_definition = next( - (x for x in environments["jobDefinitions"] if x["revision"] == latest_revision), None - ) - return latest_definition - return None - except ClientError: - return None - - -def create_job_definition(module, batch_client): - """ - Adds a Batch job definition - - :param module: - :param batch_client: - :return: - """ - - changed = False - - # set API parameters - api_params = set_api_params(module, get_base_params()) - container_properties_params = set_api_params(module, get_container_property_params()) - retry_strategy_params = set_api_params(module, get_retry_strategy_params()) - - api_params["retryStrategy"] = retry_strategy_params - api_params["containerProperties"] = container_properties_params - - try: - if not module.check_mode: - batch_client.register_job_definition(**api_params) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Error registering job definition") - - return changed - - -def get_retry_strategy_params(): - return ("attempts",) - - -def get_container_property_params(): - return ( - "image", - "vcpus", - "memory", - "command", - "job_role_arn", - "volumes", - "environment", - "mount_points", - "readonly_root_filesystem", - "privileged", - "ulimits", - "user", - ) - - -def get_base_params(): - return "job_definition_name", "type", "parameters" - - -def get_compute_environment_order_list(module): - compute_environment_order_list = [] - for ceo in module.params["compute_environment_order"]: - compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) - return compute_environment_order_list - - -def remove_job_definition(module, batch_client): - """ - Remove a Batch job definition - - :param module: - :param batch_client: - :return: - """ - - changed = False - - try: - if not 
module.check_mode: - batch_client.deregister_job_definition(jobDefinition=module.params["job_definition_arn"]) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Error removing job definition") - return changed - - -def job_definition_equal(module, current_definition): - equal = True - - for param in get_base_params(): - if module.params.get(param) != current_definition.get(cc(param)): - equal = False - break - - for param in get_container_property_params(): - if module.params.get(param) != current_definition.get("containerProperties").get(cc(param)): - equal = False - break - - for param in get_retry_strategy_params(): - if module.params.get(param) != current_definition.get("retryStrategy").get(cc(param)): - equal = False - break - - return equal - - -def manage_state(module, batch_client): - changed = False - current_state = "absent" - state = module.params["state"] - job_definition_name = module.params["job_definition_name"] - action_taken = "none" - response = None - - check_mode = module.check_mode - - # check if the job definition exists - current_job_definition = get_current_job_definition(module, batch_client) - if current_job_definition: - current_state = "present" - - if state == "present": - if current_state == "present": - # check if definition has changed and register a new version if necessary - if not job_definition_equal(module, current_job_definition): - create_job_definition(module, batch_client) - action_taken = "updated with new version" - changed = True - else: - # Create Job definition - changed = create_job_definition(module, batch_client) - action_taken = "added" - - response = get_current_job_definition(module, batch_client) - if not response: - module.fail_json(msg="Unable to get job definition information after creating/updating") - else: - if current_state == "present": - # remove the Job definition - changed = remove_job_definition(module, batch_client) - action_taken = "deregistered" - return dict(changed=changed, batch_job_definition_action=action_taken, response=response) - - -# --------------------------------------------------------------------------------------------------- -# -# MAIN -# -# --------------------------------------------------------------------------------------------------- - - -def main(): - """ - Main entry point. 
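    AWS Batch job definitions are versioned: an in-place "update" registers a
    new revision of the definition, and state=absent deregisters it.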
-
-    :return dict: changed, batch_job_definition_action, response
-    """
-
-    argument_spec = dict(
-        state=dict(required=False, default="present", choices=["present", "absent"]),
-        job_definition_name=dict(required=True),
-        job_definition_arn=dict(),
-        type=dict(required=True),
-        parameters=dict(type="dict"),
-        image=dict(required=True),
-        vcpus=dict(type="int", required=True),
-        memory=dict(type="int", required=True),
-        command=dict(type="list", default=[], elements="str"),
-        job_role_arn=dict(),
-        volumes=dict(type="list", default=[], elements="dict"),
-        environment=dict(type="list", default=[], elements="dict"),
-        mount_points=dict(type="list", default=[], elements="dict"),
-        readonly_root_filesystem=dict(),
-        privileged=dict(),
-        ulimits=dict(type="list", default=[], elements="dict"),
-        user=dict(),
-        attempts=dict(type="int"),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    batch_client = module.client("batch")
-
-    validate_params(module, batch_client)
-
-    results = manage_state(module, batch_client)
-
-    module.exit_json(**camel_dict_to_snake_dict(results))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/batch_job_queue.py b/batch_job_queue.py
deleted file mode 100644
index c9e253d0652..00000000000
--- a/batch_job_queue.py
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017 Jon Meran
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: batch_job_queue
-version_added: 1.0.0
-short_description: Manage AWS Batch Job Queues
-description:
-  - This module allows the management of AWS Batch Job Queues.
-  - It is idempotent and supports "Check" mode.
-  - Use module M(community.aws.batch_compute_environment) to manage the compute
-    environment, M(community.aws.batch_job_queue) to manage job queues, M(community.aws.batch_job_definition) to manage job definitions.
-  - Prior to release 5.0.0 this module was called C(community.aws.aws_batch_job_queue).
-    The usage did not change.
-author:
-  - Jon Meran (@jonmer85)
-options:
-  job_queue_name:
-    description:
-      - The name for the job queue.
-    required: true
-    type: str
-  state:
-    description:
-      - Describes the desired state.
-    default: "present"
-    choices: ["present", "absent"]
-    type: str
-  job_queue_state:
-    description:
-      - The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs.
-    default: "ENABLED"
-    choices: ["ENABLED", "DISABLED"]
-    type: str
-  priority:
-    description:
-      - The priority of the job queue. Job queues with a higher priority (a higher integer value for the priority
-        parameter) are evaluated first when associated with the same compute environment. Priority is determined in
-        descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job
-        queue with a priority value of 1.
-    required: true
-    type: int
-  compute_environment_order:
-    description:
-      - The set of compute environments mapped to a job queue and their order relative to each other. The job
-        scheduler uses this parameter to determine which compute environment should execute a given job. Compute
-        environments must be in the VALID state before you can associate them with a job queue. You can associate up to
-        3 compute environments with a job queue.
-    required: true
-    type: list
-    elements: dict
-    suboptions:
-      order:
-        type: int
-        description: The order of the compute environment; environments with a lower order value are tried first.
- compute_environment: - type: str - description: The name of the compute environment. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: My Batch Job Queue - community.aws.batch_job_queue: - job_queue_name: jobQueueName - state: present - region: us-east-1 - job_queue_state: ENABLED - priority: 1 - compute_environment_order: - - order: 1 - compute_environment: my_compute_env1 - - order: 2 - compute_environment: my_compute_env2 - register: batch_job_queue_action - -- name: show results - ansible.builtin.debug: - var: batch_job_queue_action -""" - -RETURN = r""" ---- -output: - description: "returns what action was taken, whether something was changed, invocation and response" - returned: always - sample: - batch_job_queue_action: updated - changed: false - response: - job_queue_arn: "arn:aws:batch:...." - job_queue_name: - priority: 1 - state: DISABLED - status: UPDATING - status_reason: "JobQueue Healthy" - type: dict -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - -# --------------------------------------------------------------------------------------------------- -# -# Helper Functions & classes -# -# --------------------------------------------------------------------------------------------------- - - -def validate_params(module): - """ - Performs basic parameter validation. 
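    Currently a no-op: required fields and value choices are already enforced
    by the argument spec, so no queue-specific checks are performed here.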
- - :param module: - """ - return - - -# --------------------------------------------------------------------------------------------------- -# -# Batch Job Queue functions -# -# --------------------------------------------------------------------------------------------------- - - -def get_current_job_queue(module, client): - try: - environments = client.describe_job_queues(jobQueues=[module.params["job_queue_name"]]) - return environments["jobQueues"][0] if len(environments["jobQueues"]) > 0 else None - except ClientError: - return None - - -def create_job_queue(module, client): - """ - Adds a Batch job queue - - :param module: - :param client: - :return: - """ - - changed = False - - # set API parameters - params = ("job_queue_name", "priority") - api_params = set_api_params(module, params) - - if module.params["job_queue_state"] is not None: - api_params["state"] = module.params["job_queue_state"] - - api_params["computeEnvironmentOrder"] = get_compute_environment_order_list(module) - - try: - if not module.check_mode: - client.create_job_queue(**api_params) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Error creating compute environment") - - return changed - - -def get_compute_environment_order_list(module): - compute_environment_order_list = [] - for ceo in module.params["compute_environment_order"]: - compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"])) - return compute_environment_order_list - - -def remove_job_queue(module, client): - """ - Remove a Batch job queue - - :param module: - :param client: - :return: - """ - - changed = False - - # set API parameters - api_params = {"jobQueue": module.params["job_queue_name"]} - - try: - if not module.check_mode: - client.delete_job_queue(**api_params) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Error removing job queue") - return changed - - -def manage_state(module, client): - changed = False - current_state = "absent" - state = module.params["state"] - job_queue_state = module.params["job_queue_state"] - job_queue_name = module.params["job_queue_name"] - priority = module.params["priority"] - action_taken = "none" - response = None - - check_mode = module.check_mode - - # check if the job queue exists - current_job_queue = get_current_job_queue(module, client) - if current_job_queue: - current_state = "present" - - if state == "present": - if current_state == "present": - updates = False - # Update Batch Job Queue configuration - job_kwargs = {"jobQueue": job_queue_name} - - # Update configuration if needed - if job_queue_state and current_job_queue["state"] != job_queue_state: - job_kwargs.update({"state": job_queue_state}) - updates = True - if priority is not None and current_job_queue["priority"] != priority: - job_kwargs.update({"priority": priority}) - updates = True - - new_compute_environment_order_list = get_compute_environment_order_list(module) - if new_compute_environment_order_list != current_job_queue["computeEnvironmentOrder"]: - job_kwargs["computeEnvironmentOrder"] = new_compute_environment_order_list - updates = True - - if updates: - try: - if not check_mode: - client.update_job_queue(**job_kwargs) - changed = True - action_taken = "updated" - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to update job queue") - - else: - # Create Job Queue - changed = create_job_queue(module, client) - action_taken = "added" - - # Describe job 
queue - response = get_current_job_queue(module, client) - if not response: - module.fail_json(msg="Unable to get job queue information after creating/updating") - else: - if current_state == "present": - # remove the Job Queue - changed = remove_job_queue(module, client) - action_taken = "deleted" - return dict(changed=changed, batch_job_queue_action=action_taken, response=response) - - -# --------------------------------------------------------------------------------------------------- -# -# MAIN -# -# --------------------------------------------------------------------------------------------------- - - -def main(): - """ - Main entry point. - - :return dict: changed, batch_job_queue_action, response - """ - - argument_spec = dict( - state=dict(required=False, default="present", choices=["present", "absent"]), - job_queue_name=dict(required=True), - job_queue_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]), - priority=dict(type="int", required=True), - compute_environment_order=dict(type="list", required=True, elements="dict"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - client = module.client("batch") - - validate_params(module) - - results = manage_state(module, client) - - module.exit_json(**camel_dict_to_snake_dict(results)) - - -if __name__ == "__main__": - main() diff --git a/cloudformation_exports_info.py b/cloudformation_exports_info.py deleted file mode 100644 index cf769606d47..00000000000 --- a/cloudformation_exports_info.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: cloudformation_exports_info -short_description: Read a value from CloudFormation Exports -version_added: 1.0.0 -description: - - Module retrieves a value from CloudFormation Exports -author: - - "Michael Moyle (@mmoyle)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. -- name: Get Exports - community.aws.cloudformation_exports_info: {} - register: cf_exports -- ansible.builtin.debug: - msg: "{{ cf_exports }}" -""" - -RETURN = r""" -export_items: - description: A dictionary of Exports items names and values. 
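# With the registered result from the EXAMPLES above, a single value can then
# be read as "{{ cf_exports.export_items['MyExport'] }}" (export name assumed).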
- returned: Always - type: dict -""" - -try: - from botocore.exceptions import ClientError - from botocore.exceptions import BotoCoreError -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.exponential_backoff() -def list_exports(cloudformation_client): - """Get Exports Names and Values and return in dictionary""" - list_exports_paginator = cloudformation_client.get_paginator("list_exports") - exports = list_exports_paginator.paginate().build_full_result()["Exports"] - export_items = dict() - - for item in exports: - export_items[item["Name"]] = item["Value"] - - return export_items - - -def main(): - argument_spec = dict() - result = dict(changed=False, original_message="") - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - cloudformation_client = module.client("cloudformation") - - try: - result["export_items"] = list_exports(cloudformation_client) - - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e) - - result.update() - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/cloudformation_stack_set.py b/cloudformation_stack_set.py deleted file mode 100644 index 17e888b4f1b..00000000000 --- a/cloudformation_stack_set.py +++ /dev/null @@ -1,787 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: cloudformation_stack_set -version_added: 1.0.0 -short_description: Manage groups of CloudFormation stacks -description: - - Launches/updates/deletes AWS CloudFormation Stack Sets. -notes: - - To make an individual stack, you want the M(amazon.aws.cloudformation) module. -options: - name: - description: - - Name of the CloudFormation stack set. - required: true - type: str - description: - description: - - A description of what this stack set creates. - type: str - parameters: - description: - - A list of hashes of all the template variables for the stack. The value can be a string or a dict. - - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example). - default: {} - type: dict - state: - description: - - If I(state=present), stack will be created. If I(state=present) and if stack exists and template has changed, it will be updated. - If I(state=absent), stack will be removed. - default: present - choices: [ present, absent ] - type: str - template: - description: - - The local path of the CloudFormation template. - - This must be the full path to the file, relative to the working directory. If using roles this may look - like C(roles/cloudformation/files/cloudformation-example.json). - - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) - must be specified (but only one of them). - - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) - are specified, the previous template will be reused. - type: path - template_body: - description: - - Template body. Use this to pass in the actual body of the CloudFormation template. 
-      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
-        must be specified (but only one of them).
-      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
-        are specified, the previous template will be reused.
-    type: str
-  template_url:
-    description:
-      - Location of file containing the template body.
-      - The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
-        as the stack.
-      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
-        must be specified (but only one of them).
-      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
-        are specified, the previous template will be reused.
-    type: str
-  purge_stacks:
-    description:
-      - Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
-      - By default, instances will be deleted. To keep stacks when the stack set is deleted, set I(purge_stacks=false).
-    type: bool
-    default: true
-  wait:
-    description:
-      - Whether or not to wait for stack operations to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
-      - If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
-    type: bool
-    default: false
-  wait_timeout:
-    description:
-      - How long to wait (in seconds) for stacks to complete create/update/delete operations.
-    default: 900
-    type: int
-  capabilities:
-    description:
-      - Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
-      - Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
-      - >
-        The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
-        AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
-    type: list
-    elements: str
-    choices:
-      - 'CAPABILITY_IAM'
-      - 'CAPABILITY_NAMED_IAM'
-  regions:
-    description:
-      - A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
-        specifies the region for stack instances.
-      - At least one region must be specified to create a stack set. On updates, if fewer regions are specified only the specified regions will
-        have their stack instances updated.
-    type: list
-    elements: str
-  accounts:
-    description:
-      - A list of AWS accounts in which to create instances of CloudFormation stacks.
-      - At least one account must be specified to create a stack set. On updates, if fewer accounts are specified only the specified accounts will
-        have their stack instances updated.
-    type: list
-    elements: str
-  administration_role_arn:
-    description:
-      - ARN of the administration role, meaning the role that CloudFormation Stack Sets use to assume the roles in your child accounts.
-      - This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
-        account number of the current IAM role/user/STS credentials.
-    aliases:
-      - admin_role_arn
-      - admin_role
-      - administration_role
-    type: str
-  execution_role_name:
-    description:
-      - Name of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
-      - This MUST NOT be an ARN, and the role must exist in each child account specified.
-      - The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole).
-    aliases:
-      - exec_role_name
-      - exec_role
-      - execution_role
-    type: str
-  tags:
-    description:
-      - Dictionary of tags to associate with stack and its resources during stack creation.
-      - Can be updated later; updating tags removes previous entries.
-    type: dict
-  failure_tolerance:
-    description:
-      - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
-    type: dict
-    default: {}
-    suboptions:
-      fail_count:
-        description:
-          - The number of accounts, per region, for which this operation can fail before CloudFormation
-            stops the operation in that region.
-          - You must specify one of I(fail_count) and I(fail_percentage).
-        type: int
-      fail_percentage:
-        type: int
-        description:
-          - The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
-            stops the operation in that region.
-          - You must specify one of I(fail_count) and I(fail_percentage).
-      parallel_percentage:
-        type: int
-        description:
-          - The maximum percentage of accounts in which to perform this operation at one time.
-          - You must specify one of I(parallel_count) and I(parallel_percentage).
-          - Note that this setting lets you specify the maximum for operations.
-            For large deployments, under certain circumstances the actual percentage may be lower.
-      parallel_count:
-        type: int
-        description:
-          - The maximum number of accounts in which to perform this operation at one time.
-          - I(parallel_count) may be at most one more than the I(fail_count).
-          - You must specify one of I(parallel_count) and I(parallel_percentage).
-          - Note that this setting lets you specify the maximum for operations.
-            For large deployments, under certain circumstances the actual count may be lower.
-
-author:
-  - "Ryan Scott Brown (@ryansb)"
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-- name: Create a stack set with instances in two accounts
-  community.aws.cloudformation_stack_set:
-    name: my-stack
-    description: Test stack in two accounts
-    state: present
-    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
-    accounts:
-      - 123456789012
-      - 234567890123
-    regions:
-      - us-east-1
-
-- name: On subsequent calls, templates are optional but parameters and tags can be altered
-  community.aws.cloudformation_stack_set:
-    name: my-stack
-    state: present
-    parameters:
-      InstanceName: my_stacked_instance
-    tags:
-      foo: bar
-      test: stack
-    accounts:
-      - 123456789012
-      - 234567890123
-    regions:
-      - us-east-1
-
-- name: The same type of update, but wait for the update to complete in all stacks
-  community.aws.cloudformation_stack_set:
-    name: my-stack
-    state: present
-    wait: true
-    parameters:
-      InstanceName: my_restacked_instance
-    tags:
-      foo: bar
-      test: stack
-    accounts:
-      - 123456789012
-      - 234567890123
-    regions:
-      - us-east-1
-
-- name: Register new accounts (create new stack instances) with an existing stack set
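  # Accounts in the list below that have no stack instances yet get new
  # instances created in each listed region; instances that already exist
  # are updated in place rather than recreated.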
- community.aws.cloudformation_stack_set: - name: my-stack - state: present - wait: true - parameters: - InstanceName: my_restacked_instance - tags: - foo: bar - test: stack - accounts: - - 123456789012 - - 234567890123 - - 345678901234 - regions: - - us-east-1 -""" - -RETURN = r""" -operations_log: - type: list - description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. - returned: always - sample: - - action: CREATE - creation_timestamp: '2018-06-18T17:40:46.372000+00:00' - end_timestamp: '2018-06-18T17:41:24.560000+00:00' - operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8 - status: FAILED - stack_instances: - - account: '1234567890' - region: us-east-1 - stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 - status: OUTDATED - status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service. - -operations: - description: All operations initiated by this run of the cloudformation_stack_set module - returned: always - type: list - sample: - - action: CREATE - administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole - creation_timestamp: '2018-06-18T17:40:46.372000+00:00' - end_timestamp: '2018-06-18T17:41:24.560000+00:00' - execution_role_name: AWSCloudFormationStackSetExecutionRole - operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8 - operation_preferences: - region_order: - - us-east-1 - - us-east-2 - stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 - status: FAILED -stack_instances: - description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID. - returned: state == present - type: list - sample: - - account: '1234567890' - region: us-east-1 - stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 - status: OUTDATED - status_reason: > - Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service. 
- - account: '1234567890' - region: us-east-2 - stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 - status: OUTDATED - status_reason: Cancelled since failure tolerance has exceeded -stack_set: - type: dict - description: Facts about the currently deployed stack set, its parameters, and its tags - returned: state == present - sample: - administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole - capabilities: [] - description: test stack PRIME - execution_role_name: AWSCloudFormationStackSetExecutionRole - parameters: [] - stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-467-ba36-e09f92cf5929 - stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929 - stack_set_name: TestStackPrime - status: ACTIVE - tags: - Some: Thing - an: other - template_body: | - AWSTemplateFormatVersion: "2010-09-09" - Parameters: {} - Resources: - Bukkit: - Type: "AWS::S3::Bucket" - Properties: {} - other: - Type: "AWS::SNS::Topic" - Properties: {} - -""" - -import datetime -import itertools -import time -import uuid - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - # handled by AnsibleAWSModule - pass - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_stack_set(module, stack_params, cfn): - try: - cfn.create_stack_set(aws_retry=True, **stack_params) - return await_stack_set_exists(cfn, stack_params["StackSetName"]) - except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg=f"Failed to create stack set {stack_params.get('StackSetName')}.") - - -def update_stack_set(module, stack_params, cfn): - # if the state is present and the stack already exists, we try to update it. - # AWS will tell us if the stack template and parameters are the same and - # don't need to be updated. - try: - cfn.update_stack_set(**stack_params) - except is_boto3_error_code("StackSetNotFound") as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.") - except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except - module.fail_json_aws( - err, - msg=( - "One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters." - ), - ) - except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except - module.fail_json_aws( - err, - msg=( - "Another operation is already in progress on this stack set - please try again later. When making" - " multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op" - " errors." 
- ), - ) - except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, msg="Could not update stack set.") - if module.params.get("wait"): - await_stack_set_operation( - module, - cfn, - operation_id=stack_params["OperationId"], - stack_set_name=stack_params["StackSetName"], - max_wait=module.params.get("wait_timeout"), - ) - - return True - - -def compare_stack_instances(cfn, stack_set_name, accounts, regions): - instance_list = cfn.list_stack_instances( - aws_retry=True, - StackSetName=stack_set_name, - )["Summaries"] - desired_stack_instances = set(itertools.product(accounts, regions)) - existing_stack_instances = set((i["Account"], i["Region"]) for i in instance_list) - # new stacks, existing stacks, unspecified stacks - return ( - (desired_stack_instances - existing_stack_instances), - existing_stack_instances, - (existing_stack_instances - desired_stack_instances), - ) - - -@AWSRetry.jittered_backoff(retries=3, delay=4) -def stack_set_facts(cfn, stack_set_name): - try: - ss = cfn.describe_stack_set(StackSetName=stack_set_name)["StackSet"] - ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) - return ss - except cfn.exceptions.from_code("StackSetNotFound"): - # Return None if the stack doesn't exist - return - - -def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait): - wait_start = datetime.datetime.now() - operation = None - for i in range(max_wait // 15): - try: - operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id) - if operation["StackSetOperation"]["Status"] not in ("RUNNING", "STOPPING"): - # Stack set has completed operation - break - except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except - pass - except is_boto3_error_code("OperationNotFound"): # pylint: disable=duplicate-except - pass - time.sleep(15) - - if operation and operation["StackSetOperation"]["Status"] not in ("FAILED", "STOPPED"): - await_stack_instance_completion( - module, - cfn, - stack_set_name=stack_set_name, - # subtract however long we waited already - max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()), - ) - elif operation and operation["StackSetOperation"]["Status"] in ("FAILED", "STOPPED"): - pass - else: - module.warn( - f"Timed out waiting for operation {operation_id} on stack set {stack_set_name} after {max_wait} seconds." - " Returning unfinished operation" - ) - - -def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): - to_await = None - for i in range(max_wait // 15): - try: - stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name) - to_await = [inst for inst in stack_instances["Summaries"] if inst["Status"] != "CURRENT"] - if not to_await: - return stack_instances["Summaries"] - except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except - # this means the deletion beat us, or the stack set is not yet propagated - pass - time.sleep(15) - - module.warn( - f"Timed out waiting for stack set {stack_set_name} instances {', '.join(s['StackId'] for s in to_await)} to" - f" complete after {max_wait} seconds. 
Returning unfinished operation" - ) - - -def await_stack_set_exists(cfn, stack_set_name): - # AWSRetry will retry on `StackSetNotFound` errors for us - ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)["StackSet"] - ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"]) - return camel_dict_to_snake_dict(ss, ignore_list=("Tags",)) - - -def describe_stack_tree(module, stack_set_name, operation_ids=None): - jittered_backoff_decorator = AWSRetry.jittered_backoff( - retries=5, delay=3, max_delay=5, catch_extra_error_codes=["StackSetNotFound"] - ) - cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) - result = dict() - result["stack_set"] = camel_dict_to_snake_dict( - cfn.describe_stack_set( - StackSetName=stack_set_name, - aws_retry=True, - )["StackSet"] - ) - result["stack_set"]["tags"] = boto3_tag_list_to_ansible_dict(result["stack_set"]["tags"]) - result["operations_log"] = sorted( - camel_dict_to_snake_dict( - cfn.list_stack_set_operations( - StackSetName=stack_set_name, - aws_retry=True, - ) - )["summaries"], - key=lambda x: x["creation_timestamp"], - ) - result["stack_instances"] = sorted( - [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)["Summaries"]], - key=lambda i: i["region"] + i["account"], - ) - - if operation_ids: - result["operations"] = [] - for op_id in operation_ids: - try: - result["operations"].append( - camel_dict_to_snake_dict( - cfn.describe_stack_set_operation( - StackSetName=stack_set_name, - OperationId=op_id, - )["StackSetOperation"] - ) - ) - except is_boto3_error_code("OperationNotFoundException"): # pylint: disable=duplicate-except - pass - return result - - -def get_operation_preferences(module): - params = dict() - if module.params.get("regions"): - params["RegionOrder"] = list(module.params["regions"]) - for param, api_name in { - "fail_count": "FailureToleranceCount", - "fail_percentage": "FailureTolerancePercentage", - "parallel_percentage": "MaxConcurrentPercentage", - "parallel_count": "MaxConcurrentCount", - }.items(): - if module.params.get("failure_tolerance", {}).get(param): - params[api_name] = module.params.get("failure_tolerance", {}).get(param) - return params - - -def main(): - argument_spec = dict( - name=dict(required=True), - description=dict(), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=900), - state=dict(default="present", choices=["present", "absent"]), - purge_stacks=dict(type="bool", default=True), - parameters=dict(type="dict", default={}), - template=dict(type="path"), - template_url=dict(), - template_body=dict(), - capabilities=dict(type="list", elements="str", choices=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), - regions=dict(type="list", elements="str"), - accounts=dict(type="list", elements="str"), - failure_tolerance=dict( - type="dict", - default={}, - options=dict( - fail_count=dict(type="int"), - fail_percentage=dict(type="int"), - parallel_percentage=dict(type="int"), - parallel_count=dict(type="int"), - ), - mutually_exclusive=[ - ["fail_count", "fail_percentage"], - ["parallel_count", "parallel_percentage"], - ], - ), - administration_role_arn=dict(aliases=["admin_role_arn", "administration_role", "admin_role"]), - execution_role_name=dict(aliases=["execution_role", "exec_role", "exec_role_name"]), - tags=dict(type="dict"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[["template_url", "template", "template_body"]], - supports_check_mode=True, - ) - - 
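    # A retry-decorated client transparently retries throttling errors plus any
    # extra error codes passed to the decorator; decorated calls opt in with
    # the aws_retry=True keyword. A rough sketch of the pattern (example values
    # assumed):
    #
    #   retry = AWSRetry.jittered_backoff(retries=10, delay=3,
    #                                     catch_extra_error_codes=["StackSetNotFound"])
    #   client = module.client("cloudformation", retry_decorator=retry)
    #   client.describe_stack_set(StackSetName="example", aws_retry=True)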
# Wrap the cloudformation client methods that this module uses with - # automatic backoff / retry for throttling error codes - jittered_backoff_decorator = AWSRetry.jittered_backoff( - retries=10, delay=3, max_delay=30, catch_extra_error_codes=["StackSetNotFound"] - ) - cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator) - existing_stack_set = stack_set_facts(cfn, module.params["name"]) - - operation_uuid = to_native(uuid.uuid4()) - operation_ids = [] - # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. - stack_params = {} - state = module.params["state"] - if state == "present" and not module.params["accounts"]: - module.fail_json( - msg=( - "Can't create a stack set without choosing at least one account. " - "To get the ID of the current account, use the aws_caller_info module." - ) - ) - - module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] - - stack_params["StackSetName"] = module.params["name"] - if module.params.get("description"): - stack_params["Description"] = module.params["description"] - - if module.params.get("capabilities"): - stack_params["Capabilities"] = module.params["capabilities"] - - if module.params["template"] is not None: - with open(module.params["template"], "r") as tpl: - stack_params["TemplateBody"] = tpl.read() - elif module.params["template_body"] is not None: - stack_params["TemplateBody"] = module.params["template_body"] - elif module.params["template_url"] is not None: - stack_params["TemplateURL"] = module.params["template_url"] - else: - # no template is provided, but if the stack set exists already, we can use the existing one. - if existing_stack_set: - stack_params["UsePreviousTemplate"] = True - else: - module.fail_json( - msg=( - f"The Stack Set {module.params['name']} does not exist, and no template was provided. 
Provide one" - " of `template`, `template_body`, or `template_url`" - ) - ) - - stack_params["Parameters"] = [] - for k, v in module.params["parameters"].items(): - if isinstance(v, dict): - # set parameter based on a dict to allow additional CFN Parameter Attributes - param = dict(ParameterKey=k) - - if "value" in v: - param["ParameterValue"] = to_native(v["value"]) - - if "use_previous_value" in v and bool(v["use_previous_value"]): - param["UsePreviousValue"] = True - param.pop("ParameterValue", None) - - stack_params["Parameters"].append(param) - else: - # allow default k/v configuration to set a template parameter - stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - - if module.params.get("tags") and isinstance(module.params.get("tags"), dict): - stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - - if module.params.get("administration_role_arn"): - # TODO loosen the semantics here to autodetect the account ID and build the ARN - stack_params["AdministrationRoleARN"] = module.params["administration_role_arn"] - if module.params.get("execution_role_name"): - stack_params["ExecutionRoleName"] = module.params["execution_role_name"] - - result = {} - - if module.check_mode: - if state == "absent" and existing_stack_set: - module.exit_json(changed=True, msg="Stack set would be deleted", meta=[]) - elif state == "absent" and not existing_stack_set: - module.exit_json(changed=False, msg="Stack set doesn't exist", meta=[]) - elif state == "present" and not existing_stack_set: - module.exit_json(changed=True, msg="New stack set would be created", meta=[]) - elif state == "present" and existing_stack_set: - new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances( - cfn, - module.params["name"], - module.params["accounts"], - module.params["regions"], - ) - if new_stacks: - module.exit_json(changed=True, msg="New stack instance(s) would be created", meta=[]) - elif unspecified_stacks and module.params.get("purge_stack_instances"): - module.exit_json(changed=True, msg="Old stack instance(s) would be deleted", meta=[]) - else: - # TODO: need to check the template and other settings for correct check mode - module.exit_json(changed=False, msg="No changes detected", meta=[]) - - changed = False - if state == "present": - if not existing_stack_set: - # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params["ClientRequestToken"] = f"Ansible-StackSet-Create-{operation_uuid}" - changed = True - create_stack_set(module, stack_params, cfn) - else: - stack_params["OperationId"] = f"Ansible-StackSet-Update-{operation_uuid}" - operation_ids.append(stack_params["OperationId"]) - if module.params.get("regions"): - stack_params["OperationPreferences"] = get_operation_preferences(module) - changed |= update_stack_set(module, stack_params, cfn) - - await_stack_set_operation( - module, - cfn, - operation_id=stack_params["OperationId"], - stack_set_name=stack_params["StackSetName"], - max_wait=module.params.get("wait_timeout"), - ) - - # now create/update any appropriate stack instances - new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( - cfn, - module.params["name"], - module.params["accounts"], - module.params["regions"], - ) - if new_stack_instances: - operation_ids.append(f"Ansible-StackInstance-Create-{operation_uuid}") - changed = True - cfn.create_stack_instances( - StackSetName=module.params["name"], - Accounts=list(set(acct for 
acct, region in new_stack_instances)), - Regions=list(set(region for acct, region in new_stack_instances)), - OperationPreferences=get_operation_preferences(module), - OperationId=operation_ids[-1], - ) - else: - operation_ids.append(f"Ansible-StackInstance-Update-{operation_uuid}") - cfn.update_stack_instances( - StackSetName=module.params["name"], - Accounts=list(set(acct for acct, region in existing_stack_instances)), - Regions=list(set(region for acct, region in existing_stack_instances)), - OperationPreferences=get_operation_preferences(module), - OperationId=operation_ids[-1], - ) - for op in operation_ids: - await_stack_set_operation( - module, - cfn, - operation_id=op, - stack_set_name=module.params["name"], - max_wait=module.params.get("wait_timeout"), - ) - - elif state == "absent": - if not existing_stack_set: - module.exit_json(msg=f"Stack set {module.params['name']} does not exist") - if module.params.get("purge_stack_instances") is False: - pass - try: - cfn.delete_stack_set( - StackSetName=module.params["name"], - ) - module.exit_json(msg=f"Stack set {module.params['name']} deleted") - except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg=f"Cannot delete stack {module.params['name']} while there is an operation in progress" - ) - except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except - delete_instances_op = f"Ansible-StackInstance-Delete-{operation_uuid}" - cfn.delete_stack_instances( - StackSetName=module.params["name"], - Accounts=module.params["accounts"], - Regions=module.params["regions"], - RetainStacks=(not module.params.get("purge_stacks")), - OperationId=delete_instances_op, - ) - await_stack_set_operation( - module, - cfn, - operation_id=delete_instances_op, - stack_set_name=stack_params["StackSetName"], - max_wait=module.params.get("wait_timeout"), - ) - try: - cfn.delete_stack_set( - StackSetName=module.params["name"], - ) - except is_boto3_error_code("StackSetNotEmptyException") as exc: # pylint: disable=duplicate-except - # this time, it is likely that either the delete failed or there are more stacks. 
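One behavioural note on the stack-instance fan-out used earlier in this function: the Accounts/Regions arguments passed to create_stack_instances() and update_stack_instances() are built by deduplicating each axis of the (account, region) pairs separately, so CloudFormation targets the full cross-product. A minimal standalone sketch (the sample account IDs are made up):

    # Hedged sketch of the fan-out above; the sample pairs are hypothetical.
    new_stack_instances = [("111111111111", "us-east-1"), ("222222222222", "eu-west-1")]
    accounts = sorted(set(acct for acct, region in new_stack_instances))   # ['111111111111', '222222222222']
    regions = sorted(set(region for acct, region in new_stack_instances))  # ['eu-west-1', 'us-east-1']
    # CloudFormation creates a stack instance for every account/region combination,
    # i.e. 4 instances here rather than only the 2 pairs listed above.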
-            try:
-                cfn.delete_stack_set(
-                    StackSetName=module.params["name"],
-                )
-            except is_boto3_error_code("StackSetNotEmptyException") as exc:  # pylint: disable=duplicate-except
-                # this time, it is likely that either the delete failed or there are more stacks.
-                instances = cfn.list_stack_instances(
-                    StackSetName=module.params["name"],
-                )
-                stack_states = ", ".join(
-                    "(account={Account}, region={Region}, state={Status})".format(**i) for i in instances["Summaries"]
-                )
-                module.fail_json_aws(
-                    exc,
-                    msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: "
-                    + stack_states,
-                )
-            module.exit_json(changed=True, msg=f"Stack set {module.params['name']} deleted")
-
-    result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids))
-    if any(o["status"] == "FAILED" for o in result["operations"]):
-        module.fail_json(msg="One or more operations failed to execute", **result)
-    module.exit_json(changed=changed, **result)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py
deleted file mode 100644
index 37fd914dbb4..00000000000
--- a/cloudfront_distribution.py
+++ /dev/null
@@ -1,2530 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-
-version_added: 1.0.0
-module: cloudfront_distribution
-
-short_description: Create, update and delete AWS CloudFront distributions
-
-description:
-  - Allows for easy creation, updating and deletion of CloudFront distributions.
-
-author:
-  - Willem van Ketwich (@wilvk)
-  - Will Thames (@willthames)
-
-options:
-
-  state:
-    description:
-      - The desired state of the distribution.
-      - I(state=present) creates a new distribution or updates an existing distribution.
-      - I(state=absent) deletes an existing distribution.
-    choices: ['present', 'absent']
-    default: 'present'
-    type: str
-
-  distribution_id:
-    description:
-      - The ID of the CloudFront distribution.
-      - This parameter can be used interchangeably with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
-    type: str
-
-  e_tag:
-    description:
-      - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
-      - It is determined automatically if not specified.
-    type: str
-
-  caller_reference:
-    description:
-      - A unique identifier for creating and updating CloudFront distributions.
-      - Each caller reference must be unique across all distributions. For example, a caller reference used in a web
-        distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
-        to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
-        C(YYYY-MM-DDTHH:MM:SS.ffffff).
-    type: str
-
-  alias:
-    description:
-      - The name of an alias (CNAME) that is used in a distribution. This is used to reference a distribution by its alias,
-        as an alias can only be used by one distribution per AWS account. This parameter avoids having to provide the
-        I(distribution_id), I(e_tag), or I(caller_reference) of an existing distribution.
-    type: str
-
-  aliases:
-    description:
-      - A list of domain name aliases (CNAMEs) as strings to be used for the distribution.
-      - Each alias must be unique across all distributions for the AWS account.
-    type: list
-    elements: str
-    default: []
-
-  purge_aliases:
-    description:
-      - Specifies whether existing aliases will be removed before adding new aliases.
-      - When I(purge_aliases=true), existing aliases are removed and I(aliases) are added.
- default: false - type: bool - - default_root_object: - description: - - A config element that specifies the path to request when the user requests the origin. - - e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is called by the user. - - This prevents the entire distribution origin from being exposed at the root. - type: str - - default_origin_domain_name: - description: - - The domain name to use for an origin if no I(origins) have been specified. - - Should only be used on a first run of generating a distribution and not on - subsequent runs. - - Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias). - type: str - - default_origin_path: - description: - - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified. - type: str - - origins: - type: list - elements: dict - description: - - A config element that is a list of complex origin objects to be specified for the distribution. Used for creating and updating distributions. - suboptions: - id: - description: A unique identifier for the origin or origin group. I(id) must be unique within the distribution. - type: str - domain_name: - description: - - The domain name which CloudFront will query as the origin. - - For more information see the CloudFront documentation - at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) - type: str - origin_path: - description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. - type: str - origin_shield: - description: Specify origin shield options for the origin. - type: dict - suboptions: - enabled: - description: Indicate whether you want the origin to have Origin Shield enabled or not. - type: bool - origin_shield_region: - description: Specify which AWS region will be used for Origin Shield. Required if Origin Shield is enabled. - type: str - version_added: 6.0.0 - custom_headers: - description: - - Custom headers you wish to add to the request before passing it to the origin. - - For more information see the CloudFront documentation - at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html). - type: list - elements: dict - suboptions: - header_name: - description: The name of a header that you want CloudFront to forward to your origin. - type: str - header_value: - description: The value for the header that you specified in the I(header_name) field. - type: str - s3_origin_access_identity_enabled: - description: - - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. - - Will automatically create an Identity for you if no I(s3_origin_config) is specified. - - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html). - type: bool - s3_origin_config: - description: Specify origin access identity for S3 origins. - type: dict - suboptions: - origin_access_identity: - description: Existing origin access identity in the format C(origin-access-identity/cloudfront/OID_ID). - type: str - custom_origin_config: - description: Connection information about the origin. - type: dict - suboptions: - http_port: - description: The HTTP port the custom origin listens on. - type: int - https_port: - description: The HTTPS port the custom origin listens on. 
- type: int - origin_protocol_policy: - description: The origin protocol policy to apply to your origin. - type: str - origin_ssl_protocols: - description: A list of SSL/TLS protocols that you want CloudFront to use when communicating to the origin over HTTPS. - type: list - elements: str - origin_read_timeout: - description: A timeout (in seconds) when reading from your origin. - type: int - origin_keepalive_timeout: - description: A keep-alive timeout (in seconds). - type: int - connection_attempts: - description: The number of times that CloudFront attempts to connect to the origin. - The minimum number is C(1), the maximum is C(3). - type: int - default: 3 - version_added: 6.0.0 - connection_timeout: - description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. - The minimum timeout is C(1) second, the maximum is C(10) seconds. - type: int - default: 10 - version_added: 6.0.0 - purge_origins: - description: Whether to remove any origins that aren't listed in I(origins). - default: false - type: bool - - default_cache_behavior: - type: dict - description: - - A dict specifying the default cache behavior of the distribution. - - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid - cache_behavior in I(cache_behaviors) with defaults. - suboptions: - target_origin_id: - description: - - The ID of the origin that you want CloudFront to route requests to - by default. - type: str - response_headers_policy_id: - description: - - The ID of the header policy that CloudFront adds to responses that it sends to viewers. - type: str - forwarded_values: - description: - - A dict that specifies how CloudFront handles query strings and cookies. - type: dict - suboptions: - query_string: - description: - - Indicates whether you want CloudFront to forward query strings - to the origin that is associated with this cache behavior. - type: bool - cookies: - description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. - type: dict - suboptions: - forward: - description: - - Specifies which cookies to forward to the origin for this cache behavior. - - Valid values are C(all), C(none), or C(whitelist). - type: str - whitelisted_names: - type: list - elements: str - description: A list of cookies to forward to the origin for this cache behavior. - headers: - description: - - A list of headers to forward to the origin for this cache behavior. - - To forward all headers use a list containing a single element '*' (C(['*'])) - type: list - elements: str - query_string_cache_keys: - description: - - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior. - type: list - elements: str - trusted_signers: - description: - - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content. - type: dict - suboptions: - enabled: - description: Whether you want to require viewers to use signed URLs to access the files specified by I(target_origin_id) - type: bool - items: - description: A list of trusted signers for this cache behavior. - elements: str - type: list - viewer_protocol_policy: - description: - - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id). - - Valid values are C(allow-all), C(redirect-to-https) and C(https-only). 
- type: str - default_ttl: - description: The default amount of time that you want objects to stay in CloudFront caches. - type: int - max_ttl: - description: The maximum amount of time that you want objects to stay in CloudFront caches. - type: int - min_ttl: - description: The minimum amount of time that you want objects to stay in CloudFront caches. - type: int - allowed_methods: - description: A dict that controls which HTTP methods CloudFront processes and forwards. - type: dict - suboptions: - items: - description: A list of HTTP methods that you want CloudFront to process and forward. - type: list - elements: str - cached_methods: - description: - - A list of HTTP methods that you want CloudFront to apply caching to. - - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]). - type: list - elements: str - smooth_streaming: - description: - - Whether you want to distribute media files in the Microsoft Smooth Streaming format. - type: bool - compress: - description: - - Whether you want CloudFront to automatically compress files. - type: bool - lambda_function_associations: - description: - - A list of Lambda function associations to use for this cache behavior. - type: list - elements: dict - suboptions: - lambda_function_arn: - description: The ARN of the Lambda function. - type: str - event_type: - description: - - Specifies the event type that triggers a Lambda function invocation. - - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response). - type: str - field_level_encryption_id: - description: - - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data. - type: str - - cache_behaviors: - type: list - elements: dict - description: - - A list of dictionaries describing the cache behaviors for the distribution. - - The order of the list is preserved across runs unless I(purge_cache_behaviors) is enabled. - suboptions: - path_pattern: - description: - - The pattern that specifies which requests to apply the behavior to. - type: str - target_origin_id: - description: - - The ID of the origin that you want CloudFront to route requests to - by default. - type: str - response_headers_policy_id: - description: - - The ID of the header policy that CloudFront adds to responses that it sends to viewers. - type: str - forwarded_values: - description: - - A dict that specifies how CloudFront handles query strings and cookies. - type: dict - suboptions: - query_string: - description: - - Indicates whether you want CloudFront to forward query strings - to the origin that is associated with this cache behavior. - type: bool - cookies: - description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. - type: dict - suboptions: - forward: - description: - - Specifies which cookies to forward to the origin for this cache behavior. - - Valid values are C(all), C(none), or C(whitelist). - type: str - whitelisted_names: - type: list - elements: str - description: A list of cookies to forward to the origin for this cache behavior. - headers: - description: - - A list of headers to forward to the origin for this cache behavior. - - To forward all headers use a list containing a single element '*' (C(['*'])) - type: list - elements: str - query_string_cache_keys: - description: - - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior. 
- type: list - elements: str - trusted_signers: - description: - - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content. - type: dict - suboptions: - enabled: - description: Whether you want to require viewers to use signed URLs to access the files specified by I(path_pattern) and I(target_origin_id) - type: bool - items: - description: A list of trusted signers for this cache behavior. - elements: str - type: list - viewer_protocol_policy: - description: - - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id) when a request matches I(path_pattern). - - Valid values are C(allow-all), C(redirect-to-https) and C(https-only). - type: str - default_ttl: - description: The default amount of time that you want objects to stay in CloudFront caches. - type: int - max_ttl: - description: The maximum amount of time that you want objects to stay in CloudFront caches. - type: int - min_ttl: - description: The minimum amount of time that you want objects to stay in CloudFront caches. - type: int - allowed_methods: - description: A dict that controls which HTTP methods CloudFront processes and forwards. - type: dict - suboptions: - items: - description: A list of HTTP methods that you want CloudFront to process and forward. - type: list - elements: str - cached_methods: - description: - - A list of HTTP methods that you want CloudFront to apply caching to. - - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]). - type: list - elements: str - smooth_streaming: - description: - - Whether you want to distribute media files in the Microsoft Smooth Streaming format. - type: bool - compress: - description: - - Whether you want CloudFront to automatically compress files. - type: bool - lambda_function_associations: - description: - - A list of Lambda function associations to use for this cache behavior. - type: list - elements: dict - suboptions: - lambda_function_arn: - description: The ARN of the Lambda function. - type: str - event_type: - description: - - Specifies the event type that triggers a Lambda function invocation. - - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response). - type: str - field_level_encryption_id: - description: - - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data. - type: str - - - purge_cache_behaviors: - description: - - Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). - - This switch also allows the reordering of I(cache_behaviors). - default: false - type: bool - - custom_error_responses: - type: list - elements: dict - description: - - A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. - - This attribute configures custom http error messages returned to the user. - suboptions: - error_code: - type: int - description: The error code the custom error page is for. - error_caching_min_ttl: - type: int - description: The length of time (in seconds) that CloudFront will cache status codes for. - response_code: - type: int - description: - - The HTTP status code that CloudFront should return to a user when the origin returns the HTTP status code specified by I(error_code). - response_page_path: - type: str - description: - - The path to the custom error page that you want CloudFront to return to a viewer when your origin returns - the HTTP status code specified by I(error_code). 
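For context on how list-valued options such as I(custom_error_responses) reach the API: further down in this file the module wraps lists into CloudFront's quantity/items shape (the ansible_list_to_cloudfront_list() helper) and camel-cases keys with snake_dict_to_camel_dict(). A minimal standalone sketch of that transformation; to_cloudfront_list() below is a simplified stand-in for the module's helper, and exact API key casing for some fields (for example TTLs) can differ:

    from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

    def to_cloudfront_list(items):
        # Simplified stand-in for the module's ansible_list_to_cloudfront_list() helper.
        result = {"quantity": len(items)}
        if items:
            result["items"] = items
        return result

    custom_error_responses = [
        {"error_code": 404, "response_code": "200", "response_page_path": "/index.html", "error_caching_min_ttl": 300},
    ]
    config = {"custom_error_responses": to_cloudfront_list(custom_error_responses)}
    print(snake_dict_to_camel_dict(config, capitalize_first=True))
    # -> {'CustomErrorResponses': {'Quantity': 1, 'Items': [{'ErrorCode': 404, ...}]}}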
-
-  purge_custom_error_responses:
-    description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses).
-    default: false
-    type: bool
-
-  comment:
-    description:
-      - A comment that describes the CloudFront distribution.
-      - If not specified, it defaults to a generic message that it has been created with Ansible, and a datetime stamp.
-    type: str
-
-  logging:
-    description:
-      - A config element that is a complex object that defines logging for the distribution.
-    suboptions:
-      enabled:
-        description: When I(enabled=true) CloudFront will log access to an S3 bucket.
-        type: bool
-      include_cookies:
-        description: When I(include_cookies=true) CloudFront will include cookies in the logs.
-        type: bool
-      bucket:
-        description: The S3 bucket to store the log in.
-        type: str
-      prefix:
-        description: A prefix to include in the S3 object names.
-        type: str
-    type: dict
-
-  price_class:
-    description:
-      - A string that specifies the pricing class of the distribution. As per
-        U(https://aws.amazon.com/cloudfront/pricing/)
-      - I(price_class=PriceClass_100) consists of the areas United States, Canada and Europe.
-      - I(price_class=PriceClass_200) consists of the areas United States, Canada, Europe, Japan, India,
-        Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
-      - I(price_class=PriceClass_All) consists of the areas United States, Canada, Europe, Japan, India,
-        South America, Australia, Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
-      - AWS defaults this to C(PriceClass_All).
-      - Valid values are C(PriceClass_100), C(PriceClass_200) and C(PriceClass_All).
-    type: str
-
-  enabled:
-    description:
-      - A boolean value that specifies whether the distribution is enabled or disabled.
-      - Defaults to C(false).
-    type: bool
-
-  viewer_certificate:
-    type: dict
-    description:
-      - A dict that specifies the encryption details of the distribution.
-    suboptions:
-      cloudfront_default_certificate:
-        type: bool
-        description:
-          - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net),
-            you should set I(cloudfront_default_certificate=true).
-          - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method).
-      iam_certificate_id:
-        type: str
-        description:
-          - The ID of a certificate stored in IAM to use for HTTPS connections.
-          - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method).
-      acm_certificate_arn:
-        type: str
-        description:
-          - The ARN of a certificate stored in ACM to use for HTTPS connections.
-          - If I(acm_certificate_arn) is set then you must also specify I(ssl_support_method).
-      ssl_support_method:
-        type: str
-        description:
-          - How CloudFront should serve SSL certificates.
-          - Valid values are C(sni-only) for SNI, and C(vip) if CloudFront is configured to use a dedicated IP for your content.
-      minimum_protocol_version:
-        type: str
-        description:
-          - The security policy that you want CloudFront to use for HTTPS connections.
-          - See U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html)
-            for supported security policies.
-
-  restrictions:
-    type: dict
-    description:
-      - A config element that is a complex object that describes how a distribution should restrict its content.
-    suboptions:
-      geo_restriction:
-        description: Apply a restriction based on the location of the requester.
- type: dict - suboptions: - restriction_type: - type: str - description: - - The method that you want to use to restrict distribution of your content by country. - - Valid values are C(none), C(whitelist), C(blacklist). - items: - description: - - A list of ISO 3166-1 two letter (Alpha 2) country codes that the - restriction should apply to. - - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/).' - type: list - elements: str - - web_acl_id: - description: - - The ID of a Web Application Firewall (WAF) Access Control List (ACL). - type: str - - http_version: - description: - - The version of the http protocol to use for the distribution. - - AWS defaults this to C(http2). - - Valid values are C(http1.1), C(http2), C(http3) and C(http2and3). - type: str - - ipv6_enabled: - description: - - Determines whether IPv6 support is enabled or not. - - Defaults to C(false). - type: bool - - wait: - description: - - Specifies whether the module waits until the distribution has completed processing the creation or update. - type: bool - default: false - - wait_timeout: - description: - - Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. - default: 1800 - type: int - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: create a basic distribution with defaults and tags - community.aws.cloudfront_distribution: - state: present - default_origin_domain_name: www.my-cloudfront-origin.com - tags: - Name: example distribution - Project: example project - Priority: '1' - -- name: update a distribution comment by distribution_id - community.aws.cloudfront_distribution: - state: present - distribution_id: E1RP5A2MJ8073O - comment: modified by ansible cloudfront.py - -- name: update a distribution comment by caller_reference - community.aws.cloudfront_distribution: - state: present - caller_reference: my cloudfront distribution 001 - comment: modified by ansible cloudfront.py - -- name: update a distribution's aliases and comment using the distribution_id as a reference - community.aws.cloudfront_distribution: - state: present - distribution_id: E1RP5A2MJ8073O - comment: modified by cloudfront.py again - aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ] - -- name: update a distribution's aliases and comment using an alias as a reference - community.aws.cloudfront_distribution: - state: present - caller_reference: my test distribution - comment: modified by cloudfront.py again - aliases: - - www.my-distribution-source.com - - zzz.aaa.io - -- name: update a distribution's comment and aliases and tags and remove existing tags - community.aws.cloudfront_distribution: - state: present - distribution_id: E15BU8SDCGSG57 - comment: modified by cloudfront.py again - aliases: - - tested.com - tags: - Project: distribution 1.2 - purge_tags: true - -- name: create a distribution with an origin, logging and default cache behavior - community.aws.cloudfront_distribution: - state: present - caller_reference: unique test distribution ID - origins: - - id: 'my test origin-000111' - domain_name: www.example.com - origin_path: /production - custom_headers: - - header_name: MyCustomHeaderName - header_value: MyCustomHeaderValue - default_cache_behavior: - target_origin_id: 'my test origin-000111' - forwarded_values: - query_string: true - cookies: - forward: all - headers: - - '*' - viewer_protocol_policy: allow-all - 
smooth_streaming: true - compress: true - allowed_methods: - items: - - GET - - HEAD - cached_methods: - - GET - - HEAD - logging: - enabled: true - include_cookies: false - bucket: mylogbucket.s3.amazonaws.com - prefix: myprefix/ - enabled: false - comment: this is a CloudFront distribution with logging - -- name: delete a distribution - community.aws.cloudfront_distribution: - state: absent - caller_reference: replaceable distribution -""" - -RETURN = r""" -active_trusted_signers: - description: Key pair IDs that CloudFront is aware of for each trusted signer. - returned: always - type: complex - contains: - enabled: - description: Whether trusted signers are in use. - returned: always - type: bool - sample: false - quantity: - description: Number of trusted signers. - returned: always - type: int - sample: 1 - items: - description: Number of trusted signers. - returned: when there are trusted signers - type: list - sample: - - key_pair_id -aliases: - description: Aliases that refer to the distribution. - returned: always - type: complex - contains: - items: - description: List of aliases. - returned: always - type: list - sample: - - test.example.com - quantity: - description: Number of aliases. - returned: always - type: int - sample: 1 -arn: - description: Amazon Resource Name of the distribution. - returned: always - type: str - sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI -cache_behaviors: - description: CloudFront cache behaviors. - returned: always - type: complex - contains: - items: - description: List of cache behaviors. - returned: always - type: complex - contains: - allowed_methods: - description: Methods allowed by the cache behavior. - returned: always - type: complex - contains: - cached_methods: - description: Methods cached by the cache behavior. - returned: always - type: complex - contains: - items: - description: List of cached methods. - returned: always - type: list - sample: - - HEAD - - GET - quantity: - description: Count of cached methods. - returned: always - type: int - sample: 2 - items: - description: List of methods allowed by the cache behavior. - returned: always - type: list - sample: - - HEAD - - GET - quantity: - description: Count of methods allowed by the cache behavior. - returned: always - type: int - sample: 2 - compress: - description: Whether compression is turned on for the cache behavior. - returned: always - type: bool - sample: false - default_ttl: - description: Default Time to Live of the cache behavior. - returned: always - type: int - sample: 86400 - forwarded_values: - description: Values forwarded to the origin for this cache behavior. - returned: always - type: complex - contains: - cookies: - description: Cookies to forward to the origin. - returned: always - type: complex - contains: - forward: - description: Which cookies to forward to the origin for this cache behavior. - returned: always - type: str - sample: none - whitelisted_names: - description: The names of the cookies to forward to the origin for this cache behavior. - returned: when I(forward=whitelist) - type: complex - contains: - quantity: - description: Count of cookies to forward. - returned: always - type: int - sample: 1 - items: - description: List of cookies to forward. - returned: when list is not empty - type: list - sample: my_cookie - headers: - description: Which headers are used to vary on cache retrievals. - returned: always - type: complex - contains: - quantity: - description: Count of headers to vary on. 
- returned: always - type: int - sample: 1 - items: - description: List of headers to vary on. - returned: when list is not empty - type: list - sample: - - Host - query_string: - description: Whether the query string is used in cache lookups. - returned: always - type: bool - sample: false - query_string_cache_keys: - description: Which query string keys to use in cache lookups. - returned: always - type: complex - contains: - quantity: - description: Count of query string cache keys to use in cache lookups. - returned: always - type: int - sample: 1 - items: - description: List of query string cache keys to use in cache lookups. - returned: when list is not empty - type: list - sample: - lambda_function_associations: - description: Lambda function associations for a cache behavior. - returned: always - type: complex - contains: - quantity: - description: Count of lambda function associations. - returned: always - type: int - sample: 1 - items: - description: List of lambda function associations. - returned: when list is not empty - type: list - sample: - - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function - event_type: viewer-response - max_ttl: - description: Maximum Time to Live. - returned: always - type: int - sample: 31536000 - min_ttl: - description: Minimum Time to Live. - returned: always - type: int - sample: 0 - path_pattern: - description: Path pattern that determines this cache behavior. - returned: always - type: str - sample: /path/to/files/* - smooth_streaming: - description: Whether smooth streaming is enabled. - returned: always - type: bool - sample: false - target_origin_id: - description: ID of origin reference by this cache behavior. - returned: always - type: str - sample: origin_abcd - trusted_signers: - description: Trusted signers. - returned: always - type: complex - contains: - enabled: - description: Whether trusted signers are enabled for this cache behavior. - returned: always - type: bool - sample: false - quantity: - description: Count of trusted signers. - returned: always - type: int - sample: 1 - viewer_protocol_policy: - description: Policy of how to handle http/https. - returned: always - type: str - sample: redirect-to-https - quantity: - description: Count of cache behaviors. - returned: always - type: int - sample: 1 - -caller_reference: - description: Idempotency reference given when creating CloudFront distribution. - returned: always - type: str - sample: '1484796016700' -comment: - description: Any comments you want to include about the distribution. - returned: always - type: str - sample: 'my first CloudFront distribution' -custom_error_responses: - description: Custom error responses to use for error handling. - returned: always - type: complex - contains: - items: - description: List of custom error responses. - returned: always - type: complex - contains: - error_caching_min_ttl: - description: Minimum time to cache this error response. - returned: always - type: int - sample: 300 - error_code: - description: Origin response code that triggers this error response. - returned: always - type: int - sample: 500 - response_code: - description: Response code to return to the requester. - returned: always - type: str - sample: '500' - response_page_path: - description: Path that contains the error page to display. 
- returned: always - type: str - sample: /errors/5xx.html - quantity: - description: Count of custom error response items - returned: always - type: int - sample: 1 -default_cache_behavior: - description: Default cache behavior. - returned: always - type: complex - contains: - allowed_methods: - description: Methods allowed by the cache behavior. - returned: always - type: complex - contains: - cached_methods: - description: Methods cached by the cache behavior. - returned: always - type: complex - contains: - items: - description: List of cached methods. - returned: always - type: list - sample: - - HEAD - - GET - quantity: - description: Count of cached methods. - returned: always - type: int - sample: 2 - items: - description: List of methods allowed by the cache behavior. - returned: always - type: list - sample: - - HEAD - - GET - quantity: - description: Count of methods allowed by the cache behavior. - returned: always - type: int - sample: 2 - compress: - description: Whether compression is turned on for the cache behavior. - returned: always - type: bool - sample: false - default_ttl: - description: Default Time to Live of the cache behavior. - returned: always - type: int - sample: 86400 - forwarded_values: - description: Values forwarded to the origin for this cache behavior. - returned: always - type: complex - contains: - cookies: - description: Cookies to forward to the origin. - returned: always - type: complex - contains: - forward: - description: Which cookies to forward to the origin for this cache behavior. - returned: always - type: str - sample: none - whitelisted_names: - description: The names of the cookies to forward to the origin for this cache behavior. - returned: when I(forward=whitelist) - type: complex - contains: - quantity: - description: Count of cookies to forward. - returned: always - type: int - sample: 1 - items: - description: List of cookies to forward. - returned: when list is not empty - type: list - sample: my_cookie - headers: - description: Which headers are used to vary on cache retrievals. - returned: always - type: complex - contains: - quantity: - description: Count of headers to vary on. - returned: always - type: int - sample: 1 - items: - description: List of headers to vary on. - returned: when list is not empty - type: list - sample: - - Host - query_string: - description: Whether the query string is used in cache lookups. - returned: always - type: bool - sample: false - query_string_cache_keys: - description: Which query string keys to use in cache lookups. - returned: always - type: complex - contains: - quantity: - description: Count of query string cache keys to use in cache lookups. - returned: always - type: int - sample: 1 - items: - description: List of query string cache keys to use in cache lookups. - returned: when list is not empty - type: list - sample: - lambda_function_associations: - description: Lambda function associations for a cache behavior. - returned: always - type: complex - contains: - quantity: - description: Count of lambda function associations. - returned: always - type: int - sample: 1 - items: - description: List of lambda function associations. - returned: when list is not empty - type: list - sample: - - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function - event_type: viewer-response - max_ttl: - description: Maximum Time to Live. - returned: always - type: int - sample: 31536000 - min_ttl: - description: Minimum Time to Live. 
- returned: always - type: int - sample: 0 - path_pattern: - description: Path pattern that determines this cache behavior. - returned: always - type: str - sample: /path/to/files/* - smooth_streaming: - description: Whether smooth streaming is enabled. - returned: always - type: bool - sample: false - target_origin_id: - description: ID of origin reference by this cache behavior. - returned: always - type: str - sample: origin_abcd - trusted_signers: - description: Trusted signers. - returned: always - type: complex - contains: - enabled: - description: Whether trusted signers are enabled for this cache behavior. - returned: always - type: bool - sample: false - quantity: - description: Count of trusted signers. - returned: always - type: int - sample: 1 - viewer_protocol_policy: - description: Policy of how to handle http/https. - returned: always - type: str - sample: redirect-to-https -default_root_object: - description: The object that you want CloudFront to request from your origin (for example, index.html) - when a viewer requests the root URL for your distribution. - returned: always - type: str - sample: '' -diff: - description: Difference between previous configuration and new configuration. - returned: always - type: dict - sample: {} -domain_name: - description: Domain name of CloudFront distribution. - returned: always - type: str - sample: d1vz8pzgurxosf.cloudfront.net -enabled: - description: Whether the CloudFront distribution is enabled or not. - returned: always - type: bool - sample: true -http_version: - description: Version of HTTP supported by the distribution. - returned: always - type: str - sample: http2 -id: - description: CloudFront distribution ID. - returned: always - type: str - sample: E123456ABCDEFG -in_progress_invalidation_batches: - description: The number of invalidation batches currently in progress. - returned: always - type: int - sample: 0 -is_ipv6_enabled: - description: Whether IPv6 is enabled. - returned: always - type: bool - sample: true -last_modified_time: - description: Date and time distribution was last modified. - returned: always - type: str - sample: '2017-10-13T01:51:12.656000+00:00' -logging: - description: Logging information. - returned: always - type: complex - contains: - bucket: - description: S3 bucket logging destination. - returned: always - type: str - sample: logs-example-com.s3.amazonaws.com - enabled: - description: Whether logging is enabled. - returned: always - type: bool - sample: true - include_cookies: - description: Whether to log cookies. - returned: always - type: bool - sample: false - prefix: - description: Prefix added to logging object names. - returned: always - type: str - sample: cloudfront/test -origins: - description: Origins in the CloudFront distribution. - returned: always - type: complex - contains: - items: - description: List of origins. - returned: always - type: complex - contains: - custom_headers: - description: Custom headers passed to the origin. - returned: always - type: complex - contains: - quantity: - description: Count of headers. - returned: always - type: int - sample: 1 - custom_origin_config: - description: Configuration of the origin. - returned: always - type: complex - contains: - http_port: - description: Port on which HTTP is listening. - returned: always - type: int - sample: 80 - https_port: - description: Port on which HTTPS is listening. - returned: always - type: int - sample: 443 - origin_keepalive_timeout: - description: Keep-alive timeout. 
- returned: always - type: int - sample: 5 - origin_protocol_policy: - description: Policy of which protocols are supported. - returned: always - type: str - sample: https-only - origin_read_timeout: - description: Timeout for reads to the origin. - returned: always - type: int - sample: 30 - origin_ssl_protocols: - description: SSL protocols allowed by the origin. - returned: always - type: complex - contains: - items: - description: List of SSL protocols. - returned: always - type: list - sample: - - TLSv1 - - TLSv1.1 - - TLSv1.2 - quantity: - description: Count of SSL protocols. - returned: always - type: int - sample: 3 - domain_name: - description: Domain name of the origin. - returned: always - type: str - sample: test-origin.example.com - id: - description: ID of the origin. - returned: always - type: str - sample: test-origin.example.com - origin_path: - description: Subdirectory to prefix the request from the S3 or HTTP origin. - returned: always - type: str - sample: '' - connection_attempts: - description: The number of times that CloudFront attempts to connect to the origin. - returned: always - type: int - sample: 3 - connection_timeout: - description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. - returned: always - type: int - sample: 10 - origin_shield: - description: Configuration of the origin Origin Shield. - returned: always - type: complex - contains: - enabled: - description: Whether Origin Shield is enabled or not. - returned: always - type: bool - sample: false - origin_shield_region: - description: Which region is used by Origin Shield. - returned: when enabled is true - type: str - sample: us-east-1 - version_added: 6.0.0 - s3_origin_config: - description: Origin access identity configuration for S3 Origin. - returned: when s3_origin_access_identity_enabled is true - type: dict - contains: - origin_access_identity: - type: str - description: The origin access id as a path. - sample: origin-access-identity/cloudfront/EXAMPLEID - quantity: - description: Count of origins. - returned: always - type: int - sample: 1 -price_class: - description: Price class of CloudFront distribution. - returned: always - type: str - sample: PriceClass_All -restrictions: - description: Restrictions in use by CloudFront. - returned: always - type: complex - contains: - geo_restriction: - description: Controls the countries in which your content is distributed. - returned: always - type: complex - contains: - quantity: - description: Count of restrictions. - returned: always - type: int - sample: 1 - items: - description: List of country codes allowed or disallowed. - returned: always - type: list - sample: xy - restriction_type: - description: Type of restriction. - returned: always - type: str - sample: blacklist -status: - description: Status of the CloudFront distribution. - returned: always - type: str - sample: InProgress -tags: - description: Distribution tags. - returned: always - type: dict - sample: - Hello: World -viewer_certificate: - description: Certificate used by CloudFront distribution. - returned: always - type: complex - contains: - acm_certificate_arn: - description: ARN of ACM certificate. - returned: when certificate comes from ACM - type: str - sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef - certificate: - description: Reference to certificate. 
- returned: always - type: str - sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef - certificate_source: - description: Where certificate comes from. - returned: always - type: str - sample: acm - minimum_protocol_version: - description: Minimum SSL/TLS protocol supported by this distribution. - returned: always - type: str - sample: TLSv1 - ssl_support_method: - description: Support for pre-SNI browsers or not. - returned: always - type: str - sample: sni-only -web_acl_id: - description: ID of Web Access Control List (from WAF service). - returned: always - type: str - sample: abcd1234-1234-abcd-abcd-abcd12345678 -""" - -from collections import OrderedDict -import datetime -import re - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import recursive_diff -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def change_dict_key_name(dictionary, old_key, new_key): - if old_key in dictionary: - dictionary[new_key] = dictionary.get(old_key) - dictionary.pop(old_key, None) - return dictionary - - -def merge_validation_into_config(config, validated_node, node_name): - if validated_node is not None: - if isinstance(validated_node, dict): - config_node = config.get(node_name) - if config_node is not None: - config_node_items = list(config_node.items()) - else: - config_node_items = [] - config[node_name] = dict(config_node_items + list(validated_node.items())) - if isinstance(validated_node, list): - config[node_name] = list(set(config.get(node_name) + validated_node)) - return config - - -def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): - if list_items is None: - list_items = [] - if not isinstance(list_items, list): - raise ValueError(f"Expected a list, got a {type(list_items).__name__} with value {str(list_items)}") - result = {} - if include_quantity: - result["quantity"] = len(list_items) - if len(list_items) > 0: - result["items"] = list_items - return result - - -def create_distribution(client, module, config, tags): - try: - if not tags: - return client.create_distribution(aws_retry=True, DistributionConfig=config)["Distribution"] - else: - distribution_config_with_tags = {"DistributionConfig": config, "Tags": {"Items": tags}} - return client.create_distribution_with_tags( - aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags - )["Distribution"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error creating distribution") - - -def delete_distribution(client, module, distribution): - try: - return 
client.delete_distribution( - aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Error deleting distribution {to_native(distribution['Distribution'])}") - - -def update_distribution(client, module, config, distribution_id, e_tag): - try: - return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)[ - "Distribution" - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Error updating distribution to {to_native(config)}") - - -def tag_resource(client, module, arn, tags): - try: - return client.tag_resource(aws_retry=True, Resource=arn, Tags=dict(Items=tags)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error tagging resource") - - -def untag_resource(client, module, arn, tag_keys): - try: - return client.untag_resource(aws_retry=True, Resource=arn, TagKeys=dict(Items=tag_keys)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error untagging resource") - - -def list_tags_for_resource(client, module, arn): - try: - response = client.list_tags_for_resource(aws_retry=True, Resource=arn) - return boto3_tag_list_to_ansible_dict(response.get("Tags").get("Items")) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error listing tags for resource") - - -def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn): - if valid_tags is None: - return False - changed = False - to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags) - if to_remove: - untag_resource(client, module, arn, to_remove) - changed = True - if to_add: - tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add)) - changed = True - return changed - - -class CloudFrontValidationManager(object): - """ - Manages CloudFront validations - """ - - def __init__(self, module): - self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) - self.module = module - self.__default_distribution_enabled = True - self.__default_http_port = 80 - self.__default_https_port = 443 - self.__default_ipv6_enabled = False - self.__default_origin_ssl_protocols = [ - "TLSv1", - "TLSv1.1", - "TLSv1.2", - ] - self.__default_custom_origin_protocol_policy = "match-viewer" - self.__default_custom_origin_read_timeout = 30 - self.__default_custom_origin_keepalive_timeout = 5 - self.__default_datetime_string = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") - self.__default_cache_behavior_min_ttl = 0 - self.__default_cache_behavior_max_ttl = 31536000 - self.__default_cache_behavior_default_ttl = 86400 - self.__default_cache_behavior_compress = False - self.__default_cache_behavior_viewer_protocol_policy = "allow-all" - self.__default_cache_behavior_smooth_streaming = False - self.__default_cache_behavior_forwarded_values_forward_cookies = "none" - self.__default_cache_behavior_forwarded_values_query_string = True - self.__default_trusted_signers_enabled = False - self.__valid_price_classes = set( - [ - "PriceClass_100", - "PriceClass_200", - "PriceClass_All", - ] - ) - self.__valid_origin_protocol_policies = set( - [ - "http-only", - "match-viewer", - "https-only", - ] - ) - self.__valid_origin_ssl_protocols = set( - [ - "SSLv3", - 
"TLSv1", - "TLSv1.1", - "TLSv1.2", - ] - ) - self.__valid_cookie_forwarding = set( - [ - "none", - "whitelist", - "all", - ] - ) - self.__valid_viewer_protocol_policies = set( - [ - "allow-all", - "https-only", - "redirect-to-https", - ] - ) - self.__valid_methods = set( - [ - "GET", - "HEAD", - "POST", - "PUT", - "PATCH", - "OPTIONS", - "DELETE", - ] - ) - self.__valid_methods_cached_methods = [ - set( - [ - "GET", - "HEAD", - ] - ), - set( - [ - "GET", - "HEAD", - "OPTIONS", - ] - ), - ] - self.__valid_methods_allowed_methods = [ - self.__valid_methods_cached_methods[0], - self.__valid_methods_cached_methods[1], - self.__valid_methods, - ] - self.__valid_lambda_function_association_event_types = set( - [ - "viewer-request", - "viewer-response", - "origin-request", - "origin-response", - ] - ) - self.__valid_viewer_certificate_ssl_support_methods = set( - [ - "sni-only", - "vip", - ] - ) - self.__valid_viewer_certificate_minimum_protocol_versions = set( - [ - "SSLv3", - "TLSv1", - "TLSv1_2016", - "TLSv1.1_2016", - "TLSv1.2_2018", - "TLSv1.2_2019", - "TLSv1.2_2021", - ] - ) - self.__valid_viewer_certificate_certificate_sources = set( - [ - "cloudfront", - "iam", - "acm", - ] - ) - self.__valid_http_versions = set( - [ - "http1.1", - "http2", - "http3", - "http2and3", - ] - ) - self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$") - - def add_missing_key(self, dict_object, key_to_set, value_to_set): - if key_to_set not in dict_object and value_to_set is not None: - dict_object[key_to_set] = value_to_set - return dict_object - - def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set): - if old_key not in dict_object and value_to_set is not None: - dict_object[new_key] = value_to_set - else: - dict_object = change_dict_key_name(dict_object, old_key, new_key) - return dict_object - - def add_key_else_validate( - self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False - ): - if key_name in dict_object: - self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) - else: - if to_aws_list: - dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set) - elif value_to_set is not None: - dict_object[key_name] = value_to_set - return dict_object - - def validate_logging(self, logging): - try: - if logging is None: - return None - valid_logging = {} - if logging and not set(["enabled", "include_cookies", "bucket", "prefix"]).issubset(logging): - self.module.fail_json( - msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified." - ) - valid_logging["include_cookies"] = logging.get("include_cookies") - valid_logging["enabled"] = logging.get("enabled") - valid_logging["bucket"] = logging.get("bucket") - valid_logging["prefix"] = logging.get("prefix") - return valid_logging - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution logging") - - def validate_is_list(self, list_to_validate, list_name): - if not isinstance(list_to_validate, list): - self.module.fail_json(msg=f"{list_name} is of type {type(list_to_validate).__name__}. 
Must be a list.") - - def validate_required_key(self, key_name, full_key_name, dict_object): - if key_name not in dict_object: - self.module.fail_json(msg=f"{full_key_name} must be specified.") - - def validate_origins( - self, - client, - config, - origins, - default_origin_domain_name, - default_origin_path, - create_distribution, - purge_origins=False, - ): - try: - if origins is None: - if default_origin_domain_name is None and not create_distribution: - if purge_origins: - return None - else: - return ansible_list_to_cloudfront_list(config) - if default_origin_domain_name is not None: - origins = [{"domain_name": default_origin_domain_name, "origin_path": default_origin_path or ""}] - else: - origins = [] - self.validate_is_list(origins, "origins") - if not origins and default_origin_domain_name is None and create_distribution: - self.module.fail_json( - msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one." - ) - all_origins = OrderedDict() - new_domains = list() - for origin in config: - all_origins[origin.get("domain_name")] = origin - for origin in origins: - origin = self.validate_origin( - client, all_origins.get(origin.get("domain_name"), {}), origin, default_origin_path - ) - all_origins[origin["domain_name"]] = origin - new_domains.append(origin["domain_name"]) - if purge_origins: - for domain in list(all_origins.keys()): - if domain not in new_domains: - del all_origins[domain] - return ansible_list_to_cloudfront_list(list(all_origins.values())) - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution origins") - - def validate_s3_origin_configuration(self, client, existing_config, origin): - if origin.get("s3_origin_config", {}).get("origin_access_identity"): - return origin["s3_origin_config"]["origin_access_identity"] - - if existing_config.get("s3_origin_config", {}).get("origin_access_identity"): - return existing_config["s3_origin_config"]["origin_access_identity"] - - try: - comment = f"access-identity-by-ansible-{origin.get('domain_name')}-{self.__default_datetime_string}" - caller_reference = f"{origin.get('domain_name')}-{self.__default_datetime_string}" - cfoai_config = dict( - CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) - ) - oai = client.create_cloud_front_origin_access_identity(**cfoai_config)["CloudFrontOriginAccessIdentity"][ - "Id" - ] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg=f"Couldn't create Origin Access Identity for id {origin['id']}") - return f"origin-access-identity/cloudfront/{oai}" - - def validate_origin(self, client, existing_config, origin, default_origin_path): - try: - origin = self.add_missing_key( - origin, "origin_path", existing_config.get("origin_path", default_origin_path or "") - ) - self.validate_required_key("origin_path", "origins[].origin_path", origin) - origin = self.add_missing_key(origin, "id", existing_config.get("id", self.__default_datetime_string)) - if "custom_headers" in origin and len(origin.get("custom_headers")) > 0: - for custom_header in origin.get("custom_headers"): - if "header_name" not in custom_header or "header_value" not in custom_header: - self.module.fail_json( - msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified." 
- ) - origin["custom_headers"] = ansible_list_to_cloudfront_list(origin.get("custom_headers")) - else: - origin["custom_headers"] = ansible_list_to_cloudfront_list() - if "origin_shield" in origin: - origin_shield = origin.get("origin_shield") - if origin_shield.get("enabled"): - origin_shield_region = origin_shield.get("origin_shield_region") - if origin_shield_region is None: - self.module.fail_json( - msg="origins[].origin_shield.origin_shield_region must be specified" - " when origins[].origin_shield.enabled is true." - ) - else: - origin_shield_region = origin_shield_region.lower() - if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()): - if origin.get("s3_origin_access_identity_enabled") is not None: - if origin["s3_origin_access_identity_enabled"]: - s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) - else: - s3_origin_config = None - - del origin["s3_origin_access_identity_enabled"] - - if s3_origin_config: - oai = s3_origin_config - else: - oai = "" - - origin["s3_origin_config"] = dict(origin_access_identity=oai) - - if "custom_origin_config" in origin: - self.module.fail_json( - msg="s3 origin domains and custom_origin_config are mutually exclusive", - ) - else: - origin = self.add_missing_key( - origin, "custom_origin_config", existing_config.get("custom_origin_config", {}) - ) - custom_origin_config = origin.get("custom_origin_config") - custom_origin_config = self.add_key_else_validate( - custom_origin_config, - "origin_protocol_policy", - "origins[].custom_origin_config.origin_protocol_policy", - self.__default_custom_origin_protocol_policy, - self.__valid_origin_protocol_policies, - ) - custom_origin_config = self.add_missing_key( - custom_origin_config, "origin_read_timeout", self.__default_custom_origin_read_timeout - ) - custom_origin_config = self.add_missing_key( - custom_origin_config, "origin_keepalive_timeout", self.__default_custom_origin_keepalive_timeout - ) - custom_origin_config = self.add_key_else_change_dict_key( - custom_origin_config, "http_port", "h_t_t_p_port", self.__default_http_port - ) - custom_origin_config = self.add_key_else_change_dict_key( - custom_origin_config, "https_port", "h_t_t_p_s_port", self.__default_https_port - ) - if custom_origin_config.get("origin_ssl_protocols", {}).get("items"): - custom_origin_config["origin_ssl_protocols"] = custom_origin_config["origin_ssl_protocols"]["items"] - if custom_origin_config.get("origin_ssl_protocols"): - self.validate_attribute_list_with_allowed_list( - custom_origin_config["origin_ssl_protocols"], - "origins[].origin_ssl_protocols", - self.__valid_origin_ssl_protocols, - ) - else: - custom_origin_config["origin_ssl_protocols"] = self.__default_origin_ssl_protocols - custom_origin_config["origin_ssl_protocols"] = ansible_list_to_cloudfront_list( - custom_origin_config["origin_ssl_protocols"] - ) - return origin - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error validating distribution origin") - - def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False): - try: - if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False: - return ansible_list_to_cloudfront_list(config) - all_cache_behaviors = OrderedDict() - # cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors - # is true (if purge_cache_behaviors is not true, we can't really know 
the full new order) - if not purge_cache_behaviors: - for behavior in config: - all_cache_behaviors[behavior["path_pattern"]] = behavior - for cache_behavior in cache_behaviors: - valid_cache_behavior = self.validate_cache_behavior( - all_cache_behaviors.get(cache_behavior.get("path_pattern"), {}), cache_behavior, valid_origins - ) - all_cache_behaviors[cache_behavior["path_pattern"]] = valid_cache_behavior - if purge_cache_behaviors: - for target_origin_id in set(all_cache_behaviors.keys()) - set( - [cb["path_pattern"] for cb in cache_behaviors] - ): - del all_cache_behaviors[target_origin_id] - return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors") - - def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False): - if is_default_cache and cache_behavior is None: - cache_behavior = {} - if cache_behavior is None and valid_origins is not None: - return config - cache_behavior = self.validate_cache_behavior_first_level_keys( - config, cache_behavior, valid_origins, is_default_cache - ) - cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior) - cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior) - cache_behavior = self.validate_lambda_function_associations( - config, cache_behavior.get("lambda_function_associations"), cache_behavior - ) - cache_behavior = self.validate_trusted_signers(config, cache_behavior.get("trusted_signers"), cache_behavior) - cache_behavior = self.validate_field_level_encryption_id( - config, cache_behavior.get("field_level_encryption_id"), cache_behavior - ) - return cache_behavior - - def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): - try: - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, - "default_ttl", - "default_t_t_l", - config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), - ) - cache_behavior = self.add_missing_key( - cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress) - ) - target_origin_id = cache_behavior.get("target_origin_id", config.get("target_origin_id")) - if not target_origin_id: - target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins) - if target_origin_id not in [origin["id"] for origin in valid_origins.get("items", [])]: - if is_default_cache: - cache_behavior_name = "Default cache behavior" - else: - cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" - self.module.fail_json( - msg=f"{cache_behavior_name} has target_origin_id pointing to an origin that does not exist." 
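[Editor's note: to make the ordering comment in validate_cache_behaviors above concrete, here is a minimal sketch with hypothetical path patterns of how merging and purging behave:

from collections import OrderedDict

existing = [{"path_pattern": "/img/*"}, {"path_pattern": "/api/*"}]
desired = [{"path_pattern": "/api/*", "compress": True}]

# purge_cache_behaviors=False: existing order is preserved, matching patterns are updated in place
merged = OrderedDict((b["path_pattern"], b) for b in existing)
for behavior in desired:
    merged[behavior["path_pattern"]] = behavior

# purge_cache_behaviors=True: only the requested behaviors survive, in the requested order
purged = OrderedDict((b["path_pattern"], b) for b in desired)
]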
- ) - cache_behavior["target_origin_id"] = target_origin_id - cache_behavior = self.add_key_else_validate( - cache_behavior, - "viewer_protocol_policy", - "cache_behavior.viewer_protocol_policy", - config.get("viewer_protocol_policy", self.__default_cache_behavior_viewer_protocol_policy), - self.__valid_viewer_protocol_policies, - ) - cache_behavior = self.add_missing_key( - cache_behavior, - "smooth_streaming", - config.get("smooth_streaming", self.__default_cache_behavior_smooth_streaming), - ) - return cache_behavior - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys") - - def validate_forwarded_values(self, config, forwarded_values, cache_behavior): - try: - if not forwarded_values: - forwarded_values = dict() - existing_config = config.get("forwarded_values", {}) - headers = forwarded_values.get("headers", existing_config.get("headers", {}).get("items")) - if headers: - headers.sort() - forwarded_values["headers"] = ansible_list_to_cloudfront_list(headers) - if "cookies" not in forwarded_values: - forward = existing_config.get("cookies", {}).get( - "forward", self.__default_cache_behavior_forwarded_values_forward_cookies - ) - forwarded_values["cookies"] = {"forward": forward} - else: - existing_whitelist = existing_config.get("cookies", {}).get("whitelisted_names", {}).get("items") - whitelist = forwarded_values.get("cookies").get("whitelisted_names", existing_whitelist) - if whitelist: - self.validate_is_list(whitelist, "forwarded_values.whitelisted_names") - forwarded_values["cookies"]["whitelisted_names"] = ansible_list_to_cloudfront_list(whitelist) - cookie_forwarding = forwarded_values.get("cookies").get( - "forward", existing_config.get("cookies", {}).get("forward") - ) - self.validate_attribute_with_allowed_values( - cookie_forwarding, "cache_behavior.forwarded_values.cookies.forward", self.__valid_cookie_forwarding - ) - forwarded_values["cookies"]["forward"] = cookie_forwarding - query_string_cache_keys = forwarded_values.get( - "query_string_cache_keys", existing_config.get("query_string_cache_keys", {}).get("items", []) - ) - self.validate_is_list(query_string_cache_keys, "forwarded_values.query_string_cache_keys") - forwarded_values["query_string_cache_keys"] = ansible_list_to_cloudfront_list(query_string_cache_keys) - forwarded_values = self.add_missing_key( - forwarded_values, - "query_string", - existing_config.get("query_string", self.__default_cache_behavior_forwarded_values_query_string), - ) - cache_behavior["forwarded_values"] = forwarded_values - return cache_behavior - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating forwarded values") - - def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior): - try: - if lambda_function_associations is not None: - self.validate_is_list(lambda_function_associations, "lambda_function_associations") - for association in lambda_function_associations: - association = change_dict_key_name(association, "lambda_function_arn", "lambda_function_a_r_n") - self.validate_attribute_with_allowed_values( - association.get("event_type"), - "cache_behaviors[].lambda_function_associations.event_type", - self.__valid_lambda_function_association_event_types, - ) - cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list( - lambda_function_associations - ) - else: - if "lambda_function_associations" in config: - cache_behavior["lambda_function_associations"] = 
config.get("lambda_function_associations") - else: - cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list([]) - return cache_behavior - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating lambda function associations") - - def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior): - if field_level_encryption_id is not None: - cache_behavior["field_level_encryption_id"] = field_level_encryption_id - elif "field_level_encryption_id" in config: - cache_behavior["field_level_encryption_id"] = config.get("field_level_encryption_id") - else: - cache_behavior["field_level_encryption_id"] = "" - return cache_behavior - - def validate_allowed_methods(self, config, allowed_methods, cache_behavior): - try: - if allowed_methods is not None: - self.validate_required_key("items", "cache_behavior.allowed_methods.items[]", allowed_methods) - temp_allowed_items = allowed_methods.get("items") - self.validate_is_list(temp_allowed_items, "cache_behavior.allowed_methods.items") - self.validate_attribute_list_with_allowed_list( - temp_allowed_items, "cache_behavior.allowed_methods.items[]", self.__valid_methods_allowed_methods - ) - cached_items = allowed_methods.get("cached_methods") - if "cached_methods" in allowed_methods: - self.validate_is_list(cached_items, "cache_behavior.allowed_methods.cached_methods") - self.validate_attribute_list_with_allowed_list( - cached_items, - "cache_behavior.allowed_items.cached_methods[]", - self.__valid_methods_cached_methods, - ) - # we don't care if the order of how cloudfront stores the methods differs - preserving existing - # order reduces likelihood of making unnecessary changes - if "allowed_methods" in config and set(config["allowed_methods"]["items"]) == set(temp_allowed_items): - cache_behavior["allowed_methods"] = config["allowed_methods"] - else: - cache_behavior["allowed_methods"] = ansible_list_to_cloudfront_list(temp_allowed_items) - - if cached_items and set(cached_items) == set( - config.get("allowed_methods", {}).get("cached_methods", {}).get("items", []) - ): - cache_behavior["allowed_methods"]["cached_methods"] = config["allowed_methods"]["cached_methods"] - else: - cache_behavior["allowed_methods"]["cached_methods"] = ansible_list_to_cloudfront_list(cached_items) - else: - if "allowed_methods" in config: - cache_behavior["allowed_methods"] = config.get("allowed_methods") - return cache_behavior - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating allowed methods") - - def validate_trusted_signers(self, config, trusted_signers, cache_behavior): - try: - if trusted_signers is None: - trusted_signers = {} - if "items" in trusted_signers: - valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get("items")) - else: - valid_trusted_signers = dict(quantity=config.get("quantity", 0)) - if "items" in config: - valid_trusted_signers = dict(items=config["items"]) - valid_trusted_signers["enabled"] = trusted_signers.get( - "enabled", config.get("enabled", self.__default_trusted_signers_enabled) - ) - cache_behavior["trusted_signers"] = valid_trusted_signers - return cache_behavior - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating trusted signers") - - def validate_viewer_certificate(self, viewer_certificate): - try: - if viewer_certificate is None: - return None - if ( - viewer_certificate.get("cloudfront_default_certificate") - and viewer_certificate.get("ssl_support_method") is not None - ): - 
self.module.fail_json( - msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" - + "_certificate set to true." - ) - self.validate_attribute_with_allowed_values( - viewer_certificate.get("ssl_support_method"), - "viewer_certificate.ssl_support_method", - self.__valid_viewer_certificate_ssl_support_methods, - ) - self.validate_attribute_with_allowed_values( - viewer_certificate.get("minimum_protocol_version"), - "viewer_certificate.minimum_protocol_version", - self.__valid_viewer_certificate_minimum_protocol_versions, - ) - self.validate_attribute_with_allowed_values( - viewer_certificate.get("certificate_source"), - "viewer_certificate.certificate_source", - self.__valid_viewer_certificate_certificate_sources, - ) - viewer_certificate = change_dict_key_name( - viewer_certificate, "cloudfront_default_certificate", "cloud_front_default_certificate" - ) - viewer_certificate = change_dict_key_name(viewer_certificate, "ssl_support_method", "s_s_l_support_method") - viewer_certificate = change_dict_key_name(viewer_certificate, "iam_certificate_id", "i_a_m_certificate_id") - viewer_certificate = change_dict_key_name( - viewer_certificate, "acm_certificate_arn", "a_c_m_certificate_arn" - ) - return viewer_certificate - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating viewer certificate") - - def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses): - try: - if custom_error_responses is None and not purge_custom_error_responses: - return ansible_list_to_cloudfront_list(config) - self.validate_is_list(custom_error_responses, "custom_error_responses") - result = list() - existing_responses = dict((response["error_code"], response) for response in custom_error_responses) - for custom_error_response in custom_error_responses: - self.validate_required_key("error_code", "custom_error_responses[].error_code", custom_error_response) - custom_error_response = change_dict_key_name( - custom_error_response, "error_caching_min_ttl", "error_caching_min_t_t_l" - ) - if "response_code" in custom_error_response: - custom_error_response["response_code"] = str(custom_error_response["response_code"]) - if custom_error_response["error_code"] in existing_responses: - del existing_responses[custom_error_response["error_code"]] - result.append(custom_error_response) - if not purge_custom_error_responses: - result.extend(existing_responses.values()) - - return ansible_list_to_cloudfront_list(result) - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating custom error responses") - - def validate_restrictions(self, config, restrictions, purge_restrictions=False): - try: - if restrictions is None: - if purge_restrictions: - return None - else: - return config - self.validate_required_key("geo_restriction", "restrictions.geo_restriction", restrictions) - geo_restriction = restrictions.get("geo_restriction") - self.validate_required_key( - "restriction_type", "restrictions.geo_restriction.restriction_type", geo_restriction - ) - existing_restrictions = ( - config.get("geo_restriction", {}).get(geo_restriction["restriction_type"], {}).get("items", []) - ) - geo_restriction_items = geo_restriction.get("items") - if not purge_restrictions: - geo_restriction_items.extend( - [rest for rest in existing_restrictions if rest not in geo_restriction_items] - ) - valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items) - valid_restrictions["restriction_type"] = 
geo_restriction.get("restriction_type") - return {"geo_restriction": valid_restrictions} - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating restrictions") - - def validate_distribution_config_parameters( - self, config, default_root_object, ipv6_enabled, http_version, web_acl_id - ): - try: - config["default_root_object"] = default_root_object or config.get("default_root_object", "") - config["is_i_p_v6_enabled"] = ( - ipv6_enabled - if ipv6_enabled is not None - else config.get("is_i_p_v6_enabled", self.__default_ipv6_enabled) - ) - if http_version is not None or config.get("http_version"): - self.validate_attribute_with_allowed_values(http_version, "http_version", self.__valid_http_versions) - config["http_version"] = http_version or config.get("http_version") - if web_acl_id or config.get("web_a_c_l_id"): - config["web_a_c_l_id"] = web_acl_id or config.get("web_a_c_l_id") - return config - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution config parameters") - - def validate_common_distribution_parameters( - self, config, enabled, aliases, logging, price_class, purge_aliases=False - ): - try: - if config is None: - config = {} - if aliases is not None: - if not purge_aliases: - aliases.extend( - [alias for alias in config.get("aliases", {}).get("items", []) if alias not in aliases] - ) - config["aliases"] = ansible_list_to_cloudfront_list(aliases) - if logging is not None: - config["logging"] = self.validate_logging(logging) - config["enabled"] = ( - enabled if enabled is not None else config.get("enabled", self.__default_distribution_enabled) - ) - if price_class is not None: - self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes) - config["price_class"] = price_class - return config - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating common distribution parameters") - - def validate_comment(self, config, comment): - config["comment"] = comment or config.get( - "comment", "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string - ) - return config - - def validate_caller_reference(self, caller_reference): - return caller_reference or self.__default_datetime_string - - def get_first_origin_id_for_default_cache_behavior(self, valid_origins): - try: - if valid_origins is not None: - valid_origins_list = valid_origins.get("items") - if ( - valid_origins_list is not None - and isinstance(valid_origins_list, list) - and len(valid_origins_list) > 0 - ): - return str(valid_origins_list[0].get("id")) - self.module.fail_json( - msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration." 
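[Editor's note: the restriction merge in validate_restrictions above follows the same non-destructive pattern as the other purge_* options. A small illustration with assumed country codes:

existing_restrictions = ["DE", "FR"]      # items already on the distribution
geo_restriction_items = ["US"]            # items requested in the task

# purge_restrictions=False: keep existing entries that were not re-specified
geo_restriction_items.extend(r for r in existing_restrictions if r not in geo_restriction_items)
# geo_restriction_items == ["US", "DE", "FR"]
]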
- ) - except Exception as e: - self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior") - - def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list): - try: - self.validate_is_list(attribute_list, attribute_list_name) - if ( - isinstance(allowed_list, list) - and set(attribute_list) not in allowed_list - or isinstance(allowed_list, set) - and not set(allowed_list).issuperset(attribute_list) - ): - attribute_list = " ".join(str(a) for a in allowed_list) - self.module.fail_json(msg=f"The attribute list {attribute_list_name} must be one of [{attribute_list}]") - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") - - def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): - if attribute is not None and attribute not in allowed_list: - attribute_list = " ".join(str(a) for a in allowed_list) - self.module.fail_json(msg=f"The attribute {attribute_name} must be one of [{attribute_list}]") - - def validate_distribution_from_caller_reference(self, caller_reference): - try: - distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) - distribution_name = "Distribution" - distribution_config_name = "DistributionConfig" - distribution_ids = [dist.get("Id") for dist in distributions] - for distribution_id in distribution_ids: - distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) - if distribution is not None: - distribution_config = distribution[distribution_name].get(distribution_config_name) - if ( - distribution_config is not None - and distribution_config.get("CallerReference") == caller_reference - ): - distribution[distribution_name][distribution_config_name] = distribution_config - return distribution - - except Exception as e: - self.module.fail_json_aws(e, msg="Error validating distribution from caller reference") - - def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference): - try: - if caller_reference is not None: - return self.validate_distribution_from_caller_reference(caller_reference) - else: - if aliases and distribution_id is None: - distribution_id = self.validate_distribution_id_from_alias(aliases) - if distribution_id: - return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id) - return None - except Exception as e: - self.module.fail_json_aws( - e, msg="Error validating distribution_id from alias, aliases and caller reference" - ) - - def validate_distribution_id_from_alias(self, aliases): - distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False) - if distributions: - for distribution in distributions: - distribution_aliases = distribution.get("Aliases", {}).get("Items", []) - if set(aliases) & set(distribution_aliases): - return distribution["Id"] - return None - - def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): - if distribution_id is None: - distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) - distribution_id = distribution["Distribution"]["Id"] - - try: - waiter = client.get_waiter("distribution_deployed") - attempts = 1 + int(wait_timeout / 60) - waiter.wait(Id=distribution_id, WaiterConfig={"MaxAttempts": attempts}) - except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws( - e, - msg=f"Timeout waiting for CloudFront action. 
Waited for {to_text(wait_timeout)} seconds before timeout.", - ) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Error getting distribution {distribution_id}") - - -def main(): - argument_spec = dict( - state=dict(choices=["present", "absent"], default="present"), - caller_reference=dict(), - comment=dict(), - distribution_id=dict(), - e_tag=dict(), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - alias=dict(), - aliases=dict(type="list", default=[], elements="str"), - purge_aliases=dict(type="bool", default=False), - default_root_object=dict(), - origins=dict(type="list", elements="dict"), - purge_origins=dict(type="bool", default=False), - default_cache_behavior=dict(type="dict"), - cache_behaviors=dict(type="list", elements="dict"), - purge_cache_behaviors=dict(type="bool", default=False), - custom_error_responses=dict(type="list", elements="dict"), - purge_custom_error_responses=dict(type="bool", default=False), - logging=dict(type="dict"), - price_class=dict(), - enabled=dict(type="bool"), - viewer_certificate=dict(type="dict"), - restrictions=dict(type="dict"), - web_acl_id=dict(), - http_version=dict(), - ipv6_enabled=dict(type="bool"), - default_origin_domain_name=dict(), - default_origin_path=dict(), - wait=dict(default=False, type="bool"), - wait_timeout=dict(default=1800, type="int"), - ) - - result = {} - changed = True - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=False, - mutually_exclusive=[ - ["distribution_id", "alias"], - ["default_origin_domain_name", "distribution_id"], - ["default_origin_domain_name", "alias"], - ], - ) - - client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff()) - - validation_mgr = CloudFrontValidationManager(module) - - state = module.params.get("state") - caller_reference = module.params.get("caller_reference") - comment = module.params.get("comment") - e_tag = module.params.get("e_tag") - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - distribution_id = module.params.get("distribution_id") - alias = module.params.get("alias") - aliases = module.params.get("aliases") - purge_aliases = module.params.get("purge_aliases") - default_root_object = module.params.get("default_root_object") - origins = module.params.get("origins") - purge_origins = module.params.get("purge_origins") - default_cache_behavior = module.params.get("default_cache_behavior") - cache_behaviors = module.params.get("cache_behaviors") - purge_cache_behaviors = module.params.get("purge_cache_behaviors") - custom_error_responses = module.params.get("custom_error_responses") - purge_custom_error_responses = module.params.get("purge_custom_error_responses") - logging = module.params.get("logging") - price_class = module.params.get("price_class") - enabled = module.params.get("enabled") - viewer_certificate = module.params.get("viewer_certificate") - restrictions = module.params.get("restrictions") - purge_restrictions = module.params.get("purge_restrictions") - web_acl_id = module.params.get("web_acl_id") - http_version = module.params.get("http_version") - ipv6_enabled = module.params.get("ipv6_enabled") - default_origin_domain_name = module.params.get("default_origin_domain_name") - default_origin_path = module.params.get("default_origin_path") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - - if alias and alias not in 
aliases: - aliases.append(alias) - - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( - distribution_id, aliases, caller_reference - ) - - update = state == "present" and distribution - create = state == "present" and not distribution - delete = state == "absent" and distribution - - if not (update or create or delete): - module.exit_json(changed=False) - - config = {} - if update or delete: - config = distribution["Distribution"]["DistributionConfig"] - e_tag = distribution["ETag"] - distribution_id = distribution["Distribution"]["Id"] - - if update: - config = camel_dict_to_snake_dict(config, reversible=True) - - if create or update: - config = validation_mgr.validate_common_distribution_parameters( - config, enabled, aliases, logging, price_class, purge_aliases - ) - config = validation_mgr.validate_distribution_config_parameters( - config, default_root_object, ipv6_enabled, http_version, web_acl_id - ) - config["origins"] = validation_mgr.validate_origins( - client, - config.get("origins", {}).get("items", []), - origins, - default_origin_domain_name, - default_origin_path, - create, - purge_origins, - ) - config["cache_behaviors"] = validation_mgr.validate_cache_behaviors( - config.get("cache_behaviors", {}).get("items", []), - cache_behaviors, - config["origins"], - purge_cache_behaviors, - ) - config["default_cache_behavior"] = validation_mgr.validate_cache_behavior( - config.get("default_cache_behavior", {}), default_cache_behavior, config["origins"], True - ) - config["custom_error_responses"] = validation_mgr.validate_custom_error_responses( - config.get("custom_error_responses", {}).get("items", []), - custom_error_responses, - purge_custom_error_responses, - ) - valid_restrictions = validation_mgr.validate_restrictions( - config.get("restrictions", {}), restrictions, purge_restrictions - ) - if valid_restrictions: - config["restrictions"] = valid_restrictions - valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate) - config = merge_validation_into_config(config, valid_viewer_certificate, "viewer_certificate") - config = validation_mgr.validate_comment(config, comment) - config = snake_dict_to_camel_dict(config, capitalize_first=True) - - if create: - config["CallerReference"] = validation_mgr.validate_caller_reference(caller_reference) - result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {})) - result = camel_dict_to_snake_dict(result) - result["tags"] = list_tags_for_resource(client, module, result["arn"]) - - if delete: - if config["Enabled"]: - config["Enabled"] = False - result = update_distribution(client, module, config, distribution_id, e_tag) - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) - distribution = validation_mgr.validate_distribution_from_aliases_caller_reference( - distribution_id, aliases, caller_reference - ) - # e_tag = distribution['ETag'] - result = delete_distribution(client, module, distribution) - - if update: - changed = config != distribution["Distribution"]["DistributionConfig"] - if changed: - result = update_distribution(client, module, config, distribution_id, e_tag) - else: - result = distribution["Distribution"] - existing_tags = list_tags_for_resource(client, module, result["ARN"]) - distribution["Distribution"]["DistributionConfig"]["tags"] = existing_tags - changed |= update_tags(client, module, existing_tags, tags, purge_tags, result["ARN"]) - result = 
camel_dict_to_snake_dict(result) - result["distribution_config"]["tags"] = config["tags"] = list_tags_for_resource(client, module, result["arn"]) - result["diff"] = dict() - diff = recursive_diff(distribution["Distribution"]["DistributionConfig"], config) - if diff: - result["diff"]["before"] = diff[0] - result["diff"]["after"] = diff[1] - - if wait and (create or update): - validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference")) - - if "distribution_config" in result: - result.update(result["distribution_config"]) - del result["distribution_config"] - - module.exit_json(changed=changed, **result) - - -if __name__ == "__main__": - main() diff --git a/cloudfront_distribution_info.py b/cloudfront_distribution_info.py deleted file mode 100644 index 3bd20868ae5..00000000000 --- a/cloudfront_distribution_info.py +++ /dev/null @@ -1,408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: cloudfront_distribution_info -version_added: 1.0.0 -short_description: Obtain facts about an AWS CloudFront distribution -description: - - Gets information about an AWS CloudFront distribution. - - Prior to release 5.0.0 this module was called C(community.aws.cloudfront_info). - The usage did not change. -author: - - Willem van Ketwich (@wilvk) -options: - distribution_id: - description: - - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config), - I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations). - required: false - type: str - invalidation_id: - description: - - The id of the invalidation to get information about. - - Used with I(invalidation). - required: false - type: str - origin_access_identity_id: - description: - - The id of the CloudFront origin access identity to get information about. - required: false - type: str -# web_acl_id: -# description: -# - Used with I(list_distributions_by_web_acl_id). -# required: false -# type: str - domain_name_alias: - description: - - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront - distribution to get the distribution id where required. - required: false - type: str - all_lists: - description: - - Get all CloudFront lists that do not require parameters. - required: false - default: false - type: bool - origin_access_identity: - description: - - Get information about an origin access identity. - - Requires I(origin_access_identity_id) to be specified. - required: false - default: false - type: bool - origin_access_identity_config: - description: - - Get the configuration information about an origin access identity. - - Requires I(origin_access_identity_id) to be specified. - required: false - default: false - type: bool - distribution: - description: - - Get information about a distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. - required: false - default: false - type: bool - distribution_config: - description: - - Get the configuration information about a distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. - required: false - default: false - type: bool - invalidation: - description: - - Get information about an invalidation. - - Requires I(invalidation_id) to be specified. 
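[Editor's note: as a reading aid for the options in this documentation block, the id requirements enforced later in main() can be summarized like this (editorial summary, not part of the module):

requires_distribution_id_or_alias = {
    "distribution", "distribution_config", "invalidation",
    "streaming_distribution", "streaming_distribution_config", "list_invalidations",
}
requires_invalidation_id = {"invalidation"}
requires_origin_access_identity_id = {"origin_access_identity", "origin_access_identity_config"}
requires_web_acl_id = {"list_distributions_by_web_acl_id"}
]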
- required: false - default: false - type: bool - streaming_distribution: - description: - - Get information about a specified RTMP distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. - required: false - default: false - type: bool - streaming_distribution_config: - description: - - Get the configuration information about a specified RTMP distribution. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. - required: false - default: false - type: bool - list_origin_access_identities: - description: - - Get a list of CloudFront origin access identities. - - Requires I(origin_access_identity_id) to be set. - required: false - default: false - type: bool - list_distributions: - description: - - Get a list of CloudFront distributions. - required: false - default: false - type: bool - list_distributions_by_web_acl_id: - description: - - Get a list of distributions using web acl id as a filter. - - Requires I(web_acl_id) to be set. - required: false - default: false - type: bool - list_invalidations: - description: - - Get a list of invalidations. - - Requires I(distribution_id) or I(domain_name_alias) to be specified. - required: false - default: false - type: bool - list_streaming_distributions: - description: - - Get a list of streaming distributions. - required: false - default: false - type: bool - summary: - description: - - Returns a summary of all distributions, streaming distributions and origin_access_identities. - - This is the default behaviour if no option is selected. - required: false - default: false - type: bool - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Get a summary of distributions - community.aws.cloudfront_distribution_info: - summary: true - register: result - -- name: Get information about a distribution - community.aws.cloudfront_distribution_info: - distribution: true - distribution_id: my-cloudfront-distribution-id - register: result_did -- ansible.builtin.debug: - msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}" - -- name: Get information about a distribution using the CNAME of the cloudfront distribution. - community.aws.cloudfront_distribution_info: - distribution: true - domain_name_alias: www.my-website.com - register: result_website -- ansible.builtin.debug: - msg: "{{ result_website['cloudfront']['www.my-website.com'] }}" - -- name: Get all information about an invalidation for a distribution. - community.aws.cloudfront_distribution_info: - invalidation: true - distribution_id: my-cloudfront-distribution-id - invalidation_id: my-cloudfront-invalidation-id - -- name: Get all information about a CloudFront origin access identity. - community.aws.cloudfront_distribution_info: - origin_access_identity: true - origin_access_identity_id: my-cloudfront-origin-access-identity-id - -- name: Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions) - community.aws.cloudfront_distribution_info: - origin_access_identity: true - origin_access_identity_id: my-cloudfront-origin-access-identity-id - -- name: Get all information about lists not requiring parameters (ie. 
list_origin_access_identities, list_distributions, list_streaming_distributions) - community.aws.cloudfront_distribution_info: - all_lists: true -""" - -RETURN = r""" -origin_access_identity: - description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set. - returned: only if I(origin_access_identity) is true - type: dict -origin_access_identity_configuration: - description: Describes the origin access identity information configuration information. Requires I(origin_access_identity_id) to be set. - returned: only if I(origin_access_identity_configuration) is true - type: dict -distribution: - description: > - Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias) - to be specified. Requires I(origin_access_identity_id) to be set. - returned: only if distribution is true - type: dict -distribution_config: - description: > - Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias) - to be specified. - returned: only if I(distribution_config) is true - type: dict -invalidation: - description: > - Describes the invalidation information for the distribution. Requires - I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias.) - returned: only if invalidation is true - type: dict -streaming_distribution: - description: > - Describes the streaming information for the distribution. Requires - I(distribution_id) or I(domain_name_alias) to be specified. - returned: only if I(streaming_distribution) is true - type: dict -streaming_distribution_config: - description: > - Describes the streaming configuration information for the distribution. - Requires I(distribution_id) or I(domain_name_alias) to be specified. - returned: only if I(streaming_distribution_config) is true - type: dict -summary: - description: Gives a summary of distributions, streaming distributions and origin access identities. - returned: as default or if summary is true - type: dict -result: - description: > - Result dict not nested under the CloudFront ID to access results of module without the knowledge of that id - as figuring out the DistributionId is usually the reason one uses this module in the first place. 
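[Editor's note: a minimal sketch, with a hypothetical id and alias, of the fact layout the fixed result key described above produces, mirroring set_facts_for_distribution_id_and_alias() below:

details = {"Id": "E15BU8SDCGSG57", "Status": "Deployed"}  # hypothetical distribution details
facts = {}
facts["E15BU8SDCGSG57"] = details       # keyed by distribution id
facts["www.example.com"] = details      # and by every alias of the distribution
facts["result"] = details               # fixed key, usable without knowing the id
facts["result"]["DistributionId"] = "E15BU8SDCGSG57"
]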
- returned: always - type: dict -""" - -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases): - facts[distribution_id] = details - # also have a fixed key for accessing results/details returned - facts["result"] = details - facts["result"]["DistributionId"] = distribution_id - - for alias in aliases: - facts[alias] = details - return facts - - -def main(): - argument_spec = dict( - distribution_id=dict(required=False, type="str"), - invalidation_id=dict(required=False, type="str"), - origin_access_identity_id=dict(required=False, type="str"), - domain_name_alias=dict(required=False, type="str"), - all_lists=dict(required=False, default=False, type="bool"), - distribution=dict(required=False, default=False, type="bool"), - distribution_config=dict(required=False, default=False, type="bool"), - origin_access_identity=dict(required=False, default=False, type="bool"), - origin_access_identity_config=dict(required=False, default=False, type="bool"), - invalidation=dict(required=False, default=False, type="bool"), - streaming_distribution=dict(required=False, default=False, type="bool"), - streaming_distribution_config=dict(required=False, default=False, type="bool"), - list_origin_access_identities=dict(required=False, default=False, type="bool"), - list_distributions=dict(required=False, default=False, type="bool"), - list_distributions_by_web_acl_id=dict(required=False, default=False, type="bool"), - list_invalidations=dict(required=False, default=False, type="bool"), - list_streaming_distributions=dict(required=False, default=False, type="bool"), - summary=dict(required=False, default=False, type="bool"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - service_mgr = CloudFrontFactsServiceManager(module) - - distribution_id = module.params.get("distribution_id") - invalidation_id = module.params.get("invalidation_id") - origin_access_identity_id = module.params.get("origin_access_identity_id") - web_acl_id = module.params.get("web_acl_id") - domain_name_alias = module.params.get("domain_name_alias") - all_lists = module.params.get("all_lists") - distribution = module.params.get("distribution") - distribution_config = module.params.get("distribution_config") - origin_access_identity = module.params.get("origin_access_identity") - origin_access_identity_config = module.params.get("origin_access_identity_config") - invalidation = module.params.get("invalidation") - streaming_distribution = module.params.get("streaming_distribution") - streaming_distribution_config = module.params.get("streaming_distribution_config") - list_origin_access_identities = module.params.get("list_origin_access_identities") - list_distributions = module.params.get("list_distributions") - list_distributions_by_web_acl_id = module.params.get("list_distributions_by_web_acl_id") - list_invalidations = module.params.get("list_invalidations") - list_streaming_distributions = module.params.get("list_streaming_distributions") - summary = module.params.get("summary") - - aliases = [] - result = {"cloudfront": {}} - facts = {} - - require_distribution_id = ( - distribution - or distribution_config - or invalidation - or streaming_distribution - or streaming_distribution_config - or list_invalidations - ) - - # set 
default to summary if no option specified - summary = summary or not ( - distribution - or distribution_config - or origin_access_identity - or origin_access_identity_config - or invalidation - or streaming_distribution - or streaming_distribution_config - or list_origin_access_identities - or list_distributions_by_web_acl_id - or list_invalidations - or list_streaming_distributions - or list_distributions - ) - - # validations - if require_distribution_id and distribution_id is None and domain_name_alias is None: - module.fail_json(msg="Error distribution_id or domain_name_alias have not been specified.") - if invalidation and invalidation_id is None: - module.fail_json(msg="Error invalidation_id has not been specified.") - if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None: - module.fail_json(msg="Error origin_access_identity_id has not been specified.") - if list_distributions_by_web_acl_id and web_acl_id is None: - module.fail_json(msg="Error web_acl_id has not been specified.") - - # get distribution id from domain name alias - if require_distribution_id and distribution_id is None: - distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias) - if not distribution_id: - module.fail_json(msg="Error unable to source a distribution id from domain_name_alias") - - # set appropriate cloudfront id - if invalidation_id is not None and invalidation: - facts.update({invalidation_id: {}}) - if origin_access_identity_id and (origin_access_identity or origin_access_identity_config): - facts.update({origin_access_identity_id: {}}) - if web_acl_id: - facts.update({web_acl_id: {}}) - - # get details based on options - if distribution: - facts_to_set = service_mgr.get_distribution(id=distribution_id) - if distribution_config: - facts_to_set = service_mgr.get_distribution_config(id=distribution_id) - if origin_access_identity: - facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id)) - if origin_access_identity_config: - facts[origin_access_identity_id].update( - service_mgr.get_origin_access_identity_config(id=origin_access_identity_id) - ) - if invalidation: - facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id) - facts[invalidation_id].update(facts_to_set) - if streaming_distribution: - facts_to_set = service_mgr.get_streaming_distribution(id=distribution_id) - if streaming_distribution_config: - facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id) - if list_invalidations: - invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {} - facts_to_set = {"invalidations": invalidations} - if "facts_to_set" in vars(): - aliases = service_mgr.get_aliases_from_distribution_id(distribution_id) - facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases) - - # get list based on options - if all_lists or list_origin_access_identities: - facts["origin_access_identities"] = service_mgr.list_origin_access_identities() or {} - if all_lists or list_distributions: - facts["distributions"] = service_mgr.list_distributions() or {} - if all_lists or list_streaming_distributions: - facts["streaming_distributions"] = service_mgr.list_streaming_distributions() or {} - if list_distributions_by_web_acl_id: - facts["distributions_by_web_acl_id"] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {} - if list_invalidations: - 
facts["invalidations"] = service_mgr.list_invalidations(distribution_id=distribution_id) or {} - - # default summary option - if summary: - facts["summary"] = service_mgr.summary() - - result["changed"] = False - result["cloudfront"].update(facts) - - module.exit_json(msg="Retrieved CloudFront info.", **result) - - -if __name__ == "__main__": - main() diff --git a/cloudfront_invalidation.py b/cloudfront_invalidation.py deleted file mode 100644 index b98b56be2d2..00000000000 --- a/cloudfront_invalidation.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- - -version_added: 1.0.0 -module: cloudfront_invalidation - -short_description: create invalidations for AWS CloudFront distributions -description: - - Allows for invalidation of a batch of paths for a CloudFront distribution. - -author: - - Willem van Ketwich (@wilvk) - -options: - distribution_id: - description: - - The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias. - required: false - type: str - alias: - description: - - The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id. - required: false - type: str - caller_reference: - description: - - A unique reference identifier for the invalidation paths. - - Defaults to current datetime stamp. - required: false - default: - type: str - target_paths: - description: - - A list of paths on the distribution to invalidate. Each path should begin with C(/). Wildcards are allowed. eg. C(/foo/bar/*) - required: true - type: list - elements: str - -notes: - - does not support check mode - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -- name: create a batch of invalidations using a distribution_id for a reference - community.aws.cloudfront_invalidation: - distribution_id: E15BU8SDCGSG57 - caller_reference: testing 123 - target_paths: - - /testpathone/test1.css - - /testpathtwo/test2.js - - /testpaththree/test3.ss - -- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match - community.aws.cloudfront_invalidation: - alias: alias.test.com - caller_reference: testing 123 - target_paths: - - /testpathone/test4.css - - /testpathtwo/test5.js - - /testpaththree/* - -""" - -RETURN = r""" -invalidation: - description: The invalidation's information. - returned: always - type: complex - contains: - create_time: - description: The date and time the invalidation request was first made. - returned: always - type: str - sample: '2018-02-01T15:50:41.159000+00:00' - id: - description: The identifier for the invalidation request. - returned: always - type: str - sample: I2G9MOWJZFV612 - invalidation_batch: - description: The current invalidation information for the batch request. - returned: always - type: complex - contains: - caller_reference: - description: The value used to uniquely identify an invalidation request. - returned: always - type: str - sample: testing 123 - paths: - description: A dict that contains information about the objects that you want to invalidate. - returned: always - type: complex - contains: - items: - description: A list of the paths that you want to invalidate. 
- returned: always - type: list - sample: - - /testpathtwo/test2.js - - /testpathone/test1.css - - /testpaththree/test3.ss - quantity: - description: The number of objects that you want to invalidate. - returned: always - type: int - sample: 3 - status: - description: The status of the invalidation request. - returned: always - type: str - sample: Completed -location: - description: The fully qualified URI of the distribution and invalidation batch request. - returned: always - type: str - sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 -""" - -import datetime - -try: - import botocore -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class CloudFrontInvalidationServiceManager(object): - """ - Handles CloudFront service calls to AWS for invalidations - """ - - def __init__(self, module, cloudfront_facts_mgr): - self.module = module - self.client = module.client("cloudfront") - self.__cloudfront_facts_mgr = cloudfront_facts_mgr - - def create_invalidation(self, distribution_id, invalidation_batch): - current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch["CallerReference"]) - try: - response = self.client.create_invalidation( - DistributionId=distribution_id, InvalidationBatch=invalidation_batch - ) - response.pop("ResponseMetadata", None) - if current_invalidation_response: - return response, False - else: - return response, True - except is_boto3_error_message( - "Your request contains a caller reference that was used for a previous invalidation " - "batch for the same distribution." - ): - self.module.warn( - "InvalidationBatch target paths are not modifiable. " - "To make a new invalidation please update caller_reference." 
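[Editor's note: the branch above is what makes the module idempotent per caller reference; schematically:

def report(current_invalidation_response, new_response):
    # a matching caller reference already existed -> same batch, so not a change
    if current_invalidation_response:
        return new_response, False
    return new_response, True
]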
- ) - return current_invalidation_response, False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") - - def get_invalidation(self, distribution_id, caller_reference): - # find all invalidations for the distribution - invalidations = self.__cloudfront_facts_mgr.list_invalidations(distribution_id=distribution_id) - - # check if there is an invalidation with the same caller reference - for invalidation in invalidations: - invalidation_info = self.__cloudfront_facts_mgr.get_invalidation( - distribution_id=distribution_id, id=invalidation["Id"] - ) - if invalidation_info.get("InvalidationBatch", {}).get("CallerReference") == caller_reference: - invalidation_info.pop("ResponseMetadata", None) - return invalidation_info - return {} - - -class CloudFrontInvalidationValidationManager(object): - """ - Manages CloudFront validations for invalidation batches - """ - - def __init__(self, module, cloudfront_facts_mgr): - self.module = module - self.__cloudfront_facts_mgr = cloudfront_facts_mgr - - def validate_distribution_id(self, distribution_id, alias): - try: - if distribution_id is None and alias is None: - self.module.fail_json(msg="distribution_id or alias must be specified") - if distribution_id is None: - distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias) - return distribution_id - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error validating parameters.") - - def create_aws_list(self, invalidation_batch): - aws_list = {} - aws_list["Quantity"] = len(invalidation_batch) - aws_list["Items"] = invalidation_batch - return aws_list - - def validate_invalidation_batch(self, invalidation_batch, caller_reference): - try: - if caller_reference is not None: - valid_caller_reference = caller_reference - else: - valid_caller_reference = datetime.datetime.now().isoformat() - valid_invalidation_batch = { - "paths": self.create_aws_list(invalidation_batch), - "caller_reference": valid_caller_reference, - } - return valid_invalidation_batch - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error validating invalidation batch.") - - -def main(): - argument_spec = dict( - caller_reference=dict(), - distribution_id=dict(), - alias=dict(), - target_paths=dict(required=True, type="list", elements="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[["distribution_id", "alias"]] - ) - - cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) - validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr) - service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr) - - caller_reference = module.params.get("caller_reference") - distribution_id = module.params.get("distribution_id") - alias = module.params.get("alias") - target_paths = module.params.get("target_paths") - - result = {} - - distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias) - valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference) - valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True) - result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths) - - 
module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) - - -if __name__ == "__main__": - main() diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py deleted file mode 100644 index 3c9340df611..00000000000 --- a/cloudfront_origin_access_identity.py +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- - -version_added: 1.0.0 -module: cloudfront_origin_access_identity - -short_description: Create, update and delete origin access identities for a - CloudFront distribution - -description: - - Allows for easy creation, updating and deletion of origin access - identities. - -author: - - Willem van Ketwich (@wilvk) - -options: - state: - description: If the named resource should exist. - choices: - - present - - absent - default: present - type: str - origin_access_identity_id: - description: - - The origin_access_identity_id of the CloudFront distribution. - required: false - type: str - comment: - description: - - A comment to describe the CloudFront origin access identity. - required: false - type: str - caller_reference: - description: - - A unique identifier to reference the origin access identity by. - required: false - type: str - -notes: - - Does not support check mode. - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -- name: create an origin access identity - community.aws.cloudfront_origin_access_identity: - state: present - caller_reference: this is an example reference - comment: this is an example comment - -- name: update an existing origin access identity using caller_reference as an identifier - community.aws.cloudfront_origin_access_identity: - origin_access_identity_id: E17DRN9XUOAHZX - caller_reference: this is an example reference - comment: this is a new comment - -- name: delete an existing origin access identity using caller_reference as an identifier - community.aws.cloudfront_origin_access_identity: - state: absent - caller_reference: this is an example reference - comment: this is a new comment - -""" - -RETURN = r""" -cloud_front_origin_access_identity: - description: The origin access identity's information. - returned: always - type: complex - contains: - cloud_front_origin_access_identity_config: - description: describes a url specifying the origin access identity. - returned: always - type: complex - contains: - caller_reference: - description: a caller reference for the oai - returned: always - type: str - comment: - description: a comment describing the oai - returned: always - type: str - id: - description: a unique identifier of the oai - returned: always - type: str - s3_canonical_user_id: - description: the canonical user ID of the user who created the oai - returned: always - type: str -e_tag: - description: The current version of the origin access identity created. - returned: always - type: str -location: - description: The fully qualified URI of the new origin access identity just created. 
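[Editor's note: for context on the update path shown below, the desired config is compared with the current one and only a difference is reported as a change, though the update call is issued either way. A tiny sketch with assumed values:

new_config = {"CallerReference": "my-ref", "Comment": "updated comment"}
current_config = {"CallerReference": "my-ref", "Comment": "original comment"}
changed = new_config != current_config  # True here, since the comments differ
]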
- returned: when initially created - type: str - -""" - -import datetime - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class CloudFrontOriginAccessIdentityServiceManager(object): - """ - Handles CloudFront origin access identity service calls to aws - """ - - def __init__(self, module): - self.module = module - self.client = module.client("cloudfront") - - def create_origin_access_identity(self, caller_reference, comment): - try: - return self.client.create_cloud_front_origin_access_identity( - CloudFrontOriginAccessIdentityConfig={"CallerReference": caller_reference, "Comment": comment} - ) - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.") - - def delete_origin_access_identity(self, origin_access_identity_id, e_tag): - try: - result = self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag) - return result, True - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error deleting Origin Access Identity.") - - def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag): - changed = False - new_config = {"CallerReference": caller_reference, "Comment": comment} - - try: - current_config = self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)[ - "CloudFrontOriginAccessIdentityConfig" - ] - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.") - - if new_config != current_config: - changed = True - - try: - # If the CallerReference is a value already sent in a previous identity request - # the returned value is that of the original request - result = self.client.update_cloud_front_origin_access_identity( - CloudFrontOriginAccessIdentityConfig=new_config, - Id=origin_access_identity_id, - IfMatch=e_tag, - ) - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.") - - return result, changed - - -class CloudFrontOriginAccessIdentityValidationManager(object): - """ - Manages CloudFront Origin Access Identities - """ - - def __init__(self, module): - self.module = module - self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) - - def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True): - try: - return self.__cloudfront_facts_mgr.get_origin_access_identity( - id=origin_access_identity_id, fail_if_error=False - ) - except is_boto3_error_code("NoSuchCloudFrontOriginAccessIdentity") as e: # pylint: disable=duplicate-except - if fail_if_missing: - self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") - return {} - except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.") - - def validate_etag_from_origin_access_identity_id(self, 
origin_access_identity_id, fail_if_missing): - oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing) - if oai is not None: - return oai.get("ETag") - - def validate_origin_access_identity_id_from_caller_reference(self, caller_reference): - origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities() - origin_origin_access_identity_ids = [oai.get("Id") for oai in origin_access_identities] - for origin_access_identity_id in origin_origin_access_identity_ids: - oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id) - temp_caller_reference = oai_config.get("CloudFrontOriginAccessIdentityConfig").get("CallerReference") - if temp_caller_reference == caller_reference: - return origin_access_identity_id - - def validate_comment(self, comment): - if comment is None: - return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime( - "%Y-%m-%dT%H:%M:%S.%f" - ) - return comment - - def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference): - if caller_reference is None: - if origin_access_identity_id is None: - return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f") - oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True) - origin_access_config = oai.get("CloudFrontOriginAccessIdentity", {}).get( - "CloudFrontOriginAccessIdentityConfig", {} - ) - return origin_access_config.get("CallerReference") - return caller_reference - - -def main(): - argument_spec = dict( - state=dict(choices=["present", "absent"], default="present"), - origin_access_identity_id=dict(), - caller_reference=dict(), - comment=dict(), - ) - - result = {} - e_tag = None - changed = False - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) - service_mgr = CloudFrontOriginAccessIdentityServiceManager(module) - validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module) - - state = module.params.get("state") - caller_reference = module.params.get("caller_reference") - - comment = module.params.get("comment") - origin_access_identity_id = module.params.get("origin_access_identity_id") - - if origin_access_identity_id is None and caller_reference is not None: - origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference( - caller_reference - ) - - if state == "present": - comment = validation_mgr.validate_comment(comment) - caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id( - origin_access_identity_id, caller_reference - ) - if origin_access_identity_id is not None: - e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True) - # update cloudfront origin access identity - result, changed = service_mgr.update_origin_access_identity( - caller_reference, comment, origin_access_identity_id, e_tag - ) - else: - # create cloudfront origin access identity - result = service_mgr.create_origin_access_identity(caller_reference, comment) - changed = True - else: - e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, False) - if e_tag: - result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag) - - result.pop("ResponseMetadata", None) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) - - -if __name__ == "__main__": - main() diff --git 
a/cloudfront_response_headers_policy.py b/cloudfront_response_headers_policy.py deleted file mode 100644 index a7558e8a86d..00000000000 --- a/cloudfront_response_headers_policy.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -version_added: 3.2.0 -module: cloudfront_response_headers_policy - -short_description: Create, update and delete response headers policies to be used in a Cloudfront distribution - -description: - - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers - - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy) - -author: - - Stefan Horning (@stefanhorning) - -options: - state: - description: Decides if the named policy should be absent or present - choices: - - present - - absent - default: present - type: str - name: - description: Name of the policy - required: true - type: str - comment: - description: Description of the policy - required: false - type: str - cors_config: - description: CORS header config block - required: false - default: {} - type: dict - security_headers_config: - description: Security headers config block. For headers such as XSS-Protection, Content-Security-Policy or Strict-Transport-Security - required: false - default: {} - type: dict - custom_headers_config: - description: Custom headers config block. Define your own list of headers and values as a list - required: false - default: {} - type: dict - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Creating a Cloudfront header policy using all predefined header features and a custom header for demonstration - community.aws.cloudfront_response_headers_policy: - name: my-header-policy - comment: My header policy for all the headers - cors_config: - access_control_allow_origins: - items: - - 'https://foo.com/bar' - - 'https://bar.com/foo' - access_control_allow_headers: - items: - - 'X-Session-Id' - access_control_allow_methods: - items: - - GET - - OPTIONS - - HEAD - access_control_allow_credentials: true - access_control_expose_headers: - items: - - 'X-Session-Id' - access_control_max_age_sec: 1800 - origin_override: true - security_headers_config: - xss_protection: - protection: true - report_uri: 'https://my.report-uri.com/foo/bar' - override: true - frame_options: - frame_option: 'SAMEORIGIN' - override: true - referrer_policy: - referrer_policy: 'same-origin' - override: true - content_security_policy: - content_security_policy: "frame-ancestors 'none'; report-uri https://my.report-uri.com/r/d/csp/enforce;" - override: true - content_type_options: - override: true - strict_transport_security: - include_subdomains: true - preload: true - access_control_max_age_sec: 63072000 - override: true - custom_headers_config: - items: - - { header: 'X-Test-Header', value: 'Foo', override: true } - state: present - -- name: Delete header policy - community.aws.cloudfront_response_headers_policy: - name: my-header-policy - state: absent -""" - -RETURN = r""" -response_headers_policy: - description: The policy's information - returned: success - type: complex - contains: - id: - description: ID of the policy - returned: always - type: str - sample:
'10a45b52-630e-4b7c-77c6-205f06df0462' - last_modified_time: - description: Timestamp of last modification of policy - returned: always - type: str - sample: '2022-02-04T13:23:27.304000+00:00' - response_headers_policy_config: - description: The response headers config dict containing all the headers configured - returned: always - type: complex - contains: - name: - description: Name of the policy - type: str - returned: always - sample: my-header-policy -""" - -import datetime - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class CloudfrontResponseHeadersPolicyService(object): - def __init__(self, module): - self.module = module - self.client = module.client("cloudfront") - self.check_mode = module.check_mode - - def find_response_headers_policy(self, name): - try: - policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"] - - for policy in policies: - if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name: - policy_id = policy["ResponseHeadersPolicy"]["Id"] - # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here - matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) - break - else: - matching_policy = None - - return matching_policy - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error fetching policy information") - - def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config): - cors_config = snake_dict_to_camel_dict(cors_config, capitalize_first=True) - security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True) - - # Little helper for turning xss_protection into XSSProtection and not into XssProtection - if "XssProtection" in security_headers_config: - security_headers_config["XSSProtection"] = security_headers_config.pop("XssProtection") - - custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True) - - config = { - "Name": name, - "Comment": comment, - "CorsConfig": self.insert_quantities(cors_config), - "SecurityHeadersConfig": security_headers_config, - "CustomHeadersConfig": self.insert_quantities(custom_headers_config), - } - - config = {k: v for k, v in config.items() if v} - - matching_policy = self.find_response_headers_policy(name) - - changed = False - - if self.check_mode: - self.module.exit_json(changed=True, response_headers_policy=camel_dict_to_snake_dict(config)) - - if matching_policy is None: - try: - result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config) - changed = True - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error creating policy") - else: - policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] - etag = matching_policy["ETag"] - try: - result = self.client.update_response_headers_policy( - Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config - ) - - changed_time = result["ResponseHeadersPolicy"]["LastModifiedTime"] - seconds = 3 # 
threshold for returned timestamp age - seconds_ago = datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds) - - # consider change made by this execution of the module if returned timestamp was very recent - if changed_time > seconds_ago: - changed = True - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error updating policy") - - self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) - - def delete_response_header_policy(self, name): - matching_policy = self.find_response_headers_policy(name) - - if matching_policy is None: - self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting") - else: - policy_id = matching_policy["ResponseHeadersPolicy"]["Id"] - etag = matching_policy["ETag"] - if self.check_mode: - result = {} - else: - try: - result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag) - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error deleting policy") - - self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result)) - - # Inserts a Quantity field into dicts with a list ('Items') - @staticmethod - def insert_quantities(dict_with_items): - # Items on top level case - if "Items" in dict_with_items and isinstance(dict_with_items["Items"], list): - dict_with_items["Quantity"] = len(dict_with_items["Items"]) - - # Items on second level case - for k, v in dict_with_items.items(): - if isinstance(v, dict) and "Items" in v: - v["Quantity"] = len(v["Items"]) - - return dict_with_items - - -def main(): - argument_spec = dict( - name=dict(required=True, type="str"), - comment=dict(type="str"), - cors_config=dict(type="dict", default=dict()), - security_headers_config=dict(type="dict", default=dict()), - custom_headers_config=dict(type="dict", default=dict()), - state=dict(choices=["present", "absent"], type="str", default="present"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - name = module.params.get("name") - comment = module.params.get("comment", "") - cors_config = module.params.get("cors_config") - security_headers_config = module.params.get("security_headers_config") - custom_headers_config = module.params.get("custom_headers_config") - state = module.params.get("state") - - service = CloudfrontResponseHeadersPolicyService(module) - - if state == "absent": - service.delete_response_header_policy(name) - else: - service.create_response_header_policy( - name, comment, cors_config, security_headers_config, custom_headers_config - ) - - -if __name__ == "__main__": - main() diff --git a/codebuild_project.py b/codebuild_project.py deleted file mode 100644 index 69fd2e463b5..00000000000 --- a/codebuild_project.py +++ /dev/null @@ -1,491 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: codebuild_project -version_added: 1.0.0 -short_description: Create or delete an AWS CodeBuild project -notes: - - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html). - - I(tags) changed from boto3 format to standard dict format in release 6.0.0. -description: - - Create or delete CodeBuild projects on AWS, used for building code artifacts from source code. - - Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild). - The usage did not change.
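As a sketch of the tags format change noted above, the amazon.aws tagging helpers that this module imports further down convert between the two shapes; the example tag values are assumptions.

from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

# Standard dict format accepted by the module since release 6.0.0
ansible_tags = {"env": "staging", "team": "platform"}

# boto3 list-of-dicts format expected by the CodeBuild API
boto3_tags = ansible_dict_to_boto3_tag_list(ansible_tags, tag_name_key_name="key", tag_value_key_name="value")
# -> [{"key": "env", "value": "staging"}, {"key": "team", "value": "platform"}]

# Round-tripping back to the dict format is lossless
assert boto3_tag_list_to_ansible_dict(boto3_tags) == ansible_tags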
-author: - - Stefan Horning (@stefanhorning) -options: - name: - description: - - Name of the CodeBuild project. - required: true - type: str - description: - description: - - Descriptive text of the CodeBuild project. - type: str - source: - description: - - Configure service and location for the build input source. - - I(source) is required when creating a new project. - suboptions: - type: - description: - - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)." - required: true - type: str - location: - description: - - Information about the location of the source code to be built. For I(type) C(CODEPIPELINE) location should not be specified. - type: str - git_clone_depth: - description: - - When using git you can specify the clone depth as an integer here. - type: int - buildspec: - description: - - The build spec declaration to use for the builds in this build project. Leave empty if part of the CodeBuild project. - type: str - insecure_ssl: - description: - - Enable this flag to ignore SSL warnings while connecting to the project source code. - type: bool - type: dict - artifacts: - description: - - Information about the build output artifacts for the build project. - - I(artifacts) is required when creating a new project. - suboptions: - type: - description: - - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)." - required: true - location: - description: - - Information about the build output artifact location. When choosing I(type) C(S3), set the bucket name here. - path: - description: - - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts. - - Used for path in S3 bucket when type is C(S3). - namespace_type: - description: - - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts. - - Accepts C(BUILD_ID) and C(NONE). - - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)." - name: - description: - - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact. - packaging: - description: - - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file. - type: dict - cache: - description: - - Caching params to speed up following builds. - suboptions: - type: - description: - - Cache type. Can be C(NO_CACHE) or C(S3). - required: true - location: - description: - - Caching location on S3. - required: true - type: dict - environment: - description: - - Information about the build environment for the build project. - suboptions: - type: - description: - - The type of build environment to use for the project. Usually C(LINUX_CONTAINER). - required: true - image: - description: - - The ID of the Docker image to use for this build project. - required: true - compute_type: - description: - - Information about the compute resources the build project will use. - - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)." - required: true - environment_variables: - description: - - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields. 
- - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }" - privileged_mode: - description: - - Enables running the Docker daemon inside a Docker container. - - Set to C(true) only if the build project is be used to build Docker images. - type: dict - service_role: - description: - - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account. - type: str - timeout_in_minutes: - description: - - How long CodeBuild should wait until timing out any build that has not been marked as completed. - default: 60 - type: int - encryption_key: - description: - - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts. - type: str - vpc_config: - description: - - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC. - type: dict - state: - description: - - Create or remove CodeBuild project. - default: 'present' - choices: ['present', 'absent'] - type: str - -extends_documentation_fragment: - - amazon.aws.boto3.modules - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags.modules -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- community.aws.codebuild_project: - name: my_project - description: My nice little project - service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role" - source: - # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3 - type: CODEPIPELINE - buildspec: '' - artifacts: - namespaceType: NONE - packaging: NONE - type: CODEPIPELINE - name: my_project - environment: - computeType: BUILD_GENERAL1_SMALL - privilegedMode: "true" - image: "aws/codebuild/docker:17.09.0" - type: LINUX_CONTAINER - environmentVariables: - - { name: 'PROFILE', value: 'staging' } - encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3" - region: us-east-1 - state: present -""" - -RETURN = r""" -project: - description: Returns the dictionary describing the code project configuration. - returned: success - type: complex - contains: - name: - description: Name of the CodeBuild project. - returned: always - type: str - sample: my_project - arn: - description: ARN of the CodeBuild project. - returned: always - type: str - sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder - description: - description: A description of the CodeBuild project. - returned: always - type: str - sample: My nice little project - source: - description: Information about the build input source code. - returned: always - type: complex - contains: - type: - description: The type of the repository. - returned: always - type: str - sample: CODEPIPELINE - location: - description: Location identifier, depending on the source type. - returned: when configured - type: str - git_clone_depth: - description: The git clone depth. - returned: when configured - type: int - build_spec: - description: The build spec declaration to use for the builds in this build project. - returned: always - type: str - auth: - description: Information about the authorization settings for AWS CodeBuild to access the source code to be built. - returned: when configured - type: complex - insecure_ssl: - description: True if set to ignore SSL warnings. - returned: when configured - type: bool - artifacts: - description: Information about the output of build artifacts - returned: always - type: complex - contains: - type: - description: The type of build artifact. 
- returned: always - type: str - sample: CODEPIPELINE - location: - description: Output location for build artifacts. - returned: when configured - type: str - # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project - cache: - description: Cache settings for the build project. - returned: when configured - type: dict - environment: - description: Environment settings for the build. - returned: always - type: dict - service_role: - description: IAM role to be used during build to access other AWS services. - returned: always - type: str - sample: arn:aws:iam::123123123:role/codebuild-service-role - timeout_in_minutes: - description: The timeout of a build in minutes. - returned: always - type: int - sample: 60 - tags: - description: - - Tags added to the project in the boto3 list of dictionaries format. - - I(tags) and I(resource_tags) represent the same information in - different formats. - returned: when configured - type: list - resource_tags: - description: - - A simple dictionary representing the tags added to the project. - - I(tags) and I(resource_tags) represent the same information in - different formats. - returned: when configured - type: dict - version_added: 4.0.0 - created: - description: Timestamp of the create time of the project. - returned: always - type: str - sample: "2018-04-17T16:56:03.245000+02:00" -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class CodeBuildAnsibleAWSError(AnsibleAWSError): - pass - - -def do_create_project(client, params, formatted_params): - if params["source"] is None or params["artifacts"] is None: - raise CodeBuildAnsibleAWSError( - message="The source and artifacts parameters must be provided when creating a new project. No existing project was found."
- ) - - if params["tags"] is not None: - formatted_params["tags"] = ansible_dict_to_boto3_tag_list( - params["tags"], tag_name_key_name="key", tag_value_key_name="value" - ) - - permitted_create_params = get_boto3_client_method_parameters(client, "create_project") - formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params) - - # Or create new project: - try: - resp = client.create_project(**formatted_create_params) - changed = True - return resp, changed - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise CodeBuildAnsibleAWSError( - message="Unable to create CodeBuild project", - exception=e, - ) - - -def merge_tags(found_tags, tags, purge_tags): - if purge_tags: - return tags - - merged_tags = boto3_tag_list_to_ansible_dict(found_tags) - merged_tags.update(tags) - return merged_tags - - -def format_tags(tags): - return ansible_dict_to_boto3_tag_list( - tags, - tag_name_key_name="key", - tag_value_key_name="value", - ) - - -def do_update_project(client, params, formatted_params, found_project): - permitted_update_params = get_boto3_client_method_parameters(client, "update_project") - formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params) - - found_tags = found_project.pop("tags", []) - if params["tags"] is not None: - formatted_update_params["tags"] = format_tags( - merge_tags(found_tags, params["tags"], params["purge_tags"]), - ) - - resp = update_project(client=client, params=formatted_update_params) - updated_project = resp["project"] - - # Prep both dicts for sensible change comparison: - found_project.pop("lastModified") - updated_project.pop("lastModified") - updated_tags = updated_project.pop("tags", []) - found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags) - updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags) - - changed = updated_project != found_project - - updated_project["tags"] = updated_tags - return resp, changed - - -def create_or_update_project(client, params): - resp = {} - name = params["name"] - # clean up params - formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) - - # Check if project with that name already exists and if so update existing: - found = describe_project(client=client, name=name) - changed = False - - if "name" not in found: - return do_create_project(client, params, formatted_params) - - return do_update_project(client, params, formatted_params, found) - - -def update_project(client, params): - name = params["name"] - - try: - resp = client.update_project(**params) - return resp - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise CodeBuildAnsibleAWSError( - message="Unable to update CodeBuild project", - exception=e, - ) - - -def delete_project(client, name): - found = describe_project(client=client, name=name) - if "name" not in found: - return {}, False - - try: - resp = client.delete_project(name=name) - return resp, True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise CodeBuildAnsibleAWSError( - message="Unable to delete CodeBuild project", - exception=e, - ) - - -def describe_project(client, name): - project = {} - try: - projects = client.batch_get_projects(names=[name])["projects"] - if len(projects) > 0: - project = projects[0] - return project - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as
e: - raise CodeBuildAnsibleAWSError( - message="Unable to describe CodeBuild projects", - exception=e, - ) - - -def format_project_result(project_result): - formatted_result = camel_dict_to_snake_dict(project_result) - project = project_result.get("project", {}) - if project: - tags = project.get("tags", []) - formatted_result["project"]["resource_tags"] = boto3_tag_list_to_ansible_dict(tags) - return formatted_result - - -def main(): - argument_spec = dict( - name=dict(required=True), - description=dict(), - source=dict(type="dict"), - artifacts=dict(type="dict"), - cache=dict(type="dict"), - environment=dict(type="dict"), - service_role=dict(), - timeout_in_minutes=dict(type="int", default=60), - encryption_key=dict(no_log=False), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - vpc_config=dict(type="dict"), - state=dict(choices=["present", "absent"], default="present"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client("codebuild") - - state = module.params.get("state") - changed = False - - try: - if state == "present": - project_result, changed = create_or_update_project( - client=client_conn, - params=module.params, - ) - elif state == "absent": - project_result, changed = delete_project( - client=client_conn, - name=module.params["name"], - ) - except CodeBuildAnsibleAWSError as e: - if e.exception: - module.fail_json_aws(e.exception, msg=e.message) - module.fail_json(msg=e.message) - - formatted_result = format_project_result(project_result) - module.exit_json(changed=changed, **formatted_result) - - -if __name__ == "__main__": - main() diff --git a/codecommit_repository.py b/codecommit_repository.py deleted file mode 100644 index 14b08bd88a9..00000000000 --- a/codecommit_repository.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Shuang Wang -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: codecommit_repository -version_added: 1.0.0 -short_description: Manage repositories in AWS CodeCommit -description: - - Supports creation and deletion of CodeCommit repositories. - - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit. - - Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit). - The usage did not change. -author: - - Shuang Wang (@ptux) -options: - name: - description: - - Name of repository. - required: true - type: str - description: - description: - - Description or comment of repository. - required: false - aliases: - - comment - type: str - default: '' - state: - description: - - Specifies the state of repository. - required: true - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -repository_metadata: - description: "Information about the repository." - returned: always - type: complex - contains: - account_id: - description: "The ID of the AWS account associated with the repository." - returned: when state is present - type: str - sample: "123456789012" - arn: - description: "The Amazon Resource Name (ARN) of the repository."
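To make the repository lifecycle concrete, a minimal boto3 sketch of the create call this module wraps; the repository name and description reuse the documented samples.

import boto3

client = boto3.client("codecommit")

# CodeCommit rejects duplicate names with RepositoryNameExistsException,
# which is why the module checks for an existing repository before creating
repo = client.create_repository(
    repositoryName="reponame",
    repositoryDescription="test from ptux",
)
print(repo["repositoryMetadata"]["cloneUrlHttp"])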
- returned: when state is present - type: str - sample: "arn:aws:codecommit:ap-northeast-1:123456789012:username" - clone_url_http: - description: "The URL to use for cloning the repository over HTTPS." - returned: when state is present - type: str - sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame" - clone_url_ssh: - description: "The URL to use for cloning the repository over SSH." - returned: when state is present - type: str - sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame" - creation_date: - description: "The date and time the repository was created, in timestamp format." - returned: when state is present - type: str - sample: "2018-10-16T13:21:41.261000+09:00" - last_modified_date: - description: "The date and time the repository was last modified, in timestamp format." - returned: when state is present - type: str - sample: "2018-10-16T13:21:41.261000+09:00" - repository_description: - description: "A comment or description about the repository." - returned: when state is present - type: str - sample: "test from ptux" - repository_id: - description: "The ID of the repository that was created or deleted" - returned: always - type: str - sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e" - repository_name: - description: "The repository's name." - returned: when state is present - type: str - sample: "reponame" - -response_metadata: - description: "Information about the response." - returned: always - type: complex - contains: - http_headers: - description: "http headers of http response" - returned: always - type: dict - http_status_code: - description: "http status code of http response" - returned: always - type: str - sample: "200" - request_id: - description: "http request id" - returned: always - type: str - sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef" - retry_attempts: - description: "numbers of retry attempts" - returned: always - type: str - sample: "0" -""" - -EXAMPLES = r""" -# Create a new repository -- community.aws.codecommit_repository: - name: repo - state: present - -# Delete a repository -- community.aws.codecommit_repository: - name: repo - state: absent -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class CodeCommit(object): - def __init__(self, module=None): - self._module = module - self._client = self._module.client("codecommit") - self._check_mode = self._module.check_mode - - def process(self): - result = dict(changed=False) - - if self._module.params["state"] == "present": - if not self._repository_exists(): - if not self._check_mode: - result = self._create_repository() - result["changed"] = True - else: - metadata = self._get_repository()["repositoryMetadata"] - if not metadata.get("repositoryDescription"): - metadata["repositoryDescription"] = "" - if metadata["repositoryDescription"] != self._module.params["description"]: - if not self._check_mode: - self._update_repository() - result["changed"] = True - result.update(self._get_repository()) - if self._module.params["state"] == "absent" and self._repository_exists(): - if not self._check_mode: - result = self._delete_repository() - result["changed"] = True - return result - - def _repository_exists(self): - try: - paginator = self._client.get_paginator("list_repositories") - for page in 
paginator.paginate(): - repositories = page["repositories"] - for item in repositories: - if self._module.params["name"] in item.values(): - return True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="couldn't get repository") - return False - - def _get_repository(self): - try: - result = self._client.get_repository( - repositoryName=self._module.params["name"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="couldn't get repository") - return result - - def _update_repository(self): - try: - result = self._client.update_repository_description( - repositoryName=self._module.params["name"], - repositoryDescription=self._module.params["description"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="couldn't update repository") - return result - - def _create_repository(self): - try: - result = self._client.create_repository( - repositoryName=self._module.params["name"], - repositoryDescription=self._module.params["description"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="couldn't create repository") - return result - - def _delete_repository(self): - try: - result = self._client.delete_repository( - repositoryName=self._module.params["name"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="couldn't delete repository") - return result - - -def main(): - argument_spec = dict( - name=dict(required=True), - state=dict(choices=["present", "absent"], required=True), - description=dict(default="", aliases=["comment"]), - ) - - ansible_aws_module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - aws_codecommit = CodeCommit(module=ansible_aws_module) - result = aws_codecommit.process() - ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result)) - - -if __name__ == "__main__": - main() diff --git a/codepipeline.py b/codepipeline.py deleted file mode 100644 index b1fe604768f..00000000000 --- a/codepipeline.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: codepipeline -version_added: 1.0.0 -short_description: Create or delete AWS CodePipelines -notes: - - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html). -description: - - Create or delete a CodePipeline on AWS. - - Prior to release 5.0.0 this module was called C(community.aws.aws_codepipeline). - The usage did not change. -author: - - Stefan Horning (@stefanhorning) -options: - name: - description: - - Name of the CodePipeline. - required: true - type: str - role_arn: - description: - - ARN of the IAM role to use when executing the CodePipeline. - required: true - type: str - artifact_store: - description: - - Location information where artifacts are stored (on S3). Dictionary with fields type and location. - required: true - suboptions: - type: - description: - - Type of the artifacts storage (only 'S3' is currently supported). - type: str - location: - description: - - Bucket name for artifacts.
- type: str - type: dict - stages: - description: - - List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage. - required: true - suboptions: - name: - description: - - Name of the stage (step) in the CodePipeline. - type: str - actions: - description: - - List of action configurations for that stage. - - 'See the boto3 documentation for full documentation of suboptions:' - - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)' - type: list - elements: dict - elements: dict - type: list - version: - description: - - Version number of the CodePipeline. This number is automatically incremented when a CodePipeline is updated. - required: false - type: int - state: - description: - - Create or remove CodePipeline. - default: 'present' - choices: ['present', 'absent'] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Example for creating a pipeline for continuous deploy of GitHub code to an ECS cluster (container) -- community.aws.codepipeline: - name: my_deploy_pipeline - role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service - artifact_store: - type: S3 - location: my_s3_codepipline_bucket - stages: - - name: Get_source - actions: - - - name: Git_pull - actionTypeId: - category: Source - owner: ThirdParty - provider: GitHub - version: '1' - outputArtifacts: - - { name: my-app-source } - configuration: - Owner: mediapeers - Repo: my_gh_repo - PollForSourceChanges: 'true' - Branch: master - # Generate token like this: - # https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html - # GH Link: https://github.com/settings/tokens - OAuthToken: 'abc123def456' - runOrder: 1 - - name: Build - actions: - - - name: CodeBuild - actionTypeId: - category: Build - owner: AWS - provider: CodeBuild - version: '1' - inputArtifacts: - - { name: my-app-source } - outputArtifacts: - - { name: my-app-build } - configuration: - # A project with that name needs to be set up on AWS CodeBuild already (use code_build module). - ProjectName: codebuild-project-name - runOrder: 1 - - name: ECS_deploy - actions: - - - name: ECS_deploy - actionTypeId: - category: Deploy - owner: AWS - provider: ECS - version: '1' - inputArtifacts: - - { name: my-app-build } - configuration: - # An ECS cluster with that name needs to be set up on AWS ECS already (use ecs_cluster and ecs_service module) - ClusterName: ecs-cluster-name - ServiceName: ecs-cluster-service-name - FileName: imagedefinitions.json - region: us-east-1 - state: present -""" - -RETURN = r""" -pipeline: - description: Returns the dictionary describing the CodePipeline configuration.
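A hedged sketch of the read-modify-write cycle the module's present-state branch performs below, using the same boto3 calls; the pipeline name and role ARN reuse the example values above.

import boto3

client = boto3.client("codepipeline")

found = client.get_pipeline(name="my_deploy_pipeline")
pipeline = found["pipeline"]
pipeline["roleArn"] = "arn:aws:iam::123456:role/AWS-CodePipeline-Service"
updated = client.update_pipeline(pipeline=pipeline)
# The service increments the version number on each successful update
print(updated["pipeline"].get("version"))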
- returned: success - type: complex - contains: - name: - description: Name of the CodePipeline - returned: always - type: str - sample: my_deploy_pipeline - role_arn: - description: ARN of the IAM role attached to the CodePipeline - returned: always - type: str - sample: arn:aws:iam::123123123:role/codepipeline-service-role - artifact_store: - description: Information about where the build artifacts are stored - returned: always - type: complex - contains: - type: - description: The type of the artifacts store, such as S3 - returned: always - type: str - sample: S3 - location: - description: The location of the artifacts storage (s3 bucket name) - returned: always - type: str - sample: my_s3_codepipline_bucket - encryption_key: - description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key. - returned: when configured - type: str - stages: - description: List of stages configured for this CodePipeline - returned: always - type: list - version: - description: - - The version number of the CodePipeline. - - This number is auto incremented when CodePipeline params are changed. - returned: always - type: int -""" - -import copy - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_pipeline(client, name, role_arn, artifact_store, stages, version, module): - pipeline_dict = {"name": name, "roleArn": role_arn, "artifactStore": artifact_store, "stages": stages} - if version: - pipeline_dict["version"] = version - try: - resp = client.create_pipeline(pipeline=pipeline_dict) - return resp - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to create pipeline {pipeline_dict['name']}") - - -def update_pipeline(client, pipeline_dict, module): - try: - resp = client.update_pipeline(pipeline=pipeline_dict) - return resp - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to update pipeline {pipeline_dict['name']}") - - -def delete_pipeline(client, name, module): - try: - resp = client.delete_pipeline(name=name) - return resp - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to delete pipeline {name}") - - -def describe_pipeline(client, name, version, module): - pipeline = {} - try: - if version is not None: - pipeline = client.get_pipeline(name=name, version=version) - return pipeline - else: - pipeline = client.get_pipeline(name=name) - return pipeline - except is_boto3_error_code("PipelineNotFoundException"): - return pipeline - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def main(): - argument_spec = dict( - name=dict(required=True, type="str"), - role_arn=dict(required=True, type="str"), - artifact_store=dict(required=True, type="dict"), - stages=dict(required=True, type="list", elements="dict"), - version=dict(type="int"), - state=dict(choices=["present", "absent"], default="present"), - ) - - module =
AnsibleAWSModule(argument_spec=argument_spec) - client_conn = module.client("codepipeline") - - state = module.params.get("state") - changed = False - - # Determine if the CodePipeline exists - found_code_pipeline = describe_pipeline( - client=client_conn, name=module.params["name"], version=module.params["version"], module=module - ) - pipeline_result = {} - - if state == "present": - if "pipeline" in found_code_pipeline: - pipeline_dict = copy.deepcopy(found_code_pipeline["pipeline"]) - # Update dictionary with provided module params: - pipeline_dict["roleArn"] = module.params["role_arn"] - pipeline_dict["artifactStore"] = module.params["artifact_store"] - pipeline_dict["stages"] = module.params["stages"] - if module.params["version"] is not None: - pipeline_dict["version"] = module.params["version"] - - pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module) - - if compare_policies(found_code_pipeline["pipeline"], pipeline_result["pipeline"]): - changed = True - else: - pipeline_result = create_pipeline( - client=client_conn, - name=module.params["name"], - role_arn=module.params["role_arn"], - artifact_store=module.params["artifact_store"], - stages=module.params["stages"], - version=module.params["version"], - module=module, - ) - changed = True - elif state == "absent": - if found_code_pipeline: - pipeline_result = delete_pipeline(client=client_conn, name=module.params["name"], module=module) - changed = True - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result)) - - -if __name__ == "__main__": - main() diff --git a/config_aggregation_authorization.py b/config_aggregation_authorization.py deleted file mode 100644 index 903d5a5e1fe..00000000000 --- a/config_aggregation_authorization.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: config_aggregation_authorization -version_added: 1.0.0 -short_description: Manage cross-account AWS Config authorizations -description: - - Module manages AWS Config aggregation authorizations. - - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregation_authorization). - The usage did not change. -author: - - "Aaron Smith (@slapula)" -options: - state: - description: - - Whether the Config rule should be present or absent. - default: present - choices: ['present', 'absent'] - type: str - authorized_account_id: - description: - - The 12-digit account ID of the account authorized to aggregate data. - type: str - required: true - authorized_aws_region: - description: - - The region authorized to collect aggregated data. 
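A minimal sketch of the existence check and put call this module builds below; the account ID and region are assumed example values.

import boto3

client = boto3.client("config")

# There is no get-by-id API, so match on the account ID the way resource_exists() does
existing = client.describe_aggregation_authorizations()["AggregationAuthorizations"]
if not any(item["AuthorizedAccountId"] == "123456789012" for item in existing):
    client.put_aggregation_authorization(
        AuthorizedAccountId="123456789012",  # assumed account ID
        AuthorizedAwsRegion="us-east-1",
    )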
- type: str - required: true -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Get current account ID - community.aws.aws_caller_info: - register: whoami -- community.aws.config_aggregation_authorization: - state: present - authorized_account_id: '{{ whoami.account }}' - authorized_aws_region: us-east-1 -""" - -RETURN = r"""#""" - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def resource_exists(client, module, params): - try: - current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] - authorization_exists = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), - None, - ) - if authorization_exists: - return True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): - return False - - -def create_resource(client, module, params, result): - try: - response = client.put_aggregation_authorization( - AuthorizedAccountId=params["AuthorizedAccountId"], - AuthorizedAwsRegion=params["AuthorizedAwsRegion"], - ) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization") - - -def update_resource(client, module, params, result): - current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"] - current_params = next( - (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]), - None, - ) - - del current_params["AggregationAuthorizationArn"] - del current_params["CreationTime"] - - if params != current_params: - try: - response = client.put_aggregation_authorization( - AuthorizedAccountId=params["AuthorizedAccountId"], - AuthorizedAwsRegion=params["AuthorizedAwsRegion"], - ) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't update AWS Aggregation authorization") - - -def delete_resource(client, module, params, result): - try: - response = client.delete_aggregation_authorization( - AuthorizedAccountId=params["AuthorizedAccountId"], - AuthorizedAwsRegion=params["AuthorizedAwsRegion"], - ) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization") - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "state": dict(type="str", choices=["present", "absent"], default="present"), - "authorized_account_id": dict(type="str", required=True), - "authorized_aws_region": dict(type="str", required=True), - }, - supports_check_mode=False, - ) - - result = {"changed": False} - - params = { - "AuthorizedAccountId": module.params.get("authorized_account_id"), - "AuthorizedAwsRegion": module.params.get("authorized_aws_region"), - } - - client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) - resource_status = resource_exists(client, module, params) - - if module.params.get("state") == "present": - if not resource_status: -
create_resource(client, module, params, result) - else: - update_resource(client, module, params, result) - - if module.params.get("state") == "absent": - if resource_status: - delete_resource(client, module, params, result) - - module.exit_json(changed=result["changed"]) - - -if __name__ == "__main__": - main() diff --git a/config_aggregator.py b/config_aggregator.py deleted file mode 100644 index 58866159028..00000000000 --- a/config_aggregator.py +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: config_aggregator -version_added: 1.0.0 -short_description: Manage AWS Config aggregations across multiple accounts -description: - - Module manages AWS Config aggregator resources. - - Prior to release 5.0.0 this module was called C(community.aws.aws_config_aggregator). - The usage did not change. -author: - - "Aaron Smith (@slapula)" -options: - name: - description: - - The name of the AWS Config resource. - required: true - type: str - state: - description: - - Whether the Config rule should be present or absent. - default: present - choices: ['present', 'absent'] - type: str - account_sources: - description: - - Provides a list of source accounts and regions to be aggregated. - suboptions: - account_ids: - description: - - A list of 12-digit account IDs of accounts being aggregated. - type: list - elements: str - aws_regions: - description: - - A list of source regions being aggregated. - type: list - elements: str - all_aws_regions: - description: - - If true, aggregate existing AWS Config regions and future regions. - type: bool - type: list - elements: dict - required: true - organization_source: - description: - - Provides the AWS Organizations source (IAM role and regions) from which data is aggregated. - suboptions: - role_arn: - description: - - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. - type: str - aws_regions: - description: - - The source regions being aggregated. - type: list - elements: str - all_aws_regions: - description: - - If true, aggregate existing AWS Config regions and future regions.
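To make the parameter shaping concrete, a small sketch of how main() below maps one account_sources entry onto the CamelCase structure the AWS API expects; the values are assumptions.

source = {"account_ids": ["123456789012"], "all_aws_regions": True}

aggregation_source = {}
if source.get("account_ids"):
    aggregation_source["AccountIds"] = source["account_ids"]
if source.get("aws_regions"):
    aggregation_source["AwsRegions"] = source["aws_regions"]
if source.get("all_aws_regions") is not None:
    aggregation_source["AllAwsRegions"] = source["all_aws_regions"]

assert aggregation_source == {"AccountIds": ["123456789012"], "AllAwsRegions": True}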
- type: bool - type: dict - required: true -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create cross-account aggregator - community.aws.config_aggregator: - name: test_config_rule - state: present - account_sources: - account_ids: - - 1234567890 - - 0123456789 - - 9012345678 - all_aws_regions: true -""" - -RETURN = r"""#""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def resource_exists(client, module, params): - try: - aggregator = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] - ) - return aggregator["ConfigurationAggregators"][0] - except is_boto3_error_code("NoSuchConfigurationAggregatorException"): - return - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def create_resource(client, module, params, result): - try: - client.put_configuration_aggregator( - ConfigurationAggregatorName=params["ConfigurationAggregatorName"], - AccountAggregationSources=params["AccountAggregationSources"], - OrganizationAggregationSource=params["OrganizationAggregationSource"], - ) - result["changed"] = True - result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator") - - -def update_resource(client, module, params, result): - result["changed"] = False - - current_params = client.describe_configuration_aggregators( - ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]] - )["ConfigurationAggregators"][0] - - if params["AccountAggregationSources"] != current_params.get("AccountAggregationSources", []): - result["changed"] = True - - if params["OrganizationAggregationSource"] != current_params.get("OrganizationAggregationSource", {}): - result["changed"] = True - - if result["changed"]: - try: - client.put_configuration_aggregator( - ConfigurationAggregatorName=params["ConfigurationAggregatorName"], - AccountAggregationSources=params["AccountAggregationSources"], - OrganizationAggregationSource=params["OrganizationAggregationSource"], - ) - result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't update AWS Config configuration aggregator") - - -def delete_resource(client, module, params, result): - try: - client.delete_configuration_aggregator(ConfigurationAggregatorName=params["ConfigurationAggregatorName"]) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator") - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "name": dict(type="str",
required=True), - "state": dict(type="str", choices=["present", "absent"], default="present"), - "account_sources": dict(type="list", required=True, elements="dict"), - "organization_source": dict(type="dict", required=True), - }, - supports_check_mode=False, - ) - - result = {"changed": False} - - name = module.params.get("name") - state = module.params.get("state") - - params = {} - if name: - params["ConfigurationAggregatorName"] = name - params["AccountAggregationSources"] = [] - if module.params.get("account_sources"): - for i in module.params.get("account_sources"): - tmp_dict = {} - if i.get("account_ids"): - tmp_dict["AccountIds"] = i.get("account_ids") - if i.get("aws_regions"): - tmp_dict["AwsRegions"] = i.get("aws_regions") - if i.get("all_aws_regions") is not None: - tmp_dict["AllAwsRegions"] = i.get("all_aws_regions") - params["AccountAggregationSources"].append(tmp_dict) - if module.params.get("organization_source"): - params["OrganizationAggregationSource"] = {} - if module.params.get("organization_source").get("role_arn"): - params["OrganizationAggregationSource"].update( - {"RoleArn": module.params.get("organization_source").get("role_arn")} - ) - if module.params.get("organization_source").get("aws_regions"): - params["OrganizationAggregationSource"].update( - {"AwsRegions": module.params.get("organization_source").get("aws_regions")} - ) - if module.params.get("organization_source").get("all_aws_regions") is not None: - params["OrganizationAggregationSource"].update( - {"AllAwsRegions": module.params.get("organization_source").get("all_aws_regions")} - ) - - client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) - - resource_status = resource_exists(client, module, params) - - if state == "present": - if not resource_status: - create_resource(client, module, params, result) - else: - update_resource(client, module, params, result) - - if state == "absent": - if resource_status: - delete_resource(client, module, params, result) - - module.exit_json(changed=result["changed"]) - - -if __name__ == "__main__": - main() diff --git a/config_delivery_channel.py b/config_delivery_channel.py deleted file mode 100644 index c54fb36c05c..00000000000 --- a/config_delivery_channel.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: config_delivery_channel -version_added: 1.0.0 -short_description: Manage AWS Config delivery channels -description: - - This module manages AWS Config delivery locations for rule checks and configuration info. - - Prior to release 5.0.0 this module was called C(community.aws.aws_config_delivery_channel). - The usage did not change. -author: - - "Aaron Smith (@slapula)" -options: - name: - description: - - The name of the AWS Config resource. - required: true - type: str - state: - description: - - Whether the Config rule should be present or absent. - default: present - choices: ['present', 'absent'] - type: str - s3_bucket: - description: - - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files. - type: str - required: true - s3_prefix: - description: - - The prefix for the specified Amazon S3 bucket. - type: str - kms_key_arn: - description: - - The ARN of a KMS key used to encrypt objects delivered by Config. 
The key must belong to the same region as the destination S3 bucket. - type: str - sns_topic_arn: - description: - - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes. - type: str - delivery_frequency: - description: - - The frequency with which AWS Config delivers configuration snapshots. - choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create a delivery channel for AWS Config - community.aws.config_delivery_channel: - name: test_delivery_channel - state: present - s3_bucket: 'test_aws_config_bucket' - sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' - delivery_frequency: 'Twelve_Hours' - -- name: Create a delivery channel with encrypted objects - community.aws.config_delivery_channel: - name: test_delivery_channel - state: present - s3_bucket: 'test_aws_config_bucket' - kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/160f41cb-e660-4fa0-8bf6-976f53bf7851' - sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' - delivery_frequency: 'Twelve_Hours' -""" - -RETURN = r"""#""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# this waits for an IAM role to become fully available, at the cost of -# taking a long time to fail when the IAM role/policy really is invalid -retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( - catch_extra_error_codes=["InsufficientDeliveryPolicyException"], -) - - -def resource_exists(client, module, params): - try: - channel = client.describe_delivery_channels( - DeliveryChannelNames=[params["name"]], - aws_retry=True, - ) - return channel["DeliveryChannels"][0] - except is_boto3_error_code("NoSuchDeliveryChannelException"): - return - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def create_resource(client, module, params, result): - try: - retry_unavailable_iam_on_put_delivery( - client.put_delivery_channel, - )( - DeliveryChannel=params, - ) - result["changed"] = True - result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except is_boto3_error_code("InvalidS3KeyPrefixException") as e: - module.fail_json_aws( - e, - msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix", - ) - except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
Make sure the bucket exists and is available", - ) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="Couldn't create AWS Config delivery channel", - ) - - -def update_resource(client, module, params, result): - current_params = client.describe_delivery_channels( - DeliveryChannelNames=[params["name"]], - aws_retry=True, - ) - - if params != current_params["DeliveryChannels"][0]: - try: - retry_unavailable_iam_on_put_delivery( - client.put_delivery_channel, - )( - DeliveryChannel=params, - ) - result["changed"] = True - result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except is_boto3_error_code("InvalidS3KeyPrefixException") as e: - module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") - except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available", - ) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") - - -def delete_resource(client, module, params, result): - try: - response = client.delete_delivery_channel(DeliveryChannelName=params["name"]) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "name": dict(type="str", required=True), - "state": dict(type="str", choices=["present", "absent"], default="present"), - "s3_bucket": dict(type="str", required=True), - "s3_prefix": dict(type="str"), - "kms_key_arn": dict(type="str", no_log=True), - "sns_topic_arn": dict(type="str"), - "delivery_frequency": dict( - type="str", - choices=[ - "One_Hour", - "Three_Hours", - "Six_Hours", - "Twelve_Hours", - "TwentyFour_Hours", - ], - ), - }, - supports_check_mode=False, - ) - - result = {"changed": False} - - name = module.params.get("name") - state = module.params.get("state") - - params = {} - if name: - params["name"] = name - if module.params.get("s3_bucket"): - params["s3BucketName"] = module.params.get("s3_bucket") - if module.params.get("s3_prefix"): - params["s3KeyPrefix"] = module.params.get("s3_prefix") - if module.params.get("kms_key_arn"): - params["s3KmsKeyArn"] = module.params.get("kms_key_arn") - if module.params.get("sns_topic_arn"): - params["snsTopicARN"] = module.params.get("sns_topic_arn") - if module.params.get("delivery_frequency"): - params["configSnapshotDeliveryProperties"] = {"deliveryFrequency": module.params.get("delivery_frequency")} - - client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) - - resource_status = resource_exists(client, module, params) - - if state == "present": - if not resource_status: - create_resource(client, module, params, result) - if resource_status: - update_resource(client, module, params, result) - - if state == "absent": - if resource_status: - delete_resource(client, module, params, result) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/config_recorder.py b/config_recorder.py deleted file mode 100644 index 2672664a5fe..00000000000 --- 
a/config_recorder.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: config_recorder -version_added: 1.0.0 -short_description: Manage AWS Config Recorders -description: - - Module manages AWS Config configuration recorder settings. - - Prior to release 5.0.0 this module was called C(community.aws.aws_config_recorder). - The usage did not change. -author: - - "Aaron Smith (@slapula)" -options: - name: - description: - - The name of the AWS Config resource. - required: true - type: str - state: - description: - - Whether the Config rule should be present or absent. - default: present - choices: ['present', 'absent'] - type: str - role_arn: - description: - - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account. - - Required when I(state=present). - type: str - recording_group: - description: - - Specifies the types of AWS resources for which AWS Config records configuration changes. - - Required when I(state=present) - suboptions: - all_supported: - description: - - Specifies whether AWS Config records configuration changes for every supported type of regional resource. - - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts - recording resources of that type automatically. - - If I(all_supported=true), you cannot enumerate a list of I(resource_types). - include_global_types: - description: - - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) - with the resources that it records. - - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, - you should consider customizing AWS Config in only one region to record global resources. - - If you set I(include_global_types=true), you must also set I(all_supported=true). - - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording - resources of that type automatically. - resource_types: - description: - - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, - C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)). - - Before you can set this option, you must set I(all_supported=false). 
- type: dict -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create Configuration Recorder for AWS Config - community.aws.config_recorder: - name: test_configuration_recorder - state: present - role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' - recording_group: - all_supported: true - include_global_types: true -""" - -RETURN = r"""#""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def resource_exists(client, module, params): - try: - recorder = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) - return recorder["ConfigurationRecorders"][0] - except is_boto3_error_code("NoSuchConfigurationRecorderException"): - return - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def create_resource(client, module, params, result): - try: - response = client.put_configuration_recorder(ConfigurationRecorder=params) - result["changed"] = True - result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") - - -def update_resource(client, module, params, result): - current_params = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]]) - - if params != current_params["ConfigurationRecorders"][0]: - try: - response = client.put_configuration_recorder(ConfigurationRecorder=params) - result["changed"] = True - result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") - - -def delete_resource(client, module, params, result): - try: - response = client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "name": dict(type="str", required=True), - "state": dict(type="str", choices=["present", "absent"], default="present"), - "role_arn": dict(type="str"), - "recording_group": dict(type="dict"), - }, - supports_check_mode=False, - required_if=[ - ("state", "present", ["role_arn", "recording_group"]), - ], - ) - - result = {"changed": False} - - name = module.params.get("name") - state = module.params.get("state") - - params = {} - if name: - params["name"] = name - if module.params.get("role_arn"): - params["roleARN"] = module.params.get("role_arn") - if module.params.get("recording_group"): - params["recordingGroup"] = {} - if module.params.get("recording_group").get("all_supported") is not None: - 
params["recordingGroup"].update({"allSupported": module.params.get("recording_group").get("all_supported")}) - if module.params.get("recording_group").get("include_global_types") is not None: - params["recordingGroup"].update( - {"includeGlobalResourceTypes": module.params.get("recording_group").get("include_global_types")} - ) - if module.params.get("recording_group").get("resource_types"): - params["recordingGroup"].update( - {"resourceTypes": module.params.get("recording_group").get("resource_types")} - ) - else: - params["recordingGroup"].update({"resourceTypes": []}) - - client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) - - resource_status = resource_exists(client, module, params) - - if state == "present": - if not resource_status: - create_resource(client, module, params, result) - if resource_status: - update_resource(client, module, params, result) - - if state == "absent": - if resource_status: - delete_resource(client, module, params, result) - - module.exit_json(changed=result["changed"]) - - -if __name__ == "__main__": - main() diff --git a/config_rule.py b/config_rule.py deleted file mode 100644 index 3b49c17465e..00000000000 --- a/config_rule.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: config_rule -version_added: 1.0.0 -short_description: Manage AWS Config rule resources -description: - - Module manages AWS Config rules. - - Prior to release 5.0.0 this module was called C(community.aws.aws_config_rule). - The usage did not change. -author: - - "Aaron Smith (@slapula)" -options: - name: - description: - - The name of the AWS Config resource. - required: true - type: str - state: - description: - - Whether the Config rule should be present or absent. - default: present - choices: ['present', 'absent'] - type: str - description: - description: - - The description that you provide for the AWS Config rule. - type: str - scope: - description: - - Defines which resources can trigger an evaluation for the rule. - suboptions: - compliance_types: - description: - - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. - You can only specify one type if you also specify a resource ID for I(compliance_id). - compliance_id: - description: - - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, - you must specify one resource type for I(compliance_types). - tag_key: - description: - - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule. - tag_value: - description: - - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. - If you specify a value for I(tag_value), you must also specify a value for I(tag_key). - type: dict - source: - description: - - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to - evaluate your AWS resources. - suboptions: - owner: - description: - - The resource types of only those AWS resources that you want to trigger an evaluation for the rule. - You can only specify one type if you also specify a resource ID for I(compliance_id). - identifier: - description: - - The ID of the only AWS resource that you want to trigger an evaluation for the rule. 
- If you specify a resource ID, you must specify one resource type for I(compliance_types). - details: - description: - - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. - - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs. - - Key C(EventSource) The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources. - - Key C(MessageType) The type of notification that triggers AWS Config to run an evaluation for a rule. - - Key C(MaximumExecutionFrequency) The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger. - type: dict - required: true - input_parameters: - description: - - A string, in JSON format, that is passed to the AWS Config rule Lambda function. - type: str - execution_frequency: - description: - - The maximum frequency with which AWS Config runs evaluations for a rule. - choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create Config Rule for AWS Config - community.aws.config_rule: - name: test_config_rule - state: present - description: 'This AWS Config rule checks for public write access on S3 buckets' - scope: - compliance_types: - - 'AWS::S3::Bucket' - source: - owner: AWS - identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' - -""" - -RETURN = r"""#""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def rule_exists(client, module, params): - try: - rule = client.describe_config_rules( - ConfigRuleNames=[params["ConfigRuleName"]], - aws_retry=True, - ) - return rule["ConfigRules"][0] - except is_boto3_error_code("NoSuchConfigRuleException"): - return - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def create_resource(client, module, params, result): - try: - client.put_config_rule(ConfigRule=params) - result["changed"] = True - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create AWS Config rule") - - -def update_resource(client, module, params, result): - current_params = client.describe_config_rules( - ConfigRuleNames=[params["ConfigRuleName"]], - aws_retry=True, - ) - - del current_params["ConfigRules"][0]["ConfigRuleArn"] - del current_params["ConfigRules"][0]["ConfigRuleId"] - del current_params["ConfigRules"][0]["EvaluationModes"] - - if params != current_params["ConfigRules"][0]: - try: - client.put_config_rule(ConfigRule=params) - result["changed"] = True - result["rule"] = camel_dict_to_snake_dict(rule_exists(client, module, params)) - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create AWS Config rule") - - -def delete_resource(client, module, params, result): - try: - response 
= client.delete_config_rule( - ConfigRuleName=params["ConfigRuleName"], - aws_retry=True, - ) - result["changed"] = True - result["rule"] = {} - return result - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "name": dict(type="str", required=True), - "state": dict(type="str", choices=["present", "absent"], default="present"), - "description": dict(type="str"), - "scope": dict(type="dict"), - "source": dict(type="dict", required=True), - "input_parameters": dict(type="str"), - "execution_frequency": dict( - type="str", - choices=[ - "One_Hour", - "Three_Hours", - "Six_Hours", - "Twelve_Hours", - "TwentyFour_Hours", - ], - ), - }, - supports_check_mode=False, - ) - - result = {"changed": False} - - name = module.params.get("name") - resource_type = module.params.get("resource_type") - state = module.params.get("state") - - params = {} - if name: - params["ConfigRuleName"] = name - if module.params.get("description"): - params["Description"] = module.params.get("description") - if module.params.get("scope"): - params["Scope"] = {} - if module.params.get("scope").get("compliance_types"): - params["Scope"].update( - { - "ComplianceResourceTypes": module.params.get("scope").get("compliance_types"), - } - ) - if module.params.get("scope").get("tag_key"): - params["Scope"].update( - { - "TagKey": module.params.get("scope").get("tag_key"), - } - ) - if module.params.get("scope").get("tag_value"): - params["Scope"].update( - { - "TagValue": module.params.get("scope").get("tag_value"), - } - ) - if module.params.get("scope").get("compliance_id"): - params["Scope"].update( - { - "ComplianceResourceId": module.params.get("scope").get("compliance_id"), - } - ) - if module.params.get("source"): - params["Source"] = {} - if module.params.get("source").get("owner"): - params["Source"].update( - { - "Owner": module.params.get("source").get("owner"), - } - ) - if module.params.get("source").get("identifier"): - params["Source"].update( - { - "SourceIdentifier": module.params.get("source").get("identifier"), - } - ) - if module.params.get("source").get("details"): - params["Source"].update( - { - "SourceDetails": module.params.get("source").get("details"), - } - ) - if module.params.get("input_parameters"): - params["InputParameters"] = module.params.get("input_parameters") - if module.params.get("execution_frequency"): - params["MaximumExecutionFrequency"] = module.params.get("execution_frequency") - params["ConfigRuleState"] = "ACTIVE" - - client = module.client("config", retry_decorator=AWSRetry.jittered_backoff()) - - existing_rule = rule_exists(client, module, params) - - if state == "present": - if not existing_rule: - create_resource(client, module, params, result) - else: - update_resource(client, module, params, result) - - if state == "absent": - if existing_rule: - delete_resource(client, module, params, result) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/data_pipeline.py b/data_pipeline.py deleted file mode 100644 index 4b602708163..00000000000 --- a/data_pipeline.py +++ /dev/null @@ -1,636 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: data_pipeline -version_added: 1.0.0 -author: - - Raghu Udiyar (@raags) - - Sloane 
Hertel (@s-hertel) -short_description: Create and manage AWS Datapipelines -description: - - Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects) - given to the datapipeline. - - The pipeline definition must be in the format given here - U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax). - - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state. -options: - name: - description: - - The name of the Datapipeline to create/modify/delete. - required: true - type: str - description: - description: - - An optional description for the pipeline being created. - default: '' - type: str - objects: - type: list - elements: dict - default: [] - description: - - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields). - suboptions: - id: - description: - - The ID of the object. - type: str - name: - description: - - The name of the object. - type: str - fields: - description: - - Key-value pairs that define the properties of the object. - - The value is specified as a reference to another object I(refValue) or as a string value I(stringValue) - but not as both. - type: list - elements: dict - suboptions: - key: - type: str - description: - - The field identifier. - stringValue: - type: str - description: - - The field value. - - Exactly one of I(stringValue) and I(refValue) may be specified. - refValue: - type: str - description: - - The field value, expressed as the identifier of another object. - - Exactly one of I(stringValue) and I(refValue) may be specified. - parameters: - description: - - A list of parameter objects (dicts) in the pipeline definition. - type: list - elements: dict - default: [] - suboptions: - id: - description: - - The ID of the parameter object. - attributes: - description: - - A list of attributes (dicts) of the parameter object. - type: list - elements: dict - suboptions: - key: - description: The field identifier. - type: str - stringValue: - description: The field value. - type: str - - values: - description: - - A list of parameter values (dicts) in the pipeline definition. - type: list - elements: dict - default: [] - suboptions: - id: - description: The ID of the parameter value - type: str - stringValue: - description: The field value - type: str - timeout: - description: - - Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise. - default: 300 - type: int - state: - description: - - The requested state of the pipeline. - choices: ['present', 'absent', 'active', 'inactive'] - default: present - type: str - tags: - description: - - A dict of key:value pair(s) to add to the pipeline. - type: dict - default: {} - aliases: ['resource_tags'] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
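- -# A populated pipeline can also be deactivated without being deleted; this extra example -# is a minimal illustrative sketch (not one of the original examples) that uses the -# documented C(inactive) state -- community.aws.data_pipeline: - name: test-dp - region: us-west-2 - state: inactive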
- -# Create pipeline -- community.aws.data_pipeline: - name: test-dp - region: us-west-2 - objects: "{{pipelineObjects}}" - parameters: "{{pipelineParameters}}" - values: "{{pipelineValues}}" - tags: - key1: val1 - key2: val2 - state: present - -# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects -- community.aws.data_pipeline: - name: test-dp - objects: - - "id": "DefaultSchedule" - "name": "Every 1 day" - "fields": - - "key": "period" - "stringValue": "1 days" - - "key": "type" - "stringValue": "Schedule" - - "key": "startAt" - "stringValue": "FIRST_ACTIVATION_DATE_TIME" - - "id": "Default" - "name": "Default" - "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" }, - { "key": "role", "stringValue": "DataPipelineDefaultRole" }, - { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" }, - { "key": "scheduleType", "stringValue": "cron" }, - { "key": "schedule", "refValue": "DefaultSchedule" }, - { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ] - state: active - -# Activate pipeline -- community.aws.data_pipeline: - name: test-dp - region: us-west-2 - state: active - -# Delete pipeline -- community.aws.data_pipeline: - name: test-dp - region: us-west-2 - state: absent - -""" - -RETURN = r""" -changed: - description: whether the data pipeline has been modified - type: bool - returned: always - sample: - changed: true -result: - description: - - Contains the data pipeline data (data_pipeline) and a return message (msg). - If the data pipeline exists data_pipeline will contain the keys description, name, - pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then - data_pipeline will be an empty dict. The msg describes the status of the operation. 
- returned: always - type: dict -""" - -import hashlib -import json -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -DP_ACTIVE_STATES = ["ACTIVE", "SCHEDULED"] -DP_INACTIVE_STATES = ["INACTIVE", "PENDING", "FINISHED", "DELETING"] -DP_ACTIVATING_STATE = "ACTIVATING" -DP_DEACTIVATING_STATE = "DEACTIVATING" -PIPELINE_DOESNT_EXIST = "^.*Pipeline with id: {0} does not exist$" - - -class DataPipelineNotFound(Exception): - pass - - -class TimeOutException(Exception): - pass - - -def pipeline_id(client, name): - """Return pipeline id for the given pipeline name - - :param object client: boto3 datapipeline client - :param string name: pipeline name - :returns: pipeline id - :raises: DataPipelineNotFound - - """ - pipelines = client.list_pipelines() - for dp in pipelines["pipelineIdList"]: - if dp["name"] == name: - return dp["id"] - raise DataPipelineNotFound - - -def pipeline_description(client, dp_id): - """Return pipeline description list - - :param object client: boto3 datapipeline client - :returns: pipeline description dictionary - :raises: DataPipelineNotFound - - """ - try: - return client.describe_pipelines(pipelineIds=[dp_id]) - except is_boto3_error_code(["PipelineNotFoundException", "PipelineDeletedException"]): - raise DataPipelineNotFound - - -def pipeline_field(client, dp_id, field): - """Return a pipeline field from the pipeline description. - - The available fields are listed in describe_pipelines output. 
- - :param object client: boto3 datapipeline client - :param string dp_id: pipeline id - :param string field: pipeline description field - :returns: pipeline field information - - """ - dp_description = pipeline_description(client, dp_id) - for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: - if field_key["key"] == field: - return field_key["stringValue"] - raise KeyError(f"Field key {field} not found!") - - -def run_with_timeout(timeout, func, *func_args, **func_kwargs): - """Run func with the provided args and kwargs, and wait until - timeout for truthy return value - - :param int timeout: time to wait for status - :param function func: function to run, should return True or False - :param args func_args: function args to pass to func - :param kwargs func_kwargs: function key word args - :returns: True if func returns truthy within timeout - :raises: TimeOutException - - """ - - for count in range(timeout // 10): - if func(*func_args, **func_kwargs): - return True - else: - # check every 10s - time.sleep(10) - - raise TimeOutException - - -def check_dp_exists(client, dp_id): - """Check if datapipeline exists - - :param object client: boto3 datapipeline client - :param string dp_id: pipeline id - :returns: True or False - - """ - try: - # pipeline_description raises DataPipelineNotFound - if pipeline_description(client, dp_id): - return True - else: - return False - except DataPipelineNotFound: - return False - - -def check_dp_status(client, dp_id, status): - """Checks if datapipeline matches states in status list - - :param object client: boto3 datapipeline client - :param string dp_id: pipeline id - :param list status: list of states to check against - :returns: True or False - - """ - if not isinstance(status, list): - raise AssertionError() - if pipeline_field(client, dp_id, field="@pipelineState") in status: - return True - else: - return False - - -def pipeline_status_timeout(client, dp_id, status, timeout): - args = (client, dp_id, status) - return run_with_timeout(timeout, check_dp_status, *args) - - -def pipeline_exists_timeout(client, dp_id, timeout): - args = (client, dp_id) - return run_with_timeout(timeout, check_dp_exists, *args) - - -def activate_pipeline(client, module): - """Activates pipeline""" - dp_name = module.params.get("name") - timeout = module.params.get("timeout") - - try: - dp_id = pipeline_id(client, dp_name) - except DataPipelineNotFound: - module.fail_json(msg=f"Data Pipeline {dp_name} not found") - - if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: - changed = False - else: - try: - client.activate_pipeline(pipelineId=dp_id) - except is_boto3_error_code("InvalidRequestException"): - module.fail_json(msg="You need to populate your pipeline before activation.") - try: - pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout) - except TimeOutException: - if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - # activated but completed more rapidly than it was checked - pass - else: - module.fail_json( - msg=f"Data Pipeline {dp_name} failed to activate within timeout {timeout} seconds", - ) - changed = True - - data_pipeline = get_result(client, dp_id) - result = { - "data_pipeline": data_pipeline, - "msg": f"Data Pipeline {dp_name} activated.", - } - - return (changed, result) - - -def deactivate_pipeline(client, module): - """Deactivates pipeline""" - dp_name = module.params.get("name") - timeout = module.params.get("timeout") - - try: - dp_id = pipeline_id(client, 
dp_name) - except DataPipelineNotFound: - module.fail_json(msg=f"Data Pipeline {dp_name} not found") - - if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES: - changed = False - else: - client.deactivate_pipeline(pipelineId=dp_id) - try: - pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, timeout=timeout) - except TimeOutException: - module.fail_json( - msg=f"Data Pipeline {dp_name} failed to deactivate within timeout {timeout} seconds", - ) - changed = True - - data_pipeline = get_result(client, dp_id) - result = { - "data_pipeline": data_pipeline, - "msg": f"Data Pipeline {dp_name} deactivated.", - } - - return (changed, result) - - -def _delete_dp_with_check(dp_id, client, timeout): - client.delete_pipeline(pipelineId=dp_id) - try: - pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout) - except DataPipelineNotFound: - return True - - -def delete_pipeline(client, module): - """Deletes pipeline""" - dp_name = module.params.get("name") - timeout = module.params.get("timeout") - - try: - dp_id = pipeline_id(client, dp_name) - _delete_dp_with_check(dp_id, client, timeout) - changed = True - except DataPipelineNotFound: - changed = False - except TimeOutException: - module.fail_json( - msg=f"Data Pipeline {dp_name} failed to delete within timeout {timeout} seconds", - ) - result = { - "data_pipeline": {}, - "msg": f"Data Pipeline {dp_name} deleted", - } - - return (changed, result) - - -def build_unique_id(module): - data = dict(module.params) - # removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline - [data.pop(each, None) for each in ("objects", "timeout")] - json_data = json.dumps(data, sort_keys=True).encode("utf-8") - hashed_data = hashlib.md5(json_data).hexdigest() - return hashed_data - - -def format_tags(tags): - """Reformats tags - - :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3}) - :returns: list of dicts (e.g. 
[{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}]) - - """ - return [dict(key=k, value=v) for k, v in tags.items()] - - -def get_result(client, dp_id): - """Get the current state of the data pipeline and reformat it to snake_case for exit_json - - :param object client: boto3 datapipeline client - :param string dp_id: pipeline id - :returns: reformatted dict of pipeline description - - """ - # pipeline_description returns a pipelineDescriptionList of length 1 - # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict) - dp = pipeline_description(client, dp_id)["pipelineDescriptionList"][0] - - # Get uniqueId and pipelineState in fields to add to the exit_json result - dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId") - dp["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState") - - # Remove fields; can't make a list snake_case and most of the data is redundant - del dp["fields"] - - # Note: tags is already formatted fine so we don't need to do anything with it - - # Reformat data pipeline and add reformatted fields back - dp = camel_dict_to_snake_dict(dp) - return dp - - -def diff_pipeline(client, module, objects, unique_id, dp_name): - """Check if there's another pipeline with the same unique_id and, if so, whether its definition needs to be updated""" - result = {} - changed = False - create_dp = False - - # See if there is already a pipeline with the same unique_id - unique_id = build_unique_id(module) - try: - dp_id = pipeline_id(client, dp_name) - dp_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId")) - if dp_unique_id != unique_id: - # A change is expected but not determined. Updated to a bool in create_pipeline(). - changed = "NEW_VERSION" - create_dp = True - # Unique ids are the same - check if pipeline needs modification - else: - dp_objects = client.get_pipeline_definition(pipelineId=dp_id)["pipelineObjects"] - # Definition needs to be updated - if dp_objects != objects: - changed, msg = define_pipeline(client, module, objects, dp_id) - # No changes - else: - msg = f"Data Pipeline {dp_name} is present" - data_pipeline = get_result(client, dp_id) - result = { - "data_pipeline": data_pipeline, - "msg": msg, - } - except DataPipelineNotFound: - create_dp = True - - return create_dp, changed, result - - -def define_pipeline(client, module, objects, dp_id): - """Puts pipeline definition""" - dp_name = module.params.get("name") - - if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = f"Data Pipeline {dp_name} is unable to be updated while in state FINISHED." - changed = False - - elif objects: - parameters = module.params.get("parameters") - values = module.params.get("values") - - try: - client.put_pipeline_definition( - pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values - ) - msg = f"Data Pipeline {dp_name} has been updated." - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg=( - f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields " - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed " - "objects" - ), - ) - else: - changed = False - msg = "" - - return changed, msg - - -def create_pipeline(client, module): - """Creates datapipeline.
Uses uniqueId to achieve idempotency.""" - dp_name = module.params.get("name") - objects = module.params.get("objects", None) - description = module.params.get("description", "") - tags = module.params.get("tags") - timeout = module.params.get("timeout") - - unique_id = build_unique_id(module) - create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name) - - if changed == "NEW_VERSION": - # delete old version - changed, creation_result = delete_pipeline(client, module) - - # There isn't a pipeline or it has different parameters than the pipeline in existence. - if create_dp: - # Make pipeline - try: - tags = format_tags(tags) - dp = client.create_pipeline(name=dp_name, uniqueId=unique_id, description=description, tags=tags) - dp_id = dp["pipelineId"] - pipeline_exists_timeout(client, dp_id, timeout) - except TimeOutException: - module.fail_json( - msg=f"Data Pipeline {dp_name} failed to create within timeout {timeout} seconds", - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg=f"Failed to create the data pipeline {dp_name}.", - ) - # Put pipeline definition - changed, msg = define_pipeline(client, module, objects, dp_id) - - changed = True - data_pipeline = get_result(client, dp_id) - result = { - "data_pipeline": data_pipeline, - "msg": f"Data Pipeline {dp_name} created. " + msg, - } - - return (changed, result) - - -def main(): - argument_spec = dict( - name=dict(required=True), - description=dict(required=False, default=""), - objects=dict(required=False, type="list", default=[], elements="dict"), - parameters=dict(required=False, type="list", default=[], elements="dict"), - timeout=dict(required=False, type="int", default=300), - state=dict(default="present", choices=["present", "absent", "active", "inactive"]), - tags=dict(required=False, type="dict", default={}, aliases=["resource_tags"]), - values=dict(required=False, type="list", default=[], elements="dict"), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False) - - try: - client = module.client("datapipeline") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - state = module.params.get("state") - if state == "present": - changed, result = create_pipeline(client, module) - elif state == "absent": - changed, result = delete_pipeline(client, module) - elif state == "active": - changed, result = activate_pipeline(client, module) - elif state == "inactive": - changed, result = deactivate_pipeline(client, module) - - module.exit_json(result=result, changed=changed) - - -if __name__ == "__main__": - main() diff --git a/directconnect_confirm_connection.py b/directconnect_confirm_connection.py deleted file mode 100644 index 870e459327d..00000000000 --- a/directconnect_confirm_connection.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: directconnect_confirm_connection -short_description: Confirms the creation of a hosted DirectConnect connection -description: - - Confirms the creation of a hosted DirectConnect connection, which requires approval before it can be used. - - DirectConnect connections that require approval would be in the C(ordering) state.
- - After confirmation, they will move to the C(pending) state and finally the C(available) state. - - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_confirm_connection). - The usage did not change. -author: - - "Matt Traynham (@mtraynham)" -options: - name: - description: - - The name of the Direct Connect connection. - - One of I(connection_id) or I(name) must be specified. - type: str - connection_id: - description: - - The ID of the Direct Connect connection. - - One of I(connection_id) or I(name) must be specified. - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -# confirm a Direct Connect by name -- name: confirm the connection id - community.aws.directconnect_confirm_connection: - name: my_host_direct_connect - -# confirm a Direct Connect by connection_id -- name: confirm the connection id - community.aws.directconnect_confirm_connection: - connection_id: dxcon-xxxxxxxx -""" - -RETURN = r""" - -connection_state: - description: The state of the connection. - returned: always - type: str - sample: pending -""" - -import traceback - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # handled by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} - - -@AWSRetry.jittered_backoff(**retry_params) -def describe_connections(client, params): - return client.describe_connections(**params) - - -def find_connection_id(client, connection_id=None, connection_name=None): - params = {} - if connection_id: - params["connectionId"] = connection_id - try: - response = describe_connections(client, params) - except (BotoCoreError, ClientError) as e: - if connection_id: - msg = f"Failed to describe DirectConnect ID {connection_id}" - else: - msg = "Failed to describe DirectConnect connections" - raise DirectConnectError( - msg=msg, - last_traceback=traceback.format_exc(), - exception=e, - ) - - match = [] - if len(response.get("connections", [])) == 1 and connection_id: - if response["connections"][0]["connectionState"] != "deleted": - match.append(response["connections"][0]["connectionId"]) - - for conn in response.get("connections", []): - if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": - match.append(conn["connectionId"]) - - if len(match) == 1: - return match[0] - else: - raise DirectConnectError(msg="Could not find a valid DirectConnect connection") - - -def get_connection_state(client, connection_id): - try: - response = describe_connections(client, dict(connectionId=connection_id)) - return response["connections"][0]["connectionState"] - except (BotoCoreError, ClientError, IndexError) as e: - raise DirectConnectError( - msg=f"Failed to describe DirectConnect connection {connection_id} state", - last_traceback=traceback.format_exc(), - exception=e, - ) - - -def main(): - argument_spec = dict(connection_id=dict(), name=dict()) - module = AnsibleAWSModule( - 
argument_spec=argument_spec, - mutually_exclusive=[["connection_id", "name"]], - required_one_of=[["connection_id", "name"]], - ) - client = module.client("directconnect") - - connection_id = module.params["connection_id"] - connection_name = module.params["name"] - - changed = False - connection_state = None - try: - connection_id = find_connection_id(client, connection_id, connection_name) - connection_state = get_connection_state(client, connection_id) - if connection_state == "ordering": - client.confirm_connection(connectionId=connection_id) - changed = True - connection_state = get_connection_state(client, connection_id) - except DirectConnectError as e: - if e.last_traceback: - module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) - else: - module.fail_json(msg=e.msg) - - module.exit_json(changed=changed, connection_state=connection_state) - - -if __name__ == "__main__": - main() diff --git a/directconnect_connection.py b/directconnect_connection.py deleted file mode 100644 index fd55a3b5291..00000000000 --- a/directconnect_connection.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: directconnect_connection -version_added: 1.0.0 -short_description: Creates, deletes, or modifies a DirectConnect connection -description: - - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location. - - Upon creation the connection may be added to a link aggregation group or established as a standalone connection. - - The connection may later be associated with or disassociated from a link aggregation group. - - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_connection). - The usage did not change. -author: - - "Sloane Hertel (@s-hertel)" -options: - state: - description: - - The state of the Direct Connect connection. - choices: - - present - - absent - type: str - required: true - name: - description: - - The name of the Direct Connect connection. This is required to create a - new connection. - - One of I(connection_id) or I(name) must be specified. - type: str - connection_id: - description: - - The ID of the Direct Connect connection. - - Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID. - - One of I(connection_id) or I(name) must be specified. - type: str - location: - description: - - Where the Direct Connect connection is located. - - Required when I(state=present). - type: str - bandwidth: - description: - - The bandwidth of the Direct Connect connection. - - Required when I(state=present). - choices: - - 1Gbps - - 10Gbps - type: str - link_aggregation_group: - description: - - The ID of the link aggregation group you want to associate with the connection. - - This is optional when a stand-alone connection is desired. - type: str - forced_update: - description: - - To modify I(bandwidth) or I(location) the connection needs to be deleted and recreated. - - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location).
- type: bool - default: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -# create a Direct Connect connection -- community.aws.directconnect_connection: - name: ansible-test-connection - state: present - location: EqDC2 - link_aggregation_group: dxlag-xxxxxxxx - bandwidth: 1Gbps - register: dc - -# disassociate the LAG from the connection -- community.aws.directconnect_connection: - state: present - connection_id: dc.connection.connection_id - location: EqDC2 - bandwidth: 1Gbps - -# replace the connection with one with more bandwidth -- community.aws.directconnect_connection: - state: present - name: ansible-test-connection - location: EqDC2 - bandwidth: 10Gbps - forced_update: true - -# delete the connection -- community.aws.directconnect_connection: - state: absent - name: ansible-test-connection -""" - -RETURN = r""" -connection: - description: The attributes of the direct connect connection. - type: complex - returned: I(state=present) - contains: - aws_device: - description: The endpoint which the physical connection terminates on. - returned: when the requested state is no longer 'requested' - type: str - sample: EqDC2-12pmo7hemtz1z - bandwidth: - description: The bandwidth of the connection. - returned: always - type: str - sample: 1Gbps - connection_id: - description: The ID of the connection. - returned: always - type: str - sample: dxcon-ffy9ywed - connection_name: - description: The name of the connection. - returned: always - type: str - sample: ansible-test-connection - connection_state: - description: The state of the connection. - returned: always - type: str - sample: pending - loa_issue_time: - description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment. - returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested') - type: str - sample: '2018-03-20T17:36:26-04:00' - location: - description: The location of the connection. - returned: always - type: str - sample: EqDC2 - owner_account: - description: The account that owns the direct connect connection. - returned: always - type: str - sample: '123456789012' - region: - description: The region in which the connection exists. 
- returned: always - type: str - sample: us-east-1 -""" - -import traceback - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # handled by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} - - -def connection_status(client, connection_id): - return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False) - - -def connection_exists(client, connection_id=None, connection_name=None, verify=True): - params = {} - if connection_id: - params["connectionId"] = connection_id - try: - response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) - except (BotoCoreError, ClientError) as e: - if connection_id: - msg = f"Failed to describe DirectConnect ID {connection_id}" - else: - msg = "Failed to describe DirectConnect connections" - raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) - - match = [] - connection = [] - - # look for matching connections - - if len(response.get("connections", [])) == 1 and connection_id: - if response["connections"][0]["connectionState"] != "deleted": - match.append(response["connections"][0]["connectionId"]) - connection.extend(response["connections"]) - - for conn in response.get("connections", []): - if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted": - match.append(conn["connectionId"]) - connection.append(conn) - - # verifying if the connections exists; if true, return connection identifier, otherwise return False - if verify and len(match) == 1: - return match[0] - elif verify: - return False - # not verifying if the connection exists; just return current connection info - elif len(connection) == 1: - return {"connection": connection[0]} - return {"connection": {}} - - -def create_connection(client, location, bandwidth, name, lag_id): - if not name: - raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.") - params = { - "location": location, - "bandwidth": bandwidth, - "connectionName": name, - } - if lag_id: - params["lagId"] = lag_id - - try: - connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) - except (BotoCoreError, ClientError) as e: - raise DirectConnectError( - msg=f"Failed to create DirectConnect connection {name}", - last_traceback=traceback.format_exc(), - exception=e, - ) - return connection["connectionId"] - - -def changed_properties(current_status, location, bandwidth): - current_bandwidth = current_status["bandwidth"] - current_location = current_status["location"] - - return current_bandwidth != bandwidth or current_location != location - - 
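-# A minimal sketch of how the pieces below fit together: changed_properties() gates the -# delete-and-recreate path in ensure_present(). Bandwidth and location cannot be modified -# in place, so a detected difference only leads to action when the user has also set -# forced_update=true. The helper name _needs_recreate is hypothetical, for illustration -# only; ensure_present() inlines this check rather than calling a helper. -def _needs_recreate(current_status, location, bandwidth, forced_update): - return changed_properties(current_status, location, bandwidth) and forced_update - -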
-@AWSRetry.jittered_backoff(**retry_params) -def update_associations(client, latest_state, connection_id, lag_id): - changed = False - if "lagId" in latest_state and lag_id != latest_state["lagId"]: - disassociate_connection_and_lag(client, connection_id, lag_id=latest_state["lagId"]) - changed = True - if (changed and lag_id) or (lag_id and "lagId" not in latest_state): - associate_connection_and_lag(client, connection_id, lag_id) - changed = True - return changed - - -def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update): - # the connection is found; get the latest state and see if it needs to be updated - if connection_id: - latest_state = connection_status(client, connection_id=connection_id)["connection"] - if changed_properties(latest_state, location, bandwidth) and forced_update: - ensure_absent(client, connection_id) - return ensure_present( - client=client, - connection_id=None, - connection_name=connection_name, - location=location, - bandwidth=bandwidth, - lag_id=lag_id, - forced_update=forced_update, - ) - elif update_associations(client, latest_state, connection_id, lag_id): - return True, connection_id - - # no connection found; create a new one - else: - return True, create_connection(client, location, bandwidth, connection_name, lag_id) - - return False, connection_id - - -@AWSRetry.jittered_backoff(**retry_params) -def ensure_absent(client, connection_id): - changed = False - if connection_id: - delete_connection(client, connection_id) - changed = True - - return changed - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=["present", "absent"]), - name=dict(), - location=dict(), - bandwidth=dict(choices=["1Gbps", "10Gbps"]), - link_aggregation_group=dict(), - connection_id=dict(), - forced_update=dict(type="bool", default=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=[("connection_id", "name")], - required_if=[("state", "present", ("location", "bandwidth"))], - ) - - connection = module.client("directconnect") - - state = module.params.get("state") - try: - connection_id = connection_exists( - connection, connection_id=module.params.get("connection_id"), connection_name=module.params.get("name") - ) - if not connection_id and module.params.get("connection_id"): - module.fail_json( - msg=f"The Direct Connect connection {module.params['connection_id']} does not exist.", - ) - - if state == "present": - changed, connection_id = ensure_present( - connection, - connection_id=connection_id, - connection_name=module.params.get("name"), - location=module.params.get("location"), - bandwidth=module.params.get("bandwidth"), - lag_id=module.params.get("link_aggregation_group"), - forced_update=module.params.get("forced_update"), - ) - response = connection_status(connection, connection_id) - elif state == "absent": - changed = ensure_absent(connection, connection_id) - response = {} - except DirectConnectError as e: - if e.last_traceback: - module.fail_json( - msg=e.msg, - exception=e.last_traceback, - **camel_dict_to_snake_dict(e.exception.response), - ) - else: - module.fail_json(msg=e.msg) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) - - -if __name__ == "__main__": - main() diff --git a/directconnect_gateway.py b/directconnect_gateway.py deleted file mode 100644 index b231f0e8f44..00000000000 --- a/directconnect_gateway.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: directconnect_gateway -author: - - Gobin Sougrakpam (@gobins) -version_added: 1.0.0 -short_description: Manage AWS Direct Connect gateway -description: - - Creates AWS Direct Connect Gateway. - - Deletes AWS Direct Connect Gateway. - - Attaches Virtual Gateways to Direct Connect Gateway. - - Detaches Virtual Gateways from Direct Connect Gateway. - - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway). The usage did not change. -options: - state: - description: - - Set I(state=present) to ensure a resource is created. - - Set I(state=absent) to remove a resource. - default: present - choices: [ "present", "absent"] - type: str - name: - description: - - Name of the Direct Connect Gateway to be created or deleted. - type: str - amazon_asn: - description: - - The Amazon side ASN. - - Required when I(state=present). - type: str - direct_connect_gateway_id: - description: - - The ID of an existing Direct Connect Gateway. - - Required when I(state=absent). - type: str - virtual_gateway_id: - description: - - The VPN gateway ID of an existing virtual gateway. - type: str - wait_timeout: - description: - - How long to wait for the association to be deleted. - type: int - default: 320 -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create a new direct connect gateway attached to virtual private gateway - community.aws.directconnect_gateway: - state: present - name: my-dx-gateway - amazon_asn: 7224 - virtual_gateway_id: vpg-12345 - register: created_dxgw - -- name: Create a new unattached dxgw - community.aws.directconnect_gateway: - state: present - name: my-dx-gateway - amazon_asn: 7224 - register: created_dxgw -""" - -RETURN = r""" -result: - description: - - The attributes of the Direct Connect Gateway. - type: complex - returned: I(state=present) - contains: - amazon_side_asn: - description: ASN on the Amazon side. - type: str - direct_connect_gateway_id: - description: The ID of the direct connect gateway. - type: str - direct_connect_gateway_name: - description: The name of the direct connect gateway. - type: str - direct_connect_gateway_state: - description: The state of the direct connect gateway. - type: str - owner_account: - description: The AWS account ID of the owner of the direct connect gateway.
- type: str -""" - -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def dx_gateway_info(client, gateway_id, module): - try: - resp = client.describe_direct_connect_gateways( - directConnectGatewayId=gateway_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch gateway information.") - if resp["directConnectGateways"]: - return resp["directConnectGateways"][0] - - -def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): - polling_increment_secs = 15 - max_retries = 3 - status_achieved = False - - for x in range(0, max_retries): - try: - response = check_dxgw_association( - client, - module, - gateway_id=gateway_id, - virtual_gateway_id=virtual_gateway_id, - ) - if response["directConnectGatewayAssociations"]: - if response["directConnectGatewayAssociations"][0]["associationState"] == status: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - else: - status_achieved = True - break - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed while waiting for gateway association.") - - result = response - return status_achieved, result - - -def associate_direct_connect_gateway(client, module, gateway_id): - params = dict() - params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") - try: - response = client.create_direct_connect_gateway_association( - directConnectGatewayId=gateway_id, - virtualGatewayId=params["virtual_gateway_id"], - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, "Failed to associate gateway") - - status_achieved, dxgw = wait_for_status(client, module, gateway_id, params["virtual_gateway_id"], "associating") - if not status_achieved: - module.fail_json(msg="Error waiting for dxgw to attach to vpg - please check the AWS console") - - result = response - return result - - -def delete_association(client, module, gateway_id, virtual_gateway_id): - try: - response = client.delete_direct_connect_gateway_association( - directConnectGatewayId=gateway_id, - virtualGatewayId=virtual_gateway_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete gateway association.") - - status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, "disassociating") - if not status_achieved: - module.fail_json(msg="Error waiting for dxgw to detach from vpg - please check the AWS console") - - result = response - return result - - -def create_dx_gateway(client, module): - params = dict() - params["name"] = module.params.get("name") - params["amazon_asn"] = module.params.get("amazon_asn") - try: - response = client.create_direct_connect_gateway( - directConnectGatewayName=params["name"], - amazonSideAsn=int(params["amazon_asn"]), - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to create direct connect gateway.") - - result = response - return result - - -def find_dx_gateway(client, module, gateway_id=None): - params = dict() - gateways = list() - if gateway_id is not None: - 
params["directConnectGatewayId"] = gateway_id - while True: - try: - resp = client.describe_direct_connect_gateways(**params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe gateways") - gateways.extend(resp["directConnectGateways"]) - if "nextToken" in resp: - params["nextToken"] = resp["nextToken"] - else: - break - if gateways != []: - count = 0 - for gateway in gateways: - if module.params.get("name") == gateway["directConnectGatewayName"]: - count += 1 - return gateway - return None - - -def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None): - try: - if virtual_gateway_id is None: - resp = client.describe_direct_connect_gateway_associations( - directConnectGatewayId=gateway_id, - ) - else: - resp = client.describe_direct_connect_gateway_associations( - directConnectGatewayId=gateway_id, - virtualGatewayId=virtual_gateway_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to check gateway association") - return resp - - -def ensure_present(client, module): - # If an existing direct connect gateway matches our args - # then a match is considered to have been found and we will not create another dxgw. - - changed = False - params = dict() - result = dict() - params["name"] = module.params.get("name") - params["amazon_asn"] = module.params.get("amazon_asn") - params["virtual_gateway_id"] = module.params.get("virtual_gateway_id") - - # check if a gateway matching our module args already exists - existing_dxgw = find_dx_gateway(client, module) - - if existing_dxgw is not None and existing_dxgw["directConnectGatewayState"] != "deleted": - gateway_id = existing_dxgw["directConnectGatewayId"] - # if a gateway_id was provided, check if it is attach to the DXGW - if params["virtual_gateway_id"]: - resp = check_dxgw_association( - client, module, gateway_id=gateway_id, virtual_gateway_id=params["virtual_gateway_id"] - ) - if not resp["directConnectGatewayAssociations"]: - # attach the dxgw to the supplied virtual_gateway_id - associate_direct_connect_gateway(client, module, gateway_id) - changed = True - # if params['virtual_gateway_id'] is not provided, check the dxgw is attached to a VPG. If so, detach it. - else: - existing_dxgw = find_dx_gateway(client, module) - - resp = check_dxgw_association(client, module, gateway_id=gateway_id) - if resp["directConnectGatewayAssociations"]: - for association in resp["directConnectGatewayAssociations"]: - if association["associationState"] not in ["disassociating", "disassociated"]: - delete_association( - client, - module, - gateway_id=gateway_id, - virtual_gateway_id=association["virtualGatewayId"], - ) - else: - # create a new dxgw - new_dxgw = create_dx_gateway(client, module) - changed = True - gateway_id = new_dxgw["directConnectGateway"]["directConnectGatewayId"] - - # if a vpc-id was supplied, attempt to attach it to the dxgw - if params["virtual_gateway_id"]: - associate_direct_connect_gateway(client, module, gateway_id) - resp = check_dxgw_association( - client, - module, - gateway_id=gateway_id, - ) - if resp["directConnectGatewayAssociations"]: - changed = True - - result = dx_gateway_info(client, gateway_id, module) - return changed, result - - -def ensure_absent(client, module): - # If an existing direct connect gateway matches our args - # then a match is considered to have been found and we will not create another dxgw. 
- - changed = False - result = dict() - dx_gateway_id = module.params.get("direct_connect_gateway_id") - existing_dxgw = find_dx_gateway(client, module, dx_gateway_id) - if existing_dxgw is not None: - resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) - if resp["directConnectGatewayAssociations"]: - for association in resp["directConnectGatewayAssociations"]: - if association["associationState"] not in ["disassociating", "disassociated"]: - delete_association( - client, - module, - gateway_id=dx_gateway_id, - virtual_gateway_id=association["virtualGatewayId"], - ) - # wait for deleting association - timeout = time.time() + module.params.get("wait_timeout") - while time.time() < timeout: - resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id) - if resp["directConnectGatewayAssociations"] != []: - time.sleep(15) - else: - break - - try: - resp = client.delete_direct_connect_gateway( - directConnectGatewayId=dx_gateway_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete gateway") - result = resp["directConnectGateway"] - changed = True - return changed - - -def main(): - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(), - amazon_asn=dict(), - virtual_gateway_id=dict(), - direct_connect_gateway_id=dict(), - wait_timeout=dict(type="int", default=320), - ) - required_if = [("state", "present", ["name", "amazon_asn"]), ("state", "absent", ["direct_connect_gateway_id"])] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - ) - - state = module.params.get("state") - - try: - client = module.client("directconnect") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - if state == "present": - (changed, results) = ensure_present(client, module) - elif state == "absent": - changed = ensure_absent(client, module) - results = {} - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(results)) - - -if __name__ == "__main__": - main() diff --git a/directconnect_link_aggregation_group.py b/directconnect_link_aggregation_group.py deleted file mode 100644 index 57907c93bb9..00000000000 --- a/directconnect_link_aggregation_group.py +++ /dev/null @@ -1,485 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: directconnect_link_aggregation_group -version_added: 1.0.0 -short_description: Manage Direct Connect LAG bundles -description: - - Create, delete, or modify a Direct Connect link aggregation group. - - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_link_aggregation_group). The usage did not change. -author: - - "Sloane Hertel (@s-hertel)" -options: - state: - description: - - The state of the Direct Connect link aggregation group. - choices: - - present - - absent - type: str - required: true - name: - description: - - The name of the Direct Connect link aggregation group. - type: str - link_aggregation_group_id: - description: - - The ID of the Direct Connect link aggregation group. - type: str - num_connections: - description: - - The number of connections with which to initialize the link aggregation group.
- type: int - min_links: - description: - - The minimum number of physical connections that must be operational for the LAG itself to be operational. - type: int - location: - description: - - The location of the link aggregation group. - type: str - bandwidth: - description: - - The bandwidth of the link aggregation group. - type: str - force_delete: - description: - - This allows the minimum number of links to be set to 0, any hosted connections disassociated, - and any virtual interfaces associated to the LAG deleted. - type: bool - default: false - connection_id: - description: - - A connection ID to link with the link aggregation group upon creation. - type: str - delete_with_disassociation: - description: - - To be used with I(state=absent) to delete connections after disassociating them with the LAG. - type: bool - default: false - wait: - description: - - Whether or not to wait for the operation to complete. - - May be useful when waiting for virtual interfaces to be deleted. - - The time to wait can be controlled by setting I(wait_timeout). - type: bool - default: false - wait_timeout: - description: - - The duration in seconds to wait if I(wait=true). - default: 120 - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -# create a Direct Connect link aggregation group -- community.aws.directconnect_link_aggregation_group: - state: present - location: EqDC2 - link_aggregation_group_id: dxlag-xxxxxxxx - bandwidth: 1Gbps -""" - -RETURN = r""" -changed: - type: bool - description: Whether or not the LAG has changed. - returned: always -aws_device: - type: str - description: The AWS Direct Connection endpoint that hosts the LAG. - sample: "EqSe2-1bwfvazist2k0" - returned: when I(state=present) -connections: - type: list - description: A list of connections bundled by this LAG. - sample: - "connections": [ - { - "aws_device": "EqSe2-1bwfvazist2k0", - "bandwidth": "1Gbps", - "connection_id": "dxcon-fgzjah5a", - "connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h", - "connection_state": "down", - "lag_id": "dxlag-fgnsp4rq", - "location": "EqSe2", - "owner_account": "123456789012", - "region": "us-west-2" - } - ] - returned: when I(state=present) -connections_bandwidth: - type: str - description: The individual bandwidth of the physical connections bundled by the LAG. - sample: "1Gbps" - returned: when I(state=present) -lag_id: - type: str - description: Unique identifier for the link aggregation group. - sample: "dxlag-fgnsp4rq" - returned: when I(state=present) -lag_name: - type: str - description: User-provided name for the link aggregation group. - returned: when I(state=present) -lag_state: - type: str - description: State of the LAG. - sample: "pending" - returned: when I(state=present) -location: - type: str - description: Where the connection is located. - sample: "EqSe2" - returned: when I(state=present) -minimum_links: - type: int - description: The minimum number of physical connections that must be operational for the LAG itself to be operational. - returned: when I(state=present) -number_of_connections: - type: int - description: The number of physical connections bundled by the LAG. - returned: when I(state=present) -owner_account: - type: str - description: Owner account ID of the LAG. - returned: when I(state=present) -region: - type: str - description: The region in which the LAG exists.
- returned: when I(state=present) -""" - -import traceback -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def lag_status(client, lag_id): - return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False) - - -def lag_exists(client, lag_id=None, lag_name=None, verify=True): - """If verify=True, returns the LAG ID or None - If verify=False, returns the LAG's data (or an empty dict) - """ - try: - if lag_id: - response = client.describe_lags(lagId=lag_id) - else: - response = client.describe_lags() - except botocore.exceptions.ClientError as e: - if lag_id and verify: - return False - elif lag_id: - return {} - else: - failed_op = "Failed to describe DirectConnect link aggregation groups." - raise DirectConnectError(msg=failed_op, last_traceback=traceback.format_exc(), exception=e) - - match = [] # List of LAG IDs that are exact matches - lag = [] # List of LAG data that are exact matches - - # look for matching LAGs - if len(response.get("lags", [])) == 1 and lag_id: - if response["lags"][0]["lagState"] != "deleted": - match.append(response["lags"][0]["lagId"]) - lag.append(response["lags"][0]) - else: - for each in response.get("lags", []): - if each["lagState"] != "deleted": - if not lag_id: - if lag_name == each["lagName"]: - match.append(each["lagId"]) - else: - match.append(each["lagId"]) - - # verifying if the LAG exists; if true, return the LAG identifier, otherwise return False - if verify and len(match) == 1: - return match[0] - elif verify: - return False - - # not verifying if the LAG exists; just return the current LAG info - else: - if len(lag) == 1: - return lag[0] - else: - return {} - - -def create_lag(client, num_connections, location, bandwidth, name, connection_id): - if not name: - raise DirectConnectError( - msg="Failed to create a Direct Connect link aggregation group: name required.", - last_traceback=None, - exception="", - ) - - parameters = dict( - numberOfConnections=num_connections, location=location, connectionsBandwidth=bandwidth, lagName=name - ) - if connection_id: - parameters.update(connectionId=connection_id) - try: - lag = client.create_lag(**parameters) - except botocore.exceptions.ClientError as e: - raise DirectConnectError( - msg=f"Failed to create DirectConnect link aggregation group {name}", - last_traceback=traceback.format_exc(), - exception=e, - ) - - return lag["lagId"] - - -def delete_lag(client, lag_id): - try: - client.delete_lag(lagId=lag_id) - except botocore.exceptions.ClientError as e: - raise DirectConnectError( - msg=f"Failed to delete Direct Connect link aggregation group {lag_id}.", - last_traceback=traceback.format_exc(), - exception=e, - ) - - -@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0,
catch_extra_error_codes=["DirectConnectClientException"]) -def _update_lag(client, lag_id, lag_name, min_links): - params = {} - if min_links: - params.update(minimumLinks=min_links) - if lag_name: - params.update(lagName=lag_name) - - client.update_lag(lagId=lag_id, **params) - - -def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout): - start = time.time() - - if min_links and min_links > num_connections: - raise DirectConnectError( - msg=f"The number of connections {num_connections} must be at least the minimum number of links {min_links} to update the LAG {lag_id}", - last_traceback=None, - exception=None, - ) - - while True: - try: - _update_lag(client, lag_id, lag_name, min_links) - except botocore.exceptions.ClientError as e: - if wait and time.time() - start <= wait_timeout: - continue - msg = f"Failed to update Direct Connect link aggregation group {lag_id}." - if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: - msg += f" Unable to set the minimum number of links to {min_links} while the LAG connections are being requested." - raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) - else: - break - - -def lag_changed(current_status, name, min_links): - """Determines if a modifiable link aggregation group attribute has been modified.""" - return (name and name != current_status["lagName"]) or (min_links and min_links != current_status["minimumLinks"]) - - -def ensure_present( - client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout -): - exists = lag_exists(client, lag_id, lag_name) - if not exists and lag_id: - raise DirectConnectError( - msg=f"The Direct Connect link aggregation group {lag_id} does not exist.", - last_traceback=None, - exception="", - ) - - # the LAG was found; get the latest state and see if it needs to be updated - if exists: - lag_id = exists - latest_state = lag_status(client, lag_id) - if lag_changed(latest_state, lag_name, min_links): - update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout) - return True, lag_id - return False, lag_id - - # no LAG found; create a new one - else: - lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id) - update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout) - return True, lag_id - - -def describe_virtual_interfaces(client, lag_id): - try: - response = client.describe_virtual_interfaces(connectionId=lag_id) - except botocore.exceptions.ClientError as e: - raise DirectConnectError( - msg=f"Failed to describe any virtual interfaces associated with LAG: {lag_id}", - last_traceback=traceback.format_exc(), - exception=e, - ) - return response.get("virtualInterfaces", []) - - -def get_connections_and_virtual_interfaces(client, lag_id): - virtual_interfaces = describe_virtual_interfaces(client, lag_id) - connections = lag_status(client, lag_id=lag_id).get("connections", []) - return virtual_interfaces, connections - - -def disassociate_vis(client, lag_id, virtual_interfaces): - for vi in virtual_interfaces: - try: - client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) - except botocore.exceptions.ClientError as e: - raise DirectConnectError( - msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.", -
last_traceback=traceback.format_exc(), - exception=e, - ) - - -def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout): - lag_id = lag_exists(client, lag_id, lag_name) - if not lag_id: - return False - - latest_status = lag_status(client, lag_id) - - # determine the associated connections and virtual interfaces to disassociate - virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id) - - # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete - if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete: - raise DirectConnectError( - msg=( - "There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG" - f" {lag_id}. To force deletion of the LAG use force_delete: true (if the LAG has virtual interfaces" - " they will be deleted). Optionally, to ensure hosted connections are deleted after disassociation use" - " delete_with_disassociation: true and wait: true (as Virtual Interfaces may take a few moments to" - " delete)" - ), - last_traceback=None, - exception=None, - ) - - # update min_links to be 0 so we can remove the LAG - update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout) - - # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached - for connection in connections: - disassociate_connection_and_lag(client, connection["connectionId"], lag_id) - if delete_with_disassociation: - delete_connection(client, connection["connectionId"]) - - for vi in virtual_interfaces: - delete_virtual_interface(client, vi["virtualInterfaceId"]) - - start_time = time.time() - while True: - try: - delete_lag(client, lag_id) - except DirectConnectError as e: - if ( - ("until its Virtual Interfaces are deleted" in str(e.exception)) - and (time.time() - start_time < wait_timeout) - and wait - ): - continue - raise - return True - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=["present", "absent"]), - name=dict(), - link_aggregation_group_id=dict(), - num_connections=dict(type="int"), - min_links=dict(type="int"), - location=dict(), - bandwidth=dict(), - connection_id=dict(), - delete_with_disassociation=dict(type="bool", default=False), - force_delete=dict(type="bool", default=False), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=120), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=[("link_aggregation_group_id", "name")], - required_if=[("state", "present", ("location", "bandwidth"))], - ) - - try: - connection = module.client("directconnect") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - state = module.params.get("state") - response = {} - try: - if state == "present": - changed, lag_id = ensure_present( - connection, - num_connections=module.params.get("num_connections"), - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - location=module.params.get("location"), - bandwidth=module.params.get("bandwidth"), - connection_id=module.params.get("connection_id"), - min_links=module.params.get("min_links"), - wait=module.params.get("wait"), - wait_timeout=module.params.get("wait_timeout"), - ) - response = lag_status(connection, lag_id) - elif state == "absent": - changed = ensure_absent( -
connection, - lag_id=module.params.get("link_aggregation_group_id"), - lag_name=module.params.get("name"), - force_delete=module.params.get("force_delete"), - delete_with_disassociation=module.params.get("delete_with_disassociation"), - wait=module.params.get("wait"), - wait_timeout=module.params.get("wait_timeout"), - ) - except DirectConnectError as e: - if e.last_traceback: - module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response)) - else: - module.fail_json(msg=e.msg) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) - - -if __name__ == "__main__": - main() diff --git a/directconnect_virtual_interface.py b/directconnect_virtual_interface.py deleted file mode 100644 index ec0c87099a4..00000000000 --- a/directconnect_virtual_interface.py +++ /dev/null @@ -1,538 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: directconnect_virtual_interface -version_added: 1.0.0 -short_description: Manage Direct Connect virtual interfaces -description: - - Create, delete, or modify a Direct Connect public or private virtual interface. - - Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_virtual_interface). The usage did not change. -author: - - "Sloane Hertel (@s-hertel)" -options: - state: - description: - - The desired state of the Direct Connect virtual interface. - choices: [present, absent] - type: str - required: true - id_to_associate: - description: - - The ID of the link aggregation group or connection to associate with the virtual interface. - aliases: [link_aggregation_group_id, connection_id] - type: str - required: true - public: - description: - - Set to C(true) to create a public virtual interface, C(false) to create a private one. - type: bool - name: - description: - - The name of the virtual interface. - type: str - vlan: - description: - - The VLAN ID. - default: 100 - type: int - bgp_asn: - description: - - The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. - default: 65000 - type: int - authentication_key: - description: - - The authentication key for BGP configuration. - type: str - amazon_address: - description: - - The Amazon address CIDR with which to create the virtual interface. - type: str - customer_address: - description: - - The customer address CIDR with which to create the virtual interface. - type: str - address_type: - description: - - The type of IP address for the BGP peer. - type: str - cidr: - description: - - A list of route filter prefix CIDRs with which to create the public virtual interface. - type: list - elements: str - virtual_gateway_id: - description: - - The virtual gateway ID required for creating a private virtual interface. - - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required. These options are mutually exclusive. - type: str - direct_connect_gateway_id: - description: - - The direct connect gateway ID for creating a private virtual interface. - - To create a private virtual interface I(virtual_gateway_id) or I(direct_connect_gateway_id) is required. These options are mutually exclusive. - type: str - virtual_interface_id: - description: - - The virtual interface ID.
- type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -address_family: - description: The address family for the BGP peer. - returned: always - type: str - sample: ipv4 -amazon_address: - description: IP address assigned to the Amazon interface. - returned: always - type: str - sample: 169.254.255.1/30 -asn: - description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. - returned: always - type: int - sample: 65000 -auth_key: - description: The authentication key for BGP configuration. - returned: always - type: str - sample: 0xZ59Y1JZ2oDOSh6YriIlyRE -bgp_peers: - description: A list of the BGP peers configured on this virtual interface. - returned: always - type: complex - contains: - address_family: - description: The address family for the BGP peer. - returned: always - type: str - sample: ipv4 - amazon_address: - description: IP address assigned to the Amazon interface. - returned: always - type: str - sample: 169.254.255.1/30 - asn: - description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. - returned: always - type: int - sample: 65000 - auth_key: - description: The authentication key for BGP configuration. - returned: always - type: str - sample: 0xZ59Y1JZ2oDOSh6YriIlyRE - bgp_peer_state: - description: The state of the BGP peer (verifying, pending, available). - returned: always - type: str - sample: available - bgp_status: - description: The up/down state of the BGP peer. - returned: always - type: str - sample: up - customer_address: - description: IP address assigned to the customer interface. - returned: always - type: str - sample: 169.254.255.2/30 -changed: - description: Indicates whether the virtual interface has been created/modified/deleted. - returned: always - type: bool - sample: false -connection_id: - description: - - The ID of the connection. This field is also used as the ID type for operations that - use multiple connection types (LAG, interconnect, and/or connection). - returned: always - type: str - sample: dxcon-fgb175av -customer_address: - description: IP address assigned to the customer interface. - returned: always - type: str - sample: 169.254.255.2/30 -customer_router_config: - description: Information for generating the customer router configuration. - returned: always - type: str -location: - description: Where the connection is located. - returned: always - type: str - sample: EqDC2 -owner_account: - description: The AWS account that will own the new virtual interface. - returned: always - type: str - sample: '123456789012' -route_filter_prefixes: - description: A list of routes to be advertised to the AWS network in this region (public virtual interface). - returned: always - type: complex - contains: - cidr: - description: A route to be advertised to the AWS network in this region. - returned: always - type: str - sample: 54.227.92.216/30 -virtual_gateway_id: - description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces. - returned: when I(public=False) - type: str - sample: vgw-f3ce259a -direct_connect_gateway_id: - description: The ID of the Direct Connect gateway. This only applies to private virtual interfaces. - returned: when I(public=False) - type: str - sample: f7593767-eded-44e8-926d-a2234175835d -virtual_interface_id: - description: The ID of the virtual interface.
- returned: always - type: str - sample: dxvif-fh0w7cex -virtual_interface_name: - description: The name of the virtual interface assigned by the customer. - returned: always - type: str - sample: test_virtual_interface -virtual_interface_state: - description: State of the virtual interface (confirming, verifying, pending, available, down, rejected). - returned: always - type: str - sample: available -virtual_interface_type: - description: The type of virtual interface (private, public). - returned: always - type: str - sample: private -vlan: - description: The VLAN ID. - returned: always - type: int - sample: 100 -""" - -EXAMPLES = r""" ---- -- name: create an association between a LAG and a virtual interface - community.aws.directconnect_virtual_interface: - state: present - name: "{{ name }}" - link_aggregation_group_id: dxlag-XXXXXXXX - -- name: remove an association between a connection and virtual interface - community.aws.directconnect_virtual_interface: - state: absent - connection_id: dxcon-XXXXXXXX - virtual_interface_id: dxvif-XXXXXXXX - -""" - -import traceback - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - # handled by AnsibleAWSModule - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError -from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def try_except_ClientError(failure_msg): - """ - Wrapper for boto3 calls that uses AWSRetry and handles exceptions - """ - - def wrapper(f): - def run_func(*args, **kwargs): - try: - result = AWSRetry.jittered_backoff( - retries=8, delay=5, catch_extra_error_codes=["DirectConnectClientException"] - )(f)(*args, **kwargs) - except (ClientError, BotoCoreError) as e: - raise DirectConnectError(failure_msg, traceback.format_exc(), e) - return result - - return run_func - - return wrapper - - -def find_unique_vi(client, connection_id, virtual_interface_id, name): - """ - Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found. - If multiple matches are found False is returned. If no matches are found None is returned. - """ - - # Get the virtual interfaces, filtering by the ID if provided. - vi_params = {} - if virtual_interface_id: - vi_params = {"virtualInterfaceId": virtual_interface_id} - - virtual_interfaces = try_except_ClientError(failure_msg="Failed to describe virtual interface")( - client.describe_virtual_interfaces - )(**vi_params).get("virtualInterfaces") - - # Remove deleting/deleted matches from the results.
- virtual_interfaces = [vi for vi in virtual_interfaces if vi["virtualInterfaceState"] not in ("deleting", "deleted")] - - matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id) - return exact_match(matching_virtual_interfaces) - - -def exact_match(virtual_interfaces): - """ - Returns the virtual interface ID if exactly one match was found, - None if no match was found (the virtual interface needs to be created), - False if multiple matches were found - """ - - if not virtual_interfaces: - return None - if len(virtual_interfaces) == 1: - return virtual_interfaces[0]["virtualInterfaceId"] - else: - return False - - -def filter_virtual_interfaces(virtual_interfaces, name, connection_id): - """ - Filters the available virtual interfaces to try to find a unique match - """ - # Filter by name if provided. - if name: - matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name) - if len(matching_by_name) == 1: - return matching_by_name - else: - matching_by_name = virtual_interfaces - - # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated) - if connection_id and len(matching_by_name) > 1: - matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id) - if len(matching_by_connection_id) == 1: - return matching_by_connection_id - else: - matching_by_connection_id = matching_by_name - - return matching_by_connection_id - - -def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id): - """ - Return virtual interfaces that have the connection_id associated - """ - return [vi for vi in virtual_interfaces if vi["connectionId"] == connection_id] - - -def find_virtual_interface_by_name(virtual_interfaces, name): - """ - Return virtual interfaces that match the provided name - """ - return [vi for vi in virtual_interfaces if vi["virtualInterfaceName"] == name] - - -def vi_state(client, virtual_interface_id): - """ - Returns the latest data for the virtual interface.
- """ - err_msg = f"Failed to describe virtual interface: {virtual_interface_id}" - vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( - virtualInterfaceId=virtual_interface_id - ) - return vi["virtualInterfaces"][0] - - -def assemble_params_for_creating_vi(params): - """ - Returns kwargs to use in the call to create the virtual interface - - Params for public virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr - Params for private virtual interfaces: - virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId - """ - - public = params["public"] - name = params["name"] - vlan = params["vlan"] - bgp_asn = params["bgp_asn"] - auth_key = params["authentication_key"] - amazon_addr = params["amazon_address"] - customer_addr = params["customer_address"] - family_addr = params["address_type"] - cidr = params["cidr"] - virtual_gateway_id = params["virtual_gateway_id"] - direct_connect_gateway_id = params["direct_connect_gateway_id"] - - parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn) - opt_params = dict( - authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr - ) - - for name, value in opt_params.items(): - if value: - parameters[name] = value - - # virtual interface type specific parameters - if public and cidr: - parameters["routeFilterPrefixes"] = [{"cidr": c} for c in cidr] - if not public: - if virtual_gateway_id: - parameters["virtualGatewayId"] = virtual_gateway_id - elif direct_connect_gateway_id: - parameters["directConnectGatewayId"] = direct_connect_gateway_id - - return parameters - - -def create_vi(client, public, associated_id, creation_params): - """ - :param public: a boolean - :param associated_id: a link aggregation group ID or connection ID to associate - with the virtual interface. - :param creation_params: a dict of parameters to use in the AWS SDK call - :return The ID of the created virtual interface - """ - err_msg = "Failed to create virtual interface" - if public: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)( - connectionId=associated_id, newPublicVirtualInterface=creation_params - ) - else: - vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)( - connectionId=associated_id, newPrivateVirtualInterface=creation_params - ) - return vi["virtualInterfaceId"] - - -def modify_vi(client, virtual_interface_id, connection_id): - """ - Associate a new connection ID - """ - err_msg = f"Unable to associate {connection_id} with virtual interface {virtual_interface_id}" - try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( - virtualInterfaceId=virtual_interface_id, connectionId=connection_id - ) - - -def needs_modification(client, virtual_interface_id, connection_id): - """ - Determine if the associated connection ID needs to be updated - """ - return vi_state(client, virtual_interface_id).get("connectionId") != connection_id - - -def ensure_state(connection, module): - changed = False - - state = module.params["state"] - connection_id = module.params["id_to_associate"] - public = module.params["public"] - name = module.params["name"] - - virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get("virtual_interface_id"), name) - - if virtual_interface_id is False: - module.fail_json( - msg=( - "Multiple virtual interfaces were found. 
Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match." - ) - ) - - if state == "present": - if not virtual_interface_id and module.params["virtual_interface_id"]: - module.fail_json(msg=f"The virtual interface {module.params['virtual_interface_id']} does not exist.") - - elif not virtual_interface_id: - assembled_params = assemble_params_for_creating_vi(module.params) - virtual_interface_id = create_vi(connection, public, connection_id, assembled_params) - changed = True - - if needs_modification(connection, virtual_interface_id, connection_id): - modify_vi(connection, virtual_interface_id, connection_id) - changed = True - - latest_state = vi_state(connection, virtual_interface_id) - - else: - if virtual_interface_id: - delete_virtual_interface(connection, virtual_interface_id) - changed = True - - latest_state = {} - - return changed, latest_state - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=["present", "absent"]), - id_to_associate=dict(required=True, aliases=["link_aggregation_group_id", "connection_id"]), - public=dict(type="bool"), - name=dict(), - vlan=dict(type="int", default=100), - bgp_asn=dict(type="int", default=65000), - authentication_key=dict(no_log=True), - amazon_address=dict(), - customer_address=dict(), - address_type=dict(), - cidr=dict(type="list", elements="str"), - virtual_gateway_id=dict(), - direct_connect_gateway_id=dict(), - virtual_interface_id=dict(), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=[["virtual_interface_id", "name"]], - required_if=[ - ["state", "present", ["public"]], - ["public", True, ["amazon_address"]], - ["public", True, ["customer_address"]], - ["public", True, ["cidr"]], - ], - mutually_exclusive=[["virtual_gateway_id", "direct_connect_gateway_id"]], - ) - - connection = module.client("directconnect") - - try: - changed, latest_state = ensure_state(connection, module) - except DirectConnectError as e: - if e.exception: - module.fail_json_aws(exception=e.exception, msg=e.msg) - else: - module.fail_json(msg=e.msg) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state)) - - -if __name__ == "__main__": - main() diff --git a/dms_endpoint.py b/dms_endpoint.py deleted file mode 100644 index b417003689d..00000000000 --- a/dms_endpoint.py +++ /dev/null @@ -1,691 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: dms_endpoint -version_added: 1.0.0 -short_description: Creates or destroys a data migration services endpoint -description: - - Creates or destroys a data migration services endpoint, - that can be used to replicate data. -options: - state: - description: - - State of the endpoint. - default: present - choices: ['present', 'absent'] - type: str - endpointidentifier: - description: - - An identifier name for the endpoint. - type: str - required: true - endpointtype: - description: - - Type of endpoint we want to manage. - - Required when I(state=present). - choices: ['source', 'target'] - type: str - enginename: - description: - - Database engine that we want to use, please refer to - the AWS DMS for more information on the supported - engines and their limitations. - - Required when I(state=present). 
- choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora', - 'redshift', 's3', 'db2', 'azuredb', 'sybase', - 'dynamodb', 'mongodb', 'sqlserver'] - type: str - username: - description: - - Username our endpoint will use to connect to the database. - type: str - password: - description: - - Password used to connect to the database. - - This attribute can only be written; the AWS API does not return this parameter. - type: str - servername: - description: - - Servername that the endpoint will connect to. - type: str - port: - description: - - TCP port for access to the database. - type: int - databasename: - description: - - Name for the database on the origin or target side. - type: str - extraconnectionattributes: - description: - - Extra attributes for the database connection. The AWS documentation - states "For more information about extra connection attributes, - see the documentation section for your data store." - type: str - kmskeyid: - description: - - Encryption key to use to encrypt replication storage and - connection information. - type: str - tags: - description: - - A dictionary of tags to add to the endpoint. - type: dict - certificatearn: - description: - - Amazon Resource Name (ARN) for the certificate. - type: str - sslmode: - description: - - Mode used for the SSL connection. - default: none - choices: ['none', 'require', 'verify-ca', 'verify-full'] - type: str - serviceaccessrolearn: - description: - - Amazon Resource Name (ARN) for the service access role that you - want to use to create the endpoint. - type: str - externaltabledefinition: - description: - - The external table definition. - type: str - dynamodbsettings: - description: - - Settings in JSON format for the target Amazon DynamoDB endpoint - if source or target is dynamodb. - type: dict - s3settings: - description: - - S3 bucket settings for the target Amazon S3 endpoint. - type: dict - dmstransfersettings: - description: - - The settings in JSON format for the DMS transfer type of - source endpoint. - type: dict - mongodbsettings: - description: - - Settings in JSON format for the source MongoDB endpoint. - type: dict - kinesissettings: - description: - - Settings in JSON format for the target Amazon Kinesis - Data Streams endpoint. - type: dict - elasticsearchsettings: - description: - - Settings in JSON format for the target Elasticsearch endpoint. - type: dict - wait: - description: - - Whether Ansible should wait for the object to be deleted when I(state=absent). - type: bool - default: false - timeout: - description: - - Time in seconds we should wait for when deleting a resource. - - Required when I(wait=true). - type: int - retries: - description: - - Number of times we should retry when deleting a resource. - - Required when I(wait=true). - type: int -author: - - "Rui Moreira (@ruimoreira)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details -- name: Endpoint Creation - community.aws.dms_endpoint: - state: present - endpointidentifier: 'testsource' - endpointtype: source - enginename: aurora - username: testing1 - password: testint1234 - servername: testing.domain.com - port: 3306 - databasename: 'testdb' - sslmode: none - wait: false -""" - -RETURN = r""" -endpoint: - description: - - A description of the DMS endpoint. - returned: success - type: dict - contains: - database_name: - description: - - The name of the database at the endpoint.
- type: str - returned: success - example: "exampledb" - endpoint_arn: - description: - - The ARN that uniquely identifies the endpoint. - type: str - returned: success - example: "arn:aws:dms:us-east-1:123456789012:endpoint:1234556789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" - endpoint_identifier: - description: - - The database endpoint identifier. - type: str - returned: success - example: "ansible-test-12345678-dms" - endpoint_type: - description: - - The type of endpoint. Valid values are C(SOURCE) and C(TARGET). - type: str - returned: success - example: "SOURCE" - engine_display_name: - description: - - The expanded name for the engine name. - type: str - returned: success - example: "Amazon Aurora MySQL" - engine_name: - description: - - The database engine name. - type: str - returned: success - example: "aurora" - kms_key_id: - description: - - A KMS key ID that is used to encrypt the connection parameters for the endpoint. - type: str - returned: success - example: "arn:aws:kms:us-east-1:123456789012:key/01234567-abcd-12ab-98fe-123456789abc" - port: - description: - - The port used to access the endpoint. - type: int - returned: success - example: 3306 - server_name: - description: - - The name of the server at the endpoint. - type: str - returned: success - example: "ansible-test-123456789.example.com" - ssl_mode: - description: - - The SSL mode used to connect to the endpoint. - type: str - returned: success - example: "none" - tags: - description: - - A dictionary representing the tags attached to the endpoint. - type: dict - returned: success - example: {"MyTagKey": "MyTagValue"} - username: - description: - - The user name used to connect to the endpoint. - type: str - returned: success - example: "example-username" - dms_transfer_settings: - description: - - Additional transfer related settings. - type: dict - returned: when additional DMS Transfer settings have been configured. - s3_settings: - description: - - Additional settings for S3 endpoints. - type: dict - returned: when I(enginename) is C(s3) - mongo_db_settings: - description: - - Additional settings for MongoDB endpoints. - type: dict - returned: when I(enginename) is C(mongodb) - kinesis_settings: - description: - - Additional settings for Kinesis endpoints. - type: dict - returned: when I(enginename) is C(kinesis) - kafka_settings: - description: - - Additional settings for Kafka endpoints. - type: dict - returned: when I(enginename) is C(kafka) - elasticsearch_settings: - description: - - Additional settings for Elasticsearch endpoints. - type: dict - returned: when I(enginename) is C(elasticsearch) - neptune_settings: - description: - - Additional settings for Amazon Neptune endpoints. - type: dict - returned: when I(enginename) is C(neptune) - redshift_settings: - description: - - Additional settings for Redshift endpoints. - type: dict - returned: when I(enginename) is C(redshift) - postgre_sql_settings: - description: - - Additional settings for PostgreSQL endpoints. - type: dict - returned: when I(enginename) is C(postgres) - my_sql_settings: - description: - - Additional settings for MySQL endpoints. - type: dict - returned: when I(enginename) is C(mysql) - oracle_settings: - description: - - Additional settings for Oracle endpoints. - type: dict - returned: when I(enginename) is C(oracle) - sybase_settings: - description: - - Additional settings for Sybase endpoints.
- type: dict - returned: when I(enginename) is C(sybase) - microsoft_sql_server_settings: - description: - - Additional settings for Microsoft SQL Server endpoints. - type: dict - returned: when I(enginename) is C(sqlserver) - i_b_m_db_settings: - description: - - Additional settings for IBM DB2 endpoints. - type: dict - returned: when I(enginename) is C(db2) - doc_db_settings: - description: - - Additional settings for DocumentDB endpoints. - type: dict - returned: when I(enginename) is C(documentdb) - redis_settings: - description: - - Additional settings for Redis endpoints. - type: dict - returned: when I(enginename) is C(redis) -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -backoff_params = dict(retries=5, delay=1, backoff=1.5) - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_describe_tags(connection, **params): - """returns the tags attached to the resource as a dict""" - tags = connection.list_tags_for_resource(**params).get("TagList", []) - return boto3_tag_list_to_ansible_dict(tags) - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_describe_endpoints(connection, **params): - try: - endpoints = connection.describe_endpoints(**params) - except is_boto3_error_code("ResourceNotFoundFault"): - return None - return endpoints.get("Endpoints", None) - - -def describe_endpoint(connection, endpoint_identifier): - """checks if the endpoint exists""" - endpoint_filter = dict(Name="endpoint-id", Values=[endpoint_identifier]) - try: - endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe the DMS endpoint.") - - if not endpoints: - return None - - endpoint = endpoints[0] - try: - tags = dms_describe_tags(connection, ResourceArn=endpoint["EndpointArn"]) - endpoint["tags"] = tags - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags") - return endpoint - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_delete_endpoint(client, **params): - """deletes the DMS endpoint based on the EndpointArn""" - if module.params.get("wait"): - return delete_dms_endpoint(client, params["EndpointArn"]) - else: - return client.delete_endpoint(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_create_endpoint(client, **params): - """creates the DMS endpoint""" - return client.create_endpoint(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_modify_endpoint(client, **params): - """updates the endpoint""" - return client.modify_endpoint(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def get_endpoint_deleted_waiter(client): - return client.get_waiter("endpoint_deleted") - -
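delete_dms_endpoint() further below drives this endpoint_deleted waiter, filtering on the endpoint ARN and building the WaiterConfig from the module's timeout and retries options. A minimal sketch of the same waiter usage against a bare boto3 client; the region, endpoint ARN, Delay, and MaxAttempts values are placeholders:

    # Poll describe_endpoints until the ARN-filtered endpoint disappears;
    # region, endpoint ARN, Delay, and MaxAttempts are placeholder values.
    import boto3

    client = boto3.client("dms", region_name="us-east-1")
    waiter = client.get_waiter("endpoint_deleted")
    waiter.wait(
        Filters=[{"Name": "endpoint-arn", "Values": ["arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"]}],
        WaiterConfig={"Delay": 5, "MaxAttempts": 60},
    )

The Delay/MaxAttempts pair bounds the total wait at roughly Delay * MaxAttempts seconds, which is why the module exposes both as the timeout and retries options.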
-@AWSRetry.jittered_backoff(**backoff_params) -def dms_remove_tags(client, **params): - return client.remove_tags_from_resource(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def dms_add_tags(client, **params): - return client.add_tags_to_resource(**params) - - -def endpoint_exists(endpoint): - """Returns boolean based on the existence of the endpoint - :param endpoint: dict containing the described endpoint - :return: bool - """ - return bool(len(endpoint["Endpoints"])) - - -def delete_dms_endpoint(connection, endpoint_arn): - try: - delete_arn = dict(EndpointArn=endpoint_arn) - if module.params.get("wait"): - delete_output = connection.delete_endpoint(**delete_arn) - delete_waiter = get_endpoint_deleted_waiter(connection) - delete_waiter.wait( - Filters=[{"Name": "endpoint-arn", "Values": [endpoint_arn]}], - WaiterConfig={"Delay": module.params.get("timeout"), "MaxAttempts": module.params.get("retries")}, - ) - return delete_output - else: - return connection.delete_endpoint(**delete_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete the DMS endpoint.") - - -def create_module_params(): - """ - Reads the module parameters and returns a dict - :return: dict - """ - endpoint_parameters = dict( - EndpointIdentifier=module.params.get("endpointidentifier"), - EndpointType=module.params.get("endpointtype"), - EngineName=module.params.get("enginename"), - Username=module.params.get("username"), - Password=module.params.get("password"), - ServerName=module.params.get("servername"), - Port=module.params.get("port"), - DatabaseName=module.params.get("databasename"), - SslMode=module.params.get("sslmode"), - ) - if module.params.get("EndpointArn"): - endpoint_parameters["EndpointArn"] = module.params.get("EndpointArn") - if module.params.get("certificatearn"): - endpoint_parameters["CertificateArn"] = module.params.get("certificatearn") - - if module.params.get("dmstransfersettings"): - endpoint_parameters["DmsTransferSettings"] = module.params.get("dmstransfersettings") - - if module.params.get("extraconnectionattributes"): - endpoint_parameters["ExtraConnectionAttributes"] = module.params.get("extraconnectionattributes") - - if module.params.get("kmskeyid"): - endpoint_parameters["KmsKeyId"] = module.params.get("kmskeyid") - - if module.params.get("tags"): - endpoint_parameters["Tags"] = module.params.get("tags") - - if module.params.get("serviceaccessrolearn"): - endpoint_parameters["ServiceAccessRoleArn"] = module.params.get("serviceaccessrolearn") - - if module.params.get("externaltabledefinition"): - endpoint_parameters["ExternalTableDefinition"] = module.params.get("externaltabledefinition") - - if module.params.get("dynamodbsettings"): - endpoint_parameters["DynamoDbSettings"] = module.params.get("dynamodbsettings") - - if module.params.get("s3settings"): - endpoint_parameters["S3Settings"] = module.params.get("s3settings") - - if module.params.get("mongodbsettings"): - endpoint_parameters["MongoDbSettings"] = module.params.get("mongodbsettings") - - if module.params.get("kinesissettings"): - endpoint_parameters["KinesisSettings"] = module.params.get("kinesissettings") - - if module.params.get("elasticsearchsettings"): - endpoint_parameters["ElasticsearchSettings"] = module.params.get("elasticsearchsettings") - - if module.params.get("wait"): - endpoint_parameters["wait"] = module.boolean(module.params.get("wait")) - - if module.params.get("timeout"): - endpoint_parameters["timeout"] = 
module.params.get("timeout") - - if module.params.get("retries"): - endpoint_parameters["retries"] = module.params.get("retries") - - return endpoint_parameters - - -def compare_params(param_described): - """ - Compares the dict obtained from the describe DMS endpoint call with the - values read from the module parameters. The password can never be - compared, as boto3's method for describing a DMS endpoint does not - return its value, presumably for security reasons. - """ - param_described = dict(param_described) - modparams = create_module_params() - # modify can't update tags - param_described.pop("Tags", None) - modparams.pop("Tags", None) - changed = False - for paramname in modparams: - if paramname == "Password": - continue - if paramname not in param_described: - changed = True - continue - if param_described[paramname] == modparams[paramname]: - continue - if str(param_described[paramname]).lower() == str(modparams[paramname]).lower(): - continue - changed = True - return changed - - -def modify_dms_endpoint(connection, endpoint): - arn = endpoint["EndpointArn"] - try: - params = create_module_params() - # modify can't update tags - params.pop("Tags", None) - return dms_modify_endpoint(connection, EndpointArn=arn, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params) - - -def ensure_tags(connection, endpoint): - desired_tags = module.params.get("tags", None) - if desired_tags is None: - return False - - current_tags = endpoint.get("tags", {}) - - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, module.params.get("purge_tags")) - - if not tags_to_remove and not tags_to_add: - return False - - if module.check_mode: - return True - - arn = endpoint.get("EndpointArn") - - try: - if tags_to_remove: - dms_remove_tags(connection, ResourceArn=arn, TagKeys=tags_to_remove) - if tags_to_add: - tag_list = ansible_dict_to_boto3_tag_list(tags_to_add) - dms_add_tags(connection, ResourceArn=arn, Tags=tag_list) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update DMS endpoint tags.") - - return True - - -def create_dms_endpoint(connection): - """ - Creates the DMS endpoint - :param connection: boto3 aws connection - :return: information about the DMS endpoint object - """ - - try: - params = create_module_params() - return dms_create_endpoint(connection, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create DMS endpoint.") - - -def main(): - argument_spec = dict( - state=dict(choices=["present", "absent"], default="present"), - endpointidentifier=dict(required=True), - endpointtype=dict(choices=["source", "target"]), - enginename=dict( - choices=[ - "mysql", - "oracle", - "postgres", - "mariadb", - "aurora", - "redshift", - "s3", - "db2", - "azuredb", - "sybase", - "dynamodb", - "mongodb", - "sqlserver", - ], - required=False, - ), - username=dict(), - password=dict(no_log=True), - servername=dict(), - port=dict(type="int"), - databasename=dict(), - extraconnectionattributes=dict(), - kmskeyid=dict(no_log=False), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - certificatearn=dict(), - sslmode=dict(choices=["none", "require", "verify-ca", "verify-full"], default="none"), - serviceaccessrolearn=dict(), - externaltabledefinition=dict(), -
dynamodbsettings=dict(type="dict"), - s3settings=dict(type="dict"), - dmstransfersettings=dict(type="dict"), - mongodbsettings=dict(type="dict"), - kinesissettings=dict(type="dict"), - elasticsearchsettings=dict(type="dict"), - wait=dict(type="bool", default=False), - timeout=dict(type="int"), - retries=dict(type="int"), - ) - global module - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[ - ["state", "present", ["endpointtype"]], - ["state", "present", ["enginename"]], - ["state", "absent", ["wait"]], - ["wait", True, ["timeout"]], - ["wait", True, ["retries"]], - ], - supports_check_mode=False, - ) - exit_message = None - changed = False - - state = module.params.get("state") - - dmsclient = module.client("dms") - endpoint = describe_endpoint(dmsclient, module.params.get("endpointidentifier")) - if state == "present": - if endpoint: - changed |= ensure_tags(dmsclient, endpoint) - params_changed = compare_params(endpoint) - if params_changed: - updated_dms = modify_dms_endpoint(dmsclient, endpoint) - exit_message = updated_dms - endpoint = exit_message.get("Endpoint") - changed = True - else: - exit_message = "Endpoint already exists" - else: - exit_message = create_dms_endpoint(dmsclient) - endpoint = exit_message.get("Endpoint") - changed = True - - if changed: - # modify and create don't return tags - tags = dms_describe_tags(dmsclient, ResourceArn=endpoint["EndpointArn"]) - endpoint["tags"] = tags - elif state == "absent": - if endpoint: - delete_results = delete_dms_endpoint(dmsclient, endpoint["EndpointArn"]) - exit_message = delete_results - endpoint = None - changed = True - else: - changed = False - exit_message = "DMS Endpoint does not exist" - - endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=["tags"]) - module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message) - - -if __name__ == "__main__": - main() diff --git a/dms_replication_subnet_group.py b/dms_replication_subnet_group.py deleted file mode 100644 index 7135aa14ea6..00000000000 --- a/dms_replication_subnet_group.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: dms_replication_subnet_group -version_added: 1.0.0 -short_description: creates or destroys a data migration services subnet group -description: - - Creates or destroys a data migration services subnet group. -options: - state: - description: - - State of the subnet group. - default: present - choices: ['present', 'absent'] - type: str - identifier: - description: - - The name for the replication subnet group. - This value is stored as a lowercase string. - Must contain no more than 255 alphanumeric characters, - periods, spaces, underscores, or hyphens. Must not be "default". - type: str - required: true - description: - description: - - The description for the subnet group. - type: str - required: true - subnet_ids: - description: - - A list containing the subnet IDs for the replication subnet group; - must contain at least 2 items.
- type: list - elements: str - required: true -author: - - "Rui Moreira (@ruimoreira)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- community.aws.dms_replication_subnet_group: - state: present - identifier: "dev-sngroup" - description: "Development subnet group" - subnet_ids: ['subnet-id1','subnet-id2'] -""" - -RETURN = r""" # """ - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -backoff_params = dict(retries=5, delay=1, backoff=1.5) - - -@AWSRetry.jittered_backoff(**backoff_params) -def describe_subnet_group(connection, subnet_group): - """checks if the replication subnet group exists""" - try: - subnet_group_filter = dict(Name="replication-subnet-group-id", Values=[subnet_group]) - return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter]) - except botocore.exceptions.ClientError: - return {"ReplicationSubnetGroups": []} - - -@AWSRetry.jittered_backoff(**backoff_params) -def replication_subnet_group_create(connection, **params): - """creates the replication subnet group""" - return connection.create_replication_subnet_group(**params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def replication_subnet_group_modify(connection, **modify_params): - return connection.modify_replication_subnet_group(**modify_params) - - -@AWSRetry.jittered_backoff(**backoff_params) -def replication_subnet_group_delete(module, connection): - subnetid = module.params.get("identifier") - delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) - return connection.delete_replication_subnet_group(**delete_parameters) - - -def replication_subnet_exists(subnet): - """Returns boolean based on the existence of the replication subnet group - :param subnet: dict containing the described replication subnet groups - :return: bool - """ - return bool(len(subnet["ReplicationSubnetGroups"])) - - -def create_module_params(module): - """ - Reads the module parameters and returns a dict - :return: dict - """ - instance_parameters = dict( - # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API - ReplicationSubnetGroupIdentifier=module.params.get("identifier").lower(), - ReplicationSubnetGroupDescription=module.params.get("description"), - SubnetIds=module.params.get("subnet_ids"), - ) - - return instance_parameters - - -def compare_params(module, param_described): - """ - Compares the dict obtained from the describe call with the values - read from the module parameters. - """ - modparams = create_module_params(module) - changed = False - # need to sanitize values that get returned from the API - if "VpcId" in param_described.keys(): - param_described.pop("VpcId") - if "SubnetGroupStatus" in param_described.keys(): - param_described.pop("SubnetGroupStatus") - for paramname in modparams.keys(): - if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]: - pass - elif paramname == "SubnetIds": - subnets = [] - for subnet in param_described.get("Subnets"): - subnets.append(subnet.get("SubnetIdentifier")) - for modulesubnet in modparams["SubnetIds"]: - if
modulesubnet in subnets: - pass - else: - changed = True - return changed - - -def create_replication_subnet_group(module, connection): - try: - params = create_module_params(module) - return replication_subnet_group_create(connection, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create DMS replication subnet group.") - - -def modify_replication_subnet_group(module, connection): - try: - modify_params = create_module_params(module) - return replication_subnet_group_modify(connection, **modify_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to modify the DMS replication subnet group.") - - -def main(): - argument_spec = dict( - state=dict(type="str", choices=["present", "absent"], default="present"), - identifier=dict(type="str", required=True), - description=dict(type="str", required=True), - subnet_ids=dict(type="list", elements="str", required=True), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - exit_message = None - changed = False - - state = module.params.get("state") - dmsclient = module.client("dms") - subnet_group = describe_subnet_group(dmsclient, module.params.get("identifier")) - if state == "present": - if replication_subnet_exists(subnet_group): - if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]): - if not module.check_mode: - exit_message = modify_replication_subnet_group(module, dmsclient) - else: - exit_message = "Check mode enabled" - changed = True - else: - exit_message = "No changes to subnet group" - else: - if not module.check_mode: - exit_message = create_replication_subnet_group(module, dmsclient) - changed = True - else: - exit_message = "Check mode enabled" - - elif state == "absent": - if replication_subnet_exists(subnet_group): - if not module.check_mode: - replication_subnet_group_delete(module, dmsclient) - changed = True - exit_message = "Replication subnet group deleted" - else: - exit_message = "Check mode enabled" - changed = True - - else: - changed = False - exit_message = "Replication subnet group does not exist" - - module.exit_json(changed=changed, msg=exit_message) - - -if __name__ == "__main__": - main() diff --git a/dynamodb_table.py b/dynamodb_table.py deleted file mode 100644 index 66470c2b9c3..00000000000 --- a/dynamodb_table.py +++ /dev/null @@ -1,1092 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: dynamodb_table -version_added: 1.0.0 -short_description: Create, update or delete AWS DynamoDB tables -description: - - Create or delete AWS DynamoDB tables. - - Can update the provisioned throughput on existing tables. - - Returns the status of the specified table. -author: - - Alan Loi (@loia) -options: - state: - description: - - Create or delete the table. - choices: ['present', 'absent'] - default: 'present' - type: str - name: - description: - - Name of the table. - required: true - type: str - hash_key_name: - description: - - Name of the hash key. - - Required when I(state=present) and table doesn't exist. - type: str - hash_key_type: - description: - - Type of the hash key. - - Defaults to C('STRING') when creating a new table. - choices: ['STRING', 'NUMBER', 'BINARY'] - type: str - range_key_name: - description: - - Name of the range key.
- type: str - range_key_type: - description: - - Type of the range key. - - Defaults to C('STRING') when creating a new range key. - choices: ['STRING', 'NUMBER', 'BINARY'] - type: str - billing_mode: - description: - - Controls whether provisioned or on-demand tables are created. - choices: ['PROVISIONED', 'PAY_PER_REQUEST'] - type: str - read_capacity: - description: - - Read throughput capacity (units) to provision. - - Defaults to C(1) when creating a new table. - type: int - write_capacity: - description: - - Write throughput capacity (units) to provision. - - Defaults to C(1) when creating a new table. - type: int - indexes: - description: - - List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes don't support updates and don't have throughput. - - "required options: ['name', 'type', 'hash_key_name']" - - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']" - suboptions: - name: - description: The name of the index. - type: str - required: true - type: - description: - - The type of index. - type: str - required: true - choices: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] - hash_key_name: - description: - - The name of the hash-based key. - - Required if index doesn't already exist. - - Can not be modified once the index has been created. - required: false - type: str - hash_key_type: - description: - - The type of the hash-based key. - - Defaults to C('STRING') when creating a new index. - - Can not be modified once the index has been created. - type: str - choices: ['STRING', 'NUMBER', 'BINARY'] - range_key_name: - description: - - The name of the range-based key. - - Can not be modified once the index has been created. - type: str - range_key_type: - type: str - description: - - The type of the range-based key. - - Defaults to C('STRING') when creating a new index. - - Can not be modified once the index has been created. - choices: ['STRING', 'NUMBER', 'BINARY'] - includes: - type: list - description: A list of fields to include when using C(global_include) or C(include) indexes. - elements: str - read_capacity: - description: - - Read throughput capacity (units) to provision for the index. - type: int - write_capacity: - description: - - Write throughput capacity (units) to provision for the index. - type: int - default: [] - type: list - elements: dict - table_class: - description: - - The class of the table. - - Requires at least botocore version 1.23.18. - choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS'] - type: str - version_added: 3.1.0 - wait_timeout: - description: - - How long (in seconds) to wait for creation / update / deletion to complete. - - AWS only allows secondary indexes to be updated one at a time; this module will automatically update them - serially, and the timeout will be applied separately to each index. - aliases: ['wait_for_active_timeout'] - default: 900 - type: int - wait: - description: - - When I(wait=True) the module will wait for up to I(wait_timeout) seconds - for index updates, table creation or deletion to complete before returning.
- default: True - type: bool -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create dynamo table with hash and range primary key - community.aws.dynamodb_table: - name: my-table - region: us-east-1 - hash_key_name: id - hash_key_type: STRING - range_key_name: create_time - range_key_type: NUMBER - read_capacity: 2 - write_capacity: 2 - tags: - tag_name: tag_value - -- name: Update capacity on existing dynamo table - community.aws.dynamodb_table: - name: my-table - region: us-east-1 - read_capacity: 10 - write_capacity: 10 - -- name: Create pay-per-request table - community.aws.dynamodb_table: - name: my-table - region: us-east-1 - hash_key_name: id - hash_key_type: STRING - billing_mode: PAY_PER_REQUEST - -- name: set index on existing dynamo table - community.aws.dynamodb_table: - name: my-table - region: us-east-1 - indexes: - - name: NamedIndex - type: global_include - hash_key_name: id - range_key_name: create_time - includes: - - other_field - - other_field2 - read_capacity: 10 - write_capacity: 10 - -- name: Delete dynamo table - community.aws.dynamodb_table: - name: my-table - region: us-east-1 - state: absent -""" - -RETURN = r""" -table: - description: The returned table params from the describe API call. - returned: success - type: complex - contains: {} - sample: { - "arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", - "attribute_definitions": [ - { - "attribute_name": "id", - "attribute_type": "N" - } - ], - "billing_mode": "PROVISIONED", - "creation_date_time": "2022-02-04T13:36:01.578000+00:00", - "id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", - "item_count": 0, - "key_schema": [ - { - "attribute_name": "id", - "key_type": "HASH" - } - ], - "name": "ansible-test-14482047-alinas-mbp", - "provisioned_throughput": { - "number_of_decreases_today": 0, - "read_capacity_units": 1, - "write_capacity_units": 1 - }, - "size": 0, - "status": "ACTIVE", - "table_arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", - "table_id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", - "table_name": "ansible-test-table", - "table_size_bytes": 0, - "table_status": "ACTIVE", - "tags": {} - } -table_status: - description: The current status of the table. 
- returned: success - type: str - sample: ACTIVE -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_indexes_active -from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_exists -from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_not_exists -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - -DYNAMO_TYPE_DEFAULT = "STRING" -INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"] -INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [ - "hash_key_type", - "range_key_name", - "range_key_type", - "includes", - "read_capacity", - "write_capacity", -] -INDEX_TYPE_OPTIONS = ["all", "global_all", "global_include", "global_keys_only", "include", "keys_only"] -# Map in both directions -DYNAMO_TYPE_MAP_LONG = {"STRING": "S", "NUMBER": "N", "BINARY": "B"} -DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items()) -KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys()) - - -# If you try to update an index while another index is updating, it throws -# LimitExceededException/ResourceInUseException exceptions at you. This can be -# pretty slow, so add plenty of retries... 
-@AWSRetry.jittered_backoff( - retries=45, - delay=5, - max_delay=30, - catch_extra_error_codes=["ResourceInUseException", "ResourceNotFoundException"], -) -def _update_table_with_long_retry(**changes): - return client.update_table(TableName=module.params.get("name"), **changes) - - -# ResourceNotFoundException is expected here if the table doesn't exist -@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"]) -def _describe_table(**params): - return client.describe_table(**params) - - -def wait_exists(): - wait_table_exists( - module, - module.params.get("wait_timeout"), - module.params.get("name"), - ) - - -def wait_not_exists(): - wait_table_not_exists( - module, - module.params.get("wait_timeout"), - module.params.get("name"), - ) - - -def wait_indexes(): - wait_indexes_active( - module, - module.params.get("wait_timeout"), - module.params.get("name"), - ) - - -def _short_type_to_long(short_key): - if not short_key: - return None - return DYNAMO_TYPE_MAP_SHORT.get(short_key, None) - - -def _long_type_to_short(long_key): - if not long_key: - return None - return DYNAMO_TYPE_MAP_LONG.get(long_key, None) - - -def _schema_dict(key_name, key_type): - return dict( - AttributeName=key_name, - KeyType=key_type, - ) - - -def _merge_index_params(index, current_index): - idx = dict(current_index) - idx.update(index) - return idx - - -def _decode_primary_index(current_table): - """ - Decodes the primary index info from the current table definition - splitting it up into the keys we use as parameters - """ - # The schema/attribute definitions are a list of dicts which need the same - # treatment as boto3's tag lists - schema = boto3_tag_list_to_ansible_dict( - current_table.get("key_schema", []), - # Map from 'HASH'/'RANGE' to attribute name - tag_name_key_name="key_type", - tag_value_key_name="attribute_name", - ) - attributes = boto3_tag_list_to_ansible_dict( - current_table.get("attribute_definitions", []), - # Map from attribute name to 'S'/'N'/'B'. 
- tag_name_key_name="attribute_name", - tag_value_key_name="attribute_type", - ) - - hash_key_name = schema.get("HASH") - hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None)) - range_key_name = schema.get("RANGE", None) - range_key_type = _short_type_to_long(attributes.get(range_key_name, None)) - - return dict( - hash_key_name=hash_key_name, - hash_key_type=hash_key_type, - range_key_name=range_key_name, - range_key_type=range_key_type, - ) - - -def _decode_index(index_data, attributes, type_prefix=""): - try: - index_map = dict( - name=index_data["index_name"], - ) - - index_data = dict(index_data) - index_data["attribute_definitions"] = attributes - - index_map.update(_decode_primary_index(index_data)) - - throughput = index_data.get("provisioned_throughput", {}) - index_map["provisioned_throughput"] = throughput - if throughput: - index_map["read_capacity"] = throughput.get("read_capacity_units") - index_map["write_capacity"] = throughput.get("write_capacity_units") - - projection = index_data.get("projection", {}) - if projection: - index_map["type"] = type_prefix + projection.get("projection_type") - index_map["includes"] = projection.get("non_key_attributes", []) - - return index_map - except Exception as e: - module.fail_json_aws(e, msg="Decode failure", index_data=index_data) - - -def compatability_results(current_table): - if not current_table: - return dict() - - billing_mode = current_table.get("billing_mode") - - primary_indexes = _decode_primary_index(current_table) - - hash_key_name = primary_indexes.get("hash_key_name") - hash_key_type = primary_indexes.get("hash_key_type") - range_key_name = primary_indexes.get("range_key_name") - range_key_type = primary_indexes.get("range_key_type") - - indexes = list() - global_indexes = current_table.get("_global_index_map", {}) - local_indexes = current_table.get("_local_index_map", {}) - for index in global_indexes: - idx = dict(global_indexes[index]) - idx.pop("provisioned_throughput", None) - indexes.append(idx) - for index in local_indexes: - idx = dict(local_indexes[index]) - idx.pop("provisioned_throughput", None) - indexes.append(idx) - - compat_results = dict( - hash_key_name=hash_key_name, - hash_key_type=hash_key_type, - range_key_name=range_key_name, - range_key_type=range_key_type, - indexes=indexes, - billing_mode=billing_mode, - region=module.region, - table_name=current_table.get("table_name", None), - table_class=current_table.get("table_class_summary", {}).get("table_class", None), - table_status=current_table.get("table_status", None), - tags=current_table.get("tags", {}), - ) - - if billing_mode == "PROVISIONED": - throughput = current_table.get("provisioned_throughput", {}) - compat_results["read_capacity"] = throughput.get("read_capacity_units", None) - compat_results["write_capacity"] = throughput.get("write_capacity_units", None) - - return compat_results - - -def get_dynamodb_table(): - table_name = module.params.get("name") - try: - table = _describe_table(TableName=table_name) - except is_boto3_error_code("ResourceNotFoundException"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe table") - - table = table["Table"] - try: - tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"] - except is_boto3_error_code("AccessDeniedException"): - module.warn("Permission denied when listing tags") - tags = [] - except ( - 
botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to list table tags") - - tags = boto3_tag_list_to_ansible_dict(tags) - - table = camel_dict_to_snake_dict(table) - - # Put some of the values into places people will expect them - table["arn"] = table["table_arn"] - table["name"] = table["table_name"] - table["status"] = table["table_status"] - table["id"] = table["table_id"] - table["size"] = table["table_size_bytes"] - table["tags"] = tags - - if "table_class_summary" in table: - table["table_class"] = table["table_class_summary"]["table_class"] - - # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST - # and when updating the billing_mode - if "billing_mode_summary" in table: - table["billing_mode"] = table["billing_mode_summary"]["billing_mode"] - else: - table["billing_mode"] = "PROVISIONED" - - # convert indexes into something we can easily search against - attributes = table["attribute_definitions"] - global_index_map = dict() - local_index_map = dict() - for index in table.get("global_secondary_indexes", []): - idx = _decode_index(index, attributes, type_prefix="global_") - global_index_map[idx["name"]] = idx - for index in table.get("local_secondary_indexes", []): - idx = _decode_index(index, attributes) - local_index_map[idx["name"]] = idx - table["_global_index_map"] = global_index_map - table["_local_index_map"] = local_index_map - - return table - - -def _generate_attribute_map(): - """ - Builds a map of Key Names to Type - """ - attributes = dict() - - for index in (module.params, *module.params.get("indexes")): - # run through hash_key_name and range_key_name - for t in ["hash", "range"]: - key_name = index.get(t + "_key_name") - if not key_name: - continue - key_type = index.get(t + "_key_type") or DYNAMO_TYPE_DEFAULT - _type = _long_type_to_short(key_type) - if key_name in attributes: - if _type != attributes[key_name]: - module.fail_json( - msg="Conflicting attribute type", type_1=_type, type_2=attributes[key_name], key_name=key_name - ) - else: - attributes[key_name] = _type - - return attributes - - -def _generate_attributes(): - attributes = _generate_attribute_map() - - # Use ansible_dict_to_boto3_tag_list to generate the list of dicts - # format we need - attrs = ansible_dict_to_boto3_tag_list( - attributes, tag_name_key_name="AttributeName", tag_value_key_name="AttributeType" - ) - return list(attrs) - - -def _generate_throughput(params=None): - if not params: - params = module.params - - read_capacity = params.get("read_capacity") or 1 - write_capacity = params.get("write_capacity") or 1 - throughput = dict( - ReadCapacityUnits=read_capacity, - WriteCapacityUnits=write_capacity, - ) - - return throughput - - -def _generate_schema(params=None): - if not params: - params = module.params - - schema = list() - hash_key_name = params.get("hash_key_name") - range_key_name = params.get("range_key_name") - - if hash_key_name: - entry = _schema_dict(hash_key_name, "HASH") - schema.append(entry) - if range_key_name: - entry = _schema_dict(range_key_name, "RANGE") - schema.append(entry) - - return schema - - -def _primary_index_changes(current_table): - primary_index = _decode_primary_index(current_table) - - hash_key_name = primary_index.get("hash_key_name") - _hash_key_name = module.params.get("hash_key_name") - hash_key_type = primary_index.get("hash_key_type") - _hash_key_type = module.params.get("hash_key_type") - range_key_name = 
primary_index.get("range_key_name") - _range_key_name = module.params.get("range_key_name") - range_key_type = primary_index.get("range_key_type") - _range_key_type = module.params.get("range_key_type") - - changed = list() - - if _hash_key_name and (_hash_key_name != hash_key_name): - changed.append("hash_key_name") - if _hash_key_type and (_hash_key_type != hash_key_type): - changed.append("hash_key_type") - if _range_key_name and (_range_key_name != range_key_name): - changed.append("range_key_name") - if _range_key_type and (_range_key_type != range_key_type): - changed.append("range_key_type") - - return changed - - -def _throughput_changes(current_table, params=None): - if not params: - params = module.params - - throughput = current_table.get("provisioned_throughput", {}) - read_capacity = throughput.get("read_capacity_units", None) - _read_capacity = params.get("read_capacity") or read_capacity - write_capacity = throughput.get("write_capacity_units", None) - _write_capacity = params.get("write_capacity") or write_capacity - - if (read_capacity != _read_capacity) or (write_capacity != _write_capacity): - return dict( - ReadCapacityUnits=_read_capacity, - WriteCapacityUnits=_write_capacity, - ) - - return dict() - - -def _generate_global_indexes(billing_mode): - index_exists = dict() - indexes = list() - - include_throughput = True - - if billing_mode == "PAY_PER_REQUEST": - include_throughput = False - - for index in module.params.get("indexes"): - if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: - continue - name = index.get("name") - if name in index_exists: - module.fail_json(msg=f"Duplicate key {name} in list of global indexes") - # Convert the type name to upper case and remove the global_ - index["type"] = index["type"].upper()[7:] - index = _generate_index(index, include_throughput) - index_exists[name] = True - indexes.append(index) - - return indexes - - -def _generate_local_indexes(): - index_exists = dict() - indexes = list() - - for index in module.params.get("indexes"): - if index.get("type") not in ["all", "include", "keys_only"]: - continue - name = index.get("name") - if name in index_exists: - module.fail_json(msg=f"Duplicate key {name} in list of local indexes") - index["type"] = index["type"].upper() - index = _generate_index(index, False) - index_exists[name] = True - indexes.append(index) - - return indexes - - -def _generate_global_index_map(current_table): - global_index_map = dict() - existing_indexes = current_table["_global_index_map"] - for index in module.params.get("indexes"): - if index.get("type") not in ["global_all", "global_include", "global_keys_only"]: - continue - name = index.get("name") - if name in global_index_map: - module.fail_json(msg=f"Duplicate key {name} in list of global indexes") - idx = _merge_index_params(index, existing_indexes.get(name, {})) - # Convert the type name to upper case and remove the global_ - idx["type"] = idx["type"].upper()[7:] - global_index_map[name] = idx - return global_index_map - - -def _generate_local_index_map(current_table): - local_index_map = dict() - existing_indexes = current_table["_local_index_map"] - for index in module.params.get("indexes"): - if index.get("type") not in ["all", "include", "keys_only"]: - continue - name = index.get("name") - if name in local_index_map: - module.fail_json(msg=f"Duplicate key {name} in list of local indexes") - idx = _merge_index_params(index, existing_indexes.get(name, {})) - # Convert the type name to upper case - idx["type"] = 
idx["type"].upper() - local_index_map[name] = idx - return local_index_map - - -def _generate_index(index, include_throughput=True): - key_schema = _generate_schema(index) - throughput = _generate_throughput(index) - non_key_attributes = index["includes"] or [] - projection = dict( - ProjectionType=index["type"], - ) - if index["type"] != "ALL": - if non_key_attributes: - projection["NonKeyAttributes"] = non_key_attributes - else: - if non_key_attributes: - module.fail_json( - "DynamoDB does not support specifying non-key-attributes ('includes') for indexes of type 'all'. Index" - f" name: {index['name']}" - ) - - idx = dict( - IndexName=index["name"], - KeySchema=key_schema, - Projection=projection, - ) - - if include_throughput: - idx["ProvisionedThroughput"] = throughput - - return idx - - -def _attribute_changes(current_table): - # TODO (future) It would be nice to catch attempts to change types here. - return _generate_attributes() - - -def _global_index_changes(current_table): - current_global_index_map = current_table["_global_index_map"] - global_index_map = _generate_global_index_map(current_table) - - current_billing_mode = current_table.get("billing_mode") - - if module.params.get("billing_mode") is None: - billing_mode = current_billing_mode - else: - billing_mode = module.params.get("billing_mode") - - include_throughput = True - - if billing_mode == "PAY_PER_REQUEST": - include_throughput = False - - index_changes = list() - - # TODO (future) it would be nice to add support for deleting an index - for name in global_index_map: - idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput)) - if name not in current_global_index_map: - index_changes.append(dict(Create=idx)) - else: - # The only thing we can change is the provisioned throughput. 
- # TODO (future) it would be nice to throw a deprecation here - # rather than dropping other changes on the floor - _current = current_global_index_map[name] - _new = global_index_map[name] - - if include_throughput: - change = dict(_throughput_changes(_current, _new)) - if change: - update = dict( - IndexName=name, - ProvisionedThroughput=change, - ) - index_changes.append(dict(Update=update)) - - return index_changes - - -def _local_index_changes(current_table): - # TODO (future) Changes to Local Indexes aren't possible after creation, - # we should probably throw a deprecation warning here (original module - # also just dropped these changes on the floor) - return [] - - -def _update_table(current_table): - changes = dict() - additional_global_index_changes = list() - - # Get throughput / billing_mode changes - throughput_changes = _throughput_changes(current_table) - if throughput_changes: - changes["ProvisionedThroughput"] = throughput_changes - - current_billing_mode = current_table.get("billing_mode") - new_billing_mode = module.params.get("billing_mode") - - if new_billing_mode is None: - new_billing_mode = current_billing_mode - - if current_billing_mode != new_billing_mode: - changes["BillingMode"] = new_billing_mode - - # Update table_class, using the existing value if none is defined - if module.params.get("table_class"): - if module.params.get("table_class") != current_table.get("table_class"): - changes["TableClass"] = module.params.get("table_class") - - global_index_changes = _global_index_changes(current_table) - if global_index_changes: - changes["GlobalSecondaryIndexUpdates"] = global_index_changes - # Only one index can be changed at a time unless the billing mode is changing; pass the first during the - # main update and deal with the others on a slow retry to wait for - # completion - - if current_billing_mode == new_billing_mode: - if len(global_index_changes) > 1: - changes["GlobalSecondaryIndexUpdates"] = [global_index_changes[0]] - additional_global_index_changes = global_index_changes[1:] - - local_index_changes = _local_index_changes(current_table) - if local_index_changes: - changes["LocalSecondaryIndexUpdates"] = local_index_changes - - if not changes: - return False - - if module.check_mode: - return True - - if global_index_changes or local_index_changes: - changes["AttributeDefinitions"] = _generate_attributes() - - try: - client.update_table(aws_retry=True, TableName=module.params.get("name"), **changes) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update table") - - if additional_global_index_changes: - for index in additional_global_index_changes: - wait_indexes() - try: - _update_table_with_long_retry( - GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - msg="Failed to update table", - changes=changes, - additional_global_index_changes=additional_global_index_changes, - ) - - return True - - -def _update_tags(current_table): - _tags = module.params.get("tags") - if _tags is None: - return False - - tags_to_add, tags_to_remove = compare_aws_tags( - current_table["tags"], module.params.get("tags"), purge_tags=module.params.get("purge_tags") - ) - - # If neither needs updating we can return already - if not (tags_to_add or tags_to_remove): - return False - - if module.check_mode: - return True - - if tags_to_add: - try: -
client.tag_resource( - aws_retry=True, - ResourceArn=current_table["arn"], - Tags=ansible_dict_to_boto3_tag_list(tags_to_add), - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to tag table") - if tags_to_remove: - try: - client.untag_resource( - aws_retry=True, - ResourceArn=current_table["arn"], - TagKeys=tags_to_remove, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to untag table") - - return True - - -def update_table(current_table): - primary_index_changes = _primary_index_changes(current_table) - if primary_index_changes: - module.fail_json( - msg=f"DynamoDB does not support updating the primary keys on a table. Changed parameters are: {primary_index_changes}" - ) - - changed = False - changed |= _update_table(current_table) - changed |= _update_tags(current_table) - - if module.params.get("wait"): - wait_exists() - wait_indexes() - - return changed - - -def create_table(): - table_name = module.params.get("name") - table_class = module.params.get("table_class") - hash_key_name = module.params.get("hash_key_name") - billing_mode = module.params.get("billing_mode") - - if billing_mode is None: - billing_mode = "PROVISIONED" - - tags = ansible_dict_to_boto3_tag_list(module.params.get("tags") or {}) - - if not hash_key_name: - module.fail_json(msg='"hash_key_name" must be provided when creating a new table.') - - if module.check_mode: - return True - - if billing_mode == "PROVISIONED": - throughput = _generate_throughput() - - attributes = _generate_attributes() - key_schema = _generate_schema() - local_indexes = _generate_local_indexes() - global_indexes = _generate_global_indexes(billing_mode) - - params = dict( - TableName=table_name, - AttributeDefinitions=attributes, - KeySchema=key_schema, - Tags=tags, - BillingMode=billing_mode - # TODO (future) - # StreamSpecification, - # SSESpecification, - ) - - if table_class: - params["TableClass"] = table_class - if billing_mode == "PROVISIONED": - params["ProvisionedThroughput"] = throughput - if local_indexes: - params["LocalSecondaryIndexes"] = local_indexes - if global_indexes: - params["GlobalSecondaryIndexes"] = global_indexes - - try: - client.create_table(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create table") - - if module.params.get("wait"): - wait_exists() - wait_indexes() - - return True - - -def delete_table(current_table): - if not current_table: - return False - - if module.check_mode: - return True - - table_name = module.params.get("name") - - # If an index is mid-update then we have to wait for the update to complete - # before deletion will succeed - long_retry = AWSRetry.jittered_backoff( - retries=45, - delay=5, - max_delay=30, - catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"], - ) - - try: - long_retry(client.delete_table)(TableName=table_name) - except is_boto3_error_code("ResourceNotFoundException"): - return False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete table") - - if module.params.get("wait"): - wait_not_exists() - - return True - - -def main(): - global module - global client - - # TODO (future) It would be good to split global and local indexes.
They have - # different parameters, use a separate namespace for names, - # and local indexes can't be updated. - index_options = dict( - name=dict(type="str", required=True), - # It would be nice to make this optional, but because Local and Global - # indexes are mixed in here we need this to be able to tell to which - # group of indexes the index belongs. - type=dict(type="str", required=True, choices=INDEX_TYPE_OPTIONS), - hash_key_name=dict(type="str", required=False), - hash_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), - range_key_name=dict(type="str", required=False), - range_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES), - includes=dict(type="list", required=False, elements="str"), - read_capacity=dict(type="int", required=False), - write_capacity=dict(type="int", required=False), - ) - - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(required=True, type="str"), - hash_key_name=dict(type="str"), - hash_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), - range_key_name=dict(type="str"), - range_key_type=dict(type="str", choices=KEY_TYPE_CHOICES), - billing_mode=dict(type="str", choices=["PROVISIONED", "PAY_PER_REQUEST"]), - read_capacity=dict(type="int"), - write_capacity=dict(type="int"), - indexes=dict(default=[], type="list", elements="dict", options=index_options), - table_class=dict(type="str", choices=["STANDARD", "STANDARD_INFREQUENT_ACCESS"]), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - wait=dict(type="bool", default=True), - wait_timeout=dict(default=900, type="int", aliases=["wait_for_active_timeout"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - check_boto3=False, - ) - - retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"], - ) - client = module.client("dynamodb", retry_decorator=retry_decorator) - - if module.params.get("table_class"): - module.require_botocore_at_least("1.23.18", reason="to set table_class") - - current_table = get_dynamodb_table() - changed = False - table = None - results = dict() - - state = module.params.get("state") - if state == "present": - if current_table: - changed |= update_table(current_table) - else: - changed |= create_table() - table = get_dynamodb_table() - elif state == "absent": - changed |= delete_table(current_table) - - compat_results = compatability_results(table) - if compat_results: - results.update(compat_results) - - results["changed"] = changed - if table: - # These are used to pass computed data around and are not needed by users - table.pop("_global_index_map", None) - table.pop("_local_index_map", None) - results["table"] = table - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/dynamodb_ttl.py b/dynamodb_ttl.py deleted file mode 100644 index eca236cf49a..00000000000 --- a/dynamodb_ttl.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: dynamodb_ttl -version_added: 1.0.0 -short_description: Set TTL for a given DynamoDB table -description: -- Sets the TTL for a given DynamoDB table. -options: - state: - description: - - Whether to enable or disable TTL on the table.
- choices: ['enable', 'disable'] - required: false - type: str - table_name: - description: - - Name of the DynamoDB table to work on. - required: true - type: str - attribute_name: - description: - - The name of the Time To Live attribute used to store the expiration time for items in the table. - - This appears to be required by the API even when disabling TTL. - required: true - type: str - -author: -- Ted Timmons (@tedder) -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: enable TTL on my cowfacts table - community.aws.dynamodb_ttl: - state: enable - table_name: cowfacts - attribute_name: cow_deleted_date - -- name: disable TTL on my cowfacts table - community.aws.dynamodb_ttl: - state: disable - table_name: cowfacts - attribute_name: cow_deleted_date -""" - -RETURN = r""" -current_status: - description: current or new TTL specification. - type: dict - returned: always - sample: - - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" } - - { "AttributeName": "deploy_timestamp", "Enabled": true } -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_current_ttl_state(c, table_name): - """Fetch the state dict for a table.""" - current_state = c.describe_time_to_live(TableName=table_name) - return current_state.get("TimeToLiveDescription") - - -def does_state_need_changing(attribute_name, desired_state, current_spec): - """Run checks to see if the table needs to be modified. Basically a dirty check.""" - if not current_spec: - # we don't have an entry (or a table?) - return True - - if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]: - return True - if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]: - return True - if attribute_name != current_spec.get("AttributeName"): - return True - - return False - - -def set_ttl_state(c, table_name, state, attribute_name): - """Set our specification. Returns the update_time_to_live specification dict, - which is different than the describe_* call.""" - is_enabled = False - if state.lower() == "enable": - is_enabled = True - - ret = c.update_time_to_live( - TableName=table_name, - TimeToLiveSpecification={ - "Enabled": is_enabled, - "AttributeName": attribute_name, - }, - ) - - return ret.get("TimeToLiveSpecification") - - -def main(): - argument_spec = dict( - state=dict(choices=["enable", "disable"]), - table_name=dict(required=True), - attribute_name=dict(required=True), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - ) - - try: - dbclient = module.client("dynamodb") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - result = {"changed": False} - state = module.params["state"] - - # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the - # methods so it's easier to do here. 
- try: - current_state = get_current_ttl_state(dbclient, module.params["table_name"]) - - if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state): - # changes needed - new_state = set_ttl_state( - dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"] - ) - result["current_status"] = new_state - result["changed"] = True - else: - # no changes needed - result["current_status"] = current_state - - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Failed to get or update TTL state") - except botocore.exceptions.ParamValidationError as e: - module.fail_json_aws(e, msg="Failed due to invalid parameters") - except ValueError as e: - module.fail_json_aws(e, msg="Failed") - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/ec2_ami_copy.py b/ec2_ami_copy.py deleted file mode 100644 index 170a564e15d..00000000000 --- a/ec2_ami_copy.py +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_ami_copy -version_added: 1.0.0 -short_description: copies an AMI between AWS regions and returns the new image ID -description: - - Copies an AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.) -options: - source_region: - description: - - The source region the AMI should be copied from. - required: true - type: str - source_image_id: - description: - - The ID of the AMI in the source region that should be copied. - required: true - type: str - name: - description: - - The name for the new AMI. (As of 2.3 the default is C(default); in prior versions it was C(null).) - default: "default" - type: str - description: - description: - - An optional human-readable string describing the contents and purpose of the new AMI. - type: str - default: '' - encrypted: - description: - - Whether or not the destination snapshots of the copied AMI should be encrypted. - type: bool - default: false - kms_key_id: - description: - - KMS key ID used to encrypt the image. If not specified, uses the default EBS Customer Master Key (CMK) for your account. - type: str - wait: - description: - - Wait for the copied AMI to be in state C(available) before returning. - type: bool - default: false - wait_timeout: - description: - - How long before wait gives up, in seconds. - - Prior to 2.3 the default was C(1200). - - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults. - - This was re-enabled in 2.6 to allow timeouts greater than 10 minutes. - default: 600 - type: int - tags: - description: - - 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})' - type: dict - aliases: ['resource_tags'] - tag_equality: - description: - - Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match - in an existing AMI, the AMI will not be copied again.
- default: false - type: bool -author: - - Amir Moulavi (@amir343) - - Tim C (@defunctio) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Basic AMI Copy - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - -- name: AMI copy wait until available - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - wait: true - wait_timeout: 1200 # Default timeout is 600 - register: image_id - -- name: Named AMI copy - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - name: My-Awesome-AMI - description: latest patch - -- name: Tagged AMI copy (will not copy the same AMI twice) - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - tags: - Name: My-Super-AMI - Patch: 1.2.3 - tag_equality: true - -- name: Encrypted AMI copy - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - encrypted: true - -- name: Encrypted AMI copy with specified key - community.aws.ec2_ami_copy: - source_region: us-east-1 - region: eu-west-1 - source_image_id: ami-xxxxxxx - encrypted: true - kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -""" - -RETURN = r""" -image_id: - description: AMI ID of the copied AMI - returned: always - type: str - sample: ami-e689729e -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError - from botocore.exceptions import WaiterError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def copy_image(module, ec2): - """ - Copies an AMI - - module : AnsibleAWSModule object - ec2: ec2 connection object - """ - - image = None - changed = False - tags = module.params.get("tags") - - params = { - "SourceRegion": module.params.get("source_region"), - "SourceImageId": module.params.get("source_image_id"), - "Name": module.params.get("name"), - "Description": module.params.get("description"), - "Encrypted": module.params.get("encrypted"), - } - if module.params.get("kms_key_id"): - params["KmsKeyId"] = module.params.get("kms_key_id") - - try: - if module.params.get("tag_equality"): - filters = [{"Name": f"tag:{k}", "Values": [v]} for (k, v) in module.params.get("tags").items()] - filters.append(dict(Name="state", Values=["available", "pending"])) - images = ec2.describe_images(Filters=filters) - if len(images["Images"]) > 0: - image = images["Images"][0] - if not image: - image = ec2.copy_image(**params) - image_id = image["ImageId"] - if tags: - ec2.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags)) - changed = True - - if module.params.get("wait"): - delay = 15 - max_attempts = module.params.get("wait_timeout") // delay - image_id = image.get("ImageId") - ec2.get_waiter("image_available").wait( - ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} - ) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(image)) - 
except WaiterError as e: - module.fail_json_aws(e, msg="An error occurred waiting for the image to become available") - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not copy AMI") - except Exception as e: - module.fail_json(msg=f"Unhandled exception. ({to_native(e)})") - - -def main(): - argument_spec = dict( - source_region=dict(required=True), - source_image_id=dict(required=True), - name=dict(default="default"), - description=dict(default=""), - encrypted=dict(type="bool", default=False, required=False), - kms_key_id=dict(type="str", required=False), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=600), - tags=dict(type="dict", aliases=["resource_tags"]), - tag_equality=dict(type="bool", default=False), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - ec2 = module.client("ec2") - copy_image(module, ec2) - - -if __name__ == "__main__": - main() diff --git a/ec2_carrier_gateway.py b/ec2_carrier_gateway.py deleted file mode 100644 index 6517879c0f8..00000000000 --- a/ec2_carrier_gateway.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_carrier_gateway -version_added: 6.0.0 -short_description: Manage an AWS VPC Carrier gateway -description: - - Manage an AWS VPC Carrier gateway. -author: - - "Marco Braga (@mtulio)" -options: - vpc_id: - description: - - The VPC ID for the VPC in which to manage the Carrier Gateway. - required: true - type: str - carrier_gateway_id: - description: - - The Carrier Gateway ID to manage the Carrier Gateway. - required: false - type: str - state: - description: - - Create or terminate the Carrier Gateway. - default: present - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Ensure that the VPC has an Carrier Gateway. -# The Carrier Gateway ID can be accessed via {{cagw.carrier_gateway_id}} for use in setting up Route tables etc. -- name: Create Carrier gateway - community.aws.ec2_carrier_gateway: - vpc_id: vpc-abcdefgh - state: present - register: cagw - -- name: Create Carrier gateway with tags - community.aws.ec2_carrier_gateway: - vpc_id: vpc-abcdefgh - state: present - tags: - Tag1: tag1 - Tag2: tag2 - register: cagw - -- name: Delete Carrier gateway - community.aws.ec2_carrier_gateway: - vpc_id: vpc-abcdefgh - carrier_gateway_id: "cagw-123" - state: absent - register: vpc_cagw_delete -""" - -RETURN = r""" -changed: - description: If any changes have been made to the Carrier Gateway. - type: bool - returned: always - sample: - changed: false -carrier_gateway_id: - description: The unique identifier for the Carrier Gateway. - type: str - returned: I(state=present) - sample: - carrier_gateway_id: "cagw-XXXXXXXX" -tags: - description: The tags associated the Carrier Gateway. - type: dict - returned: I(state=present) - sample: - tags: - "Ansible": "Test" -vpc_id: - description: The VPC ID associated with the Carrier Gateway. 
- type: str - returned: I(state=present) - sample: - vpc_id: "vpc-XXXXXXXX" -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=10, delay=10) -def describe_cagws_with_backoff(connection, **params): - paginator = connection.get_paginator("describe_carrier_gateways") - return paginator.paginate(**params).build_full_result()["CarrierGateways"] - - -class AnsibleEc2Cagw: - def __init__(self, module, results): - self._module = module - self._results = results - self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - self._check_mode = self._module.check_mode - - def process(self): - vpc_id = self._module.params.get("vpc_id") - state = self._module.params.get("state", "present") - tags = self._module.params.get("tags") - purge_tags = self._module.params.get("purge_tags") - - if state == "present": - self.ensure_cagw_present(vpc_id, tags, purge_tags) - elif state == "absent": - self.ensure_cagw_absent(vpc_id) - - def get_matching_cagw(self, vpc_id, carrier_gateway_id=None): - """ - Returns the carrier gateway found. 
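        If EC2 returns more than one carrier gateway for the VPC, the module
        aborts rather than guessing which gateway to manage.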
- Parameters: - vpc_id (str): VPC ID - carrier_gateway_id (str): Carrier Gateway ID, if specified - Returns: - cagw (dict): dict of cagw found, None if none found - """ - filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) - try: - if not carrier_gateway_id: - cagws = describe_cagws_with_backoff( - self._connection, - Filters=filters, - ) - else: - cagws = describe_cagws_with_backoff( - self._connection, - CarrierGatewayIds=[carrier_gateway_id], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e) - - cagw = None - if len(cagws) > 1: - self._module.fail_json(msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting") - elif cagws: - cagw = camel_dict_to_snake_dict(cagws[0]) - - return cagw - - @staticmethod - def get_cagw_info(cagw, vpc_id): - return { - "carrier_gateway_id": cagw["carrier_gateway_id"], - "tags": boto3_tag_list_to_ansible_dict(cagw["tags"]), - "vpc_id": vpc_id, - } - - def ensure_cagw_absent(self, vpc_id): - cagw = self.get_matching_cagw(vpc_id) - if cagw is None: - return self._results - - if self._check_mode: - self._results["changed"] = True - return self._results - - try: - self._results["changed"] = True - self._connection.delete_carrier_gateway( - aws_retry=True, - CarrierGatewayId=cagw["carrier_gateway_id"], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Unable to delete Carrier Gateway") - - return self._results - - def ensure_cagw_present(self, vpc_id, tags, purge_tags): - cagw = self.get_matching_cagw(vpc_id) - - if cagw is None: - if self._check_mode: - self._results["changed"] = True - self._results["carrier_gateway_id"] = None - return self._results - - try: - response = self._connection.create_carrier_gateway(VpcId=vpc_id, aws_retry=True) - cagw = camel_dict_to_snake_dict(response["CarrierGateway"]) - self._results["changed"] = True - except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway.") as e: - self._module.fail_json(msg="You must be opted into a wavelength zone to create a carrier gateway") - except botocore.exceptions.WaiterError as e: - self._module.fail_json_aws(e, msg="No Carrier Gateway exists.") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg="Unable to create Carrier Gateway") - - # Modify tags - self._results["changed"] |= ensure_ec2_tags( - self._connection, - self._module, - cagw["carrier_gateway_id"], - resource_type="carrier-gateway", - tags=tags, - purge_tags=purge_tags, - retry_codes="InvalidCarrierGatewayID.NotFound", - ) - - # Update cagw - cagw = self.get_matching_cagw(vpc_id, carrier_gateway_id=cagw["carrier_gateway_id"]) - cagw_info = self.get_cagw_info(cagw, vpc_id) - self._results.update(cagw_info) - - return self._results - - -def main(): - argument_spec = dict( - carrier_gateway_id=dict(required=False), - vpc_id=dict(required=True), - state=dict(default="present", choices=["present", "absent"]), - tags=dict(required=False, type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=[["vpc_id", "carrier_gateway_id"]], - supports_check_mode=True, - ) - results = dict( - changed=False, - ) - cagw_manager = AnsibleEc2Cagw(module=module, results=results) - cagw_manager.process() - - module.exit_json(**results) - - -if __name__ == 
"__main__": - main() diff --git a/ec2_carrier_gateway_info.py b/ec2_carrier_gateway_info.py deleted file mode 100644 index 43d77d59aa6..00000000000 --- a/ec2_carrier_gateway_info.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_carrier_gateway_info -version_added: 6.0.0 -short_description: Gather information about carrier gateways in AWS -description: - - Gather information about carrier gateways in AWS. -author: - - "Marco Braga (@mtulio)" -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCarrierGateways.html) for possible filters. - required: false - default: {} - type: dict - carrier_gateway_ids: - description: - - Get details of specific Carrier Gateway ID. - required: false - type: list - elements: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# # Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all Carrier Gateways for an account or profile - community.aws.ec2_carrier_gateway_info: - region: ap-southeast-2 - register: cagw_info - -- name: Gather information about a filtered list of Carrier Gateways - community.aws.ec2_carrier_gateway_info: - region: ap-southeast-2 - filters: - "tag:Name": "cagw-123" - register: cagw_info - -- name: Gather information about a specific carrier gateway by CarrierGatewayId - community.aws.ec2_carrier_gateway_info: - region: ap-southeast-2 - carrier_gateway_ids: cagw-c1231234 - register: cagw_info -""" - -RETURN = r""" -changed: - description: True if listing the carrier gateways succeeds. - type: bool - returned: always - sample: "false" -carrier_gateways: - description: The carrier gateways for the account. - returned: always - type: complex - contains: - vpc_id: - description: The ID of the VPC. - returned: I(state=present) - type: str - sample: vpc-02123b67 - carrier_gateway_id: - description: The ID of the carrier gateway. - returned: I(state=present) - type: str - sample: cagw-2123634d - tags: - description: Any tags assigned to the carrier gateway. 
- returned: I(state=present) - type: dict - sample: - tags: - "Ansible": "Test" -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_carrier_gateway_info(carrier_gateway): - tags = boto3_tag_list_to_ansible_dict(carrier_gateway["Tags"]) - ignore_list = [] - carrier_gateway_info = { - "CarrierGatewayId": carrier_gateway["CarrierGatewayId"], - "VpcId": carrier_gateway["VpcId"], - "Tags": tags, - } - - carrier_gateway_info = camel_dict_to_snake_dict(carrier_gateway_info, ignore_list=ignore_list) - return carrier_gateway_info - - -def list_carrier_gateways(connection, module): - params = dict() - - params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - if module.params.get("carrier_gateway_ids"): - params["CarrierGatewayIds"] = module.params.get("carrier_gateway_ids") - - try: - all_carrier_gateways = connection.describe_carrier_gateways(aws_retry=True, **params) - except is_boto3_error_code("InvalidCarrierGatewayID.NotFound"): - module.fail_json("CarrierGateway not found") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "Unable to describe carrier gateways") - - return [get_carrier_gateway_info(cagw) for cagw in all_carrier_gateways["CarrierGateways"]] - - -def main(): - argument_spec = dict( - carrier_gateway_ids=dict(default=None, elements="str", type="list"), - filters=dict(default={}, type="dict"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - # Validate Requirements - try: - connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - results = list_carrier_gateways(connection, module) - - module.exit_json(carrier_gateways=results) - - -if __name__ == "__main__": - main() diff --git a/ec2_customer_gateway.py b/ec2_customer_gateway.py deleted file mode 100644 index 19fc8eab7f5..00000000000 --- a/ec2_customer_gateway.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_customer_gateway -version_added: 1.0.0 -short_description: Manage an AWS customer gateway -description: - - Manage an AWS customer gateway. -author: - - Michael Baydoun (@MichaelBaydoun) -notes: - - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the - first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. 
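The `filters` dict accepted by these `_info` modules is translated into the `Filters` list the EC2 API expects by `ansible_dict_to_boto3_filter_list`, as used in `list_carrier_gateways()` above. A minimal standalone re-implementation of that translation, for illustration only (the real helper lives in the `amazon.aws` module_utils):

```python
# Illustrative re-implementation of the dict -> Filters translation; not
# the actual amazon.aws helper, just the shape of what it produces.
def to_boto3_filter_list(filters_dict):
    return [
        {"Name": key, "Values": value if isinstance(value, list) else [value]}
        for key, value in filters_dict.items()
    ]

print(to_boto3_filter_list({"tag:Name": "cagw-123", "vpc-id": "vpc-abcdefgh"}))
# [{'Name': 'tag:Name', 'Values': ['cagw-123']}, {'Name': 'vpc-id', 'Values': ['vpc-abcdefgh']}]
```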
The subsequent - requests do not create new customer gateway resources. - - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use - customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. -options: - bgp_asn: - description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN). - - Defaults to C(65000) if not specified when I(state=present). - type: int - ip_address: - description: - - Internet-routable IP address for customers gateway, must be a static address. - required: true - type: str - name: - description: - - Name of the customer gateway. - required: true - type: str - routing: - description: - - The type of routing. - choices: ['static', 'dynamic'] - default: dynamic - type: str - state: - description: - - Create or terminate the Customer Gateway. - default: present - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create Customer Gateway - community.aws.ec2_customer_gateway: - bgp_asn: 12345 - ip_address: 1.2.3.4 - name: IndianapolisOffice - region: us-east-1 - register: cgw - -- name: Delete Customer Gateway - community.aws.ec2_customer_gateway: - ip_address: 1.2.3.4 - name: IndianapolisOffice - state: absent - region: us-east-1 - register: cgw -""" - -RETURN = r""" -gateway.customer_gateways: - description: details about the gateway that was created. - returned: success - type: complex - contains: - bgp_asn: - description: The Border Gateway Autonomous System Number. - returned: when exists and gateway is available. - sample: 65123 - type: str - customer_gateway_id: - description: gateway id assigned by amazon. - returned: when exists and gateway is available. - sample: cgw-cb6386a2 - type: str - ip_address: - description: ip address of your gateway device. - returned: when exists and gateway is available. - sample: 1.2.3.4 - type: str - state: - description: state of gateway. - returned: when gateway exists and is available. - sample: available - type: str - tags: - description: Any tags on the gateway. - returned: when gateway exists and is available, and when tags exist. - type: list - type: - description: encryption type. - returned: when gateway exists and is available. 
- sample: ipsec.1 - type: str -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class Ec2CustomerGatewayManager: - def __init__(self, module): - self.module = module - - try: - self.ec2 = module.client("ec2") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=["IncorrectState"]) - def ensure_cgw_absent(self, gw_id): - response = self.ec2.delete_customer_gateway(DryRun=False, CustomerGatewayId=gw_id) - return response - - def ensure_cgw_present(self, bgp_asn, ip_address): - if not bgp_asn: - bgp_asn = 65000 - response = self.ec2.create_customer_gateway( - DryRun=False, - Type="ipsec.1", - PublicIp=ip_address, - BgpAsn=bgp_asn, - ) - return response - - def tag_cgw_name(self, gw_id, name): - response = self.ec2.create_tags( - DryRun=False, - Resources=[ - gw_id, - ], - Tags=[ - {"Key": "Name", "Value": name}, - ], - ) - return response - - def describe_gateways(self, ip_address): - response = self.ec2.describe_customer_gateways( - DryRun=False, - Filters=[ - { - "Name": "state", - "Values": [ - "available", - ], - }, - { - "Name": "ip-address", - "Values": [ - ip_address, - ], - }, - ], - ) - return response - - -def main(): - argument_spec = dict( - bgp_asn=dict(required=False, type="int"), - ip_address=dict(required=True), - name=dict(required=True), - routing=dict(default="dynamic", choices=["dynamic", "static"]), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[ - ("routing", "dynamic", ["bgp_asn"]), - ], - ) - - gw_mgr = Ec2CustomerGatewayManager(module) - - name = module.params.get("name") - - existing = gw_mgr.describe_gateways(module.params["ip_address"]) - - results = dict(changed=False) - if module.params["state"] == "present": - if existing["CustomerGateways"]: - existing["CustomerGateway"] = existing["CustomerGateways"][0] - results["gateway"] = existing - if existing["CustomerGateway"]["Tags"]: - tag_array = existing["CustomerGateway"]["Tags"] - for key, value in enumerate(tag_array): - if value["Key"] == "Name": - current_name = value["Value"] - if current_name != name: - results["name"] = gw_mgr.tag_cgw_name( - results["gateway"]["CustomerGateway"]["CustomerGatewayId"], - module.params["name"], - ) - results["changed"] = True - else: - if not module.check_mode: - results["gateway"] = gw_mgr.ensure_cgw_present( - module.params["bgp_asn"], - module.params["ip_address"], - ) - results["name"] = gw_mgr.tag_cgw_name( - results["gateway"]["CustomerGateway"]["CustomerGatewayId"], - module.params["name"], - ) - results["changed"] = True - - elif module.params["state"] == "absent": - if existing["CustomerGateways"]: - existing["CustomerGateway"] = existing["CustomerGateways"][0] - results["gateway"] = existing - if not module.check_mode: - results["gateway"] = gw_mgr.ensure_cgw_absent(existing["CustomerGateway"]["CustomerGatewayId"]) - results["changed"] = True - - pretty_results = camel_dict_to_snake_dict(results) - module.exit_json(**pretty_results) - 
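The idempotency of `main()` above hinges on the `ip-address` filter in `describe_gateways()`: an existing available gateway with the same static IP is reused instead of creating a duplicate. A minimal boto3 sketch of that describe-then-create flow, assuming configured credentials and placeholder values:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

# Reuse an available gateway with the same static IP, mirroring the module.
existing = ec2.describe_customer_gateways(
    Filters=[
        {"Name": "state", "Values": ["available"]},
        {"Name": "ip-address", "Values": ["1.2.3.4"]},
    ]
)["CustomerGateways"]

if not existing:
    ec2.create_customer_gateway(Type="ipsec.1", PublicIp="1.2.3.4", BgpAsn=65000)
```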
- -if __name__ == "__main__": - main() diff --git a/ec2_customer_gateway_info.py b/ec2_customer_gateway_info.py deleted file mode 100644 index 976d3f370d2..00000000000 --- a/ec2_customer_gateway_info.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_customer_gateway_info -version_added: 1.0.0 -short_description: Gather information about customer gateways in AWS -description: - - Gather information about customer gateways in AWS. -author: - - Madhura Naniwadekar (@Madhura-CSI) -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters. - type: dict - default: {} - customer_gateway_ids: - description: - - Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list. - type: list - elements: str - default: [] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# # Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all customer gateways - community.aws.ec2_customer_gateway_info: - -- name: Gather information about a filtered list of customer gateways, based on tags - community.aws.ec2_customer_gateway_info: - region: ap-southeast-2 - filters: - "tag:Name": test-customer-gateway - "tag:AltName": test-customer-gateway-alt - register: cust_gw_info - -- name: Gather information about a specific customer gateway by specifying customer gateway ID - community.aws.ec2_customer_gateway_info: - region: ap-southeast-2 - customer_gateway_ids: - - 'cgw-48841a09' - - 'cgw-fec021ce' - register: cust_gw_info -""" - -RETURN = r""" -customer_gateways: - description: List of one or more customer gateways. 
- returned: always - type: list - sample: [ - { - "bgp_asn": "65000", - "customer_gateway_id": "cgw-fec844ce", - "customer_gateway_name": "test-customer-gw", - "ip_address": "110.112.113.120", - "state": "available", - "tags": [ - { - "key": "Name", - "value": "test-customer-gw" - } - ], - "type": "ipsec.1" - } - ] -""" - -import json - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def date_handler(obj): - return obj.isoformat() if hasattr(obj, "isoformat") else obj - - -def list_customer_gateways(connection, module): - params = dict() - - params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - params["CustomerGatewayIds"] = module.params.get("customer_gateway_ids") - - try: - result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler)) - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not describe customer gateways") - snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result["CustomerGateways"]] - if snaked_customer_gateways: - for customer_gateway in snaked_customer_gateways: - customer_gateway["tags"] = boto3_tag_list_to_ansible_dict(customer_gateway.get("tags", [])) - customer_gateway_name = customer_gateway["tags"].get("Name") - if customer_gateway_name: - customer_gateway["customer_gateway_name"] = customer_gateway_name - module.exit_json(changed=False, customer_gateways=snaked_customer_gateways) - - -def main(): - argument_spec = dict( - customer_gateway_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict") - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ["customer_gateway_ids", "filters"], - ], - supports_check_mode=True, - ) - - connection = module.client("ec2") - - list_customer_gateways(connection, module) - - -if __name__ == "__main__": - main() diff --git a/ec2_launch_template.py b/ec2_launch_template.py deleted file mode 100644 index 6cd1de3fb0d..00000000000 --- a/ec2_launch_template.py +++ /dev/null @@ -1,863 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_launch_template -version_added: 1.0.0 -short_description: Manage EC2 launch templates -description: -- Create, modify, and delete EC2 Launch Templates, which can be used to - create individual instances or with Autoscaling Groups. -- The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all - parameters on those tasks, be passed a Launch Template which contains - settings like instance size, disk type, subnet, and more. -author: -- Ryan Scott Brown (@ryansb) -options: - template_id: - description: - - The ID for the launch template, can be used for all cases except creating a new Launch Template. 
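One detail worth calling out in `list_customer_gateways()` above: the `json.dumps(..., default=date_handler)` round trip coerces any non-JSON-serializable values in the API response (such as `datetime` objects) into ISO 8601 strings before the result is snake-cased. A self-contained illustration using a synthetic dict, not a real API response:

```python
import json
from datetime import datetime, timezone

def date_handler(obj):
    # Fall back to ISO 8601 text for datetime-like values.
    return obj.isoformat() if hasattr(obj, "isoformat") else obj

raw = {"State": "available", "SomeTimestamp": datetime(2024, 1, 1, tzinfo=timezone.utc)}
clean = json.loads(json.dumps(raw, default=date_handler))
print(clean["SomeTimestamp"])  # 2024-01-01T00:00:00+00:00
```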
- aliases: [id] - type: str - template_name: - description: - - The template name. This must be unique in the region-account combination you are using. - - If no launch template exists with the specified name, a new launch template is created. - - If a launch template with the specified name already exists and the configuration has not changed, - nothing happens. - - If a launch template with the specified name already exists and the configuration has changed, - a new version of the launch template is created. - aliases: [name] - type: str - default_version: - description: - - Which version should be the default when users spin up new instances based on this template? By default, the latest version will be made the default. - type: str - default: latest - version_description: - version_added: 5.5.0 - description: - - The description of a launch template version. - default: "" - type: str - state: - description: - - Whether the launch template should exist or not. - - Deleting specific versions of a launch template is not supported at this time. - choices: [present, absent] - default: present - type: str - block_device_mappings: - description: - - The block device mapping. Supplying both a snapshot ID and an encryption - value as arguments for block-device mapping results in an error. This is - because only blank volumes can be encrypted on start, and these are not - created from a snapshot. If a snapshot is the basis for the volume, it - contains data by definition and its encryption status cannot be changed - using this action. - type: list - elements: dict - suboptions: - device_name: - description: The device name (for example, /dev/sdh or xvdh). - type: str - no_device: - description: Suppresses the specified device included in the block device mapping of the AMI. - type: str - virtual_name: - description: > - The virtual device name (ephemeralN). Instance store volumes are - numbered starting from 0. An instance type with 2 available instance - store volumes can specify mappings for ephemeral0 and ephemeral1. The - number of available instance store volumes depends on the instance - type. After you connect to the instance, you must mount the volume. - type: str - ebs: - description: Parameters used to automatically set up EBS volumes when the instance is launched. - type: dict - suboptions: - delete_on_termination: - description: Indicates whether the EBS volume is deleted on instance termination. - type: bool - encrypted: - description: > - Indicates whether the EBS volume is encrypted. Encrypted volumes - can only be attached to instances that support Amazon EBS - encryption. If you are creating a volume from a snapshot, you - can't specify an encryption value. - type: bool - iops: - description: - - The number of I/O operations per second (IOPS) that the volume - supports. For io1, this represents the number of IOPS that are - provisioned for the volume. For gp2, this represents the baseline - performance of the volume and the rate at which the volume - accumulates I/O credits for bursting. For more information about - General Purpose SSD baseline performance, I/O credits, and - bursting, see Amazon EBS Volume Types in the Amazon Elastic - Compute Cloud User Guide. - - > - Condition: This parameter is required for requests to create io1 - volumes; it is not used in requests to create gp2, st1, sc1, or - standard volumes. - type: int - kms_key_id: - description: The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption. 
- type: str - snapshot_id: - description: The ID of the snapshot to create the volume from. - type: str - volume_size: - description: - - The size of the volume, in GiB. - - "Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size." - type: int - volume_type: - description: The volume type - type: str - cpu_options: - description: - - Choose CPU settings for the EC2 instances that will be created with this template. - - For more information, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) - type: dict - suboptions: - core_count: - description: The number of CPU cores for the instance. - type: int - threads_per_core: - description: > - The number of threads per CPU core. To disable Intel Hyper-Threading - Technology for the instance, specify a value of 1. Otherwise, specify - the default value of 2. - type: int - credit_specification: - description: The credit option for CPU usage of the instance. Valid for T2 or T3 instances only. - type: dict - suboptions: - cpu_credits: - description: > - The credit option for CPU usage of a T2 or T3 instance. Valid values - are C(standard) and C(unlimited). - type: str - disable_api_termination: - description: > - This helps protect instances from accidental termination. If set to true, - you can't terminate the instance using the Amazon EC2 console, CLI, or - API. To change this attribute to false after launch, use - I(ModifyInstanceAttribute). - type: bool - ebs_optimized: - description: > - Indicates whether the instance is optimized for Amazon EBS I/O. This - optimization provides dedicated throughput to Amazon EBS and an optimized - configuration stack to provide optimal Amazon EBS I/O performance. This - optimization isn't available with all instance types. Additional usage - charges apply when using an EBS-optimized instance. - type: bool - elastic_gpu_specifications: - type: list - elements: dict - description: Settings for Elastic GPU attachments. See U(https://aws.amazon.com/ec2/elastic-gpus/) for details. - suboptions: - type: - description: The type of Elastic GPU to attach - type: str - iam_instance_profile: - description: > - The name or ARN of an IAM instance profile. Requires permissions to - describe existing instance roles to confirm ARN is properly formed. - type: str - image_id: - description: > - The AMI ID to use for new instances launched with this template. This - value is region-dependent since AMIs are not global resources. - type: str - instance_initiated_shutdown_behavior: - description: > - Indicates whether an instance stops or terminates when you initiate - shutdown from the instance using the operating system shutdown command. - choices: [stop, terminate] - type: str - instance_market_options: - description: Options for alternative instance markets, currently only the spot market is supported. - type: dict - suboptions: - market_type: - description: The market type. This should always be 'spot'. - type: str - spot_options: - description: Spot-market specific settings. - type: dict - suboptions: - block_duration_minutes: - description: > - The required duration for the Spot Instances (also known as Spot - blocks), in minutes. This value must be a multiple of 60 (60, - 120, 180, 240, 300, or 360). - type: int - instance_interruption_behavior: - description: The behavior when a Spot Instance is interrupted. The default is C(terminate). 
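Since the module accepts all of these options in snake_case and camel-cases them into `LaunchTemplateData` before calling the API (see `params_to_launch_data()` further down in the file), a hypothetical `block_device_mappings` entry would look like the sketch below; every value here is illustrative:

```python
# Hypothetical snake_case block device mapping as the module accepts it;
# params_to_launch_data() camel-cases this into LaunchTemplateData for EC2.
block_device_mappings = [
    {
        "device_name": "/dev/sdb",
        "ebs": {
            "volume_type": "gp2",
            "volume_size": 20,
            "encrypted": True,
            "delete_on_termination": True,
        },
    }
]
```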
- choices: [hibernate, stop, terminate] - type: str - max_price: - description: The highest hourly price you're willing to pay for this Spot Instance. - type: str - spot_instance_type: - description: The request type to send. - choices: [one-time, persistent] - type: str - instance_type: - description: > - The instance type, such as C(c5.2xlarge). For a full list of instance types, see - U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). - type: str - kernel_id: - description: > - The ID of the kernel. We recommend that you use PV-GRUB instead of - kernels and RAM disks. For more information, see - U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) - type: str - key_name: - description: - - The name of the key pair. You can create a key pair using M(amazon.aws.ec2_key). - - If you do not specify a key pair, you can't connect to the instance - unless you choose an AMI that is configured to allow users another way to - log in. - type: str - monitoring: - description: Settings for instance monitoring. - type: dict - suboptions: - enabled: - type: bool - description: Whether to turn on detailed monitoring for new instances. This will incur extra charges. - network_interfaces: - description: One or more network interfaces. - type: list - elements: dict - suboptions: - associate_public_ip_address: - description: Associates a public IPv4 address with eth0 for a new network interface. - type: bool - delete_on_termination: - description: Indicates whether the network interface is deleted when the instance is terminated. - type: bool - description: - description: A description for the network interface. - type: str - device_index: - description: The device index for the network interface attachment. - type: int - groups: - description: List of security group IDs to include on this instance. - type: list - elements: str - ipv6_address_count: - description: > - The number of IPv6 addresses to assign to a network interface. Amazon - EC2 automatically selects the IPv6 addresses from the subnet range. - You can't use this option if specifying the I(ipv6_addresses) option. - type: int - ipv6_addresses: - description: > - A list of one or more specific IPv6 addresses from the IPv6 CIDR - block range of your subnet. You can't use this option if you're - specifying the I(ipv6_address_count) option. - type: list - elements: str - network_interface_id: - description: The eni ID of a network interface to attach. - type: str - private_ip_address: - description: The primary private IPv4 address of the network interface. - type: str - subnet_id: - description: The ID of the subnet for the network interface. - type: str - placement: - description: The placement group settings for the instance. - type: dict - suboptions: - affinity: - description: The affinity setting for an instance on a Dedicated Host. - type: str - availability_zone: - description: The Availability Zone for the instance. - type: str - group_name: - description: The name of the placement group for the instance. - type: str - host_id: - description: The ID of the Dedicated Host for the instance. - type: str - tenancy: - description: > - The tenancy of the instance (if the instance is running in a VPC). An - instance with a tenancy of dedicated runs on single-tenant hardware. - type: str - ram_disk_id: - description: > - The ID of the RAM disk to launch the instance with. We recommend that you - use PV-GRUB instead of kernels and RAM disks. 
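Note that `ipv6_addresses` above is documented as a list of plain strings; the module's `main()` (at the bottom of the file) rewrites them into the `{"ipv6_address": ...}` dicts the EC2 API expects. A standalone equivalent of that loop:

```python
# Rewrite plain IPv6 strings into the dicts the launch template API expects,
# as the module does in main(); the interface values are placeholders.
interfaces = [{"device_index": 0, "ipv6_addresses": ["2001:db8::1", "2001:db8::2"]}]
for interface in interfaces:
    if interface.get("ipv6_addresses"):
        interface["ipv6_addresses"] = [{"ipv6_address": addr} for addr in interface["ipv6_addresses"]]
print(interfaces[0]["ipv6_addresses"][0])  # {'ipv6_address': '2001:db8::1'}
```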
For more information, see - U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) - type: str - security_group_ids: - description: A list of security group IDs (VPC or EC2-Classic) that the new instances will be added to. - type: list - elements: str - security_groups: - description: > - A list of security group names (Default VPC or EC2-Classic) that the new instances will be added to. - For any VPC other than Default, you must use I(security_group_ids). - type: list - elements: str - source_version: - description: > - The version number of the launch template version on which to base the new version. - The new version inherits the same launch parameters as the source version, except for parameters that you explicitly specify. - Snapshots applied to the block device mapping are ignored when creating a new version unless they are explicitly included. - type: str - default: latest - version_added: 4.1.0 - tags: - type: dict - description: - - A set of key-value pairs to be applied to resources when this Launch Template is used. - - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)" - - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters." - aliases: ['resource_tags'] - user_data: - description: > - The Base64-encoded user data to make available to the instance. For more information, see the Linux - U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Windows - U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - documentation on user-data. - type: str - metadata_options: - description: - - Configure EC2 Metadata options. - - For more information see the IMDS documentation - U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html). - type: dict - version_added: 1.5.0 - suboptions: - http_endpoint: - type: str - description: > - This parameter enables or disables the HTTP metadata endpoint on your instances. - choices: [enabled, disabled] - default: 'enabled' - http_put_response_hop_limit: - type: int - description: > - The desired HTTP PUT response hop limit for instance metadata requests. - The larger the number, the further instance metadata requests can travel. - default: 1 - http_tokens: - type: str - description: > - The state of token usage for your instance metadata requests. - choices: [optional, required] - default: 'optional' - http_protocol_ipv6: - version_added: 3.1.0 - type: str - description: > - - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). - - Requires botocore >= 1.21.29 - choices: [enabled, disabled] - default: 'disabled' - instance_metadata_tags: - version_added: 3.1.0 - type: str - description: - - Whether the instance tags are available (C(enabled)) via metadata endpoint or not (C(disabled)).
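Because `user_data` must already be Base64-encoded, callers typically encode their script before handing it to the module. A minimal sketch; the script content is a placeholder:

```python
import base64

# user_data must be passed to the module already Base64-encoded.
script = "#!/bin/bash\nyum -y update\n"
user_data = base64.b64encode(script.encode("utf-8")).decode("ascii")
```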
- - Requires botocore >= 1.23.30 - choices: [enabled, disabled] - default: 'disabled' -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create an ec2 launch template - community.aws.ec2_launch_template: - name: "my_template" - image_id: "ami-04b762b4289fba92b" - key_name: my_ssh_key - instance_type: t2.micro - iam_instance_profile: myTestProfile - disable_api_termination: true - -- name: > - Create a new version of an existing ec2 launch template with a different instance type, - while leaving an older version as the default version - community.aws.ec2_launch_template: - name: "my_template" - default_version: 1 - instance_type: c5.4xlarge - -- name: Delete an ec2 launch template - community.aws.ec2_launch_template: - name: "my_template" - state: absent - -# This module does not yet allow deletion of specific versions of launch templates -""" - -RETURN = r""" -latest_version: - description: Latest available version of the launch template - returned: when state=present - type: int -default_version: - description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always. - returned: when state=present - type: int -""" - -from uuid import uuid4 - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError - from botocore.exceptions import WaiterError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def determine_iam_role(module, name_or_arn): - if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): - return {"arn": name_or_arn} - iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - try: - role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return {"arn": role["InstanceProfile"]["Arn"]} - except is_boto3_error_code("NoSuchEntity") as e: - module.fail_json_aws(e, msg=f"Could not find instance_role {name_or_arn}") - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg=f"An error occurred while searching for instance_role {name_or_arn}. 
Please try supplying the full ARN.", - ) - - -def existing_templates(module): - ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - matches = None - try: - if module.params.get("template_id"): - matches = ec2.describe_launch_templates( - LaunchTemplateIds=[module.params.get("template_id")], aws_retry=True - ) - elif module.params.get("template_name"): - matches = ec2.describe_launch_templates( - LaunchTemplateNames=[module.params.get("template_name")], aws_retry=True - ) - except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException") as e: - # no named template was found, return nothing/empty versions - return None, [] - except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg=( - f"Launch template with ID {module.params.get('launch_template_id')} is not a valid ID. It should start" - " with `lt-....`" - ), - ) - except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, - msg=( - f"Launch template with ID {module.params.get('launch_template_id')} could not be found, please supply a" - " name instead so that a new template can be created" - ), - ) - except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not check existing launch templates. This may be an IAM permission problem.") - else: - template = matches["LaunchTemplates"][0] - template_id, template_version, template_default = ( - template["LaunchTemplateId"], - template["LatestVersionNumber"], - template["DefaultVersionNumber"], - ) - try: - return ( - template, - ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)[ - "LaunchTemplateVersions" - ], - ) - except (ClientError, BotoCoreError, WaiterError) as e: - module.fail_json_aws( - e, - msg=f"Could not find launch template versions for {template['LaunchTemplateName']} (ID: {template_id}).", - ) - - -def params_to_launch_data(module, template_params): - if template_params.get("tags"): - tag_list = ansible_dict_to_boto3_tag_list(template_params.get("tags")) - template_params["tag_specifications"] = [ - {"resource_type": r_type, "tags": tag_list} for r_type in ("instance", "volume") - ] - del template_params["tags"] - if module.params.get("iam_instance_profile"): - template_params["iam_instance_profile"] = determine_iam_role(module, module.params["iam_instance_profile"]) - params = snake_dict_to_camel_dict( - dict((k, v) for k, v in template_params.items() if v is not None), - capitalize_first=True, - ) - return params - - -def delete_template(module): - ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - template, template_versions = existing_templates(module) - deleted_versions = [] - if template or template_versions: - non_default_versions = [to_text(t["VersionNumber"]) for t in template_versions if not t["DefaultVersion"]] - if non_default_versions: - try: - v_resp = ec2.delete_launch_template_versions( - LaunchTemplateId=template["LaunchTemplateId"], - Versions=non_default_versions, - aws_retry=True, - ) - if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: - module.warn( - f"Failed to delete template versions {v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']} on" - f" launch template {template['LaunchTemplateId']}" - ) - deleted_versions = [ - camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"] - ] - except 
(ClientError, BotoCoreError) as e: - module.fail_json_aws( - e, - msg=f"Could not delete existing versions of the launch template {template['LaunchTemplateId']}", - ) - try: - resp = ec2.delete_launch_template( - LaunchTemplateId=template["LaunchTemplateId"], - aws_retry=True, - ) - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Could not delete launch template {template['LaunchTemplateId']}") - return { - "deleted_versions": deleted_versions, - "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), - "changed": True, - } - else: - return {"changed": False} - - -def create_or_update(module, template_options): - ec2 = module.client( - "ec2", retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidLaunchTemplateId.NotFound"]) - ) - template, template_versions = existing_templates(module) - out = {} - lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) - lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) - - if lt_data.get("MetadataOptions"): - if not module.botocore_at_least("1.23.30"): - # fail only if enabled is requested - if lt_data["MetadataOptions"].get("InstanceMetadataTags") == "enabled": - module.require_botocore_at_least("1.23.30", reason="to set instance_metadata_tags") - # pop if it's not requested to keep backwards compatibility. - # otherwise the module fails because parameters are set due to default values - lt_data["MetadataOptions"].pop("InstanceMetadataTags") - - if not module.botocore_at_least("1.21.29"): - # fail only if enabled is requested - if lt_data["MetadataOptions"].get("HttpProtocolIpv6") == "enabled": - module.require_botocore_at_least("1.21.29", reason="to set http_protocol_ipv6") - # pop if it's not requested to keep backwards compatibility.
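                # (http_protocol_ipv6 defaults to 'disabled' in the argument
                # spec, so the key is present in lt_data even when the user
                # never set it)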
- # otherwise the module fails because parameters are set due to default values - lt_data["MetadataOptions"].pop("HttpProtocolIpv6") - - if not (template or template_versions): - # create a full new one - try: - resp = ec2.create_launch_template( - LaunchTemplateName=module.params["template_name"], - LaunchTemplateData=lt_data, - ClientToken=uuid4().hex, - aws_retry=True, - ) - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create launch template") - template, template_versions = existing_templates(module) - out["changed"] = True - elif template and template_versions: - most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] - if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( - "VersionDescription", "" - ): - out["changed"] = False - return out - try: - if module.params.get("source_version") in (None, ""): - resp = ec2.create_launch_template_version( - LaunchTemplateId=template["LaunchTemplateId"], - LaunchTemplateData=lt_data, - ClientToken=uuid4().hex, - VersionDescription=str(module.params["version_description"]), - aws_retry=True, - ) - elif module.params.get("source_version") == "latest": - resp = ec2.create_launch_template_version( - LaunchTemplateId=template["LaunchTemplateId"], - LaunchTemplateData=lt_data, - ClientToken=uuid4().hex, - SourceVersion=str(most_recent["VersionNumber"]), - VersionDescription=str(module.params["version_description"]), - aws_retry=True, - ) - else: - try: - int(module.params.get("source_version")) - except ValueError: - module.fail_json( - msg=f"source_version param was not a valid integer, got \"{module.params.get('source_version')}\"" - ) - # get source template version - source_version = next( - (v for v in template_versions if v["VersionNumber"] == int(module.params.get("source_version"))), - None, - ) - if source_version is None: - module.fail_json( - msg=f"source_version does not exist, got \"{module.params.get('source_version')}\"" - ) - resp = ec2.create_launch_template_version( - LaunchTemplateId=template["LaunchTemplateId"], - LaunchTemplateData=lt_data, - ClientToken=uuid4().hex, - SourceVersion=str(source_version["VersionNumber"]), - VersionDescription=str(module.params["version_description"]), - aws_retry=True, - ) - - if module.params.get("default_version") in (None, ""): - # no need to do anything, leave the existing version as default - pass - elif module.params.get("default_version") == "latest": - set_default = ec2.modify_launch_template( - LaunchTemplateId=template["LaunchTemplateId"], - DefaultVersion=to_text(resp["LaunchTemplateVersion"]["VersionNumber"]), - ClientToken=uuid4().hex, - aws_retry=True, - ) - else: - try: - int(module.params.get("default_version")) - except ValueError: - module.fail_json( - msg=f"default_version param was not a valid integer, got \"{module.params.get('default_version')}\"" - ) - set_default = ec2.modify_launch_template( - LaunchTemplateId=template["LaunchTemplateId"], - DefaultVersion=to_text(int(module.params.get("default_version"))), - ClientToken=uuid4().hex, - aws_retry=True, - ) - except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create subsequent launch template version") - template, template_versions = existing_templates(module) - out["changed"] = True - return out - - -def format_module_output(module): - output = {} - template, template_versions = existing_templates(module) - template = camel_dict_to_snake_dict(template) - template_versions =
[camel_dict_to_snake_dict(v) for v in template_versions] - for v in template_versions: - for ts in v["launch_template_data"].get("tag_specifications") or []: - ts["tags"] = boto3_tag_list_to_ansible_dict(ts.pop("tags")) - output.update(dict(template=template, versions=template_versions)) - output["default_template"] = [v for v in template_versions if v.get("default_version")][0] - output["latest_template"] = [ - v - for v in template_versions - if (v.get("version_number") and int(v["version_number"]) == int(template["latest_version_number"])) - ][0] - if "version_number" in output["default_template"]: - output["default_version"] = output["default_template"]["version_number"] - if "version_number" in output["latest_template"]: - output["latest_version"] = output["latest_template"]["version_number"] - return output - - -def main(): - template_options = dict( - block_device_mappings=dict( - type="list", - elements="dict", - options=dict( - device_name=dict(), - ebs=dict( - type="dict", - options=dict( - delete_on_termination=dict(type="bool"), - encrypted=dict(type="bool"), - iops=dict(type="int"), - kms_key_id=dict(), - snapshot_id=dict(), - volume_size=dict(type="int"), - volume_type=dict(), - ), - ), - no_device=dict(), - virtual_name=dict(), - ), - ), - cpu_options=dict( - type="dict", - options=dict( - core_count=dict(type="int"), - threads_per_core=dict(type="int"), - ), - ), - credit_specification=dict( - dict(type="dict"), - options=dict( - cpu_credits=dict(), - ), - ), - disable_api_termination=dict(type="bool"), - ebs_optimized=dict(type="bool"), - elastic_gpu_specifications=dict( - options=dict(type=dict()), - type="list", - elements="dict", - ), - iam_instance_profile=dict(), - image_id=dict(), - instance_initiated_shutdown_behavior=dict(choices=["stop", "terminate"]), - instance_market_options=dict( - type="dict", - options=dict( - market_type=dict(), - spot_options=dict( - type="dict", - options=dict( - block_duration_minutes=dict(type="int"), - instance_interruption_behavior=dict(choices=["hibernate", "stop", "terminate"]), - max_price=dict(), - spot_instance_type=dict(choices=["one-time", "persistent"]), - ), - ), - ), - ), - instance_type=dict(), - kernel_id=dict(), - key_name=dict(), - monitoring=dict( - type="dict", - options=dict(enabled=dict(type="bool")), - ), - metadata_options=dict( - type="dict", - options=dict( - http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), - http_put_response_hop_limit=dict(type="int", default=1), - http_tokens=dict(choices=["optional", "required"], default="optional"), - http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), - instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), - ), - ), - network_interfaces=dict( - type="list", - elements="dict", - options=dict( - associate_public_ip_address=dict(type="bool"), - delete_on_termination=dict(type="bool"), - description=dict(), - device_index=dict(type="int"), - groups=dict(type="list", elements="str"), - ipv6_address_count=dict(type="int"), - ipv6_addresses=dict(type="list", elements="str"), - network_interface_id=dict(), - private_ip_address=dict(), - subnet_id=dict(), - ), - ), - placement=dict( - options=dict( - affinity=dict(), - availability_zone=dict(), - group_name=dict(), - host_id=dict(), - tenancy=dict(), - ), - type="dict", - ), - ram_disk_id=dict(), - security_group_ids=dict(type="list", elements="str"), - security_groups=dict(type="list", elements="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - 
user_data=dict(), - ) - - arg_spec = dict( - state=dict(choices=["present", "absent"], default="present"), - template_name=dict(aliases=["name"]), - template_id=dict(aliases=["id"]), - default_version=dict(default="latest"), - source_version=dict(default="latest"), - version_description=dict(default=""), - ) - - arg_spec.update(template_options) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - required_one_of=[ - ("template_name", "template_id"), - ], - supports_check_mode=True, - ) - - for interface in module.params.get("network_interfaces") or []: - if interface.get("ipv6_addresses"): - interface["ipv6_addresses"] = [{"ipv6_address": x} for x in interface["ipv6_addresses"]] - - if module.params.get("state") == "present": - out = create_or_update(module, template_options) - out.update(format_module_output(module)) - elif module.params.get("state") == "absent": - out = delete_template(module) - else: - module.fail_json(msg=f"Unsupported value \"{module.params.get('state')}\" for `state` parameter") - - module.exit_json(**out) - - -if __name__ == "__main__": - main() diff --git a/ec2_placement_group.py b/ec2_placement_group.py deleted file mode 100644 index ccdd7d54785..00000000000 --- a/ec2_placement_group.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_placement_group -version_added: 1.0.0 -short_description: Create or delete an EC2 Placement Group -description: - - Create an EC2 Placement Group; if the placement group already exists, - nothing is done. Or, delete an existing placement group. If the placement - group is absent, do nothing. See also - U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) -author: - - "Brad Macpherson (@iiibrad)" -options: - name: - description: - - The name for the placement group. - required: true - type: str - partition_count: - description: - - The number of partitions. - - Valid only when I(Strategy) is set to C(partition). - - Must be a value between C(1) and C(7). - type: int - version_added: 3.1.0 - state: - description: - - Create or delete placement group. - default: present - choices: [ 'present', 'absent' ] - type: str - strategy: - description: - - Placement group strategy. Cluster will cluster instances into a - low-latency group in a single Availability Zone, while Spread spreads - instances across underlying hardware. - default: cluster - choices: [ 'cluster', 'spread', 'partition' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide -# for details. - -- name: Create a placement group. - community.aws.ec2_placement_group: - name: my-cluster - state: present - -- name: Create a Spread placement group. - community.aws.ec2_placement_group: - name: my-cluster - state: present - strategy: spread - -- name: Create a Partition strategy placement group. - community.aws.ec2_placement_group: - name: my-cluster - state: present - strategy: partition - partition_count: 3 - -- name: Delete a placement group. 
- community.aws.ec2_placement_group: - name: my-cluster - state: absent - -""" - - -RETURN = r""" -placement_group: - description: Placement group attributes - returned: when state != absent - type: complex - contains: - name: - description: PG name - type: str - sample: my-cluster - state: - description: PG state - type: str - sample: "available" - strategy: - description: PG strategy - type: str - sample: "cluster" - -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.exponential_backoff() -def search_placement_group(connection, module): - """ - Check if a placement group exists. - """ - name = module.params.get("name") - try: - response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't find placement group named [{name}]") - - if len(response["PlacementGroups"]) != 1: - return None - else: - placement_group = response["PlacementGroups"][0] - return { - "name": placement_group["GroupName"], - "state": placement_group["State"], - "strategy": placement_group["Strategy"], - } - - -@AWSRetry.exponential_backoff(catch_extra_error_codes=["InvalidPlacementGroup.Unknown"]) -def get_placement_group_information(connection, name): - """ - Retrieve information about a placement group. - """ - response = connection.describe_placement_groups(GroupNames=[name]) - placement_group = response["PlacementGroups"][0] - return { - "name": placement_group["GroupName"], - "state": placement_group["State"], - "strategy": placement_group["Strategy"], - } - - -@AWSRetry.exponential_backoff() -def create_placement_group(connection, module): - name = module.params.get("name") - strategy = module.params.get("strategy") - partition_count = module.params.get("partition_count") - - if strategy != "partition" and partition_count: - module.fail_json(msg="'partition_count' can only be set when strategy is set to 'partition'.") - - params = {} - params["GroupName"] = name - params["Strategy"] = strategy - if partition_count: - params["PartitionCount"] = partition_count - params["DryRun"] = module.check_mode - - try: - connection.create_placement_group(**params) - except is_boto3_error_code("DryRunOperation"): - module.exit_json( - changed=True, - placement_group={ - "name": name, - "state": "DryRun", - "strategy": strategy, - }, - ) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Couldn't create placement group [{name}]") - - module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) - - -@AWSRetry.exponential_backoff() -def delete_placement_group(connection, module): - name = module.params.get("name") - - try: - connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete placement group [{name}]") - - module.exit_json(changed=True) - - -def main(): - argument_spec = dict( - name=dict(required=True, type="str"), - 
partition_count=dict(type="int"), - state=dict(default="present", choices=["present", "absent"]), - strategy=dict(default="cluster", choices=["cluster", "spread", "partition"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client("ec2") - - state = module.params.get("state") - - if state == "present": - placement_group = search_placement_group(connection, module) - if placement_group is None: - create_placement_group(connection, module) - else: - strategy = module.params.get("strategy") - if placement_group["strategy"] == strategy: - module.exit_json(changed=False, placement_group=placement_group) - else: - name = module.params.get("name") - module.fail_json( - msg=f"Placement group '{name}' exists, can't change strategy from '{placement_group['strategy']}' to '{strategy}'" - ) - - elif state == "absent": - placement_group = search_placement_group(connection, module) - if placement_group is None: - module.exit_json(changed=False) - else: - delete_placement_group(connection, module) - - -if __name__ == "__main__": - main() diff --git a/ec2_placement_group_info.py b/ec2_placement_group_info.py deleted file mode 100644 index 75cbc72585c..00000000000 --- a/ec2_placement_group_info.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_placement_group_info -version_added: 1.0.0 -short_description: List EC2 Placement Group(s) details -description: - - List details of EC2 Placement Group(s). -author: - - "Brad Macpherson (@iiibrad)" -options: - names: - description: - - A list of names to filter on. If a listed group does not exist, there - will be no corresponding entry in the result; no error will be raised. - type: list - elements: str - required: false - default: [] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details or the AWS region, -# see the AWS Guide for details. - -- name: List all placement groups. - community.aws.ec2_placement_group_info: - register: all_ec2_placement_groups - -- name: List two placement groups. 
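The placement-group module above drives check mode through EC2's DryRun flag: a permitted request raises DryRunOperation instead of mutating anything. A minimal boto3 sketch of that pattern, outside any Ansible plumbing (credentials, region and the group name are placeholder assumptions):

    import boto3
    import botocore

    ec2 = boto3.client("ec2")
    try:
        # With DryRun=True, EC2 validates the request and raises instead of creating.
        ec2.create_placement_group(GroupName="my-cluster", Strategy="cluster", DryRun=True)
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "DryRunOperation":
            print("create_placement_group would have succeeded")
        else:
            raise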
- community.aws.ec2_placement_group_info: - names: - - my-cluster - - my-other-cluster - register: specific_ec2_placement_groups - -- ansible.builtin.debug: - msg: > - {{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }} - -""" - - -RETURN = r""" -placement_groups: - description: Placement group attributes - returned: always - type: complex - contains: - name: - description: PG name - type: str - sample: my-cluster - state: - description: PG state - type: str - sample: "available" - strategy: - description: PG strategy - type: str - sample: "cluster" - -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_placement_groups_details(connection, module): - names = module.params.get("names") - try: - if len(names) > 0: - response = connection.describe_placement_groups( - Filters=[ - { - "Name": "group-name", - "Values": names, - } - ] - ) - else: - response = connection.describe_placement_groups() - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't find placement groups named [{names}]") - - results = [] - for placement_group in response["PlacementGroups"]: - results.append( - { - "name": placement_group["GroupName"], - "state": placement_group["State"], - "strategy": placement_group["Strategy"], - } - ) - return results - - -def main(): - argument_spec = dict( - names=dict(type="list", default=[], elements="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - connection = module.client("ec2") - - placement_groups = get_placement_groups_details(connection, module) - module.exit_json(changed=False, placement_groups=placement_groups) - - -if __name__ == "__main__": - main() diff --git a/ec2_snapshot_copy.py b/ec2_snapshot_copy.py deleted file mode 100644 index ce73191cb79..00000000000 --- a/ec2_snapshot_copy.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_snapshot_copy -version_added: 1.0.0 -short_description: Copies an EC2 snapshot and returns the new Snapshot ID -description: - - Copies an EC2 Snapshot from a source region to a destination region. -options: - source_region: - description: - - The source region the Snapshot should be copied from. - required: true - type: str - source_snapshot_id: - description: - - The ID of the Snapshot in source region that should be copied. - required: true - type: str - description: - description: - - An optional human-readable string describing purpose of the new Snapshot. - type: str - default: '' - encrypted: - description: - - Whether or not the destination Snapshot should be encrypted. - type: bool - default: false - kms_key_id: - description: - - KMS key id used to encrypt snapshot. If not specified, AWS defaults to C(alias/aws/ebs). - type: str - wait: - description: - - Wait for the copied Snapshot to be in the C(Available) state before returning. - type: bool - default: false - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 600 - type: int - tags: - description: - - A dictionary representing the tags to be applied to the newly created resource. 
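The wait/wait_timeout pair described above is implemented in the module body with a boto3 waiter; roughly, the timeout is divided into polling attempts. A sketch of that arithmetic under assumed placeholder values (the snapshot ID and timeout are illustrative):

    import boto3

    ec2 = boto3.client("ec2")
    wait_timeout = 600  # seconds; mirrors the module default
    delay = 15
    # One extra attempt because the waiter increments its counter before sleeping.
    max_attempts = (wait_timeout // delay) + 1
    ec2.get_waiter("snapshot_completed").wait(
        SnapshotIds=["snap-xxxxxxx"],
        WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts},
    )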
- type: dict - aliases: ['resource_tags'] -author: - - Deepak Kothandan (@Deepakkothandan) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Basic Snapshot Copy - community.aws.ec2_snapshot_copy: - source_region: eu-central-1 - region: eu-west-1 - source_snapshot_id: snap-xxxxxxx - -- name: Copy Snapshot and wait until available - community.aws.ec2_snapshot_copy: - source_region: eu-central-1 - region: eu-west-1 - source_snapshot_id: snap-xxxxxxx - wait: true - wait_timeout: 1200 # Default timeout is 600 - register: snapshot_id - -- name: Tagged Snapshot copy - community.aws.ec2_snapshot_copy: - source_region: eu-central-1 - region: eu-west-1 - source_snapshot_id: snap-xxxxxxx - tags: - Name: Snapshot-Name - -- name: Encrypted Snapshot copy - community.aws.ec2_snapshot_copy: - source_region: eu-central-1 - region: eu-west-1 - source_snapshot_id: snap-xxxxxxx - encrypted: true - -- name: Encrypted Snapshot copy with specified key - community.aws.ec2_snapshot_copy: - source_region: eu-central-1 - region: eu-west-1 - source_snapshot_id: snap-xxxxxxx - encrypted: true - kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b -""" - -RETURN = r""" -snapshot_id: - description: snapshot id of the newly created snapshot - returned: when snapshot copy is successful - type: str - sample: "snap-e9095e8c" -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def copy_snapshot(module, ec2): - """ - Copies an EC2 Snapshot to another region - - module : AnsibleAWSModule object - ec2: ec2 connection object - """ - - params = { - "SourceRegion": module.params.get("source_region"), - "SourceSnapshotId": module.params.get("source_snapshot_id"), - "Description": module.params.get("description"), - } - - if module.params.get("encrypted"): - params["Encrypted"] = True - - if module.params.get("kms_key_id"): - params["KmsKeyId"] = module.params.get("kms_key_id") - - if module.params.get("tags"): - params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), types=["snapshot"]) - - try: - snapshot_id = ec2.copy_snapshot(**params)["SnapshotId"] - if module.params.get("wait"): - delay = 15 - # Add one to max_attempts as wait() increment - # its counter before assessing it for time.sleep() - max_attempts = (module.params.get("wait_timeout") // delay) + 1 - ec2.get_waiter("snapshot_completed").wait( - SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts) - ) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="An error occurred waiting for the snapshot to become available.") - - module.exit_json(changed=True, snapshot_id=snapshot_id) - - -def main(): - argument_spec = dict( - source_region=dict(required=True), - source_snapshot_id=dict(required=True), - description=dict(default=""), - encrypted=dict(type="bool", default=False, required=False), - kms_key_id=dict(type="str", required=False), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=600), - tags=dict(type="dict", aliases=["resource_tags"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - try: - client = 
module.client("ec2") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - copy_snapshot(module, client) - - -if __name__ == "__main__": - main() diff --git a/ec2_transit_gateway.py b/ec2_transit_gateway.py deleted file mode 100644 index 9b50cb21b9c..00000000000 --- a/ec2_transit_gateway.py +++ /dev/null @@ -1,516 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_transit_gateway -short_description: Create and delete AWS Transit Gateways -version_added: 1.0.0 -description: - - Creates AWS Transit Gateways. - - Deletes AWS Transit Gateways. - - Updates tags on existing transit gateways. -options: - asn: - description: - - A private Autonomous System Number (ASN) for the Amazon side of a BGP session. - - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs. - type: int - auto_associate: - description: - - Enable or disable automatic association with the default association route table. - default: true - type: bool - auto_attach: - description: - - Enable or disable automatic acceptance of attachment requests. - default: false - type: bool - auto_propagate: - description: - - Enable or disable automatic propagation of routes to the default propagation route table. - default: true - type: bool - description: - description: - - The description of the transit gateway. - type: str - dns_support: - description: - - Whether to enable AWS DNS support. - default: true - type: bool - state: - description: - - C(present) to ensure resource is created. - - C(absent) to remove resource. - default: present - choices: [ "present", "absent"] - type: str - transit_gateway_id: - description: - - The ID of the transit gateway. - type: str - vpn_ecmp_support: - description: - - Enable or disable Equal Cost Multipath Protocol support. - default: true - type: bool - wait: - description: - - Whether to wait for status - default: true - type: bool - wait_timeout: - description: - - number of seconds to wait for status - default: 300 - type: int - -author: - - "Bob Boldin (@BobBoldin)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" -- name: Create a new transit gateway using defaults - community.aws.ec2_transit_gateway: - state: present - region: us-east-1 - description: personal-testing - register: created_tgw - -- name: Create a new transit gateway with options - community.aws.ec2_transit_gateway: - asn: 64514 - auto_associate: false - auto_propagate: false - dns_support: True - description: "nonprod transit gateway" - purge_tags: False - state: present - region: us-east-1 - tags: - Name: nonprod transit gateway - status: testing - -- name: Remove a transit gateway by description - community.aws.ec2_transit_gateway: - state: absent - region: us-east-1 - description: personal-testing - -- name: Remove a transit gateway by id - community.aws.ec2_transit_gateway: - state: absent - region: ap-southeast-2 - transit_gateway_id: tgw-3a9aa123 - register: deleted_tgw -""" - -RETURN = r""" -transit_gateway: - description: The attributes of the transit gateway. - type: complex - returned: I(state=present) - contains: - creation_time: - description: The creation time of the transit gateway. 
- returned: always - type: str - sample: "2019-03-06T17:13:51+00:00" - description: - description: The description of the transit gateway. - returned: always - type: str - sample: my test tgw - options: - description: The options attributes of the transit gateway - returned: always - type: complex - contains: - amazon_side_asn: - description: - - A private Autonomous System Number (ASN) for the Amazon side of a BGP session. - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs. - returned: always - type: str - sample: 64512 - auto_accept_shared_attachments: - description: Indicates whether attachment requests are automatically accepted. - returned: always - type: str - sample: disable - default_route_table_association: - description: - - Indicates whether resource attachments are automatically - associated with the default association route table. - returned: always - type: str - sample: enable - association_default_route_table_id: - description: The ID of the default association route table. - returned: when exists - type: str - sample: tgw-rtb-abc123444 - default_route_table_propagation: - description: - - Indicates whether resource attachments automatically - propagate routes to the default propagation route table. - returned: always - type: str - sample: disable - propagation_default_route_table_id: - description: The ID of the default propagation route table. - returned: when exists - type: str - sample: tgw-rtb-def456777 - vpn_ecmp_support: - description: Indicates whether Equal Cost Multipath Protocol support is enabled. - returned: always - type: str - sample: enable - dns_support: - description: Indicates whether DNS support is enabled. - returned: always - type: str - sample: enable - owner_id: - description: The account that owns the transit gateway. - returned: always - type: str - sample: '123456789012' - state: - description: The state of the transit gateway. - returned: always - type: str - sample: pending - tags: - description: A dictionary of resource tags. - returned: always - type: dict - sample: - tags: - Name: nonprod_tgw - transit_gateway_arn: - description: The Amazon Resource Name (ARN) of the transit gateway. - returned: always - type: str - sample: arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-3a9aa123 - transit_gateway_id: - description: The ID of the transit gateway.
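The options block documented above maps onto the Options parameter of the underlying CreateTransitGateway API call, with boolean module options rendered as "enable"/"disable" strings. A hedged boto3 sketch (the values are illustrative, not module defaults):

    import boto3

    ec2 = boto3.client("ec2")
    response = ec2.create_transit_gateway(
        Description="nonprod transit gateway",
        Options={
            "AmazonSideAsn": 64514,
            "AutoAcceptSharedAttachments": "disable",
            "DefaultRouteTableAssociation": "enable",
            "DefaultRouteTablePropagation": "enable",
            "VpnEcmpSupport": "enable",
            "DnsSupport": "enable",
        },
    )
    print(response["TransitGateway"]["TransitGatewayId"])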
- returned: always - type: str - sample: tgw-3a9aa123 -""" - -from time import sleep -from time import time - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # handled by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class AnsibleEc2Tgw(object): - def __init__(self, module, results): - self._module = module - self._results = results - retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=["IncorrectState"], - ) - connection = module.client("ec2", retry_decorator=retry_decorator) - self._connection = connection - self._check_mode = self._module.check_mode - - def process(self): - """Process the request based on the state parameter. - state = present will search for an existing tgw and return the object data; - if no object is found it will be created. - - state = absent will attempt to remove the tgw, however it will fail if it still has - attachments or associations - """ - description = self._module.params.get("description") - state = self._module.params.get("state", "present") - tgw_id = self._module.params.get("transit_gateway_id") - - if state == "present": - self.ensure_tgw_present(tgw_id, description) - elif state == "absent": - self.ensure_tgw_absent(tgw_id, description) - - def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True): - """ - Wait for the Transit Gateway to reach the specified status - :param wait_timeout: Maximum number of seconds to wait before giving up. - :param tgw_id: The ID of the transit gateway. - :param status: The status to wait for. - examples: status=available, status=deleted - :param skip_deleted: ignore deleted transit gateways - :return dict: transit gateway object - """ - polling_increment_secs = 5 - wait_timeout = time() + wait_timeout - status_achieved = False - transit_gateway = dict() - - while wait_timeout > time(): - try: - transit_gateway = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=skip_deleted) - - if transit_gateway: - if self._check_mode: - transit_gateway["state"] = status - - if transit_gateway.get("state") == status: - status_achieved = True - break - - elif transit_gateway.get("state") == "failed": - break - - else: - sleep(polling_increment_secs) - - except ClientError as e: - self._module.fail_json_aws(e) - - if not status_achieved: - self._module.fail_json(msg="Wait timeout reached while waiting for results") - - return transit_gateway - - def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): - """search for an existing tgw by either tgw_id or description - :param tgw_id: The AWS id of the transit gateway - :param description: The description of the transit gateway.
- :param skip_deleted: ignore deleted transit gateways - :return dict: transit gateway object - """ - filters = [] - if tgw_id: - filters = ansible_dict_to_boto3_filter_list({"transit-gateway-id": tgw_id}) - - try: - response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters) - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e) - - tgw = None - tgws = [] - - if len(response.get("TransitGateways", [])) == 1 and tgw_id: - if (response["TransitGateways"][0]["State"] != "deleted") or not skip_deleted: - tgws.extend(response["TransitGateways"]) - - for gateway in response.get("TransitGateways", []): - if description == gateway["Description"] and gateway["State"] != "deleted": - tgws.append(gateway) - - if len(tgws) > 1: - self._module.fail_json( - msg=f"EC2 returned more than one transit Gateway for description {description}, aborting" - ) - elif tgws: - tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) - tgw["tags"] = boto3_tag_list_to_ansible_dict(tgws[0]["Tags"]) - - return tgw - - @staticmethod - def enable_option_flag(flag): - disabled = "disable" - enabled = "enable" - if flag: - return enabled - return disabled - - def create_tgw(self, description): - """ - Create a transit gateway and optionally wait for status to become available. - - :param description: The description of the transit gateway. - :return dict: transit gateway object - """ - options = dict() - wait = self._module.params.get("wait") - wait_timeout = self._module.params.get("wait_timeout") - - if self._module.params.get("asn"): - options["AmazonSideAsn"] = self._module.params.get("asn") - - options["AutoAcceptSharedAttachments"] = self.enable_option_flag(self._module.params.get("auto_attach")) - options["DefaultRouteTableAssociation"] = self.enable_option_flag(self._module.params.get("auto_associate")) - options["DefaultRouteTablePropagation"] = self.enable_option_flag(self._module.params.get("auto_propagate")) - options["VpnEcmpSupport"] = self.enable_option_flag(self._module.params.get("vpn_ecmp_support")) - options["DnsSupport"] = self.enable_option_flag(self._module.params.get("dns_support")) - - try: - response = self._connection.create_transit_gateway(Description=description, Options=options) - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e) - - tgw_id = response["TransitGateway"]["TransitGatewayId"] - - if wait: - result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available") - else: - result = self.get_matching_tgw(tgw_id=tgw_id) - - self._results["msg"] = f"Transit gateway {result['transit_gateway_id']} created" - - return result - - def delete_tgw(self, tgw_id): - """ - Delete the transit gateway and optionally wait for status to become deleted - - :param tgw_id: The id of the transit gateway - :return dict: transit gateway object - """ - wait = self._module.params.get("wait") - wait_timeout = self._module.params.get("wait_timeout") - - try: - response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id) - except (ClientError, BotoCoreError) as e: - self._module.fail_json_aws(e) - - if wait: - result = self.wait_for_status( - wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False - ) - else: - result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - - self._results["msg"] = f"Transit gateway {tgw_id} deleted" - - return result - - def ensure_tgw_present(self, tgw_id=None, description=None): - """ - Will create a tgw if no match to
the tgw_id or description is found - Will update the tgw tags if a matching one is found but tags are not in sync - - :param tgw_id: The AWS id of the transit gateway - :param description: The description of the transit gateway. - :return dict: transit gateway object - """ - tgw = self.get_matching_tgw(tgw_id, description) - - if tgw is None: - if self._check_mode: - self._results["changed"] = True - self._results["transit_gateway_id"] = None - return self._results - - try: - if not description: - self._module.fail_json(msg="Failed to create Transit Gateway: description argument required") - tgw = self.create_tgw(description) - self._results["changed"] = True - except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg="Unable to create Transit Gateway") - - self._results["changed"] |= ensure_ec2_tags( - self._connection, - self._module, - tgw["transit_gateway_id"], - tags=self._module.params.get("tags"), - purge_tags=self._module.params.get("purge_tags"), - ) - - self._results["transit_gateway"] = self.get_matching_tgw(tgw_id=tgw["transit_gateway_id"]) - - return self._results - - def ensure_tgw_absent(self, tgw_id=None, description=None): - """ - Will delete the tgw if a single tgw is found not yet in deleted status - - :param tgw_id: The AWS id of the transit gateway - :param description: The description of the transit gateway. - :return dict: transit gateway object - """ - self._results["transit_gateway_id"] = None - tgw = self.get_matching_tgw(tgw_id, description) - - if tgw is not None: - if self._check_mode: - self._results["changed"] = True - return self._results - - try: - tgw = self.delete_tgw(tgw_id=tgw["transit_gateway_id"]) - self._results["changed"] = True - self._results["transit_gateway"] = self.get_matching_tgw( - tgw_id=tgw["transit_gateway_id"], skip_deleted=False - ) - except (BotoCoreError, ClientError) as e: - self._module.fail_json_aws(e, msg="Unable to delete Transit Gateway") - - return self._results - - -def setup_module_object(): - """ - merge argument spec and create Ansible module object - :return: Ansible module object - """ - - argument_spec = dict( - asn=dict(type="int"), - auto_associate=dict(type="bool", default=True), - auto_attach=dict(type="bool", default=False), - auto_propagate=dict(type="bool", default=True), - description=dict(type="str"), - dns_support=dict(type="bool", default=True), - purge_tags=dict(type="bool", default=True), - state=dict(default="present", choices=["present", "absent"]), - tags=dict(type="dict", aliases=["resource_tags"]), - transit_gateway_id=dict(type="str"), - vpn_ecmp_support=dict(type="bool", default=True), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=300), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=[("description", "transit_gateway_id")], - supports_check_mode=True, - ) - - return module - - -def main(): - module = setup_module_object() - - results = dict(changed=False) - - tgw_manager = AnsibleEc2Tgw(module=module, results=results) - tgw_manager.process() - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ec2_transit_gateway_info.py b/ec2_transit_gateway_info.py deleted file mode 100644 index b25346b84b8..00000000000 --- a/ec2_transit_gateway_info.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: 
ec2_transit_gateway_info -short_description: Gather information about ec2 transit gateways in AWS -version_added: 1.0.0 -description: - - Gather information about ec2 transit gateways in AWS -author: - - "Bob Boldin (@BobBoldin)" -options: - transit_gateway_ids: - description: - - A list of transit gateway IDs to gather information for. - aliases: [transit_gateway_id] - type: list - elements: str - default: [] - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters. - type: dict - default: {} -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather info about all transit gateways - community.aws.ec2_transit_gateway_info: - -- name: Gather info about a particular transit gateway using filter transit gateway ID - community.aws.ec2_transit_gateway_info: - filters: - transit-gateway-id: tgw-02c42332e6b7da829 - -- name: Gather info about a particular transit gateway using multiple option filters - community.aws.ec2_transit_gateway_info: - filters: - options.dns-support: enable - options.vpn-ecmp-support: enable - -- name: Gather info about multiple transit gateways using module param - community.aws.ec2_transit_gateway_info: - transit_gateway_ids: - - tgw-02c42332e6b7da829 - - tgw-03c53443d5a8cb716 -""" - -RETURN = r""" -transit_gateways: - description: > - Transit gateways that match the provided filters. Each element consists of a dict with all the information - related to that transit gateway. - returned: on success - type: complex - contains: - creation_time: - description: The creation time. - returned: always - type: str - sample: "2019-02-05T16:19:58+00:00" - description: - description: The description of the transit gateway. - returned: always - type: str - sample: "A transit gateway" - options: - description: A dictionary of the transit gateway options. - returned: always - type: complex - contains: - amazon_side_asn: - description: - - A private Autonomous System Number (ASN) for the Amazon - side of a BGP session. The range is 64512 to 65534 for - 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs. - returned: always - type: int - sample: 64512 - auto_accept_shared_attachments: - description: - - Indicates whether attachment requests are automatically accepted. - returned: always - type: str - sample: "enable" - default_route_table_association: - description: - - Indicates whether resource attachments are automatically - associated with the default association route table. - returned: always - type: str - sample: "disable" - association_default_route_table_id: - description: - - The ID of the default association route table. - returned: when present - type: str - sample: "rtb-11223344" - default_route_table_propagation: - description: - - Indicates whether resource attachments automatically - propagate routes to the default propagation route table. - returned: always - type: str - sample: "disable" - dns_support: - description: - - Indicates whether DNS support is enabled. - returned: always - type: str - sample: "enable" - propagation_default_route_table_id: - description: - - The ID of the default propagation route table. 
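The filters documented above are passed through to the DescribeTransitGateways API unchanged. A minimal boto3 sketch of the same lookup (the filter values are placeholders):

    import boto3

    ec2 = boto3.client("ec2")
    response = ec2.describe_transit_gateways(
        Filters=[
            {"Name": "options.dns-support", "Values": ["enable"]},
            {"Name": "state", "Values": ["available"]},
        ]
    )
    for tgw in response["TransitGateways"]:
        print(tgw["TransitGatewayId"], tgw["State"])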
- returned: when present - type: str - sample: "rtb-11223344" - vpn_ecmp_support: - description: - - Indicates whether Equal Cost Multipath Protocol support - is enabled. - returned: always - type: str - sample: "enable" - owner_id: - description: The AWS account number ID which owns the transit gateway. - returned: always - type: str - sample: "123456789012" - state: - description: The state of the transit gateway. - returned: always - type: str - sample: "available" - tags: - description: A dict of tags associated with the transit gateway. - returned: always - type: dict - sample: '{ - "Name": "A sample TGW" - }' - transit_gateway_arn: - description: The Amazon Resource Name (ARN) of the transit gateway. - returned: always - type: str - sample: "arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-02c42332e6b7da829" - transit_gateway_id: - description: The ID of the transit gateway. - returned: always - type: str - sample: "tgw-02c42332e6b7da829" -""" - -try: - import botocore -except ImportError: - pass # handled by imported AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class AnsibleEc2TgwInfo(object): - def __init__(self, module, results): - self._module = module - self._results = results - self._connection = self._module.client("ec2") - self._check_mode = self._module.check_mode - - @AWSRetry.exponential_backoff() - def describe_transit_gateways(self): - """ - Describe transit gateways. 
- - """ - # collect parameters - filters = ansible_dict_to_boto3_filter_list(self._module.params["filters"]) - transit_gateway_ids = self._module.params["transit_gateway_ids"] - - # init empty list for return vars - transit_gateway_info = list() - - # Get the basic transit gateway info - try: - response = self._connection.describe_transit_gateways( - TransitGatewayIds=transit_gateway_ids, Filters=filters - ) - except is_boto3_error_code("InvalidTransitGatewayID.NotFound"): - self._results["transit_gateways"] = [] - return - - for transit_gateway in response["TransitGateways"]: - transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=["Tags"])) - # convert tag list to ansible dict - transit_gateway_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(transit_gateway.get("Tags", [])) - - self._results["transit_gateways"] = transit_gateway_info - return - - -def setup_module_object(): - """ - merge argument spec and create Ansible module object - :return: Ansible module object - """ - - argument_spec = dict( - transit_gateway_ids=dict(type="list", default=[], elements="str", aliases=["transit_gateway_id"]), - filters=dict(type="dict", default={}), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - return module - - -def main(): - module = setup_module_object() - - results = dict(changed=False) - - tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results) - try: - tgwf_manager.describe_transit_gateways() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ec2_transit_gateway_vpc_attachment.py b/ec2_transit_gateway_vpc_attachment.py deleted file mode 100644 index 301fefb0513..00000000000 --- a/ec2_transit_gateway_vpc_attachment.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_transit_gateway_vpc_attachment -short_description: Create and delete AWS Transit Gateway VPC attachments -version_added: 4.0.0 -description: - - Creates, deletes and updates AWS Transit Gateway VPC Attachments. -options: - transit_gateway: - description: - - The ID of the Transit Gateway that the attachment belongs to. - - When creating a new attachment, I(transit_gateway) must be provided. - - At least one of I(name), I(transit_gateway) and I(id) must be provided. - - I(transit_gateway) is an immutable setting and can not be updated on an - existing attachment. - type: str - required: false - aliases: ['transit_gateway_id'] - id: - description: - - The ID of the Transit Gateway Attachment. - - When I(id) is not set, a search using I(transit_gateway) and I(name) will be - performed. If multiple results are returned, the module will fail. - - At least one of I(name), I(transit_gateway) and I(id) must be provided. - type: str - required: false - aliases: ['attachment_id'] - name: - description: - - The C(Name) tag of the Transit Gateway attachment. - - Providing both I(id) and I(name) will set the C(Name) tag on the existing - attachment matching I(id). - - Setting the C(Name) tag in I(tags) will also result in the C(Name) tag being - updated. - - At least one of I(name), I(transit_gateway) and I(id) must be provided.
- type: str - required: false - state: - description: - - Create or remove the Transit Gateway attachment. - type: str - required: false - choices: ['present', 'absent'] - default: 'present' - subnets: - description: - - The ID of the subnets in which to create the transit gateway VPC attachment. - - Required when creating a new attachment. - type: list - elements: str - required: false - purge_subnets: - description: - - If I(purge_subnets=true), existing subnets will be removed from the - attachment as necessary to match exactly what is defined by I(subnets). - type: bool - required: false - default: true - dns_support: - description: - - Whether DNS support is enabled. - type: bool - required: false - ipv6_support: - description: - - Whether IPv6 support is enabled. - type: bool - required: false - appliance_mode_support: - description: - - Whether the attachment is configured for appliance mode. - - When appliance mode is enabled, Transit Gateway, using 4-tuples of an - IP packet, selects a single Transit Gateway ENI in the Appliance VPC - for the life of a flow to send traffic to. - type: bool - required: false - wait: - description: - - Whether to wait for the Transit Gateway attachment to reach the - C(Available) or C(Deleted) state before the module returns. - type: bool - required: false - default: true - wait_timeout: - description: - - Maximum time, in seconds, to wait for the Transit Gateway attachment - to reach the expected state. - - Defaults to 600 seconds. - type: int - required: false -author: - - "Mark Chappell (@tremble)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Create a Transit Gateway attachment -- community.aws.ec2_transit_gateway_vpc_attachment: - state: present - transit_gateway: 'tgw-123456789abcdef01' - name: AnsibleTest-1 - subnets: - - subnet-00000000000000000 - - subnet-11111111111111111 - - subnet-22222222222222222 - ipv6_support: True - purge_subnets: True - dns_support: True - appliance_mode_support: True - tags: - TestTag: changed data in Test Tag - -# Set sub options on a Transit Gateway attachment -- community.aws.ec2_transit_gateway_vpc_attachment: - state: present - id: 'tgw-attach-0c0c5fd0b0f01d1c9' - name: AnsibleTest-1 - ipv6_support: True - purge_subnets: False - dns_support: False - appliance_mode_support: True - -# Delete the transit gateway -- community.aws.ec2_transit_gateway_vpc_attachment: - state: absent - id: 'tgw-attach-0c0c5fd0b0f01d1c9' -""" - -RETURN = r""" -transit_gateway_attachments: - description: The attributes of the Transit Gateway attachments. - type: list - elements: dict - returned: success - contains: - creation_time: - description: - - An ISO 8601 date time stamp of when the attachment was created. - type: str - returned: success - example: '2022-03-10T16:40:26+00:00' - options: - description: - - Additional VPC attachment options. - type: dict - returned: success - contains: - appliance_mode_support: - description: - - Indicates whether appliance mode support is enabled. - type: str - returned: success - example: 'enable' - dns_support: - description: - - Indicates whether DNS support is enabled. - type: str - returned: success - example: 'disable' - ipv6_support: - description: - - Indicates whether IPv6 support is disabled. - type: str - returned: success - example: 'disable' - state: - description: - - The state of the attachment. 
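Sub-options such as dns_support, ipv6_support and appliance_mode_support correspond to the Options block of the ModifyTransitGatewayVpcAttachment API, and subnet changes are reconciled through AddSubnetIds/RemoveSubnetIds. A sketch of the underlying call with placeholder IDs (not the module's full reconciliation logic):

    import boto3

    ec2 = boto3.client("ec2")
    ec2.modify_transit_gateway_vpc_attachment(
        TransitGatewayAttachmentId="tgw-attach-0c0c5fd0b0f01d1c9",  # placeholder
        AddSubnetIds=["subnet-00000000000000000"],  # placeholder
        Options={
            "DnsSupport": "disable",
            "Ipv6Support": "enable",
            "ApplianceModeSupport": "enable",
        },
    )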
- type: str - returned: success - example: 'deleting' - subnet_ids: - description: - - The IDs of the subnets in use by the attachment. - type: list - elements: str - returned: success - example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111'] - tags: - description: - - A dictionary representing the resource tags. - type: dict - returned: success - transit_gateway_attachment_id: - description: - - The ID of the attachment. - type: str - returned: success - example: 'tgw-attach-0c0c5fd0b0f01d1c9' - transit_gateway_id: - description: - - The ID of the transit gateway that the attachment is connected to. - type: str - returned: success - example: 'tgw-0123456789abcdef0' - vpc_id: - description: - - The ID of the VPC that the attachment is connected to. - type: str - returned: success - example: 'vpc-0123456789abcdef0' - vpc_owner_id: - description: - - The ID of the account that the VPC belongs to. - type: str - returned: success - example: '123456789012' -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager - - -def main(): - argument_spec = dict( - state=dict(type="str", required=False, default="present", choices=["absent", "present"]), - transit_gateway=dict(type="str", required=False, aliases=["transit_gateway_id"]), - id=dict(type="str", required=False, aliases=["attachment_id"]), - name=dict(type="str", required=False), - subnets=dict(type="list", elements="str", required=False), - purge_subnets=dict(type="bool", required=False, default=True), - tags=dict(type="dict", required=False, aliases=["resource_tags"]), - purge_tags=dict(type="bool", required=False, default=True), - appliance_mode_support=dict(type="bool", required=False), - dns_support=dict(type="bool", required=False), - ipv6_support=dict(type="bool", required=False), - wait=dict(type="bool", required=False, default=True), - wait_timeout=dict(type="int", required=False), - ) - - one_of = [ - ["id", "transit_gateway", "name"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=one_of, - ) - - attach_id = module.params.get("id", None) - tgw = module.params.get("transit_gateway", None) - name = module.params.get("name", None) - tags = module.params.get("tags", None) - purge_tags = module.params.get("purge_tags") - state = module.params.get("state") - subnets = module.params.get("subnets", None) - purge_subnets = module.params.get("purge_subnets") - - # When not provided with an ID see if one exists. - if not attach_id: - search_manager = TransitGatewayVpcAttachmentManager(module=module) - filters = dict() - if tgw: - filters["transit-gateway-id"] = tgw - if name: - filters["tag:Name"] = name - if subnets: - vpc_id = search_manager.subnets_to_vpc(subnets) - filters["vpc-id"] = vpc_id - - # Attachments lurk in a 'deleted' state, for a while, ignore them so we - # can reuse the names - filters["state"] = [ - "available", - "deleting", - "failed", - "failing", - "initiatingRequest", - "modifying", - "pendingAcceptance", - "pending", - "rollingBack", - "rejected", - "rejecting", - ] - attachments = search_manager.list(filters=filters) - if len(attachments) > 1: - module.fail_json("Multiple matching attachments found, provide an ID", attachments=attachments) - # If we find a match then we'll modify it by ID, otherwise we'll be - # creating a new attachment.
- if attachments: - attach_id = attachments[0]["transit_gateway_attachment_id"] - - manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id) - manager.set_wait(module.params.get("wait", None)) - manager.set_wait_timeout(module.params.get("wait_timeout", None)) - - if state == "absent": - manager.delete() - else: - if not attach_id: - if not tgw: - module.fail_json( - "No existing attachment found. To create a new attachment" - " the `transit_gateway` parameter must be provided." - ) - if not subnets: - module.fail_json( - "No existing attachment found. To create a new attachment" - " the `subnets` parameter must be provided." - ) - - # name is just a special case of tags. - if name: - new_tags = dict(Name=name) - if tags is None: - purge_tags = False - else: - new_tags.update(tags) - tags = new_tags - - manager.set_transit_gateway(tgw) - manager.set_subnets(subnets, purge_subnets) - manager.set_tags(tags, purge_tags) - manager.set_dns_support(module.params.get("dns_support", None)) - manager.set_ipv6_support(module.params.get("ipv6_support", None)) - manager.set_appliance_mode_support(module.params.get("appliance_mode_support", None)) - manager.flush_changes() - - results = dict( - changed=manager.changed, - attachments=[manager.updated_resource], - ) - if manager.changed: - results["diff"] = dict( - before=manager.original_resource, - after=manager.updated_resource, - ) - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ec2_transit_gateway_vpc_attachment_info.py b/ec2_transit_gateway_vpc_attachment_info.py deleted file mode 100644 index a665e4080cc..00000000000 --- a/ec2_transit_gateway_vpc_attachment_info.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_transit_gateway_vpc_attachment_info -short_description: describes AWS Transit Gateway VPC attachments -version_added: 4.0.0 -description: - - Describes AWS Transit Gateway VPC Attachments. -options: - id: - description: - - The ID of the Transit Gateway Attachment. - - Mutually exclusive with I(name) and I(filters). - type: str - required: false - aliases: ['attachment_id'] - name: - description: - - The C(Name) tag of the Transit Gateway attachment. - type: str - required: false - filters: - description: - - A dictionary of filters to apply. Each dict item consists of a filter key and a filter value. - - Setting a C(tag:Name) filter will override the I(name) parameter. - type: dict - required: false - include_deleted: - description: - - If I(include_deleted=True), then attachments in a deleted state will - also be returned. - - Setting a C(state) filter will override the I(include_deleted) parameter. - type: bool - required: false - default: false -author: - - "Mark Chappell (@tremble)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Describe a specific Transit Gateway attachment. -- community.aws.ec2_transit_gateway_vpc_attachment_info: - id: 'tgw-attach-0123456789abcdef0' - -# Describe all attachments attached to a transit gateway. -- community.aws.ec2_transit_gateway_vpc_attachment_info: - filters: - transit-gateway-id: tgw-0fedcba9876543210 - -# Describe all attachments in an account. 
-- community.aws.ec2_transit_gateway_vpc_attachment_info: -""" - -RETURN = r""" -transit_gateway_attachments: - description: The attributes of the Transit Gateway attachments. - type: list - elements: dict - returned: success - contains: - creation_time: - description: - - An ISO 8601 date time stamp of when the attachment was created. - type: str - returned: success - example: '2022-03-10T16:40:26+00:00' - options: - description: - - Additional VPC attachment options. - type: dict - returned: success - contains: - appliance_mode_support: - description: - - Indicates whether appliance mode support is enabled. - type: str - returned: success - example: 'enable' - dns_support: - description: - - Indicates whether DNS support is enabled. - type: str - returned: success - example: 'disable' - ipv6_support: - description: - - Indicates whether IPv6 support is enabled. - type: str - returned: success - example: 'disable' - state: - description: - - The state of the attachment. - type: str - returned: success - example: 'deleting' - subnet_ids: - description: - - The IDs of the subnets in use by the attachment. - type: list - elements: str - returned: success - example: ['subnet-0123456789abcdef0', 'subnet-11111111111111111'] - tags: - description: - - A dictionary representing the resource tags. - type: dict - returned: success - transit_gateway_attachment_id: - description: - - The ID of the attachment. - type: str - returned: success - example: 'tgw-attach-0c0c5fd0b0f01d1c9' - transit_gateway_id: - description: - - The ID of the transit gateway that the attachment is connected to. - type: str - returned: success - example: 'tgw-0123456789abcdef0' - vpc_id: - description: - - The ID of the VPC that the attachment is connected to. - type: str - returned: success - example: 'vpc-0123456789abcdef0' - vpc_owner_id: - description: - - The ID of the account that the VPC belongs to.
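For reference, the name-based lookup these attachment modules perform reduces to a filtered DescribeTransitGatewayVpcAttachments call, with deleted attachments excluded from the state filter so their C(Name) tags can be reused. A rough boto3 sketch (the tag value and the shortened state list are placeholders):

    import boto3

    ec2 = boto3.client("ec2")
    response = ec2.describe_transit_gateway_vpc_attachments(
        Filters=[
            {"Name": "tag:Name", "Values": ["AnsibleTest-1"]},
            {"Name": "state", "Values": ["available", "pending", "modifying"]},
        ]
    )
    attachments = response["TransitGatewayVpcAttachments"]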
- type: str - returned: success - example: '123456789012' -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager - - -def main(): - argument_spec = dict( - id=dict(type="str", required=False, aliases=["attachment_id"]), - name=dict(type="str", required=False), - filters=dict(type="dict", required=False), - include_deleted=dict(type="bool", required=False, default=False), - ) - - mutually_exclusive = [ - ["id", "name"], - ["id", "filters"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, - ) - - name = module.params.get("name", None) - id = module.params.get("id", None) - opt_filters = module.params.get("filters", None) - - search_manager = TransitGatewayVpcAttachmentManager(module=module) - filters = dict() - - if name: - filters["tag:Name"] = name - - if not module.params.get("include_deleted"): - # Attachments lurk in a 'deleted' state, for a while, ignore them so we - # can reuse the names - filters["state"] = [ - "available", - "deleting", - "failed", - "failing", - "initiatingRequest", - "modifying", - "pendingAcceptance", - "pending", - "rollingBack", - "rejected", - "rejecting", - ] - - if opt_filters: - filters.update(opt_filters) - - attachments = search_manager.list(filters=filters, id=id) - - module.exit_json(changed=False, attachments=attachments, filters=filters) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_egress_igw.py b/ec2_vpc_egress_igw.py deleted file mode 100644 index 0a309b4863c..00000000000 --- a/ec2_vpc_egress_igw.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_vpc_egress_igw -version_added: 1.0.0 -short_description: Manage an AWS VPC Egress Only Internet gateway -description: - - Manage an AWS VPC Egress Only Internet gateway -author: - - Daniel Shepherd (@shepdelacreme) -options: - vpc_id: - description: - - The ID of the VPC that this Egress Only Internet Gateway should be attached to. - required: true - type: str - state: - description: - - Create or delete the EIGW. - default: present - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details.

# Ensure that the VPC has an Internet Gateway. -# The Internet Gateway ID can be accessed via {{eigw.gateway_id}} for use in setting up NATs etc. -- community.aws.ec2_vpc_egress_igw: - vpc_id: vpc-abcdefgh - state: present - register: eigw - -""" - -RETURN = r""" -gateway_id: - description: The ID of the Egress Only Internet Gateway or Null. - returned: always - type: str - sample: eigw-0e00cf111ba5bc11e -vpc_id: - description: The ID of the VPC that the gateway is attached to or detached from.
- returned: always - type: str - sample: vpc-012345678 -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def delete_eigw(module, connection, eigw_id): - """ - Delete EIGW. - - module : AnsibleAWSModule object - connection : boto3 client connection object - eigw_id : ID of the EIGW to delete - """ - changed = False - - try: - response = connection.delete_egress_only_internet_gateway( - aws_retry=True, DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id - ) - except is_boto3_error_code("DryRunOperation"): - changed = True - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Could not delete Egress-Only Internet Gateway {eigw_id} from VPC {module.params.get('vpc_id')}") - - if not module.check_mode: - changed = response.get("ReturnCode", False) - - return changed - - -def create_eigw(module, connection, vpc_id): - """ - Create EIGW. - - module : AnsibleAWSModule object - connection : boto3 client connection object - vpc_id : ID of the VPC we are operating on - """ - gateway_id = None - changed = False - - try: - response = connection.create_egress_only_internet_gateway( - aws_retry=True, DryRun=module.check_mode, VpcId=vpc_id - ) - except is_boto3_error_code("DryRunOperation"): - # When boto3 method is run with DryRun=True it returns an error on success - # We need to catch the error and return something valid - changed = True - except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"invalid vpc ID '{vpc_id}' provided") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Could not create Egress-Only Internet Gateway for vpc ID {vpc_id}") - - if not module.check_mode: - gateway = response.get("EgressOnlyInternetGateway", {}) - state = gateway.get("Attachments", [{}])[0].get("State") - gateway_id = gateway.get("EgressOnlyInternetGatewayId") - - if gateway_id and state in ("attached", "attaching"): - changed = True - else: - # EIGW gave back a bad attachment state or an invalid response so we error out - module.fail_json( - msg=f"Unable to create and attach Egress Only Internet Gateway to VPCId: {vpc_id}. Bad or no state in response", - **camel_dict_to_snake_dict(response), - ) - - return changed, gateway_id - - -def describe_eigws(module, connection, vpc_id): - """ - Describe EIGWs.
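The create path above boils down to a single CreateEgressOnlyInternetGateway call followed by an attachment-state check. A minimal boto3 sketch with a placeholder VPC ID (DryRun and error handling omitted):

    import boto3

    ec2 = boto3.client("ec2")
    response = ec2.create_egress_only_internet_gateway(VpcId="vpc-abcdefgh")
    gateway = response["EgressOnlyInternetGateway"]
    print(gateway["EgressOnlyInternetGatewayId"], gateway["Attachments"][0]["State"])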
- - module : AnsibleAWSModule object - connection : boto3 client connection object - vpc_id : ID of the VPC we are operating on - """ - gateway_id = None - - try: - response = connection.describe_egress_only_internet_gateways(aws_retry=True) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways") - - for eigw in response.get("EgressOnlyInternetGateways", []): - for attachment in eigw.get("Attachments", []): - if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"): - gateway_id = eigw.get("EgressOnlyInternetGatewayId") - - return gateway_id - - -def main(): - argument_spec = dict(vpc_id=dict(required=True), state=dict(default="present", choices=["present", "absent"])) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client("ec2", retry_decorator=retry_decorator) - - vpc_id = module.params.get("vpc_id") - state = module.params.get("state") - - eigw_id = describe_eigws(module, connection, vpc_id) - - result = dict(gateway_id=eigw_id, vpc_id=vpc_id) - changed = False - - if state == "present" and not eigw_id: - changed, result["gateway_id"] = create_eigw(module, connection, vpc_id) - elif state == "absent" and eigw_id: - changed = delete_eigw(module, connection, eigw_id) - - module.exit_json(changed=changed, **result) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py deleted file mode 100644 index 022f058d0f9..00000000000 --- a/ec2_vpc_nacl.py +++ /dev/null @@ -1,607 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_vpc_nacl -short_description: create and delete Network ACLs -version_added: 1.0.0 -description: - - Read the AWS documentation for Network ACLS - U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) -options: - name: - description: - - Tagged name identifying a network ACL. - - One and only one of the I(name) or I(nacl_id) is required. - required: false - type: str - nacl_id: - description: - - NACL id identifying a network ACL. - - One and only one of the I(name) or I(nacl_id) is required. - required: false - type: str - vpc_id: - description: - - VPC id of the requesting VPC. - - Required when state present. - required: false - type: str - subnets: - description: - - The list of subnets that should be associated with the network ACL. - - Must be specified as a list - - Each subnet can be specified as subnet ID, or its tagged name. - required: false - type: list - elements: str - default: [] - egress: - description: - - A list of rules for outgoing traffic. Each rule must be specified as a list. - Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']), - the rule action ('allow' or 'deny') the CIDR of the IPv4 or IPv6 network range to allow or deny, - the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the last port in the range for - TCP or UDP protocols, and the first port in the range for TCP or UDP protocols. - See examples. - default: [] - required: false - type: list - elements: list - ingress: - description: - - List of rules for incoming traffic. Each rule must be specified as a list. 
- Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']), - the rule action ('allow' or 'deny') the CIDR of the IPv4 or IPv6 network range to allow or deny, - the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the last port in the range for - TCP or UDP protocols, and the first port in the range for TCP or UDP protocols. - See examples. - default: [] - required: false - type: list - elements: list - state: - description: - - Creates or modifies an existing NACL - - Deletes a NACL and reassociates subnets to the default NACL - required: false - type: str - choices: ['present', 'absent'] - default: present -author: - - Mike Mochan (@mmochan) -notes: - - Support for I(purge_tags) was added in release 4.0.0. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" - -# Complete example to create and delete a network ACL -# that allows SSH, HTTP and ICMP in, and all traffic out. -- name: "Create and associate production DMZ network ACL with DMZ subnets" - community.aws.ec2_vpc_nacl: - vpc_id: vpc-12345678 - name: prod-dmz-nacl - region: ap-southeast-2 - subnets: ['prod-dmz-1', 'prod-dmz-2'] - tags: - CostCode: CC1234 - Project: phoenix - Description: production DMZ - ingress: - # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code, - # port from, port to - - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22] - - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80] - - [205, 'tcp', 'allow', '::/0', null, null, 80, 80] - - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8] - - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8] - egress: - - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null] - - [105, 'all', 'allow', '::/0', null, null, null, null] - state: 'present' - -- name: "Remove the ingress and egress rules - defaults to deny all" - community.aws.ec2_vpc_nacl: - vpc_id: vpc-12345678 - name: prod-dmz-nacl - region: ap-southeast-2 - subnets: - - prod-dmz-1 - - prod-dmz-2 - tags: - CostCode: CC1234 - Project: phoenix - Description: production DMZ - state: present - -- name: "Remove the NACL subnet associations and tags" - community.aws.ec2_vpc_nacl: - vpc_id: 'vpc-12345678' - name: prod-dmz-nacl - region: ap-southeast-2 - state: present - -- name: "Delete nacl and subnet associations" - community.aws.ec2_vpc_nacl: - vpc_id: vpc-12345678 - name: prod-dmz-nacl - state: absent - -- name: "Delete nacl by its id" - community.aws.ec2_vpc_nacl: - nacl_id: acl-33b4ee5b - state: absent -""" -RETURN = r""" -task: - description: The result of the create, or delete action. 
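A side note on tagging: later in this module, create_network_acl() applies tags at creation time through a TagSpecifications argument built by an amazon.aws helper. A minimal sketch of the shape it produces, with illustrative tag values:

# Sketch of the TagSpecifications structure built by boto3_tag_specifications().
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications

spec = boto3_tag_specifications({"Name": "prod-dmz-nacl", "CostCode": "CC1234"}, ["network-acl"])
# Roughly equivalent to:
# [{"ResourceType": "network-acl",
#   "Tags": [{"Key": "CostCode", "Value": "CC1234"}, {"Key": "Name", "Value": "prod-dmz-nacl"}]}]
print(spec)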
- returned: success - type: dict -nacl_id: - description: The id of the NACL (when creating or updating an ACL) - returned: success - type: str - sample: acl-123456789abcdef01 -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# VPC-supported IANA protocol numbers -# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58} - - -# Utility methods -def icmp_present(entry): - if len(entry) == 6 and entry[1] in ["icmp", "ipv6-icmp"] or entry[1] in [1, 58]: - return True - - -def subnets_removed(nacl_id, subnets, client, module): - results = find_acl_by_id(nacl_id, client, module) - associations = results["NetworkAcls"][0]["Associations"] - subnet_ids = [assoc["SubnetId"] for assoc in associations] - return [subnet for subnet in subnet_ids if subnet not in subnets] - - -def subnets_added(nacl_id, subnets, client, module): - results = find_acl_by_id(nacl_id, client, module) - associations = results["NetworkAcls"][0]["Associations"] - subnet_ids = [assoc["SubnetId"] for assoc in associations] - return [subnet for subnet in subnets if subnet not in subnet_ids] - - -def subnets_changed(nacl, client, module): - changed = False - vpc_id = module.params.get("vpc_id") - nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] - subnets = subnets_to_associate(nacl, client, module) - if not subnets: - default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] - subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module) - if subnets: - replace_network_acl_association(default_nacl_id, subnets, client, module) - changed = True - return changed - changed = False - return changed - subs_added = subnets_added(nacl_id, subnets, client, module) - if subs_added: - replace_network_acl_association(nacl_id, subs_added, client, module) - changed = True - subs_removed = subnets_removed(nacl_id, subnets, client, module) - if subs_removed: - default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0] - replace_network_acl_association(default_nacl_id, subs_removed, client, module) - changed = True - return changed - - -def nacls_changed(nacl, client, module): - changed = False - params = dict() - params["egress"] = module.params.get("egress") - params["ingress"] = module.params.get("ingress") - - nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] - nacl = describe_network_acl(client, module) - entries = nacl["NetworkAcls"][0]["Entries"] - egress = [rule for rule in entries if rule["Egress"] is True and rule["RuleNumber"] < 32767] - ingress = [rule for rule in entries if rule["Egress"] is False and rule["RuleNumber"] < 32767] - if rules_changed(egress, params["egress"], True, nacl_id, client, module): - changed = True - if rules_changed(ingress, params["ingress"], False, nacl_id, client, module): - changed = True - return changed - - -def tags_changed(nacl_id, client, module): - tags = module.params.get("tags") - name = module.params.get("name") - purge_tags = module.params.get("purge_tags") - - if name is None and tags is None: - return False - - if module.params.get("tags") is None: - # Only 
purge tags if tags is explicitly set to {} and purge_tags is True - purge_tags = False - - new_tags = dict() - if module.params.get("name") is not None: - new_tags["Name"] = module.params.get("name") - new_tags.update(module.params.get("tags") or {}) - - return ensure_ec2_tags( - client, module, nacl_id, tags=new_tags, purge_tags=purge_tags, retry_codes=["InvalidNetworkAclID.NotFound"] - ) - - -def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): - changed = False - rules = list() - for entry in param_rules: - rules.append(process_rule_entry(entry, Egress)) - if rules == aws_rules: - return changed - else: - removed_rules = [x for x in aws_rules if x not in rules] - if removed_rules: - params = dict() - for rule in removed_rules: - params["NetworkAclId"] = nacl_id - params["RuleNumber"] = rule["RuleNumber"] - params["Egress"] = Egress - delete_network_acl_entry(params, client, module) - changed = True - added_rules = [x for x in rules if x not in aws_rules] - if added_rules: - for rule in added_rules: - rule["NetworkAclId"] = nacl_id - create_network_acl_entry(rule, client, module) - changed = True - return changed - - -def is_ipv6(cidr): - return ":" in cidr - - -def process_rule_entry(entry, Egress): - params = dict() - params["RuleNumber"] = entry[0] - params["Protocol"] = str(PROTOCOL_NUMBERS[entry[1]]) - params["RuleAction"] = entry[2] - params["Egress"] = Egress - if is_ipv6(entry[3]): - params["Ipv6CidrBlock"] = entry[3] - else: - params["CidrBlock"] = entry[3] - if icmp_present(entry): - params["IcmpTypeCode"] = {"Type": int(entry[4]), "Code": int(entry[5])} - else: - if entry[6] or entry[7]: - params["PortRange"] = {"From": entry[6], "To": entry[7]} - return params - - -def restore_default_associations(assoc_ids, default_nacl_id, client, module): - if assoc_ids: - params = dict() - params["NetworkAclId"] = default_nacl_id[0] - for assoc_id in assoc_ids: - params["AssociationId"] = assoc_id - restore_default_acl_association(params, client, module) - return True - - -def construct_acl_entries(nacl, client, module): - for entry in module.params.get("ingress"): - params = process_rule_entry(entry, Egress=False) - params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] - create_network_acl_entry(params, client, module) - for rule in module.params.get("egress"): - params = process_rule_entry(rule, Egress=True) - params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"] - create_network_acl_entry(params, client, module) - - -# Module invocations -def setup_network_acl(client, module): - changed = False - nacl = describe_network_acl(client, module) - if not nacl["NetworkAcls"]: - tags = {} - if module.params.get("name"): - tags["Name"] = module.params.get("name") - tags.update(module.params.get("tags") or {}) - nacl = create_network_acl(module.params.get("vpc_id"), client, module, tags) - nacl_id = nacl["NetworkAcl"]["NetworkAclId"] - subnets = subnets_to_associate(nacl, client, module) - replace_network_acl_association(nacl_id, subnets, client, module) - construct_acl_entries(nacl, client, module) - changed = True - return changed, nacl["NetworkAcl"]["NetworkAclId"] - else: - changed = False - nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] - changed |= subnets_changed(nacl, client, module) - changed |= nacls_changed(nacl, client, module) - changed |= tags_changed(nacl_id, client, module) - return changed, nacl_id - - -def remove_network_acl(client, module): - changed = False - result = dict() - nacl = describe_network_acl(client, module) - if 
nacl["NetworkAcls"]: - nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"] - vpc_id = nacl["NetworkAcls"][0]["VpcId"] - associations = nacl["NetworkAcls"][0]["Associations"] - assoc_ids = [a["NetworkAclAssociationId"] for a in associations] - default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) - if not default_nacl_id: - result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} - return changed, result - if restore_default_associations(assoc_ids, default_nacl_id, client, module): - delete_network_acl(nacl_id, client, module) - changed = True - result[nacl_id] = "Successfully deleted" - return changed, result - if not assoc_ids: - delete_network_acl(nacl_id, client, module) - changed = True - result[nacl_id] = "Successfully deleted" - return changed, result - return changed, result - - -# Boto3 client methods -@AWSRetry.jittered_backoff() -def _create_network_acl(client, *args, **kwargs): - return client.create_network_acl(*args, **kwargs) - - -def create_network_acl(vpc_id, client, module, tags): - params = dict(VpcId=vpc_id) - if tags: - params["TagSpecifications"] = boto3_tag_specifications(tags, ["network-acl"]) - try: - if module.check_mode: - nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000")) - else: - nacl = _create_network_acl(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - return nacl - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _create_network_acl_entry(client, *args, **kwargs): - return client.create_network_acl_entry(*args, **kwargs) - - -def create_network_acl_entry(params, client, module): - try: - if not module.check_mode: - _create_network_acl_entry(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff() -def _delete_network_acl(client, *args, **kwargs): - return client.delete_network_acl(*args, **kwargs) - - -def delete_network_acl(nacl_id, client, module): - try: - if not module.check_mode: - _delete_network_acl(client, NetworkAclId=nacl_id) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _delete_network_acl_entry(client, *args, **kwargs): - return client.delete_network_acl_entry(*args, **kwargs) - - -def delete_network_acl_entry(params, client, module): - try: - if not module.check_mode: - _delete_network_acl_entry(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff() -def _describe_network_acls(client, **kwargs): - return client.describe_network_acls(**kwargs) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _describe_network_acls_retry_missing(client, **kwargs): - return client.describe_network_acls(**kwargs) - - -def describe_acl_associations(subnets, client, module): - if not subnets: - return [] - try: - results = _describe_network_acls_retry_missing( - client, Filters=[{"Name": "association.subnet-id", "Values": subnets}] - ) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - associations = results["NetworkAcls"][0]["Associations"] - return [a["NetworkAclAssociationId"] for a in associations if a["SubnetId"] in subnets] - - -def describe_network_acl(client, module): - try: - if module.params.get("nacl_id"): - nacl = _describe_network_acls( - client, Filters=[{"Name": "network-acl-id", "Values": 
[module.params.get("nacl_id")]}] - ) - else: - nacl = _describe_network_acls(client, Filters=[{"Name": "tag:Name", "Values": [module.params.get("name")]}]) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - return nacl - - -def find_acl_by_id(nacl_id, client, module): - try: - return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id]) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -def find_default_vpc_nacl(vpc_id, client, module): - try: - response = _describe_network_acls_retry_missing(client, Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - nacls = response["NetworkAcls"] - return [n["NetworkAclId"] for n in nacls if n["IsDefault"] is True] - - -def find_subnet_ids_by_nacl_id(nacl_id, client, module): - try: - results = _describe_network_acls_retry_missing( - client, Filters=[{"Name": "association.network-acl-id", "Values": [nacl_id]}] - ) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - if results["NetworkAcls"]: - associations = results["NetworkAcls"][0]["Associations"] - return [s["SubnetId"] for s in associations if s["SubnetId"]] - else: - return [] - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _replace_network_acl_association(client, *args, **kwargs): - return client.replace_network_acl_association(*args, **kwargs) - - -def replace_network_acl_association(nacl_id, subnets, client, module): - params = dict() - params["NetworkAclId"] = nacl_id - for association in describe_acl_associations(subnets, client, module): - params["AssociationId"] = association - try: - if not module.check_mode: - _replace_network_acl_association(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _replace_network_acl_entry(client, *args, **kwargs): - return client.replace_network_acl_entry(*args, **kwargs) - - -def replace_network_acl_entry(entries, Egress, nacl_id, client, module): - for entry in entries: - params = entry - params["NetworkAclId"] = nacl_id - try: - if not module.check_mode: - _replace_network_acl_entry(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]) -def _replace_network_acl_association(client, *args, **kwargs): - return client.replace_network_acl_association(*args, **kwargs) - - -def restore_default_acl_association(params, client, module): - try: - if not module.check_mode: - _replace_network_acl_association(client, **params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -@AWSRetry.jittered_backoff() -def _describe_subnets(client, *args, **kwargs): - return client.describe_subnets(*args, **kwargs) - - -def subnets_to_associate(nacl, client, module): - params = list(module.params.get("subnets")) - if not params: - return [] - all_found = [] - if any(x.startswith("subnet-") for x in params): - try: - subnets = _describe_subnets(client, Filters=[{"Name": "subnet-id", "Values": params}]) - all_found.extend(subnets.get("Subnets", [])) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - if len(params) != len(all_found): - try: - subnets = _describe_subnets(client, Filters=[{"Name": "tag:Name", "Values": params}]) - all_found.extend(subnets.get("Subnets", [])) 
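The two-step subnet lookup here (first by subnet ID, then by Name tag) can be reproduced with plain boto3. A sketch, assuming configured credentials and illustrative subnet identifiers:

# Sketch: resolve a mixed list of subnet IDs and Name tags to subnet IDs,
# mirroring subnets_to_associate() above.
import boto3

ec2 = boto3.client("ec2")
wanted = ["subnet-0123456789abcdef0", "prod-dmz-2"]

ids = [w for w in wanted if w.startswith("subnet-")]
names = [w for w in wanted if not w.startswith("subnet-")]
subnets = []
if ids:
    subnets += ec2.describe_subnets(Filters=[{"Name": "subnet-id", "Values": ids}])["Subnets"]
if names:
    subnets += ec2.describe_subnets(Filters=[{"Name": "tag:Name", "Values": names}])["Subnets"]
print(sorted({s["SubnetId"] for s in subnets}))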
- except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - return list(set(s["SubnetId"] for s in all_found if s.get("SubnetId"))) - - -def main(): - argument_spec = dict( - vpc_id=dict(), - name=dict(), - nacl_id=dict(), - subnets=dict(required=False, type="list", default=list(), elements="str"), - tags=dict(required=False, type="dict", aliases=["resource_tags"]), - purge_tags=dict(required=False, type="bool", default=True), - ingress=dict(required=False, type="list", default=list(), elements="list"), - egress=dict(required=False, type="list", default=list(), elements="list"), - state=dict(default="present", choices=["present", "absent"]), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[["name", "nacl_id"]], - required_if=[["state", "present", ["vpc_id"]]], - ) - - state = module.params.get("state").lower() - - client = module.client("ec2") - - invocations = { - "present": setup_network_acl, - "absent": remove_network_acl, - } - (changed, results) = invocations[state](client, module) - module.exit_json(changed=changed, nacl_id=results) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_nacl_info.py b/ec2_vpc_nacl_info.py deleted file mode 100644 index 40e0398b974..00000000000 --- a/ec2_vpc_nacl_info.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_vpc_nacl_info -version_added: 1.0.0 -short_description: Gather information about Network ACLs in an AWS VPC -description: - - Gather information about Network ACLs in an AWS VPC -author: - - "Brad Davidson (@brandond)" -options: - nacl_ids: - description: - - A list of Network ACL IDs to retrieve information about. - required: false - default: [] - aliases: [nacl_id] - type: list - elements: str - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See - U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter - names and values are case sensitive. - required: false - default: {} - type: dict -notes: - - By default, the module will return all Network ACLs. - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Gather information about all Network ACLs: -- name: Get All NACLs - community.aws.ec2_vpc_nacl_info: - region: us-west-2 - register: all_nacls - -# Retrieve default Network ACLs: -- name: Get Default NACLs - community.aws.ec2_vpc_nacl_info: - region: us-west-2 - filters: - 'default': 'true' - register: default_nacls -""" - -RETURN = r""" -nacls: - description: Returns an array of complex objects as described below. - returned: success - type: complex - contains: - nacl_id: - description: The ID of the Network Access Control List. - returned: always - type: str - vpc_id: - description: The ID of the VPC that the NACL is attached to. - returned: always - type: str - is_default: - description: True if the NACL is the default for its VPC. - returned: always - type: bool - tags: - description: A dict of tags associated with the NACL. - returned: always - type: dict - subnets: - description: A list of subnet IDs that are associated with the NACL. 
- returned: always - type: list - elements: str - ingress: - description: - - A list of NACL ingress rules with the following format. - - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])" - returned: always - type: list - elements: list - sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]] - egress: - description: - - A list of NACL egress rules with the following format. - - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])" - returned: always - type: list - elements: list - sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]] -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -# VPC-supported IANA protocol numbers -# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -PROTOCOL_NAMES = {"-1": "all", "1": "icmp", "6": "tcp", "17": "udp"} - - -def list_ec2_vpc_nacls(connection, module): - nacl_ids = module.params.get("nacl_ids") - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - - if nacl_ids is None: - nacl_ids = [] - - try: - nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters) - except is_boto3_error_code("InvalidNetworkAclID.NotFound"): - module.fail_json(msg="Unable to describe ACL. 
NetworkAcl does not exist") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Unable to describe network ACLs {nacl_ids}") - - # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_nacls = [] - for nacl in nacls["NetworkAcls"]: - snaked_nacls.append(camel_dict_to_snake_dict(nacl)) - - # Turn the boto3 result in to ansible friendly tag dictionary - for nacl in snaked_nacls: - if "tags" in nacl: - nacl["tags"] = boto3_tag_list_to_ansible_dict(nacl["tags"], "key", "value") - if "entries" in nacl: - nacl["egress"] = [ - nacl_entry_to_list(entry) - for entry in nacl["entries"] - if entry["rule_number"] < 32767 and entry["egress"] - ] - nacl["ingress"] = [ - nacl_entry_to_list(entry) - for entry in nacl["entries"] - if entry["rule_number"] < 32767 and not entry["egress"] - ] - del nacl["entries"] - if "associations" in nacl: - nacl["subnets"] = [a["subnet_id"] for a in nacl["associations"]] - del nacl["associations"] - if "network_acl_id" in nacl: - nacl["nacl_id"] = nacl["network_acl_id"] - del nacl["network_acl_id"] - - module.exit_json(nacls=snaked_nacls) - - -def nacl_entry_to_list(entry): - # entry list format - # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to] - elist = [] - - elist.append(entry["rule_number"]) - - if entry.get("protocol") in PROTOCOL_NAMES: - elist.append(PROTOCOL_NAMES[entry["protocol"]]) - else: - elist.append(entry.get("protocol")) - - elist.append(entry["rule_action"]) - - if entry.get("cidr_block"): - elist.append(entry["cidr_block"]) - elif entry.get("ipv6_cidr_block"): - elist.append(entry["ipv6_cidr_block"]) - else: - elist.append(None) - - elist = elist + [None, None, None, None] - - if entry["protocol"] in ("1", "58"): - elist[4] = entry.get("icmp_type_code", {}).get("type") - elist[5] = entry.get("icmp_type_code", {}).get("code") - - if entry["protocol"] not in ("1", "6", "17", "58"): - elist[6] = 0 - elist[7] = 65535 - elif "port_range" in entry: - elist[6] = entry["port_range"]["from"] - elist[7] = entry["port_range"]["to"] - - return elist - - -def main(): - argument_spec = dict( - nacl_ids=dict(default=[], type="list", aliases=["nacl_id"], elements="str"), - filters=dict(default={}, type="dict"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - - list_ec2_vpc_nacls(connection, module) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_peer.py b/ec2_vpc_peer.py deleted file mode 100644 index 465c9c852eb..00000000000 --- a/ec2_vpc_peer.py +++ /dev/null @@ -1,611 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_vpc_peer -short_description: create, delete, accept, and reject VPC peering connections between two VPCs. -version_added: 1.0.0 -description: - - Read the AWS documentation for VPC Peering Connections - U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html). -options: - vpc_id: - description: - - VPC id of the requesting VPC. - required: false - type: str - peering_id: - description: - - Peering connection id. - required: false - type: str - peer_region: - description: - - Region of the accepting VPC. 
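The peer_region and peer_owner_id options map directly onto the underlying CreateVpcPeeringConnection API call. A minimal boto3 sketch; the VPC IDs, account number, and regions are all illustrative:

# Sketch: cross-account, cross-region peering request in plain boto3.
import boto3

ec2 = boto3.client("ec2", region_name="ap-southeast-2")
resp = ec2.create_vpc_peering_connection(
    VpcId="vpc-12345678",        # requester VPC in this account/region
    PeerVpcId="vpc-87654321",    # accepter VPC
    PeerOwnerId="123456789012",  # omit for same-account peering
    PeerRegion="us-west-2",      # omit for same-region peering
)
print(resp["VpcPeeringConnection"]["VpcPeeringConnectionId"])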
- required: false - type: str - peer_vpc_id: - description: - - VPC id of the accepting VPC. - required: false - type: str - peer_owner_id: - description: - - The AWS account number for cross account peering. - required: false - type: str - state: - description: - - Create, delete, accept, reject a peering connection. - required: false - default: present - choices: ['present', 'absent', 'accept', 'reject'] - type: str - wait: - description: - - Wait for peering state changes to complete. - required: false - default: false - type: bool -notes: - - Support for I(purge_tags) was added in release 2.0.0. -author: - - Mike Mochan (@mmochan) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Complete example to create and accept a local peering connection. -- name: Create local account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-87654321 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Accept local VPC peering request - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - state: accept - register: action_peer - -# Complete example to delete a local peering connection. -- name: Create local account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-87654321 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: delete a local VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - state: absent - register: vpc_peer - - # Complete example to create and accept a cross account peering connection. -- name: Create cross account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789012 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Accept peering connection from remote account - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - profile: bot03_profile_for_cross_account - state: accept - register: vpc_peer - -# Complete example to create and accept an intra-region peering connection. -- name: Create intra-region VPC peering Connection - community.aws.ec2_vpc_peer: - region: us-east-1 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-87654321 - peer_region: us-west-2 - state: present - tags: - Name: Peering connection for us-east-1 VPC to us-west-2 VPC - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Accept peering connection from peer region - community.aws.ec2_vpc_peer: - region: us-west-2 - peering_id: "{{ vpc_peer.peering_id }}" - state: accept - register: vpc_peer - -# Complete example to create and reject a local peering connection. 
-- name: Create local account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-87654321 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Reject a local VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - state: reject - -# Complete example to create and accept a cross account peering connection. -- name: Create cross account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789012 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Accept a cross account VPC peering connection request - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - profile: bot03_profile_for_cross_account - state: accept - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - -# Complete example to create and reject a cross account peering connection. -- name: Create cross account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - vpc_id: vpc-12345678 - peer_vpc_id: vpc-12345678 - peer_owner_id: 123456789012 - state: present - tags: - Name: Peering connection for VPC 21 to VPC 22 - CostCode: CC1234 - Project: phoenix - register: vpc_peer - -- name: Reject a cross account VPC peering Connection - community.aws.ec2_vpc_peer: - region: ap-southeast-2 - peering_id: "{{ vpc_peer.peering_id }}" - profile: bot03_profile_for_cross_account - state: reject - -""" -RETURN = r""" -peering_id: - description: The id of the VPC peering connection created/deleted. - returned: always - type: str - sample: pcx-034223d7c0aec3cde -vpc_peering_connection: - description: The details of the VPC peering connection as returned by Boto3 (snake cased). - returned: success - type: complex - contains: - accepter_vpc_info: - description: Information about the VPC which accepted the connection. - returned: success - type: complex - contains: - cidr_block: - description: The primary CIDR for the VPC. - returned: when connection is in the accepted state. - type: str - example: '10.10.10.0/23' - cidr_block_set: - description: A list of all CIDRs for the VPC. - returned: when connection is in the accepted state. - type: complex - contains: - cidr_block: - description: A CIDR block used by the VPC. - returned: success - type: str - example: '10.10.10.0/23' - owner_id: - description: The AWS account that owns the VPC. - returned: success - type: str - example: 123456789012 - peering_options: - description: Additional peering configuration. - returned: when connection is in the accepted state. - type: dict - contains: - allow_dns_resolution_from_remote_vpc: - description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. - returned: success - type: bool - allow_egress_from_local_classic_link_to_remote_vpc: - description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. 
- returned: success - type: bool - allow_egress_from_local_vpc_to_remote_classic_link: - description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. - returned: success - type: bool - region: - description: The AWS region that the VPC is in. - returned: success - type: str - example: us-east-1 - vpc_id: - description: The ID of the VPC - returned: success - type: str - example: vpc-0123456789abcdef0 - requester_vpc_info: - description: Information about the VPC which requested the connection. - returned: success - type: complex - contains: - cidr_block: - description: The primary CIDR for the VPC. - returned: when connection is not in the deleted state. - type: str - example: '10.10.10.0/23' - cidr_block_set: - description: A list of all CIDRs for the VPC. - returned: when connection is not in the deleted state. - type: complex - contains: - cidr_block: - description: A CIDR block used by the VPC - returned: success - type: str - example: '10.10.10.0/23' - owner_id: - description: The AWS account that owns the VPC. - returned: success - type: str - example: 123456789012 - peering_options: - description: Additional peering configuration. - returned: when connection is not in the deleted state. - type: dict - contains: - allow_dns_resolution_from_remote_vpc: - description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. - returned: success - type: bool - allow_egress_from_local_classic_link_to_remote_vpc: - description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. - returned: success - type: bool - allow_egress_from_local_vpc_to_remote_classic_link: - description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. - returned: success - type: bool - region: - description: The AWS region that the VPC is in. - returned: success - type: str - example: us-east-1 - vpc_id: - description: The ID of the VPC - returned: success - type: str - example: vpc-0123456789abcdef0 - status: - description: Details of the current status of the connection. - returned: success - type: complex - contains: - code: - description: A short code describing the status of the connection. - returned: success - type: str - example: active - message: - description: Additional information about the status of the connection. - returned: success - type: str - example: Pending Acceptance by 123456789012 - tags: - description: Tags applied to the connection. - returned: success - type: dict - vpc_peering_connection_id: - description: The ID of the VPC peering connection. 
-    returned: success
-    type: str
-    example: "pcx-0123456789abcdef0"
-"""
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def wait_for_state(client, module, state, pcx_id):
-    waiter = client.get_waiter("vpc_peering_connection_exists")
-    peer_filter = {
-        "vpc-peering-connection-id": pcx_id,
-        "status-code": state,
-    }
-    try:
-        waiter.wait(Filters=ansible_dict_to_boto3_filter_list(peer_filter))
-    except botocore.exceptions.WaiterError as e:
-        module.fail_json_aws(e, "Failed to wait for state change")
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, "Unable to describe peering connection while waiting for state to change")
-
-
-def describe_peering_connections(params, client):
-    peer_filter = {
-        "requester-vpc-info.vpc-id": params["VpcId"],
-        "accepter-vpc-info.vpc-id": params["PeerVpcId"],
-    }
-    result = client.describe_vpc_peering_connections(
-        aws_retry=True,
-        Filters=ansible_dict_to_boto3_filter_list(peer_filter),
-    )
-    if result["VpcPeeringConnections"] == []:
-        # Try again with the VPC/Peer relationship reversed
-        peer_filter = {
-            "requester-vpc-info.vpc-id": params["PeerVpcId"],
-            "accepter-vpc-info.vpc-id": params["VpcId"],
-        }
-        result = client.describe_vpc_peering_connections(
-            aws_retry=True,
-            Filters=ansible_dict_to_boto3_filter_list(peer_filter),
-        )
-
-    return result
-
-
-def is_active(peering_conn):
-    return peering_conn["Status"]["Code"] == "active"
-
-
-def is_pending(peering_conn):
-    return peering_conn["Status"]["Code"] == "pending-acceptance"
-
-
-def create_peer_connection(client, module):
-    changed = False
-    params = dict()
-    params["VpcId"] = module.params.get("vpc_id")
-    params["PeerVpcId"] = module.params.get("peer_vpc_id")
-    if module.params.get("peer_region"):
-        params["PeerRegion"] = module.params.get("peer_region")
-    if module.params.get("peer_owner_id"):
-        params["PeerOwnerId"] = str(module.params.get("peer_owner_id"))
-    peering_conns = describe_peering_connections(params, client)
-    for peering_conn in peering_conns["VpcPeeringConnections"]:
-        pcx_id = peering_conn["VpcPeeringConnectionId"]
-        if ensure_ec2_tags(
-            client,
-            module,
-            pcx_id,
-            purge_tags=module.params.get("purge_tags"),
-            tags=module.params.get("tags"),
-        ):
-            changed = True
-        if is_active(peering_conn):
-            return (changed, peering_conn)
-        if is_pending(peering_conn):
-            return (changed, peering_conn)
-    try:
-        peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params)
-        pcx_id = peering_conn["VpcPeeringConnection"]["VpcPeeringConnectionId"]
-        if module.params.get("tags"):
-            # Once the minimum botocore version is bumped to > 1.17.24
-            # (hopefully community.aws 3.0.0) we can add the tags to the
-            # creation
parameters - add_ec2_tags( - client, - module, - pcx_id, - module.params.get("tags"), - retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], - ) - if module.params.get("wait"): - wait_for_state(client, module, "pending-acceptance", pcx_id) - changed = True - return (changed, peering_conn["VpcPeeringConnection"]) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - -def remove_peer_connection(client, module): - pcx_id = module.params.get("peering_id") - if pcx_id: - peering_conn = get_peering_connection_by_id(pcx_id, client, module) - else: - params = dict() - params["VpcId"] = module.params.get("vpc_id") - params["PeerVpcId"] = module.params.get("peer_vpc_id") - params["PeerRegion"] = module.params.get("peer_region") - if module.params.get("peer_owner_id"): - params["PeerOwnerId"] = str(module.params.get("peer_owner_id")) - peering_conn = describe_peering_connections(params, client)["VpcPeeringConnections"][0] - - if not peering_conn: - module.exit_json(changed=False) - else: - pcx_id = pcx_id or peering_conn["VpcPeeringConnectionId"] - - if peering_conn["Status"]["Code"] == "deleted": - module.exit_json(msg="Connection in deleted state.", changed=False, peering_id=pcx_id) - if peering_conn["Status"]["Code"] == "rejected": - module.exit_json( - msg="Connection has been rejected. State cannot be changed and will be removed automatically by AWS", - changed=False, - peering_id=pcx_id, - ) - - try: - params = dict() - params["VpcPeeringConnectionId"] = pcx_id - client.delete_vpc_peering_connection(aws_retry=True, **params) - if module.params.get("wait"): - wait_for_state(client, module, "deleted", pcx_id) - module.exit_json(changed=True, peering_id=pcx_id) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - - -def get_peering_connection_by_id(peering_id, client, module): - params = dict() - params["VpcPeeringConnectionIds"] = [peering_id] - try: - vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params) - return vpc_peering_connection["VpcPeeringConnections"][0] - except is_boto3_error_code("InvalidVpcPeeringConnectionId.Malformed") as e: - module.fail_json_aws(e, msg="Malformed connection ID") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error while describing peering connection by peering_id") - - -def accept_reject(state, client, module): - changed = False - params = dict() - peering_id = module.params.get("peering_id") - params["VpcPeeringConnectionId"] = peering_id - vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module) - peering_status = vpc_peering_connection["Status"]["Code"] - - if peering_status not in ["active", "rejected"]: - try: - if state == "accept": - client.accept_vpc_peering_connection(aws_retry=True, **params) - target_state = "active" - else: - client.reject_vpc_peering_connection(aws_retry=True, **params) - target_state = "rejected" - if module.params.get("tags"): - add_ec2_tags( - client, - module, - peering_id, - module.params.get("tags"), - retry_codes=["InvalidVpcPeeringConnectionID.NotFound"], - ) - changed = True - if module.params.get("wait"): - wait_for_state(client, module, target_state, peering_id) - except botocore.exceptions.ClientError as e: - module.fail_json(msg=str(e)) - if ensure_ec2_tags( - client, - module, - peering_id, - purge_tags=module.params.get("purge_tags"), - tags=module.params.get("tags"), - ): - changed = True 
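The wait behaviour above delegates to boto3's built-in EC2 waiter, as wait_for_state() shows. A standalone sketch of the same call, with an illustrative connection ID:

# Sketch: block until the peering connection reports a given status-code.
import boto3

ec2 = boto3.client("ec2")
waiter = ec2.get_waiter("vpc_peering_connection_exists")
waiter.wait(
    Filters=[
        {"Name": "vpc-peering-connection-id", "Values": ["pcx-0123456789abcdef0"]},
        {"Name": "status-code", "Values": ["active"]},
    ]
)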
-
-    # Reload peering connection info to return the latest state/params
-    vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module)
-    return (changed, vpc_peering_connection)
-
-
-def main():
-    argument_spec = dict(
-        vpc_id=dict(),
-        peer_vpc_id=dict(),
-        peer_region=dict(),
-        peering_id=dict(),
-        peer_owner_id=dict(),
-        tags=dict(required=False, type="dict", aliases=["resource_tags"]),
-        purge_tags=dict(default=True, type="bool"),
-        state=dict(default="present", choices=["present", "absent", "accept", "reject"]),
-        wait=dict(default=False, type="bool"),
-    )
-    required_if = [
-        ("state", "present", ["vpc_id", "peer_vpc_id"]),
-        ("state", "accept", ["peering_id"]),
-        ("state", "reject", ["peering_id"]),
-    ]
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)
-
-    state = module.params.get("state")
-    peering_id = module.params.get("peering_id")
-    vpc_id = module.params.get("vpc_id")
-    peer_vpc_id = module.params.get("peer_vpc_id")
-
-    try:
-        client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to connect to AWS")
-
-    if state == "present":
-        (changed, results) = create_peer_connection(client, module)
-    elif state == "absent":
-        if not peering_id and (not vpc_id or not peer_vpc_id):
-            module.fail_json(
-                msg="state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]"
-            )
-
-        remove_peer_connection(client, module)
-    else:
-        (changed, results) = accept_reject(state, client, module)
-
-    formatted_results = camel_dict_to_snake_dict(results)
-    # Turn the resource tags from boto3 into an ansible friendly tag dictionary
-    formatted_results["tags"] = boto3_tag_list_to_ansible_dict(formatted_results.get("tags", []))
-
-    module.exit_json(
-        changed=changed, vpc_peering_connection=formatted_results, peering_id=results["VpcPeeringConnectionId"]
-    )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/ec2_vpc_peering_info.py b/ec2_vpc_peering_info.py
deleted file mode 100644
index ee9fda32118..00000000000
--- a/ec2_vpc_peering_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
-module: ec2_vpc_peering_info
-short_description: Retrieves AWS VPC Peering details using AWS methods
-version_added: 1.0.0
-description:
-  - Gets various details related to AWS VPC peers.
-options:
-  peer_connection_ids:
-    description:
-      - List of specific VPC peer IDs to get details for.
-    type: list
-    elements: str
-  filters:
-    description:
-      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
-        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
-        for possible filters.
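Note that this filters dict is converted into the boto3 Filters shape by an amazon.aws helper before being passed to DescribeVpcPeeringConnections; a small sketch:

# Sketch: dict-of-filters to boto3 Filters list.
from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

print(ansible_dict_to_boto3_filter_list({"status-code": ["pending-acceptance"]}))
# -> [{"Name": "status-code", "Values": ["pending-acceptance"]}]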
- type: dict - default: {} -author: - - Karen Cheng (@Etherdaemon) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Simple example of listing all VPC Peers -- name: List all vpc peers - community.aws.ec2_vpc_peering_info: - region: ap-southeast-2 - register: all_vpc_peers - -- name: Debugging the result - ansible.builtin.debug: - msg: "{{ all_vpc_peers.result }}" - -- name: Get details on specific VPC peer - community.aws.ec2_vpc_peering_info: - peer_connection_ids: - - pcx-12345678 - - pcx-87654321 - region: ap-southeast-2 - register: all_vpc_peers - -- name: Get all vpc peers with specific filters - community.aws.ec2_vpc_peering_info: - region: ap-southeast-2 - filters: - status-code: ['pending-acceptance'] - register: pending_vpc_peers -""" - -RETURN = r""" -vpc_peering_connections: - description: Details of the matching VPC peering connections. - returned: success - type: list - contains: - accepter_vpc_info: - description: Information about the VPC which accepted the connection. - returned: success - type: complex - contains: - cidr_block: - description: The primary CIDR for the VPC. - returned: when connection is in the accepted state. - type: str - example: '10.10.10.0/23' - cidr_block_set: - description: A list of all CIDRs for the VPC. - returned: when connection is in the accepted state. - type: complex - contains: - cidr_block: - description: A CIDR block used by the VPC. - returned: success - type: str - example: '10.10.10.0/23' - owner_id: - description: The AWS account that owns the VPC. - returned: success - type: str - example: 123456789012 - peering_options: - description: Additional peering configuration. - returned: when connection is in the accepted state. - type: dict - contains: - allow_dns_resolution_from_remote_vpc: - description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. - returned: success - type: bool - allow_egress_from_local_classic_link_to_remote_vpc: - description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. - returned: success - type: bool - allow_egress_from_local_vpc_to_remote_classic_link: - description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. - returned: success - type: bool - region: - description: The AWS region that the VPC is in. - returned: success - type: str - example: us-east-1 - vpc_id: - description: The ID of the VPC - returned: success - type: str - example: vpc-0123456789abcdef0 - requester_vpc_info: - description: Information about the VPC which requested the connection. - returned: success - type: complex - contains: - cidr_block: - description: The primary CIDR for the VPC. - returned: when connection is not in the deleted state. - type: str - example: '10.10.10.0/23' - cidr_block_set: - description: A list of all CIDRs for the VPC. - returned: when connection is not in the deleted state. - type: complex - contains: - cidr_block: - description: A CIDR block used by the VPC - returned: success - type: str - example: '10.10.10.0/23' - owner_id: - description: The AWS account that owns the VPC. - returned: success - type: str - example: 123456789012 - peering_options: - description: Additional peering configuration. - returned: when connection is not in the deleted state. 
- type: dict - contains: - allow_dns_resolution_from_remote_vpc: - description: Indicates whether a VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC. - returned: success - type: bool - allow_egress_from_local_classic_link_to_remote_vpc: - description: Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection. - returned: success - type: bool - allow_egress_from_local_vpc_to_remote_classic_link: - description: Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. - returned: success - type: bool - region: - description: The AWS region that the VPC is in. - returned: success - type: str - example: us-east-1 - vpc_id: - description: The ID of the VPC - returned: success - type: str - example: vpc-0123456789abcdef0 - status: - description: Details of the current status of the connection. - returned: success - type: complex - contains: - code: - description: A short code describing the status of the connection. - returned: success - type: str - example: active - message: - description: Additional information about the status of the connection. - returned: success - type: str - example: Pending Acceptance by 123456789012 - tags: - description: Tags applied to the connection. - returned: success - type: dict - vpc_peering_connection_id: - description: The ID of the VPC peering connection. - returned: success - type: str - example: "pcx-0123456789abcdef0" - -result: - description: The result of the describe. - returned: success - type: list -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_vpc_peers(client, module): - params = dict() - params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - if module.params.get("peer_connection_ids"): - params["VpcPeeringConnectionIds"] = module.params.get("peer_connection_ids") - try: - result = client.describe_vpc_peering_connections(aws_retry=True, **params) - result = normalize_boto3_result(result) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe peering connections") - - return result["VpcPeeringConnections"] - - -def main(): - argument_spec = dict( - filters=dict(default=dict(), type="dict"), - peer_connection_ids=dict(default=None, type="list", elements="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - # Turn the boto3 result in to ansible friendly_snaked_names - results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, 
module)] - - # Turn the boto3 result in to ansible friendly tag dictionary - for peer in results: - peer["tags"] = boto3_tag_list_to_ansible_dict(peer.get("tags", [])) - - module.exit_json(result=results, vpc_peering_connections=results) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_vgw.py b/ec2_vpc_vgw.py deleted file mode 100644 index 3ca4d8013e3..00000000000 --- a/ec2_vpc_vgw.py +++ /dev/null @@ -1,534 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: ec2_vpc_vgw -short_description: Create and delete AWS VPN Virtual Gateways -version_added: 1.0.0 -description: - - Creates AWS VPN Virtual Gateways - - Deletes AWS VPN Virtual Gateways - - Attaches Virtual Gateways to VPCs - - Detaches Virtual Gateways from VPCs -options: - state: - description: - - C(present) to ensure resource is created. - - C(absent) to remove resource. - default: present - choices: [ "present", "absent"] - type: str - name: - description: - - Name of the VGW to be created or deleted. - type: str - type: - description: - - Type of the virtual gateway to be created. - choices: [ "ipsec.1" ] - default: "ipsec.1" - type: str - vpn_gateway_id: - description: - - VPN gateway ID of an existing virtual gateway. - type: str - vpc_id: - description: - - The ID of a VPC to attach or detach to the VGW. - type: str - asn: - description: - - The BGP ASN on the Amazon side. - type: int - wait_timeout: - description: - - Number of seconds to wait for status during VPC attach and detach. - default: 320 - type: int -notes: - - Support for I(purge_tags) was added in release 4.0.0. -author: - - Nick Aslanidis (@naslanidis) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create a new VGW attached to a specific VPC - community.aws.ec2_vpc_vgw: - state: present - region: ap-southeast-2 - profile: personal - vpc_id: vpc-12345678 - name: personal-testing - type: ipsec.1 - register: created_vgw - -- name: Create a new unattached VGW - community.aws.ec2_vpc_vgw: - state: present - region: ap-southeast-2 - profile: personal - name: personal-testing - type: ipsec.1 - tags: - environment: production - owner: ABC - register: created_vgw - -- name: Remove a new VGW using the name - community.aws.ec2_vpc_vgw: - state: absent - region: ap-southeast-2 - profile: personal - name: personal-testing - type: ipsec.1 - register: deleted_vgw - -- name: Remove a new VGW using the vpn_gateway_id - community.aws.ec2_vpc_vgw: - state: absent - region: ap-southeast-2 - profile: personal - vpn_gateway_id: vgw-3a9aa123 - register: deleted_vgw -""" - -RETURN = r""" -vgw: - description: A description of the VGW - returned: success - type: dict - contains: - id: - description: The ID of the VGW. - type: str - returned: success - example: "vgw-0123456789abcdef0" - state: - description: The state of the VGW. - type: str - returned: success - example: "available" - tags: - description: A dictionary representing the tags attached to the VGW - type: dict - returned: success - example: { "Name": "ansible-test-ec2-vpc-vgw" } - type: - description: The type of VPN connection the virtual private gateway supports. - type: str - returned: success - example: "ipsec.1" - vpc_id: - description: The ID of the VPC to which the VGW is attached. 
-    type: str
-    returned: success
-    example: vpc-123456789abcdef01
-"""
-
-import time
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes',
-# so we need to look at the message to tell the difference.
-class VGWRetry(AWSRetry):
-    @staticmethod
-    def status_code_from_exception(error):
-        return (
-            error.response["Error"]["Code"],
-            error.response["Error"]["Message"],
-        )
-
-    @staticmethod
-    def found(response_code, catch_extra_error_codes=None):
-        retry_on = ["The maximum number of mutating objects has been reached."]
-
-        if catch_extra_error_codes:
-            retry_on.extend(catch_extra_error_codes)
-        if not isinstance(response_code, tuple):
-            response_code = (response_code,)
-
-        for code in response_code:
-            if code in retry_on or AWSRetry.found(code, catch_extra_error_codes):
-                return True
-
-        return False
-
-
-def get_vgw_info(vgws):
-    if not isinstance(vgws, list):
-        return
-
-    for vgw in vgws:
-        vgw_info = {
-            "id": vgw["VpnGatewayId"],
-            "type": vgw["Type"],
-            "state": vgw["State"],
-            "vpc_id": None,
-            "tags": dict(),
-        }
-
-        if vgw["Tags"]:
-            vgw_info["tags"] = boto3_tag_list_to_ansible_dict(vgw["Tags"])
-
-        if len(vgw["VpcAttachments"]) != 0 and vgw["VpcAttachments"][0]["State"] == "attached":
-            vgw_info["vpc_id"] = vgw["VpcAttachments"][0]["VpcId"]
-
-        return vgw_info
-
-
-def wait_for_status(client, module, vpn_gateway_id, status):
-    polling_increment_secs = 15
-    max_retries = module.params.get("wait_timeout") // polling_increment_secs
-    status_achieved = False
-
-    for x in range(0, max_retries):
-        try:
-            response = find_vgw(client, module, vpn_gateway_id)
-            if response[0]["VpcAttachments"][0]["State"] == status:
-                status_achieved = True
-                break
-            else:
-                time.sleep(polling_increment_secs)
-        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg="Failure while waiting for status update")
-
-    result = response
-    return status_achieved, result
-
-
-def attach_vgw(client, module, vpn_gateway_id):
-    params = dict()
-    params["VpcId"] = module.params.get("vpc_id")
-
-    try:
-        # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
-        # as available several seconds before actually permitting a new attachment.
-        # So we catch and retry that error.
See https://github.com/ansible/ansible/issues/53185 - response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=["InvalidParameterValue"])( - client.attach_vpn_gateway - )(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach VPC") - - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "attached") - if not status_achieved: - module.fail_json(msg="Error waiting for vpc to attach to vgw - please check the AWS console") - - result = response - return result - - -def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): - params = dict() - params["VpcId"] = module.params.get("vpc_id") - - try: - if vpc_id: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True) - else: - response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"], aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to detach gateway") - - status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "detached") - if not status_achieved: - module.fail_json(msg="Error waiting for vpc to detach from vgw - please check the AWS console") - - result = response - return result - - -def create_vgw(client, module): - params = dict() - params["Type"] = module.params.get("type") - tags = module.params.get("tags") or {} - tags["Name"] = module.params.get("name") - params["TagSpecifications"] = boto3_tag_specifications(tags, ["vpn-gateway"]) - if module.params.get("asn"): - params["AmazonSideAsn"] = module.params.get("asn") - - try: - response = client.create_vpn_gateway(aws_retry=True, **params) - get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws( - e, msg=f"Failed to wait for Vpn Gateway {response['VpnGateway']['VpnGatewayId']} to be available" - ) - except is_boto3_error_code("VpnGatewayLimitExceeded") as e: - module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to create gateway") - - result = response - return result - - -def delete_vgw(client, module, vpn_gateway_id): - try: - response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to delete gateway") - - # return the deleted VpnGatewayId as this is not included in the above response - result = vpn_gateway_id - return result - - -def find_vpc(client, module): - params = dict() - params["vpc_id"] = module.params.get("vpc_id") - - if params["vpc_id"]: - try: - response = client.describe_vpcs(VpcIds=[params["vpc_id"]], aws_retry=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe VPC") - - result = response - return result - - -def find_vgw(client, module, vpn_gateway_id=None): - params = dict() - if vpn_gateway_id: - params["VpnGatewayIds"] = vpn_gateway_id - else: - params["Filters"] = [ - {"Name": "type", "Values": [module.params.get("type")]}, - {"Name": "tag:Name", "Values": [module.params.get("name")]}, - ] - if 
module.params.get("state") == "present": - params["Filters"].append({"Name": "state", "Values": ["pending", "available"]}) - try: - response = client.describe_vpn_gateways(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe gateway using filters") - - return sorted(response["VpnGateways"], key=lambda k: k["VpnGatewayId"]) - - -def ensure_vgw_present(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been - # found and we will not create another vgw. - - changed = False - params = dict() - result = dict() - params["Name"] = module.params.get("name") - params["VpcId"] = module.params.get("vpc_id") - params["Type"] = module.params.get("type") - params["Tags"] = module.params.get("tags") - params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") - - # check that the vpc_id exists. If not, an exception is thrown - if params["VpcId"]: - vpc = find_vpc(client, module) - - # check if a gateway matching our module args already exists - existing_vgw = find_vgw(client, module) - - if existing_vgw != []: - vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] - desired_tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - if desired_tags is None: - desired_tags = dict() - purge_tags = False - tags = dict(Name=module.params.get("name")) - tags.update(desired_tags) - changed = ensure_ec2_tags( - client, module, vpn_gateway_id, resource_type="vpn-gateway", tags=tags, purge_tags=purge_tags - ) - - # if a vpc_id was provided, check if it exists and if it's attached - if params["VpcId"]: - current_vpc_attachments = existing_vgw[0]["VpcAttachments"] - - if current_vpc_attachments != [] and current_vpc_attachments[0]["State"] == "attached": - if ( - current_vpc_attachments[0]["VpcId"] != params["VpcId"] - or current_vpc_attachments[0]["State"] != "attached" - ): - # detach the existing vpc from the virtual gateway - vpc_to_detach = current_vpc_attachments[0]["VpcId"] - detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - get_waiter(client, "vpn_gateway_detached").wait(VpnGatewayIds=[vpn_gateway_id]) - attached_vgw = attach_vgw(client, module, vpn_gateway_id) - changed = True - else: - # attach the vgw to the supplied vpc - attached_vgw = attach_vgw(client, module, vpn_gateway_id) - changed = True - - # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it. - else: - existing_vgw = find_vgw(client, module, [vpn_gateway_id]) - - if existing_vgw[0]["VpcAttachments"] != []: - if existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": - # detach the vpc from the vgw - vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] - detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - changed = True - - else: - # create a new vgw - new_vgw = create_vgw(client, module) - changed = True - vpn_gateway_id = new_vgw["VpnGateway"]["VpnGatewayId"] - - # if a vpc-id was supplied, attempt to attach it to the vgw - if params["VpcId"]: - attached_vgw = attach_vgw(client, module, vpn_gateway_id) - changed = True - - # return current state of the vgw - vgw = find_vgw(client, module, [vpn_gateway_id]) - result = get_vgw_info(vgw) - return changed, result - - -def ensure_vgw_absent(client, module): - # If an existing vgw name and type matches our args, then a match is considered to have been - # found and we will take steps to delete it. 
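The ensure_vgw_present() flow above boils down to "find a VGW by Name tag and type, otherwise create one, then reconcile tags and the VPC attachment". A condensed sketch of that find-or-create core using plain boto3 (the helper name is mine; error handling, waiters, and tag reconciliation from the module are omitted):

import boto3


def find_or_create_vgw(ec2, name, gw_type="ipsec.1"):
    """Return the ID of a VGW tagged Name=<name>, creating it if absent."""
    found = ec2.describe_vpn_gateways(
        Filters=[
            {"Name": "type", "Values": [gw_type]},
            {"Name": "tag:Name", "Values": [name]},
            {"Name": "state", "Values": ["pending", "available"]},
        ]
    )["VpnGateways"]
    if found:
        return found[0]["VpnGatewayId"]
    created = ec2.create_vpn_gateway(
        Type=gw_type,
        TagSpecifications=[{"ResourceType": "vpn-gateway", "Tags": [{"Key": "Name", "Value": name}]}],
    )
    return created["VpnGateway"]["VpnGatewayId"]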
- - changed = False - params = dict() - result = dict() - params["Name"] = module.params.get("name") - params["VpcId"] = module.params.get("vpc_id") - params["Type"] = module.params.get("type") - params["Tags"] = module.params.get("tags") - params["VpnGatewayIds"] = module.params.get("vpn_gateway_id") - - # check if a gateway matching our module args already exists - if params["VpnGatewayIds"]: - existing_vgw_with_id = find_vgw(client, module, [params["VpnGatewayIds"]]) - if existing_vgw_with_id != [] and existing_vgw_with_id[0]["State"] != "deleted": - existing_vgw = existing_vgw_with_id - if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": - if params["VpcId"]: - if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: - module.fail_json( - msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" - ) - - else: - # detach the vpc from the vgw - detach_vgw(client, module, params["VpnGatewayIds"], params["VpcId"]) - deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) - changed = True - - else: - # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] - detach_vgw(client, module, params["VpnGatewayIds"], vpc_to_detach) - deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) - changed = True - - else: - # no vpc's are attached so attempt to delete the vgw - deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"]) - changed = True - - else: - changed = False - deleted_vgw = "Nothing to do" - - else: - # Check that a name and type argument has been supplied if no vgw-id - if not module.params.get("name") or not module.params.get("type"): - module.fail_json(msg="A name and type is required when no vgw-id and a status of 'absent' is supplied") - - existing_vgw = find_vgw(client, module) - if existing_vgw != [] and existing_vgw[0]["State"] != "deleted": - vpn_gateway_id = existing_vgw[0]["VpnGatewayId"] - if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached": - if params["VpcId"]: - if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]: - module.fail_json( - msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console" - ) - - else: - # detach the vpc from the vgw - detach_vgw(client, module, vpn_gateway_id, params["VpcId"]) - - # now that the vpc has been detached, delete the vgw - deleted_vgw = delete_vgw(client, module, vpn_gateway_id) - changed = True - - else: - # attempt to detach any attached vpcs - vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"] - detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) - changed = True - - # now that the vpc has been detached, delete the vgw - deleted_vgw = delete_vgw(client, module, vpn_gateway_id) - - else: - # no vpc's are attached so attempt to delete the vgw - deleted_vgw = delete_vgw(client, module, vpn_gateway_id) - changed = True - - else: - changed = False - deleted_vgw = None - - result = deleted_vgw - return changed, result - - -def main(): - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(), - vpn_gateway_id=dict(), - vpc_id=dict(), - asn=dict(type="int"), - wait_timeout=dict(type="int", default=320), - type=dict(default="ipsec.1", choices=["ipsec.1"]), - tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - ) 
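VGWRetry above retries on the error *message* rather than the error code, because AWS reuses VpnGatewayLimitExceeded for transient "too many concurrent changes" failures. A minimal sketch of the same idea as a plain decorator (the retryable message comes from the class above; the decorator name and delays are illustrative):

import random
import time
from functools import wraps

from botocore.exceptions import ClientError

RETRYABLE_MESSAGES = ("The maximum number of mutating objects has been reached.",)


def retry_on_message(retries=5, base_delay=2):
    """Retry a boto3 call with jittered exponential backoff when the error message is retryable."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except ClientError as e:
                    message = e.response["Error"]["Message"]
                    if attempt == retries - 1 or not any(m in message for m in RETRYABLE_MESSAGES):
                        raise
                    time.sleep(random.uniform(0, base_delay * 2**attempt))
        return wrapper
    return decorator


# attach = retry_on_message()(ec2.attach_vpn_gateway)
# attach(VpnGatewayId="vgw-0123456789abcdef0", VpcId="vpc-12345678")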
- module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["name"]]]) - - state = module.params.get("state").lower() - - client = module.client("ec2", retry_decorator=VGWRetry.jittered_backoff(retries=10)) - - if state == "present": - (changed, results) = ensure_vgw_present(client, module) - else: - (changed, results) = ensure_vgw_absent(client, module) - module.exit_json(changed=changed, vgw=results) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_vgw_info.py b/ec2_vpc_vgw_info.py deleted file mode 100644 index d8bfcc78ecb..00000000000 --- a/ec2_vpc_vgw_info.py +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_vpc_vgw_info -version_added: 1.0.0 -short_description: Gather information about virtual gateways in AWS -description: - - Gather information about virtual gateways (VGWs) in AWS. -options: - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters. - type: dict - default: {} - vpn_gateway_ids: - description: - - Get details of a specific Virtual Gateway ID. - type: list - elements: str -author: - - "Nick Aslanidis (@naslanidis)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# # Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all virtual gateways for an account or profile - community.aws.ec2_vpc_vgw_info: - region: ap-southeast-2 - profile: production - register: vgw_info - -- name: Gather information about a filtered list of Virtual Gateways - community.aws.ec2_vpc_vgw_info: - region: ap-southeast-2 - profile: production - filters: - "tag:Name": "main-virt-gateway" - register: vgw_info - -- name: Gather information about a specific virtual gateway by VpnGatewayIds - community.aws.ec2_vpc_vgw_info: - region: ap-southeast-2 - profile: production - vpn_gateway_ids: vgw-c432f6a7 - register: vgw_info -""" - -RETURN = r""" -virtual_gateways: - description: The virtual gateways for the account. - returned: always - type: list - elements: dict - contains: - vpn_gateway_id: - description: The ID of the VGW. - type: str - returned: success - example: "vgw-0123456789abcdef0" - state: - description: The current state of the VGW. - type: str - returned: success - example: "available" - type: - description: The type of VPN connection the VGW supports. - type: str - returned: success - example: "ipsec.1" - vpc_attachments: - description: A description of the attachment of VPCs to the VGW. - type: list - elements: dict - returned: success - contains: - state: - description: The current state of the attachment. - type: str - returned: success - example: available - vpc_id: - description: The ID of the VPC. - type: str - returned: success - example: vpc-12345678901234567 - tags: - description: - - A list of dictionaries representing the tags attached to the VGW. - - Represents the same details as I(resource_tags). - type: list - elements: dict - returned: success - contains: - key: - description: The key of the tag. - type: str - returned: success - example: MyKey - value: - description: The value of the tag. 
- type: str - returned: success - example: MyValue - resource_tags: - description: - - A dictionary representing the tags attached to the VGW. - - Represents the same details as I(tags). - type: dict - returned: success - example: {"MyKey": "MyValue"} -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_virtual_gateway_info(virtual_gateway): - tags = virtual_gateway.get("Tags", []) - resource_tags = boto3_tag_list_to_ansible_dict(tags) - virtual_gateway_info = dict( - VpnGatewayId=virtual_gateway["VpnGatewayId"], - State=virtual_gateway["State"], - Type=virtual_gateway["Type"], - VpcAttachments=virtual_gateway["VpcAttachments"], - Tags=tags, - ResourceTags=resource_tags, - ) - return virtual_gateway_info - - -def list_virtual_gateways(client, module): - params = dict() - - params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - - if module.params.get("vpn_gateway_ids"): - params["VpnGatewayIds"] = module.params.get("vpn_gateway_ids") - - try: - all_virtual_gateways = client.describe_vpn_gateways(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to list gateways") - - return [ - camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=["ResourceTags"]) - for vgw in all_virtual_gateways["VpnGateways"] - ] - - -def main(): - argument_spec = dict( - filters=dict(type="dict", default=dict()), - vpn_gateway_ids=dict(type="list", default=None, elements="str"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - try: - connection = module.client("ec2") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - # call your function here - results = list_virtual_gateways(connection, module) - - module.exit_json(virtual_gateways=results) - - -if __name__ == "__main__": - main() diff --git a/ec2_vpc_vpn.py b/ec2_vpc_vpn.py deleted file mode 100644 index abc97f796b7..00000000000 --- a/ec2_vpc_vpn.py +++ /dev/null @@ -1,866 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ec2_vpc_vpn -version_added: 1.0.0 -short_description: Create, modify, and delete EC2 VPN connections -description: - - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters - option or specifying the VPN connection identifier. -author: - - "Sloane Hertel (@s-hertel)" -options: - state: - description: - - The desired state of the VPN connection. - choices: ['present', 'absent'] - default: present - required: false - type: str - customer_gateway_id: - description: - - The ID of the customer gateway. - type: str - connection_type: - description: - - The type of VPN connection. - - At this time only C(ipsec.1) is supported. 
-    default: ipsec.1
-    type: str
-  vpn_gateway_id:
-    description:
-      - The ID of the virtual private gateway.
-      - Mutually exclusive with I(transit_gateway_id).
-    type: str
-  vpn_connection_id:
-    description:
-      - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match.
-    type: str
-  static_only:
-    description:
-      - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.
-    default: False
-    type: bool
-    required: false
-  transit_gateway_id:
-    description:
-      - The ID of the transit gateway.
-      - Mutually exclusive with I(vpn_gateway_id).
-    type: str
-    version_added: 6.2.0
-  tunnel_options:
-    description:
-      - An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr)
-        and/or I(PreSharedKey) keys with appropriate string values. AWS defaults will apply in absence of either of
-        the aforementioned keys.
-    required: false
-    type: list
-    elements: dict
-    default: []
-    suboptions:
-      TunnelInsideCidr:
-        type: str
-        description: The range of inside IP addresses for the tunnel.
-      PreSharedKey:
-        type: str
-        description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.
-  filters:
-    description:
-      - An alternative to using I(vpn_connection_id). If multiple matches are found, vpn_connection_id is required.
-        If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN
-        that correlates. For example, if the filter I(cidr) is C(['194.168.2.0/24', '192.168.2.0/24']) and the VPN route only has the
-        destination cidr block of C(192.168.2.0/24) it will be found with this filter (assuming there are not multiple
-        VPNs that are matched). As another example, if the filter I(vpn) is equal to C(['vpn-ccf7e7ad', 'vpn-cb0ae2a2']) and one
-        of the VPNs has the state deleted (exists but is unmodifiable) and the other exists and is not deleted,
-        it will be found via this filter. See examples.
-    suboptions:
-      cgw-config:
-        description:
-          - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings.
-      static-routes-only:
-        description:
-          - The type of routing; C(true) or C(false).
-      cidr:
-        description:
-          - The destination cidr of the VPN's route as a string or a list of those strings.
-      bgp:
-        description:
-          - The BGP ASN number associated with a BGP device. Only works if the connection is attached.
            This filtering option is currently not working.
-      vpn:
-        description:
-          - The VPN connection id as a string or a list of those strings.
-      vgw:
-        description:
-          - The virtual private gateway as a string or a list of those strings.
-      tag-keys:
-        description:
-          - The key of a tag as a string or a list of those strings.
-      tag-values:
-        description:
-          - The value of a tag as a string or a list of those strings.
-      tags:
-        description:
-          - A dict of key value pairs.
-      cgw:
-        description:
-          - The customer gateway id as a string or a list of those strings.
-    type: dict
-    default: {}
-  routes:
-    description:
-      - Routes to add to the connection.
-    type: list
-    elements: str
-    default: []
-  purge_routes:
-    description:
-      - Whether or not to delete VPN connection routes that are not specified in the task.
-    type: bool
-    default: false
-  wait_timeout:
-    description:
-      - How long, in seconds, before wait gives up.
- default: 600 - type: int - required: false - delay: - description: - - The time, in seconds, to wait before checking operation again. - required: false - type: int - default: 15 -extends_documentation_fragment: - - amazon.aws.region.modules - - amazon.aws.common.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: create a VPN connection with vpn_gateway_id - community.aws.ec2_vpc_vpn: - state: present - vpn_gateway_id: vgw-XXXXXXXX - customer_gateway_id: cgw-XXXXXXXX - -- name: Attach a vpn connection to transit gateway - community.aws.ec2_vpc_vpn: - state: present - transit_gateway_id: tgw-XXXXXXXX - customer_gateway_id: cgw-XXXXXXXX - -- name: modify VPN connection tags - community.aws.ec2_vpc_vpn: - state: present - vpn_connection_id: vpn-XXXXXXXX - tags: - Name: ansible-tag-1 - Other: ansible-tag-2 - -- name: delete a connection - community.aws.ec2_vpc_vpn: - vpn_connection_id: vpn-XXXXXXXX - state: absent - -- name: modify VPN tags (identifying VPN by filters) - community.aws.ec2_vpc_vpn: - state: present - filters: - cidr: 194.168.1.0/24 - tag-keys: - - Ansible - - Other - tags: - New: Tag - purge_tags: true - static_only: true - -- name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only - community.aws.ec2_vpc_vpn: - state: present - filters: - vpn: vpn-XXXXXXXX - static_only: true - tunnel_options: - - - TunnelInsideCidr: '169.254.100.1/30' - - - TunnelInsideCidr: '169.254.100.5/30' - -- name: add routes and remove any preexisting ones - community.aws.ec2_vpc_vpn: - state: present - filters: - vpn: vpn-XXXXXXXX - routes: - - 195.168.2.0/24 - - 196.168.2.0/24 - purge_routes: true - -- name: remove all routes - community.aws.ec2_vpc_vpn: - state: present - vpn_connection_id: vpn-XXXXXXXX - routes: [] - purge_routes: true - -- name: delete a VPN identified by filters - community.aws.ec2_vpc_vpn: - state: absent - filters: - tags: - Ansible: Tag -""" - -RETURN = r""" -changed: - description: If the VPN connection has changed. - type: bool - returned: always - sample: - changed: true -customer_gateway_configuration: - description: The configuration of the VPN connection. - returned: I(state=present) - type: str -customer_gateway_id: - description: The customer gateway connected via the connection. - type: str - returned: I(state=present) - sample: - customer_gateway_id: cgw-1220c87b -vpn_gateway_id: - description: The virtual private gateway connected via the connection. - type: str - returned: I(state=present) - sample: - vpn_gateway_id: vgw-cb0ae2a2 -transit_gateway_id: - description: The transit gateway id to which the vpn connection can be attached. - type: str - returned: I(state=present) - sample: - transit_gateway_id: tgw-cb0ae2a2 -options: - description: The VPN connection options (currently only containing static_routes_only). - type: complex - returned: I(state=present) - contains: - static_routes_only: - description: If the VPN connection only allows static routes. - returned: I(state=present) - type: str - sample: - static_routes_only: true -routes: - description: The routes of the VPN connection. - type: list - returned: I(state=present) - sample: - routes: [{ - 'destination_cidr_block': '192.168.1.0/24', - 'state': 'available' - }] -state: - description: The status of the VPN connection. - type: str - returned: I(state=present) - sample: - state: available -tags: - description: The tags associated with the connection. 
-  type: dict
-  returned: I(state=present)
-  sample:
-    tags:
-      name: ansible-test
-      other: tag
-type:
-  description: The type of VPN connection (currently only ipsec.1 is available).
-  type: str
-  returned: I(state=present)
-  sample:
-    type: "ipsec.1"
-vgw_telemetry:
-  type: list
-  returned: I(state=present)
-  description: The telemetry for the VPN tunnel.
-  sample:
-    vgw_telemetry: [{
-      'outside_ip_address': 'string',
-      'status': 'up',
-      'last_status_change': 'datetime(2015, 1, 1)',
-      'status_message': 'string',
-      'accepted_route_count': 123
-    }]
-vpn_connection_id:
-  description: The identifier for the VPN connection.
-  type: str
-  returned: I(state=present)
-  sample:
-    vpn_connection_id: vpn-781e0e19
-"""
-
-try:
-    from botocore.exceptions import BotoCoreError
-    from botocore.exceptions import ClientError
-    from botocore.exceptions import WaiterError
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-class VPNConnectionException(Exception):
-    def __init__(self, msg, exception=None):
-        super(VPNConnectionException, self).__init__(msg)
-        self.msg = msg
-        self.exception = exception
-
-
-# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes';
-# we need to look at the message to tell the difference.
-class VPNRetry(AWSRetry):
-    @staticmethod
-    def status_code_from_exception(error):
-        return (
-            error.response["Error"]["Code"],
-            error.response["Error"]["Message"],
-        )
-
-    @staticmethod
-    def found(response_code, catch_extra_error_codes=None):
-        retry_on = ["The maximum number of mutating objects has been reached."]
-
-        if catch_extra_error_codes:
-            retry_on.extend(catch_extra_error_codes)
-        if not isinstance(response_code, tuple):
-            response_code = (response_code,)
-
-        for code in response_code:
-            # check each element of the (code, message) tuple individually,
-            # against both the VPN-specific retryable messages and the standard codes
-            if code in retry_on or super().found(code, catch_extra_error_codes):
-                return True
-
-        return False
-
-
-def find_connection(connection, module_params, vpn_connection_id=None):
-    """Looks for a unique VPN connection.
Uses find_connection_response() to return the connection found, None,
-    or raise an error if there were multiple viable connections."""
-
-    filters = module_params.get("filters")
-
-    # vpn_connection_id may be provided via module option; takes precedence over any filter values
-    if not vpn_connection_id and module_params.get("vpn_connection_id"):
-        vpn_connection_id = module_params.get("vpn_connection_id")
-
-    if not isinstance(vpn_connection_id, list) and vpn_connection_id:
-        vpn_connection_id = [to_text(vpn_connection_id)]
-    elif isinstance(vpn_connection_id, list):
-        vpn_connection_id = [to_text(connection) for connection in vpn_connection_id]
-
-    formatted_filter = []
-    # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier
-    if not vpn_connection_id:
-        formatted_filter = create_filter(module_params, provided_filters=filters)
-
-    # see if there is a unique matching connection
-    try:
-        if vpn_connection_id:
-            existing_conn = connection.describe_vpn_connections(
-                aws_retry=True, VpnConnectionIds=vpn_connection_id, Filters=formatted_filter
-            )
-        else:
-            existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter)
-    except (BotoCoreError, ClientError) as e:
-        raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e)
-
-    return find_connection_response(connections=existing_conn)
-
-
-def add_routes(connection, vpn_connection_id, routes_to_add):
-    for route in routes_to_add:
-        try:
-            connection.create_vpn_connection_route(
-                aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route
-            )
-        except (BotoCoreError, ClientError) as e:
-            raise VPNConnectionException(
-                msg=f"Failed while adding route {route} to the VPN connection {vpn_connection_id}.",
-                exception=e,
-            )
-
-
-def remove_routes(connection, vpn_connection_id, routes_to_remove):
-    for route in routes_to_remove:
-        try:
-            connection.delete_vpn_connection_route(
-                aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route
-            )
-        except (BotoCoreError, ClientError) as e:
-            raise VPNConnectionException(
-                msg=f"Failed to remove route {route} from the VPN connection {vpn_connection_id}.",
-                exception=e,
-            )
-
-
-def create_filter(module_params, provided_filters):
-    """Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task"""
-    boto3ify_filter = {
-        "cgw-config": "customer-gateway-configuration",
-        "static-routes-only": "option.static-routes-only",
-        "cidr": "route.destination-cidr-block",
-        "bgp": "bgp-asn",
-        "vpn": "vpn-connection-id",
-        "vgw": "vpn-gateway-id",
-        "tag-keys": "tag-key",
-        "tag-values": "tag-value",
-        "tags": "tag",
-        "cgw": "customer-gateway-id",
-    }
-
-    # unmodifiable options and their filter name counterpart
-    param_to_filter = {
-        "customer_gateway_id": "customer-gateway-id",
-        "vpn_gateway_id": "vpn-gateway-id",
-        "transit_gateway_id": "transit-gateway-id",
-        "vpn_connection_id": "vpn-connection-id",
-    }
-
-    flat_filter_dict = {}
-    formatted_filter = []
-
-    for raw_param in dict(provided_filters):
-        # fix filter names to be recognized by boto3
-        if raw_param in boto3ify_filter:
-            param = boto3ify_filter[raw_param]
-            provided_filters[param] = provided_filters.pop(raw_param)
-        elif raw_param in list(boto3ify_filter.values()):
-            # the filter was already given by its boto3 name
-            param = raw_param
-        else:
-            raise VPNConnectionException(msg=f"{raw_param} is not a valid filter.")
-
-        # reformat filters with special formats
-        if param == "tag":
-            for key in
provided_filters[param]: - formatted_key = "tag:" + key - if isinstance(provided_filters[param][key], list): - flat_filter_dict[formatted_key] = str(provided_filters[param][key]) - else: - flat_filter_dict[formatted_key] = [str(provided_filters[param][key])] - elif param == "option.static-routes-only": - flat_filter_dict[param] = [str(provided_filters[param]).lower()] - else: - if isinstance(provided_filters[param], list): - flat_filter_dict[param] = provided_filters[param] - else: - flat_filter_dict[param] = [str(provided_filters[param])] - - # if customer_gateway, vpn_gateway, or vpn_connection was specified in the task but not the filter, add it - for param in param_to_filter: - if param_to_filter[param] not in flat_filter_dict and module_params.get(param): - flat_filter_dict[param_to_filter[param]] = [module_params.get(param)] - - # change the flat dict into something boto3 will understand - formatted_filter = [{"Name": key, "Values": value} for key, value in flat_filter_dict.items()] - - return formatted_filter - - -def find_connection_response(connections=None): - """Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found, - returns None if the connection does not exist, raise an error if multiple matches are found.""" - - # Found no connections - if not connections or "VpnConnections" not in connections: - return None - - # Too many results - elif connections and len(connections["VpnConnections"]) > 1: - viable = [] - for each in connections["VpnConnections"]: - # deleted connections are not modifiable - if each["State"] not in ("deleted", "deleting"): - viable.append(each) - if len(viable) == 1: - # Found one viable result; return unique match - return viable[0] - elif len(viable) == 0: - # Found a result but it was deleted already; since there was only one viable result create a new one - return None - else: - raise VPNConnectionException( - msg=( - "More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters." - ) - ) - - # Found unique match - elif connections and len(connections["VpnConnections"]) == 1: - # deleted connections are not modifiable - if connections["VpnConnections"][0]["State"] not in ("deleted", "deleting"): - return connections["VpnConnections"][0] - - -def create_connection( - connection, - customer_gateway_id, - static_only, - vpn_gateway_id, - transit_gateway_id, - connection_type, - max_attempts, - delay, - tunnel_options=None, -): - """Creates a VPN connection""" - - options = {"StaticRoutesOnly": static_only} - if tunnel_options and len(tunnel_options) <= 2: - t_opt = [] - for m in tunnel_options: - # See Boto3 docs regarding 'create_vpn_connection' - # tunnel options for allowed 'TunnelOptions' keys. - if not isinstance(m, dict): - raise TypeError("non-dict list member") - t_opt.append(m) - if t_opt: - options["TunnelOptions"] = t_opt - - if not (customer_gateway_id and (vpn_gateway_id or transit_gateway_id)): - raise VPNConnectionException( - msg=( - "No matching connection was found. To create a new connection you must provide " - "customer_gateway_id and one of either transit_gateway_id or vpn_gateway_id." 
- ) - ) - vpn_connection_params = {"Type": connection_type, "CustomerGatewayId": customer_gateway_id, "Options": options} - if vpn_gateway_id: - vpn_connection_params["VpnGatewayId"] = vpn_gateway_id - if transit_gateway_id: - vpn_connection_params["TransitGatewayId"] = transit_gateway_id - - try: - vpn = connection.create_vpn_connection(**vpn_connection_params) - connection.get_waiter("vpn_connection_available").wait( - VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]], - WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, - ) - except WaiterError as e: - raise VPNConnectionException( - msg=f"Failed to wait for VPN connection {vpn['VpnConnection']['VpnConnectionId']} to be available", - exception=e, - ) - except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to create VPN connection", exception=e) - - return vpn["VpnConnection"] - - -def delete_connection(connection, vpn_connection_id, delay, max_attempts): - """Deletes a VPN connection""" - try: - connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id) - connection.get_waiter("vpn_connection_deleted").wait( - VpnConnectionIds=[vpn_connection_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} - ) - except WaiterError as e: - raise VPNConnectionException( - msg=f"Failed to wait for VPN connection {vpn_connection_id} to be removed", exception=e - ) - except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg=f"Failed to delete the VPN connection: {vpn_connection_id}", exception=e) - - -def add_tags(connection, vpn_connection_id, add): - try: - connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) - except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg=f"Failed to add the tags: {add}.", exception=e) - - -def remove_tags(connection, vpn_connection_id, remove): - # format tags since they are a list in the format ['tag1', 'tag2', 'tag3'] - key_dict_list = [{"Key": tag} for tag in remove] - try: - connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) - except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg=f"Failed to remove the tags: {remove}.", exception=e) - - -def check_for_update(connection, module_params, vpn_connection_id): - """Determines if there are any tags or routes that need to be updated. 
Ensures non-modifiable attributes aren't expected to change.""" - tags = module_params.get("tags") - routes = module_params.get("routes") - purge_tags = module_params.get("purge_tags") - purge_routes = module_params.get("purge_routes") - - vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id) - current_attrs = camel_dict_to_snake_dict(vpn_connection) - - # Initialize changes dict - changes = {"tags_to_add": [], "tags_to_remove": [], "routes_to_add": [], "routes_to_remove": []} - - # Get changes to tags - current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get("tags", []), "key", "value") - if tags is None: - changes["tags_to_remove"] = [] - changes["tags_to_add"] = [] - else: - tags_to_add, changes["tags_to_remove"] = compare_aws_tags(current_tags, tags, purge_tags) - changes["tags_to_add"] = ansible_dict_to_boto3_tag_list(tags_to_add) - # Get changes to routes - if "Routes" in vpn_connection: - current_routes = [route["DestinationCidrBlock"] for route in vpn_connection["Routes"]] - if purge_routes: - changes["routes_to_remove"] = [old_route for old_route in current_routes if old_route not in routes] - changes["routes_to_add"] = [new_route for new_route in routes if new_route not in current_routes] - - # Check if nonmodifiable attributes are attempted to be modified - for attribute in current_attrs: - if attribute in ("tags", "routes", "state"): - continue - elif attribute == "options": - will_be = module_params.get("static_only", None) - is_now = bool(current_attrs[attribute]["static_routes_only"]) - attribute = "static_only" - elif attribute == "type": - will_be = module_params.get("connection_type", None) - is_now = current_attrs[attribute] - else: - is_now = current_attrs[attribute] - will_be = module_params.get(attribute, None) - - if will_be is not None and to_text(will_be) != to_text(is_now): - raise VPNConnectionException( - msg=( - f"You cannot modify {attribute}, the current value of which is {is_now}. Modifiable VPN connection" - f" attributes are tags and routes. The value you tried to change it to is {will_be}." - ) - ) - - return changes - - -def make_changes(connection, vpn_connection_id, changes): - """changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove', - the values of which are lists (generated by check_for_update()). 
- """ - changed = False - - if changes["tags_to_add"]: - changed = True - add_tags(connection, vpn_connection_id, changes["tags_to_add"]) - - if changes["tags_to_remove"]: - changed = True - remove_tags(connection, vpn_connection_id, changes["tags_to_remove"]) - - if changes["routes_to_add"]: - changed = True - add_routes(connection, vpn_connection_id, changes["routes_to_add"]) - - if changes["routes_to_remove"]: - changed = True - remove_routes(connection, vpn_connection_id, changes["routes_to_remove"]) - - return changed - - -def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None): - """Returns the changes that would be made to a VPN Connection""" - state = module_params.get("state") - if state == "absent": - if vpn_connection_id: - return True, {} - else: - return False, {} - - changed = False - results = { - "customer_gateway_configuration": "", - "customer_gateway_id": module_params.get("customer_gateway_id"), - "vpn_gateway_id": module_params.get("vpn_gateway_id"), - "transit_gateway_id": module_params.get("transit_gateway_id"), - "options": {"static_routes_only": module_params.get("static_only")}, - "routes": [module_params.get("routes")], - } - - # get combined current tags and tags to set - present_tags = module_params.get("tags") - if present_tags is None: - pass - elif current_state and "Tags" in current_state: - current_tags = boto3_tag_list_to_ansible_dict(current_state["Tags"]) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get("purge_tags")) - changed |= bool(tags_to_remove) or bool(tags_to_add) - if module_params.get("purge_tags"): - current_tags = {} - current_tags.update(present_tags) - results["tags"] = current_tags - elif module_params.get("tags"): - changed = True - if present_tags: - results["tags"] = present_tags - - # get combined current routes and routes to add - present_routes = module_params.get("routes") - if current_state and "Routes" in current_state: - current_routes = [route["DestinationCidrBlock"] for route in current_state["Routes"]] - if module_params.get("purge_routes"): - if set(current_routes) != set(present_routes): - changed = True - elif set(present_routes) != set(current_routes): - if not set(present_routes) < set(current_routes): - changed = True - present_routes.extend([route for route in current_routes if route not in present_routes]) - elif module_params.get("routes"): - changed = True - results["routes"] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes] - - # return the vpn_connection_id if it's known - if vpn_connection_id: - results["vpn_connection_id"] = vpn_connection_id - else: - changed = True - results["vpn_connection_id"] = "vpn-XXXXXXXX" - - return changed, results - - -def ensure_present(connection, module_params, check_mode=False): - """Creates and adds tags to a VPN connection. If the connection already exists update tags.""" - vpn_connection = find_connection(connection, module_params) - changed = False - delay = module_params.get("delay") - max_attempts = module_params.get("wait_timeout") // delay - - # No match but vpn_connection_id was specified. - if not vpn_connection and module_params.get("vpn_connection_id"): - raise VPNConnectionException( - msg="There is no VPN connection available or pending with that id. Did you delete it?" - ) - - # Unique match was found. Check if attributes provided differ. 
- elif vpn_connection: - vpn_connection_id = vpn_connection["VpnConnectionId"] - # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove - changes = check_for_update(connection, module_params, vpn_connection_id) - if check_mode: - return get_check_mode_results(connection, module_params, vpn_connection_id, current_state=vpn_connection) - changed = make_changes(connection, vpn_connection_id, changes) - - # No match was found. Create and tag a connection and add routes. - else: - changed = True - if check_mode: - return get_check_mode_results(connection, module_params) - vpn_connection = create_connection( - connection, - customer_gateway_id=module_params.get("customer_gateway_id"), - static_only=module_params.get("static_only"), - vpn_gateway_id=module_params.get("vpn_gateway_id"), - transit_gateway_id=module_params.get("transit_gateway_id"), - connection_type=module_params.get("connection_type"), - tunnel_options=module_params.get("tunnel_options"), - max_attempts=max_attempts, - delay=delay, - ) - changes = check_for_update(connection, module_params, vpn_connection["VpnConnectionId"]) - make_changes(connection, vpn_connection["VpnConnectionId"], changes) - - # get latest version if a change has been made and make tags output nice before returning it - if vpn_connection: - vpn_connection = find_connection(connection, module_params, vpn_connection["VpnConnectionId"]) - if "Tags" in vpn_connection: - vpn_connection["Tags"] = boto3_tag_list_to_ansible_dict(vpn_connection["Tags"]) - - return changed, vpn_connection - - -def ensure_absent(connection, module_params, check_mode=False): - """Deletes a VPN connection if it exists.""" - vpn_connection = find_connection(connection, module_params) - - if check_mode: - return get_check_mode_results( - connection, module_params, vpn_connection["VpnConnectionId"] if vpn_connection else None - ) - - delay = module_params.get("delay") - max_attempts = module_params.get("wait_timeout") // delay - - if vpn_connection: - delete_connection(connection, vpn_connection["VpnConnectionId"], delay=delay, max_attempts=max_attempts) - changed = True - else: - changed = False - - return changed, {} - - -def main(): - argument_spec = dict( - state=dict(type="str", default="present", choices=["present", "absent"]), - filters=dict(type="dict", default={}), - vpn_gateway_id=dict(type="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - connection_type=dict(default="ipsec.1", type="str"), - transit_gateway_id=dict(type="str"), - tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"), - static_only=dict(default=False, type="bool"), - customer_gateway_id=dict(type="str"), - vpn_connection_id=dict(type="str"), - purge_tags=dict(type="bool", default=True), - routes=dict(type="list", default=[], elements="str"), - purge_routes=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=600), - delay=dict(type="int", default=15), - ) - mutually_exclusive = [ - ["vpn_gateway_id", "transit_gateway_id"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - ) - connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10)) - - state = module.params.get("state") - parameters = dict(module.params) - - try: - if state == "present": - changed, response = ensure_present(connection, parameters, module.check_mode) - elif state == "absent": - changed, response = ensure_absent(connection, 
parameters, module.check_mode)
-    except VPNConnectionException as e:
-        if e.exception:
-            module.fail_json_aws(e.exception, msg=e.msg)
-        else:
-            module.fail_json(msg=e.msg)
-
-    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/ec2_vpc_vpn_info.py b/ec2_vpc_vpn_info.py
deleted file mode 100644
index 95d8a8ca7e5..00000000000
--- a/ec2_vpc_vpn_info.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: ec2_vpc_vpn_info
-version_added: 1.0.0
-short_description: Gather information about VPN Connections in AWS.
-description:
-  - Gather information about VPN Connections in AWS.
-author:
-  - Madhura Naniwadekar (@Madhura-CSI)
-options:
-  filters:
-    description:
-      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
-    required: false
-    type: dict
-    default: {}
-  vpn_connection_ids:
-    description:
-      - Get details of specific VPN connections using their connection IDs. This value should be provided as a list.
-    required: false
-    type: list
-    elements: str
-    default: []
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-- name: Gather information about all vpn connections
-  community.aws.ec2_vpc_vpn_info:
-
-- name: Gather information about a filtered list of vpn connections, based on tags
-  community.aws.ec2_vpc_vpn_info:
-    filters:
-      "tag:Name": test-connection
-  register: vpn_conn_info
-
-- name: Gather information about vpn connections, filtered by VPN gateway ID
-  community.aws.ec2_vpc_vpn_info:
-    filters:
-      vpn-gateway-id: vgw-cbe66beb
-  register: vpn_conn_info
-"""
-
-RETURN = r"""
-vpn_connections:
-  description: List of one or more VPN Connections.
-  returned: always
-  type: complex
-  contains:
-    category:
-      description: The category of the VPN connection.
-      returned: always
-      type: str
-      sample: VPN
-    customer_gateway_configuration:
-      description: The configuration information for the VPN connection's customer gateway (in the native XML format).
-      returned: always
-      type: str
-    customer_gateway_id:
-      description: The ID of the customer gateway at your end of the VPN connection.
-      returned: always
-      type: str
-      sample: cgw-17a53c37
-    options:
-      description: The VPN connection options.
-      returned: always
-      type: dict
-      sample: {
-        "static_routes_only": false
-      }
-    routes:
-      description: List of static routes associated with the VPN connection.
-      returned: always
-      type: complex
-      contains:
-        destination_cidr_block:
-          description: The CIDR block associated with the local subnet of the customer data center.
-          returned: always
-          type: str
-          sample: 10.0.0.0/16
-        state:
-          description: The current state of the static route.
-          returned: always
-          type: str
-          sample: available
-    state:
-      description: The current state of the VPN connection.
-      returned: always
-      type: str
-      sample: available
-    tags:
-      description: Any tags assigned to the VPN connection.
-      returned: always
-      type: dict
-      sample: {
-        "Name": "test-conn"
-      }
-    type:
-      description: The type of VPN connection.
-      returned: always
-      type: str
-      sample: ipsec.1
-    vgw_telemetry:
-      description: Information about the VPN tunnel.
-      returned: always
-      type: complex
-      contains:
-        accepted_route_count:
-          description: The number of accepted routes.
-          returned: always
-          type: int
-          sample: 0
-        last_status_change:
-          description: The date and time of the last change in status.
-          returned: always
-          type: str
-          sample: "2018-02-09T14:35:27+00:00"
-        outside_ip_address:
-          description: The Internet-routable IP address of the virtual private gateway's outside interface.
-          returned: always
-          type: str
-          sample: 13.127.79.191
-        status:
-          description: The status of the VPN tunnel.
-          returned: always
-          type: str
-          sample: DOWN
-        status_message:
-          description: If an error occurs, a description of the error.
-          returned: always
-          type: str
-          sample: IPSEC IS DOWN
-        certificate_arn:
-          description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
-          returned: when a private certificate is used for authentication
-          type: str
-          sample: "arn:aws:acm:us-east-1:123456789012:certificate/c544d8ce-20b8-4fff-98b0-example"
-    vpn_connection_id:
-      description: The ID of the VPN connection.
-      returned: always
-      type: str
-      sample: vpn-f700d5c0
-    vpn_gateway_id:
-      description: The ID of the virtual private gateway at the AWS side of the VPN connection.
-      returned: always
-      type: str
-      sample: vgw-cbe56bfb
-"""
-
-import json
-
-try:
-    from botocore.exceptions import BotoCoreError
-    from botocore.exceptions import ClientError
-except ImportError:
-    pass  # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def date_handler(obj):
-    return obj.isoformat() if hasattr(obj, "isoformat") else obj
-
-
-def list_vpn_connections(connection, module):
-    params = dict()
-
-    params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-    params["VpnConnectionIds"] = module.params.get("vpn_connection_ids")
-
-    try:
-        result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
-    except ValueError as e:
-        module.fail_json_aws(e, msg="Cannot validate JSON data")
-    except (ClientError, BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Could not describe VPN connections")
-    snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result["VpnConnections"]]
-    if snaked_vpn_connections:
-        for vpn_connection in snaked_vpn_connections:
-            vpn_connection["tags"] = boto3_tag_list_to_ansible_dict(vpn_connection.get("tags", []))
-    module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
-
-
-def main():
-    argument_spec = dict(
-        vpn_connection_ids=dict(default=[], type="list", elements="str"),
-        filters=dict(default={}, type="dict"),
-    )
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[["vpn_connection_ids", "filters"]],
-        supports_check_mode=True,
-    )
-
-    connection = module.client("ec2")
-
-    list_vpn_connections(connection, module)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/ec2_win_password.py b/ec2_win_password.py
deleted file mode 100644
index
a9ca8e94ca1..00000000000
--- a/ec2_win_password.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: ec2_win_password
-version_added: 1.0.0
-short_description: Gets the default administrator password for EC2 Windows instances
-description:
-  - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
-author:
-  - "Rick Mendes (@rickmendes)"
-options:
-  instance_id:
-    description:
-      - The instance id to get the password data from.
-    required: true
-    type: str
-  key_file:
-    description:
-      - Path to the file containing the key pair used on the instance.
-      - Conflicts with I(key_data).
-    required: false
-    type: path
-  key_data:
-    description:
-      - The private key (usually stored in vault).
-      - Conflicts with I(key_file).
-    required: false
-    type: str
-  key_passphrase:
-    description:
-      - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
        convert your password protected keys if they do not use DES or 3DES, for example C(openssl rsa -in current_key -out new_key -des3).
-    type: str
-  wait:
-    description:
-      - Whether or not to wait for the password to be available before returning.
-    type: bool
-    default: false
-  wait_timeout:
-    description:
-      - Number of seconds to wait before giving up.
-    default: 120
-    type: int
-
-requirements:
-  - cryptography
-
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-RETURN = r""" # """
-
-EXAMPLES = r"""
-# Example of getting a password
-- name: get the Administrator password
-  community.aws.ec2_win_password:
-    profile: my-boto-profile
-    instance_id: i-XXXXXX
-    region: us-east-1
-    key_file: "~/aws-creds/my_test_key.pem"
-
-# Example of getting a password using a variable
-- name: get the Administrator password
-  community.aws.ec2_win_password:
-    profile: my-boto-profile
-    instance_id: i-XXXXXX
-    region: us-east-1
-    key_data: "{{ ec2_private_key }}"
-
-# Example of getting a password with a password protected key
-- name: get the Administrator password
-  community.aws.ec2_win_password:
-    profile: my-boto-profile
-    instance_id: i-XXXXXX
-    region: us-east-1
-    key_file: "~/aws-creds/my_protected_test_key.pem"
-    key_passphrase: "secret"
-
-# Example of waiting for a password
-- name: get the Administrator password
-  community.aws.ec2_win_password:
-    profile: my-boto-profile
-    instance_id: i-XXXXXX
-    region: us-east-1
-    key_file: "~/aws-creds/my_test_key.pem"
-    wait: true
-    wait_timeout: 45
-"""
-
-import datetime
-import time
-from base64 import b64decode
-
-try:
-    from cryptography.hazmat.backends import default_backend
-    from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
-    from cryptography.hazmat.primitives.serialization import load_pem_private_key
-
-    HAS_CRYPTOGRAPHY = True
-except ImportError:
-    HAS_CRYPTOGRAPHY = False
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils._text import to_bytes
-
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def setup_module_object():
-    argument_spec = dict(
instance_id=dict(required=True), - key_file=dict(required=False, default=None, type="path"), - key_passphrase=dict(no_log=True, default=None, required=False), - key_data=dict(no_log=True, default=None, required=False), - wait=dict(type="bool", default=False, required=False), - wait_timeout=dict(default=120, required=False, type="int"), - ) - mutually_exclusive = [["key_file", "key_data"]] - module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) - return module - - -def _get_password(module, client, instance_id): - try: - data = client.get_password_data(aws_retry=True, InstanceId=instance_id)["PasswordData"] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get password data") - return data - - -def ec2_win_password(module): - instance_id = module.params.get("instance_id") - key_file = module.params.get("key_file") - if module.params.get("key_passphrase") is None: - b_key_passphrase = None - else: - b_key_passphrase = to_bytes(module.params.get("key_passphrase"), errors="surrogate_or_strict") - if module.params.get("key_data") is None: - b_key_data = None - else: - b_key_data = to_bytes(module.params.get("key_data"), errors="surrogate_or_strict") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - - client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - - if wait: - start = datetime.datetime.now() - end = start + datetime.timedelta(seconds=wait_timeout) - - while datetime.datetime.now() < end: - data = _get_password(module, client, instance_id) - decoded = b64decode(data) - if not decoded: - time.sleep(5) - else: - break - else: - data = _get_password(module, client, instance_id) - decoded = b64decode(data) - - if wait and datetime.datetime.now() >= end: - module.fail_json(msg=f"wait for password timeout after {int(wait_timeout)} seconds") - - if key_file is not None and b_key_data is None: - try: - with open(key_file, "rb") as f: - key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) - except IOError as e: - # Handle bad files - module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}") - except (ValueError, TypeError) as e: - # Handle issues loading key - module.fail_json(msg="unable to parse key file") - elif b_key_data is not None and key_file is None: - try: - key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend()) - except (ValueError, TypeError) as e: - module.fail_json(msg="unable to parse key data") - - try: - decrypted = key.decrypt(decoded, PKCS1v15()) - except ValueError as e: - decrypted = None - - if decrypted is None: - module.fail_json(msg="unable to decrypt password", win_password="", changed=False) - else: - if wait: - elapsed = datetime.datetime.now() - start - module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds) - else: - module.exit_json(win_password=decrypted, changed=False) - - -def main(): - module = setup_module_object() - - if not HAS_CRYPTOGRAPHY: - module.fail_json(msg="cryptography package required for this module.") - - ec2_win_password(module) - - -if __name__ == "__main__": - main() diff --git a/ecs_attribute.py b/ecs_attribute.py deleted file mode 100644 index 682014675a1..00000000000 --- a/ecs_attribute.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_attribute -version_added: 1.0.0 -short_description: manage ecs attributes -description: - - Create, update or delete ECS container instance attributes. -author: - - Andrej Svenke (@anryko) -options: - cluster: - description: - - The short name or full Amazon Resource Name (ARN) of the cluster - that contains the resource to apply attributes. - required: true - type: str - state: - description: - - The desired state of the attributes. - required: false - default: present - choices: ['present', 'absent'] - type: str - attributes: - description: - - List of attributes. - required: true - type: list - elements: dict - suboptions: - name: - description: - - The name of the attribute. Up to 128 letters (uppercase and lowercase), - numbers, hyphens, underscores, and periods are allowed. - required: true - type: str - value: - description: - - The value of the attribute. Up to 128 letters (uppercase and lowercase), - numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, - and spaces are allowed. - required: false - type: str - ec2_instance_id: - description: - - EC2 instance ID of ECS cluster container instance. - required: true - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Set attributes - community.aws.ecs_attribute: - state: present - cluster: test-cluster - ec2_instance_id: "{{ ec2_id }}" - attributes: - - flavor: test - - migrated - delegate_to: localhost - -- name: Delete attributes - community.aws.ecs_attribute: - state: absent - cluster: test-cluster - ec2_instance_id: "{{ ec2_id }}" - attributes: - - flavor: test - - migrated - delegate_to: localhost -""" - -RETURN = r""" -attributes: - description: attributes - type: complex - returned: always - contains: - cluster: - description: cluster name - type: str - ec2_instance_id: - description: ec2 instance id of ecs container instance - type: str - attributes: - description: list of attributes - type: list - elements: dict - contains: - name: - description: name of the attribute - type: str - value: - description: value of the attribute - returned: if present - type: str -""" - -try: - import botocore - from botocore.exceptions import ClientError - from botocore.exceptions import EndpointConnectionError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EcsAttributes(object): - """Handles ECS Cluster Attribute""" - - def __init__(self, module, attributes): - self.module = module - self.attributes = attributes if self._validate_attrs(attributes) else self._parse_attrs(attributes) - - def __bool__(self): - return bool(self.attributes) - - __nonzero__ = __bool__ - - def __iter__(self): - return iter(self.attributes) - - @staticmethod - def _validate_attrs(attrs): - return all(tuple(attr.keys()) in (("name", "value"), ("value", "name")) for attr in attrs) - - def _parse_attrs(self, attrs): - attrs_parsed = [] - for attr in attrs: - if isinstance(attr, dict): - if len(attr) != 1: - self.module.fail_json(msg=f"Incorrect attribute format - {str(attr)}") - name, value = list(attr.items())[0] - attrs_parsed.append({"name": name, "value": value}) - elif isinstance(attr, str): - 
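# A bare string attribute is shorthand for a name-only attribute: the branch
# below records it with value=None, mirroring the single-key dict form above.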
attrs_parsed.append({"name": attr, "value": None}) - else: - self.module.fail_json(msg=f"Incorrect attributes format - {str(attrs)}") - - return attrs_parsed - - def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False): - attr_obj = {"targetType": "container-instance", "targetId": ecs_arn, "name": name} - if not skip_value and value is not None: - attr_obj["value"] = value - - return attr_obj - - def get_for_ecs_arn(self, ecs_arn, skip_value=False): - """ - Returns list of attribute dicts ready to be passed to boto3 - attributes put/delete methods. - """ - return [self._setup_attr_obj(ecs_arn, skip_value=skip_value, **attr) for attr in self.attributes] - - def diff(self, attrs): - """ - Returns EcsAttributes Object containing attributes which are present - in self but are absent in passed attrs (EcsAttributes Object). - """ - attrs_diff = [attr for attr in self.attributes if attr not in attrs] - return EcsAttributes(self.module, attrs_diff) - - -class Ec2EcsInstance(object): - """Handle ECS Cluster Remote Operations""" - - def __init__(self, module, cluster, ec2_id): - self.module = module - self.cluster = cluster - self.ec2_id = ec2_id - - try: - self.ecs = module.client("ecs") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - self.ecs_arn = self._get_ecs_arn() - - def _get_ecs_arn(self): - try: - ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)["containerInstanceArns"] - ec2_instances = self.ecs.describe_container_instances( - cluster=self.cluster, containerInstances=ecs_instances_arns - )["containerInstances"] - except (ClientError, EndpointConnectionError) as e: - self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") - - try: - ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ - "containerInstanceArn" - ] - except StopIteration: - self.module.fail_json(msg=f"EC2 instance Id not found in ECS cluster - {str(self.cluster)}") - - return ecs_arn - - def attrs_put(self, attrs): - """Puts attributes on ECS container instance""" - try: - self.ecs.put_attributes(cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn)) - except ClientError as e: - self.module.fail_json(msg=str(e)) - - def attrs_delete(self, attrs): - """Deletes attributes from ECS container instance.""" - try: - self.ecs.delete_attributes( - cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True) - ) - except ClientError as e: - self.module.fail_json(msg=str(e)) - - def attrs_get_by_name(self, attrs): - """ - Returns EcsAttributes object containing attributes from ECS container instance with names - matching to attrs.attributes (EcsAttributes Object). 
- """ - attr_objs = [{"targetType": "container-instance", "attributeName": attr["name"]} for attr in attrs] - - try: - matched_ecs_targets = [ - attr_found - for attr_obj in attr_objs - for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] - ] - except ClientError as e: - self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") - - matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] - - results = [{"name": match["name"], "value": match.get("value", None)} for match in matched_objs] - - return EcsAttributes(self.module, results) - - -def main(): - argument_spec = dict( - state=dict(required=False, default="present", choices=["present", "absent"]), - cluster=dict(required=True, type="str"), - ec2_instance_id=dict(required=True, type="str"), - attributes=dict(required=True, type="list", elements="dict"), - ) - - required_together = [["cluster", "ec2_instance_id", "attributes"]] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_together=required_together, - ) - - cluster = module.params["cluster"] - ec2_instance_id = module.params["ec2_instance_id"] - attributes = module.params["attributes"] - - conti = Ec2EcsInstance(module, cluster, ec2_instance_id) - attrs = EcsAttributes(module, attributes) - - results = { - "changed": False, - "attributes": [ - { - "cluster": cluster, - "ec2_instance_id": ec2_instance_id, - "attributes": attributes, - } - ], - } - - attrs_present = conti.attrs_get_by_name(attrs) - - if module.params["state"] == "present": - attrs_diff = attrs.diff(attrs_present) - if not attrs_diff: - module.exit_json(**results) - - conti.attrs_put(attrs_diff) - results["changed"] = True - - elif module.params["state"] == "absent": - if not attrs_present: - module.exit_json(**results) - - conti.attrs_delete(attrs_present) - results["changed"] = True - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ecs_cluster.py b/ecs_cluster.py deleted file mode 100644 index fca35331f69..00000000000 --- a/ecs_cluster.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_cluster -version_added: 1.0.0 -short_description: Create or terminate ECS clusters. -notes: - - When deleting a cluster, the information returned is the state of the cluster prior to deletion. - - It will also wait for a cluster to have instances registered to it. -description: - - Creates or terminates ecs clusters. -author: - - Mark Chance (@Java1Guy) -options: - state: - description: - - The desired state of the cluster. - required: true - choices: ['present', 'absent', 'has_instances'] - type: str - name: - description: - - The cluster name. - required: true - type: str - delay: - description: - - Number of seconds to wait. - required: false - type: int - default: 10 - repeat: - description: - - The number of times to wait for the cluster to have an instance. - required: false - type: int - default: 10 - capacity_providers: - version_added: 5.2.0 - description: - - List of capacity providers to use for the cluster. - required: false - type: list - elements: str - capacity_provider_strategy: - version_added: 5.2.0 - description: - - List of capacity provider strategies to use for the cluster. 
- required: false - type: list - elements: dict - suboptions: - capacity_provider: - description: - - Name of capacity provider. - type: str - weight: - description: - - The relative percentage of the total number of launched tasks that should use the specified provider. - type: int - base: - description: - - How many tasks, at a minimum, should use the specified provider. - type: int - default: 0 - purge_capacity_providers: - version_added: 5.2.0 - description: - - Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility. - - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true). - required: false - type: bool - default: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Cluster creation - community.aws.ecs_cluster: - name: default - state: present - -- name: Cluster creation with capacity providers and strategies. - community.aws.ecs_cluster: - name: default - state: present - capacity_providers: - - FARGATE - - FARGATE_SPOT - capacity_provider_strategy: - - capacity_provider: FARGATE - base: 1 - weight: 1 - - capacity_provider: FARGATE_SPOT - weight: 100 - purge_capacity_providers: True - -- name: Cluster deletion - community.aws.ecs_cluster: - name: default - state: absent - -- name: Wait for register - community.aws.ecs_cluster: - name: "{{ new_cluster }}" - state: has_instances - delay: 10 - repeat: 10 - register: task_output - -""" -RETURN = r""" -activeServicesCount: - description: how many services are active in this cluster - returned: 0 if a new cluster - type: int -capacityProviders: - version_added: 5.2.0 - description: list of capacity providers used in this cluster - returned: always - type: list -defaultCapacityProviderStrategy: - version_added: 5.2.0 - description: list of capacity provider strategies used in this cluster - returned: always - type: list -clusterArn: - description: the ARN of the cluster just created - type: str - returned: 0 if a new cluster - sample: arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster -clusterName: - description: name of the cluster just created (should match the input argument) - type: str - returned: always - sample: test-cluster -pendingTasksCount: - description: how many tasks are waiting to run in this cluster - returned: 0 if a new cluster - type: int -registeredContainerInstancesCount: - description: how many container instances are available in this cluster - returned: 0 if a new cluster - type: int -runningTasksCount: - description: how many tasks are running in this cluster - returned: 0 if a new cluster - type: int -status: - description: the status of the new cluster - returned: always - type: str - sample: ACTIVE -""" - -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EcsClusterManager: - """Handles ECS Clusters""" - - def __init__(self, module): - self.module = module - try: - self.ecs = module.client("ecs") - except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - def find_in_array(self, array_of_clusters, cluster_name, field_name="clusterArn"): - for c in array_of_clusters: - if c[field_name].endswith(cluster_name): - return c - return None - - def describe_cluster(self, cluster_name): - response = self.ecs.describe_clusters(clusters=[cluster_name]) - if len(response["failures"]) > 0: - c = self.find_in_array(response["failures"], cluster_name, "arn") - if c and c["reason"] == "MISSING": - return None - # fall thru and look through found ones - if len(response["clusters"]) > 0: - c = self.find_in_array(response["clusters"], cluster_name) - if c: - return c - raise Exception(f"Unknown problem describing cluster {cluster_name}.") - - def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): - params = dict(clusterName=cluster_name) - if capacity_providers: - params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) - if capacity_provider_strategy: - params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) - response = self.ecs.create_cluster(**params) - return response["cluster"] - - def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): - params = dict(cluster=cluster_name) - if capacity_providers: - params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers) - else: - params["capacityProviders"] = [] - if capacity_provider_strategy: - params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy) - else: - params["defaultCapacityProviderStrategy"] = [] - response = self.ecs.put_cluster_capacity_providers(**params) - return response["cluster"] - - def delete_cluster(self, clusterName): - return self.ecs.delete_cluster(cluster=clusterName) - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=["present", "absent", "has_instances"]), - name=dict(required=True, type="str"), - delay=dict(required=False, type="int", default=10), - repeat=dict(required=False, type="int", default=10), - purge_capacity_providers=dict(required=False, type="bool", default=False), - capacity_providers=dict(required=False, type="list", elements="str"), - capacity_provider_strategy=dict( - required=False, - type="list", - elements="dict", - options=dict( - capacity_provider=dict(type="str"), - weight=dict(type="int"), - base=dict(type="int", default=0), - ), - ), - ) - required_together = [["state", "name"]] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_together=required_together, - ) - - cluster_mgr = EcsClusterManager(module) - try: - existing = cluster_mgr.describe_cluster(module.params["name"]) - except Exception as e: - module.fail_json(msg="Exception describing cluster '" + module.params["name"] + "': " + str(e)) - - results = dict(changed=False) - if module.params["state"] == "present": - # Pull requested and existing capacity providers and strategies. 
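# The comparison below bridges two naming conventions: the module accepts
# snake_case strategies while the ECS API reports camelCase. A self-contained
# sketch of the same drift check (the strategy values here are placeholders):
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

requested = [{"capacity_provider": "FARGATE", "weight": 1, "base": 1}]
existing = [{"capacityProvider": "FARGATE_SPOT", "weight": 100, "base": 0}]

# Drift exists if any requested entry is missing on AWS, or vice versa.
drift = any(snake_dict_to_camel_dict(s) not in existing for s in requested) or any(
    camel_dict_to_snake_dict(s) not in requested for s in existing
)
print(drift)  # True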
- purge_capacity_providers = module.params["purge_capacity_providers"] - requested_cp = module.params["capacity_providers"] - requested_cps = module.params["capacity_provider_strategy"] - if existing and "status" in existing and existing["status"] == "ACTIVE": - existing_cp = existing["capacityProviders"] - existing_cps = existing["defaultCapacityProviderStrategy"] - - if requested_cp is None: - requested_cp = [] - - # Check if capacity provider strategy needs to trigger an update. - cps_update_needed = False - if requested_cps is not None: - for strategy in requested_cps: - if snake_dict_to_camel_dict(strategy) not in existing_cps: - cps_update_needed = True - for strategy in existing_cps: - if camel_dict_to_snake_dict(strategy) not in requested_cps: - cps_update_needed = True - elif requested_cps is None and existing_cps != []: - cps_update_needed = True - - # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. - if not purge_capacity_providers: - module.deprecate( - "After 2024-06-01 the default value of purge_capacity_providers will change from false to true." - " To maintain the existing behaviour explicitly set purge_capacity_providers=true", - date="2024-06-01", - collection_name="community.aws", - ) - cps_update_needed = False - requested_cp = existing_cp - requested_cps = existing_cps - - # If either the providers or strategy differ, update the cluster. - if requested_cp != existing_cp or cps_update_needed: - if not module.check_mode: - results["cluster"] = cluster_mgr.update_cluster( - cluster_name=module.params["name"], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps, - ) - results["changed"] = True - else: - results["cluster"] = existing - else: - if not module.check_mode: - # The cluster doesn't exist; create it. - results["cluster"] = cluster_mgr.create_cluster( - cluster_name=module.params["name"], - capacity_providers=requested_cp, - capacity_provider_strategy=requested_cps, - ) - results["changed"] = True - - # delete the cluster - elif module.params["state"] == "absent": - if not existing: - pass - else: - # it exists, so we should delete it and mark changed. - # return info about the cluster deleted - results["cluster"] = existing - if "status" in existing and existing["status"] == "INACTIVE": - results["changed"] = False - else: - if not module.check_mode: - cluster_mgr.delete_cluster(module.params["name"]) - results["changed"] = True - elif module.params["state"] == "has_instances": - if not existing: - module.fail_json(msg="Cluster '" + module.params["name"] + "' not found.") - return - # The cluster exists; poll until at least one container instance registers. - delay = module.params["delay"] - repeat = module.params["repeat"] - time.sleep(delay) - count = 0 - for i in range(repeat): - existing = cluster_mgr.describe_cluster(module.params["name"]) - count = existing["registeredContainerInstancesCount"] - if count > 0: - results["changed"] = True - break - time.sleep(delay) - if count == 0 and i == repeat - 1: - module.fail_json( - msg="Cluster instance count still zero after " - + str(repeat) - + " tries of " - + str(delay) - + " seconds each."
- ) - return - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ecs_ecr.py b/ecs_ecr.py deleted file mode 100644 index fb812ca0a45..00000000000 --- a/ecs_ecr.py +++ /dev/null @@ -1,597 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_ecr -version_added: 1.0.0 -short_description: Manage Elastic Container Registry repositories -description: - - Manage Elastic Container Registry repositories. -options: - name: - description: - - The name of the repository. - required: true - type: str - registry_id: - description: - - AWS account id associated with the registry. - - If not specified, the default registry is assumed. - required: false - type: str - policy: - description: - - JSON or dict that represents the new policy. - required: false - type: json - force_absent: - description: - - If I(force_absent=true), the repository will be removed, even if images are present. - required: false - default: false - type: bool - version_added: 4.1.0 - force_set_policy: - description: - - If I(force_set_policy=false), it prevents setting a policy that would prevent you from - setting another policy in the future. - required: false - default: false - type: bool - purge_policy: - description: - - If C(true), remove the policy from the repository. - - Defaults to C(false). - required: false - type: bool - image_tag_mutability: - description: - - Configure whether repository should be mutable (ie. an already existing tag can be overwritten) or not. - required: false - choices: [mutable, immutable] - default: 'mutable' - type: str - lifecycle_policy: - description: - - JSON or dict that represents the new lifecycle policy. - required: false - type: json - purge_lifecycle_policy: - description: - - if C(true), remove the lifecycle policy from the repository. - - Defaults to C(false). - required: false - type: bool - state: - description: - - Create or destroy the repository. - required: false - choices: [present, absent] - default: 'present' - type: str - scan_on_push: - description: - - if C(true), images are scanned for known vulnerabilities after being pushed to the repository. - required: false - default: false - type: bool - version_added: 1.3.0 - encryption_configuration: - description: - - The encryption configuration for the repository. - required: false - suboptions: - encryption_type: - description: - - The encryption type to use. - choices: [AES256, KMS] - default: 'AES256' - type: str - kms_key: - description: - - If I(encryption_type=KMS), specify the KMS key to use for encryption. - - The alias, key ID, or full ARN of the KMS key can be specified. - type: str - type: dict - version_added: 5.2.0 -author: - - David M. Lee (@leedm777) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# If the repository does not exist, it is created. If it does exist, would not -# affect any policies already on it. 
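# Editor's sketch, not part of the original examples (the repository name is a
# placeholder): immutable tags combined with on-push scanning, both documented above.
- name: immutable-scanned-repo
  community.aws.ecs_ecr:
    name: example/app
    image_tag_mutability: immutable
    scan_on_push: true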
-- name: ecr-repo - community.aws.ecs_ecr: - name: super/cool - -- name: destroy-ecr-repo - community.aws.ecs_ecr: - name: old/busted - state: absent - -- name: Cross account ecr-repo - community.aws.ecs_ecr: - registry_id: 123456789012 - name: cross/account - -- name: set-policy as object - community.aws.ecs_ecr: - name: needs-policy-object - policy: - Version: '2008-10-17' - Statement: - - Sid: read-only - Effect: Allow - Principal: - AWS: '{{ read_only_arn }}' - Action: - - ecr:GetDownloadUrlForLayer - - ecr:BatchGetImage - - ecr:BatchCheckLayerAvailability - -- name: set-policy as string - community.aws.ecs_ecr: - name: needs-policy-string - policy: "{{ lookup('template', 'policy.json.j2') }}" - -- name: delete-policy - community.aws.ecs_ecr: - name: needs-no-policy - purge_policy: true - -- name: create immutable ecr-repo - community.aws.ecs_ecr: - name: super/cool - image_tag_mutability: immutable - -- name: set-lifecycle-policy - community.aws.ecs_ecr: - name: needs-lifecycle-policy - scan_on_push: true - lifecycle_policy: - rules: - - rulePriority: 1 - description: new policy - selection: - tagStatus: untagged - countType: sinceImagePushed - countUnit: days - countNumber: 365 - action: - type: expire - -- name: purge-lifecycle-policy - community.aws.ecs_ecr: - name: needs-no-lifecycle-policy - purge_lifecycle_policy: true - -- name: set-encryption-configuration - community.aws.ecs_ecr: - name: uses-custom-kms-key - encryption_configuration: - encryption_type: KMS - kms_key: custom-kms-key-alias -""" - -RETURN = r""" -state: - type: str - description: The asserted state of the repository (present, absent) - returned: always -created: - type: bool - description: If true, the repository was created - returned: always -name: - type: str - description: The name of the repository - returned: I(state=absent) -policy: - type: dict - description: The existing, created or updated repository policy. - returned: I(state=present) - version_added: 4.0.0 -repository: - type: dict - description: The created or updated repository - returned: I(state=present) - sample: - createdAt: '2017-01-17T08:41:32-06:00' - registryId: '123456789012' - repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090 - repositoryName: ecr-test-1484664090 - repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090 -""" - -import json -import traceback - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible.module_utils.six import string_types - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def build_kwargs(registry_id): - """ - Builds a kwargs dict which may contain the optional registryId. - - :param registry_id: Optional string containing the registryId. 
- :return: kwargs dict with registryId, if given - """ - if not registry_id: - return dict() - else: - return dict(registryId=registry_id) - - -class EcsEcr: - def __init__(self, module): - self.ecr = module.client("ecr") - self.sts = module.client("sts") - self.check_mode = module.check_mode - self.changed = False - self.skipped = False - - def get_repository(self, registry_id, name): - try: - res = self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id)) - repos = res.get("repositories") - return repos and repos[0] - except is_boto3_error_code("RepositoryNotFoundException"): - return None - - def get_repository_policy(self, registry_id, name): - try: - res = self.ecr.get_repository_policy(repositoryName=name, **build_kwargs(registry_id)) - text = res.get("policyText") - return text and json.loads(text) - except is_boto3_error_code(["RepositoryNotFoundException", "RepositoryPolicyNotFoundException"]): - return None - - def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): - if registry_id: - default_registry_id = self.sts.get_caller_identity().get("Account") - if registry_id != default_registry_id: - raise Exception( - f"Cannot create repository in registry {registry_id}. Would be created in {default_registry_id} instead." - ) - - if encryption_configuration is None: - encryption_configuration = dict(encryptionType="AES256") - - if not self.check_mode: - repo = self.ecr.create_repository( - repositoryName=name, - imageTagMutability=image_tag_mutability, - encryptionConfiguration=encryption_configuration, - ).get("repository") - self.changed = True - return repo - else: - self.skipped = True - return dict(repositoryName=name) - - def set_repository_policy(self, registry_id, name, policy_text, force): - if not self.check_mode: - policy = self.ecr.set_repository_policy( - repositoryName=name, policyText=policy_text, force=force, **build_kwargs(registry_id) - ) - self.changed = True - return policy - else: - self.skipped = True - if self.get_repository(registry_id, name) is None: - printable = name - if registry_id: - printable = f"{registry_id}:{name}" - raise Exception(f"could not find repository {printable}") - return - - def delete_repository(self, registry_id, name, force): - if not self.check_mode: - repo = self.ecr.delete_repository(repositoryName=name, force=force, **build_kwargs(registry_id)) - self.changed = True - return repo - else: - repo = self.get_repository(registry_id, name) - if repo: - self.skipped = True - return repo - return None - - def delete_repository_policy(self, registry_id, name): - if not self.check_mode: - policy = self.ecr.delete_repository_policy(repositoryName=name, **build_kwargs(registry_id)) - self.changed = True - return policy - else: - policy = self.get_repository_policy(registry_id, name) - if policy: - self.skipped = True - return policy - return None - - def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration): - repo = self.get_repository(registry_id, name) - current_mutability_configuration = repo.get("imageTagMutability") - - if current_mutability_configuration != new_mutability_configuration: - if not self.check_mode: - self.ecr.put_image_tag_mutability( - repositoryName=name, imageTagMutability=new_mutability_configuration, **build_kwargs(registry_id) - ) - else: - self.skipped = True - self.changed = True - - repo["imageTagMutability"] = new_mutability_configuration - return repo - - def get_lifecycle_policy(self, registry_id, name): - try: - res = 
self.ecr.get_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) - text = res.get("lifecyclePolicyText") - return text and json.loads(text) - except is_boto3_error_code(["LifecyclePolicyNotFoundException", "RepositoryNotFoundException"]): - return None - - def put_lifecycle_policy(self, registry_id, name, policy_text): - if not self.check_mode: - policy = self.ecr.put_lifecycle_policy( - repositoryName=name, lifecyclePolicyText=policy_text, **build_kwargs(registry_id) - ) - self.changed = True - return policy - else: - self.skipped = True - if self.get_repository(registry_id, name) is None: - printable = name - if registry_id: - printable = f"{registry_id}:{name}" - raise Exception(f"could not find repository {printable}") - return - - def purge_lifecycle_policy(self, registry_id, name): - if not self.check_mode: - policy = self.ecr.delete_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id)) - self.changed = True - return policy - else: - policy = self.get_lifecycle_policy(registry_id, name) - if policy: - self.skipped = True - return policy - return None - - def put_image_scanning_configuration(self, registry_id, name, scan_on_push): - if not self.check_mode: - if registry_id: - scan = self.ecr.put_image_scanning_configuration( - registryId=registry_id, repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} - ) - else: - scan = self.ecr.put_image_scanning_configuration( - repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push} - ) - self.changed = True - return scan - else: - self.skipped = True - return None - - -def sort_lists_of_strings(policy): - for statement_index in range(0, len(policy.get("Statement", []))): - for key in policy["Statement"][statement_index]: - value = policy["Statement"][statement_index][key] - if isinstance(value, list) and all(isinstance(item, string_types) for item in value): - policy["Statement"][statement_index][key] = sorted(value) - return policy - - -def run(ecr, params): - # type: (EcsEcr, dict, int) -> Tuple[bool, dict] - result = {} - try: - name = params["name"] - state = params["state"] - policy_text = params["policy"] - purge_policy = params["purge_policy"] - force_absent = params["force_absent"] - registry_id = params["registry_id"] - force_set_policy = params["force_set_policy"] - image_tag_mutability = params["image_tag_mutability"].upper() - lifecycle_policy_text = params["lifecycle_policy"] - purge_lifecycle_policy = params["purge_lifecycle_policy"] - scan_on_push = params["scan_on_push"] - encryption_configuration = snake_dict_to_camel_dict(params["encryption_configuration"]) - - # Parse policies, if they are given - try: - policy = policy_text and json.loads(policy_text) - except ValueError: - result["policy"] = policy_text - result["msg"] = "Could not parse policy" - return False, result - - try: - lifecycle_policy = lifecycle_policy_text and json.loads(lifecycle_policy_text) - except ValueError: - result["lifecycle_policy"] = lifecycle_policy_text - result["msg"] = "Could not parse lifecycle_policy" - return False, result - - result["state"] = state - result["created"] = False - - repo = ecr.get_repository(registry_id, name) - - if state == "present": - result["created"] = False - - if not repo: - repo = ecr.create_repository(registry_id, name, image_tag_mutability, encryption_configuration) - result["changed"] = True - result["created"] = True - else: - if encryption_configuration is not None: - if repo.get("encryptionConfiguration") != encryption_configuration: - 
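# ECR does not allow changing a repository's encryption settings after
# creation, so when the requested encryptionConfiguration differs from the
# repository's existing one (checked just above), the module fails fast
# below rather than attempting an unsupported update call.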
result["msg"] = "Cannot modify repository encryption type" - return False, result - - repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) - result["repository"] = repo - - if purge_lifecycle_policy: - original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - - result["lifecycle_policy"] = None - - if original_lifecycle_policy: - ecr.purge_lifecycle_policy(registry_id, name) - result["changed"] = True - - elif lifecycle_policy_text is not None: - try: - result["lifecycle_policy"] = lifecycle_policy - original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name) - - if compare_policies(original_lifecycle_policy, lifecycle_policy): - ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text) - result["changed"] = True - except Exception: - # Some failure w/ the policy. It's helpful to know what the - # policy is. - result["lifecycle_policy"] = lifecycle_policy_text - raise - - if purge_policy: - original_policy = ecr.get_repository_policy(registry_id, name) - - result["policy"] = None - - if original_policy: - ecr.delete_repository_policy(registry_id, name) - result["changed"] = True - - elif policy_text is not None: - try: - # Sort any lists containing only string types - policy = sort_lists_of_strings(policy) - - result["policy"] = policy - - original_policy = ecr.get_repository_policy(registry_id, name) - if original_policy: - original_policy = sort_lists_of_strings(original_policy) - - if compare_policies(original_policy, policy): - ecr.set_repository_policy(registry_id, name, policy_text, force_set_policy) - result["changed"] = True - except Exception: - # Some failure w/ the policy. It's helpful to know what the - # policy is. - result["policy"] = policy_text - raise - - else: - original_policy = ecr.get_repository_policy(registry_id, name) - if original_policy: - result["policy"] = original_policy - - original_scan_on_push = ecr.get_repository(registry_id, name) - if original_scan_on_push is not None: - if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]: - result["changed"] = True - result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push - response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) - - elif state == "absent": - result["name"] = name - if repo: - ecr.delete_repository(registry_id, name, force_absent) - result["changed"] = True - - except Exception as err: - msg = str(err) - if isinstance(err, botocore.exceptions.ClientError): - msg = boto_exception(err) - result["msg"] = msg - result["exception"] = traceback.format_exc() - return False, result - - if ecr.skipped: - result["skipped"] = True - - if ecr.changed: - result["changed"] = True - - return True, result - - -def main(): - argument_spec = dict( - name=dict(required=True), - registry_id=dict(required=False), - state=dict(required=False, choices=["present", "absent"], default="present"), - force_absent=dict(required=False, type="bool", default=False), - force_set_policy=dict(required=False, type="bool", default=False), - policy=dict(required=False, type="json"), - image_tag_mutability=dict(required=False, choices=["mutable", "immutable"], default="mutable"), - purge_policy=dict(required=False, type="bool"), - lifecycle_policy=dict(required=False, type="json"), - purge_lifecycle_policy=dict(required=False, type="bool"), - scan_on_push=(dict(required=False, type="bool", default=False)), - encryption_configuration=dict( - required=False, - type="dict", - options=dict( - 
encryption_type=dict(required=False, type="str", default="AES256", choices=["AES256", "KMS"]), - kms_key=dict(required=False, type="str", no_log=False), - ), - required_if=[ - ["encryption_type", "KMS", ["kms_key"]], - ], - ), - ) - mutually_exclusive = [ - ["policy", "purge_policy"], - ["lifecycle_policy", "purge_lifecycle_policy"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - ) - - ecr = EcsEcr(module) - passed, result = run(ecr, module.params) - - if passed: - module.exit_json(**result) - else: - module.fail_json(**result) - - -if __name__ == "__main__": - main() diff --git a/ecs_service.py b/ecs_service.py deleted file mode 100644 index 8115b3b34fd..00000000000 --- a/ecs_service.py +++ /dev/null @@ -1,1273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_service -version_added: 1.0.0 -short_description: Create, terminate, start or stop a service in ECS -description: - - Creates or terminates ECS services. -notes: - - The service role specified must be assumable. (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com) - - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html). - - An IAM role must have been previously created. -author: - - "Mark Chance (@Java1Guy)" - - "Darek Kaczynski (@kaczynskid)" - - "Stephane Maarek (@simplesteph)" - - "Zac Blazic (@zacblazic)" -options: - state: - description: - - The desired state of the service. - required: true - choices: ["present", "absent", "deleting"] - type: str - name: - description: - - The name of the service. - required: true - type: str - aliases: ['service'] - cluster: - description: - - The name of the cluster in which the service exists. - - If not specified, the cluster name will be C(default). - required: false - type: str - default: 'default' - task_definition: - description: - - The task definition the service will run. - - This parameter is required when I(state=present) unless I(force_new_deployment=True). - - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case - the task definition is managed by Code Pipeline and cannot be updated. - required: false - type: str - load_balancers: - description: - - The list of ELBs defined for this service. - - Load balancers for an existing service cannot be updated, and it is an error to do so. - - When the deployment controller is CODE_DEPLOY changes to this value are simply ignored, and do not cause an error. - required: false - type: list - elements: dict - default: [] - desired_count: - description: - - The count of how many instances of the service. - - This parameter is required when I(state=present). - required: false - type: int - client_token: - description: - - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed. - required: false - type: str - default: '' - role: - description: - - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer - on your behalf. - - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc). 
- required: false - type: str - default: '' - delay: - description: - - The time to wait before checking that the service is available. - required: false - default: 10 - type: int - repeat: - description: - - The number of times to check that the service is available. - required: false - default: 10 - type: int - force_new_deployment: - description: - - Force deployment of service even if there are no changes. - required: false - type: bool - default: false - deployment_controller: - description: - - The deployment controller to use for the service. If no deploymenet controller is specified, the ECS controller is used. - required: false - version_added: 4.1.0 - type: dict - default: {} - suboptions: - type: - type: str - choices: ["ECS", "CODE_DEPLOY", "EXTERNAL"] - description: The deployment controller type to use. - deployment_configuration: - description: - - Optional parameters that control the deployment_configuration. - - Format is '{"maximum_percent":, "minimum_healthy_percent":} - required: false - type: dict - default: {} - suboptions: - maximum_percent: - type: int - description: Upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. - minimum_healthy_percent: - type: int - description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. - deployment_circuit_breaker: - type: dict - description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. - suboptions: - enable: - type: bool - description: If enabled, a service deployment will transition to a failed state and stop launching new tasks. - rollback: - type: bool - description: If enabled, ECS will roll back your service to the last completed deployment after a failure. - enable_execute_command: - description: - - Whether or not to enable the execute command functionality for the containers in the ECS task. - - If I(enable_execute_command=true) execute command functionality is enabled on all containers in the ECS task. - required: false - type: bool - version_added: 5.4.0 - placement_constraints: - description: - - The placement constraints for the tasks in the service. - - See U(https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementConstraint.html) for more details. - required: false - type: list - elements: dict - default: [] - suboptions: - type: - description: The type of constraint. - type: str - expression: - description: A cluster query language expression to apply to the constraint. - required: false - type: str - purge_placement_constraints: - version_added: 5.3.0 - description: - - Toggle overwriting of existing placement constraints. This is needed for backwards compatibility. - - By default I(purge_placement_constraints=false). In a release after 2024-06-01 this will be changed to I(purge_placement_constraints=true). - required: false - type: bool - default: false - placement_strategy: - description: - - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service. - required: false - type: list - elements: dict - default: [] - suboptions: - type: - description: The type of placement strategy. - type: str - field: - description: The field to apply the placement strategy against. - type: str - purge_placement_strategy: - version_added: 5.3.0 - description: - - Toggle overwriting of existing placement strategy. 
This is needed for backwards compatibility. - - By default I(purge_placement_strategy=false). In a release after 2024-06-01 this will be changed to I(purge_placement_strategy=true). - required: false - type: bool - default: false - force_deletion: - description: - - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group. - default: False - type: bool - version_added: 2.1.0 - network_configuration: - description: - - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). - type: dict - suboptions: - subnets: - description: - - A list of subnet IDs to associate with the task. - type: list - elements: str - security_groups: - description: - - A list of security group names or group IDs to associate with the task. - type: list - elements: str - assign_public_ip: - description: - - Whether the task's elastic network interface receives a public IP address. - type: bool - launch_type: - description: - - The launch type on which to run your service. - required: false - choices: ["EC2", "FARGATE"] - type: str - capacity_provider_strategy: - version_added: 4.0.0 - description: - - The capacity provider strategy to use with your service. You can specify a maximum of 6 providers per strategy. - required: false - type: list - elements: dict - default: [] - suboptions: - capacity_provider: - description: - - Name of capacity provider. - type: str - weight: - description: - - The relative percentage of the total number of launched tasks that should use the specified provider. - type: int - base: - description: - - How many tasks, at a minimum, should use the specified provider. - type: int - platform_version: - type: str - description: - - Numeric part of platform version or LATEST - - See U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) for more details. - required: false - version_added: 1.5.0 - health_check_grace_period_seconds: - description: - - Seconds to wait before health checking the freshly added/updated services. - required: false - type: int - service_registries: - description: - - Describes service discovery registries this service will register with. - type: list - elements: dict - default: [] - required: false - suboptions: - container_name: - description: - - Container name for service discovery registration. - type: str - container_port: - description: - - Container port for service discovery registration. - type: int - arn: - description: - - Service discovery registry ARN. - type: str - scheduling_strategy: - description: - - The scheduling strategy. - - Defaults to C(REPLICA) if not given to preserve previous behavior. - required: false - choices: ["DAEMON", "REPLICA"] - type: str - wait: - description: - - Whether or not to wait for the service to be inactive. - - Waits only when I(state) is C(absent). - type: bool - default: false - version_added: 4.1.0 - propagate_tags: - description: - - Propagate tags from ECS task defintition or ECS service to ECS task. - required: false - choices: ["TASK_DEFINITION", "SERVICE"] - type: str - version_added: 4.1.0 - tags: - description: - - A dictionary of tags to add or remove from the resource. - type: dict - required: false - version_added: 4.1.0 -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
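# Editor's sketch, not part of the original examples (all names are
# placeholders): a Fargate service that gets a public IP, using the
# network_configuration suboptions documented above.
- name: create Fargate service with a public IP
  community.aws.ecs_service:
    state: present
    name: sketch-service
    cluster: new_cluster
    task_definition: 'new_cluster-task:1'
    desired_count: 1
    launch_type: FARGATE
    platform_version: LATEST
    network_configuration:
      subnets:
        - subnet-abcd1234
      security_groups:
        - sg-aaaa1111
      assign_public_ip: true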
-# Basic provisioning example -- community.aws.ecs_service: - state: present - name: console-test-service - cluster: new_cluster - task_definition: 'new_cluster-task:1' - desired_count: 0 - -- name: create ECS service on VPC network - community.aws.ecs_service: - state: present - name: console-test-service - cluster: new_cluster - task_definition: 'new_cluster-task:1' - desired_count: 0 - network_configuration: - subnets: - - subnet-abcd1234 - security_groups: - - sg-aaaa1111 - - my_security_group - -# Simple example to delete -- community.aws.ecs_service: - name: default - state: absent - cluster: new_cluster - -# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4) -- community.aws.ecs_service: - state: present - name: test-service - cluster: test-cluster - task_definition: test-task-definition - desired_count: 3 - deployment_configuration: - minimum_healthy_percent: 75 - maximum_percent: 150 - placement_constraints: - - type: memberOf - expression: 'attribute:flavor==test' - placement_strategy: - - type: binpack - field: memory - -# With deployment circuit breaker (added in version 4.0) -- community.aws.ecs_service: - state: present - name: test-service - cluster: test-cluster - task_definition: test-task-definition - desired_count: 3 - deployment_configuration: - deployment_circuit_breaker: - enable: True - rollback: True - -# With capacity_provider_strategy (added in version 4.0) -- community.aws.ecs_service: - state: present - name: test-service - cluster: test-cluster - task_definition: test-task-definition - desired_count: 1 - capacity_provider_strategy: - - capacity_provider: test-capacity-provider-1 - weight: 1 - base: 0 - -# With tags and tag propagation -- community.aws.ecs_service: - state: present - name: tags-test-service - cluster: new_cluster - task_definition: 'new_cluster-task:1' - desired_count: 1 - tags: - Firstname: jane - lastName: doe - propagate_tags: SERVICE -""" - -RETURN = r""" -service: - description: Details of created service. - returned: when creating a service - type: complex - contains: - capacityProviderStrategy: - version_added: 4.0.0 - description: The capacity provider strategy to use with your service. - returned: always - type: complex - contains: - base: - description: How many tasks, at a minimum, should use the specified provider. - returned: always - type: int - capacityProvider: - description: Name of capacity provider. - returned: always - type: str - weight: - description: The relative percentage of the total number of launched tasks that should use the specified provider. - returned: always - type: int - clusterArn: - description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. - returned: always - type: str - desiredCount: - description: The desired number of instantiations of the task definition to keep running on the service. - returned: always - type: int - loadBalancers: - description: - - A list of load balancer objects - - Updating the loadbalancer configuration of an existing service requires botocore>=1.24.14. - returned: always - type: complex - contains: - loadBalancerName: - description: the name - returned: always - type: str - containerName: - description: The name of the container to associate with the load balancer. - returned: always - type: str - containerPort: - description: The port on the container to associate with the load balancer. 
- returned: always - type: int - pendingCount: - description: The number of tasks in the cluster that are in the PENDING state. - returned: always - type: int - runningCount: - description: The number of tasks in the cluster that are in the RUNNING state. - returned: always - type: int - serviceArn: - description: - - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the C(arn:aws:ecs) namespace, followed by - the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. - sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' - returned: always - type: str - serviceName: - description: A user-generated string used to identify the service - returned: always - type: str - status: - description: The valid values are ACTIVE, DRAINING, or INACTIVE. - returned: always - type: str - tags: - description: The tags applied to this resource. - returned: success - type: dict - taskDefinition: - description: The ARN of a task definition to use for tasks in the service. - returned: always - type: str - deployments: - description: list of service deployments - returned: always - type: list - elements: dict - deploymentConfiguration: - description: dictionary of deploymentConfiguration - returned: always - type: complex - contains: - maximumPercent: - description: maximumPercent param - returned: always - type: int - minimumHealthyPercent: - description: minimumHealthyPercent param - returned: always - type: int - deploymentCircuitBreaker: - description: dictionary of deploymentCircuitBreaker - returned: always - type: complex - contains: - enable: - description: The state of the circuit breaker feature. - returned: always - type: bool - rollback: - description: The state of the rollback feature of the circuit breaker. - returned: always - type: bool - events: - description: list of service events - returned: always - type: list - elements: dict - placementConstraints: - description: List of placement constraints objects - returned: always - type: list - elements: dict - contains: - type: - description: The type of constraint. Valid values are distinctInstance and memberOf. - returned: always - type: str - expression: - description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is - distinctInstance. - returned: always - type: str - placementStrategy: - description: List of placement strategy objects - returned: always - type: list - elements: dict - contains: - type: - description: The type of placement strategy. Valid values are random, spread and binpack. - returned: always - type: str - field: - description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId - (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, - such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY. - returned: always - type: str - propagateTags: - description: The type of tag propagation applied to the resource. - returned: always - type: str -ansible_facts: - description: Facts about deleted service. - returned: when deleting a service - type: complex - contains: - service: - description: Details of deleted service. 
- returned: when service existed and was deleted - type: complex - contains: - clusterArn: - description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. - returned: always - type: str - desiredCount: - description: The desired number of instantiations of the task definition to keep running on the service. - returned: always - type: int - loadBalancers: - description: A list of load balancer objects - returned: always - type: complex - contains: - loadBalancerName: - description: the name - returned: always - type: str - containerName: - description: The name of the container to associate with the load balancer. - returned: always - type: str - containerPort: - description: The port on the container to associate with the load balancer. - returned: always - type: int - pendingCount: - description: The number of tasks in the cluster that are in the PENDING state. - returned: always - type: int - runningCount: - description: The number of tasks in the cluster that are in the RUNNING state. - returned: always - type: int - serviceArn: - description: - - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region - of the service, the AWS account ID of the service owner, the service namespace, and then the service name. - sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' - returned: always - type: str - serviceName: - description: A user-generated string used to identify the service - returned: always - type: str - status: - description: The valid values are ACTIVE, DRAINING, or INACTIVE. - returned: always - type: str - tags: - description: The tags applied to this resource. - returned: when tags found - type: list - elements: dict - taskDefinition: - description: The ARN of a task definition to use for tasks in the service. - returned: always - type: str - deployments: - description: list of service deployments - returned: always - type: list - elements: dict - deploymentConfiguration: - description: dictionary of deploymentConfiguration - returned: always - type: complex - contains: - maximumPercent: - description: maximumPercent param - returned: always - type: int - minimumHealthyPercent: - description: minimumHealthyPercent param - returned: always - type: int - deploymentCircuitBreaker: - description: dictionary of deploymentCircuitBreaker - returned: always - type: complex - contains: - enable: - description: The state of the circuit breaker feature. - returned: always - type: bool - rollback: - description: The state of the rollback feature of the circuit breaker. - returned: always - type: bool - events: - description: list of service events - returned: always - type: list - elements: dict - placementConstraints: - description: List of placement constraints objects - returned: always - type: list - elements: dict - contains: - type: - description: The type of constraint. Valid values are distinctInstance and memberOf. - returned: always - type: str - expression: - description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if - the constraint type is distinctInstance. - returned: always - type: str - placementStrategy: - description: List of placement strategy objects - returned: always - type: list - elements: dict - contains: - type: - description: The type of placement strategy. Valid values are random, spread and binpack. 
- returned: always - type: str - field: - description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId - (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, - such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY. - returned: always - type: str - propagateTags: - description: The type of tag propagation applied to the resource - returned: always - type: str - -""" - -import time - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -DEPLOYMENT_CONTROLLER_TYPE_MAP = { - "type": "str", -} - -DEPLOYMENT_CONFIGURATION_TYPE_MAP = { - "maximum_percent": "int", - "minimum_healthy_percent": "int", - "deployment_circuit_breaker": "dict", -} - - -class EcsServiceManager: - """Handles ECS Services""" - - def __init__(self, module): - self.module = module - self.ecs = module.client("ecs") - self.ec2 = module.client("ec2") - - def format_network_configuration(self, network_config): - result = dict() - if network_config["subnets"] is not None: - result["subnets"] = network_config["subnets"] - else: - self.module.fail_json(msg="Network configuration must include subnets") - if network_config["security_groups"] is not None: - groups = network_config["security_groups"] - if any(not sg.startswith("sg-") for sg in groups): - try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] - groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result["securityGroups"] = groups - if network_config["assign_public_ip"] is not None: - if network_config["assign_public_ip"] is True: - result["assignPublicIp"] = "ENABLED" - else: - result["assignPublicIp"] = "DISABLED" - return dict(awsvpcConfiguration=result) - - def find_in_array(self, array_of_services, service_name, field_name="serviceArn"): - for c in array_of_services: - if c[field_name].endswith(service_name): - return c - return None - - def describe_service(self, cluster_name, service_name): - response = self.ecs.describe_services( - cluster=cluster_name, - services=[service_name], - include=["TAGS"], - ) - msg = "" - - if len(response["failures"]) > 0: - c = self.find_in_array(response["failures"], service_name, "arn") - msg += ", failure reason is " + c["reason"] - if c and c["reason"] == "MISSING": - return None - # fall thru and look through found ones - if len(response["services"]) > 0: - c = self.find_in_array(response["services"], service_name) - if c: - return c - raise Exception(f"Unknown problem describing service {service_name}.") - - def is_matching_service(self, expected, existing): - # aws returns the arn of 
the task definition - # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3 - # but the user is just entering - # ansible-fargate-nginx:3 - if expected["task_definition"] != existing["taskDefinition"].split("/")[-1]: - if existing.get("deploymentController", {}).get("type", None) != "CODE_DEPLOY": - return False - - if expected.get("health_check_grace_period_seconds"): - if expected.get("health_check_grace_period_seconds") != existing.get("healthCheckGracePeriodSeconds"): - return False - - if (expected["load_balancers"] or []) != existing["loadBalancers"]: - return False - - if (expected["propagate_tags"] or "NONE") != existing["propagateTags"]: - return False - - if boto3_tag_list_to_ansible_dict(existing.get("tags", [])) != (expected["tags"] or {}): - return False - - if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False): - return False - - # expected is params. DAEMON scheduling strategy returns desired count equal to - # number of instances running; don't check desired count if scheduling strat is daemon - if expected["scheduling_strategy"] != "DAEMON": - if (expected["desired_count"] or 0) != existing["desiredCount"]: - return False - - return True - - def create_service( - self, - service_name, - cluster_name, - task_definition, - load_balancers, - desired_count, - client_token, - role, - deployment_controller, - deployment_configuration, - placement_constraints, - placement_strategy, - health_check_grace_period_seconds, - network_configuration, - service_registries, - launch_type, - platform_version, - scheduling_strategy, - capacity_provider_strategy, - tags, - propagate_tags, - enable_execute_command, - ): - params = dict( - cluster=cluster_name, - serviceName=service_name, - taskDefinition=task_definition, - loadBalancers=load_balancers, - clientToken=client_token, - role=role, - deploymentConfiguration=deployment_configuration, - placementStrategy=placement_strategy, - ) - if network_configuration: - params["networkConfiguration"] = network_configuration - if deployment_controller: - params["deploymentController"] = deployment_controller - if launch_type: - params["launchType"] = launch_type - if platform_version: - params["platformVersion"] = platform_version - if self.health_check_setable(params) and health_check_grace_period_seconds is not None: - params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds - if service_registries: - params["serviceRegistries"] = service_registries - - # filter placement_constraint and left only those where value is not None - # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation - if placement_constraints: - params["placementConstraints"] = [ - {key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints - ] - - # desired count is not required if scheduling strategy is daemon - if desired_count is not None: - params["desiredCount"] = desired_count - if capacity_provider_strategy: - params["capacityProviderStrategy"] = capacity_provider_strategy - if propagate_tags: - params["propagateTags"] = propagate_tags - if tags: - params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") - - if scheduling_strategy: - params["schedulingStrategy"] = scheduling_strategy - if enable_execute_command: -
params["enableExecuteCommand"] = enable_execute_command - - response = self.ecs.create_service(**params) - return self.jsonize(response["service"]) - - def update_service( - self, - service_name, - cluster_name, - task_definition, - desired_count, - deployment_configuration, - placement_constraints, - placement_strategy, - network_configuration, - health_check_grace_period_seconds, - force_new_deployment, - capacity_provider_strategy, - load_balancers, - purge_placement_constraints, - purge_placement_strategy, - enable_execute_command, - ): - params = dict( - cluster=cluster_name, - service=service_name, - taskDefinition=task_definition, - deploymentConfiguration=deployment_configuration, - ) - # filter placement_constraint and left only those where value is not None - # use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation - if placement_constraints: - params["placementConstraints"] = [ - {key: value for key, value in constraint.items() if value is not None} - for constraint in placement_constraints - ] - - if purge_placement_constraints and not placement_constraints: - params["placementConstraints"] = [] - - if placement_strategy: - params["placementStrategy"] = placement_strategy - - if purge_placement_strategy and not placement_strategy: - params["placementStrategy"] = [] - - if network_configuration: - params["networkConfiguration"] = network_configuration - if force_new_deployment: - params["forceNewDeployment"] = force_new_deployment - if capacity_provider_strategy: - params["capacityProviderStrategy"] = capacity_provider_strategy - if health_check_grace_period_seconds is not None: - params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds - # desired count is not required if scheduling strategy is daemon - if desired_count is not None: - params["desiredCount"] = desired_count - if enable_execute_command is not None: - params["enableExecuteCommand"] = enable_execute_command - - if load_balancers: - params["loadBalancers"] = load_balancers - - response = self.ecs.update_service(**params) - - return self.jsonize(response["service"]) - - def jsonize(self, service): - # some fields are datetime which is not JSON serializable - # make them strings - if "createdAt" in service: - service["createdAt"] = str(service["createdAt"]) - if "deployments" in service: - for d in service["deployments"]: - if "createdAt" in d: - d["createdAt"] = str(d["createdAt"]) - if "updatedAt" in d: - d["updatedAt"] = str(d["updatedAt"]) - if "events" in service: - for e in service["events"]: - if "createdAt" in e: - e["createdAt"] = str(e["createdAt"]) - return service - - def delete_service(self, service, cluster=None, force=False): - return self.ecs.delete_service(cluster=cluster, service=service, force=force) - - def health_check_setable(self, params): - load_balancers = params.get("loadBalancers", []) - return len(load_balancers) > 0 - - -def main(): - argument_spec = dict( - state=dict(required=True, choices=["present", "absent", "deleting"]), - name=dict(required=True, type="str", aliases=["service"]), - cluster=dict(required=False, type="str", default="default"), - task_definition=dict(required=False, type="str"), - load_balancers=dict(required=False, default=[], type="list", elements="dict"), - desired_count=dict(required=False, type="int"), - client_token=dict(required=False, default="", type="str", no_log=False), - role=dict(required=False, default="", type="str"), - delay=dict(required=False, type="int", default=10), - 
repeat=dict(required=False, type="int", default=10), - force_new_deployment=dict(required=False, default=False, type="bool"), - force_deletion=dict(required=False, default=False, type="bool"), - deployment_controller=dict(required=False, default={}, type="dict"), - deployment_configuration=dict(required=False, default={}, type="dict"), - wait=dict(required=False, default=False, type="bool"), - placement_constraints=dict( - required=False, - default=[], - type="list", - elements="dict", - options=dict(type=dict(type="str"), expression=dict(required=False, type="str")), - ), - purge_placement_constraints=dict(required=False, default=False, type="bool"), - placement_strategy=dict( - required=False, - default=[], - type="list", - elements="dict", - options=dict( - type=dict(type="str"), - field=dict(type="str"), - ), - ), - purge_placement_strategy=dict(required=False, default=False, type="bool"), - health_check_grace_period_seconds=dict(required=False, type="int"), - network_configuration=dict( - required=False, - type="dict", - options=dict( - subnets=dict(type="list", elements="str"), - security_groups=dict(type="list", elements="str"), - assign_public_ip=dict(type="bool"), - ), - ), - launch_type=dict(required=False, choices=["EC2", "FARGATE"]), - platform_version=dict(required=False, type="str"), - service_registries=dict(required=False, type="list", default=[], elements="dict"), - scheduling_strategy=dict(required=False, choices=["DAEMON", "REPLICA"]), - capacity_provider_strategy=dict( - required=False, - type="list", - default=[], - elements="dict", - options=dict( - capacity_provider=dict(type="str"), - weight=dict(type="int"), - base=dict(type="int"), - ), - ), - propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]), - tags=dict(required=False, type="dict"), - enable_execute_command=dict(required=False, type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[("launch_type", "FARGATE", ["network_configuration"])], - required_together=[["load_balancers", "role"]], - mutually_exclusive=[["launch_type", "capacity_provider_strategy"]], - ) - - if module.params["state"] == "present": - if module.params["scheduling_strategy"] == "REPLICA" and module.params["desired_count"] is None: - module.fail_json(msg="state is present, scheduling_strategy is REPLICA; missing desired_count") - if module.params["task_definition"] is None and not module.params["force_new_deployment"]: - module.fail_json(msg="Either task_definition or force_new_deployment is required when status is present.") - - if len(module.params["capacity_provider_strategy"]) > 6: - module.fail_json(msg="AWS allows a maximum of six capacity providers in the strategy.") - - service_mgr = EcsServiceManager(module) - if module.params["network_configuration"]: - network_configuration = service_mgr.format_network_configuration(module.params["network_configuration"]) - else: - network_configuration = None - - deployment_controller = map_complex_type(module.params["deployment_controller"], DEPLOYMENT_CONTROLLER_TYPE_MAP) - - deploymentController = snake_dict_to_camel_dict(deployment_controller) - - deployment_configuration = map_complex_type( - module.params["deployment_configuration"], DEPLOYMENT_CONFIGURATION_TYPE_MAP - ) - - deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) - serviceRegistries = list(map(snake_dict_to_camel_dict, module.params["service_registries"])) - capacityProviders = list(map(snake_dict_to_camel_dict, 
module.params["capacity_provider_strategy"])) - - try: - existing = service_mgr.describe_service(module.params["cluster"], module.params["name"]) - except Exception as e: - module.fail_json_aws( - e, - msg=f"Exception describing service '{module.params['name']}' in cluster '{module.params['cluster']}'", - ) - - results = dict(changed=False) - - if module.params["state"] == "present": - matching = False - update = False - - if existing and "status" in existing and existing["status"] == "ACTIVE": - if module.params["force_new_deployment"]: - update = True - elif service_mgr.is_matching_service(module.params, existing): - matching = True - results["service"] = existing - else: - update = True - - if not matching: - if not module.check_mode: - role = module.params["role"] - clientToken = module.params["client_token"] - - loadBalancers = [] - for loadBalancer in module.params["load_balancers"]: - if "containerPort" in loadBalancer: - loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) - loadBalancers.append(loadBalancer) - - for loadBalancer in loadBalancers: - if "containerPort" in loadBalancer: - loadBalancer["containerPort"] = int(loadBalancer["containerPort"]) - - if update: - # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature - - if module.params["scheduling_strategy"]: - if (existing["schedulingStrategy"]) != module.params["scheduling_strategy"]: - module.fail_json( - msg="It is not possible to update the scheduling strategy of an existing service" - ) - - if module.params["service_registries"]: - if (existing["serviceRegistries"] or []) != serviceRegistries: - module.fail_json( - msg="It is not possible to update the service registries of an existing service" - ) - if module.params["capacity_provider_strategy"]: - if "launchType" in existing.keys(): - module.fail_json( - msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy." - ) - if module.params["launch_type"]: - if "capacityProviderStrategy" in existing.keys(): - module.fail_json( - msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type." 
- ) - if (existing["loadBalancers"] or []) != loadBalancers: - # fails if deployment type is not CODE_DEPLOY or ECS - if existing["deploymentController"]["type"] not in ["CODE_DEPLOY", "ECS"]: - module.fail_json( - msg="It is not possible to update the load balancers of an existing service" - ) - - if existing.get("deploymentController", {}).get("type", None) == "CODE_DEPLOY": - task_definition = "" - network_configuration = [] - else: - task_definition = module.params["task_definition"] - - if module.params["propagate_tags"] and module.params["propagate_tags"] != existing["propagateTags"]: - module.fail_json( - msg="It is not currently supported to change tag propagation of an existing service" - ) - - if ( - module.params["tags"] - and boto3_tag_list_to_ansible_dict(existing["tags"]) != module.params["tags"] - ): - module.fail_json(msg="It is not currently supported to change tags of an existing service") - - updatedLoadBalancers = loadBalancers if existing["deploymentController"]["type"] == "ECS" else [] - - if task_definition is None and module.params["force_new_deployment"]: - task_definition = existing["taskDefinition"] - - try: - # update required - response = service_mgr.update_service( - module.params["name"], - module.params["cluster"], - task_definition, - module.params["desired_count"], - deploymentConfiguration, - module.params["placement_constraints"], - module.params["placement_strategy"], - network_configuration, - module.params["health_check_grace_period_seconds"], - module.params["force_new_deployment"], - capacityProviders, - updatedLoadBalancers, - module.params["purge_placement_constraints"], - module.params["purge_placement_strategy"], - module.params["enable_execute_command"], - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't update service") - - else: - try: - response = service_mgr.create_service( - module.params["name"], - module.params["cluster"], - module.params["task_definition"], - loadBalancers, - module.params["desired_count"], - clientToken, - role, - deploymentController, - deploymentConfiguration, - module.params["placement_constraints"], - module.params["placement_strategy"], - module.params["health_check_grace_period_seconds"], - network_configuration, - serviceRegistries, - module.params["launch_type"], - module.params["platform_version"], - module.params["scheduling_strategy"], - capacityProviders, - module.params["tags"], - module.params["propagate_tags"], - module.params["enable_execute_command"], - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create service") - - if response.get("tags", None): - response["tags"] = boto3_tag_list_to_ansible_dict(response["tags"]) - results["service"] = response - - results["changed"] = True - - elif module.params["state"] == "absent": - if not existing: - pass - else: - # it exists, so we should delete it and mark changed.
- # return info about the cluster deleted - del existing["deployments"] - del existing["events"] - results["ansible_facts"] = existing - if "status" in existing and existing["status"] == "INACTIVE": - results["changed"] = False - else: - if not module.check_mode: - try: - service_mgr.delete_service( - module.params["name"], - module.params["cluster"], - module.params["force_deletion"], - ) - - # Wait for service to be INACTIVE prior to exiting - if module.params["wait"]: - waiter = service_mgr.ecs.get_waiter("services_inactive") - try: - waiter.wait( - services=[module.params["name"]], - cluster=module.params["cluster"], - WaiterConfig={ - "Delay": module.params["delay"], - "MaxAttempts": module.params["repeat"], - }, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for service removal") - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Couldn't delete service") - - results["changed"] = True - - elif module.params["state"] == "deleting": - if not existing: - module.fail_json(msg="Service '" + module.params["name"] + "' not found.") - return - # it exists, so we should delete it and mark changed. - # return info about the cluster deleted - delay = module.params["delay"] - repeat = module.params["repeat"] - time.sleep(delay) - for i in range(repeat): - existing = service_mgr.describe_service(module.params["cluster"], module.params["name"]) - status = existing["status"] - if status == "INACTIVE": - results["changed"] = True - break - time.sleep(delay) - if i == repeat - 1: - module.fail_json(msg=f"Service still not deleted after {repeat} tries of {delay} seconds each.") - return - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ecs_service_info.py b/ecs_service_info.py deleted file mode 100644 index 02a6abff207..00000000000 --- a/ecs_service_info.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_service_info -version_added: 1.0.0 -short_description: List or describe services in ECS -description: - - Lists or describes services in ECS. -author: - - "Mark Chance (@Java1Guy)" - - "Darek Kaczynski (@kaczynskid)" -options: - details: - description: - - Set this to true if you want detailed information about the services. - required: false - default: false - type: bool - events: - description: - - Whether to return ECS service events. Only has an effect if I(details=true). - required: false - default: true - type: bool - cluster: - description: - - The cluster ARN in which to list the services. - required: false - type: str - service: - description: - - One or more services to get details for. - required: false - type: list - elements: str - aliases: ['name'] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Basic listing example -- community.aws.ecs_service_info: - cluster: test-cluster - service: console-test-service - details: true - register: output - -# Basic listing example -- community.aws.ecs_service_info: - cluster: test-cluster - register: output -""" - -RETURN = r""" -services: - description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
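The delete path above maps the module's delay/repeat options onto botocore's built-in services_inactive waiter. A minimal standalone sketch of that mechanism, with hypothetical cluster and service names:

    import boto3

    ecs = boto3.client("ecs")
    waiter = ecs.get_waiter("services_inactive")
    # Poll every 10 seconds and give up after 10 attempts -- the same
    # semantics as the module's delay/repeat parameters.
    waiter.wait(
        cluster="example-cluster",     # hypothetical
        services=["example-service"],  # hypothetical
        WaiterConfig={"Delay": 10, "MaxAttempts": 10},
    )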
- returned: success - type: complex - contains: - clusterArn: - description: The Amazon Resource Name (ARN) of the cluster that hosts the service. - returned: always - type: str - desiredCount: - description: The desired number of instantiations of the task definition to keep running on the service. - returned: always - type: int - loadBalancers: - description: A list of load balancer objects - returned: always - type: complex - contains: - loadBalancerName: - description: The name of the load balancer. - returned: always - type: str - containerName: - description: The name of the container to associate with the load balancer. - returned: always - type: str - containerPort: - description: The port on the container to associate with the load balancer. - returned: always - type: int - pendingCount: - description: The number of tasks in the cluster that are in the PENDING state. - returned: always - type: int - runningCount: - description: The number of tasks in the cluster that are in the RUNNING state. - returned: always - type: int - serviceArn: - description: - - The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the - service, the AWS account ID of the service owner, the service namespace, and then the service name. - sample: 'arn:aws:ecs:us-east-1:123456789012:service/my-service' - returned: always - type: str - serviceName: - description: A user-generated string used to identify the service - returned: always - type: str - status: - description: The valid values are ACTIVE, DRAINING, or INACTIVE. - returned: always - type: str - taskDefinition: - description: The ARN of a task definition to use for tasks in the service. - returned: always - type: str - deployments: - description: list of service deployments - returned: always - type: list - elements: dict - events: - description: list of service events - returned: when events is true - type: list - elements: dict -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EcsServiceManager: - """Handles ECS Services""" - - def __init__(self, module): - self.module = module - self.ecs = module.client("ecs") - - @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) - def list_services_with_backoff(self, **kwargs): - paginator = self.ecs.get_paginator("list_services") - try: - return paginator.paginate(**kwargs).build_full_result() - except is_boto3_error_code("ClusterNotFoundException") as e: - self.module.fail_json_aws(e, "Could not find cluster to list services") - - @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) - def describe_services_with_backoff(self, **kwargs): - return self.ecs.describe_services(**kwargs) - - def list_services(self, cluster): - fn_args = dict() - if cluster and cluster is not None: - fn_args["cluster"] = cluster - try: - response = self.list_services_with_backoff(**fn_args) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't list ECS services") - relevant_response = dict(services=response["serviceArns"]) - return relevant_response - - def describe_services(self, cluster, services): - fn_args = dict() - if cluster and
cluster is not None: - fn_args["cluster"] = cluster - fn_args["services"] = services - try: - response = self.describe_services_with_backoff(**fn_args) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't describe ECS services") - running_services = [self.extract_service_from(service) for service in response.get("services", [])] - services_not_running = response.get("failures", []) - return running_services, services_not_running - - def extract_service_from(self, service): - # some fields are datetime which is not JSON serializable - # make them strings - if "deployments" in service: - for d in service["deployments"]: - if "createdAt" in d: - d["createdAt"] = str(d["createdAt"]) - if "updatedAt" in d: - d["updatedAt"] = str(d["updatedAt"]) - if "events" in service: - if not self.module.params["events"]: - del service["events"] - else: - for e in service["events"]: - if "createdAt" in e: - e["createdAt"] = str(e["createdAt"]) - return service - - -def chunks(l, n): - """Yield successive n-sized chunks from l.""" - """ https://stackoverflow.com/a/312464 """ - for i in range(0, len(l), n): - yield l[i:i + n] # fmt: skip - - -def main(): - argument_spec = dict( - details=dict(type="bool", default=False), - events=dict(type="bool", default=True), - cluster=dict(), - service=dict(type="list", elements="str", aliases=["name"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - show_details = module.params.get("details") - - task_mgr = EcsServiceManager(module) - if show_details: - if module.params["service"]: - services = module.params["service"] - else: - services = task_mgr.list_services(module.params["cluster"])["services"] - ecs_info = dict(services=[], services_not_running=[]) - for chunk in chunks(services, 10): - running_services, services_not_running = task_mgr.describe_services(module.params["cluster"], chunk) - ecs_info["services"].extend(running_services) - ecs_info["services_not_running"].extend(services_not_running) - else: - ecs_info = task_mgr.list_services(module.params["cluster"]) - - module.exit_json(changed=False, **ecs_info) - - -if __name__ == "__main__": - main() diff --git a/ecs_tag.py b/ecs_tag.py deleted file mode 100644 index 109b974eea6..00000000000 --- a/ecs_tag.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Michael Pechner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_tag -version_added: 1.0.0 -short_description: create and remove tags on Amazon ECS resources -description: - - Creates and removes tags for Amazon ECS resources. - - Resources are referenced by their cluster name. -author: - - Michael Pechner (@mpechner) -options: - cluster_name: - description: - - The name of the cluster whose resources we are tagging. - required: true - type: str - resource: - description: - - The ECS resource name. - - Required unless I(resource_type=cluster). - type: str - resource_type: - description: - - The type of resource. - default: cluster - choices: ['cluster', 'task', 'service', 'task_definition', 'container'] - type: str - state: - description: - - Whether the tags should be present or absent on the resource. - default: present - choices: ['present', 'absent'] - type: str - tags: - description: - - A dictionary of tags to add or remove from the resource. 
- - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. - type: dict - aliases: ['resource_tags'] - purge_tags: - description: - - Whether unspecified tags should be removed from the resource. - - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. - type: bool - default: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Ensure tags are present on a resource - community.aws.ecs_tag: - cluster_name: mycluster - resource_type: cluster - state: present - tags: - Name: ubervol - env: prod - -- name: Remove the Env tag - community.aws.ecs_tag: - cluster_name: mycluster - resource_type: cluster - tags: - Env: - state: absent - -- name: Remove the Env tag if it's currently 'development' - community.aws.ecs_tag: - cluster_name: mycluster - resource_type: cluster - tags: - Env: development - state: absent - -- name: Remove all tags except for Name from a cluster - community.aws.ecs_tag: - cluster_name: mycluster - resource_type: cluster - tags: - Name: foo - state: absent - purge_tags: true -""" - -RETURN = r""" -tags: - description: A dict containing the tags on the resource - returned: always - type: dict -added_tags: - description: A dict of tags that were added to the resource - returned: If tags were added - type: dict -removed_tags: - description: A dict of tags that were removed from the resource - returned: If tags were removed - type: dict -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_tags(ecs, module, resource): - try: - return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"]) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to fetch tags for resource {resource}") - - -def get_arn(ecs, module, cluster_name, resource_type, resource): - try: - if resource_type == "cluster": - description = ecs.describe_clusters(clusters=[resource]) - resource_arn = description["clusters"][0]["clusterArn"] - elif resource_type == "task": - description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource]) - resource_arn = description["tasks"][0]["taskArn"] - elif resource_type == "service": - description = ecs.describe_services(cluster=cluster_name, services=[resource]) - resource_arn = description["services"][0]["serviceArn"] - elif resource_type == "task_definition": - description = ecs.describe_task_definition(taskDefinition=resource) - resource_arn = description["taskDefinition"]["taskDefinitionArn"] - elif resource_type == "container": - description = ecs.describe_container_instances(clusters=[resource]) - resource_arn = description["containerInstances"][0]["containerInstanceArn"] - except (IndexError, KeyError): - module.fail_json(msg=f"Failed to find {resource_type} {resource}") - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to 
find {resource_type} {resource}") - - return resource_arn - - -def main(): - argument_spec = dict( - cluster_name=dict(required=True), - resource=dict(required=False), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=False), - state=dict(default="present", choices=["present", "absent"]), - resource_type=dict(default="cluster", choices=["cluster", "task", "service", "task_definition", "container"]), - ) - required_if = [("state", "present", ["tags"]), ("state", "absent", ["tags"])] - - module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) - - resource_type = module.params["resource_type"] - cluster_name = module.params["cluster_name"] - if resource_type == "cluster": - resource = cluster_name - else: - resource = module.params["resource"] - tags = module.params["tags"] - state = module.params["state"] - purge_tags = module.params["purge_tags"] - - result = {"changed": False} - - ecs = module.client("ecs") - - resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource) - - current_tags = get_tags(ecs, module, resource_arn) - - add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) - - remove_tags = {} - if state == "absent": - for key in tags: - if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): - remove_tags[key] = current_tags[key] - - for key in remove: - remove_tags[key] = current_tags[key] - - if remove_tags: - result["changed"] = True - result["removed_tags"] = remove_tags - if not module.check_mode: - try: - ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to remove tags {remove_tags} from resource {resource}") - - if state == "present" and add_tags: - result["changed"] = True - result["added_tags"] = add_tags - current_tags.update(add_tags) - if not module.check_mode: - try: - tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value") - ecs.tag_resource(resourceArn=resource_arn, tags=tags) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to set tags {add_tags} on resource {resource}") - - result["tags"] = get_tags(ecs, module, resource_arn) - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/ecs_task.py b/ecs_task.py deleted file mode 100644 index dfd7d9a7902..00000000000 --- a/ecs_task.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_task -version_added: 1.0.0 -short_description: Run, start or stop a task in ECS -description: - - Creates or deletes instances of task definitions. -author: - - Mark Chance (@Java1Guy) -options: - operation: - description: - - Which task operation to execute. - - When I(operation=run) I(task_definition) must be set. - - When I(operation=start) both I(task_definition) and I(container_instances) must be set. - - When I(operation=stop) both I(task_definition) and I(task) must be set. - required: True - choices: ['run', 'start', 'stop'] - type: str - cluster: - description: - - The name of the cluster to run the task on. - - If not specified, the cluster name will be C(default). 
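Stepping back to ecs_tag's main() above: the add/remove split hinges on compare_aws_tags() from amazon.aws. A small sketch of its contract (values illustrative; assumes the amazon.aws collection is importable):

    from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

    current = {"Name": "ubervol", "env": "prod"}
    desired = {"env": "staging"}
    # With purge_tags=True, keys missing from `desired` (here Name) are
    # returned as keys to unset, while changed values (env) come back as
    # tags to set.
    to_set, to_remove = compare_aws_tags(current, desired, purge_tags=True)
    # to_set    -> {"env": "staging"}
    # to_remove -> ["Name"]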
- required: False - type: str - default: 'default' - task_definition: - description: - - The task definition to start, run or stop. - required: False - type: str - overrides: - description: - - A dictionary of values to pass to the new instances. - required: False - type: dict - count: - description: - - How many new instances to start. - required: False - type: int - task: - description: - - The ARN of the task to stop. - required: False - type: str - container_instances: - description: - - The list of container instances on which to deploy the task. - required: False - type: list - elements: str - started_by: - description: - - A value showing who or what started the task (for informational purposes). - required: False - type: str - network_configuration: - description: - - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc). - type: dict - suboptions: - assign_public_ip: - description: Whether the task's elastic network interface receives a public IP address. - type: bool - version_added: 1.5.0 - subnets: - description: A list of subnet IDs to which the task is attached. - type: list - elements: str - security_groups: - description: A list of group names or group IDs for the task. - type: list - elements: str - launch_type: - description: - - The launch type on which to run your service. - required: false - choices: ["EC2", "FARGATE"] - type: str - tags: - type: dict - description: - - Tags that will be added to ecs tasks on start and run - required: false - aliases: ['resource_tags'] - wait: - description: - - Whether or not to wait for the desired state. - type: bool - default: false - version_added: 4.1.0 -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Simple example of run task -- name: Run task - community.aws.ecs_task: - operation: run - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - count: 1 - started_by: ansible_user - register: task_output - -# Simple example of start task - -- name: Start a task - community.aws.ecs_task: - operation: start - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - tags: - resourceName: a_task_for_ansible_to_run - type: long_running_task - network: internal - version: 1.4 - container_instances: - - arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 - started_by: ansible_user - network_configuration: - subnets: - - subnet-abcd1234 - security_groups: - - sg-aaaa1111 - - my_security_group - register: task_output - -- name: RUN a task on Fargate - community.aws.ecs_task: - operation: run - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - started_by: ansible_user - launch_type: FARGATE - network_configuration: - subnets: - - subnet-abcd1234 - security_groups: - - sg-aaaa1111 - - my_security_group - register: task_output - -- name: RUN a task on Fargate with public ip assigned - community.aws.ecs_task: - operation: run - count: 2 - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" - started_by: ansible_user - launch_type: 
FARGATE - network_configuration: - assign_public_ip: true - subnets: - - subnet-abcd1234 - register: task_output - -- name: Stop a task - community.aws.ecs_task: - operation: stop - cluster: console-sample-app-static-cluster - task_definition: console-sample-app-static-taskdef - task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" -""" - -RETURN = r""" -task: - description: Details about the task that was started. - returned: success - type: complex - contains: - taskArn: - description: The Amazon Resource Name (ARN) that identifies the task. - returned: always - type: str - clusterArn: - description: The Amazon Resource Name (ARN) of the cluster that hosts the task. - returned: only when details is true - type: str - taskDefinitionArn: - description: The Amazon Resource Name (ARN) of the task definition. - returned: only when details is true - type: str - containerInstanceArn: - description: The Amazon Resource Name (ARN) of the container running the task. - returned: only when details is true - type: str - overrides: - description: The container overrides set for this task. - returned: only when details is true - type: list - elements: dict - lastStatus: - description: The last recorded status of the task. - returned: only when details is true - type: str - desiredStatus: - description: The desired status of the task. - returned: only when details is true - type: str - containers: - description: The container details. - returned: only when details is true - type: list - elements: dict - startedBy: - description: The user who started the task. - returned: only when details is true - type: str - stoppedReason: - description: The reason why the task was stopped. - returned: only when details is true - type: str - createdAt: - description: The timestamp of when the task was created. - returned: only when details is true - type: str - startedAt: - description: The timestamp of when the task was started. - returned: only when details is true - type: str - stoppedAt: - description: The timestamp of when the task was stopped. - returned: only when details is true - type: str - launchType: - description: The launch type on which to run your task.
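The network_configuration option used in the Fargate examples above is translated by format_network_configuration() below into ECS's awsvpcConfiguration structure. A sketch of the resulting shape, with hypothetical IDs:

    # Shape ultimately passed to run_task/start_task for awsvpc task definitions.
    network_configuration = {
        "awsvpcConfiguration": {
            "subnets": ["subnet-abcd1234"],     # hypothetical
            "securityGroups": ["sg-aaaa1111"],  # hypothetical
            "assignPublicIp": "ENABLED",        # ECS expects ENABLED/DISABLED, not a bool
        }
    }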
- returned: always - type: str -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EcsExecManager: - """Handles ECS Tasks""" - - def __init__(self, module): - self.module = module - self.ecs = module.client("ecs") - self.ec2 = module.client("ec2") - - def format_network_configuration(self, network_config): - result = dict() - if "subnets" in network_config: - result["subnets"] = network_config["subnets"] - else: - self.module.fail_json(msg="Network configuration must include subnets") - if "security_groups" in network_config: - groups = network_config["security_groups"] - if any(not sg.startswith("sg-") for sg in groups): - try: - vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"] - groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't look up security groups") - result["securityGroups"] = groups - if "assign_public_ip" in network_config: - if network_config["assign_public_ip"] is True: - result["assignPublicIp"] = "ENABLED" - else: - result["assignPublicIp"] = "DISABLED" - - return dict(awsvpcConfiguration=result) - - def list_tasks(self, cluster_name, service_name, status): - response = self.ecs.list_tasks( - cluster=cluster_name, - family=service_name, - desiredStatus=status, - ) - if len(response["taskArns"]) > 0: - for c in response["taskArns"]: - if c.endswith(service_name): - return c - return None - - def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags): - if overrides is None: - overrides = dict() - params = dict( - cluster=cluster, taskDefinition=task_definition, overrides=overrides, count=count, startedBy=startedBy - ) - if self.module.params["network_configuration"]: - params["networkConfiguration"] = self.format_network_configuration( - self.module.params["network_configuration"] - ) - if launch_type: - params["launchType"] = launch_type - if tags: - params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") - - # TODO: need to check if long arn format enabled. 
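The TODO above is addressed further down by ecs_task_long_format_enabled(), which wraps a single API call. As a standalone sketch of that call:

    import boto3

    ecs = boto3.client("ecs")
    # Tagging tasks requires the account-level taskLongArnFormat setting;
    # effectiveSettings=True resolves account and principal defaults.
    resp = ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True)
    long_arns_enabled = resp["settings"][0]["value"] == "enabled"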
- try: - response = self.ecs.run_task(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't run task") - # include tasks and failures - return response["tasks"] - - def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags): - args = dict() - if cluster: - args["cluster"] = cluster - if task_definition: - args["taskDefinition"] = task_definition - if overrides: - args["overrides"] = overrides - if container_instances: - args["containerInstances"] = container_instances - if startedBy: - args["startedBy"] = startedBy - if self.module.params["network_configuration"]: - args["networkConfiguration"] = self.format_network_configuration( - self.module.params["network_configuration"] - ) - if tags: - args["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value") - try: - response = self.ecs.start_task(**args) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't start task") - # include tasks and failures - return response["tasks"] - - def stop_task(self, cluster, task): - response = self.ecs.stop_task(cluster=cluster, task=task) - return response["task"] - - def ecs_task_long_format_enabled(self): - account_support = self.ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True) - return account_support["settings"][0]["value"] == "enabled" - - -def main(): - argument_spec = dict( - operation=dict(required=True, choices=["run", "start", "stop"]), - cluster=dict(required=False, type="str", default="default"), # R S P - task_definition=dict(required=False, type="str"), # R* S* - overrides=dict(required=False, type="dict"), # R S - count=dict(required=False, type="int"), # R - task=dict(required=False, type="str"), # P* - container_instances=dict(required=False, type="list", elements="str"), # S* - started_by=dict(required=False, type="str"), # R S - network_configuration=dict(required=False, type="dict"), - launch_type=dict(required=False, choices=["EC2", "FARGATE"]), - tags=dict(required=False, type="dict", aliases=["resource_tags"]), - wait=dict(required=False, default=False, type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[ - ("launch_type", "FARGATE", ["network_configuration"]), - ("operation", "run", ["task_definition"]), - ("operation", "start", ["task_definition", "container_instances"]), - ("operation", "stop", ["task_definition", "task"]), - ], - ) - - # Validate Inputs - if module.params["operation"] == "run": - task_to_list = module.params["task_definition"] - status_type = "RUNNING" - - if module.params["operation"] == "start": - task_to_list = module.params["task"] - status_type = "RUNNING" - - if module.params["operation"] == "stop": - task_to_list = module.params["task_definition"] - status_type = "STOPPED" - - service_mgr = EcsExecManager(module) - - if module.params["tags"]: - if not service_mgr.ecs_task_long_format_enabled(): - module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags") - - existing = service_mgr.list_tasks(module.params["cluster"], task_to_list, status_type) - - results = dict(changed=False) - if module.params["operation"] == "run": - if existing: - # TBD - validate the rest of the details - results["task"] = existing - else: - if not module.check_mode: - # run_task returns a list of tasks created - tasks = service_mgr.run_task( - 
module.params["cluster"], - module.params["task_definition"], - module.params["overrides"], - module.params["count"], - module.params["started_by"], - module.params["launch_type"], - module.params["tags"], - ) - - # Wait for task(s) to be running prior to exiting - if module.params["wait"]: - waiter = service_mgr.ecs.get_waiter("tasks_running") - try: - waiter.wait( - tasks=[task["taskArn"] for task in tasks], - cluster=module.params["cluster"], - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for tasks to run") - - results["task"] = tasks - - results["changed"] = True - - elif module.params["operation"] == "start": - if existing: - # TBD - validate the rest of the details - results["task"] = existing - else: - if not module.check_mode: - results["task"] = service_mgr.start_task( - module.params["cluster"], - module.params["task_definition"], - module.params["overrides"], - module.params["container_instances"], - module.params["started_by"], - module.params["tags"], - ) - - results["changed"] = True - - elif module.params["operation"] == "stop": - if existing: - results["task"] = existing - else: - if not module.check_mode: - # it exists, so we should delete it and mark changed. - # return info about the cluster deleted - results["task"] = service_mgr.stop_task(module.params["cluster"], module.params["task"]) - - # Wait for task to be stopped prior to exiting - if module.params["wait"]: - waiter = service_mgr.ecs.get_waiter("tasks_stopped") - try: - waiter.wait( - tasks=[module.params["task"]], - cluster=module.params["cluster"], - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for task to stop") - - results["changed"] = True - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ecs_taskdefinition.py b/ecs_taskdefinition.py deleted file mode 100644 index 4c4aefc2032..00000000000 --- a/ecs_taskdefinition.py +++ /dev/null @@ -1,1239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_taskdefinition -version_added: 1.0.0 -short_description: register a task definition in ecs -description: - - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS). -author: - - Mark Chance (@Java1Guy) - - Alina Buzachis (@alinabuzachis) -options: - state: - description: - - State whether the task definition should exist or be deleted. - required: true - choices: ['present', 'absent'] - type: str - arn: - description: - - The ARN of the task description to delete. - required: false - type: str - family: - description: - - A Name that would be given to the task definition. - required: false - type: str - revision: - description: - - A revision number for the task definition. - required: False - type: int - force_create: - description: - - Always create new task definition. - required: False - type: bool - default: false - containers: - description: - - A list of containers definitions. - - See U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html) for a complete list of parameters. - required: True - type: list - elements: dict - suboptions: - name: - description: The name of a container. - required: False - type: str - image: - description: The image used to start a container. 
- required: False - type: str - repositoryCredentials: - description: The private repository authentication credentials to use. - required: False - type: dict - suboptions: - credentialsParameter: - description: - - The Amazon Resource Name (ARN) of the secret containing the private repository credentials. - required: True - type: str - cpu: - description: The number of cpu units reserved for the container. - required: False - type: int - memory: - description: The amount (in MiB) of memory to present to the container. - required: False - type: int - memoryReservation: - description: The soft limit (in MiB) of memory to reserve for the container. - required: False - type: int - links: - description: - - Allows containers to communicate with each other without the need for port mappings. - - This parameter is only supported if I(network_mode=bridge). - required: False - type: list - elements: str - portMappings: - description: The list of port mappings for the container. - required: False - type: list - elements: dict - suboptions: - containerPort: - description: The port number on the container that is bound to the user-specified or automatically assigned host port. - required: False - type: int - hostPort: - description: The port number on the container instance to reserve for your container. - required: False - type: int - protocol: - description: The protocol used for the port mapping. - required: False - type: str - default: tcp - choices: ['tcp', 'udp'] - essential: - description: - - If I(essential=True), and the container fails or stops for any reason, all other containers that are part of the task are stopped. - required: False - type: bool - entryPoint: - description: The entry point that is passed to the container. - required: False - type: str - command: - description: The command that is passed to the container. If there are multiple arguments, each argument is a separated string in the array. - required: False - type: list - elements: str - environment: - description: The environment variables to pass to a container. - required: False - type: list - elements: dict - suboptions: - name: - description: The name of the key-value pair. - required: False - type: str - value: - description: The value of the key-value pair. - required: False - type: str - environmentFiles: - description: A list of files containing the environment variables to pass to a container. - required: False - type: list - elements: dict - suboptions: - value: - description: The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. - required: False - type: str - type: - description: The file type to use. The only supported value is C(s3). - required: False - type: str - mountPoints: - description: The mount points for data volumes in your container. - required: False - type: list - elements: dict - suboptions: - sourceVolume: - description: The name of the volume to mount. - required: False - type: str - containerPath: - description: The path on the container to mount the host volume at. - required: False - type: str - readOnly: - description: - - If this value is C(True), the container has read-only access to the volume. - - If this value is C(False), then the container can write to the volume. - required: False - default: False - type: bool - volumesFrom: - description: Data volumes to mount from another container. 
- required: False - type: list - elements: dict - suboptions: - sourceContainer: - description: - - The name of another container within the same task definition from which to mount volumes. - required: False - type: str - readOnly: - description: - - If this value is C(True), the container has read-only access to the volume. - - If this value is C(False), then the container can write to the volume. - required: False - default: False - type: bool - linuxParameters: - description: Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. - required: False - type: dict - suboptions: - capabilities: - description: - - The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. - required: False - type: dict - suboptions: - add: - description: - - The Linux capabilities for the container that have been added to the default configuration provided by Docker. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: list - choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", - "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", - "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", - "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", - "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] - elements: str - drop: - description: - - The Linux capabilities for the container that have been removed from the default configuration provided by Docker. - required: False - type: list - choices: ["ALL", "AUDIT_CONTROL", "AUDIT_WRITE", "BLOCK_SUSPEND", "CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", - "FSETID", "IPC_LOCK", "IPC_OWNER", "KILL", "LEASE", "LINUX_IMMUTABLE", "MAC_ADMIN", "MAC_OVERRIDE", "MKNOD", - "NET_ADMIN", "NET_BIND_SERVICE", "NET_BROADCAST", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", - "SYS_ADMIN", "SYS_BOOT", "SYS_CHROOT", "SYS_MODULE", "SYS_NICE", "SYS_PACCT", "SYS_PTRACE", "SYS_RAWIO", - "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "SYSLOG", "WAKE_ALARM"] - elements: str - devices: - description: - - Any host devices to expose to the container. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: list - elements: dict - suboptions: - hostPath: - description: The path for the device on the host container instance. - required: True - type: str - containerPath: - description: The path inside the container at which to expose the host device. - required: False - type: str - permissions: - description: The explicit permissions to provide to the container for the device. - required: False - type: list - elements: str - initProcessEnabled: - description: Run an init process inside the container that forwards signals and reaps processes. - required: False - type: bool - sharedMemorySize: - description: - - The value for the size (in MiB) of the /dev/shm volume. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: int - tmpfs: - description: - - The container path, mount options, and size (in MiB) of the tmpfs mount. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: list - elements: dict - suboptions: - containerPath: - description: The absolute file path where the tmpfs volume is to be mounted. 
- required: True - type: str - size: - description: The size (in MiB) of the tmpfs volume. - required: True - type: int - mountOptions: - description: The list of tmpfs volume mount options. - required: False - type: list - choices: ["defaults", "ro", "rw", "suid", "nosuid", "dev", "nodev", "exec", "noexec", "sync", "async", "dirsync", - "remount", "mand", "nomand", "atime", "noatime", "diratime", "nodiratime", "bind", "rbind", "unbindable", - "runbindable", "private", "rprivate", "shared", "rshared", "slave", "rslave", "relatime", "norelatime", - "strictatime", "nostrictatime", "mode", "uid", "gid", "nr_inodes", "nr_blocks", "mpol"] - elements: str - maxSwap: - description: - - The total amount of swap memory (in MiB) a container can use. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: int - swappiness: - description: - - This allows you to tune a container's memory swappiness behavior. - - If I(launch_type=FARGATE), this parameter is not supported. - required: False - type: int - secrets: - description: The secrets to pass to the container. - required: False - type: list - elements: dict - suboptions: - name: - description: The value to set as the environment variable on the container. - required: True - type: str - valueFrom: - description: The secret to expose to the container. - required: True - type: str - dependsOn: - description: - - The dependencies defined for container startup and shutdown. - - A dependency defined for container startup is reversed for container shutdown. - required: False - type: list - elements: dict - suboptions: - containerName: - description: The name of a container. - type: str - required: True - condition: - description: The dependency condition of the container. - type: str - required: True - choices: ["start", "complete", "success", "healthy"] - startTimeout: - description: Time duration (in seconds) to wait before giving up on resolving dependencies for a container. - required: False - type: int - stopTimeout: - description: Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. - required: False - type: int - hostname: - description: - - The hostname to use for your container. - - This parameter is not supported if I(network_mode=awsvpc). - required: False - type: str - user: - description: - - The user to use inside the container. - - This parameter is not supported for Windows containers. - required: False - type: str - workingDirectory: - description: The working directory in which to run commands inside the container. - required: False - type: str - disableNetworking: - description: When this parameter is C(True), networking is disabled within the container. - required: False - type: bool - privileged: - description: When this parameter is C(True), the container is given elevated privileges on the host container instance. - required: False - type: bool - readonlyRootFilesystem: - description: When this parameter is C(True), the container is given read-only access to its root file system. - required: False - type: bool - dnsServers: - description: - - A list of DNS servers that are presented to the container. - - This parameter is not supported for Windows containers. - required: False - type: list - elements: str - dnsSearchDomains: - description: - - A list of DNS search domains that are presented to the container. - - This parameter is not supported for Windows containers.
- required: False - type: list - elements: str - extraHosts: - description: - - A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. - - This parameter is not supported for Windows containers or tasks that use I(network_mode=awsvpc). - required: False - type: list - elements: dict - suboptions: - hostname: - description: The hostname to use in the /etc/hosts entry. - type: str - required: False - ipAddress: - description: The IP address to use in the /etc/hosts entry. - type: str - required: False - dockerSecurityOptions: - description: - - A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. - - This parameter is not supported for Windows containers. - required: False - type: list - elements: str - interactive: - description: - - When I(interactive=True), containerized applications that require stdin or a tty to be allocated can be deployed. - required: False - type: bool - pseudoTerminal: - description: When this parameter is C(True), a TTY is allocated. - required: False - type: bool - dockerLabels: - description: A key/value map of labels to add to the container. - required: False - type: dict - ulimits: - description: - - A list of ulimits to set in the container. - - This parameter is not supported for Windows containers. - required: False - type: list - elements: dict - suboptions: - name: - description: The type of the ulimit. - type: str - required: False - choices: ['core', 'cpu', 'data', 'fsize', 'locks', 'memlock', 'msgqueue', 'nice', 'nofile', 'nproc', 'rss', - 'rtprio', 'rttime', 'sigpending', 'stack'] - softLimit: - description: The soft limit for the ulimit type. - type: int - required: False - hardLimit: - description: The hard limit for the ulimit type. - type: int - required: False - logConfiguration: - description: The log configuration specification for the container. - required: False - type: dict - suboptions: - logDriver: - description: - - The log driver to use for the container. - - For tasks on AWS Fargate, the supported log drivers are C(awslogs), C(splunk), and C(awsfirelens). - - For tasks hosted on Amazon EC2 instances, the supported log drivers are C(awslogs), C(fluentd), - C(gelf), C(json-file), C(journald), C(logentries), C(syslog), C(splunk), and C(awsfirelens). - type: str - required: False - options: - description: The configuration options to send to the log driver. - required: False - type: dict - secretOptions: - description: The secrets to pass to the log configuration. - required: False - type: list - elements: dict - suboptions: - name: - description: The name of the secret. - type: str - required: False - valueFrom: - description: The secret to expose to the container. - type: str - required: False - healthCheck: - description: The health check command and associated configuration parameters for the container. - required: False - type: dict - suboptions: - command: - description: - - A string array representing the command that the container runs to determine if it is healthy. - - > - The string array must start with CMD to run the command arguments directly, - or CMD-SHELL to run the command with the container's default shell. - - An exit code of 0 indicates success, and a non-zero exit code indicates failure. - required: False - type: list - elements: str - interval: - description: - - The time period in seconds between each health check execution. - - You may specify between 5 and 300 seconds. The default value is 30 seconds.
- required: False - type: int - default: 30 - retries: - description: - - The number of times to retry a failed health check before the container is considered unhealthy. - - You may specify between 1 and 10 retries. The default value is 3. - required: False - type: int - default: 3 - startPeriod: - description: - - > - The optional grace period to provide containers time to bootstrap - before failed health checks count towards the maximum number of retries. - - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled. - - > - Note: If a health check succeeds within the startPeriod, - then the container is considered healthy and any subsequent failures count toward the maximum number of retries. - required: False - type: int - timeout: - description: - - The time period in seconds to wait for a health check to succeed before it is considered a failure. - - You may specify between 2 and 60 seconds. The default value is 5. - required: False - type: int - default: 5 - systemControls: - description: A list of namespaced kernel parameters to set in the container. - required: False - type: list - elements: dict - suboptions: - namespace: - description: The namespaced kernel parameter to set a C(value) for. - type: str - value: - description: The value for the namespaced kernel parameter that's specified in C(namespace). - type: str - resourceRequirements: - description: - - The type and amount of a resource to assign to a container. - - The only supported resources are C(GPU) and C(InferenceAccelerator). - required: False - type: list - elements: dict - suboptions: - value: - description: The value for the specified resource type. - type: str - type: - description: The type of resource to assign to a container. - type: str - choices: ['GPU', 'InferenceAccelerator'] - firelensConfiguration: - description: - - The FireLens configuration for the container. - - This is used to specify and configure a log router for container logs. - required: False - type: dict - suboptions: - type: - description: - - The log router to use. The valid values are C(fluentd) or C(fluentbit). - required: False - type: str - choices: - - fluentd - - fluentbit - options: - description: - - The options to use when configuring the log router. - - This field is optional and can be used to specify a custom configuration - file or to add additional metadata, such as the task, task definition, cluster, - and container instance details to the log event. - - If specified, the syntax to use is - C({"enable-ecs-log-metadata":"true|false","config-file-type":"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}). - - For more information, see U(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html#firelens-taskdef). - required: False - type: dict - network_mode: - description: - - The Docker networking mode to use for the containers in the task. - - Windows containers must use I(network_mode=default), which will utilize docker NAT networking. - - Setting I(network_mode=default) for a Linux container will use C(bridge) mode. - required: false - default: bridge - choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ] - type: str - task_role_arn: - description: - - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted - the permissions that are specified in this role.
- required: false - type: str - default: '' - execution_role_arn: - description: - - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. - required: false - type: str - default: '' - volumes: - description: - - A list of names of volumes to be attached. - required: False - type: list - elements: dict - suboptions: - name: - type: str - description: The name of the volume. - required: true - launch_type: - description: - - The launch type on which to run your task. - required: false - type: str - choices: ["EC2", "FARGATE"] - cpu: - description: - - The number of cpu units used by the task. If I(launch_type=EC2), this field is optional and any value can be used. - - If I(launch_type=FARGATE), this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096). - required: false - type: str - memory: - description: - - The amount (in MiB) of memory used by the task. If I(launch_type=EC2), this field is optional and any value can be used. - - If I(launch_type=FARGATE), this field is required and is limited by the CPU. - required: false - type: str - placement_constraints: - version_added: 2.1.0 - description: - - Placement constraint objects to use for the task. - - You can specify a maximum of 10 constraints per task. - - Task placement constraints are not supported for tasks run on Fargate. - required: false - type: list - elements: dict - suboptions: - type: - description: The type of constraint. - type: str - expression: - description: A cluster query language expression to apply to the constraint. - type: str - runtime_platform: - version_added: 6.4.0 - description: - - runtime platform configuration for the task - required: false - type: dict - default: { - "operatingSystemFamily": "LINUX", - "cpuArchitecture": "X86_64" - } - suboptions: - cpuArchitecture: - description: The CPU Architecture type to be used by the task - type: str - required: false - choices: ['X86_64', 'ARM64'] - operatingSystemFamily: - description: OS type to be used by the task - type: str - required: false - choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE'] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create task definition - community.aws.ecs_taskdefinition: - containers: - - name: simple-app - cpu: 10 - essential: true - image: "httpd:2.4" - memory: 300 - mountPoints: - - containerPath: /usr/local/apache2/htdocs - sourceVolume: my-vol - portMappings: - - containerPort: 80 - hostPort: 80 - logConfiguration: - logDriver: awslogs - options: - awslogs-group: /ecs/test-cluster-taskdef - awslogs-region: us-west-2 - awslogs-stream-prefix: ecs - - name: busybox - command: - - > - /bin/sh -c "while true; do echo 'Amazon ECS Sample App

Congratulations! Your application is now running on a container in Amazon ECS.' > top; /bin/date > date ; echo '
' > bottom; - cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done" - cpu: 10 - entryPoint: - - sh - - "-c" - essential: false - image: busybox - memory: 200 - volumesFrom: - - sourceContainer: simple-app - volumes: - - name: my-vol - family: test-cluster-taskdef - state: present - register: task_output - -- name: Create task definition - community.aws.ecs_taskdefinition: - family: nginx - containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 - state: present - -- name: Create task definition - community.aws.ecs_taskdefinition: - family: nginx - containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - launch_type: FARGATE - cpu: 512 - memory: 1024 - state: present - network_mode: awsvpc - -- name: Create task definition - community.aws.ecs_taskdefinition: - family: nginx - containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 - dependsOn: - - containerName: "simple-app" - condition: "start" - -# Create Task Definition with Environment Variables and Secrets -- name: Create task definition - community.aws.ecs_taskdefinition: - family: nginx - containers: - - name: nginx - essential: true - image: "nginx" - environment: - - name: "PORT" - value: "8080" - secrets: - # For variables stored in Secrets Manager - - name: "NGINX_HOST" - valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST" - # For variables stored in Parameter Store - - name: "API_KEY" - valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY" - launch_type: FARGATE - cpu: 512 - memory: 1GB - state: present - network_mode: awsvpc - -# Create Task Definition with health check -- name: Create task definition - community.aws.ecs_taskdefinition: - family: nginx - containers: - - name: nginx - essential: true - image: "nginx" - portMappings: - - containerPort: 8080 - hostPort: 8080 - cpu: 512 - memory: 1024 - healthCheck: - command: - - CMD-SHELL - - /app/healthcheck.py - interval: 60 - retries: 3 - startPeriod: 15 - timeout: 15 - state: present -""" - -RETURN = r""" -taskdefinition: - description: a reflection of the input parameters - type: dict - returned: always -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EcsTaskManager: - """Handles ECS Tasks""" - - def __init__(self, module): - self.module = module - - self.ecs = module.client("ecs", AWSRetry.jittered_backoff()) - - def describe_task(self, task_name): - try: - response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name) - return response["taskDefinition"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - return None - - def register_task( - self, - family, - task_role_arn, - execution_role_arn, - network_mode, - container_definitions, - volumes, - launch_type, - cpu, - memory, - placement_constraints, - runtime_platform, - ): - validated_containers = [] - - # Ensures the number parameters are int as required by the AWS SDK - for container in container_definitions: - for param in ("memory", "cpu", "memoryReservation", "startTimeout", "stopTimeout"): - if 
param in container: - container[param] = int(container[param]) - - if "portMappings" in container: - for port_mapping in container["portMappings"]: - for port in ("hostPort", "containerPort"): - if port in port_mapping: - port_mapping[port] = int(port_mapping[port]) - if network_mode == "awsvpc" and "hostPort" in port_mapping: - if port_mapping["hostPort"] != port_mapping.get("containerPort"): - self.module.fail_json( - msg=( - "In awsvpc network mode, host port must be set to the same as " - "container port or not be set" - ) - ) - - if "linuxParameters" in container: - for linux_param in container.get("linuxParameters"): - if linux_param == "tmpfs": - for tmpfs_param in container["linuxParameters"]["tmpfs"]: - if "size" in tmpfs_param: - tmpfs_param["size"] = int(tmpfs_param["size"]) - - for param in ("maxSwap", "swappiness", "sharedMemorySize"): - if param == linux_param: - container["linuxParameters"][param] = int(container["linuxParameters"][param]) - - if "ulimits" in container: - for limits_mapping in container["ulimits"]: - for limit in ("softLimit", "hardLimit"): - if limit in limits_mapping: - limits_mapping[limit] = int(limits_mapping[limit]) - - validated_containers.append(container) - - params = dict( - family=family, - taskRoleArn=task_role_arn, - containerDefinitions=validated_containers, - volumes=volumes, - ) - if network_mode != "default": - params["networkMode"] = network_mode - if cpu: - params["cpu"] = cpu - if memory: - params["memory"] = memory - if launch_type: - params["requiresCompatibilities"] = [launch_type] - if execution_role_arn: - params["executionRoleArn"] = execution_role_arn - if placement_constraints: - params["placementConstraints"] = placement_constraints - if runtime_platform: - params["runtimePlatform"] = runtime_platform - - try: - response = self.ecs.register_task_definition(aws_retry=True, **params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to register task") - - return response["taskDefinition"] - - def describe_task_definitions(self, family): - data = {"taskDefinitionArns": [], "nextToken": None} - - def fetch(): - # Boto3 rejects None-valued parameters, so only pass nextToken when we have a value - params = {"familyPrefix": family} - - if data["nextToken"]: - params["nextToken"] = data["nextToken"] - - result = self.ecs.list_task_definitions(**params) - data["taskDefinitionArns"] += result["taskDefinitionArns"] - data["nextToken"] = result.get("nextToken", None) - return data["nextToken"] is not None - - # Fetch all the arns, possibly across multiple pages - while fetch(): - pass - - # Return the full descriptions of the task definitions, sorted ascending by revision - return list( - sorted( - [ - self.ecs.describe_task_definition(taskDefinition=arn)["taskDefinition"] - for arn in data["taskDefinitionArns"] - ], - key=lambda td: td["revision"], - ) - ) - - def deregister_task(self, taskArn): - response = self.ecs.deregister_task_definition(taskDefinition=taskArn) - return response["taskDefinition"]
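# Editorial note: the hand-rolled nextToken loop in describe_task_definitions()
# above could equally be written with boto3's built-in paginator. A minimal
# sketch, assuming the same "ecs" client used by the class:
#
#     paginator = self.ecs.get_paginator("list_task_definitions")
#     arns = []
#     for page in paginator.paginate(familyPrefix=family):
#         arns += page["taskDefinitionArns"]
#
# The paginator handles the nextToken plumbing itself, which is why the manual
# fetch() closure only needs to exist for the older hand-rolled style.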
type="str" - ), - task_role_arn=dict(required=False, default="", type="str"), - execution_role_arn=dict(required=False, default="", type="str"), - volumes=dict(required=False, type="list", elements="dict"), - launch_type=dict(required=False, choices=["EC2", "FARGATE"]), - cpu=dict(), - memory=dict(required=False, type="str"), - placement_constraints=dict( - required=False, - type="list", - elements="dict", - options=dict(type=dict(type="str"), expression=dict(type="str")), - ), - runtime_platform=dict( - required=False, - default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"}, - type="dict", - options=dict( - cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]), - operatingSystemFamily=dict( - required=False, - choices=[ - "LINUX", - "WINDOWS_SERVER_2019_FULL", - "WINDOWS_SERVER_2019_CORE", - "WINDOWS_SERVER_2022_FULL", - "WINDOWS_SERVER_2022_CORE", - ], - ), - ), - ), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[("launch_type", "FARGATE", ["cpu", "memory"])], - ) - - task_to_describe = None - task_mgr = EcsTaskManager(module) - results = dict(changed=False) - - if module.params["state"] == "present": - if "containers" not in module.params or not module.params["containers"]: - module.fail_json(msg="To use task definitions, a list of containers must be specified") - - if "family" not in module.params or not module.params["family"]: - module.fail_json(msg="To use task definitions, a family must be specified") - - network_mode = module.params["network_mode"] - launch_type = module.params["launch_type"] - placement_constraints = module.params["placement_constraints"] - if launch_type == "FARGATE": - if network_mode != "awsvpc": - module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc") - if placement_constraints: - module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate") - - for container in module.params["containers"]: - if container.get("links") and network_mode == "awsvpc": - module.fail_json(msg="links parameter is not supported if network mode is awsvpc.") - - for environment in container.get("environment", []): - environment["value"] = environment["value"] - - for environment_file in container.get("environmentFiles", []): - if environment_file["type"] != "s3": - module.fail_json(msg="The only supported value for environmentFiles is s3.") - - for linux_param in container.get("linuxParameters", {}): - if linux_param == "maxSwap" and launch_type == "FARGATE": - module.fail_json(msg="devices parameter is not supported with the FARGATE launch type.") - - if linux_param == "maxSwap" and launch_type == "FARGATE": - module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.") - elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0: - module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.") - - if linux_param == "swappiness" and ( - int(container["linuxParameters"]["swappiness"]) < 0 - or int(container["linuxParameters"]["swappiness"]) > 100 - ): - module.fail_json(msg="Accepted values for swappiness are whole numbers between 0 and 100.") - - if linux_param == "sharedMemorySize" and launch_type == "FARGATE": - module.fail_json(msg="sharedMemorySize parameter is not supported with the FARGATE launch type.") - - if linux_param == "tmpfs" and launch_type == "FARGATE": - module.fail_json(msg="tmpfs parameter is not supported with the FARGATE launch type.") - - if 
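# Editorial note: for illustration, a container definition (hypothetical
# values) that passes the FARGATE checks above: awsvpc networking, no
# links/hostname/extraHosts, and no unsupported linuxParameters:
#
#     {
#         "name": "web",
#         "image": "nginx",
#         "essential": True,
#         "portMappings": [{"containerPort": 8080, "hostPort": 8080}],
#     }
#
# Note that with network_mode=awsvpc the register_task() validation above also
# requires hostPort to match containerPort, or to be omitted entirely.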
container.get("hostname") and network_mode == "awsvpc": - module.fail_json(msg="hostname parameter is not supported when the awsvpc network mode is used.") - - if container.get("extraHosts") and network_mode == "awsvpc": - module.fail_json(msg="extraHosts parameter is not supported when the awsvpc network mode is used.") - - family = module.params["family"] - existing_definitions_in_family = task_mgr.describe_task_definitions(module.params["family"]) - - if "revision" in module.params and module.params["revision"]: - # The definition specifies revision. We must guarantee that an active revision of that number will result from this. - revision = int(module.params["revision"]) - - # A revision has been explicitly specified. Attempt to locate a matching revision - tasks_defs_for_revision = [td for td in existing_definitions_in_family if td["revision"] == revision] - existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None - - if existing and existing["status"] != "ACTIVE": - # We cannot reactivate an inactive revision - module.fail_json( - msg=f"A task in family '{family}' already exists for revision {int(revision)}, but it is inactive" - ) - elif not existing: - if not existing_definitions_in_family and revision != 1: - module.fail_json( - msg=f"You have specified a revision of {int(revision)} but a created revision would be 1" - ) - elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: - module.fail_json( - msg=( - f"You have specified a revision of {int(revision)} but a created revision would be" - f" {int(existing_definitions_in_family[-1]['revision'] + 1)}" - ) - ) - else: - existing = None - - def _right_has_values_of_left(left, right): - # Make sure the values are equivalent for everything left has - for k, v in left.items(): - if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])): - # We don't care about list ordering because ECS can change things - if isinstance(v, list) and k in right: - left_list = v - right_list = right[k] or [] - - if len(left_list) != len(right_list): - return False - - for list_val in left_list: - if list_val not in right_list: - # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp') - # fill in that default if absent and see if it is in right_list then - if isinstance(list_val, dict) and not list_val.get("protocol"): - modified_list_val = dict(list_val) - modified_list_val.update(protocol="tcp") - if modified_list_val in right_list: - continue - else: - return False - - # Make sure right doesn't have anything that left doesn't - for k, v in right.items(): - if v and k not in left: - # 'essential' defaults to True when not specified - if k == "essential" and v is True: - pass - else: - return False - - return True - - def _task_definition_matches( - requested_volumes, - requested_containers, - requested_task_role_arn, - requested_launch_type, - existing_task_definition, - ): - if td["status"] != "ACTIVE": - return None - - if requested_task_role_arn != td.get("taskRoleArn", ""): - return None - - if requested_launch_type is not None and requested_launch_type not in td.get( - "requiresCompatibilities", [] - ): - return None - - existing_volumes = td.get("volumes", []) or [] - - if len(requested_volumes) != len(existing_volumes): - # Nope. 
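# Editorial note: an illustration of the protocol-default handling above
# (assumed values). A requested port mapping without "protocol" still matches
# an existing definition that reports the default "tcp":
#
#     _right_has_values_of_left(
#         {"portMappings": [{"containerPort": 80, "hostPort": 80}]},
#         {"portMappings": [{"containerPort": 80, "hostPort": 80, "protocol": "tcp"}]},
#     )  # -> True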
- - def _task_definition_matches( - requested_volumes, - requested_containers, - requested_task_role_arn, - requested_launch_type, - existing_task_definition, - ): - if existing_task_definition["status"] != "ACTIVE": - return None - - if requested_task_role_arn != existing_task_definition.get("taskRoleArn", ""): - return None - - if requested_launch_type is not None and requested_launch_type not in existing_task_definition.get( - "requiresCompatibilities", [] - ): - return None - - existing_volumes = existing_task_definition.get("volumes", []) or [] - - if len(requested_volumes) != len(existing_volumes): - # Volume count differs, so this is not a match. - return None - - if len(requested_volumes) > 0: - for requested_vol in requested_volumes: - found = False - - for actual_vol in existing_volumes: - if _right_has_values_of_left(requested_vol, actual_vol): - found = True - break - - if not found: - return None - - existing_containers = existing_task_definition.get("containerDefinitions", []) or [] - - if len(requested_containers) != len(existing_containers): - # Container count differs, so this is not a match. - return None - - for requested_container in requested_containers: - found = False - - for actual_container in existing_containers: - if _right_has_values_of_left(requested_container, actual_container): - found = True - break - - if not found: - return None - - return existing_task_definition - - # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested - for td in existing_definitions_in_family: - requested_volumes = module.params["volumes"] or [] - requested_containers = module.params["containers"] or [] - requested_task_role_arn = module.params["task_role_arn"] - requested_launch_type = module.params["launch_type"] - existing = _task_definition_matches( - requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td - ) - - if existing: - break - - if existing and not module.params.get("force_create"): - # An existing, matching definition was found. Nothing to do. - results["taskdefinition"] = existing - else: - if not module.check_mode: - # Doesn't exist; create it. - volumes = module.params.get("volumes", []) or [] - results["taskdefinition"] = task_mgr.register_task( - module.params["family"], - module.params["task_role_arn"], - module.params["execution_role_arn"], - module.params["network_mode"], - module.params["containers"], - volumes, - module.params["launch_type"], - module.params["cpu"], - module.params["memory"], - module.params["placement_constraints"], - module.params["runtime_platform"], - ) - results["changed"] = True - - elif module.params["state"] == "absent": - # When de-registering a task definition, we can specify the ARN OR the family and revision. - if "arn" in module.params and module.params["arn"] is not None: - task_to_describe = module.params["arn"] - elif ( - "family" in module.params - and module.params["family"] is not None - and "revision" in module.params - and module.params["revision"] is not None - ): - task_to_describe = module.params["family"] + ":" + str(module.params["revision"]) - else: - module.fail_json(msg="To use task definitions, an arn or family and revision must be specified") - - existing = task_mgr.describe_task(task_to_describe) - - if existing: - # It exists, so we should delete it and mark changed.
Return info about the task definition deleted - results["taskdefinition"] = existing - if "status" in existing and existing["status"] == "INACTIVE": - results["changed"] = False - else: - if not module.check_mode: - task_mgr.deregister_task(task_to_describe) - results["changed"] = True - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ecs_taskdefinition_info.py b/ecs_taskdefinition_info.py deleted file mode 100644 index 5e235096d96..00000000000 --- a/ecs_taskdefinition_info.py +++ /dev/null @@ -1,378 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ecs_taskdefinition_info -version_added: 1.0.0 -short_description: Describe a task definition in ECS -notes: - - For details of the parameters and returns see - U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition) -description: - - Describes a task definition in ECS. -author: - - Gustavo Maia (@gurumaia) - - Mark Chance (@Java1Guy) - - Darek Kaczynski (@kaczynskid) -options: - task_definition: - description: - - The name of the task definition to get details for - required: true - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- community.aws.ecs_taskdefinition_info: - task_definition: test-td -""" - -RETURN = r""" -container_definitions: - description: Returns a list of complex objects representing the containers - returned: success - type: complex - contains: - name: - description: The name of a container. - returned: always - type: str - image: - description: The image used to start a container. - returned: always - type: str - cpu: - description: The number of cpu units reserved for the container. - returned: always - type: int - memoryReservation: - description: The soft limit (in MiB) of memory to reserve for the container. - returned: when present - type: int - links: - description: Links to other containers. - returned: when present - type: str - portMappings: - description: The list of port mappings for the container. - returned: always - type: complex - contains: - containerPort: - description: The port number on the container. - returned: when present - type: int - hostPort: - description: The port number on the container instance to reserve for your container. - returned: when present - type: int - protocol: - description: The protocol used for the port mapping. - returned: when present - type: str - essential: - description: Whether this is an essential container or not. - returned: always - type: bool - entryPoint: - description: The entry point that is passed to the container. - returned: when present - type: str - command: - description: The command that is passed to the container. - returned: when present - type: str - environment: - description: The environment variables to pass to a container. - returned: always - type: complex - contains: - name: - description: The name of the environment variable. - returned: when present - type: str - value: - description: The value of the environment variable. - returned: when present - type: str - mountPoints: - description: The mount points for data volumes in your container. 
- returned: always - type: complex - contains: - sourceVolume: - description: The name of the volume to mount. - returned: when present - type: str - containerPath: - description: The path on the container to mount the host volume at. - returned: when present - type: str - readOnly: - description: If this value is true, the container has read-only access to the volume. - If this value is false, then the container can write to the volume. - returned: when present - type: bool - volumesFrom: - description: Data volumes to mount from another container. - returned: always - type: complex - contains: - sourceContainer: - description: The name of another container within the same task definition to mount volumes from. - returned: when present - type: str - readOnly: - description: If this value is true, the container has read-only access to the volume. - If this value is false, then the container can write to the volume. - returned: when present - type: bool - hostname: - description: The hostname to use for your container. - returned: when present - type: str - user: - description: The user name to use inside the container. - returned: when present - type: str - workingDirectory: - description: The working directory in which to run commands inside the container. - returned: when present - type: str - disableNetworking: - description: When this parameter is true, networking is disabled within the container. - returned: when present - type: bool - privileged: - description: When this parameter is true, the container is given elevated - privileges on the host container instance (similar to the root user). - returned: when present - type: bool - readonlyRootFilesystem: - description: When this parameter is true, the container is given read-only access to its root file system. - returned: when present - type: bool - dnsServers: - description: A list of DNS servers that are presented to the container. - returned: when present - type: str - dnsSearchDomains: - description: A list of DNS search domains that are presented to the container. - returned: when present - type: str - extraHosts: - description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. - returned: when present - type: complex - contains: - hostname: - description: The hostname to use in the /etc/hosts entry. - returned: when present - type: str - ipAddress: - description: The IP address to use in the /etc/hosts entry. - returned: when present - type: str - dockerSecurityOptions: - description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. - returned: when present - type: str - dockerLabels: - description: A key/value map of labels to add to the container. - returned: when present - type: str - ulimits: - description: A list of ulimits to set in the container. - returned: when present - type: complex - contains: - name: - description: The type of the ulimit. - returned: when present - type: str - softLimit: - description: The soft limit for the ulimit type. - returned: when present - type: int - hardLimit: - description: The hard limit for the ulimit type. - returned: when present - type: int - logConfiguration: - description: The log configuration specification for the container. - returned: when present - type: str - options: - description: The configuration options to send to the log driver. - returned: when present - type: str - healthCheck: - description: The container health check command and associated configuration parameters for the container. - returned: when present - type: dict - contains: - command: - description: A string array representing the command that the container runs to determine if it is healthy. - type: list - interval: - description: The time period in seconds between each health check execution. - type: int - timeout: - description: The time period in seconds to wait for a health check to succeed before it is considered a failure. - type: int - retries: - description: The number of times to retry a failed health check before the container is considered unhealthy. - type: int - startPeriod: - description: The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. - type: int - resourceRequirements: - description: The type and amount of a resource to assign to a container. - returned: when present - type: dict - contains: - value: - description: The value for the specified resource type. - type: str - type: - description: The type of resource to assign to a container. - type: str - systemControls: - description: A list of namespaced kernel parameters to set in the container. - returned: when present - type: dict - contains: - namespace: - description: The namespaced kernel parameter to set a value for. - type: str - value: - description: The value for the namespaced kernel parameter. - type: str - firelensConfiguration: - description: The FireLens configuration for the container. - returned: when present - type: dict - contains: - type: - description: The log router. - type: str - options: - description: The options to use when configuring the log router. - type: dict -family: - description: The family of your task definition, used as the definition name - returned: always - type: str -task_definition_arn: - description: ARN of the task definition - returned: always - type: str -task_role_arn: - description: The ARN of the IAM role that containers in this task can assume - returned: when role is set - type: str -network_mode: - description: Network mode for the containers - returned: always - type: str -revision: - description: Revision number that was queried - returned: always - type: int -volumes: - description: The list of volumes in a task - returned: always - type: complex - contains: - name: - description: The name of the volume. - returned: when present - type: str - host: - description: The contents of the host parameter determine whether your data volume - persists on the host container instance and where it is stored. - returned: when present - type: bool - source_path: - description: The path on the host container instance that is presented to the container. - returned: when present - type: str -status: - description: The status of the task definition - returned: always - type: str -requires_attributes: - description: The container instance attributes required by your task - returned: when present - type: complex - contains: - name: - description: The name of the attribute. - returned: when present - type: str - value: - description: The value of the attribute. - returned: when present - type: str - targetType: - description: The type of the target with which to attach the attribute. - returned: when present - type: str - targetId: - description: The ID of the target.
- returned: when present - type: str -placement_constraints: - description: A list of placement constraint objects to use for tasks. - returned: always - type: complex - contains: - type: - description: The type of constraint. - returned: when present - type: str - expression: - description: A cluster query language expression to apply to the constraint. - returned: when present - type: str -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - task_definition=dict(required=True, type="str"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - ecs = module.client("ecs") - - try: - ecs_td = ecs.describe_task_definition(taskDefinition=module.params["task_definition"])["taskDefinition"] - except botocore.exceptions.ClientError: - ecs_td = {} - - module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td)) - - -if __name__ == "__main__": - main()
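As an editorial aside: the round trip this module performs (describe a task definition, then snake_case the result) can be reproduced outside Ansible with plain boto3 for quick inspection. A minimal sketch; the region and task definition name are assumptions:

import boto3
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

ecs = boto3.client("ecs", region_name="us-west-2")  # hypothetical region
td = ecs.describe_task_definition(taskDefinition="test-td")["taskDefinition"]  # hypothetical name
print(camel_dict_to_snake_dict(td)["task_definition_arn"])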
diff --git a/efs.py b/efs.py deleted file mode 100644 index df79babc92c..00000000000 --- a/efs.py +++ /dev/null @@ -1,806 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: efs -version_added: 1.0.0 -short_description: create and maintain EFS file systems -description: - - This module allows creating, searching, and destroying Amazon EFS file systems. -author: - - "Ryan Sydnor (@ryansydnor)" - - "Artem Kazakov (@akazakov)" -options: - encrypt: - description: - - If I(encrypt=true), creates an encrypted file system. This can not be modified after the file system is created. - type: bool - default: false - kms_key_id: - description: - - The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only - required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for - Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN. - type: str - state: - description: - - Whether the Amazon EFS file system should be created (C(present)) or deleted (C(absent)). - default: 'present' - choices: ['present', 'absent'] - type: str - name: - description: - - Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete. - type: str - id: - description: - - ID of Amazon EFS. Either name or ID required for delete. - type: str - performance_mode: - description: - - File system's performance mode to use. Only takes effect during creation. - default: 'general_purpose' - choices: ['general_purpose', 'max_io'] - type: str - tags: - description: - - "Tags of the Amazon EFS file system, defined as a dictionary. - In case of 'present' state with a list of tags and an existing EFS file system (matched by 'name'), the tags of the file system will be replaced with the provided data." - type: dict - targets: - description: - - "List of mount targets. It should be a list of dictionaries, each including the following attributes. - This data may be modified for an existing EFS file system using 'present' state and a new list of mount targets." - type: list - elements: dict - default: [] - suboptions: - subnet_id: - required: true - type: str - description: The ID of the subnet to add the mount target in. - ip_address: - type: str - description: A valid IPv4 address within the address range of the specified subnet. - security_groups: - type: list - elements: str - description: List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as the specified subnet. - throughput_mode: - description: - - The throughput_mode for the file system to be created. - choices: ['bursting', 'provisioned'] - type: str - provisioned_throughput_in_mibps: - description: - - If the throughput_mode is provisioned, select the amount of throughput to provision, in MiBps. - type: float - wait: - description: - - "In case of 'present' state, wait for the EFS 'available' life cycle state (unless the current state is 'deleting' or 'deleted'). - In case of 'absent' state, wait for the EFS 'deleted' life cycle state." - type: bool - default: false - wait_timeout: - description: - - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary. - default: 0 - type: int - transition_to_ia: - description: - - How many days before objects transition to the lower-cost EFS Infrequent Access (IA) storage class. - - If set to the string C(None), any existing lifecycle policy will be removed, and objects will not transition - to an IA storage class. - - If this parameter is absent, any existing lifecycle policy will not be affected. - choices: ['None', '7', '14', '30', '60', '90'] - type: str - version_added: 2.1.0 - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: EFS provisioning - community.aws.efs: - state: present - name: myTestEFS - tags: - Name: myTestNameTag - purpose: file-storage - targets: - - subnet_id: subnet-748c5d03 - security_groups: [ "sg-1a2b3c4d" ] - -- name: Modifying EFS data - community.aws.efs: - state: present - name: myTestEFS - tags: - name: myAnotherTestTag - targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] - -- name: Set a lifecycle policy - community.aws.efs: - state: present - name: myTestEFS - transition_to_ia: 7 - targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] - -- name: Remove a lifecycle policy - community.aws.efs: - state: present - name: myTestEFS - transition_to_ia: None - targets: - - subnet_id: subnet-7654fdca - security_groups: [ "sg-4c5d6f7a" ] - -- name: Deleting EFS - community.aws.efs: - state: absent - name: myTestEFS -""" - -RETURN = r""" -creation_time: - description: timestamp of creation date - returned: always - type: str - sample: "2015-11-16 07:30:57-05:00" -creation_token: - description: EFS creation token - returned: always - type: str - sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961" -file_system_id: - description: ID of the file system - returned: always - type: str - sample: "fs-xxxxxxxx" -life_cycle_state: - description: state of the EFS file system - returned: always - type: str - sample: "creating, available, deleting, deleted" -mount_point: - description: URL of the file system, with a leading dot, dating from when AWS EFS required a region suffix to be added to the address - returned: always - type: str - sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/" -filesystem_address: - description: URL of the file system, valid for use with mount - returned: always - type: str - sample: "fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/" -mount_targets: - description: list of mount targets - returned: always - type: list - sample: - [
{ - "file_system_id": "fs-a7ad440e", - "ip_address": "172.31.17.173", - "life_cycle_state": "available", - "mount_target_id": "fsmt-d8907871", - "network_interface_id": "eni-6e387e26", - "owner_id": "123456789012", - "security_groups": [ - "sg-a30b22c6" - ], - "subnet_id": "subnet-e265c895" - }, - ... - ] -name: - description: name of the file system - returned: always - type: str - sample: "my-efs" -number_of_mount_targets: - description: the number of targets mounted - returned: always - type: int - sample: 3 -owner_id: - description: AWS account ID of EFS owner - returned: always - type: str - sample: "XXXXXXXXXXXX" -size_in_bytes: - description: size of the file system in bytes as of a timestamp - returned: always - type: dict - sample: - { - "timestamp": "2015-12-21 13:59:59-05:00", - "value": 12288 - } -performance_mode: - description: performance mode of the file system - returned: always - type: str - sample: "generalPurpose" -tags: - description: tags on the efs instance - returned: always - type: dict - sample: - { - "name": "my-efs", - "key": "Value" - } - -""" - -from time import sleep -from time import time as timestamp - -try: - import botocore -except ImportError as e: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _index_by_key(key, items): - return dict((item[key], item) for item in items) - - -class EFSConnection(object): - DEFAULT_WAIT_TIMEOUT_SECONDS = 0 - - STATE_CREATING = "creating" - STATE_AVAILABLE = "available" - STATE_DELETING = "deleting" - STATE_DELETED = "deleted" - - def __init__(self, module): - self.connection = module.client("efs") - region = module.region - - self.module = module - self.region = region - self.wait = module.params.get("wait") - self.wait_timeout = module.params.get("wait_timeout") - - def get_file_systems(self, **kwargs): - """ - Returns generator of file systems including all attributes of FS - """ - items = iterate_all( - "FileSystems", - self.connection.describe_file_systems, - **kwargs, - ) - for item in items: - item["Name"] = item["CreationToken"] - item["CreationTime"] = str(item["CreationTime"]) - """ - In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it - AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose - And new FilesystemAddress variable is introduced for direct use with other modules (e.g. 
mount) - AWS documentation is available here: - https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html - """ - item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" - item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" - if "Timestamp" in item["SizeInBytes"]: - item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) - if item["LifeCycleState"] == self.STATE_AVAILABLE: - item["Tags"] = self.get_tags(FileSystemId=item["FileSystemId"]) - item["MountTargets"] = list(self.get_mount_targets(FileSystemId=item["FileSystemId"])) - else: - item["Tags"] = {} - item["MountTargets"] = [] - yield item - - def get_tags(self, **kwargs): - """ - Returns tag list for selected instance of EFS - """ - tags = self.connection.describe_tags(**kwargs)["Tags"] - return tags - - def get_mount_targets(self, **kwargs): - """ - Returns mount targets for selected instance of EFS - """ - targets = iterate_all( - "MountTargets", - self.connection.describe_mount_targets, - **kwargs, - ) - for target in targets: - if target["LifeCycleState"] == self.STATE_AVAILABLE: - target["SecurityGroups"] = list(self.get_security_groups(MountTargetId=target["MountTargetId"])) - else: - target["SecurityGroups"] = [] - yield target - - def get_security_groups(self, **kwargs): - """ - Returns security groups for selected instance of EFS - """ - return iterate_all( - "SecurityGroups", - self.connection.describe_mount_target_security_groups, - **kwargs, - ) - - def get_file_system_id(self, name): - """ - Returns ID of instance by instance name - """ - info = first_or_default( - iterate_all( - "FileSystems", - self.connection.describe_file_systems, - CreationToken=name, - ) - ) - return info and info["FileSystemId"] or None - - def get_file_system_state(self, name, file_system_id=None): - """ - Returns state of filesystem by EFS id/name - """ - info = first_or_default( - iterate_all( - "FileSystems", - self.connection.describe_file_systems, - CreationToken=name, - FileSystemId=file_system_id, - ) - ) - return info and info["LifeCycleState"] or self.STATE_DELETED - - def get_mount_targets_in_state(self, file_system_id, states=None): - """ - Returns mount targets of selected EFS, optionally filtered by the given state(s) - """ - targets = iterate_all( - "MountTargets", - self.connection.describe_mount_targets, - FileSystemId=file_system_id, - ) - - if states: - if not isinstance(states, list): - states = [states] - targets = filter(lambda target: target["LifeCycleState"] in states, targets) - - return list(targets) - - def get_throughput_mode(self, **kwargs): - """ - Returns throughput mode for selected EFS instance - """ - info = first_or_default( - iterate_all( - "FileSystems", - self.connection.describe_file_systems, - **kwargs, - ) - ) - - return info and info["ThroughputMode"] or None - - def get_provisioned_throughput_in_mibps(self, **kwargs): - """ - Returns provisioned throughput for selected EFS instance - """ - info = first_or_default( - iterate_all( - "FileSystems", - self.connection.describe_file_systems, - **kwargs, - ) - ) - return info.get("ProvisionedThroughputInMibps", None) - - def create_file_system( - self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps - ): - """ - Creates new filesystem with selected name - """ - changed = False - state = self.get_file_system_state(name) - params = {} - params["CreationToken"] = name - params["PerformanceMode"] = performance_mode - if
encrypt: - params["Encrypted"] = encrypt - if kms_key_id is not None: - params["KmsKeyId"] = kms_key_id - if throughput_mode: - params["ThroughputMode"] = throughput_mode - if provisioned_throughput_in_mibps: - params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps - - if state in [self.STATE_DELETING, self.STATE_DELETED]: - wait_for( - lambda: self.get_file_system_state(name), - self.STATE_DELETED, - ) - try: - self.connection.create_file_system(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Unable to create file system.") - - # we always wait for the state to be available when creating. - # if we try to take any actions on the file system before it's available - # we'll throw errors - wait_for( - lambda: self.get_file_system_state(name), - self.STATE_AVAILABLE, - self.wait_timeout, - ) - - return changed - - def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mibps): - """ - Update filesystem with new throughput settings - """ - changed = False - state = self.get_file_system_state(name) - if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: - fs_id = self.get_file_system_id(name) - current_mode = self.get_throughput_mode(FileSystemId=fs_id) - current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id) - params = dict() - if throughput_mode and throughput_mode != current_mode: - params["ThroughputMode"] = throughput_mode - if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput: - params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps - if len(params) > 0: - wait_for( - lambda: self.get_file_system_state(name), - self.STATE_AVAILABLE, - self.wait_timeout, - ) - try: - self.connection.update_file_system(FileSystemId=fs_id, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Unable to update file system.") - return changed - - def update_lifecycle_policy(self, name, transition_to_ia): - """ - Update filesystem with new lifecycle policy. 
- """ - changed = False - state = self.get_file_system_state(name) - if state in [self.STATE_AVAILABLE, self.STATE_CREATING]: - fs_id = self.get_file_system_id(name) - current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id) - if transition_to_ia == "None": - LifecyclePolicies = [] - else: - LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}] - if current_policies.get("LifecyclePolicies") != LifecyclePolicies: - response = self.connection.put_lifecycle_configuration( - FileSystemId=fs_id, - LifecyclePolicies=LifecyclePolicies, - ) - changed = True - return changed - - def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps): - """ - Change attributes (mount targets and tags) of filesystem by name - """ - result = False - fs_id = self.get_file_system_id(name) - - if tags is not None: - tags_need_modify, tags_to_delete = compare_aws_tags( - boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags - ) - - if tags_to_delete: - try: - self.connection.delete_tags(FileSystemId=fs_id, TagKeys=tags_to_delete) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Unable to delete tags.") - - result = True - - if tags_need_modify: - try: - self.connection.create_tags( - FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify) - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Unable to create tags.") - - result = True - - if targets is not None: - incomplete_states = [self.STATE_CREATING, self.STATE_DELETING] - wait_for( - lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0, - ) - current_targets = _index_by_key("SubnetId", self.get_mount_targets(FileSystemId=fs_id)) - targets = _index_by_key("SubnetId", targets) - - targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, targets, True) - - # To modify mount target it should be deleted and created again - changed = [ - sid - for sid in intersection - if not targets_equal( - ["SubnetId", "IpAddress", "NetworkInterfaceId"], current_targets[sid], targets[sid] - ) - ] - targets_to_delete = list(targets_to_delete) + changed - targets_to_create = list(targets_to_create) + changed - - if targets_to_delete: - for sid in targets_to_delete: - self.connection.delete_mount_target(MountTargetId=current_targets[sid]["MountTargetId"]) - wait_for( - lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0, - ) - result = True - - if targets_to_create: - for sid in targets_to_create: - self.connection.create_mount_target(FileSystemId=fs_id, **targets[sid]) - wait_for( - lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), - 0, - self.wait_timeout, - ) - result = True - - # If no security groups were passed into the module, then do not change it. 
security_groups_to_update = [ - sid - for sid in intersection - if "SecurityGroups" in targets[sid] - and current_targets[sid]["SecurityGroups"] != targets[sid]["SecurityGroups"] - ] - - if security_groups_to_update: - for sid in security_groups_to_update: - self.connection.modify_mount_target_security_groups( - MountTargetId=current_targets[sid]["MountTargetId"], - SecurityGroups=targets[sid].get("SecurityGroups", None), - ) - result = True - - return result - - def delete_file_system(self, name, file_system_id=None): - """ - Removes an EFS instance by ID/name - """ - result = False - state = self.get_file_system_state(name, file_system_id) - if state in [self.STATE_CREATING, self.STATE_AVAILABLE]: - wait_for( - lambda: self.get_file_system_state(name), - self.STATE_AVAILABLE, - ) - if not file_system_id: - file_system_id = self.get_file_system_id(name) - self.delete_mount_targets(file_system_id) - self.connection.delete_file_system(FileSystemId=file_system_id) - result = True - - if self.wait: - wait_for( - lambda: self.get_file_system_state(name), - self.STATE_DELETED, - self.wait_timeout, - ) - - return result - - def delete_mount_targets(self, file_system_id): - """ - Removes mount targets by EFS id - """ - wait_for( - lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)), - 0, - ) - - targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE) - for target in targets: - self.connection.delete_mount_target(MountTargetId=target["MountTargetId"]) - - wait_for( - lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)), - 0, - ) - - return len(targets) > 0 - - -def iterate_all(attr, map_method, **kwargs): - """ - Creates an iterator over a paginated result set, retrying on throttling - """ - args = dict((key, value) for (key, value) in kwargs.items() if value is not None) - wait = 1 - while True: - try: - data = map_method(**args) - for elm in data[attr]: - yield elm - if "NextMarker" in data: - args["Marker"] = data["NextMarker"] - continue - break - except is_boto3_error_code("ThrottlingException"): - if wait < 600: - sleep(wait) - wait = wait * 2 - continue - else: - raise - - -def targets_equal(keys, a, b): - """ - Compares two mount targets by the specified attributes - """ - for key in keys: - if key in b and a[key] != b[key]: - return False - - return True - - -def dict_diff(dict1, dict2, by_key=False): - """ - Helper method to calculate the difference of two dictionaries - """ - keys1 = set(dict1.keys() if by_key else dict1.items()) - keys2 = set(dict2.keys() if by_key else dict2.items()) - - intersection = keys1 & keys2 - - return keys2 ^ intersection, intersection, keys1 ^ intersection - - -def first_or_default(items, default=None): - """ - Helper method to fetch the first element of a list (if it exists) - """ - for item in items: - return item - return default - - -def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS): - """ - Helper method to wait until the callback returns the desired value - """ - wait_start = timestamp() - while True: - if callback() != value: - if timeout != 0 and (timestamp() - wait_start > timeout): - raise RuntimeError("Wait timeout exceeded (" + str(timeout) + " sec)") - else: - sleep(5) - continue - break - - -def main(): - """ - Module action handler - """ - argument_spec = dict( - encrypt=dict(required=False, type="bool", default=False), - state=dict(required=False, type="str", choices=["present", "absent"], default="present"), - kms_key_id=dict(required=False, type="str", default=None), -
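As a rough worked example of the retry behaviour in iterate_all above: a ThrottlingException triggers sleeps of 1, 2, 4, ... seconds, doubling until the 600-second cap would be exceeded, after which the exception propagates. A minimal standalone sketch:

def backoff_delays(cap=600):
    # Mirrors the doubling used in iterate_all: yields 1, 2, 4, ... while wait < cap.
    wait = 1
    while wait < cap:
        yield wait
        wait *= 2

print(list(backoff_delays()))  # [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]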
purge_tags=dict(default=True, type="bool"), - id=dict(required=False, type="str", default=None), - name=dict(required=False, type="str", default=None), - tags=dict(required=False, type="dict", aliases=["resource_tags"]), - targets=dict(required=False, type="list", default=[], elements="dict"), - performance_mode=dict( - required=False, type="str", choices=["general_purpose", "max_io"], default="general_purpose" - ), - transition_to_ia=dict(required=False, type="str", choices=["None", "7", "14", "30", "60", "90"], default=None), - throughput_mode=dict(required=False, type="str", choices=["bursting", "provisioned"], default=None), - provisioned_throughput_in_mibps=dict(required=False, type="float"), - wait=dict(required=False, type="bool", default=False), - wait_timeout=dict(required=False, type="int", default=0), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - connection = EFSConnection(module) - - name = module.params.get("name") - fs_id = module.params.get("id") - tags = module.params.get("tags") - target_translations = { - "ip_address": "IpAddress", - "security_groups": "SecurityGroups", - "subnet_id": "SubnetId", - } - targets = [ - dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get("targets") - ] - performance_mode_translations = { - "general_purpose": "generalPurpose", - "max_io": "maxIO", - } - encrypt = module.params.get("encrypt") - kms_key_id = module.params.get("kms_key_id") - performance_mode = performance_mode_translations[module.params.get("performance_mode")] - purge_tags = module.params.get("purge_tags") - transition_to_ia = module.params.get("transition_to_ia") - throughput_mode = module.params.get("throughput_mode") - provisioned_throughput_in_mibps = module.params.get("provisioned_throughput_in_mibps") - state = str(module.params.get("state")).lower() - changed = False - - if state == "present": - if not name: - module.fail_json(msg="Name parameter is required for create") - - changed = connection.create_file_system( - name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps - ) - changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed - changed = ( - connection.converge_file_system( - name=name, - tags=tags, - purge_tags=purge_tags, - targets=targets, - throughput_mode=throughput_mode, - provisioned_throughput_in_mibps=provisioned_throughput_in_mibps, - ) - or changed - ) - if transition_to_ia: - changed |= connection.update_lifecycle_policy(name, transition_to_ia) - result = first_or_default(connection.get_file_systems(CreationToken=name)) - - elif state == "absent": - if not name and not fs_id: - module.fail_json(msg="Either name or id parameter is required for delete") - - changed = connection.delete_file_system(name, fs_id) - result = None - if result: - result = camel_dict_to_snake_dict(result) - module.exit_json(changed=changed, efs=result) - - -if __name__ == "__main__": - main() diff --git a/efs_info.py b/efs_info.py deleted file mode 100644 index 76952337b97..00000000000 --- a/efs_info.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: efs_info -version_added: 1.0.0 -short_description: Get information about Amazon EFS file systems -description: -- This module can be used to search Amazon EFS file systems. 
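For playbook authors, the target translation performed in main() above maps the playbook-style snake_case keys onto the CamelCase parameter names boto3 expects; a small sketch with hypothetical values:

target_translations = {
    "ip_address": "IpAddress",
    "security_groups": "SecurityGroups",
    "subnet_id": "SubnetId",
}
playbook_targets = [{"subnet_id": "subnet-1a2b3c4d", "security_groups": ["sg-4d3c2b1a"]}]
boto3_targets = [
    {target_translations[key]: value for key, value in target.items()}
    for target in playbook_targets
]
# [{'SubnetId': 'subnet-1a2b3c4d', 'SecurityGroups': ['sg-4d3c2b1a']}]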
- Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)! -author: -- "Ryan Sydnor (@ryansydnor)" -options: - name: - description: - - Creation Token of Amazon EFS file system. - aliases: [ creation_token ] - type: str - id: - description: - - ID of Amazon EFS. - type: str - tags: - description: - - Tags of the Amazon EFS file system, defined as a dictionary. - type: dict - default: {} - targets: - description: - - List of targets on which to filter the returned results. - - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address. - type: list - elements: str - default: [] -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Find all existing EFS file systems - community.aws.efs_info: - register: result - -- name: Find EFS file system by ID - community.aws.efs_info: - id: fs-1234abcd - register: result - -- name: Search all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' - community.aws.efs_info: - tags: - Name: myTestNameTag - targets: - - subnet-1a2b3c4d - - sg-4d3c2b1a - register: result - -- ansible.builtin.debug: - msg: "{{ result['efs'] }}" -""" - -RETURN = r""" -creation_time: - description: timestamp of creation date - returned: always - type: str - sample: "2015-11-16 07:30:57-05:00" -creation_token: - description: EFS creation token - returned: always - type: str - sample: console-88609e04-9a0e-4a2e-912c-feaa99509961 -file_system_id: - description: ID of the file system - returned: always - type: str - sample: fs-xxxxxxxx -life_cycle_state: - description: state of the EFS file system - returned: always - type: str - sample: creating, available, deleting, deleted -mount_point: - description: URL of the file system with a leading dot, kept from the time when AWS EFS required a network suffix to be added to the address - returned: always - type: str - sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ -filesystem_address: - description: URL of the file system - returned: always - type: str - sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ -mount_targets: - description: list of mount targets - returned: always - type: list - sample: - [ - { - "file_system_id": "fs-a7ad440e", - "ip_address": "172.31.17.173", - "life_cycle_state": "available", - "mount_target_id": "fsmt-d8907871", - "network_interface_id": "eni-6e387e26", - "owner_id": "123456789012", - "security_groups": [ - "sg-a30b22c6" - ], - "subnet_id": "subnet-e265c895" - }, - ...
- ] -name: - description: name of the file system - returned: always - type: str - sample: my-efs -number_of_mount_targets: - description: the number of targets mounted - returned: always - type: int - sample: 3 -owner_id: - description: AWS account ID of EFS owner - returned: always - type: str - sample: XXXXXXXXXXXX -size_in_bytes: - description: size of the file system in bytes as of a timestamp - returned: always - type: dict - sample: - { - "timestamp": "2015-12-21 13:59:59-05:00", - "value": 12288 - } -performance_mode: - description: performance mode of the file system - returned: always - type: str - sample: "generalPurpose" -throughput_mode: - description: mode of throughput for the file system - returned: always - type: str - sample: "bursting" -provisioned_throughput_in_mibps: - description: throughput provisioned in Mibps - returned: when throughput_mode is set to "provisioned" - type: float - sample: 15.0 -tags: - description: tags on the efs instance - returned: always - type: dict - sample: - { - "name": "my-efs", - "key": "Value" - } - -""" - - -from collections import defaultdict - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class EFSConnection(object): - STATE_CREATING = "creating" - STATE_AVAILABLE = "available" - STATE_DELETING = "deleting" - STATE_DELETED = "deleted" - - def __init__(self, module): - try: - self.connection = module.client("efs") - self.module = module - except Exception as e: - module.fail_json(msg=f"Failed to connect to AWS: {to_native(e)}") - - self.region = module.region - - @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) - def list_file_systems(self, **kwargs): - """ - Returns generator of file systems including all attributes of FS - """ - paginator = self.connection.get_paginator("describe_file_systems") - return paginator.paginate(**kwargs).build_full_result()["FileSystems"] - - @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) - def get_tags(self, file_system_id): - """ - Returns tag list for selected instance of EFS - """ - paginator = self.connection.get_paginator("describe_tags") - return boto3_tag_list_to_ansible_dict( - paginator.paginate(FileSystemId=file_system_id).build_full_result()["Tags"] - ) - - @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"]) - def get_mount_targets(self, file_system_id): - """ - Returns mount targets for selected instance of EFS - """ - paginator = self.connection.get_paginator("describe_mount_targets") - return paginator.paginate(FileSystemId=file_system_id).build_full_result()["MountTargets"] - - @AWSRetry.jittered_backoff(catch_extra_error_codes=["ThrottlingException"]) - def get_security_groups(self, mount_target_id): - """ - Returns security groups for selected instance of EFS - """ - return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)["SecurityGroups"] - - def get_mount_targets_data(self, file_systems): - for item in file_systems: - if item["life_cycle_state"] == self.STATE_AVAILABLE: - try: - 
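The list_file_systems/get_tags/get_mount_targets helpers above all follow the same boto3 pagination pattern; a minimal standalone sketch (client construction shown explicitly, credentials and region assumed to come from the environment):

import boto3

client = boto3.client("efs")  # credentials/region assumed from the environment

# build_full_result() walks every page and merges the listed key,
# so callers get the complete result set in one dict.
paginator = client.get_paginator("describe_file_systems")
file_systems = paginator.paginate().build_full_result()["FileSystems"]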
mount_targets = self.get_mount_targets(item["file_system_id"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get EFS targets") - for mt in mount_targets: - item["mount_targets"].append(camel_dict_to_snake_dict(mt)) - return file_systems - - def get_security_groups_data(self, file_systems): - for item in file_systems: - if item["life_cycle_state"] == self.STATE_AVAILABLE: - for target in item["mount_targets"]: - if target["life_cycle_state"] == self.STATE_AVAILABLE: - try: - target["security_groups"] = self.get_security_groups(target["mount_target_id"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get EFS security groups") - else: - target["security_groups"] = [] - else: - item["tags"] = {} - item["mount_targets"] = [] - return file_systems - - def get_file_systems(self, file_system_id=None, creation_token=None): - kwargs = dict() - if file_system_id: - kwargs["FileSystemId"] = file_system_id - if creation_token: - kwargs["CreationToken"] = creation_token - try: - file_systems = self.list_file_systems(**kwargs) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get EFS file systems") - - results = list() - for item in file_systems: - item["CreationTime"] = str(item["CreationTime"]) - """ - In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it - AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose - And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount) - AWS documentation is available here: - U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) - """ - item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" - item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" - - if "Timestamp" in item["SizeInBytes"]: - item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) - result = camel_dict_to_snake_dict(item) - result["tags"] = {} - result["mount_targets"] = [] - # Set tags *after* doing camel to snake - if result["life_cycle_state"] == self.STATE_AVAILABLE: - try: - result["tags"] = self.get_tags(result["file_system_id"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get EFS tags") - results.append(result) - return results - - -def prefix_to_attr(attr_id): - """ - Helper method to convert ID prefix to mount target attribute - """ - attr_by_prefix = { - "fsmt-": "mount_target_id", - "subnet-": "subnet_id", - "eni-": "network_interface_id", - "sg-": "security_groups", - } - return first_or_default( - [attr_name for (prefix, attr_name) in attr_by_prefix.items() if str(attr_id).startswith(prefix)], "ip_address" - ) - - -def first_or_default(items, default=None): - """ - Helper method to fetch first element of list (if exists) - """ - for item in items: - return item - return default - - -def has_tags(available, required): - """ - Helper method to determine if tag requested already exists - """ - for key, value in required.items(): - if key not in available or value != available[key]: - return False - return True - - -def has_targets(available, required): - """ - Helper method to determine if mount target requested 
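To make the MountPoint/FilesystemAddress back-compatibility note above concrete, with a hypothetical file system ID and region the two returned addresses differ only in the leading dot:

file_system_id = "fs-12345678"  # hypothetical
region = "us-west-2"            # hypothetical

mount_point = f".{file_system_id}.efs.{region}.amazonaws.com:/"        # legacy form, leading dot
filesystem_address = f"{file_system_id}.efs.{region}.amazonaws.com:/"  # use directly with mount tooling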
already exists - """ - grouped = group_list_of_dict(available) - for value, field in required: - if field not in grouped or value not in grouped[field]: - return False - return True - - -def group_list_of_dict(array): - """ - Helper method to group list of dict to dict with all possible values - """ - result = defaultdict(list) - for item in array: - for key, value in item.items(): - result[key] += value if isinstance(value, list) else [value] - return result - - -def main(): - """ - Module action handler - """ - argument_spec = dict( - id=dict(), - name=dict(aliases=["creation_token"]), - tags=dict(type="dict", default={}), - targets=dict(type="list", default=[], elements="str"), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = EFSConnection(module) - - name = module.params.get("name") - fs_id = module.params.get("id") - tags = module.params.get("tags") - targets = module.params.get("targets") - - file_systems_info = connection.get_file_systems(fs_id, name) - - if tags: - file_systems_info = [item for item in file_systems_info if has_tags(item["tags"], tags)] - - file_systems_info = connection.get_mount_targets_data(file_systems_info) - file_systems_info = connection.get_security_groups_data(file_systems_info) - - if targets: - targets = [(item, prefix_to_attr(item)) for item in targets] - file_systems_info = [item for item in file_systems_info if has_targets(item["mount_targets"], targets)] - - module.exit_json(changed=False, efs=file_systems_info) - - -if __name__ == "__main__": - main() diff --git a/efs_tag.py b/efs_tag.py deleted file mode 100644 index 3a4c5c8ced6..00000000000 --- a/efs_tag.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Milan Zink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: efs_tag -version_added: 2.0.0 -short_description: create and remove tags on Amazon EFS resources -description: - - Creates and removes tags for Amazon EFS resources. - - Resources are referenced by their ID (filesystem or filesystem access point). -author: - - Milan Zink (@zeten30) -options: - resource: - description: - - EFS Filesystem ID or EFS Filesystem Access Point ID. - type: str - required: True - state: - description: - - Whether the tags should be present or absent on the resource. - default: present - choices: ['present', 'absent'] - type: str - tags: - description: - - A dictionary of tags to add or remove from the resource. - - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value. - type: dict - required: True - aliases: ['resource_tags'] - purge_tags: - description: - - Whether unspecified tags should be removed from the resource. - - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. 
- type: bool - default: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Ensure tags are present on a resource - community.aws.efs_tag: - resource: fs-123456ab - state: present - tags: - Name: MyEFS - Env: Production - -- name: Remove the Env tag if it's currently 'development' - community.aws.efs_tag: - resource: fsap-78945ff - state: absent - tags: - Env: development - -- name: Remove all tags except for Name - community.aws.efs_tag: - resource: fsap-78945ff - state: absent - tags: - Name: foo - purge_tags: true - -- name: Remove all tags - community.aws.efs_tag: - resource: fsap-78945ff - state: absent - tags: {} - purge_tags: true -""" - -RETURN = r""" -tags: - description: A dict containing the tags on the resource - returned: always - type: dict -added_tags: - description: A dict of tags that were added to the resource - returned: If tags were added - type: dict -removed_tags: - description: A dict of tags that were removed from the resource - returned: If tags were removed - type: dict -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - # Handled by AnsibleAWSModule - pass - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing -WAIT_RETRY = 5 # how many seconds to wait between propagation status polls - - -def get_tags(efs, module, resource): - """ - Get resource tags - """ - try: - return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) - except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg=f"Failed to fetch tags for resource {resource}") - - -def main(): - """ - MAIN - """ - argument_spec = dict( - resource=dict(required=True), - tags=dict(type="dict", required=True, aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=False), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params["resource"] - tags = module.params["tags"] - state = module.params["state"] - purge_tags = module.params["purge_tags"] - - result = {"changed": False} - - efs = module.client("efs", retry_decorator=AWSRetry.jittered_backoff()) - - current_tags = get_tags(efs, module, resource) - - add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) - - remove_tags = {} - - if state == "absent": - for key in tags: - if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): - remove_tags[key] = current_tags[key] - - for key in remove: - remove_tags[key] = current_tags[key] - - if remove_tags: - result["changed"] = True - result["removed_tags"] = remove_tags - if not module.check_mode: - try: - efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) - except (BotoCoreError, ClientError) as 
remove_tag_error: - module.fail_json_aws( - remove_tag_error, msg=f"Failed to remove tags {remove_tags} from resource {resource}" - ) - - if state == "present" and add_tags: - result["changed"] = True - result["added_tags"] = add_tags - current_tags.update(add_tags) - if not module.check_mode: - try: - tags = ansible_dict_to_boto3_tag_list(add_tags) - efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) - except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {add_tags} on resource {resource}") - - result["tags"] = get_tags(efs, module, resource) - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/eks_cluster.py b/eks_cluster.py deleted file mode 100644 index a445def55c3..00000000000 --- a/eks_cluster.py +++ /dev/null @@ -1,307 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: eks_cluster -version_added: 1.0.0 -short_description: Manage Elastic Kubernetes Service (EKS) Clusters -description: - - Manage Elastic Kubernetes Service (EKS) Clusters. - - Prior to release 5.0.0 this module was called C(community.aws.aws_eks_cluster). - The usage did not change. -author: - - Will Thames (@willthames) -options: - name: - description: Name of the EKS cluster. - required: True - type: str - version: - description: - - Kubernetes version. - - Defaults to C(latest). - type: str - role_arn: - description: ARN of IAM role used by the EKS cluster. - type: str - subnets: - description: List of subnet IDs for the Kubernetes cluster. - type: list - elements: str - security_groups: - description: List of security group names or IDs. - type: list - elements: str - state: - description: Desired state of the EKS cluster. - choices: - - absent - - present - default: present - type: str - tags: - description: - - A dictionary of tags to add the EKS cluster. - type: dict - version_added: 5.3.0 - wait: - description: >- - Specifies whether the module waits until the cluster is active or deleted - before moving on. It takes "usually less than 10 minutes" per AWS documentation. - type: bool - default: false - wait_timeout: - description: >- - The duration in seconds to wait for the cluster to become active. Defaults - to 1200 seconds (20 minutes). - default: 1200 - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
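A minimal sketch (hypothetical tag values) of how the efs_tag removal logic above decides what to delete: with state=absent, a tag is only removed when the requested value is null or matches the current value, while purge_tags extends removal to every unspecified tag:

current_tags = {"Name": "MyEFS", "Env": "development", "Team": "storage"}  # hypothetical
tags = {"Env": "production"}  # requested with state=absent
remove_tags = {}

# Value mismatch: 'Env' is currently 'development', not 'production', so it is NOT removed.
for key in tags:
    if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
        remove_tags[key] = current_tags[key]

print(remove_tags)  # {} -- nothing matched, nothing removed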
- -- name: Create an EKS cluster - community.aws.eks_cluster: - name: my_cluster - version: 1.14 - role_arn: my_eks_role - subnets: - - subnet-aaaa1111 - security_groups: - - my_eks_sg - - sg-abcd1234 - register: caller_facts - -- name: Remove an EKS cluster - community.aws.eks_cluster: - name: my_cluster - wait: true - state: absent -""" - -RETURN = r""" -arn: - description: ARN of the EKS cluster - returned: when state is present - type: str - sample: arn:aws:eks:us-west-2:123456789012:cluster/my-eks-cluster -certificate_authority: - description: Dictionary containing Certificate Authority Data for cluster - returned: after creation - type: complex - contains: - data: - description: Base-64 encoded Certificate Authority Data for cluster - returned: when the cluster has been created and is active - type: str -endpoint: - description: Kubernetes API server endpoint - returned: when the cluster has been created and is active - type: str - sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com -created_at: - description: Cluster creation date and time - returned: when state is present - type: str - sample: '2018-06-06T11:56:56.242000+00:00' -name: - description: EKS cluster name - returned: when state is present - type: str - sample: my-eks-cluster -resources_vpc_config: - description: VPC configuration of the cluster - returned: when state is present - type: complex - contains: - security_group_ids: - description: List of security group IDs - returned: always - type: list - sample: - - sg-abcd1234 - - sg-aaaa1111 - subnet_ids: - description: List of subnet IDs - returned: always - type: list - sample: - - subnet-abcdef12 - - subnet-345678ab - - subnet-cdef1234 - vpc_id: - description: VPC id - returned: always - type: str - sample: vpc-a1b2c3d4 -role_arn: - description: ARN of the IAM role used by the cluster - returned: when state is present - type: str - sample: arn:aws:iam::123456789012:role/eks_cluster_role -status: - description: status of the EKS cluster - returned: when state is present - type: str - sample: - - CREATING - - ACTIVE -version: - description: Kubernetes version of the cluster - returned: when state is present - type: str - sample: '1.10' -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def ensure_present(client, module): - name = module.params.get("name") - subnets = module.params["subnets"] - groups = module.params["security_groups"] - wait = module.params.get("wait") - cluster = get_cluster(client, module) - try: - ec2 = module.client("ec2") - vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])["Subnets"][0]["VpcId"] - groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't lookup security groups") - - if cluster: - if set(cluster["resourcesVpcConfig"]["subnetIds"]) != set(subnets): - module.fail_json(msg="Cannot modify subnets of existing cluster") - if 
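The security-group handling in ensure_present below first derives the VPC from the first subnet and then resolves group names to IDs; a rough standalone approximation of those two boto3 calls, with get_ec2_security_group_ids_from_names approximated by a plain describe_security_groups filter (IDs and names hypothetical):

import boto3

ec2 = boto3.client("ec2")  # credentials/region assumed from the environment

subnets = ["subnet-aaaa1111"]  # hypothetical
vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])["Subnets"][0]["VpcId"]

# Resolve any names in the list to IDs, scoped to that VPC.
resp = ec2.describe_security_groups(
    Filters=[{"Name": "vpc-id", "Values": [vpc_id]},
             {"Name": "group-name", "Values": ["my_eks_sg"]}]
)
group_ids = [sg["GroupId"] for sg in resp["SecurityGroups"]]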
set(cluster["resourcesVpcConfig"]["securityGroupIds"]) != set(groups): - module.fail_json(msg="Cannot modify security groups of existing cluster") - if module.params.get("version") and module.params.get("version") != cluster["version"]: - module.fail_json(msg="Cannot modify version of existing cluster") - - if wait: - wait_until(client, module, "cluster_active") - # Ensure that fields that are only available for active clusters are - # included in the returned value - cluster = get_cluster(client, module) - - module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster)) - - if module.check_mode: - module.exit_json(changed=True) - try: - params = dict( - name=name, - roleArn=module.params["role_arn"], - resourcesVpcConfig=dict(subnetIds=subnets, securityGroupIds=groups), - ) - if module.params["version"]: - params["version"] = module.params["version"] - if module.params["tags"]: - params["tags"] = module.params["tags"] - cluster = client.create_cluster(**params)["cluster"] - except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't create cluster {name}") - - if wait: - wait_until(client, module, "cluster_active") - # Ensure that fields that are only available for active clusters are - # included in the returned value - cluster = get_cluster(client, module) - - module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster)) - - -def ensure_absent(client, module): - name = module.params.get("name") - existing = get_cluster(client, module) - wait = module.params.get("wait") - if not existing: - module.exit_json(changed=False) - if not module.check_mode: - try: - client.delete_cluster(name=module.params["name"]) - except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}") - - if wait: - wait_until(client, module, "cluster_deleted") - - module.exit_json(changed=True) - - -def get_cluster(client, module): - name = module.params.get("name") - try: - return client.describe_cluster(name=name)["cluster"] - except is_boto3_error_code("ResourceNotFoundException"): - return None - except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Couldn't get cluster {name}") - - -def wait_until(client, module, waiter_name="cluster_active"): - name = module.params.get("name") - wait_timeout = module.params.get("wait_timeout") - - waiter = get_waiter(client, waiter_name) - attempts = 1 + int(wait_timeout / waiter.config.delay) - waiter.wait(name=name, WaiterConfig={"MaxAttempts": attempts}) - - -def main(): - argument_spec = dict( - name=dict(required=True), - version=dict(), - role_arn=dict(), - subnets=dict(type="list", elements="str"), - security_groups=dict(type="list", elements="str"), - state=dict(choices=["absent", "present"], default="present"), - tags=dict(type="dict", required=False), - wait=dict(default=False, type="bool"), - wait_timeout=dict(default=1200, type="int"), - ) - - module = 
AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["role_arn", "subnets", "security_groups"]]], - supports_check_mode=True, - ) - - client = module.client("eks") - - if module.params.get("state") == "present": - ensure_present(client, module) - else: - ensure_absent(client, module) - - -if __name__ == "__main__": - main() diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py deleted file mode 100644 index 131f0651bd3..00000000000 --- a/eks_fargate_profile.py +++ /dev/null @@ -1,365 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2022 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: eks_fargate_profile -version_added: 4.0.0 -short_description: Manage EKS Fargate Profile -description: - - Manage EKS Fargate Profile. -author: - - Tiago Jarra (@tjarra) -options: - name: - description: Name of EKS Fargate Profile. - required: True - type: str - cluster_name: - description: Name of EKS Cluster. - required: True - type: str - role_arn: - description: - - ARN of IAM role used by the EKS cluster. - - Required when I(state=present). - type: str - subnets: - description: - - list of subnet IDs for the Kubernetes cluster. - - Required when I(state=present). - type: list - elements: str - selectors: - description: - - A list of selectors to use in fargate profile. - - Required when I(state=present). - type: list - elements: dict - suboptions: - namespace: - description: A namespace used in fargate profile. - type: str - labels: - description: A dictionary of labels used in fargate profile. - type: dict - default: {} - state: - description: Create or delete the Fargate Profile. - choices: - - absent - - present - default: present - type: str - wait: - description: >- - Specifies whether the module waits until the profile is created or deleted before moving on. - type: bool - default: false - wait_timeout: - description: >- - The duration in seconds to wait for the cluster to become active. Defaults - to 1200 seconds (20 minutes). - default: 1200 - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create an EKS Fargate Profile - community.aws.eks_fargate_profile: - name: test_fargate - cluster_name: test_cluster - role_arn: my_eks_role - subnets: - - subnet-aaaa1111 - selectors: - - namespace: nm-test - labels: - - label1: test - state: present - wait: true - -- name: Remove an EKS Fargate Profile - community.aws.eks_fargate_profile: - name: test_fargate - cluster_name: test_cluster - wait: true - state: absent -""" - -RETURN = r""" -fargate_profile_name: - description: Name of Fargate Profile. - returned: when state is present - type: str - sample: test_profile -fargate_profile_arn: - description: ARN of the Fargate Profile. - returned: when state is present - type: str - sample: arn:aws:eks:us-east-1:1231231123:safd -cluster_name: - description: Name of EKS Cluster. - returned: when state is present - type: str - sample: test-cluster -created_at: - description: Fargate Profile creation date and time. - returned: when state is present - type: str - sample: '2022-01-18T20:00:00.111000+00:00' -pod_execution_role_arn: - description: ARN of the IAM Role used by Fargate Profile. 
- returned: when state is present - type: str - sample: arn:aws:eks:us-east-1:1231231123:role/asdf -subnets: - description: List of subnets used in Fargate Profile. - returned: when state is present - type: list - sample: - - subnet-qwerty123 - - subnet-asdfg456 -selectors: - description: Selector configuration. - returned: when state is present - type: complex - contains: - namespace: - description: Name of the kubernetes namespace used in profile. - returned: when state is present - type: str - sample: nm-test - labels: - description: List of kubernetes labels used in profile. - returned: when state is present - type: list - sample: - - label1: test1 - - label2: test2 -tags: - description: A dictionary of resource tags. - returned: when state is present - type: dict - sample: - foo: bar - env: test -status: - description: status of the EKS Fargate Profile. - returned: when state is present - type: str - sample: - - CREATING - - ACTIVE -""" - -try: - import botocore -except ImportError: - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def validate_tags(client, module, fargate_profile): - changed = False - - desired_tags = module.params.get("tags") - if desired_tags is None: - return False - - try: - existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to list or compare tags for Fargate Profile {module.params.get('name')}") - - if tags_to_remove: - changed = True - if not module.check_mode: - try: - client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") - - if tags_to_add: - changed = True - if not module.check_mode: - try: - client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") - - return changed - - -def create_or_update_fargate_profile(client, module): - name = module.params.get("name") - subnets = module.params["subnets"] - role_arn = module.params["role_arn"] - cluster_name = module.params["cluster_name"] - selectors = module.params["selectors"] - tags = module.params["tags"] or {} - wait = module.params.get("wait") - fargate_profile = get_fargate_profile(client, module, name, cluster_name) - - if fargate_profile: - changed = False - if fargate_profile["podExecutionRoleArn"] != role_arn: - module.fail_json(msg="Cannot modify Execution Role") - if set(fargate_profile["subnets"]) != set(subnets): - module.fail_json(msg="Cannot modify Subnets") - if fargate_profile["selectors"] != selectors: - module.fail_json(msg="Cannot modify Selectors") - - changed = validate_tags(client, module, fargate_profile) - - if wait: - wait_until(client, module, "fargate_profile_active", name, cluster_name) - fargate_profile = get_fargate_profile(client, module, name, cluster_name) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile)) - - if module.check_mode: - module.exit_json(changed=True) - - check_profiles_status(client, module, cluster_name) - - try: - params = dict( - fargateProfileName=name, - podExecutionRoleArn=role_arn, - subnets=subnets, - clusterName=cluster_name, - selectors=selectors, - tags=tags, - ) - fargate_profile = client.create_fargate_profile(**params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't create fargate profile {name}") - - if wait: - wait_until(client, module, "fargate_profile_active", name, cluster_name) - fargate_profile = get_fargate_profile(client, module, name, cluster_name) - - module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile)) - - -def delete_fargate_profile(client, module): - name = module.params.get("name") - cluster_name = module.params["cluster_name"] - existing = get_fargate_profile(client, module, name, cluster_name) - wait = module.params.get("wait") - if not existing or existing["status"] == "DELETING": - module.exit_json(changed=False) - - if not module.check_mode: - check_profiles_status(client, module, cluster_name) - try: - client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete fargate profile {name}") - - if wait: - wait_until(client, module, "fargate_profile_deleted", name, cluster_name) - - module.exit_json(changed=True) - - -def get_fargate_profile(client, module, name, cluster_name): - try: - return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)["fargateProfile"] - except is_boto3_error_code("ResourceNotFoundException"): - return None - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get fargate profile") - - -# Check if any fargate profile is in a changing state; if so, wait for it to finish -def check_profiles_status(client, module, cluster_name): - try: - list_profiles = client.list_fargate_profiles(clusterName=cluster_name) - - for name in list_profiles["fargateProfileNames"]: - fargate_profile = get_fargate_profile(client, module, name, cluster_name) - if fargate_profile["status"] == "CREATING": - wait_until( - client, module, "fargate_profile_active", fargate_profile["fargateProfileName"], cluster_name - ) - elif fargate_profile["status"] == "DELETING": - wait_until( - client, module, "fargate_profile_deleted", fargate_profile["fargateProfileName"], cluster_name - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't find EKS cluster") - - -def wait_until(client, module, waiter_name, name, cluster_name): - wait_timeout = module.params.get("wait_timeout") - waiter = get_waiter(client, waiter_name) - attempts = 1 + int(wait_timeout / waiter.config.delay) - try: - waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={"MaxAttempts": attempts}) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="An error occurred
waiting") - - -def main(): - argument_spec = dict( - name=dict(required=True), - cluster_name=dict(required=True), - role_arn=dict(), - subnets=dict(type="list", elements="str"), - selectors=dict( - type="list", - elements="dict", - options=dict( - namespace=dict(type="str"), - labels=dict(type="dict", default={}), - ), - ), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - state=dict(choices=["absent", "present"], default="present"), - wait=dict(default=False, type="bool"), - wait_timeout=dict(default=1200, type="int"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["role_arn", "subnets", "selectors"]]], - supports_check_mode=True, - ) - - try: - client = module.client("eks") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't connect to AWS") - - if module.params.get("state") == "present": - create_or_update_fargate_profile(client, module) - else: - delete_fargate_profile(client, module) - - -if __name__ == "__main__": - main() diff --git a/eks_nodegroup.py b/eks_nodegroup.py deleted file mode 100644 index f146328f098..00000000000 --- a/eks_nodegroup.py +++ /dev/null @@ -1,753 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2022 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: eks_nodegroup -version_added: 5.3.0 -short_description: Manage EKS Nodegroup module -description: - - Manage EKS Nodegroup. -author: - - Tiago Jarra (@tjarra) -options: - name: - description: Name of EKS Nodegroup. - required: True - type: str - cluster_name: - description: Name of EKS Cluster. - required: True - type: str - node_role: - description: ARN of IAM role used by the EKS cluster Nodegroup. - type: str - subnets: - description: list of subnet IDs for the Kubernetes cluster. - type: list - elements: str - scaling_config: - description: The scaling configuration details for the Auto Scaling group that is created for your node group. - type: dict - default: - min_size: 1 - max_size: 2 - desired_size: 1 - suboptions: - min_size: - description: The minimum number of nodes that the managed node group can scale in to. - type: int - max_size: - description: The maximum number of nodes that the managed node group can scale out to. - type: int - desired_size: - description: The current number of nodes that the managed node group should maintain. - type: int - disk_size: - description: - - Size of disk in nodegroup nodes. - If you specify I(launch_template), then don't specify I(disk_size), or the node group deployment will fail. - type: int - instance_types: - description: - - Specify the instance types for a node group. - If you specify I(launch_template), then don't specify I(instance_types), or the node group deployment will fail. - type: list - elements: str - ami_type: - description: The AMI type for your node group. - type: str - choices: - - AL2_x86_64 - - AL2_x86_64_GPU - - AL2_ARM_64 - - CUSTOM - - BOTTLEROCKET_ARM_64 - - BOTTLEROCKET_x86_64 - remote_access: - description: - - The remote access (SSH) configuration to use with your node group. - If you specify I(launch_template), then don't specify I(remote_access), or the node group deployment will fail. 
- type: dict - suboptions: - ec2_ssh_key: - description: The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the managed node group. - type: str - source_sg: - description: The security groups that are allowed SSH access (port 22) to the nodes. - type: list - elements: str - update_config: - description: The node group update configuration. - type: dict - default: - max_unavailable: 1 - suboptions: - max_unavailable: - description: The maximum number of nodes unavailable at once during a version update. - type: int - max_unavailable_percentage: - description: The maximum percentage of nodes unavailable during a version update. - type: int - labels: - description: The Kubernetes labels to be applied to the nodes in the node group when they are created. - type: dict - default: {} - taints: - description: The Kubernetes taints to be applied to the nodes in the node group. - type: list - elements: dict - default: [] - suboptions: - key: - description: The key of the taint. - type: str - value: - description: The value of the taint. - type: str - effect: - description: The effect of the taint. - type: str - choices: - - NO_SCHEDULE - - NO_EXECUTE - - PREFER_NO_SCHEDULE - launch_template: - description: - - An object representing a node group's launch template specification. - - If specified, then do not specify I(instanceTypes), I(diskSize), or I(remoteAccess). - type: dict - suboptions: - name: - description: The name of the launch template. - type: str - version: - description: - - The version of the launch template to use. - - If no version is specified, then the template's default version is used. - type: str - id: - description: The ID of the launch template. - type: str - capacity_type: - description: The capacity type for your node group. - default: ON_DEMAND - type: str - choices: - - ON_DEMAND - - SPOT - release_version: - description: The AMI version of the Amazon EKS optimized AMI to use with your node group. - type: str - state: - description: Create or delete the Nodegroup. - choices: - - absent - - present - default: present - type: str - tags: - description: A dictionary of resource tags. - type: dict - aliases: ['resource_tags'] - purge_tags: - description: - - Purge existing tags that are not found in the nodegroup. - type: bool - default: true - wait: - description: Specifies whether the module waits until the profile is created or deleted before moving on. - type: bool - default: false - wait_timeout: - description: The duration in seconds to wait for the nodegroup to become active. Defaults to C(1200) seconds. - default: 1200 - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: create nodegroup - community.aws.eks_nodegroup: - name: test_nodegroup - state: present - cluster_name: test_cluster - node_role: arn:aws:eks:us-east-1:1231231123:role/asdf - subnets: - - subnet-qwerty123 - - subnet-asdfg456 - scaling_config: - - min_size: 1 - - max_size: 2 - - desired_size: 1 - disk_size: 20 - instance_types: 't3.micro' - ami_type: 'AL2_x86_64' - labels: - - 'teste': 'test' - taints: - - key: 'test' - value: 'test' - effect: 'NO_SCHEDULE' - capacity_type: 'on_demand' - -- name: Remove an EKS Nodegrop - community.aws.eks_nodegroup: - name: test_nodegroup - cluster_name: test_cluster - wait: yes - state: absent -""" - -RETURN = r""" -nodegroup_name: - description: The name associated with an Amazon EKS managed node group. - returned: when state is present - type: str - sample: test_cluster -nodegroup_arn: - description: The Amazon Resource Name (ARN) associated with the managed node group. - returned: when state is present - type: str - sample: arn:aws:eks:us-east-1:1231231123:safd -cluster_name: - description: Name of EKS Cluster - returned: when state is present - type: str - sample: test_cluster -version: - description: The Kubernetes version of the managed node group. - returned: when state is present - type: str - sample: need_validate -release_version: - description: This is the version of the Amazon EKS optimized AMI that the node group was deployed with. - returned: when state is present - type: str - sample: need_validate -created_at: - description: Nodegroup creation date and time. - returned: when state is present - type: str - sample: '2022-01-18T20:00:00.111000+00:00' -modified_at: - description: Nodegroup modified date and time. - returned: when state is present - type: str - sample: '2022-01-18T20:00:00.111000+00:00' -status: - description: status of the EKS Nodegroup. - returned: when state is present - type: str - sample: - - CREATING - - ACTIVE -capacity_type: - description: The capacity type of your managed node group. - returned: when state is present - type: str - sample: need_validate -scaling_config: - description: The scaling configuration details for the Auto Scaling group that is associated with your node group. - returned: when state is present - type: dict - sample: need_validate -instance_types: - description: This is the instance type that is associated with the node group. - returned: when state is present - type: list - sample: need_validate -subnets: - description: List of subnets used in Fargate Profile. - returned: when state is present - type: list - sample: - - subnet-qwerty123 - - subnet-asdfg456 -remote_access: - description: This is the remote access configuration that is associated with the node group. - returned: when state is present - type: dict - sample: need_validate -ami_type: - description: This is the AMI type that was specified in the node group configuration. - returned: when state is present - type: str - sample: need_validate -node_role: - description: ARN of the IAM Role used by Nodegroup. - returned: when state is present - type: str - sample: arn:aws:eks:us-east-1:1231231123:role/asdf -labels: - description: The Kubernetes labels applied to the nodes in the node group. - returned: when state is present - type: dict - sample: need_validate -taints: - description: The Kubernetes taints to be applied to the nodes in the node group when they are created. - returned: when state is present - type: list - sample: need_validate -resources: - description: The resources associated with the node group. 
- returned: when state is present - type: complex - contains: - autoScalingGroups: - description: The Auto Scaling groups associated with the node group. - returned: when state is present - type: list - elements: dict - remoteAccessSecurityGroup: - description: The remote access security group associated with the node group. - returned: when state is present - type: str -diskSize: - description: This is the disk size in the node group configuration. - returned: when state is present - type: int - sample: 20 -health: - description: The health status of the node group. - returned: when state is present - type: dict - sample: need_validate -update_config: - description: The node group update configuration. - returned: when state is present - type: dict - contains: - maxUnavailable: - description: The maximum number of nodes unavailable at once during a version update. - type: int - maxUnavailablePercentage: - description: The maximum percentage of nodes unavailable during a version update. - type: int -launch_template: - description: If a launch template was used to create the node group, then this is the launch template that was used. - returned: when state is present - type: dict - sample: need_validate -tags: - description: Nodegroup tags. - returned: when state is present - type: dict - sample: - foo: bar -""" - -try: - import botocore -except ImportError: - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def validate_tags(client, module, nodegroup): - changed = False - - desired_tags = module.params.get("tags") - if desired_tags is None: - return False - - try: - existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to list or compare tags for Nodegroup {module.params.get('name')}.") - if tags_to_remove: - if not module.check_mode: - changed = True - try: - client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") - if tags_to_add: - if not module.check_mode: - changed = True - try: - client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") - - return changed - - -def compare_taints(nodegroup_taints, param_taints): - taints_to_unset = [] - taints_to_add_or_update = [] - for taint in nodegroup_taints: - if taint not in param_taints: - taints_to_unset.append(taint) - for taint in param_taints: - if taint not in nodegroup_taints: - taints_to_add_or_update.append(taint) - 
- return taints_to_add_or_update, taints_to_unset - - -def validate_taints(client, module, nodegroup, param_taints): - changed = False - params = dict() - params["clusterName"] = nodegroup["clusterName"] - params["nodegroupName"] = nodegroup["nodegroupName"] - params["taints"] = {} - if "taints" not in nodegroup: - nodegroup["taints"] = [] - taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup["taints"], param_taints) - - if taints_to_add_or_update: - params["taints"]["addOrUpdateTaints"] = taints_to_add_or_update - if taints_to_unset: - params["taints"]["removeTaints"] = taints_to_unset - if params["taints"]: - if not module.check_mode: - changed = True - try: - client.update_nodegroup_config(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set taints for Nodegroup {params['nodegroupName']}.") - - return changed - - -def compare_labels(nodegroup_labels, param_labels): - labels_to_unset = [] - labels_to_add_or_update = {} - for label in nodegroup_labels.keys(): - if label not in param_labels: - labels_to_unset.append(label) - for key, value in param_labels.items(): - if key not in nodegroup_labels.keys(): - labels_to_add_or_update[key] = value - - return labels_to_add_or_update, labels_to_unset - - -def validate_labels(client, module, nodegroup, param_labels): - changed = False - params = dict() - params["clusterName"] = nodegroup["clusterName"] - params["nodegroupName"] = nodegroup["nodegroupName"] - params["labels"] = {} - labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup["labels"], param_labels) - - if labels_to_add_or_update: - params["labels"]["addOrUpdateLabels"] = labels_to_add_or_update - if labels_to_unset: - params["labels"]["removeLabels"] = labels_to_unset - if params["labels"]: - if not module.check_mode: - changed = True - try: - client.update_nodegroup_config(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set labels for Nodegroup {params['nodegroupName']}.") - - return changed - - -def compare_params(module, params, nodegroup): - for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiType", "remoteAccess", "capacityType"]: - if (param in nodegroup) and (param in params): - if nodegroup[param] != params[param]: - module.fail_json(msg=f"Cannot modify parameter {param}.") - if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params): - module.fail_json(msg="Cannot add a Launch Template to this Nodegroup.") - if nodegroup["updateConfig"] != params["updateConfig"]: - return True - if nodegroup["scalingConfig"] != params["scalingConfig"]: - return True - return False - - -def compare_params_launch_template(module, params, nodegroup): - if "launchTemplate" not in params: - module.fail_json(msg="Cannot remove the Launch Template from this Nodegroup.") - else: - for key in ["name", "id"]: - if (key in params["launchTemplate"]) and ( - params["launchTemplate"][key] != nodegroup["launchTemplate"][key] - ): - module.fail_json(msg=f"Cannot modify Launch Template {key}.") - if ("version" in params["launchTemplate"]) and ( - params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] - ): - return True - return False - - -def create_or_update_nodegroups(client, module): - changed = False - params = dict() - params["nodegroupName"] = module.params["name"] - params["clusterName"] = module.params["cluster_name"] - params["nodeRole"] =
module.params["node_role"] - params["subnets"] = module.params["subnets"] - params["tags"] = module.params["tags"] or {} - if module.params["ami_type"] is not None: - params["amiType"] = module.params["ami_type"] - if module.params["disk_size"] is not None: - params["diskSize"] = module.params["disk_size"] - if module.params["instance_types"] is not None: - params["instanceTypes"] = module.params["instance_types"] - if module.params["launch_template"] is not None: - params["launchTemplate"] = dict() - if module.params["launch_template"]["id"] is not None: - params["launchTemplate"]["id"] = module.params["launch_template"]["id"] - if module.params["launch_template"]["version"] is not None: - params["launchTemplate"]["version"] = module.params["launch_template"]["version"] - if module.params["launch_template"]["name"] is not None: - params["launchTemplate"]["name"] = module.params["launch_template"]["name"] - if module.params["release_version"] is not None: - params["releaseVersion"] = module.params["release_version"] - if module.params["remote_access"] is not None: - params["remoteAccess"] = dict() - if module.params["remote_access"]["ec2_ssh_key"] is not None: - params["remoteAccess"]["ec2SshKey"] = module.params["remote_access"]["ec2_ssh_key"] - if module.params["remote_access"]["source_sg"] is not None: - params["remoteAccess"]["sourceSecurityGroups"] = module.params["remote_access"]["source_sg"] - if module.params["capacity_type"] is not None: - params["capacityType"] = module.params["capacity_type"].upper() - if module.params["labels"] is not None: - params["labels"] = module.params["labels"] - if module.params["taints"] is not None: - params["taints"] = module.params["taints"] - if module.params["update_config"] is not None: - params["updateConfig"] = dict() - if module.params["update_config"]["max_unavailable"] is not None: - params["updateConfig"]["maxUnavailable"] = module.params["update_config"]["max_unavailable"] - if module.params["update_config"]["max_unavailable_percentage"] is not None: - params["updateConfig"]["maxUnavailablePercentage"] = module.params["update_config"][ - "max_unavailable_percentage" - ] - if module.params["scaling_config"] is not None: - params["scalingConfig"] = snake_dict_to_camel_dict(module.params["scaling_config"]) - - wait = module.params.get("wait") - nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) - - if nodegroup: - update_params = dict() - update_params["clusterName"] = params["clusterName"] - update_params["nodegroupName"] = params["nodegroupName"] - - if "launchTemplate" in nodegroup: - if compare_params_launch_template(module, params, nodegroup): - update_params["launchTemplate"] = params["launchTemplate"] - if not module.check_mode: - try: - client.update_nodegroup_version(**update_params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't update nodegroup.") - changed |= True - - if compare_params(module, params, nodegroup): - try: - if "launchTemplate" in update_params: - update_params.pop("launchTemplate") - update_params["scalingConfig"] = params["scalingConfig"] - update_params["updateConfig"] = params["updateConfig"] - - if not module.check_mode: - client.update_nodegroup_config(**update_params) - - changed |= True - - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't update nodegroup.") - - changed |= validate_tags(client, module, nodegroup) - - 
changed |= validate_labels(client, module, nodegroup, params["labels"]) - - if "taints" in nodegroup: - changed |= validate_taints(client, module, nodegroup, params["taints"]) - - if wait: - wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) - - nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup)) - - if module.check_mode: - module.exit_json(changed=True) - - try: - nodegroup = client.create_nodegroup(**params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't create Nodegroup {params['nodegroupName']}.") - - if wait: - wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) - nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"]) - - module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup)) - - -def delete_nodegroups(client, module): - name = module.params.get("name") - clusterName = module.params["cluster_name"] - existing = get_nodegroup(client, module, name, clusterName) - wait = module.params.get("wait") - if not existing or existing["status"] == "DELETING": - module.exit_json(changed=False, msg="Nodegroup does not exist or is already in DELETING status.") - if not module.check_mode: - try: - client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete Nodegroup {name}.") - - if wait: - wait_until(client, module, "nodegroup_deleted", name, clusterName) - - module.exit_json(changed=True) - - -def get_nodegroup(client, module, nodegroup_name, cluster_name): - try: - return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)["nodegroup"] - except is_boto3_error_code("ResourceNotFoundException"): - return None - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Couldn't get Nodegroup {nodegroup_name}.") - - -def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): - wait_timeout = module.params.get("wait_timeout") - waiter = get_waiter(client, waiter_name) - attempts = 1 + int(wait_timeout / waiter.config.delay) - try: - waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={"MaxAttempts": attempts}) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg="An error occurred while waiting.") - - -def main(): - argument_spec = dict( - name=dict(type="str", required=True), - cluster_name=dict(type="str", required=True), - node_role=dict(), - subnets=dict(type="list", elements="str"), - scaling_config=dict( - type="dict", - default={"min_size": 1, "max_size": 2, "desired_size": 1}, - options=dict( - min_size=dict(type="int"), - max_size=dict(type="int"), - desired_size=dict(type="int"), - ), - ), - disk_size=dict(type="int"), - instance_types=dict(type="list", elements="str"), - ami_type=dict( - choices=[ - "AL2_x86_64", - "AL2_x86_64_GPU", - "AL2_ARM_64", - "CUSTOM", - "BOTTLEROCKET_ARM_64", - "BOTTLEROCKET_x86_64", - ] - ), - remote_access=dict( - type="dict", - options=dict( - ec2_ssh_key=dict(no_log=True), - source_sg=dict(type="list", elements="str"), - ), - ), - update_config=dict( - type="dict", - default={"max_unavailable": 1}, - options=dict( -
max_unavailable=dict(type="int"), - max_unavailable_percentage=dict(type="int"), - ), - ), - labels=dict(type="dict", default={}), - taints=dict( - type="list", - elements="dict", - default=[], - options=dict( - key=dict( - type="str", - no_log=False, - ), - value=dict(type="str"), - effect=dict(type="str", choices=["NO_SCHEDULE", "NO_EXECUTE", "PREFER_NO_SCHEDULE"]), - ), - ), - launch_template=dict( - type="dict", - options=dict( - name=dict(type="str"), - version=dict(type="str"), - id=dict(type="str"), - ), - ), - capacity_type=dict(choices=["ON_DEMAND", "SPOT"], default="ON_DEMAND"), - release_version=dict(), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - state=dict(choices=["absent", "present"], default="present"), - wait=dict(default=False, type="bool"), - wait_timeout=dict(default=1200, type="int"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["node_role", "subnets"]]], - mutually_exclusive=[ - ("launch_template", "instance_types"), - ("launch_template", "disk_size"), - ("launch_template", "remote_access"), - ("launch_template", "ami_type"), - ], - supports_check_mode=True, - ) - - if module.params["launch_template"] is None: - if module.params["disk_size"] is None: - module.params["disk_size"] = 20 - if module.params["ami_type"] is None: - module.params["ami_type"] = "AL2_x86_64" - if module.params["instance_types"] is None: - module.params["instance_types"] = ["t3.medium"] - else: - if (module.params["launch_template"]["id"] is None) and (module.params["launch_template"]["name"] is None): - module.exit_json(changed=False, msg="To use launch_template, it is necessary to inform the id or name.") - try: - client = module.client("eks") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't connect to AWS.") - - if module.params.get("state") == "present": - create_or_update_nodegroups(client, module) - else: - delete_nodegroups(client, module) - - -if __name__ == "__main__": - main() diff --git a/elasticache.py b/elasticache.py deleted file mode 100644 index d45509cb606..00000000000 --- a/elasticache.py +++ /dev/null @@ -1,552 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elasticache -version_added: 1.0.0 -short_description: Manage cache clusters in Amazon ElastiCache -description: - - Manage cache clusters in Amazon ElastiCache. - - Returns information about the specified cache cluster. -author: - - "Jim Dalton (@jsdalton)" -options: - state: - description: - - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. - - C(rebooted) will reboot the cluster, resulting in a momentary outage. - choices: ['present', 'absent', 'rebooted'] - required: true - type: str - name: - description: - - The cache cluster identifier. - required: true - type: str - engine: - description: - - Name of the cache engine to be used. - - Supported values are C(redis) and C(memcached). - default: memcached - type: str - cache_engine_version: - description: - - The version number of the cache engine. - type: str - default: '' - node_type: - description: - - The compute and memory capacity of the nodes in the cache cluster. 
- default: cache.t2.small - type: str - num_nodes: - description: - - The initial number of cache nodes that the cache cluster will have. - - Required when I(state=present). - type: int - default: 1 - cache_port: - description: - - The port number on which each of the cache nodes will accept - connections. - type: int - cache_parameter_group: - description: - - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group - for the specified engine will be used. - aliases: [ 'parameter_group' ] - type: str - default: '' - cache_subnet_group: - description: - - The subnet group name to associate with. Only use if inside a VPC. - - Required if inside a VPC. - type: str - default: '' - security_group_ids: - description: - - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC. - type: list - elements: str - default: [] - cache_security_groups: - description: - - A list of cache security group names to associate with this cache cluster. - - Don't use if your Cache is inside a VPC. In that case use I(security_group_ids) instead! - type: list - elements: str - default: [] - zone: - description: - - The EC2 Availability Zone in which the cache cluster will be created. - type: str - wait: - description: - - Wait for cache cluster result before returning. - type: bool - default: true - hard_modify: - description: - - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state. - - Defaults to C(false). - type: bool -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" # """ - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
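Since cache_subnet_group and security_group_ids only apply inside a VPC, while cache_security_groups only applies outside one, a VPC-based example is worth spelling out; a sketch with placeholder group names and security group IDs:

    - name: Redis cluster inside a VPC (sketch, placeholder IDs)
      community.aws.elasticache:
        name: "vpc-cache-example"
        state: present
        engine: redis
        node_type: cache.t3.small
        num_nodes: 1
        cache_subnet_group: "example-subnet-group"
        security_group_ids:
          - sg-0123456789abcdef0
        wait: true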
- -- name: Basic example - community.aws.elasticache: - name: "test-please-delete" - state: present - engine: memcached - cache_engine_version: 1.4.14 - node_type: cache.m3.small - num_nodes: 1 - cache_port: 11211 - cache_security_groups: - - default - zone: us-east-1d - - -- name: Ensure cache cluster is gone - community.aws.elasticache: - name: "test-please-delete" - state: absent - -- name: Reboot cache cluster - community.aws.elasticache: - name: "test-please-delete" - state: rebooted -""" - -from time import sleep - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class ElastiCacheManager: - - """Handles elasticache creation and destruction""" - - EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"] - - def __init__( - self, - module, - name, - engine, - cache_engine_version, - node_type, - num_nodes, - cache_port, - cache_parameter_group, - cache_subnet_group, - cache_security_groups, - security_group_ids, - zone, - wait, - hard_modify, - ): - self.module = module - self.name = name - self.engine = engine.lower() - self.cache_engine_version = cache_engine_version - self.node_type = node_type - self.num_nodes = num_nodes - self.cache_port = cache_port - self.cache_parameter_group = cache_parameter_group - self.cache_subnet_group = cache_subnet_group - self.cache_security_groups = cache_security_groups - self.security_group_ids = security_group_ids - self.zone = zone - self.wait = wait - self.hard_modify = hard_modify - - self.changed = False - self.data = None - self.status = "gone" - self.conn = self._get_elasticache_connection() - self._refresh_data() - - def ensure_present(self): - """Ensure cache cluster exists or create it if not""" - if self.exists(): - self.sync() - else: - self.create() - - def ensure_absent(self): - """Ensure cache cluster is gone or delete it if not""" - self.delete() - - def ensure_rebooted(self): - """Reboot the cache cluster""" - self.reboot() - - def exists(self): - """Check if cache cluster exists""" - return self.status in self.EXIST_STATUSES - - def create(self): - """Create an ElastiCache cluster""" - if self.status == "available": - return - if self.status in ["creating", "rebooting", "modifying"]: - if self.wait: - self._wait_for_status("available") - return - if self.status == "deleting": - if self.wait: - self._wait_for_status("gone") - else: - self.module.fail_json(msg=f"'{self.name}' is currently deleting. 
Cannot create.") - - kwargs = dict( - CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeType=self.node_type, - Engine=self.engine, - EngineVersion=self.cache_engine_version, - CacheSecurityGroupNames=self.cache_security_groups, - SecurityGroupIds=self.security_group_ids, - CacheParameterGroupName=self.cache_parameter_group, - CacheSubnetGroupName=self.cache_subnet_group, - ) - if self.cache_port is not None: - kwargs["Port"] = self.cache_port - if self.zone is not None: - kwargs["PreferredAvailabilityZone"] = self.zone - - try: - self.conn.create_cache_cluster(**kwargs) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to create cache cluster") - - self._refresh_data() - - self.changed = True - if self.wait: - self._wait_for_status("available") - return True - - def delete(self): - """Destroy an ElastiCache cluster""" - if self.status == "gone": - return - if self.status == "deleting": - if self.wait: - self._wait_for_status("gone") - return - if self.status in ["creating", "rebooting", "modifying"]: - if self.wait: - self._wait_for_status("available") - else: - self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.") - - try: - response = self.conn.delete_cache_cluster(CacheClusterId=self.name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to delete cache cluster") - - cache_cluster_data = response["CacheCluster"] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status("gone") - - def sync(self): - """Sync settings to cluster if required""" - if not self.exists(): - self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.") - - if self.status in ["creating", "rebooting", "modifying"]: - if self.wait: - self._wait_for_status("available") - else: - # Cluster can only be synced if available. If we can't wait - # for this, then just be done. - return - - if self._requires_destroy_and_create(): - if not self.hard_modify: - self.module.fail_json( - msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed." - ) - if not self.wait: - self.module.fail_json( - msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed." - ) - self.delete() - self.create() - return - - if self._requires_modification(): - self.modify() - - def modify(self): - """Modify the cache cluster. Note it's only possible to modify a few select options.""" - nodes_to_remove = self._get_nodes_to_remove() - try: - self.conn.modify_cache_cluster( - CacheClusterId=self.name, - NumCacheNodes=self.num_nodes, - CacheNodeIdsToRemove=nodes_to_remove, - CacheSecurityGroupNames=self.cache_security_groups, - CacheParameterGroupName=self.cache_parameter_group, - SecurityGroupIds=self.security_group_ids, - ApplyImmediately=True, - EngineVersion=self.cache_engine_version, - ) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Failed to modify cache cluster") - - self._refresh_data() - - self.changed = True - if self.wait: - self._wait_for_status("available") - - def reboot(self): - """Reboot the cache cluster""" - if not self.exists(): - self.module.fail_json(msg=f"'{self.name}' is {self.status}. 
Cannot reboot.") - if self.status == "rebooting": - return - if self.status in ["creating", "modifying"]: - if self.wait: - self._wait_for_status("available") - else: - self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.") - - # Collect ALL nodes for reboot - cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] - try: - self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Failed to reboot cache cluster") - - self._refresh_data() - - self.changed = True - if self.wait: - self._wait_for_status("available") - - def get_info(self): - """Return basic info about the cache cluster""" - info = {"name": self.name, "status": self.status} - if self.data: - info["data"] = self.data - return info - - def _wait_for_status(self, awaited_status): - """Wait for status to change from present status to awaited_status""" - status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"} - if self.status == awaited_status: - # No need to wait, we're already done - return - if status_map[self.status] != awaited_status: - self.module.fail_json( - msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'" - ) - - if awaited_status not in set(status_map.values()): - self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.") - - while True: - sleep(1) - self._refresh_data() - if self.status == awaited_status: - break - - def _requires_modification(self): - """Check if cluster requires (nondestructive) modification""" - # Check modifiable data attributes - modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version} - for key, value in modifiable_data.items(): - if value is not None and value and self.data[key] != value: - return True - - # Check cache security groups - cache_security_groups = [] - for sg in self.data["CacheSecurityGroups"]: - cache_security_groups.append(sg["CacheSecurityGroupName"]) - if set(cache_security_groups) != set(self.cache_security_groups): - return True - - # check vpc security groups - if self.security_group_ids: - vpc_security_groups = [] - security_groups = self.data.get("SecurityGroups", []) - for sg in security_groups: - vpc_security_groups.append(sg["SecurityGroupId"]) - if set(vpc_security_groups) != set(self.security_group_ids): - return True - - return False - - def _requires_destroy_and_create(self): - """ - Check whether a destroy and create is required to synchronize cluster. - """ - unmodifiable_data = { - "node_type": self.data["CacheNodeType"], - "engine": self.data["Engine"], - "cache_port": self._get_port(), - } - # Only check for modifications if zone is specified - if self.zone is not None: - unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"] - for key, value in unmodifiable_data.items(): - if getattr(self, key) is not None and getattr(self, key) != value: - return True - return False - - def _get_elasticache_connection(self): - """Get an elasticache connection""" - try: - return self.module.client("elasticache") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to connect to AWS") - - def _get_port(self): - """Get the port. 
Where this information is retrieved from is engine dependent.""" - if self.data["Engine"] == "memcached": - return self.data["ConfigurationEndpoint"]["Port"] - elif self.data["Engine"] == "redis": - # Redis only supports a single node (presently) so just use - # the first and only - return self.data["CacheNodes"][0]["Endpoint"]["Port"] - - def _refresh_data(self, cache_cluster_data=None): - """Refresh data about this cache cluster""" - - if cache_cluster_data is None: - try: - response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True) - except is_boto3_error_code("CacheClusterNotFound"): - self.data = None - self.status = "gone" - return - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Failed to describe cache clusters") - cache_cluster_data = response["CacheClusters"][0] - self.data = cache_cluster_data - self.status = self.data["CacheClusterStatus"] - - # The documentation for elasticache lies -- status on rebooting is set - # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it - # here to make status checks etc. more sane. - if self.status == "rebooting cache cluster nodes": - self.status = "rebooting" - - def _get_nodes_to_remove(self): - """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes - if num_nodes_to_remove <= 0: - return [] - - if not self.hard_modify: - self.module.fail_json( - msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." - ) - - cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] - return cache_node_ids[-num_nodes_to_remove:] - - -def main(): - """elasticache ansible module""" - argument_spec = dict( - state=dict(required=True, choices=["present", "absent", "rebooted"]), - name=dict(required=True), - engine=dict(default="memcached"), - cache_engine_version=dict(default=""), - node_type=dict(default="cache.t2.small"), - num_nodes=dict(default=1, type="int"), - # alias for compat with the original PR 1950 - cache_parameter_group=dict(default="", aliases=["parameter_group"]), - cache_port=dict(type="int"), - cache_subnet_group=dict(default=""), - cache_security_groups=dict(default=[], type="list", elements="str"), - security_group_ids=dict(default=[], type="list", elements="str"), - zone=dict(), - wait=dict(default=True, type="bool"), - hard_modify=dict(type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - ) - - name = module.params["name"] - state = module.params["state"] - engine = module.params["engine"] - cache_engine_version = module.params["cache_engine_version"] - node_type = module.params["node_type"] - num_nodes = module.params["num_nodes"] - cache_port = module.params["cache_port"] - cache_subnet_group = module.params["cache_subnet_group"] - cache_security_groups = module.params["cache_security_groups"] - security_group_ids = module.params["security_group_ids"] - zone = module.params["zone"] - wait = module.params["wait"] - hard_modify = module.params["hard_modify"] - cache_parameter_group = module.params["cache_parameter_group"] - - if cache_subnet_group and cache_security_groups: - module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups") - - if state == "present" and not num_nodes: - module.fail_json(msg="'num_nodes' is a required parameter. 
Please specify num_nodes > 0") - - elasticache_manager = ElastiCacheManager( - module, - name, - engine, - cache_engine_version, - node_type, - num_nodes, - cache_port, - cache_parameter_group, - cache_subnet_group, - cache_security_groups, - security_group_ids, - zone, - wait, - hard_modify, - ) - - if state == "present": - elasticache_manager.ensure_present() - elif state == "absent": - elasticache_manager.ensure_absent() - elif state == "rebooted": - elasticache_manager.ensure_rebooted() - - facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info()) - - module.exit_json(**facts_result) - - -if __name__ == "__main__": - main() diff --git a/elasticache_info.py b/elasticache_info.py deleted file mode 100644 index 50a8cb5ff0d..00000000000 --- a/elasticache_info.py +++ /dev/null @@ -1,496 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: elasticache_info -short_description: Retrieve information for AWS ElastiCache clusters -version_added: 1.0.0 -description: - - Retrieve information from AWS ElastiCache clusters. -options: - name: - description: - - The name of an ElastiCache cluster. - type: str -author: - - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: obtain all ElastiCache information - community.aws.elasticache_info: - -- name: obtain all information for a single ElastiCache cluster - community.aws.elasticache_info: - name: test_elasticache -""" - -RETURN = r""" -elasticache_clusters: - description: List of ElastiCache clusters. - returned: always - type: list - elements: dict - contains: - arn: - description: ARN of the cache cluster. - returned: always - type: str - sample: 'arn:aws:elasticache:us-east-1:123456789012:cluster:ansible-test' - auto_minor_version_upgrade: - description: Whether to automatically upgrade to minor versions. - returned: always - type: bool - sample: true - cache_cluster_create_time: - description: Date and time cluster was created. - returned: always - type: str - sample: '2017-09-15T05:43:46.038000+00:00' - cache_cluster_id: - description: ID of the cache cluster. - returned: always - type: str - sample: abcd-1234-001 - cache_cluster_status: - description: Status of ElastiCache cluster. - returned: always - type: str - sample: available - cache_node_type: - description: Instance type of ElastiCache nodes. - returned: always - type: str - sample: cache.t2.micro - cache_nodes: - description: List of ElastiCache nodes in the cluster. - returned: always - type: list - elements: dict - contains: - cache_node_create_time: - description: Date and time node was created. - returned: always - type: str - sample: '2017-09-15T05:43:46.038000+00:00' - cache_node_id: - description: ID of the cache node. - returned: always - type: str - sample: '0001' - cache_node_status: - description: Status of the cache node. - returned: always - type: str - sample: available - customer_availability_zone: - description: Availability Zone in which the cache node was created. - returned: always - type: str - sample: ap-southeast-2b - endpoint: - description: Connection details for the cache node. - returned: always - type: dict - contains: - address: - description: URL of the cache node endpoint. 
- returned: always - type: str - sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com - port: - description: Port of the cache node endpoint. - returned: always - type: int - sample: 6379 - parameter_group_status: - description: Status of the Cache Parameter Group. - returned: always - type: str - sample: in-sync - cache_parameter_group: - description: Contents of the Cache Parameter Group. - returned: always - type: dict - contains: - cache_node_ids_to_reboot: - description: Cache nodes which need to be rebooted for parameter changes to be applied. - returned: always - type: list - elements: str - sample: [] - cache_parameter_group_name: - description: Name of the cache parameter group. - returned: always - type: str - sample: default.redis3.2 - parameter_apply_status: - description: Status of parameter updates. - returned: always - type: str - sample: in-sync - cache_security_groups: - description: Security Groups used by the cache. - returned: always - type: list - elements: str - sample: - - 'sg-abcd1234' - cache_subnet_group_name: - description: ElastiCache Subnet Group used by the cache. - returned: always - type: str - sample: abcd-subnet-group - client_download_landing_page: - description: URL of client download web page. - returned: always - type: str - sample: 'https://console.aws.amazon.com/elasticache/home#client-download:' - engine: - description: Engine used by ElastiCache. - returned: always - type: str - sample: redis - engine_version: - description: Version of ElastiCache engine. - returned: always - type: str - sample: 3.2.4 - notification_configuration: - description: Configuration of notifications. - returned: if notifications are enabled - type: dict - contains: - topic_arn: - description: ARN of notification destination topic. - returned: if notifications are enabled - type: str - sample: arn:aws:sns:*:123456789012:my_topic - topic_name: - description: Name of notification destination topic. - returned: if notifications are enabled - type: str - sample: MyTopic - num_cache_nodes: - description: Number of Cache Nodes. - returned: always - type: int - sample: 1 - pending_modified_values: - description: Values that are pending modification. - returned: always - type: dict - preferred_availability_zone: - description: Preferred Availability Zone. - returned: always - type: str - sample: ap-southeast-2b - preferred_maintenance_window: - description: Time slot for preferred maintenance window. - returned: always - type: str - sample: sat:12:00-sat:13:00 - replication_group: - description: Information about the associated replication group. - version_added: 4.1.0 - returned: if replication is enabled - type: dict - contains: - arn: - description: The ARN (Amazon Resource Name) of the replication group. - returned: always - type: str - at_rest_encryption_enabled: - description: A flag that enables encryption at-rest when set to C(true). - returned: always - type: bool - auth_token_enabled: - description: A flag that enables using an AuthToken (password) when issuing Redis commands. - returned: always - type: bool - automatic_failover: - description: Indicates the status of automatic failover for this Redis replication group. - returned: always - type: str - sample: enabled - cache_node_type: - description: The name of the compute and memory capacity node type for each node in the replication group. - returned: always - type: str - sample: cache.t3.medium - cluster_enabled: - description: A flag indicating whether or not this replication group is cluster enabled. 
- returned: always - type: bool - description: - description: The user supplied description of the replication group. - returned: always - type: str - global_replication_group_info: - description: The name of the Global datastore and role of this replication group in the Global datastore. - returned: always - type: dict - contains: - global_replication_group_id: - description: The name of the Global datastore. - returned: always - type: str - global_replication_group_member_role: - description: The role of the replication group in a Global datastore. Can be primary or secondary. - returned: always - type: str - kms_key_id: - description: The ID of the KMS key used to encrypt the disk in the cluster. - returned: always - type: str - member_clusters: - description: The names of all the cache clusters that are part of this replication group. - returned: always - type: list - elements: str - multi_az: - description: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - returned: always - type: str - sample: enabled - node_groups: - description: A list of node groups in this replication group. - returned: always - type: list - elements: dict - contains: - node_group_id: - description: The identifier for the node group (shard). - returned: always - type: str - node_group_members: - description: A list containing information about individual nodes within the node group (shard). - returned: always - type: list - elements: dict - contains: - cache_cluster_id: - description: The ID of the cluster to which the node belongs. - returned: always - type: str - cache_node_id: - description: The ID of the node within its cluster. - returned: always - type: str - current_role: - description: The role that is currently assigned to the node - primary or replica. - returned: always - type: str - sample: primary - preferred_availability_zone: - description: The name of the Availability Zone in which the node is located. - returned: always - type: str - read_endpoint: - description: The information required for client programs to connect to a node for read operations. - returned: always - type: list - elements: dict - contains: - address: - description: The DNS hostname of the cache node. - returned: always - type: str - port: - description: The port number that the cache engine is listening on. - returned: always - type: int - sample: 6379 - primary_endpoint: - description: The endpoint of the primary node in this node group (shard). - returned: always - type: list - elements: dict - contains: - address: - description: The DNS hostname of the cache node. - returned: always - type: str - port: - description: The port number that the cache engine is listening on. - returned: always - type: int - sample: 6379 - reader_endpoint: - description: The endpoint of the cache node. - returned: always - type: dict - contains: - address: - description: The DNS hostname of the cache node. - returned: always - type: str - port: - description: The port number that the cache engine is listening on. - returned: always - type: int - sample: 6379 - status: - description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting). - returned: always - type: str - sample: available - pending_modified_values: - description: A group of settings to be applied to the replication group, either immediately or during the next maintenance window. - returned: always - type: dict - replication_group_id: - description: Replication Group Id. 
- returned: always - type: str - sample: replication-001 - snapshot_retention_limit: - description: The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. - returned: always - type: int - snapshot_window: - description: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). - returned: always - type: str - sample: 07:00-09:00 - snapshotting_cluster_id: - description: The cluster ID that is used as the daily snapshot source for the replication group. - returned: always - type: str - status: - description: The current state of this replication group - C(creating), C(available), C(modifying), C(deleting), C(create-failed), C(snapshotting) - returned: always - type: str - transit_encryption_enabled: - description: A flag that enables in-transit encryption when set to C(true). - returned: always - type: bool - replication_group_id: - description: Replication Group Id. - returned: if replication is enabled - type: str - sample: replication-001 - security_groups: - description: List of Security Groups associated with ElastiCache. - returned: always - type: list - elements: dict - contains: - security_group_id: - description: Security Group ID - returned: always - type: str - sample: sg-abcd1234 - status: - description: Status of Security Group - returned: always - type: str - sample: active - tags: - description: Tags applied to the ElastiCache cluster - returned: always - type: dict - sample: - Application: web - Environment: test -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.exponential_backoff() -def describe_cache_clusters_with_backoff(client, cluster_id=None): - paginator = client.get_paginator("describe_cache_clusters") - params = dict(ShowCacheNodeInfo=True) - if cluster_id: - params["CacheClusterId"] = cluster_id - try: - response = paginator.paginate(**params).build_full_result() - except is_boto3_error_code("CacheClusterNotFound"): - return [] - return response["CacheClusters"] - - -@AWSRetry.exponential_backoff() -def describe_replication_group_with_backoff(client, replication_group_id): - try: - response = client.describe_replication_groups(ReplicationGroupId=replication_group_id) - except is_boto3_error_code("ReplicationGroupNotFoundFault"): - return None - - return response["ReplicationGroups"][0] - - -@AWSRetry.exponential_backoff() -def get_elasticache_tags_with_backoff(client, cluster_id): - return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"] - - -def get_elasticache_clusters(client, module): - region = module.region - try: - clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get("name")) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't obtain cache cluster info") - - account_id, partition = get_aws_account_info(module) - 
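The loop below assembles each cluster's ARN by hand before fetching its tags; the format it targets, sketched with hypothetical identifiers:

    # Hypothetical identifiers, for illustration only.
    partition, region, account_id = "aws", "us-east-1", "123456789012"
    cluster_id = "ansible-test"
    arn = f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster_id}"
    # -> "arn:aws:elasticache:us-east-1:123456789012:cluster:ansible-test"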
results = [] - for cluster in clusters: - cluster = camel_dict_to_snake_dict(cluster) - arn = f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}" - try: - tags = get_elasticache_tags_with_backoff(client, arn) - except is_boto3_error_code("CacheClusterNotFound"): - # e.g: Cluster was listed but is in deleting state - continue - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't get tags for cluster {cluster['cache_cluster_id']}") - - cluster["tags"] = boto3_tag_list_to_ansible_dict(tags) - - if cluster.get("replication_group_id", None): - try: - replication_group = describe_replication_group_with_backoff(client, cluster["replication_group_id"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't obtain replication group info") - - if replication_group is not None: - replication_group = camel_dict_to_snake_dict(replication_group) - cluster["replication_group"] = replication_group - - results.append(cluster) - return results - - -def main(): - argument_spec = dict( - name=dict(required=False), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - client = module.client("elasticache") - - module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module)) - - -if __name__ == "__main__": - main() diff --git a/elasticache_parameter_group.py b/elasticache_parameter_group.py deleted file mode 100644 index 00f2af19a08..00000000000 --- a/elasticache_parameter_group.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elasticache_parameter_group -version_added: 1.0.0 -short_description: Manage cache parameter groups in Amazon ElastiCache -description: - - Manage cache parameter groups in Amazon ElastiCache. - - Returns information about the specified cache parameter group. -author: - - "Sloane Hertel (@s-hertel)" - -options: - group_family: - description: - - The name of the cache parameter group family that the cache parameter group can be used with. - Required when creating a cache parameter group. - choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x'] - type: str - name: - description: - - A user-specified name for the cache parameter group. - required: true - type: str - description: - description: - - A user-specified description for the cache parameter group. - type: str - default: '' - state: - description: - - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed. - choices: ['present', 'absent', 'reset'] - required: true - type: str - values: - description: - - A user-specified dictionary of parameters to reset or modify for the cache parameter group. - type: dict - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
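Because values arrive as YAML scalars, the validation code further down (check_valid_modification) coerces booleans to the engine's "yes"/"no" strings and passes integers through unchanged; a sketch mixing value types, assuming a redis3.2 family and hypothetical parameter names:

    - name: 'Mixed value types in one update (sketch)'
      community.aws.elasticache_parameter_group:
        name: 'test-param-group'
        group_family: 'redis3.2'
        state: 'present'
        values:
          activerehashing: true        # coerced to the string "yes"
          maxmemory-samples: 5         # integers pass through unchanged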
- -- hosts: localhost - connection: local - tasks: - - name: 'Create a test parameter group' - community.aws.elasticache_parameter_group: - name: 'test-param-group' - group_family: 'redis3.2' - description: 'This is a cache parameter group' - state: 'present' - - name: 'Modify a test parameter group' - community.aws.elasticache_parameter_group: - name: 'test-param-group' - values: - activerehashing: yes - client-output-buffer-limit-normal-hard-limit: 4 - state: 'present' - - name: 'Reset all modifiable parameters for the test parameter group' - community.aws.elasticache_parameter_group: - name: 'test-param-group' - state: reset - - name: 'Delete a test parameter group' - community.aws.elasticache_parameter_group: - name: 'test-param-group' - state: 'absent' -""" - -RETURN = r""" -elasticache: - description: cache parameter group information and response metadata - returned: always - type: dict - sample: - cache_parameter_group: - cache_parameter_group_family: redis3.2 - cache_parameter_group_name: test-please-delete - description: "initial description" - response_metadata: - http_headers: - content-length: "562" - content-type: text/xml - date: "Mon, 06 Feb 2017 22:14:08 GMT" - x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1 - http_status_code: 200 - request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1 - retry_attempts: 0 -changed: - description: if the cache parameter group has changed - returned: always - type: bool - sample: - changed: true -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_text -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.six import string_types - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create(module, conn, name, group_family, description): - """Create ElastiCache parameter group.""" - try: - response = conn.create_cache_parameter_group( - CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create cache parameter group.") - return response, changed - - -def delete(module, conn, name): - """Delete ElastiCache parameter group.""" - try: - conn.delete_cache_parameter_group(CacheParameterGroupName=name) - response = {} - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete cache parameter group.") - return response, changed - - -def make_current_modifiable_param_dict(module, conn, name): - """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" - current_info = get_info(conn, name) - if current_info is False: - module.fail_json(msg=f"Could not connect to the cache parameter group {name}.") - - parameters = current_info["Parameters"] - modifiable_params = {} - - for param in parameters: - if param["IsModifiable"]: - modifiable_params[param["ParameterName"]] = [param.get("AllowedValues")] - modifiable_params[param["ParameterName"]].append(param["DataType"]) - modifiable_params[param["ParameterName"]].append(param.get("ParameterValue")) - return modifiable_params - - -def check_valid_modification(module, values, modifiable_params): - """Check if the parameters 
and their new values are valid.""" - changed_with_update = False - - for parameter in values: - new_value = values[parameter] - - # check valid modifiable parameters - if parameter not in modifiable_params: - module.fail_json( - msg=f"{parameter} is not a modifiable parameter. Valid parameters to modify are: {modifiable_params.keys()}." - ) - - # check allowed datatype for modified parameters - str_to_type = {"integer": int, "string": string_types} - expected_type = str_to_type[modifiable_params[parameter][1]] - if not isinstance(new_value, expected_type): - if expected_type == str: - if isinstance(new_value, bool): - values[parameter] = "yes" if new_value else "no" - else: - values[parameter] = to_text(new_value) - elif expected_type == int: - if isinstance(new_value, bool): - values[parameter] = 1 if new_value else 0 - else: - module.fail_json( - msg=( - f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter" - f" {parameter}. Expected a type {modifiable_params[parameter][1]}." - ) - ) - else: - module.fail_json( - msg=( - f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter {parameter}." - f" Expected a type {modifiable_params[parameter][1]}." - ) - ) - - # check allowed values for modifiable parameters - choices = modifiable_params[parameter][0] - if choices: - if not (to_text(new_value) in choices or isinstance(new_value, int)): - module.fail_json( - msg=f"{new_value} is not an allowed value for the parameter {parameter}. Valid parameters are: {choices}." - ) - - # check if a new value is different from current value - if to_text(values[parameter]) != modifiable_params[parameter][2]: - changed_with_update = True - - return changed_with_update, values - - -def check_changed_parameter_values(values, old_parameters, new_parameters): - """Check whether the new values differ from the old values.""" - changed_with_update = False - - # if the user specified parameters to reset, only check those for change - if values: - for parameter in values: - if old_parameters[parameter] != new_parameters[parameter]: - changed_with_update = True - break - # otherwise check all to find a change - else: - for parameter in old_parameters: - if old_parameters[parameter] != new_parameters[parameter]: - changed_with_update = True - break - - return changed_with_update - - -def modify(module, conn, name, values): - """Modify ElastiCache parameter group to reflect the new information if it differs from the current.""" - # compare current group parameters with the parameters we've specified, to see if this will change the group - format_parameters = [] - for key in values: - value = to_text(values[key]) - format_parameters.append({"ParameterName": key, "ParameterValue": value}) - try: - response = conn.modify_cache_parameter_group( - CacheParameterGroupName=name, ParameterNameValues=format_parameters - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to modify cache parameter group.") - return response - - -def reset(module, conn, name, values): - """Reset ElastiCache parameter group if the current information is different from the new information.""" - # used to compare with the reset parameters' dict to see if there have been changes - old_parameters_dict = make_current_modifiable_param_dict(module, conn, name) - - format_parameters = [] - - # determine whether to reset all or specific parameters - if values: - all_parameters = False - format_parameters = [] - for 
key in values: - value = to_text(values[key]) - format_parameters.append({"ParameterName": key, "ParameterValue": value}) - else: - all_parameters = True - - try: - response = conn.reset_cache_parameter_group( - CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to reset cache parameter group.") - - # determine changed - new_parameters_dict = make_current_modifiable_param_dict(module, conn, name) - changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict) - - return response, changed - - -def get_info(conn, name): - """Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access.""" - try: - data = conn.describe_cache_parameters(CacheParameterGroupName=name) - return data - except botocore.exceptions.ClientError: - return False - - -def main(): - argument_spec = dict( - group_family=dict( - type="str", - choices=[ - "memcached1.4", - "memcached1.5", - "redis2.6", - "redis2.8", - "redis3.2", - "redis4.0", - "redis5.0", - "redis6.x", - ], - ), - name=dict(required=True, type="str"), - description=dict(default="", type="str"), - state=dict(required=True, choices=["present", "absent", "reset"]), - values=dict(type="dict"), - ) - module = AnsibleAWSModule(argument_spec=argument_spec) - - parameter_group_family = module.params.get("group_family") - parameter_group_name = module.params.get("name") - group_description = module.params.get("description") - state = module.params.get("state") - values = module.params.get("values") - - try: - connection = module.client("elasticache") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - exists = get_info(connection, parameter_group_name) - - # check that the needed requirements are available - if state == "present" and not (exists or parameter_group_family): - module.fail_json(msg="Creating a group requires the group_family parameter.") - elif state == "reset" and not exists: - module.fail_json( - msg=f"No group {parameter_group_name} to reset. Please create the group before using the state 'reset'." 
- ) - - # Taking action - changed = False - if state == "present": - if exists: - # confirm that the group exists without any actions - if not values: - response = exists - changed = False - # modify existing group - else: - modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) - changed, values = check_valid_modification(module, values, modifiable_params) - response = modify(module, connection, parameter_group_name, values) - # create group - else: - response, changed = create( - module, connection, parameter_group_name, parameter_group_family, group_description - ) - if values: - modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name) - changed, values = check_valid_modification(module, values, modifiable_params) - response = modify(module, connection, parameter_group_name, values) - elif state == "absent": - if exists: - # delete group - response, changed = delete(module, connection, parameter_group_name) - else: - response = {} - changed = False - elif state == "reset": - response, changed = reset(module, connection, parameter_group_name, values) - - facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response)) - - module.exit_json(**facts_result) - - -if __name__ == "__main__": - main() diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py deleted file mode 100644 index 0816527fb4f..00000000000 --- a/elasticache_snapshot.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elasticache_snapshot -version_added: 1.0.0 -short_description: Manage cache snapshots in Amazon ElastiCache -description: - - Manage cache snapshots in Amazon ElastiCache. - - Returns information about the specified snapshot. -author: - - "Sloane Hertel (@s-hertel)" -options: - name: - description: - - The name of the snapshot we want to create, copy, delete. - required: true - type: str - state: - description: - - Actions that will create, destroy, or copy a snapshot. - required: true - choices: ['present', 'absent', 'copy'] - type: str - replication_id: - description: - - The name of the existing replication group to make the snapshot. - type: str - cluster_id: - description: - - The name of an existing cache cluster in the replication group to make the snapshot. - type: str - target: - description: - - The name of a snapshot copy. - type: str - bucket: - description: - - The s3 bucket to which the snapshot is exported. - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
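Exporting a copy to S3 needs both target and bucket, which main() below enforces; a sketch with placeholder names:

    - name: 'Copy a snapshot to S3 (sketch, placeholder names)'
      community.aws.elasticache_snapshot:
        name: 'test-snapshot'
        state: 'copy'
        target: 'test-snapshot-copy'
        bucket: 'my-example-export-bucket'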
diff --git a/elasticache_snapshot.py b/elasticache_snapshot.py
deleted file mode 100644
index 0816527fb4f..00000000000
--- a/elasticache_snapshot.py
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: elasticache_snapshot
-version_added: 1.0.0
-short_description: Manage cache snapshots in Amazon ElastiCache
-description:
-  - Manage cache snapshots in Amazon ElastiCache.
-  - Returns information about the specified snapshot.
-author:
-  - "Sloane Hertel (@s-hertel)"
-options:
-  name:
-    description:
-      - The name of the snapshot we want to create, copy, or delete.
-    required: true
-    type: str
-  state:
-    description:
-      - Actions that will create, destroy, or copy a snapshot.
-    required: true
-    choices: ['present', 'absent', 'copy']
-    type: str
-  replication_id:
-    description:
-      - The name of the existing replication group from which to make the snapshot.
-    type: str
-  cluster_id:
-    description:
-      - The name of an existing cache cluster in the replication group from which to make the snapshot.
-    type: str
-  target:
-    description:
-      - The name of a snapshot copy.
-    type: str
-  bucket:
-    description:
-      - The S3 bucket to which the snapshot is exported.
-    type: str
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: 'Create a snapshot'
-  community.aws.elasticache_snapshot:
-    name: 'test-snapshot'
-    state: 'present'
-    cluster_id: '{{ cluster }}'
-    replication_id: '{{ replication }}'
-"""
-
-RETURN = r"""
-response_metadata:
-  description: Response metadata about the snapshot.
-  returned: always
-  type: dict
-  sample:
-    http_headers:
-      content-length: 1490
-      content-type: text/xml
-      date: 'Tue, 07 Feb 2017 16:43:04 GMT'
-      x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
-    http_status_code: 200
-    request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
-    retry_attempts: 0
-snapshot:
-  description: Snapshot data.
-  returned: always
-  type: dict
-  sample:
-    auto_minor_version_upgrade: true
-    cache_cluster_create_time: '2017-02-01T17:43:58.261000+00:00'
-    cache_cluster_id: test-please-delete
-    cache_node_type: cache.m1.small
-    cache_parameter_group_name: default.redis3.2
-    cache_subnet_group_name: default
-    engine: redis
-    engine_version: 3.2.4
-    node_snapshots:
-      cache_node_create_time: '2017-02-01T17:43:58.261000+00:00'
-      cache_node_id: 0001
-      cache_size:
-    num_cache_nodes: 1
-    port: 11211
-    preferred_availability_zone: us-east-1d
-    preferred_maintenance_window: wed:03:00-wed:04:00
-    snapshot_name: deletesnapshot
-    snapshot_retention_limit: 0
-    snapshot_source: manual
-    snapshot_status: creating
-    snapshot_window: 10:00-11:00
-    vpc_id: vpc-c248fda4
-changed:
-  description: If a snapshot has been created, deleted, or copied.
-  returned: always
-  type: bool
-  sample:
-    changed: true
-"""
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def create(module, connection, replication_id, cluster_id, name):
-    """Create an ElastiCache backup."""
-    try:
-        response = connection.create_snapshot(
-            ReplicationGroupId=replication_id, CacheClusterId=cluster_id, SnapshotName=name
-        )
-        changed = True
-    except is_boto3_error_code("SnapshotAlreadyExistsFault"):
-        response = {}
-        changed = False
-    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Unable to create the snapshot.")
-    return response, changed
-
-
-def copy(module, connection, name, target, bucket):
-    """Copy an ElastiCache backup."""
-    try:
-        response = connection.copy_snapshot(SourceSnapshotName=name, TargetSnapshotName=target, TargetBucket=bucket)
-        changed = True
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Unable to copy the snapshot.")
-    return response, changed
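The create() and copy() helpers above map directly onto two ElastiCache API calls. A minimal boto3 sketch of the same flow, assuming an existing replication group "myrepl" and an S3 bucket "my-export-bucket" (all names illustrative):

import boto3

client = boto3.client("elasticache")

# state=present: snapshot one member cluster of a replication group.
client.create_snapshot(
    ReplicationGroupId="myrepl",
    CacheClusterId="myrepl-001",
    SnapshotName="test-snapshot",
)

# state=copy: copy the snapshot, exporting it to an S3 bucket; the
# module requires both 'target' and 'bucket' for this state.
client.copy_snapshot(
    SourceSnapshotName="test-snapshot",
    TargetSnapshotName="test-snapshot-copy",
    TargetBucket="my-export-bucket",
)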
-def delete(module, connection, name):
-    """Delete an ElastiCache backup."""
-    try:
-        response = connection.delete_snapshot(SnapshotName=name)
-        changed = True
-    except is_boto3_error_code("SnapshotNotFoundFault"):
-        response = {}
-        changed = False
-    except is_boto3_error_code("InvalidSnapshotState"):  # pylint: disable=duplicate-except
-        module.fail_json(
-            msg=(
-                "Error: InvalidSnapshotState. The snapshot must be in an 'available' or 'failed' state to allow"
-                " deletion. You may need to wait a few minutes."
-            )
-        )
-    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Unable to delete the snapshot.")
-    return response, changed
-
-
-def main():
-    argument_spec = dict(
-        name=dict(required=True, type="str"),
-        state=dict(required=True, type="str", choices=["present", "absent", "copy"]),
-        replication_id=dict(type="str"),
-        cluster_id=dict(type="str"),
-        target=dict(type="str"),
-        bucket=dict(type="str"),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec)
-
-    name = module.params.get("name")
-    state = module.params.get("state")
-    replication_id = module.params.get("replication_id")
-    cluster_id = module.params.get("cluster_id")
-    target = module.params.get("target")
-    bucket = module.params.get("bucket")
-
-    try:
-        connection = module.client("elasticache")
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to connect to AWS")
-
-    changed = False
-    response = {}
-
-    if state == "present":
-        if not all((replication_id, cluster_id)):
-            module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
-        response, changed = create(module, connection, replication_id, cluster_id, name)
-    elif state == "absent":
-        response, changed = delete(module, connection, name)
-    elif state == "copy":
-        if not all((target, bucket)):
-            module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
-        response, changed = copy(module, connection, name, target, bucket)
-
-    facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
-
-    module.exit_json(**facts_result)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/elasticache_subnet_group.py b/elasticache_subnet_group.py
deleted file mode 100644
index f7740e696fb..00000000000
--- a/elasticache_subnet_group.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: elasticache_subnet_group
-version_added: 1.0.0
-short_description: Manage ElastiCache subnet groups
-description:
-  - Creates, modifies, and deletes ElastiCache subnet groups.
-options:
-  state:
-    description:
-      - Specifies whether the subnet group should be present or absent.
-    choices: [ 'present', 'absent' ]
-    default: 'present'
-    type: str
-  name:
-    description:
-      - Cache subnet group identifier.
-      - This value is automatically converted to lowercase.
-    required: true
-    type: str
-  description:
-    description:
-      - ElastiCache subnet group description.
-      - When not provided defaults to I(name) on subnet group creation.
-    type: str
-  subnets:
-    description:
-      - List of subnet IDs that make up the ElastiCache subnet group.
-      - At least one subnet must be provided when creating an ElastiCache subnet group.
-    type: list
-    elements: str
-author:
-  - "Tim Mahoney (@timmahoney)"
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-- name: Add or change a subnet group
-  community.aws.elasticache_subnet_group:
-    state: present
-    name: norwegian-blue
-    description: My Fancy Ex Parrot Subnet Group
-    subnets:
-      - subnet-aaaaaaaa
-      - subnet-bbbbbbbb
-
-- name: Remove a subnet group
-  community.aws.elasticache_subnet_group:
-    state: absent
-    name: norwegian-blue
-"""
-
-RETURN = r"""
-cache_subnet_group:
-  description: Description of the ElastiCache Subnet Group.
-  returned: always
-  type: dict
-  contains:
-    arn:
-      description: The Amazon Resource Name (ARN) of the cache subnet group.
-      returned: when the subnet group exists
-      type: str
-      sample: arn:aws:elasticache:us-east-1:123456789012:subnetgroup:norwegian-blue
-    description:
-      description: The description of the cache subnet group.
-      returned: when the cache subnet group exists
-      type: str
-      sample: My Fancy Ex Parrot Subnet Group
-    name:
-      description: The name of the cache subnet group.
-      returned: when the cache subnet group exists
-      type: str
-      sample: norwegian-blue
-    vpc_id:
-      description: The VPC ID of the cache subnet group.
-      returned: when the cache subnet group exists
-      type: str
-      sample: vpc-aabbccdd
-    subnet_ids:
-      description: The IDs of the subnets belonging to the cache subnet group.
-      returned: when the cache subnet group exists
-      type: list
-      elements: str
-      sample:
-        - subnet-aaaaaaaa
-        - subnet-bbbbbbbb
-"""
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def get_subnet_group(name):
-    try:
-        groups = client.describe_cache_subnet_groups(
-            aws_retry=True,
-            CacheSubnetGroupName=name,
-        )["CacheSubnetGroups"]
-    except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
-        return None
-    except (
-        botocore.exceptions.ClientError,
-        botocore.exceptions.BotoCoreError,
-    ) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Failed to describe subnet group")
-
-    if not groups:
-        return None
-
-    if len(groups) > 1:
-        module.fail_json(
-            msg="Found multiple matches for subnet group",
-            cache_subnet_groups=camel_dict_to_snake_dict(groups),
-        )
-
-    subnet_group = camel_dict_to_snake_dict(groups[0])
-
-    subnet_group["name"] = subnet_group["cache_subnet_group_name"]
-    subnet_group["description"] = subnet_group["cache_subnet_group_description"]
-
-    subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"])
-    subnet_group["subnet_ids"] = subnet_ids
-
-    return subnet_group
-
-
-def create_subnet_group(name, description, subnets):
-    if not subnets:
-        module.fail_json(msg="At least one subnet must be provided when creating a subnet group")
-
-    if module.check_mode:
-        return True
-
-    try:
-        if not description:
-            description = name
-        client.create_cache_subnet_group(
-            aws_retry=True,
-            CacheSubnetGroupName=name,
-            CacheSubnetGroupDescription=description,
-            SubnetIds=subnets,
-        )
-        return True
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to create subnet group")
-
-
-def update_subnet_group(subnet_group, name, description, subnets):
-    update_params = dict()
-    if description and subnet_group["description"] != description:
-        update_params["CacheSubnetGroupDescription"] = description
-    if subnets:
-        old_subnets = set(subnet_group["subnet_ids"])
-        new_subnets = set(subnets)
-        if old_subnets != new_subnets:
-            update_params["SubnetIds"] = list(subnets)
-
-    if not update_params:
-        return False
-
-    if module.check_mode:
-        return True
-
-    try:
-        client.modify_cache_subnet_group(
-            aws_retry=True,
-            CacheSubnetGroupName=name,
-            **update_params,
-        )
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to update subnet group")
-
-    return True
-
-
-def delete_subnet_group(name):
-    if module.check_mode:
-        return True
-
-    try:
-        client.delete_cache_subnet_group(
-            aws_retry=True,
-            CacheSubnetGroupName=name,
-        )
-        return True
-    except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
-        # AWS is "eventually consistent", cope with the race conditions where
-        # deletion hadn't completed when we ran describe
-        return False
-    except (
-        botocore.exceptions.ClientError,
-        botocore.exceptions.BotoCoreError,
-    ) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Failed to delete subnet group")
-
-
-def main():
-    argument_spec = dict(
-        state=dict(default="present", choices=["present", "absent"]),
-        name=dict(required=True),
-        description=dict(required=False),
-        subnets=dict(required=False, type="list", elements="str"),
-    )
-
-    global module
-    global client
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True,
-    )
-
-    state = module.params.get("state")
-    name = module.params.get("name").lower()
-    description = module.params.get("description")
-    subnets = module.params.get("subnets")
-
-    client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff())
-
-    subnet_group = get_subnet_group(name)
-    changed = False
-
-    if state == "present":
-        if not subnet_group:
-            result = create_subnet_group(name, description, subnets)
-            changed |= result
-        else:
-            result = update_subnet_group(subnet_group, name, description, subnets)
-            changed |= result
-        subnet_group = get_subnet_group(name)
-    else:
-        if subnet_group:
-            result = delete_subnet_group(name)
-            changed |= result
-        subnet_group = None
-
-    module.exit_json(changed=changed, cache_subnet_group=subnet_group)
-
-
-if __name__ == "__main__":
-    main()
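The update path in this module is a good illustration of the diff-before-call pattern: update_subnet_group() only invokes modify_cache_subnet_group when the description or the subnet set actually differs. A self-contained sketch of that check (the group data here is illustrative):

def build_update_params(current, description, subnets):
    """Return only the fields that differ from the current group."""
    params = {}
    if description and current["description"] != description:
        params["CacheSubnetGroupDescription"] = description
    # Compare subnets as sets: ordering differences are not a change.
    if subnets and set(current["subnet_ids"]) != set(subnets):
        params["SubnetIds"] = list(subnets)
    return params

current = {"description": "old", "subnet_ids": ["subnet-aaaaaaaa", "subnet-bbbbbbbb"]}
print(build_update_params(current, "old", ["subnet-bbbbbbbb", "subnet-aaaaaaaa"]))  # {} -> no API call needed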
diff --git a/elasticbeanstalk_app.py b/elasticbeanstalk_app.py
deleted file mode 100644
index bf11afbb2bf..00000000000
--- a/elasticbeanstalk_app.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: elasticbeanstalk_app
-version_added: 1.0.0
-
-short_description: Create, update, and delete an Elastic Beanstalk application
-
-description:
-  - Creates, updates, or deletes Elastic Beanstalk applications if I(app_name) is provided.
-  - Prior to release 5.0.0 this module was called C(community.aws.aws_elasticbeanstalk_app).
-    The usage did not change.
-
-options:
-  app_name:
-    description:
-      - Name of the Beanstalk application you wish to manage.
-    aliases: [ 'name' ]
-    type: str
-  description:
-    description:
-      - The description of the application.
-    type: str
-  state:
-    description:
-      - Whether to ensure the application is present or absent.
-    default: present
-    choices: ['absent','present']
-    type: str
-  terminate_by_force:
-    description:
-      - When I(terminate_by_force=true), running environments will be terminated before deleting the application.
-    default: false
-    type: bool
-author:
-  - Harpreet Singh (@hsingh)
-  - Stephen Granger (@viper233)
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""

-EXAMPLES = r"""
-# Create or update an application
-- community.aws.elasticbeanstalk_app:
-    app_name: Sample_App
-    description: "Hello World App"
-    state: present
-
-# Delete application
-- community.aws.elasticbeanstalk_app:
-    app_name: Sample_App
-    state: absent
-
-"""
-
-RETURN = r"""
-app:
-  description: Beanstalk application.
-  returned: always
-  type: dict
-  sample: {
-      "ApplicationName": "app-name",
-      "ConfigurationTemplates": [],
-      "DateCreated": "2016-12-28T14:50:03.185000+00:00",
-      "DateUpdated": "2016-12-28T14:50:03.185000+00:00",
-      "Description": "description",
-      "Versions": [
-          "1.0.0",
-          "1.0.1"
-      ]
-  }
-output:
-  description: Message indicating what change will occur.
-  returned: in check mode
-  type: str
-  sample: App is up-to-date
-"""
-
-try:
-    import botocore
-except ImportError:
-    pass  # handled by AnsibleAWSModule
-
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def describe_app(ebs, app_name, module):
-    apps = list_apps(ebs, app_name, module)
-
-    return None if len(apps) != 1 else apps[0]
-
-
-def list_apps(ebs, app_name, module):
-    try:
-        if app_name is not None:
-            apps = ebs.describe_applications(ApplicationNames=[app_name])
-        else:
-            apps = ebs.describe_applications()
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Could not describe application")
-
-    return apps.get("Applications", [])
-
-
-def check_app(ebs, app, module):
-    app_name = module.params["app_name"]
-    description = module.params["description"]
-    state = module.params["state"]
-    terminate_by_force = module.params["terminate_by_force"]
-
-    result = {}
-
-    if state == "present" and app is None:
-        result = dict(changed=True, output="App would be created")
-    elif state == "present" and app.get("Description", None) != description:
-        result = dict(changed=True, output="App would be updated", app=app)
-    elif state == "present" and app.get("Description", None) == description:
-        result = dict(changed=False, output="App is up-to-date", app=app)
-    elif state == "absent" and app is None:
-        result = dict(changed=False, output="App does not exist", app={})
-    elif state == "absent" and app is not None and terminate_by_force is True:
-        result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app)
-    elif state == "absent" and app is not None:
-        result = dict(changed=True, output="App will be deleted", app=app)
-
-    module.exit_json(**result)
-
-
-def filter_empty(**kwargs):
-    retval = {}
-    for k, v in kwargs.items():
-        if v:
-            retval[k] = v
-    return retval
-
-
-def main():
-    argument_spec = dict(
-        app_name=dict(aliases=["name"], type="str", required=False),
-        description=dict(),
-        state=dict(choices=["present", "absent"], default="present"),
-        terminate_by_force=dict(type="bool", default=False, required=False),
) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - app_name = module.params["app_name"] - description = module.params["description"] - state = module.params["state"] - terminate_by_force = module.params["terminate_by_force"] - - if app_name is None: - module.fail_json(msg='Module parameter "app_name" is required') - - result = {} - - ebs = module.client("elasticbeanstalk") - - app = describe_app(ebs, app_name, module) - - if module.check_mode: - check_app(ebs, app, module) - module.fail_json(msg="ASSERTION FAILURE: check_app() should not return control.") - - if state == "present": - if app is None: - try: - create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not create application") - - app = describe_app(ebs, app_name, module) - - result = dict(changed=True, app=app) - else: - if app.get("Description", None) != description: - try: - if not description: - ebs.update_application(ApplicationName=app_name) - else: - ebs.update_application(ApplicationName=app_name, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not update application") - - app = describe_app(ebs, app_name, module) - - result = dict(changed=True, app=app) - else: - result = dict(changed=False, app=app) - - else: - if app is None: - result = dict(changed=False, output="Application not found", app={}) - else: - try: - if terminate_by_force: - # Running environments will be terminated before deleting the application - ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force) - else: - ebs.delete_application(ApplicationName=app_name) - changed = True - except is_boto3_error_message("It is currently pending deletion"): - changed = False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Cannot terminate app") - - result = dict(changed=changed, app=app) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/elb_classic_lb_info.py b/elb_classic_lb_info.py deleted file mode 100644 index db3fd46ac48..00000000000 --- a/elb_classic_lb_info.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_classic_lb_info -version_added: 1.0.0 -short_description: Gather information about EC2 Elastic Load Balancers in AWS -description: - - Gather information about EC2 Elastic Load Balancers in AWS -author: - - "Michael Schultz (@mjschultz)" - - "Fernando Jose Pando (@nand0p)" -options: - names: - description: - - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned. - type: list - elements: str - default: [] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
-# Output format tries to match amazon.aws.ec2_elb_lb module input parameters - -# Gather information about all ELBs -- community.aws.elb_classic_lb_info: - register: elb_info - -- ansible.builtin.debug: - msg: "{{ item.dns_name }}" - loop: "{{ elb_info.elbs }}" - -# Gather information about a particular ELB -- community.aws.elb_classic_lb_info: - names: frontend-prod-elb - register: elb_info - -- ansible.builtin.debug: - msg: "{{ elb_info.elbs.0.dns_name }}" - -# Gather information about a set of ELBs -- community.aws.elb_classic_lb_info: - names: - - frontend-prod-elb - - backend-prod-elb - register: elb_info - -- ansible.builtin.debug: - msg: "{{ item.dns_name }}" - loop: "{{ elb_info.elbs }}" - -""" - -RETURN = r""" -elbs: - description: a list of load balancers - returned: always - type: list - sample: - elbs: - - attributes: - access_log: - enabled: false - connection_draining: - enabled: true - timeout: 300 - connection_settings: - idle_timeout: 60 - cross_zone_load_balancing: - enabled: true - availability_zones: - - "us-east-1a" - - "us-east-1b" - - "us-east-1c" - - "us-east-1d" - - "us-east-1e" - backend_server_description: [] - canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com - canonical_hosted_zone_name_id: XXXXXXXXXXXXXX - created_time: '2017-08-23T18:25:03.280000+00:00' - dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com - health_check: - healthy_threshold: 10 - interval: 30 - target: HTTP:80/index.html - timeout: 5 - unhealthy_threshold: 2 - instances: [] - instances_inservice: [] - instances_inservice_count: 0 - instances_outofservice: [] - instances_outofservice_count: 0 - instances_unknownservice: [] - instances_unknownservice_count: 0 - listener_descriptions: - - listener: - instance_port: 80 - instance_protocol: HTTP - load_balancer_port: 80 - protocol: HTTP - policy_names: [] - load_balancer_name: test-lb - policies: - app_cookie_stickiness_policies: [] - lb_cookie_stickiness_policies: [] - other_policies: [] - scheme: internet-facing - security_groups: - - sg-29d13055 - source_security_group: - group_name: default - owner_alias: XXXXXXXXXXXX - subnets: - - subnet-XXXXXXXX - - subnet-XXXXXXXX - tags: {} - vpc_id: vpc-c248fda4 -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -MAX_AWS_RETRIES = 5 -MAX_AWS_DELAY = 5 - - -def list_elbs(connection, load_balancer_names): - results = [] - - if not load_balancer_names: - for lb in get_all_lb(connection): - results.append(describe_elb(connection, lb)) - - for load_balancer_name in load_balancer_names: - lb = get_lb(connection, load_balancer_name) - if not lb: - continue - results.append(describe_elb(connection, lb)) - return results - - -def describe_elb(connection, lb): - description = camel_dict_to_snake_dict(lb) - name = lb["LoadBalancerName"] - instances = lb.get("Instances", []) - description["tags"] = get_tags(connection, name) - description["instances_inservice"], description["instances_inservice_count"] = lb_instance_health( - connection, name, instances, 
"InService" - ) - description["instances_outofservice"], description["instances_outofservice_count"] = lb_instance_health( - connection, name, instances, "OutOfService" - ) - description["instances_unknownservice"], description["instances_unknownservice_count"] = lb_instance_health( - connection, name, instances, "Unknown" - ) - description["attributes"] = get_lb_attributes(connection, name) - return description - - -@AWSRetry.jittered_backoff() -def get_all_lb(connection): - paginator = connection.get_paginator("describe_load_balancers") - return paginator.paginate().build_full_result()["LoadBalancerDescriptions"] - - -def get_lb(connection, load_balancer_name): - try: - return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])[ - "LoadBalancerDescriptions" - ][0] - except is_boto3_error_code("LoadBalancerNotFound"): - return [] - - -def get_lb_attributes(connection, load_balancer_name): - attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get( - "LoadBalancerAttributes", {} - ) - return camel_dict_to_snake_dict(attributes) - - -def get_tags(connection, load_balancer_name): - tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])["TagDescriptions"] - if not tags: - return {} - return boto3_tag_list_to_ansible_dict(tags[0]["Tags"]) - - -def lb_instance_health(connection, load_balancer_name, instances, state): - instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get( - "InstanceStates", [] - ) - instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state] - return instate, len(instate) - - -def main(): - argument_spec = dict( - names=dict(default=[], type="list", elements="str"), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - connection = module.client( - "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY) - ) - - try: - elbs = list_elbs(connection, module.params.get("names")) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get load balancer information.") - - module.exit_json(elbs=elbs) - - -if __name__ == "__main__": - main() diff --git a/elb_instance.py b/elb_instance.py deleted file mode 100644 index 6489a86bcf9..00000000000 --- a/elb_instance.py +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_instance -version_added: 1.0.0 -short_description: De-registers or registers instances from EC2 ELBs -description: - - This module de-registers or registers an AWS EC2 instance from the ELBs - that it belongs to. - - Will be marked changed when called only if there are ELBs found to operate on. -author: - - "John Jarvis (@jarv)" -options: - state: - description: - - Register or deregister the instance. - required: true - choices: ['present', 'absent'] - type: str - instance_id: - description: - - EC2 Instance ID. - required: true - type: str - ec2_elbs: - description: - - List of ELB names - - Required when I(state=present). 
- type: list - elements: str - enable_availability_zone: - description: - - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled. - - If I(enable_availability_zone=no), the task will fail if the availability zone is not enabled on the ELB. - type: bool - default: true - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. - type: bool - default: true - wait_timeout: - description: - - Number of seconds to wait for an instance to change state. - - If I(wait_timeout=0) then this module may return an error if a transient error occurs. - - If non-zero then any transient errors are ignored until the timeout is reached. - - Ignored when I(wait=no). - default: 0 - type: int -notes: - - The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release - 4.0.0 is no longer set. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# basic pre_task and post_task example -pre_tasks: - - name: Instance De-register - community.aws.elb_instance: - instance_id: "{{ ansible_ec2_instance_id }}" - state: absent - register: deregister_instances - delegate_to: localhost -roles: - - myrole -post_tasks: - - name: Instance Register - community.aws.elb_instance: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ deregister_instances.updated_elbs }}" - state: present - delegate_to: localhost -""" - -RETURN = r""" -updated_elbs: - description: A list of ELB names that the instance has been added to or removed from. - returned: always - type: list - elements: str -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None): - retry_decorator = AWSRetry.jittered_backoff() - self.module = module - self.client_asg = module.client("autoscaling", retry_decorator=retry_decorator) - self.client_ec2 = module.client("ec2", retry_decorator=retry_decorator) - self.client_elb = module.client("elb", retry_decorator=retry_decorator) - self.instance_id = instance_id - self.lbs = self._get_instance_lbs(ec2_elbs) - self.changed = False - self.updated_elbs = set() - - def deregister(self, wait, timeout): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - instance_ids = [i["InstanceId"] for i in lb["Instances"]] - if self.instance_id not in instance_ids: - continue - - self.updated_elbs.add(lb["LoadBalancerName"]) - - if self.module.check_mode: - self.changed = True - continue - - try: - self.client_elb.deregister_instances_from_load_balancer( - aws_retry=True, - LoadBalancerName=lb["LoadBalancerName"], - Instances=[{"InstanceId": self.instance_id}], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws( - e, "Failed to deregister instance from load balancer", load_balancer=lb, instance=self.instance_id - ) - - # The ELB is changing state in some way. 
Either an instance that's - # InService is moving to OutOfService, or an instance that's - # already OutOfService is being deregistered. - self.changed = True - - if wait: - for lb in self.lbs: - self._await_elb_instance_state(lb, "Deregistered", timeout) - - def register(self, wait, enable_availability_zone, timeout): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - for lb in self.lbs: - instance_ids = [i["InstanceId"] for i in lb["Instances"]] - if self.instance_id in instance_ids: - continue - - self.updated_elbs.add(lb["LoadBalancerName"]) - - if enable_availability_zone: - self.changed |= self._enable_availailability_zone(lb) - - if self.module.check_mode: - self.changed = True - continue - - try: - self.client_elb.register_instances_with_load_balancer( - aws_retry=True, - LoadBalancerName=lb["LoadBalancerName"], - Instances=[{"InstanceId": self.instance_id}], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws( - e, "Failed to register instance with load balancer", load_balancer=lb, instance=self.instance_id - ) - - self.changed = True - - if wait: - for lb in self.lbs: - self._await_elb_instance_state(lb, "InService", timeout) - - @AWSRetry.jittered_backoff() - def _describe_elbs(self, **params): - paginator = self.client_elb.get_paginator("describe_load_balancers") - results = paginator.paginate(**params).build_full_result() - return results["LoadBalancerDescriptions"] - - def exists(self, lbtest): - """Verify that the named ELB actually exists""" - - found = False - for lb in self.lbs: - if lb["LoadBalancerName"] == lbtest: - found = True - break - return found - - def _enable_availailability_zone(self, lb): - """Enable the current instance's availability zone in the provided lb. - Returns True if the zone was enabled or False if no change was made. 
- lb: load balancer""" - instance = self._get_instance() - desired_zone = instance["Placement"]["AvailabilityZone"] - - if desired_zone in lb["AvailabilityZones"]: - return False - - if self.module.check_mode: - return True - - try: - self.client_elb.enable_availability_zones_for_load_balancer( - aws_retry=True, - LoadBalancerName=lb["LoadBalancerName"], - AvailabilityZones=[desired_zone], - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Failed to enable AZ on load balancers", load_balancer=lb, zone=desired_zone) - - return True - - def _await_elb_instance_state(self, lb, awaited_state, timeout): - """Wait for an ELB to change state""" - if self.module.check_mode: - return - - initial_state = self._get_instance_health(lb) - - if awaited_state == initial_state: - return - - if awaited_state == "InService": - waiter = self.client_elb.get_waiter("instance_in_service") - elif awaited_state == "Deregistered": - waiter = self.client_elb.get_waiter("instance_deregistered") - elif awaited_state == "OutOfService": - waiter = self.client_elb.get_waiter("instance_deregistered") - else: - self.module.fail_json(msg="Could not wait for unknown state", awaited_state=awaited_state) - - try: - waiter.wait( - LoadBalancerName=lb["LoadBalancerName"], - Instances=[{"InstanceId": self.instance_id}], - WaiterConfig={"Delay": 1, "MaxAttempts": timeout}, - ) - except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws( - e, msg="Timeout waiting for instance to reach desired state", awaited_state=awaited_state - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws( - e, msg="Error while waiting for instance to reach desired state", awaited_state=awaited_state - ) - - return - - def _get_instance_health(self, lb): - """ - Check instance health, should return status object or None under - certain error conditions. - """ - try: - status = self.client_elb.describe_instance_health( - aws_retry=True, - LoadBalancerName=lb["LoadBalancerName"], - Instances=[{"InstanceId": self.instance_id}], - )["InstanceStates"] - except is_boto3_error_code("InvalidInstance"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Failed to get instance health") - - if not status: - return None - - return status[0]["State"] - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - list_params = dict() - if not ec2_elbs: - ec2_elbs = self._get_auto_scaling_group_lbs() - - if ec2_elbs: - list_params["LoadBalancerNames"] = ec2_elbs - - try: - elbs = self._describe_elbs(**list_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Failed to describe load balancers") - - if ec2_elbs: - return elbs - - # If ec2_elbs wasn't specified, then filter out LBs we're not a member - # of. 
- lbs = [] - for lb in elbs: - instance_ids = [i["InstanceId"] for i in lb["Instances"]] - if self.instance_id in instance_ids: - lbs.append(lb) - - return lbs - - def _get_auto_scaling_group_lbs(self): - """Returns a list of ELBs associated with self.instance_id - indirectly through its auto scaling group membership""" - - try: - asg_instances = self.client_asg.describe_auto_scaling_instances( - aws_retry=True, InstanceIds=[self.instance_id] - )["AutoScalingInstances"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") - - if len(asg_instances) > 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") - - if not asg_instances: - # Instance isn't a member of an ASG - return [] - - asg_name = asg_instances[0]["AutoScalingGroupName"] - - try: - asg_instances = self.client_asg.describe_auto_scaling_groups( - aws_retry=True, AutoScalingGroupNames=[asg_name] - )["AutoScalingGroups"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") - - if len(asg_instances) != 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - - return asg_instances[0]["LoadBalancerNames"] - - def _get_instance(self): - """Returns the description of an instance""" - try: - result = self.client_ec2.describe_instances(aws_retry=True, InstanceIds=[self.instance_id]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Failed to describe ASG Instance") - return result["Reservations"][0]["Instances"][0] - - -def main(): - argument_spec = dict( - state={"required": True, "choices": ["present", "absent"]}, - instance_id={"required": True}, - ec2_elbs={"default": None, "required": False, "type": "list", "elements": "str"}, - enable_availability_zone={"default": True, "required": False, "type": "bool"}, - wait={"required": False, "default": True, "type": "bool"}, - wait_timeout={"required": False, "default": 0, "type": "int"}, - ) - required_if = [ - ("state", "present", ["ec2_elbs"]), - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True, - ) - - ec2_elbs = module.params["ec2_elbs"] - wait = module.params["wait"] - enable_availability_zone = module.params["enable_availability_zone"] - timeout = module.params["wait_timeout"] - instance_id = module.params["instance_id"] - - elb_man = ElbManager(module, instance_id, ec2_elbs) - - if ec2_elbs is not None: - for elb in ec2_elbs: - if not elb_man.exists(elb): - module.fail_json(msg=f"ELB {elb} does not exist") - - if module.params["state"] == "present": - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params["state"] == "absent": - elb_man.deregister(wait, timeout) - - module.exit_json( - changed=elb_man.changed, - updated_elbs=list(elb_man.updated_elbs), - ) - - -if __name__ == "__main__": - main() diff --git a/elb_network_lb.py b/elb_network_lb.py deleted file mode 100644 index 76e2454aa65..00000000000 --- a/elb_network_lb.py +++ /dev/null @@ -1,501 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Rob White (@wimnat) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_network_lb -version_added: 1.0.0 -short_description: Manage a Network Load Balancer 
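The _await_elb_instance_state() helper in elb_instance above leans on botocore's built-in waiters for the classic "elb" client rather than a hand-rolled polling loop. A minimal boto3 sketch of the same wait, with illustrative load balancer and instance IDs ("instance_deregistered" is the waiter used for the deregistration path):

import boto3

elb = boto3.client("elb")

# Poll once per second, up to 300 attempts, until the instance
# reports InService on the given load balancer.
waiter = elb.get_waiter("instance_in_service")
waiter.wait(
    LoadBalancerName="frontend-prod-elb",
    Instances=[{"InstanceId": "i-1234567"}],
    WaiterConfig={"Delay": 1, "MaxAttempts": 300},
)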
-description: - - Manage an AWS Network Elastic Load Balancer. See - U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details. -author: - - "Rob White (@wimnat)" -options: - cross_zone_load_balancing: - description: - - Indicates whether cross-zone load balancing is enabled. - - Defaults to C(false). - type: bool - deletion_protection: - description: - - Indicates whether deletion protection for the ELB is enabled. - - Defaults to C(false). - type: bool - listeners: - description: - - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys - are CamelCased. - type: list - elements: dict - suboptions: - Port: - description: The port on which the load balancer is listening. - type: int - required: true - Protocol: - description: The protocol for connections from clients to the load balancer. - type: str - required: true - Certificates: - description: The SSL server certificate. - type: list - elements: dict - suboptions: - CertificateArn: - description: The Amazon Resource Name (ARN) of the certificate. - type: str - SslPolicy: - description: The security policy that defines which ciphers and protocols are supported. - type: str - DefaultActions: - description: The default actions for the listener. - required: true - type: list - elements: dict - suboptions: - Type: - description: The type of action. - type: str - TargetGroupArn: - description: - - The Amazon Resource Name (ARN) of the target group. - - Mutually exclusive with I(TargetGroupName). - type: str - TargetGroupName: - description: - - The name of the target group. - - Mutually exclusive with I(TargetGroupArn). - name: - description: - - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric - characters or hyphens, and must not begin or end with a hyphen. - required: true - type: str - purge_listeners: - description: - - If I(purge_listeners=true), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. - - If the I(listeners) parameter is not set then listeners will not be modified. - default: true - type: bool - subnet_mappings: - description: - - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP - to attach to the load balancer or the internal IP address for an internal load balancer. You can specify one Elastic IP address or internal - address per subnet. - - This parameter is mutually exclusive with I(subnets). - type: list - elements: dict - subnets: - description: - - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from - at least two Availability Zones. - - Required when I(state=present). - - This parameter is mutually exclusive with I(subnet_mappings). - type: list - elements: str - scheme: - description: - - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. - default: internet-facing - choices: [ 'internet-facing', 'internal' ] - type: str - state: - description: - - Create or destroy the load balancer. - - The default changed from C('absent') to C('present') in release 4.0.0. 
-    choices: [ 'present', 'absent' ]
-    type: str
-    default: 'present'
-  wait:
-    description:
-      - Whether or not to wait for the network load balancer to reach the desired state.
-    type: bool
-  wait_timeout:
-    description:
-      - The duration in seconds to wait, used in conjunction with I(wait).
-    type: int
-  ip_address_type:
-    description:
-      - Sets the type of IP addresses used by the subnets of the specified Network Load Balancer.
-    choices: [ 'ipv4', 'dualstack' ]
-    type: str
-notes:
-  - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
-  - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.tags
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create an ELB and attach a listener
-  community.aws.elb_network_lb:
-    name: myelb
-    subnets:
-      - subnet-012345678
-      - subnet-abcdef000
-    listeners:
-      - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
-        Port: 80 # Required. The port on which the load balancer is listening.
-        DefaultActions:
-          - Type: forward # Required. Only 'forward' is accepted at this time
-            TargetGroupName: mytargetgroup # Required. The name of the target group
-    state: present
-
-- name: Create an ELB with an attached Elastic IP address
-  community.aws.elb_network_lb:
-    name: myelb
-    subnet_mappings:
-      - SubnetId: subnet-012345678
-        AllocationId: eipalloc-aabbccdd
-    listeners:
-      - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
-        Port: 80 # Required. The port on which the load balancer is listening.
-        DefaultActions:
-          - Type: forward # Required. Only 'forward' is accepted at this time
-            TargetGroupName: mytargetgroup # Required. The name of the target group
-    state: present
-
-- name: Create an internal ELB with a specified IP address
-  community.aws.elb_network_lb:
-    name: myelb
-    scheme: internal
-    subnet_mappings:
-      - SubnetId: subnet-012345678
-        PrivateIPv4Address: 192.168.0.1 # Must be an address from within the CIDR of the subnet.
-    listeners:
-      - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
-        Port: 80 # Required. The port on which the load balancer is listening.
-        DefaultActions:
-          - Type: forward # Required. Only 'forward' is accepted at this time
-            TargetGroupName: mytargetgroup # Required. The name of the target group
-    state: present
-
-- name: Remove an ELB
-  community.aws.elb_network_lb:
-    name: myelb
-    state: absent
-
-"""
-
-RETURN = r"""
-load_balancer:
-  description: A representation of the Network Load Balancer
-  returned: when state is present
-  type: dict
-  version_added: 5.0.0
-  contains:
-    availability_zones:
-      description: The Availability Zones for the load balancer.
-      returned: when state is present
-      type: list
-      sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]"
-    canonical_hosted_zone_id:
-      description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
- returned: when state is present - type: str - sample: ABCDEF12345678 - created_time: - description: The date and time the load balancer was created. - returned: when state is present - type: str - sample: "2015-02-12T02:14:02+00:00" - deletion_protection_enabled: - description: Indicates whether deletion protection is enabled. - returned: when state is present - type: str - sample: true - dns_name: - description: The public DNS name of the load balancer. - returned: when state is present - type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com - idle_timeout_timeout_seconds: - description: The idle timeout value, in seconds. - returned: when state is present - type: str - sample: 60 - ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. - returned: when state is present - type: str - sample: ipv4 - listeners: - description: Information about the listeners. - returned: when state is present - type: complex - contains: - listener_arn: - description: The Amazon Resource Name (ARN) of the listener. - returned: when state is present - type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - returned: when state is present - type: str - sample: "" - port: - description: The port on which the load balancer is listening. - returned: when state is present - type: int - sample: 80 - protocol: - description: The protocol for connections from clients to the load balancer. - returned: when state is present - type: str - sample: HTTPS - certificates: - description: The SSL server certificate. - returned: when state is present - type: complex - contains: - certificate_arn: - description: The Amazon Resource Name (ARN) of the certificate. - returned: when state is present - type: str - sample: "" - ssl_policy: - description: The security policy that defines which ciphers and protocols are supported. - returned: when state is present - type: str - sample: "" - default_actions: - description: The default actions for the listener. - returned: when state is present - type: str - contains: - type: - description: The type of action. - returned: when state is present - type: str - sample: "" - target_group_arn: - description: The Amazon Resource Name (ARN) of the target group. - returned: when state is present - type: str - sample: "" - load_balancer_arn: - description: The Amazon Resource Name (ARN) of the load balancer. - returned: when state is present - type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/001122334455 - load_balancer_name: - description: The name of the load balancer. - returned: when state is present - type: str - sample: my-elb - load_balancing_cross_zone_enabled: - description: Indicates whether cross-zone load balancing is enabled. - returned: when state is present - type: str - sample: true - scheme: - description: Internet-facing or internal load balancer. - returned: when state is present - type: str - sample: internal - state: - description: The state of the load balancer. - returned: when state is present - type: dict - sample: "{'code': 'active'}" - tags: - description: The tags attached to the load balancer. - returned: when state is present - type: dict - sample: "{ - 'Tag': 'Example' - }" - type: - description: The type of load balancer. - returned: when state is present - type: str - sample: network - vpc_id: - description: The ID of the VPC for the load balancer. 
- returned: when state is present - type: str - sample: vpc-0011223344 -""" - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_or_update_elb(elb_obj): - """Create ELB or modify main attributes. json_exit here""" - if elb_obj.elb: - # ELB exists so check subnets, security groups and tags match what has been passed - - # Subnets - if not elb_obj.compare_subnets(): - elb_obj.modify_subnets() - - # Tags - only need to play with tags if tags parameter has been set to something - if elb_obj.tags is not None: - # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags( - boto3_tag_list_to_ansible_dict(elb_obj.elb["tags"]), - boto3_tag_list_to_ansible_dict(elb_obj.tags), - elb_obj.purge_tags, - ) - if tags_to_delete: - elb_obj.delete_tags(tags_to_delete) - - # Add/update tags - if tags_need_modify: - elb_obj.modify_tags() - - else: - # Create load balancer - elb_obj.create_elb() - - # ELB attributes - elb_obj.update_elb_attributes() - elb_obj.modify_elb_attributes() - - # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb["LoadBalancerArn"]) - - listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() - - # Delete listeners - for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener( - elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb["LoadBalancerArn"] - ) - listener_obj.delete() - listeners_obj.changed = True - - # Add listeners - for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb["LoadBalancerArn"]) - listener_obj.add() - listeners_obj.changed = True - - # Modify listeners - for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener( - elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb["LoadBalancerArn"] - ) - listener_obj.modify() - listeners_obj.changed = True - - # If listeners changed, mark ELB as changed - if listeners_obj.changed: - elb_obj.changed = True - - # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get("ip_address_type") is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get("ip_address_type")) - - # Update the objects to pickup changes - # Get the ELB again - elb_obj.update() - # Get the ELB listeners again - listeners_obj.update() - # Update the ELB attributes - elb_obj.update_elb_attributes() - - # Convert to snake_case and merge in everything we want to return to the user - snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) - snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb["listeners"] = [] - for listener in listeners_obj.current_listeners: - snaked_elb["listeners"].append(camel_dict_to_snake_dict(listener)) - - # Change tags to ansible friendly dict - snaked_elb["tags"] = boto3_tag_list_to_ansible_dict(snaked_elb["tags"]) 
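The tag handling above relies on compare_aws_tags() from amazon.aws to turn the current and desired tag dicts into a minimal set of API operations. A small illustrative sketch of its behaviour (requires the amazon.aws collection importable on the PYTHONPATH; the tag values are made up):

from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

current = {"Env": "prod", "Owner": "team-a"}
desired = {"Env": "prod", "CostCenter": "42"}

# Returns (tags_to_set, tag_keys_to_unset); with purge_tags=True,
# keys missing from the desired dict are scheduled for removal.
to_set, to_unset = compare_aws_tags(current, desired, purge_tags=True)
print(to_set)    # {'CostCenter': '42'}
print(to_unset)  # ['Owner']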
- - # ip address type - snaked_elb["ip_address_type"] = elb_obj.get_elb_ip_address_type() - - elb_obj.module.exit_json(changed=elb_obj.changed, load_balancer=snaked_elb, **snaked_elb) - - -def delete_elb(elb_obj): - if elb_obj.elb: - elb_obj.delete() - - elb_obj.module.exit_json(changed=elb_obj.changed) - - -def main(): - argument_spec = dict( - cross_zone_load_balancing=dict(type="bool"), - deletion_protection=dict(type="bool"), - listeners=dict( - type="list", - elements="dict", - options=dict( - Protocol=dict(type="str", required=True), - Port=dict(type="int", required=True), - SslPolicy=dict(type="str"), - Certificates=dict(type="list", elements="dict"), - DefaultActions=dict(type="list", required=True, elements="dict"), - ), - ), - name=dict(required=True, type="str"), - purge_listeners=dict(default=True, type="bool"), - purge_tags=dict(default=True, type="bool"), - subnets=dict(type="list", elements="str"), - subnet_mappings=dict(type="list", elements="dict"), - scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), - state=dict(choices=["present", "absent"], type="str", default="present"), - tags=dict(type="dict", aliases=["resource_tags"]), - wait_timeout=dict(type="int"), - wait=dict(type="bool"), - ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]), - ) - - required_if = [ - ["state", "present", ["subnets", "subnet_mappings"], True], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - mutually_exclusive=[["subnets", "subnet_mappings"]], - ) - - # Check for subnets or subnet_mappings if state is present - state = module.params.get("state") - - # Quick check of listeners parameters - listeners = module.params.get("listeners") - if listeners is not None: - for listener in listeners: - for key in listener.keys(): - protocols_list = ["TCP", "TLS", "UDP", "TCP_UDP"] - if key == "Protocol" and listener[key] not in protocols_list: - module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list)) - - connection = module.client("elbv2") - connection_ec2 = module.client("ec2") - - elb = NetworkLoadBalancer(connection, connection_ec2, module) - - if state == "present": - create_or_update_elb(elb) - else: - delete_elb(elb) - - -if __name__ == "__main__": - main() diff --git a/elb_target.py b/elb_target.py deleted file mode 100644 index d7dfaf824cb..00000000000 --- a/elb_target.py +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_target -version_added: 1.0.0 -short_description: Manage a target in a target group -description: - - Used to register or deregister a target in a target group. -author: - - "Rob White (@wimnat)" -options: - deregister_unused: - description: - - The default behaviour for targets that are unused is to leave them registered. - - If instead you would like to remove them set I(deregister_unused=true). - default: false - type: bool - target_az: - description: - - An Availability Zone or C(all). This determines whether the target receives traffic from the load balancer nodes in the specified - Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target - type of the target group is instance. - type: str - target_group_arn: - description: - - The Amazon Resource Name (ARN) of the target group. 
-      - Mutually exclusive with I(target_group_name).
-    type: str
-  target_group_name:
-    description:
-      - The name of the target group.
-      - Mutually exclusive with I(target_group_arn).
-    type: str
-  target_id:
-    description:
-      - The ID of the target.
-    required: true
-    type: str
-  target_port:
-    description:
-      - The port on which the target is listening. You can specify a port override. If a target is already registered,
-        you can register it again using a different port.
-      - The default port for a target is the port for the target group.
-    required: false
-    type: int
-  target_status:
-    description:
-      - Blocks and waits for the target status to equal the given value. For more detail on target status see
-        U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states)
-    required: false
-    choices: [ 'initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable' ]
-    type: str
-  target_status_timeout:
-    description:
-      - Maximum time in seconds to wait for I(target_status) change.
-    required: false
-    default: 60
-    type: int
-  state:
-    description:
-      - Register or deregister the target.
-    required: true
-    choices: [ 'present', 'absent' ]
-    type: str
-
-notes:
-  - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.
-
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Register an IP address target to a target group
-  community.aws.elb_target:
-    target_group_name: myiptargetgroup
-    target_id: 10.0.0.10
-    state: present
-
-- name: Register an instance target to a target group
-  community.aws.elb_target:
-    target_group_name: mytargetgroup
-    target_id: i-1234567
-    state: present
-
-- name: Deregister a target from a target group
-  community.aws.elb_target:
-    target_group_name: mytargetgroup
-    target_id: i-1234567
-    state: absent
-
-# Modify a target to use a different port
-- name: Register a target to a target group
-  community.aws.elb_target:
-    target_group_name: mytargetgroup
-    target_id: i-1234567
-    target_port: 8080
-    state: present
-
-"""
-
-RETURN = r"""
-
-"""
-
-from time import sleep
-from time import time
-
-try:
-    import botocore
-except ImportError:
-    pass  # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"])
-def describe_target_groups_with_backoff(connection, tg_name):
-    return connection.describe_target_groups(Names=[tg_name])
-
-
-def convert_tg_name_to_arn(connection, module, tg_name):
-    try:
-        response = describe_target_groups_with_backoff(connection, tg_name)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg=f"Unable to describe target group {tg_name}")
-
-    tg_arn = response["TargetGroups"][0]["TargetGroupArn"]
-
-    return tg_arn
-
-
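The register/deregister flow below hinges on describe_target_health, which reports both a State and, for unregistered targets, a Reason of "Target.NotRegistered". A minimal boto3 sketch of registering a target and polling its health directly, with an illustrative target group ARN and instance ID (cf. target_status_check below):

import time

import boto3

elbv2 = boto3.client("elbv2")
tg_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/mytargetgroup/0123456789abcdef"
target = {"Id": "i-1234567"}

elbv2.register_targets(TargetGroupArn=tg_arn, Targets=[target])

# Poll once per second until the target leaves the 'initial' state.
for _ in range(60):
    health = elbv2.describe_target_health(TargetGroupArn=tg_arn, Targets=[target])
    state = health["TargetHealthDescriptions"][0]["TargetHealth"]["State"]
    if state != "initial":
        break
    time.sleep(1)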
else:
-        tg = [target]
-
-    return connection.describe_target_health(TargetGroupArn=tg_arn, Targets=tg)
-
-
-def describe_targets(connection, module, tg_arn, target=None):
-    """
-    Describe targets in a target group
-
-    :param module: ansible module object
-    :param connection: boto3 connection
-    :param tg_arn: target group arn
-    :param target: dictionary containing target id and port
-    :return:
-    """
-
-    try:
-        targets = describe_targets_with_backoff(connection, tg_arn, target)["TargetHealthDescriptions"]
-        if not targets:
-            return {}
-        return targets[0]
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg=f"Unable to describe target health for target {target}")
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10)
-def register_target_with_backoff(connection, target_group_arn, target):
-    connection.register_targets(TargetGroupArn=target_group_arn, Targets=[target])
-
-
-def register_target(connection, module):
-    """
-    Registers a target to a target group
-
-    :param module: ansible module object
-    :param connection: boto3 connection
-    :return:
-    """
-
-    target_az = module.params.get("target_az")
-    target_group_arn = module.params.get("target_group_arn")
-    target_id = module.params.get("target_id")
-    target_port = module.params.get("target_port")
-    target_status = module.params.get("target_status")
-    target_status_timeout = module.params.get("target_status_timeout")
-    changed = False
-
-    if not target_group_arn:
-        target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
-
-    target = dict(Id=target_id)
-    if target_az:
-        target["AvailabilityZone"] = target_az
-    if target_port:
-        target["Port"] = target_port
-
-    target_description = describe_targets(connection, module, target_group_arn, target)
-
-    if "Reason" in target_description["TargetHealth"]:
-        if target_description["TargetHealth"]["Reason"] == "Target.NotRegistered":
-            try:
-                register_target_with_backoff(connection, target_group_arn, target)
-                changed = True
-                if target_status:
-                    target_status_check(
-                        connection, module, target_group_arn, target, target_status, target_status_timeout
-                    )
-            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                module.fail_json_aws(e, msg=f"Unable to register target {target}")
-
-    # Get all targets for the target group
-    target_descriptions = describe_targets(connection, module, target_group_arn)
-
-    module.exit_json(
-        changed=changed,
-        target_health_descriptions=camel_dict_to_snake_dict(target_descriptions),
-        target_group_arn=target_group_arn,
-    )
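-# The register path above boils down to two ELBv2 API calls. As a rough
-# standalone sketch (hypothetical target and ARN values; boto3's built-in
-# "target_in_service" waiter stands in for the module's target_status
-# polling loop):
-#
-#   import boto3
-#
-#   elbv2 = boto3.client("elbv2", region_name="us-east-1")
-#   # Hypothetical target and target group, for illustration only
-#   target = {"Id": "i-0123456789abcdef0", "Port": 8080}
-#   tg_arn = (
-#       "arn:aws:elasticloadbalancing:us-east-1:123456789012:"
-#       "targetgroup/example/0123456789abcdef"
-#   )
-#
-#   elbv2.register_targets(TargetGroupArn=tg_arn, Targets=[target])
-#   elbv2.get_waiter("target_in_service").wait(TargetGroupArn=tg_arn, Targets=[target])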
-@AWSRetry.jittered_backoff(retries=10, delay=10)
-def deregister_target_with_backoff(connection, target_group_arn, target):
-    connection.deregister_targets(TargetGroupArn=target_group_arn, Targets=[target])
-
-
-def deregister_target(connection, module):
-    """
-    Deregisters a target from a target group
-
-    :param module: ansible module object
-    :param connection: boto3 connection
-    :return:
-    """
-
-    deregister_unused = module.params.get("deregister_unused")
-    target_group_arn = module.params.get("target_group_arn")
-    target_id = module.params.get("target_id")
-    target_port = module.params.get("target_port")
-    target_status = module.params.get("target_status")
-    target_status_timeout = module.params.get("target_status_timeout")
-    changed = False
-
-    if not target_group_arn:
-        target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
-
-    target = dict(Id=target_id)
-    if target_port:
-        target["Port"] = target_port
-
-    target_description = describe_targets(connection, module, target_group_arn, target)
-    current_target_state = target_description["TargetHealth"]["State"]
-    current_target_reason = target_description["TargetHealth"].get("Reason")
-
-    needs_deregister = False
-
-    if deregister_unused and current_target_state == "unused":
-        if current_target_reason != "Target.NotRegistered":
-            needs_deregister = True
-    elif current_target_state not in ["unused", "draining"]:
-        needs_deregister = True
-
-    if needs_deregister:
-        try:
-            deregister_target_with_backoff(connection, target_group_arn, target)
-            changed = True
-        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg=f"Unable to deregister target {target}")
-    else:
-        if current_target_reason != "Target.NotRegistered" and current_target_state != "draining":
-            module.warn(
-                warning="Your specified target has an 'unused' state but is still registered to the target group. "
-                + "To force deregistration use the 'deregister_unused' option."
-            )
-
-    if target_status:
-        target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
-
-    # Get all targets for the target group
-    target_descriptions = describe_targets(connection, module, target_group_arn)
-
-    module.exit_json(
-        changed=changed,
-        target_health_descriptions=camel_dict_to_snake_dict(target_descriptions),
-        target_group_arn=target_group_arn,
-    )
-
-
-def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout):
-    reached_state = False
-    timeout = target_status_timeout + time()
-    while time() < timeout:
-        health_state = describe_targets(connection, module, target_group_arn, target)["TargetHealth"]["State"]
-        if health_state == target_status:
-            reached_state = True
-            break
-        sleep(1)
-    if not reached_state:
-        module.fail_json(
-            msg=f"Status check timeout of {target_status_timeout} exceeded, last status was {health_state}"
-        )
-
-
-def main():
-    argument_spec = dict(
-        deregister_unused=dict(type="bool", default=False),
-        target_az=dict(type="str"),
-        target_group_arn=dict(type="str"),
-        target_group_name=dict(type="str"),
-        target_id=dict(type="str", required=True),
-        target_port=dict(type="int"),
-        target_status=dict(
-            choices=["initial", "healthy", "unhealthy", "unused", "draining", "unavailable"], type="str"
-        ),
-        target_status_timeout=dict(type="int", default=60),
-        state=dict(required=True, choices=["present", "absent"], type="str"),
-    )
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[["target_group_arn", "target_group_name"]],
-    )
-
-    try:
-        connection = module.client("elbv2")
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to connect to AWS")
-
-    state = module.params.get("state")
-
-    if state == "present":
-        register_target(connection, module)
-    else:
-        deregister_target(connection, module)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/elb_target_group.py b/elb_target_group.py
deleted file mode 100644
index 4eb38f4c2d4..00000000000
--- a/elb_target_group.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: elb_target_group
-version_added: 1.0.0
-short_description: Manage a target group for an
Application or Network load balancer -description: - - Manage an AWS Elastic Load Balancer target group. See - U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or - U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details. -author: - - "Rob White (@wimnat)" -options: - deregistration_delay_timeout: - description: - - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. - The range is 0-3600 seconds. - type: int - deregistration_connection_termination: - description: - - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. - - Using this option is only supported when attaching to a Network Load Balancer (NLB). - type: bool - default: false - required: false - version_added: 3.1.0 - health_check_protocol: - description: - - The protocol the load balancer uses when performing health checks on targets. - required: false - choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] - type: str - health_check_port: - description: - - The port the load balancer uses when performing health checks on targets. - Can be set to 'traffic-port' to match target port. - - When not defined will default to the port on which each target receives traffic from the load balancer. - required: false - type: str - health_check_path: - description: - - The ping path that is the destination on the targets for health checks. The path must be defined in order to set a health check. - - Requires the I(health_check_protocol) parameter to be set. - required: false - type: str - health_check_interval: - description: - - The approximate amount of time, in seconds, between health checks of an individual target. - required: false - type: int - health_check_timeout: - description: - - The amount of time, in seconds, during which no response from a target means a failed health check. - required: false - type: int - healthy_threshold_count: - description: - - The number of consecutive health checks successes required before considering an unhealthy target healthy. - required: false - type: int - modify_targets: - description: - - Whether or not to alter existing targets in the group to match what is passed with the module - required: false - default: true - type: bool - name: - description: - - The name of the target group. - required: true - type: str - port: - description: - - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. - - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb). - required: false - type: int - protocol: - description: - - The protocol to use for routing traffic to the targets. - - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb). - required: false - choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] - type: str - protocol_version: - description: - - Specifies protocol version. - - The protocol_version parameter is immutable and cannot be changed when updating an elb_target_group. - required: false - choices: ['GRPC', 'HTTP1', 'HTTP2'] - type: str - version_added: 5.1.0 - state: - description: - - Create or destroy the target group. 
- required: true - choices: [ 'present', 'absent' ] - type: str - stickiness_enabled: - description: - - Indicates whether sticky sessions are enabled. - type: bool - stickiness_lb_cookie_duration: - description: - - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load - balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). - type: int - stickiness_app_cookie_duration: - description: - - The time period, in seconds, during which requests from a client - should be routed to the same target. After this time period expires, - the application-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). - type: int - version_added: 1.5.0 - stickiness_app_cookie_name: - description: - - The name of the application cookie. Required if I(stickiness_type=app_cookie). - type: str - version_added: 1.5.0 - stickiness_type: - description: - - The type of sticky sessions. - - Valid values are C(lb_cookie), C(app_cookie) or C(source_ip). - - If not set AWS will default to C(lb_cookie) for Application Load Balancers or C(source_ip) for Network Load Balancers. - type: str - load_balancing_algorithm_type: - description: - - The type of load balancing algorithm to use. - - Changing the load balancing algorithm is only supported when used with Application Load Balancers (ALB). - - If not set AWS will default to C(round_robin). - choices: ['round_robin', 'least_outstanding_requests'] - type: str - version_added: 3.2.0 - successful_response_codes: - description: - - The HTTP codes to use when checking for a successful response from a target. - - Accepts multiple values (for example, "200,202") or a range of values (for example, "200-299"). - - Requires the I(health_check_protocol) parameter to be set. - required: false - type: str - target_type: - description: - - The type of target that you must specify when registering targets with this target group. The possible values are - C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address), C(lambda) (target is specified by ARN), - or C(alb) (target is specified by ARN). - Note that you can't specify targets for a target group using more than one type. Target types lambda and alb only accept one target. When more than - one target is specified, only the first one is used. All additional targets are ignored. - If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target - group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). - You can't specify publicly routable IP addresses. - - The default behavior is C(instance). - required: false - choices: ['instance', 'ip', 'lambda', 'alb'] - type: str - targets: - description: - - A list of targets to assign to the target group. This parameter defaults to an empty list. Unless you set the 'modify_targets' parameter then - all existing targets will be removed from the group. The list should be an Id and a Port parameter. See the Examples for detail. - required: false - type: list - elements: dict - unhealthy_threshold_count: - description: - - The number of consecutive health check failures required before considering a target unhealthy. - required: false - type: int - vpc_id: - description: - - The identifier of the virtual private cloud (VPC). 
-      - Required when I(state) is C(present) and I(target_type) is C(instance), C(ip), or C(alb).
-    required: false
-    type: str
-  preserve_client_ip_enabled:
-    description:
-      - Indicates whether client IP preservation is enabled.
-      - The default is disabled if the target group type is C(ip) and the target group protocol is C(tcp) or C(tls).
-        Otherwise, the default is enabled. Client IP preservation cannot be disabled for C(udp) and C(tcp_udp) target groups.
-      - I(preserve_client_ip_enabled) is supported only by Network Load Balancers.
-    type: bool
-    required: false
-    version_added: 2.1.0
-  proxy_protocol_v2_enabled:
-    description:
-      - Indicates whether Proxy Protocol version 2 is enabled.
-      - The value is C(true) or C(false).
-      - I(proxy_protocol_v2_enabled) is supported only by Network Load Balancers.
-    type: bool
-    required: false
-    version_added: 2.1.0
-  wait:
-    description:
-      - Whether or not to wait for newly registered or deregistered targets to reach a steady state
-        (healthy on registration, unused on deregistration).
-    type: bool
-    default: false
-  wait_timeout:
-    description:
-      - The maximum time, in seconds, to wait for the target group.
-    default: 200
-    type: int
-
-notes:
-  - Once a target group has been created, only its health check can be modified by subsequent calls.
-
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.tags
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a target group with a default health check
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    protocol: http
-    port: 80
-    vpc_id: vpc-01234567
-    state: present
-
-- name: Create a target group with protocol_version 'GRPC'
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    protocol: http
-    port: 80
-    vpc_id: vpc-01234567
-    protocol_version: GRPC
-    state: present
-
-- name: Modify the target group with a custom health check
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    protocol: http
-    port: 80
-    vpc_id: vpc-01234567
-    health_check_protocol: http
-    health_check_path: /health_check
-    health_check_port: 80
-    successful_response_codes: 200
-    health_check_interval: 15
-    health_check_timeout: 3
-    healthy_threshold_count: 4
-    unhealthy_threshold_count: 3
-    state: present
-
-- name: Delete a target group
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    state: absent
-
-- name: Create a target group with instance targets
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    protocol: http
-    port: 81
-    vpc_id: vpc-01234567
-    health_check_protocol: http
-    health_check_path: /
-    successful_response_codes: "200,250-260"
-    targets:
-      - Id: i-01234567
-        Port: 80
-      - Id: i-98765432
-        Port: 80
-    state: present
-    wait_timeout: 200
-    wait: True
-
-- name: Create a target group with IP address targets
-  community.aws.elb_target_group:
-    name: mytargetgroup
-    protocol: http
-    port: 81
-    vpc_id: vpc-01234567
-    health_check_protocol: http
-    health_check_path: /
-    successful_response_codes: "200,250-260"
-    target_type: ip
-    targets:
-      - Id: 10.0.0.10
-        Port: 80
-        AvailabilityZone: all
-      - Id: 10.0.0.20
-        Port: 80
-    state: present
-    wait_timeout: 200
-    wait: True
-
-# Using lambda as targets requires that the target group
-# itself is allowed to invoke the lambda function.
-# therefore you need first to create an empty target group -# to receive its arn, second, allow the target group -# to invoke the lambda function and third, add the target -# to the target group -- name: first, create empty target group - community.aws.elb_target_group: - name: my-lambda-targetgroup - target_type: lambda - state: present - modify_targets: False - register: out - -- name: second, allow invoke of the lambda - community.aws.lambda_policy: - state: "{{ state | default('present') }}" - function_name: my-lambda-function - statement_id: someID - action: lambda:InvokeFunction - principal: elasticloadbalancing.amazonaws.com - source_arn: "{{ out.target_group_arn }}" - -- name: third, add target - community.aws.elb_target_group: - name: my-lambda-targetgroup - target_type: lambda - state: present - targets: - - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function - -""" - -RETURN = r""" -deregistration_delay_timeout_seconds: - description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. - returned: when state present - type: int - sample: 300 -deregistration_connection_termination: - description: Indicates whether the load balancer terminates connections at the end of the deregistration timeout. - returned: when state present - type: bool - sample: True -health_check_interval_seconds: - description: The approximate amount of time, in seconds, between health checks of an individual target. - returned: when state present - type: int - sample: 30 -health_check_path: - description: The destination for the health check request. - returned: when state present - type: str - sample: /index.html -health_check_port: - description: The port to use to connect with the target. - returned: when state present - type: str - sample: traffic-port -health_check_protocol: - description: The protocol to use to connect with the target. - returned: when state present - type: str - sample: HTTP -health_check_timeout_seconds: - description: The amount of time, in seconds, during which no response means a failed health check. - returned: when state present - type: int - sample: 5 -healthy_threshold_count: - description: The number of consecutive health checks successes required before considering an unhealthy target healthy. - returned: when state present - type: int - sample: 5 -load_balancer_arns: - description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group. - returned: when state present - type: list - sample: [] -matcher: - description: The HTTP codes to use when checking for a successful response from a target. - returned: when state present - type: dict - sample: { - "http_code": "200" - } -port: - description: The port on which the targets are listening. - returned: when state present - type: int - sample: 80 -protocol: - description: The protocol to use for routing traffic to the targets. - returned: when state present - type: str - sample: HTTP -stickiness_enabled: - description: Indicates whether sticky sessions are enabled. - returned: when state present - type: bool - sample: true -stickiness_lb_cookie_duration_seconds: - description: The time period, in seconds, during which requests from a client should be routed to the same target. - returned: when state present - type: int - sample: 86400 -stickiness_type: - description: The type of sticky sessions. 
- returned: when state present - type: str - sample: lb_cookie -load_balancing_algorithm_type: - description: The type load balancing algorithm used. - returned: when state present - type: str - version_added: 3.2.0 - sample: least_outstanding_requests -tags: - description: The tags attached to the target group. - returned: when state present - type: dict - sample: "{ - 'Tag': 'Example' - }" -target_group_arn: - description: The Amazon Resource Name (ARN) of the target group. - returned: when state present - type: str - sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211" -target_group_name: - description: The name of the target group. - returned: when state present - type: str - sample: mytargetgroup -unhealthy_threshold_count: - description: The number of consecutive health check failures required before considering the target unhealthy. - returned: when state present - type: int - sample: 2 -vpc_id: - description: The ID of the VPC for the targets. - returned: when state present - type: str - sample: vpc-0123456 -""" - -import time - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_tg_attributes(connection, module, tg_arn): - try: - _attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True) - tg_attributes = boto3_tag_list_to_ansible_dict(_attributes["Attributes"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get target group attributes") - - # Replace '.' 
with '_' in attribute key names to make it more Ansible friendly - return dict((k.replace(".", "_"), v) for k, v in tg_attributes.items()) - - -def get_target_group_tags(connection, module, target_group_arn): - try: - _tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True) - return _tags["TagDescriptions"][0]["Tags"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get target group tags") - - -def get_target_group(connection, module, retry_missing=False): - extra_codes = ["TargetGroupNotFound"] if retry_missing else [] - try: - target_group_paginator = connection.get_paginator("describe_target_groups").paginate( - Names=[module.params.get("name")] - ) - jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes) - result = jittered_retry(target_group_paginator.build_full_result)() - except is_boto3_error_code("TargetGroupNotFound"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get target group") - - return result["TargetGroups"][0] - - -def wait_for_status(connection, module, target_group_arn, targets, status): - polling_increment_secs = 5 - max_retries = module.params.get("wait_timeout") // polling_increment_secs - status_achieved = False - - for x in range(0, max_retries): - try: - response = connection.describe_target_health( - TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True - ) - if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't describe target health") - - result = response - return status_achieved, result - - -def create_or_update_attributes(connection, module, target_group, new_target_group): - changed = False - target_type = module.params.get("target_type") - deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") - deregistration_connection_termination = module.params.get("deregistration_connection_termination") - stickiness_enabled = module.params.get("stickiness_enabled") - stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") - stickiness_type = module.params.get("stickiness_type") - stickiness_app_cookie_duration = module.params.get("stickiness_app_cookie_duration") - stickiness_app_cookie_name = module.params.get("stickiness_app_cookie_name") - preserve_client_ip_enabled = module.params.get("preserve_client_ip_enabled") - proxy_protocol_v2_enabled = module.params.get("proxy_protocol_v2_enabled") - load_balancing_algorithm_type = module.params.get("load_balancing_algorithm_type") - - # Now set target group attributes - update_attributes = [] - - # Get current attributes - current_tg_attributes = get_tg_attributes(connection, module, target_group["TargetGroupArn"]) - - if deregistration_delay_timeout is not None: - if str(deregistration_delay_timeout) != current_tg_attributes["deregistration_delay_timeout_seconds"]: - update_attributes.append( - {"Key": "deregistration_delay.timeout_seconds", "Value": str(deregistration_delay_timeout)} - ) - if deregistration_connection_termination is not None: - if ( - deregistration_connection_termination - and 
current_tg_attributes.get("deregistration_delay_connection_termination_enabled") != "true" - ): - update_attributes.append({"Key": "deregistration_delay.connection_termination.enabled", "Value": "true"}) - if stickiness_enabled is not None: - if stickiness_enabled and current_tg_attributes["stickiness_enabled"] != "true": - update_attributes.append({"Key": "stickiness.enabled", "Value": "true"}) - if stickiness_lb_cookie_duration is not None: - if str(stickiness_lb_cookie_duration) != current_tg_attributes["stickiness_lb_cookie_duration_seconds"]: - update_attributes.append( - {"Key": "stickiness.lb_cookie.duration_seconds", "Value": str(stickiness_lb_cookie_duration)} - ) - if stickiness_type is not None: - if stickiness_type != current_tg_attributes.get("stickiness_type"): - update_attributes.append({"Key": "stickiness.type", "Value": stickiness_type}) - if stickiness_app_cookie_name is not None: - if stickiness_app_cookie_name != current_tg_attributes.get("stickiness_app_cookie_name"): - update_attributes.append( - {"Key": "stickiness.app_cookie.cookie_name", "Value": str(stickiness_app_cookie_name)} - ) - if stickiness_app_cookie_duration is not None: - if str(stickiness_app_cookie_duration) != current_tg_attributes["stickiness_app_cookie_duration_seconds"]: - update_attributes.append( - {"Key": "stickiness.app_cookie.duration_seconds", "Value": str(stickiness_app_cookie_duration)} - ) - if preserve_client_ip_enabled is not None: - if target_type not in ("udp", "tcp_udp"): - if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get("preserve_client_ip_enabled"): - update_attributes.append( - {"Key": "preserve_client_ip.enabled", "Value": str(preserve_client_ip_enabled).lower()} - ) - if proxy_protocol_v2_enabled is not None: - if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get("proxy_protocol_v2_enabled"): - update_attributes.append( - {"Key": "proxy_protocol_v2.enabled", "Value": str(proxy_protocol_v2_enabled).lower()} - ) - if load_balancing_algorithm_type is not None: - if str(load_balancing_algorithm_type) != current_tg_attributes["load_balancing_algorithm_type"]: - update_attributes.append( - {"Key": "load_balancing.algorithm.type", "Value": str(load_balancing_algorithm_type)} - ) - - if update_attributes: - try: - connection.modify_target_group_attributes( - TargetGroupArn=target_group["TargetGroupArn"], Attributes=update_attributes, aws_retry=True - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - # Something went wrong setting attributes. 
If this target group was created during this task, delete it to leave a consistent state
-            if new_target_group:
-                connection.delete_target_group(TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True)
-            module.fail_json_aws(e, msg="Couldn't set target group attributes")
-
-    return changed
-
-
-def create_or_update_target_group(connection, module):
-    changed = False
-    new_target_group = False
-    params = dict()
-    target_type = module.params.get("target_type")
-    params["Name"] = module.params.get("name")
-    params["TargetType"] = target_type
-    if target_type != "lambda":
-        params["Protocol"] = module.params.get("protocol").upper()
-        if module.params.get("protocol_version") is not None:
-            params["ProtocolVersion"] = module.params.get("protocol_version")
-        params["Port"] = module.params.get("port")
-        params["VpcId"] = module.params.get("vpc_id")
-    tags = module.params.get("tags")
-    purge_tags = module.params.get("purge_tags")
-
-    health_option_keys = [
-        "health_check_path",
-        "health_check_protocol",
-        "health_check_interval",
-        "health_check_timeout",
-        "healthy_threshold_count",
-        "unhealthy_threshold_count",
-        "successful_response_codes",
-    ]
-    health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys)
-
-    # Set health check if anything set
-    if health_options:
-        if module.params.get("health_check_protocol") is not None:
-            params["HealthCheckProtocol"] = module.params.get("health_check_protocol").upper()
-
-        if module.params.get("health_check_port") is not None:
-            params["HealthCheckPort"] = module.params.get("health_check_port")
-
-        if module.params.get("health_check_interval") is not None:
-            params["HealthCheckIntervalSeconds"] = module.params.get("health_check_interval")
-
-        if module.params.get("health_check_timeout") is not None:
-            params["HealthCheckTimeoutSeconds"] = module.params.get("health_check_timeout")
-
-        if module.params.get("healthy_threshold_count") is not None:
-            params["HealthyThresholdCount"] = module.params.get("healthy_threshold_count")
-
-        if module.params.get("unhealthy_threshold_count") is not None:
-            params["UnhealthyThresholdCount"] = module.params.get("unhealthy_threshold_count")
-
-        # Only need to check response code and path for http(s) health checks
-        protocol = module.params.get("health_check_protocol")
-        if protocol is not None and protocol.upper() in ["HTTP", "HTTPS"]:
-            if module.params.get("health_check_path") is not None:
-                params["HealthCheckPath"] = module.params.get("health_check_path")
-
-            if module.params.get("successful_response_codes") is not None:
-                params["Matcher"] = {}
-                code_key = "HttpCode"
-                protocol_version = module.params.get("protocol_version")
-                if protocol_version is not None and protocol_version.upper() == "GRPC":
-                    code_key = "GrpcCode"
-                params["Matcher"][code_key] = module.params.get("successful_response_codes")
-
-    # Get target group
-    target_group = get_target_group(connection, module)
-
-    if target_group:
-        diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)]
-        if diffs:
-            module.fail_json(msg=f"Cannot modify {', '.join(diffs)} parameter(s) for a target group")
-        # Target group exists so check health check parameters match what has been passed
-        health_check_params = dict()
-
-        # Modify health check if anything set
-        if health_options:
-            # Health check protocol
-            if "HealthCheckProtocol" in params and target_group["HealthCheckProtocol"] != params["HealthCheckProtocol"]:
-                health_check_params["HealthCheckProtocol"] =
params["HealthCheckProtocol"] - - # Health check port - if "HealthCheckPort" in params and target_group["HealthCheckPort"] != params["HealthCheckPort"]: - health_check_params["HealthCheckPort"] = params["HealthCheckPort"] - - # Health check interval - if ( - "HealthCheckIntervalSeconds" in params - and target_group["HealthCheckIntervalSeconds"] != params["HealthCheckIntervalSeconds"] - ): - health_check_params["HealthCheckIntervalSeconds"] = params["HealthCheckIntervalSeconds"] - - # Health check timeout - if ( - "HealthCheckTimeoutSeconds" in params - and target_group["HealthCheckTimeoutSeconds"] != params["HealthCheckTimeoutSeconds"] - ): - health_check_params["HealthCheckTimeoutSeconds"] = params["HealthCheckTimeoutSeconds"] - - # Healthy threshold - if ( - "HealthyThresholdCount" in params - and target_group["HealthyThresholdCount"] != params["HealthyThresholdCount"] - ): - health_check_params["HealthyThresholdCount"] = params["HealthyThresholdCount"] - - # Unhealthy threshold - if ( - "UnhealthyThresholdCount" in params - and target_group["UnhealthyThresholdCount"] != params["UnhealthyThresholdCount"] - ): - health_check_params["UnhealthyThresholdCount"] = params["UnhealthyThresholdCount"] - - # Only need to check response code and path for http(s) health checks - if target_group["HealthCheckProtocol"] in ["HTTP", "HTTPS"]: - # Health check path - if "HealthCheckPath" in params and target_group["HealthCheckPath"] != params["HealthCheckPath"]: - health_check_params["HealthCheckPath"] = params["HealthCheckPath"] - - # Matcher (successful response codes) - # TODO: required and here? - if "Matcher" in params: - code_key = "HttpCode" - if target_group.get("ProtocolVersion") == "GRPC": - code_key = "GrpcCode" - current_matcher_list = target_group["Matcher"][code_key].split(",") - requested_matcher_list = params["Matcher"][code_key].split(",") - if set(current_matcher_list) != set(requested_matcher_list): - health_check_params["Matcher"] = {} - health_check_params["Matcher"][code_key] = ",".join(requested_matcher_list) - - try: - if health_check_params: - connection.modify_target_group( - TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True, **health_check_params - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't update target group") - - # Do we need to modify targets? - if module.params.get("modify_targets"): - # get list of current target instances. 
I can't see anything like a describe targets in the doco so - # describe_target_health seems to be the only way to get them - try: - current_targets = connection.describe_target_health( - TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get target group health") - - if module.params.get("targets"): - if target_type != "lambda": - params["Targets"] = module.params.get("targets") - - # Correct type of target ports - for target in params["Targets"]: - target["Port"] = int(target.get("Port", module.params.get("port"))) - - current_instance_ids = [] - - for instance in current_targets["TargetHealthDescriptions"]: - current_instance_ids.append(instance["Target"]["Id"]) - - new_instance_ids = [] - for instance in params["Targets"]: - new_instance_ids.append(instance["Id"]) - - add_instances = set(new_instance_ids) - set(current_instance_ids) - - if add_instances: - instances_to_add = [] - for target in params["Targets"]: - if target["Id"] in add_instances: - tmp_item = {"Id": target["Id"], "Port": target["Port"]} - if target.get("AvailabilityZone"): - tmp_item["AvailabilityZone"] = target["AvailabilityZone"] - instances_to_add.append(tmp_item) - - changed = True - try: - connection.register_targets( - TargetGroupArn=target_group["TargetGroupArn"], Targets=instances_to_add, aws_retry=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't register targets") - - if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status( - connection, module, target_group["TargetGroupArn"], instances_to_add, "healthy" - ) - if not status_achieved: - module.fail_json( - msg="Error waiting for target registration to be healthy - please check the AWS console" - ) - - remove_instances = set(current_instance_ids) - set(new_instance_ids) - - if remove_instances: - instances_to_remove = [] - for target in current_targets["TargetHealthDescriptions"]: - if target["Target"]["Id"] in remove_instances: - instances_to_remove.append( - {"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]} - ) - - changed = True - try: - connection.deregister_targets( - TargetGroupArn=target_group["TargetGroupArn"], - Targets=instances_to_remove, - aws_retry=True, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove targets") - - if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status( - connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" - ) - if not status_achieved: - module.fail_json( - msg="Error waiting for target deregistration - please check the AWS console" - ) - - # register lambda target - else: - try: - changed = False - target = module.params.get("targets")[0] - if len(current_targets["TargetHealthDescriptions"]) == 0: - changed = True - else: - for item in current_targets["TargetHealthDescriptions"]: - if target["Id"] != item["Target"]["Id"]: - changed = True - break # only one target is possible with lambda - - if changed: - if target.get("Id"): - response = connection.register_targets( - TargetGroupArn=target_group["TargetGroupArn"], - Targets=[{"Id": target["Id"]}], - aws_retry=True, - ) - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't register targets") - 
else: - if target_type != "lambda": - current_instances = current_targets["TargetHealthDescriptions"] - - if current_instances: - instances_to_remove = [] - for target in current_targets["TargetHealthDescriptions"]: - instances_to_remove.append({"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]}) - - changed = True - try: - connection.deregister_targets( - TargetGroupArn=target_group["TargetGroupArn"], - Targets=instances_to_remove, - aws_retry=True, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove targets") - - if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status( - connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused" - ) - if not status_achieved: - module.fail_json( - msg="Error waiting for target deregistration - please check the AWS console" - ) - - # remove lambda targets - else: - changed = False - if current_targets["TargetHealthDescriptions"]: - changed = True - # only one target is possible with lambda - target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"] - if changed: - connection.deregister_targets( - TargetGroupArn=target_group["TargetGroupArn"], - Targets=[{"Id": target_to_remove}], - aws_retry=True, - ) - else: - try: - connection.create_target_group(aws_retry=True, **params) - changed = True - new_target_group = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create target group") - - target_group = get_target_group(connection, module, retry_missing=True) - - if module.params.get("targets"): - if target_type != "lambda": - params["Targets"] = module.params.get("targets") - try: - connection.register_targets( - TargetGroupArn=target_group["TargetGroupArn"], Targets=params["Targets"], aws_retry=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't register targets") - - if module.params.get("wait"): - status_achieved, registered_instances = wait_for_status( - connection, module, target_group["TargetGroupArn"], params["Targets"], "healthy" - ) - if not status_achieved: - module.fail_json( - msg="Error waiting for target registration to be healthy - please check the AWS console" - ) - - else: - try: - target = module.params.get("targets")[0] - response = connection.register_targets( - TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't register targets") - - attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group) - - if attributes_update: - changed = True - - # Tags - only need to play with tags if tags parameter has been set to something - if tags is not None: - # Get tags - current_tags = get_target_group_tags(connection, module, target_group["TargetGroupArn"]) - - # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags( - boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags - ) - if tags_to_delete: - try: - connection.remove_tags( - ResourceArns=[target_group["TargetGroupArn"]], TagKeys=tags_to_delete, aws_retry=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete tags from target 
group") - changed = True - - # Add/update tags - if tags_need_modify: - try: - connection.add_tags( - ResourceArns=[target_group["TargetGroupArn"]], - Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), - aws_retry=True, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add tags to target group") - changed = True - - # Get the target group again - target_group = get_target_group(connection, module) - - # Get the target group attributes again - target_group.update(get_tg_attributes(connection, module, target_group["TargetGroupArn"])) - - # Convert target_group to snake_case - snaked_tg = camel_dict_to_snake_dict(target_group) - - snaked_tg["tags"] = boto3_tag_list_to_ansible_dict( - get_target_group_tags(connection, module, target_group["TargetGroupArn"]) - ) - - module.exit_json(changed=changed, **snaked_tg) - - -def delete_target_group(connection, module): - changed = False - tg = get_target_group(connection, module) - - if tg: - try: - connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete target group") - - module.exit_json(changed=changed) - - -def main(): - protocols_list = ["http", "https", "tcp", "tls", "udp", "tcp_udp", "HTTP", "HTTPS", "TCP", "TLS", "UDP", "TCP_UDP"] - argument_spec = dict( - deregistration_delay_timeout=dict(type="int"), - deregistration_connection_termination=dict(type="bool", default=False), - health_check_protocol=dict(choices=protocols_list), - health_check_port=dict(), - health_check_path=dict(), - health_check_interval=dict(type="int"), - health_check_timeout=dict(type="int"), - healthy_threshold_count=dict(type="int"), - modify_targets=dict(default=True, type="bool"), - name=dict(required=True), - port=dict(type="int"), - protocol=dict(choices=protocols_list), - protocol_version=dict(type="str", choices=["GRPC", "HTTP1", "HTTP2"]), - purge_tags=dict(default=True, type="bool"), - stickiness_enabled=dict(type="bool"), - stickiness_type=dict(), - stickiness_lb_cookie_duration=dict(type="int"), - stickiness_app_cookie_duration=dict(type="int"), - stickiness_app_cookie_name=dict(), - load_balancing_algorithm_type=dict(type="str", choices=["round_robin", "least_outstanding_requests"]), - state=dict(required=True, choices=["present", "absent"]), - successful_response_codes=dict(), - tags=dict(type="dict", aliases=["resource_tags"]), - target_type=dict(choices=["instance", "ip", "lambda", "alb"]), - targets=dict(type="list", elements="dict"), - unhealthy_threshold_count=dict(type="int"), - vpc_id=dict(), - preserve_client_ip_enabled=dict(type="bool"), - proxy_protocol_v2_enabled=dict(type="bool"), - wait_timeout=dict(type="int", default=200), - wait=dict(type="bool", default=False), - ) - required_by = dict( - health_check_path=["health_check_protocol"], - successful_response_codes=["health_check_protocol"], - ) - required_if = [ - ["target_type", "instance", ["protocol", "port", "vpc_id"]], - ["target_type", "ip", ["protocol", "port", "vpc_id"]], - ["target_type", "alb", ["protocol", "port", "vpc_id"]], - ] - - module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if) - - if module.params.get("target_type") is None: - module.params["target_type"] = "instance" - - connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - if 
module.params.get("state") == "present": - if module.params.get("protocol") in ["http", "https", "HTTP", "HTTPS"] and module.params.get( - "deregistration_connection_termination", None - ): - module.fail_json( - msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination" - ) - - create_or_update_target_group(connection, module) - else: - delete_target_group(connection, module) - - -if __name__ == "__main__": - main() diff --git a/elb_target_group_info.py b/elb_target_group_info.py deleted file mode 100644 index bf02db21f15..00000000000 --- a/elb_target_group_info.py +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_target_group_info -version_added: 1.0.0 -short_description: Gather information about ELB target groups in AWS -description: - - Gather information about ELB target groups in AWS -author: - - Rob White (@wimnat) -options: - load_balancer_arn: - description: - - The Amazon Resource Name (ARN) of the load balancer. - required: false - type: str - target_group_arns: - description: - - The Amazon Resource Names (ARN) of the target groups. - required: false - type: list - elements: str - names: - description: - - The names of the target groups. - required: false - type: list - elements: str - collect_targets_health: - description: - - When set to C(True), output contains targets health description - required: false - default: false - type: bool - -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Gather information about all target groups - community.aws.elb_target_group_info: - -- name: Gather information about the target group attached to a particular ELB - community.aws.elb_target_group_info: - load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-elb/aabbccddeeff" - -- name: Gather information about a target groups named 'tg1' and 'tg2' - community.aws.elb_target_group_info: - names: - - tg1 - - tg2 - -""" - -RETURN = r""" -target_groups: - description: a list of target groups - returned: always - type: complex - contains: - deregistration_delay_timeout_seconds: - description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. - returned: always - type: int - sample: 300 - health_check_interval_seconds: - description: The approximate amount of time, in seconds, between health checks of an individual target. - returned: always - type: int - sample: 30 - health_check_path: - description: The destination for the health check request. - returned: always - type: str - sample: /index.html - health_check_port: - description: The port to use to connect with the target. - returned: always - type: str - sample: traffic-port - health_check_protocol: - description: The protocol to use to connect with the target. - returned: always - type: str - sample: HTTP - health_check_timeout_seconds: - description: The amount of time, in seconds, during which no response means a failed health check. 
-            returned: always
-            type: int
-            sample: 5
-        healthy_threshold_count:
-            description: The number of consecutive health check successes required before considering an unhealthy target healthy.
-            returned: always
-            type: int
-            sample: 5
-        load_balancer_arns:
-            description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
-            returned: always
-            type: list
-            sample: []
-        matcher:
-            description: The HTTP codes to use when checking for a successful response from a target.
-            returned: always
-            type: dict
-            sample: {
-                "http_code": "200"
-            }
-        port:
-            description: The port on which the targets are listening.
-            returned: always
-            type: int
-            sample: 80
-        protocol:
-            description: The protocol to use for routing traffic to the targets.
-            returned: always
-            type: str
-            sample: HTTP
-        stickiness_enabled:
-            description: Indicates whether sticky sessions are enabled.
-            returned: always
-            type: bool
-            sample: true
-        stickiness_lb_cookie_duration_seconds:
-            description: The time period, in seconds, during which requests from a client should be routed to the same target.
-            returned: always
-            type: int
-            sample: 86400
-        stickiness_type:
-            description: The type of sticky sessions.
-            returned: always
-            type: str
-            sample: lb_cookie
-        tags:
-            description: The tags attached to the target group.
-            returned: always
-            type: dict
-            sample: "{
-                'Tag': 'Example'
-            }"
-        target_group_arn:
-            description: The Amazon Resource Name (ARN) of the target group.
-            returned: always
-            type: str
-            sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/mytargetgroup/aabbccddee0044332211"
-        targets_health_description:
-            description: Targets health description.
-            returned: when collect_targets_health is enabled
-            type: complex
-            contains:
-                health_check_port:
-                    description: The port to check target health.
-                    returned: always
-                    type: str
-                    sample: '80'
-                target:
-                    description: The target metadata.
-                    returned: always
-                    type: complex
-                    contains:
-                        id:
-                            description: The ID of the target.
-                            returned: always
-                            type: str
-                            sample: i-0123456789
-                        port:
-                            description: The port to use to connect with the target.
-                            returned: always
-                            type: int
-                            sample: 80
-                target_health:
-                    description: The target health status.
-                    returned: always
-                    type: complex
-                    contains:
-                        state:
-                            description: The state of the target health.
-                            returned: always
-                            type: str
-                            sample: healthy
-        target_group_name:
-            description: The name of the target group.
-            returned: always
-            type: str
-            sample: mytargetgroup
-        unhealthy_threshold_count:
-            description: The number of consecutive health check failures required before considering the target unhealthy.
-            returned: always
-            type: int
-            sample: 2
-        vpc_id:
-            description: The ID of the VPC for the targets.
- returned: always - type: str - sample: vpc-0123456 -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=10) -def get_paginator(**kwargs): - paginator = client.get_paginator("describe_target_groups") - return paginator.paginate(**kwargs).build_full_result() - - -def get_target_group_attributes(target_group_arn): - try: - target_group_attributes = boto3_tag_list_to_ansible_dict( - client.describe_target_group_attributes(TargetGroupArn=target_group_arn)["Attributes"] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe target group attributes") - - # Replace '.' with '_' in attribute key names to make it more Ansibley - return dict((k.replace(".", "_"), v) for (k, v) in target_group_attributes.items()) - - -def get_target_group_tags(target_group_arn): - try: - return boto3_tag_list_to_ansible_dict( - client.describe_tags(ResourceArns=[target_group_arn])["TagDescriptions"][0]["Tags"] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe group tags") - - -def get_target_group_targets_health(target_group_arn): - try: - return client.describe_target_health(TargetGroupArn=target_group_arn)["TargetHealthDescriptions"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get target health") - - -def list_target_groups(): - load_balancer_arn = module.params.get("load_balancer_arn") - target_group_arns = module.params.get("target_group_arns") - names = module.params.get("names") - collect_targets_health = module.params.get("collect_targets_health") - - try: - if not load_balancer_arn and not target_group_arns and not names: - target_groups = get_paginator() - if load_balancer_arn: - target_groups = get_paginator(LoadBalancerArn=load_balancer_arn) - if target_group_arns: - target_groups = get_paginator(TargetGroupArns=target_group_arns) - if names: - target_groups = get_paginator(Names=names) - except is_boto3_error_code("TargetGroupNotFound"): - module.exit_json(target_groups=[]) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to list target groups") - - # Get the attributes and tags for each target group - for target_group in target_groups["TargetGroups"]: - target_group.update(get_target_group_attributes(target_group["TargetGroupArn"])) - - # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups["TargetGroups"]] - - # Get tags for each target group - for snaked_target_group in snaked_target_groups: - snaked_target_group["tags"] = get_target_group_tags(snaked_target_group["target_group_arn"]) - if collect_targets_health: - snaked_target_group["targets_health_description"] = [ - 
camel_dict_to_snake_dict(target) - for target in get_target_group_targets_health(snaked_target_group["target_group_arn"]) - ] - - module.exit_json(target_groups=snaked_target_groups) - - -def main(): - global module - global client - - argument_spec = dict( - load_balancer_arn=dict(type="str"), - target_group_arns=dict(type="list", elements="str"), - names=dict(type="list", elements="str"), - collect_targets_health=dict(default=False, type="bool", required=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[["load_balancer_arn", "target_group_arns", "names"]], - supports_check_mode=True, - ) - - try: - client = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - list_target_groups() - - -if __name__ == "__main__": - main() diff --git a/elb_target_info.py b/elb_target_info.py deleted file mode 100644 index add122416d9..00000000000 --- a/elb_target_info.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Yaakov Kuperman -# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: elb_target_info -version_added: 1.0.0 -short_description: Gathers which target groups a target is associated with. -description: - - This module will search through every target group in a region to find - which ones have registered a given instance ID or IP. -author: - - "Yaakov Kuperman (@yaakov-github)" -options: - instance_id: - description: - - What instance ID to get information for. - type: str - required: true - get_unused_target_groups: - description: - - Whether or not to get target groups not used by any load balancers. - type: bool - default: true - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# practical use case - dynamically de-registering and re-registering nodes - - - name: Get EC2 Metadata - amazon.aws.ec2_metadata_facts: - - - name: Get initial list of target groups - delegate_to: localhost - community.aws.elb_target_info: - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - - - name: save fact for later - ansible.builtin.set_fact: - original_tgs: "{{ target_info.instance_target_groups }}" - - - name: Deregister instance from all target groups - delegate_to: localhost - community.aws.elb_target: - target_group_arn: "{{ item.0.target_group_arn }}" - target_port: "{{ item.1.target_port }}" - target_az: "{{ item.1.target_az }}" - target_id: "{{ item.1.target_id }}" - state: absent - target_status: "draining" - region: "{{ ansible_ec2_placement_region }}" - with_subelements: - - "{{ original_tgs }}" - - "targets" - - # This avoids having to wait for 'elb_target' to serially deregister each - # target group. An alternative would be to run all of the 'elb_target' - # tasks async and wait for them to finish. 
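-# As a rough sketch, that async alternative could look like the following
-# (hypothetical task layout; 'dereg_jobs' is an assumed register name):
-#
-#  - name: deregister from each target group without blocking
-#    delegate_to: localhost
-#    community.aws.elb_target:
-#      target_group_arn: "{{ item.0.target_group_arn }}"
-#      target_port: "{{ item.1.target_port }}"
-#      target_az: "{{ item.1.target_az }}"
-#      target_id: "{{ item.1.target_id }}"
-#      state: absent
-#    async: 300
-#    poll: 0
-#    register: dereg_jobs
-#    with_subelements:
-#      - "{{ original_tgs }}"
-#      - "targets"
-#
-#  - name: wait for the async deregistrations to finish
-#    delegate_to: localhost
-#    ansible.builtin.async_status:
-#      jid: "{{ item.ansible_job_id }}"
-#    register: job_result
-#    until: job_result.finished
-#    retries: 30
-#    delay: 10
-#    loop: "{{ dereg_jobs.results }}"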
- - - name: wait for all targets to deregister simultaneously - delegate_to: localhost - community.aws.elb_target_info: - get_unused_target_groups: false - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - until: (target_info.instance_target_groups | length) == 0 - retries: 60 - delay: 10 - - - name: reregister in elbv2s - community.aws.elb_target: - region: "{{ ansible_ec2_placement_region }}" - target_group_arn: "{{ item.0.target_group_arn }}" - target_port: "{{ item.1.target_port }}" - target_az: "{{ item.1.target_az }}" - target_id: "{{ item.1.target_id }}" - state: present - target_status: "initial" - with_subelements: - - "{{ original_tgs }}" - - "targets" - - # wait until all groups associated with this instance are 'healthy' or - # 'unused' - - name: wait for registration - community.aws.elb_target_info: - get_unused_target_groups: false - instance_id: "{{ ansible_ec2_instance_id }}" - region: "{{ ansible_ec2_placement_region }}" - register: target_info - until: (target_info.instance_target_groups | - map(attribute='targets') | - flatten | - map(attribute='target_health') | - rejectattr('state', 'equalto', 'healthy') | - rejectattr('state', 'equalto', 'unused') | - list | - length) == 0 - retries: 61 - delay: 10 - -# using the target groups to generate AWS CLI commands to reregister the -# instance - useful in case the playbook fails mid-run and manual -# rollback is required - - name: "reregistration commands: ELBv2s" - ansible.builtin.debug: - msg: > - aws --region {{ansible_ec2_placement_region}} elbv2 - register-targets --target-group-arn {{item.target_group_arn}} - --targets{%for target in item.targets%} - Id={{target.target_id}}, - Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}} - {%endif%} - {%endfor%} - loop: "{{target_info.instance_target_groups}}" - -""" - -RETURN = r""" -instance_target_groups: - description: A list of target groups to which the instance is registered - returned: always - type: complex - contains: - target_group_arn: - description: The ARN of the target group - type: str - returned: always - sample: - - "arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/target-group/deadbeefdeadbeef" - target_group_type: - description: Which target type is used for this group - returned: always - type: str - sample: - - ip - - instance - targets: - description: A list of targets that point to this instance ID - returned: always - type: complex - contains: - target_id: - description: the target ID referring to this instance - type: str - returned: always - sample: - - i-deadbeef - - 1.2.3.4 - target_port: - description: which port this target is listening on - type: str - returned: always - sample: - - 80 - target_az: - description: which availability zone is explicitly - associated with this target - type: str - returned: when an AZ is associated with this instance - sample: - - us-west-2a - target_health: - description: - - The target health description.
- - See following link for all the possible values - U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health) - returned: always - type: complex - contains: - description: - description: description of target health - returned: if I(state!=present) - sample: - - "Target deregistration is in progress" - type: str - reason: - description: reason code for target health - returned: if I(state!=healthy) - sample: - - "Target.DeregistrationInProgress" - type: str - state: - description: health state - returned: always - sample: - - "healthy" - - "draining" - - "initial" - - "unhealthy" - - "unused" - - "unavailable" - type: str -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - # we can handle the lack of boto3 based on the ec2 module - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class Target(object): - """Models a target in a target group""" - - def __init__(self, target_id, port, az, raw_target_health): - self.target_port = port - self.target_id = target_id - self.target_az = az - self.target_health = self.convert_target_health(raw_target_health) - - def convert_target_health(self, raw_target_health): - return camel_dict_to_snake_dict(raw_target_health) - - -class TargetGroup(object): - """Models an elbv2 target group""" - - def __init__(self, **kwargs): - self.target_group_type = kwargs["target_group_type"] - self.target_group_arn = kwargs["target_group_arn"] - # the relevant targets associated with this group - self.targets = [] - - def add_target(self, target_id, target_port, target_az, raw_target_health): - self.targets.append(Target(target_id, target_port, target_az, raw_target_health)) - - def to_dict(self): - object_dict = vars(self) - object_dict["targets"] = [vars(each) for each in self.get_targets()] - return object_dict - - def get_targets(self): - return list(self.targets) - - -class TargetInfoGatherer(object): - def __init__(self, module, instance_id, get_unused_target_groups): - self.module = module - try: - self.ec2 = self.module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - except (ClientError, BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't connect to ec2") - - try: - self.elbv2 = self.module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Could not connect to elbv2") - - self.instance_id = instance_id - self.get_unused_target_groups = get_unused_target_groups - self.tgs = self._get_target_groups() - - def _get_instance_ips(self): - """Fetch all IPs associated with this instance so that we can determine - whether or not an instance is in an IP-based target group""" - try: - # get ahold of the instance in the API - reservations = self.ec2.describe_instances(InstanceIds=[self.instance_id], aws_retry=True)["Reservations"] - except (BotoCoreError, ClientError) as e: - # typically this will happen if the instance doesn't exist - self.module.fail_json_aws( - e, - msg=f"Could not get instance info for instance '{self.instance_id}'", - ) - - if len(reservations) < 1: - self.module.fail_json(msg=f"Instance ID {self.instance_id} could not
be found") - - instance = reservations[0]["Instances"][0] - - # IPs are represented in a few places in the API, this should - # account for all of them - ips = set() - ips.add(instance["PrivateIpAddress"]) - for nic in instance["NetworkInterfaces"]: - ips.add(nic["PrivateIpAddress"]) - for ip in nic["PrivateIpAddresses"]: - ips.add(ip["PrivateIpAddress"]) - - return list(ips) - - def _get_target_group_objects(self): - """helper function to build a list of TargetGroup objects based on - the AWS API""" - try: - paginator = self.elbv2.get_paginator("describe_target_groups") - tg_response = paginator.paginate().build_full_result() - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws( - e, - msg="Could not describe target groups", - ) - - # build list of TargetGroup objects representing every target group in - # the system - target_groups = [] - for each_tg in tg_response["TargetGroups"]: - if not self.get_unused_target_groups and len(each_tg["LoadBalancerArns"]) < 1: - # only collect target groups that actually are connected - # to LBs - continue - - target_groups.append( - TargetGroup( - target_group_arn=each_tg["TargetGroupArn"], - target_group_type=each_tg["TargetType"], - ) - ) - return target_groups - - def _get_target_descriptions(self, target_groups): - """Helper function to build a list of all the target descriptions - for this target in a target group""" - # Build a list of all the target groups pointing to this instance - # based on the previous list - tgs = set() - # Loop through all the target groups - for tg in target_groups: - try: - # Get the list of targets for that target group - response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws( - e, msg="Could not describe target " + f"health for target group {tg.target_group_arn}" - ) - - for t in response["TargetHealthDescriptions"]: - # If the target group has this instance as a target, add to - # list. This logic also accounts for the possibility of a - # target being in the target group multiple times with - # overridden ports - if t["Target"]["Id"] == self.instance_id or t["Target"]["Id"] in self.instance_ips: - # The 'AvailabilityZone' parameter is a weird one, see the - # API docs for more. 
Basically it's only supposed to be - # there under very specific circumstances, so we need - # to account for that - az = t["Target"]["AvailabilityZone"] if "AvailabilityZone" in t["Target"] else None - - tg.add_target(t["Target"]["Id"], t["Target"]["Port"], az, t["TargetHealth"]) - # since tgs is a set, each target group will be added only - # once, even though we call add on each successful match - tgs.add(tg) - return list(tgs) - - def _get_target_groups(self): - # do this first since we need the IPs later on in this function - self.instance_ips = self._get_instance_ips() - - # build list of target groups - target_groups = self._get_target_group_objects() - return self._get_target_descriptions(target_groups) - - -def main(): - argument_spec = dict( - instance_id={"required": True, "type": "str"}, - get_unused_target_groups={"required": False, "default": True, "type": "bool"}, - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - instance_id = module.params["instance_id"] - get_unused_target_groups = module.params["get_unused_target_groups"] - - tg_gatherer = TargetInfoGatherer(module, instance_id, get_unused_target_groups) - - instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs] - - module.exit_json(instance_target_groups=instance_target_groups) - - -if __name__ == "__main__": - main() diff --git a/glue_connection.py b/glue_connection.py deleted file mode 100644 index 18039a8616d..00000000000 --- a/glue_connection.py +++ /dev/null @@ -1,416 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Rob White (@wimnat) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: glue_connection -version_added: 1.0.0 -short_description: Manage an AWS Glue connection -description: - - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details. - - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_connection). - The usage did not change. -author: - - "Rob White (@wimnat)" -options: - availability_zone: - description: - - Availability Zone used by the connection - - Required when I(connection_type=NETWORK). - type: str - version_added: 1.5.0 - catalog_id: - description: - - The ID of the Data Catalog in which to create the connection. If none is supplied, - the AWS account ID is used by default. - type: str - connection_properties: - description: - - A dict of key-value pairs used as parameters for this connection. - - Required when I(state=present). - type: dict - connection_type: - description: - - The type of the connection. Currently, SFTP is not supported. - default: JDBC - choices: [ 'CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK' ] - type: str - description: - description: - - The description of the connection. - type: str - match_criteria: - description: - - A list of UTF-8 strings that specify the criteria that you can use in selecting this connection. - type: list - elements: str - name: - description: - - The name of the connection. - required: true - type: str - security_groups: - description: - - A list of security groups to be used by the connection. Use either security group name or ID. - - Required when I(connection_type=NETWORK). - type: list - elements: str - state: - description: - - Create or delete the AWS Glue connection. - required: true - choices: [ 'present', 'absent' ] - type: str - subnet_id: - description: - - The subnet ID used by the connection. 
- - Required when I(connection_type=NETWORK). - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Create an AWS Glue connection -- community.aws.glue_connection: - name: my-glue-connection - connection_properties: - JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename - USERNAME: my-username - PASSWORD: my-password - state: present - -# Create an AWS Glue network connection -- community.aws.glue_connection: - name: my-glue-network-connection - availability_zone: us-east-1a - connection_properties: - JDBC_ENFORCE_SSL: "false" - connection_type: NETWORK - description: Test connection - security_groups: - - sg-glue - subnet_id: subnet-123abc - state: present - -# Delete an AWS Glue connection -- community.aws.glue_connection: - name: my-glue-connection - state: absent -""" - -RETURN = r""" -connection_properties: - description: - - (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection. - - This return key has been deprecated, and will be removed in a release after 2024-06-01. - returned: when state is present - type: dict - sample: {'jdbc_connection_url':'jdbc:mysql://mydb:3306/databasename','username':'x','password':'y'} -connection_type: - description: The type of the connection. - returned: when state is present - type: str - sample: JDBC -creation_time: - description: The time this connection definition was created. - returned: when state is present - type: str - sample: "2018-04-21T05:19:58.326000+00:00" -description: - description: Description of the connection. - returned: when state is present - type: str - sample: My first Glue connection -last_updated_time: - description: The last time this connection definition was updated. - returned: when state is present - type: str - sample: "2018-04-21T05:19:58.326000+00:00" -match_criteria: - description: A list of criteria that can be used in selecting this connection. - returned: when state is present - type: list - sample: [] -name: - description: The name of the connection definition. - returned: when state is present - type: str - sample: my-glue-connection -physical_connection_requirements: - description: A dict of physical connection requirements, such as VPC and SecurityGroup, - needed for making this connection successfully. - returned: when state is present - type: dict - sample: {'subnet-id':'subnet-aabbccddee'} -raw_connection_properties: - description: A dict of key-value pairs used as parameters for this connection. - returned: when state is present - type: dict - sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'} -""" - -import copy -import time - -try: - import botocore -except ImportError: - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _get_glue_connection(connection, module): - """ - Get an AWS Glue connection based on name. If not found, return None.
- - :param connection: AWS boto3 glue connection - :param module: Ansible module - :return: boto3 Glue connection dict or None if not found - """ - - connection_name = module.params.get("name") - connection_catalog_id = module.params.get("catalog_id") - - params = {"Name": connection_name} - if connection_catalog_id is not None: - params["CatalogId"] = connection_catalog_id - - try: - return connection.get_connection(aws_retry=True, **params)["Connection"] - except is_boto3_error_code("EntityNotFoundException"): - return None - - -def _compare_glue_connection_params(user_params, current_params): - """ - Compare Glue connection params. If there is a difference, return True immediately else return False - - :param user_params: the Glue connection parameters passed by the user - :param current_params: the Glue connection parameters currently configured - :return: True if any parameter is mismatched else False - """ - - # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description - # To counter this, add the key if it's missing with a blank value - - if "Description" not in current_params: - current_params["Description"] = "" - if "MatchCriteria" not in current_params: - current_params["MatchCriteria"] = list() - if "PhysicalConnectionRequirements" not in current_params: - current_params["PhysicalConnectionRequirements"] = dict() - current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = [] - current_params["PhysicalConnectionRequirements"]["SubnetId"] = "" - - if ( - "ConnectionProperties" in user_params["ConnectionInput"] - and user_params["ConnectionInput"]["ConnectionProperties"] != current_params["ConnectionProperties"] - ): - return True - if ( - "ConnectionType" in user_params["ConnectionInput"] - and user_params["ConnectionInput"]["ConnectionType"] != current_params["ConnectionType"] - ): - return True - if ( - "Description" in user_params["ConnectionInput"] - and user_params["ConnectionInput"]["Description"] != current_params["Description"] - ): - return True - if "MatchCriteria" in user_params["ConnectionInput"] and set( - user_params["ConnectionInput"]["MatchCriteria"] - ) != set(current_params["MatchCriteria"]): - return True - if "PhysicalConnectionRequirements" in user_params["ConnectionInput"]: - if "SecurityGroupIdList" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] and set( - user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] - ) != set(current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"]): - return True - if ( - "SubnetId" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] - and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] - != current_params["PhysicalConnectionRequirements"]["SubnetId"] - ): - return True - if ( - "AvailabilityZone" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] - and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] - != current_params["PhysicalConnectionRequirements"]["AvailabilityZone"] - ): - return True - - return False - - -# Glue module doesn't appear to have any waiters, unlike EC2 or RDS -def _await_glue_connection(connection, module): - start_time = time.time() - wait_timeout = start_time + 30 - check_interval = 5 - - while wait_timeout > time.time(): - glue_connection = _get_glue_connection(connection, module) - if glue_connection and glue_connection.get("Name"): - return glue_connection - time.sleep(check_interval) - - 
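    # Falling out of the loop means the 30 second polling budget above was exhausted without the connection appearing.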
module.fail_json(msg=f"Timeout waiting for Glue connection {module.params.get('name')}") - - -def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): - """ - Create or update an AWS Glue connection - - :param connection: AWS boto3 glue connection - :param module: Ansible module - :param glue_connection: a dict of AWS Glue connection parameters or None - :return: - """ - changed = False - - params = dict() - params["ConnectionInput"] = dict() - params["ConnectionInput"]["Name"] = module.params.get("name") - params["ConnectionInput"]["ConnectionType"] = module.params.get("connection_type") - params["ConnectionInput"]["ConnectionProperties"] = module.params.get("connection_properties") - if module.params.get("catalog_id") is not None: - params["CatalogId"] = module.params.get("catalog_id") - if module.params.get("description") is not None: - params["ConnectionInput"]["Description"] = module.params.get("description") - if module.params.get("match_criteria") is not None: - params["ConnectionInput"]["MatchCriteria"] = module.params.get("match_criteria") - if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None: - params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict() - if module.params.get("security_groups") is not None: - # Get security group IDs from names - security_group_ids = get_ec2_security_group_ids_from_names( - module.params.get("security_groups"), connection_ec2, boto3=True - ) - params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids - if module.params.get("subnet_id") is not None: - params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id") - if module.params.get("availability_zone") is not None: - params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] = module.params.get( - "availability_zone" - ) - - # If glue_connection is not None then check if it needs to be modified, else create it - if glue_connection: - if _compare_glue_connection_params(params, glue_connection): - try: - # We need to slightly modify the params for an update - update_params = copy.deepcopy(params) - update_params["Name"] = update_params["ConnectionInput"]["Name"] - if not module.check_mode: - connection.update_connection(aws_retry=True, **update_params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - else: - try: - if not module.check_mode: - connection.create_connection(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - # If changed, get the Glue connection again - if changed and not module.check_mode: - glue_connection = _await_glue_connection(connection, module) - - if glue_connection: - module.deprecate( - ( - "The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. Both values are returned for now." 
- ), - date="2024-06-01", - collection_name="community.aws", - ) - glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"] - - module.exit_json( - changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=["RawConnectionProperties"]) - ) - - -def delete_glue_connection(connection, module, glue_connection): - """ - Delete an AWS Glue connection - - :param connection: AWS boto3 glue connection - :param module: Ansible module - :param glue_connection: a dict of AWS Glue connection parameters or None - :return: - """ - changed = False - - params = {"ConnectionName": module.params.get("name")} - if module.params.get("catalog_id") is not None: - params["CatalogId"] = module.params.get("catalog_id") - - if glue_connection: - try: - if not module.check_mode: - connection.delete_connection(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed) - - -def main(): - argument_spec = dict( - availability_zone=dict(type="str"), - catalog_id=dict(type="str"), - connection_properties=dict(type="dict"), - connection_type=dict( - type="str", default="JDBC", choices=["CUSTOM", "JDBC", "KAFKA", "MARKETPLACE", "MONGODB", "NETWORK"] - ), - description=dict(type="str"), - match_criteria=dict(type="list", elements="str"), - name=dict(required=True, type="str"), - security_groups=dict(type="list", elements="str"), - state=dict(required=True, choices=["present", "absent"], type="str"), - subnet_id=dict(type="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[ - ("state", "present", ["connection_properties"]), - ("connection_type", "NETWORK", ["availability_zone", "security_groups", "subnet_id"]), - ], - supports_check_mode=True, - ) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection_glue = module.client("glue", retry_decorator=retry_decorator) - connection_ec2 = module.client("ec2", retry_decorator=retry_decorator) - - glue_connection = _get_glue_connection(connection_glue, module) - - if module.params.get("state") == "present": - create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection) - else: - delete_glue_connection(connection_glue, module, glue_connection) - - -if __name__ == "__main__": - main() diff --git a/glue_crawler.py b/glue_crawler.py deleted file mode 100644 index 5d92219df8b..00000000000 --- a/glue_crawler.py +++ /dev/null @@ -1,447 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Rob White (@wimnat) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: glue_crawler -version_added: 4.1.0 -short_description: Manage an AWS Glue crawler -description: - - Manage an AWS Glue crawler. See U(https://aws.amazon.com/glue/) for details. - - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_crawler). - The usage did not change. -author: - - 'Ivan Chekaldin (@ichekaldin)' -options: - database_name: - description: - - The name of the database where results are written. - type: str - description: - description: - - Description of the crawler being defined. - type: str - name: - description: - - The name you assign to this crawler definition. It must be unique in your account. 
- required: true - type: str - recrawl_policy: - description: - - A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run. - suboptions: - recrawl_behavior: - description: - - Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. - - Supported options are C(CRAWL_EVERYTHING) and C(CRAWL_NEW_FOLDERS_ONLY). - type: str - type: dict - role: - description: - - The name or ARN of the IAM role associated with this crawler. - - Required when I(state=present). - type: str - schema_change_policy: - description: - - The policy for the crawler's update and deletion behavior. - suboptions: - delete_behavior: - description: - - Defines the deletion behavior when the crawler finds a deleted object. - - Supported options are C(LOG), C(DELETE_FROM_DATABASE), and C(DEPRECATE_IN_DATABASE). - type: str - update_behavior: - description: - - Defines the update behavior when the crawler finds a changed schema. - - Supported options are C(LOG) and C(UPDATE_IN_DATABASE). - type: str - type: dict - state: - description: - - Create or delete the AWS Glue crawler. - required: true - choices: [ 'present', 'absent' ] - type: str - table_prefix: - description: - - The table prefix used for catalog tables that are created. - type: str - targets: - description: - - A list of targets to crawl. See example below. - - Required when I(state=present). - type: dict -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Create an AWS Glue crawler -- community.aws.glue_crawler: - name: my-glue-crawler - database_name: my_database - role: my-iam-role - schema_change_policy: - delete_behavior: DELETE_FROM_DATABASE - update_behavior: UPDATE_IN_DATABASE - recrawl_policy: - recrawl_behavior: CRAWL_EVERYTHING - targets: - S3Targets: - - Path: "s3://my-bucket/prefix/folder/" - ConnectionName: my-connection - Exclusions: - - "**.json" - - "**.yml" - state: present - -# Delete an AWS Glue crawler -- community.aws.glue_crawler: - name: my-glue-crawler - state: absent -""" - -RETURN = r""" -creation_time: - description: The time and date that this crawler definition was created. - returned: when state is present - type: str - sample: '2021-04-01T05:19:58.326000+00:00' -database_name: - description: The name of the database where results are written. - returned: when state is present - type: str - sample: my_database -description: - description: Description of the crawler. - returned: when state is present - type: str - sample: My crawler -last_updated: - description: The time and date that this crawler definition was last updated. - returned: when state is present - type: str - sample: '2021-04-01T05:19:58.326000+00:00' -name: - description: The name of the AWS Glue crawler. - returned: always - type: str - sample: my-glue-crawler -recrawl_policy: - description: A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run. - returned: when state is present - type: complex - contains: - RecrawlBehavior: - description: Whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
- returned: when state is present - type: str - sample: CRAWL_EVERYTHING -role: - description: The name or ARN of the IAM role associated with this crawler. - returned: when state is present - type: str - sample: my-iam-role -schema_change_policy: - description: The policy for the crawler's update and deletion behavior. - returned: when state is present - type: complex - contains: - DeleteBehavior: - description: The deletion behavior when the crawler finds a deleted object. - returned: when state is present - type: str - sample: DELETE_FROM_DATABASE - UpdateBehavior: - description: The update behavior when the crawler finds a changed schema. - returned: when state is present - type: str - sample: UPDATE_IN_DATABASE - -table_prefix: - description: The table prefix used for catalog tables that are created. - returned: when state is present - type: str - sample: my_prefix -targets: - description: A list of targets to crawl. - returned: when state is present - type: complex - contains: - S3Targets: - description: List of S3 targets. - returned: when state is present - type: list - JdbcTargets: - description: List of JDBC targets. - returned: when state is present - type: list - MongoDBTargets: - description: List of Mongo DB targets. - returned: when state is present - type: list - DynamoDBTargets: - description: List of DynamoDB targets. - returned: when state is present - type: list - CatalogTargets: - description: List of catalog targets. - returned: when state is present - type: list -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _get_glue_crawler(connection, module, glue_crawler_name): - """ - Get an AWS Glue crawler based on name. If not found, return None. - """ - try: - return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)["Crawler"] - except is_boto3_error_code("EntityNotFoundException"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def _trim_targets(targets): - return [_trim_target(t) for t in targets] - - -def _trim_target(target): - """ - Some target types have optional parameters which AWS will fill in and return - To compare the desired targets and the current targets we need to ignore the defaults - """ - if not target: - return None - retval = target.copy() - if not retval.get("Exclusions", None): - retval.pop("Exclusions", None) - return retval - - -def _compare_glue_crawler_params(user_params, current_params): - """ - Compare Glue crawler params. 
If there is a difference, return True immediately else return False - """ - if "DatabaseName" in user_params and user_params["DatabaseName"] != current_params["DatabaseName"]: - return True - if "Description" in user_params and user_params["Description"] != current_params["Description"]: - return True - if "RecrawlPolicy" in user_params and user_params["RecrawlPolicy"] != current_params["RecrawlPolicy"]: - return True - if "Role" in user_params and user_params["Role"] != current_params["Role"]: - return True - if ( - "SchemaChangePolicy" in user_params - and user_params["SchemaChangePolicy"] != current_params["SchemaChangePolicy"] - ): - return True - if "TablePrefix" in user_params and user_params["TablePrefix"] != current_params["TablePrefix"]: - return True - if "Targets" in user_params: - if "S3Targets" in user_params["Targets"]: - if _trim_targets(user_params["Targets"]["S3Targets"]) != _trim_targets( - current_params["Targets"]["S3Targets"] - ): - return True - if ( - "JdbcTargets" in user_params["Targets"] - and user_params["Targets"]["JdbcTargets"] != current_params["Targets"]["JdbcTargets"] - ): - if _trim_targets(user_params["Targets"]["JdbcTargets"]) != _trim_targets( - current_params["Targets"]["JdbcTargets"] - ): - return True - if ( - "MongoDBTargets" in user_params["Targets"] - and user_params["Targets"]["MongoDBTargets"] != current_params["Targets"]["MongoDBTargets"] - ): - return True - if ( - "DynamoDBTargets" in user_params["Targets"] - and user_params["Targets"]["DynamoDBTargets"] != current_params["Targets"]["DynamoDBTargets"] - ): - return True - if ( - "CatalogTargets" in user_params["Targets"] - and user_params["Targets"]["CatalogTargets"] != current_params["Targets"]["CatalogTargets"] - ): - return True - - return False - - -def ensure_tags(connection, module, glue_crawler): - changed = False - - if module.params.get("tags") is None: - return False - - account_id, partition = get_aws_account_info(module) - arn = f"arn:{partition}:glue:{module.region}:{account_id}:crawler/{module.params.get('name')}" - - try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if module.check_mode: - existing_tags = {} - else: - module.fail_json_aws(e, msg=f"Unable to get tags for Glue crawler {module.params.get('name')}") - - tags_to_add, tags_to_remove = compare_aws_tags( - existing_tags, module.params.get("tags"), module.params.get("purge_tags") - ) - - if tags_to_remove: - changed = True - if not module.check_mode: - try: - connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") - - if tags_to_add: - changed = True - if not module.check_mode: - try: - connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") - - return changed - - -def create_or_update_glue_crawler(connection, module, glue_crawler): - """ - Create or update an AWS Glue crawler - """ - - changed = False - params = dict() - params["Name"] = module.params.get("name") - params["Role"] = module.params.get("role") - params["Targets"] = module.params.get("targets") - if 
module.params.get("database_name") is not None: - params["DatabaseName"] = module.params.get("database_name") - if module.params.get("description") is not None: - params["Description"] = module.params.get("description") - if module.params.get("recrawl_policy") is not None: - params["RecrawlPolicy"] = snake_dict_to_camel_dict(module.params.get("recrawl_policy"), capitalize_first=True) - if module.params.get("role") is not None: - params["Role"] = module.params.get("role") - if module.params.get("schema_change_policy") is not None: - params["SchemaChangePolicy"] = snake_dict_to_camel_dict( - module.params.get("schema_change_policy"), capitalize_first=True - ) - if module.params.get("table_prefix") is not None: - params["TablePrefix"] = module.params.get("table_prefix") - if module.params.get("targets") is not None: - params["Targets"] = module.params.get("targets") - - if glue_crawler: - if _compare_glue_crawler_params(params, glue_crawler): - try: - if not module.check_mode: - connection.update_crawler(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - else: - try: - if not module.check_mode: - connection.create_crawler(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - glue_crawler = _get_glue_crawler(connection, module, params["Name"]) - - changed |= ensure_tags(connection, module, glue_crawler) - - module.exit_json( - changed=changed, - **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=["SchemaChangePolicy", "RecrawlPolicy", "Targets"]), - ) - - -def delete_glue_crawler(connection, module, glue_crawler): - """ - Delete an AWS Glue crawler - """ - changed = False - - if glue_crawler: - try: - if not module.check_mode: - connection.delete_crawler(aws_retry=True, Name=glue_crawler["Name"]) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed) - - -def main(): - argument_spec = dict( - database_name=dict(type="str"), - description=dict(type="str"), - name=dict(required=True, type="str"), - purge_tags=dict(type="bool", default=True), - recrawl_policy=dict(type="dict", options=dict(recrawl_behavior=dict(type="str"))), - role=dict(type="str"), - schema_change_policy=dict( - type="dict", options=dict(delete_behavior=dict(type="str"), update_behavior=dict(type="str")) - ), - state=dict(required=True, choices=["present", "absent"], type="str"), - table_prefix=dict(type="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - targets=dict(type="dict"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[("state", "present", ["role", "targets"])], - supports_check_mode=True, - ) - - connection = module.client("glue", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - state = module.params.get("state") - - glue_crawler = _get_glue_crawler(connection, module, module.params.get("name")) - - if state == "present": - create_or_update_glue_crawler(connection, module, glue_crawler) - else: - delete_glue_crawler(connection, module, glue_crawler) - - -if __name__ == "__main__": - main() diff --git a/glue_job.py b/glue_job.py deleted file mode 100644 index 2567799757e..00000000000 --- a/glue_job.py +++ /dev/null @@ -1,491 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Rob White (@wimnat) -# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: glue_job -version_added: 1.0.0 -short_description: Manage an AWS Glue job -description: - - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details. - - Prior to release 5.0.0 this module was called C(community.aws.aws_glue_job). - The usage did not change. -author: - - "Rob White (@wimnat)" - - "Vijayanand Sharma (@vijayanandsharma)" -options: - allocated_capacity: - description: - - The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs - can be allocated; the default is 10. A DPU is a relative measure of processing power that consists - of 4 vCPUs of compute capacity and 16 GB of memory. - type: int - command_name: - description: - - The name of the job command. This must be 'glueetl'. - default: glueetl - type: str - command_python_version: - description: - - Python version being used to execute a Python shell job. - - AWS currently supports C('2') or C('3'). - type: str - version_added: 2.2.0 - command_script_location: - description: - - The S3 path to a script that executes a job. - - Required when I(state=present). - type: str - connections: - description: - - A list of Glue connections used for this job. - type: list - elements: str - default_arguments: - description: - - A dict of default arguments for this job. You can specify arguments here that your own job-execution - script consumes, as well as arguments that AWS Glue itself consumes. - type: dict - description: - description: - - Description of the job being defined. - type: str - glue_version: - description: - - Glue version determines the versions of Apache Spark and Python that AWS Glue supports. - type: str - version_added: 1.5.0 - max_concurrent_runs: - description: - - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when - this threshold is reached. The maximum value you can specify is controlled by a service limit. - type: int - max_retries: - description: - - The maximum number of times to retry this job if it fails. - type: int - name: - description: - - The name you assign to this job definition. It must be unique in your account. - required: true - type: str - number_of_workers: - description: - - The number of workers of a defined workerType that are allocated when a job runs. - type: int - version_added: 1.5.0 - role: - description: - - The name or ARN of the IAM role associated with this job. - - Required when I(state=present). - type: str - state: - description: - - Create or delete the AWS Glue job. - required: true - choices: [ 'present', 'absent' ] - type: str - timeout: - description: - - The job timeout in minutes. - type: int - worker_type: - description: - - The type of predefined worker that is allocated when a job runs. - choices: [ 'Standard', 'G.1X', 'G.2X' ] - type: str - version_added: 1.5.0 -notes: - - Support for I(tags) and I(purge_tags) was added in release 2.2.0. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
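# A sketch of a job with explicit worker sizing (the Glue version, worker
# type, and worker count below are illustrative values, not requirements):
- community.aws.glue_job:
    command_script_location: "s3://s3bucket/script.py"
    glue_version: "2.0"
    worker_type: G.1X
    number_of_workers: 2
    name: my-sized-glue-job
    role: my-iam-role
    state: present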
- -# Create an AWS Glue job -- community.aws.glue_job: - command_script_location: "s3://s3bucket/script.py" - default_arguments: - "--extra-py-files": s3://s3bucket/script-package.zip - "--TempDir": "s3://s3bucket/temp/" - name: my-glue-job - role: my-iam-role - state: present - -# Delete an AWS Glue job -- community.aws.glue_job: - name: my-glue-job - state: absent -""" - -RETURN = r""" -allocated_capacity: - description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to - 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power - that consists of 4 vCPUs of compute capacity and 16 GB of memory. - returned: when state is present - type: int - sample: 10 -command: - description: The JobCommand that executes this job. - returned: when state is present - type: complex - contains: - name: - description: The name of the job command. - returned: when state is present - type: str - sample: glueetl - script_location: - description: Specifies the S3 path to a script that executes a job. - returned: when state is present - type: str - sample: mybucket/myscript.py - python_version: - description: Specifies the Python version. - returned: when state is present - type: str - sample: 3 -connections: - description: The connections used for this job. - returned: when state is present - type: dict - sample: "{ Connections: [ 'list', 'of', 'connections' ] }" -created_on: - description: The time and date that this job definition was created. - returned: when state is present - type: str - sample: "2018-04-21T05:19:58.326000+00:00" -default_arguments: - description: The default arguments for this job, specified as name-value pairs. - returned: when state is present - type: dict - sample: "{ 'mykey1': 'myvalue1' }" -description: - description: Description of the job being defined. - returned: when state is present - type: str - sample: My first Glue job -glue_version: - description: Glue version. - returned: when state is present - type: str - sample: 2.0 -job_name: - description: The name of the AWS Glue job. - returned: always - type: str - sample: my-glue-job -execution_property: - description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. - returned: always - type: complex - contains: - max_concurrent_runs: - description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is - returned when this threshold is reached. The maximum value you can specify is controlled by - a service limit. - returned: when state is present - type: int - sample: 1 -last_modified_on: - description: The last point in time when this job definition was modified. - returned: when state is present - type: str - sample: "2018-04-21T05:19:58.326000+00:00" -max_retries: - description: The maximum number of times to retry this job after a JobRun fails. - returned: when state is present - type: int - sample: 5 -name: - description: The name assigned to this job definition. - returned: when state is present - type: str - sample: my-glue-job -role: - description: The name or ARN of the IAM role associated with this job. - returned: when state is present - type: str - sample: my-iam-role -timeout: - description: The job timeout in minutes. 
- returned: when state is present - type: int - sample: 300 -""" - -import copy - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _get_glue_job(connection, module, glue_job_name): - """ - Get an AWS Glue job based on name. If not found, return None. - - :param connection: AWS boto3 glue connection - :param module: Ansible module - :param glue_job_name: Name of Glue job to get - :return: boto3 Glue job dict or None if not found - """ - try: - return connection.get_job(aws_retry=True, JobName=glue_job_name)["Job"] - except is_boto3_error_code("EntityNotFoundException"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - - -def _compare_glue_job_params(user_params, current_params): - """ - Compare Glue job params. If there is a difference, return True immediately else return False - - :param user_params: the Glue job parameters passed by the user - :param current_params: the Glue job parameters currently configured - :return: True if any parameter is mismatched else False - """ - # Weirdly, boto3 doesn't return some keys if the value is empty e.g. 
Description - # To counter this, add the key if it's missing with a blank value - - if "Description" not in current_params: - current_params["Description"] = "" - if "DefaultArguments" not in current_params: - current_params["DefaultArguments"] = dict() - - if "AllocatedCapacity" in user_params and user_params["AllocatedCapacity"] != current_params["AllocatedCapacity"]: - return True - if "Command" in user_params: - if user_params["Command"]["ScriptLocation"] != current_params["Command"]["ScriptLocation"]: - return True - if user_params["Command"]["PythonVersion"] != current_params["Command"]["PythonVersion"]: - return True - if "Connections" in user_params and user_params["Connections"] != current_params["Connections"]: - return True - if "DefaultArguments" in user_params and user_params["DefaultArguments"] != current_params["DefaultArguments"]: - return True - if "Description" in user_params and user_params["Description"] != current_params["Description"]: - return True - if ( - "ExecutionProperty" in user_params - and user_params["ExecutionProperty"]["MaxConcurrentRuns"] - != current_params["ExecutionProperty"]["MaxConcurrentRuns"] - ): - return True - if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: - return True - if "MaxRetries" in user_params and user_params["MaxRetries"] != current_params["MaxRetries"]: - return True - if "Role" in user_params and user_params["Role"] != current_params["Role"]: - return True - if "Timeout" in user_params and user_params["Timeout"] != current_params["Timeout"]: - return True - if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]: - return True - if "WorkerType" in user_params and user_params["WorkerType"] != current_params["WorkerType"]: - return True - if "NumberOfWorkers" in user_params and user_params["NumberOfWorkers"] != current_params["NumberOfWorkers"]: - return True - - return False - - -def ensure_tags(connection, module, glue_job): - changed = False - - if module.params.get("tags") is None: - return False - - account_id, partition = get_aws_account_info(module) - arn = f"arn:{partition}:glue:{module.region}:{account_id}:job/{module.params.get('name')}" - - try: - existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if module.check_mode: - existing_tags = {} - else: - module.fail_json_aws(e, msg=f"Unable to get tags for Glue job {module.params.get('name')}") - - tags_to_add, tags_to_remove = compare_aws_tags( - existing_tags, module.params.get("tags"), module.params.get("purge_tags") - ) - - if tags_to_remove: - changed = True - if not module.check_mode: - try: - connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") - - if tags_to_add: - changed = True - if not module.check_mode: - try: - connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") - - return changed - - -def create_or_update_glue_job(connection, module, glue_job): - """ - Create or update an AWS Glue job - - :param connection: AWS boto3 glue connection - :param module: 
Ansible module - :param glue_job: a dict of AWS Glue job parameters or None - :return: - """ - - changed = False - params = dict() - params["Name"] = module.params.get("name") - params["Role"] = module.params.get("role") - if module.params.get("allocated_capacity") is not None: - params["AllocatedCapacity"] = module.params.get("allocated_capacity") - if module.params.get("command_script_location") is not None: - params["Command"] = { - "Name": module.params.get("command_name"), - "ScriptLocation": module.params.get("command_script_location"), - } - if module.params.get("command_python_version") is not None: - params["Command"]["PythonVersion"] = module.params.get("command_python_version") - if module.params.get("connections") is not None: - params["Connections"] = {"Connections": module.params.get("connections")} - if module.params.get("default_arguments") is not None: - params["DefaultArguments"] = module.params.get("default_arguments") - if module.params.get("description") is not None: - params["Description"] = module.params.get("description") - if module.params.get("glue_version") is not None: - params["GlueVersion"] = module.params.get("glue_version") - if module.params.get("max_concurrent_runs") is not None: - params["ExecutionProperty"] = {"MaxConcurrentRuns": module.params.get("max_concurrent_runs")} - if module.params.get("max_retries") is not None: - params["MaxRetries"] = module.params.get("max_retries") - if module.params.get("timeout") is not None: - params["Timeout"] = module.params.get("timeout") - if module.params.get("glue_version") is not None: - params["GlueVersion"] = module.params.get("glue_version") - if module.params.get("worker_type") is not None: - params["WorkerType"] = module.params.get("worker_type") - if module.params.get("number_of_workers") is not None: - params["NumberOfWorkers"] = module.params.get("number_of_workers") - - # If glue_job is not None then check if it needs to be modified, else create it - if glue_job: - if _compare_glue_job_params(params, glue_job): - try: - # Update job needs slightly modified params - update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)} - del update_params["JobUpdate"]["Name"] - if not module.check_mode: - connection.update_job(aws_retry=True, **update_params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - else: - try: - if not module.check_mode: - connection.create_job(aws_retry=True, **params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - glue_job = _get_glue_job(connection, module, params["Name"]) - - changed |= ensure_tags(connection, module, glue_job) - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=["DefaultArguments"])) - - -def delete_glue_job(connection, module, glue_job): - """ - Delete an AWS Glue job - - :param connection: AWS boto3 glue connection - :param module: Ansible module - :param glue_job: a dict of AWS Glue job parameters or None - :return: - """ - changed = False - - if glue_job: - try: - if not module.check_mode: - connection.delete_job(aws_retry=True, JobName=glue_job["Name"]) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed) - - -def main(): - argument_spec = dict( - allocated_capacity=dict(type="int"), - command_name=dict(type="str", 
default="glueetl"), - command_python_version=dict(type="str"), - command_script_location=dict(type="str"), - connections=dict(type="list", elements="str"), - default_arguments=dict(type="dict"), - description=dict(type="str"), - glue_version=dict(type="str"), - max_concurrent_runs=dict(type="int"), - max_retries=dict(type="int"), - name=dict(required=True, type="str"), - number_of_workers=dict(type="int"), - purge_tags=dict(type="bool", default=True), - role=dict(type="str"), - state=dict(required=True, choices=["present", "absent"], type="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - timeout=dict(type="int"), - worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[("state", "present", ["role", "command_script_location"])], - supports_check_mode=True, - ) - - retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client("glue", retry_decorator=retry_decorator) - - state = module.params.get("state") - - glue_job = _get_glue_job(connection, module, module.params.get("name")) - - if state == "present": - create_or_update_glue_job(connection, module, glue_job) - else: - delete_glue_job(connection, module, glue_job) - - -if __name__ == "__main__": - main() diff --git a/iam_access_key.py b/iam_access_key.py deleted file mode 100644 index ae3e9e7dd11..00000000000 --- a/iam_access_key.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_access_key -version_added: 2.1.0 -short_description: Manage AWS IAM User access keys -description: - - Manage AWS IAM user access keys. -author: - - Mark Chappell (@tremble) -options: - user_name: - description: - - The name of the IAM User to which the key belongs. - required: true - type: str - aliases: ['username'] - id: - description: - - The ID of the access key. - - Required when I(state=absent). - - Mutually exclusive with I(rotate_keys). - required: false - type: str - state: - description: - - Create or remove the access key. - - When I(state=present) and I(id) is not defined a new key will be created. - required: false - type: str - default: 'present' - choices: [ 'present', 'absent' ] - active: - description: - - Whether the key should be enabled or disabled. - - Defaults to C(true) when creating a new key. - required: false - type: bool - aliases: ['enabled'] - rotate_keys: - description: - - When there are already 2 access keys attached to the IAM user the oldest - key will be removed and a new key created. - - Ignored if I(state=absent) - - Mutually exclusive with I(id). - required: false - type: bool - default: false - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create a new access key - community.aws.iam_access_key: - user_name: example_user - state: present - -- name: Delete the access_key - community.aws.iam_access_key: - user_name: example_user - id: AKIA1EXAMPLE1EXAMPLE - state: absent -""" - -RETURN = r""" -access_key: - description: A dictionary containing all the access key information. - returned: When the key exists. - type: complex - contains: - access_key_id: - description: The ID for the access key. 
- returned: success - type: str - sample: AKIA1EXAMPLE1EXAMPLE - create_date: - description: The date and time, in ISO 8601 date-time format, when the access key was created. - returned: success - type: str - sample: "2021-10-09T13:25:42+00:00" - user_name: - description: The name of the IAM user to which the key is attached. - returned: success - type: str - sample: example_user - status: - description: - - The status of the key. - - C(Active) means it can be used. - - C(Inactive) means it can not be used. - returned: success - type: str - sample: Inactive -secret_access_key: - description: - - The secret access key. - - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data. - - Secret access keys can only be accessed at creation time. - returned: When a new key is created. - type: str - sample: example/Example+EXAMPLE+example/Example -deleted_access_key_id: - description: - - The access key deleted during rotation. - returned: When a key was deleted during the rotation of access keys - type: str - sample: AKIA1EXAMPLE1EXAMPLE -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def delete_access_key(access_keys, user, access_key_id): - if not access_key_id: - return False - - if access_key_id not in access_keys: - return False - - if module.check_mode: - return True - - try: - client.delete_access_key( - aws_retry=True, - UserName=user, - AccessKeyId=access_key_id, - ) - except is_boto3_error_code("NoSuchEntityException"): - # Generally occurs when race conditions have happened and someone - # deleted the key while we were checking to see if it existed. 
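# Returning False here keeps the module idempotent: whether this task deleted the key
# or someone else raced us to it, the key is gone either way, so the task reports
# "no change" rather than an error.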
- return False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f'Failed to delete access key "{access_key_id}" for user "{user}"') - - return True - - -def update_access_key(access_keys, user, access_key_id, enabled): - if access_key_id not in access_keys: - module.fail_json( - msg=f'Access key "{access_key_id}" not found attached to User "{user}"', - ) - - changes = dict() - access_key = access_keys.get(access_key_id) - - if enabled is not None: - desired_status = "Active" if enabled else "Inactive" - if access_key.get("status") != desired_status: - changes["Status"] = desired_status - - if not changes: - return False - - if module.check_mode: - return True - - try: - client.update_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id, **changes) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - changes=changes, - msg=f'Failed to update access key "{access_key_id}" for user "{user}"', - ) - return True - - -def create_access_key(access_keys, user, rotate_keys, enabled): - changed = False - oldest_key = False - - if len(access_keys) > 1 and rotate_keys: - sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get("create_date", None)) - oldest_key = sorted_keys[0] - changed |= delete_access_key(access_keys, user, oldest_key) - - if module.check_mode: - if changed: - return dict(deleted_access_key=oldest_key) - return True - - try: - results = client.create_access_key(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f'Failed to create access key for user "{user}"') - results = camel_dict_to_snake_dict(results) - access_key = results.get("access_key") - access_key = normalize_boto3_result(access_key) - - # Update settings which can't be managed on creation - if enabled is False: - access_key_id = access_key["access_key_id"] - access_keys = {access_key_id: access_key} - update_access_key(access_keys, user, access_key_id, enabled) - access_key["status"] = "Inactive" - - if oldest_key: - access_key["deleted_access_key"] = oldest_key - - return access_key - - -def get_access_keys(user): - try: - results = client.list_access_keys(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') - if not results: - return None - - results = camel_dict_to_snake_dict(results) - access_keys = results.get("access_key_metadata", []) - if not access_keys: - return [] - - access_keys = normalize_boto3_result(access_keys) - access_keys = {k["access_key_id"]: k for k in access_keys} - return access_keys - - -def main(): - global module - global client - - argument_spec = dict( - user_name=dict(required=True, type="str", aliases=["username"]), - id=dict(required=False, type="str"), - state=dict(required=False, choices=["present", "absent"], default="present"), - active=dict(required=False, type="bool", aliases=["enabled"]), - rotate_keys=dict(required=False, type="bool", default=False), - ) - - required_if = [ - ["state", "absent", ("id",)], - ] - mutually_exclusive = [ - ["rotate_keys", "id"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, - ) - - client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - - changed = False - 
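# Three distinct flows follow: state=absent deletes the named key; state=present with an
# explicit id only toggles that key's Active/Inactive status; and state=present without an
# id creates a new key, rotating out the oldest one when rotate_keys is set.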
state = module.params.get("state") - user = module.params.get("user_name") - access_key_id = module.params.get("id") - rotate_keys = module.params.get("rotate_keys") - enabled = module.params.get("active") - - access_keys = get_access_keys(user) - results = dict() - - if state == "absent": - changed |= delete_access_key(access_keys, user, access_key_id) - else: - # If we have an ID then we should try to update it - if access_key_id: - changed |= update_access_key(access_keys, user, access_key_id, enabled) - access_keys = get_access_keys(user) - results["access_key"] = access_keys.get(access_key_id, None) - # Otherwise we try to create a new one - else: - secret_key = create_access_key(access_keys, user, rotate_keys, enabled) - if isinstance(secret_key, bool): - changed |= secret_key - else: - changed = True - results["access_key_id"] = secret_key.get("access_key_id", None) - results["secret_access_key"] = secret_key.pop("secret_access_key", None) - results["deleted_access_key_id"] = secret_key.pop("deleted_access_key", None) - if secret_key: - results["access_key"] = secret_key - results = scrub_none_parameters(results) - - module.exit_json(changed=changed, **results) - - -if __name__ == "__main__": - main() diff --git a/iam_access_key_info.py b/iam_access_key_info.py deleted file mode 100644 index 0ea8b514122..00000000000 --- a/iam_access_key_info.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_access_key_info -version_added: 2.1.0 -short_description: Fetch information about AWS IAM User access keys -description: - - 'Fetches information about AWS IAM user access keys.' - - 'Note: It is not possible to fetch the secret access key.' -author: - - Mark Chappell (@tremble) -options: - user_name: - description: - - The name of the IAM User to which the keys belong. - required: true - type: str - aliases: ['username'] - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Fetch Access keys for a user - community.aws.iam_access_key_info: - user_name: example_user -""" - -RETURN = r""" -access_keys: - description: A list of dictionaries containing all the access key information. - returned: When the keys exist. - type: list - elements: dict - contains: - access_key_id: - description: The ID for the access key. - returned: success - type: str - sample: AKIA1EXAMPLE1EXAMPLE - create_date: - description: The date and time, in ISO 8601 date-time format, when the access key was created. - returned: success - type: str - sample: "2021-10-09T13:25:42+00:00" - user_name: - description: The name of the IAM user to which the key is attached. - returned: success - type: str - sample: example_user - status: - description: - - The status of the key. - - C(Active) means it can be used. - - C(Inactive) means it can not be used. 
- returned: success - type: str - sample: Inactive -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_access_keys(user): - try: - results = client.list_access_keys(aws_retry=True, UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') - if not results: - return None - - results = camel_dict_to_snake_dict(results) - access_keys = results.get("access_key_metadata", []) - if not access_keys: - return [] - - access_keys = normalize_boto3_result(access_keys) - access_keys = sorted(access_keys, key=lambda d: d.get("create_date", None)) - return access_keys - - -def main(): - global module - global client - - argument_spec = dict( - user_name=dict(required=True, type="str", aliases=["username"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - - changed = False - user = module.params.get("user_name") - access_keys = get_access_keys(user) - - module.exit_json(changed=changed, access_keys=access_keys) - - -if __name__ == "__main__": - main() diff --git a/iam_group.py b/iam_group.py deleted file mode 100644 index f88ebac120d..00000000000 --- a/iam_group.py +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_group -version_added: 1.0.0 -short_description: Manage AWS IAM groups -description: - - Manage AWS IAM groups. -author: - - Nick Aslanidis (@naslanidis) - - Maksym Postument (@infectsoldier) -options: - name: - description: - - The name of the group to create. - required: true - type: str - managed_policies: - description: - - A list of managed policy ARNs or friendly names to attach to the group. - - To embed an inline policy, use M(community.aws.iam_policy). - required: false - type: list - elements: str - default: [] - aliases: ['managed_policy'] - users: - description: - - A list of existing users to add as members of the group. - required: false - type: list - elements: str - default: [] - state: - description: - - Create or remove the IAM group. - required: true - choices: [ 'present', 'absent' ] - type: str - purge_policies: - description: - - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. - required: false - default: false - type: bool - aliases: ['purge_policy', 'purge_managed_policies'] - purge_users: - description: - - When I(purge_users=true) users which are not included in I(users) will be removed from the group. - required: false - default: false - type: bool -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
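# Both purge flags default to false, so the tasks below only ever add policies or members.
# As an illustrative sketch (not part of the original examples, reusing the testgroup1 and
# test_user1 names from the tasks below), combining users with purge_users replaces the
# membership outright:
- name: Replace all existing group members with test_user1
  community.aws.iam_group:
    name: testgroup1
    users:
      - test_user1
    purge_users: true
    state: present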
 - -- name: Create a group - community.aws.iam_group: - name: testgroup1 - state: present - -- name: Create a group and attach a managed policy using its ARN - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - state: present - -- name: Create a group with users as members and attach a managed policy using its ARN - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - users: - - test_user1 - - test_user2 - state: present - -- name: Remove all managed policies from an existing group with an empty list - community.aws.iam_group: - name: testgroup1 - state: present - purge_policies: true - -- name: Remove all group members from an existing group - community.aws.iam_group: - name: testgroup1 - managed_policies: - - arn:aws:iam::aws:policy/AmazonSNSFullAccess - purge_users: true - state: present - -- name: Delete the group - community.aws.iam_group: - name: testgroup1 - state: absent - -""" -RETURN = r""" -iam_group: - description: dictionary containing all the group information including group membership - returned: success - type: complex - contains: - group: - description: dictionary containing all the group information - returned: success - type: complex - contains: - arn: - description: the Amazon Resource Name (ARN) specifying the group - type: str - sample: "arn:aws:iam::1234567890:group/testgroup1" - create_date: - description: the date and time, in ISO 8601 date-time format, when the group was created - type: str - sample: "2017-02-08T04:36:28+00:00" - group_id: - description: the stable and unique string identifying the group - type: str - sample: AGPA12345EXAMPLE54321 - group_name: - description: the friendly name that identifies the group - type: str - sample: testgroup1 - path: - description: the path to the group - type: str - sample: / - users: - description: list containing all the group members - returned: success - type: complex - contains: - arn: - description: the Amazon Resource Name (ARN) specifying the user - type: str - sample: "arn:aws:iam::1234567890:user/test_user1" - create_date: - description: the date and time, in ISO 8601 date-time format, when the user was created - type: str - sample: "2017-02-08T04:36:28+00:00" - user_id: - description: the stable and unique string identifying the user - type: str - sample: AIDA12345EXAMPLE54321 - user_name: - description: the friendly name that identifies the user - type: str - sample: test_user1 - path: - description: the path to the user - type: str - sample: / -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def compare_attached_group_policies(current_attached_policies, new_attached_policies): - # If new_attached_policies is None it means we want to remove all policies - if len(current_attached_policies) > 0 and new_attached_policies is None: - return False - - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - 
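# Each entry returned by list_attached_group_policies is a dict carrying PolicyName and
# PolicyArn; only the ARNs matter for the comparison below.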
current_attached_policies_arn_list.append(policy["PolicyArn"]) - - if set(current_attached_policies_arn_list) == set(new_attached_policies): - return True - else: - return False - - -def compare_group_members(current_group_members, new_group_members): - # If new_attached_policies is None it means we want to remove all policies - if len(current_group_members) > 0 and new_group_members is None: - return False - if set(current_group_members) == set(new_group_members): - return True - else: - return False - - -def convert_friendly_names_to_arns(connection, module, policy_names): - if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): - return policy_names - allpolicies = {} - paginator = connection.get_paginator("list_policies") - policies = paginator.paginate().build_full_result()["Policies"] - - for policy in policies: - allpolicies[policy["PolicyName"]] = policy["Arn"] - allpolicies[policy["Arn"]] = policy["Arn"] - try: - return [allpolicies[policy] for policy in policy_names if policy is not None] - except KeyError as e: - module.fail_json(msg="Couldn't find policy: " + str(e)) - - -def create_or_update_group(connection, module): - params = dict() - params["GroupName"] = module.params.get("name") - managed_policies = module.params.get("managed_policies") - users = module.params.get("users") - purge_users = module.params.get("purge_users") - purge_policies = module.params.get("purge_policies") - changed = False - if managed_policies: - managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) - - # Get group - try: - group = get_group(connection, module, params["GroupName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get group") - - # If group is None, create it - if group is None: - # Check mode means we would create the group - if module.check_mode: - module.exit_json(changed=True) - - try: - group = connection.create_group(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create group") - - # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params["GroupName"]) - if not compare_attached_group_policies(current_attached_policies, managed_policies): - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy["PolicyArn"]) - - # If managed_policies has a single empty element we want to remove all attached policies - if purge_policies: - # Detach policies not present - for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): - changed = True - if not module.check_mode: - try: - connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't detach policy from group {params['GroupName']}") - # If there are policies to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(managed_policies) - set(current_attached_policies_arn_list): - changed = True - # If there are policies in managed_policies attach each policy - if managed_policies != [None] and not module.check_mode: - for policy_arn in managed_policies: - try: - 
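# Attaching a managed policy that is already attached appears to be harmless on the AWS
# side, so the loop simply (re)attaches everything requested instead of computing a diff.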
connection.attach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't attach policy to group {params['GroupName']}") - - # Manage group memberships - try: - current_group_members = get_group(connection, module, params["GroupName"])["Users"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") - - current_group_members_list = [] - for member in current_group_members: - current_group_members_list.append(member["UserName"]) - - if not compare_group_members(current_group_members_list, users): - if purge_users: - for user in list(set(current_group_members_list) - set(users)): - # Ensure we mark things have changed if any user gets purged - changed = True - # Skip actions for check mode - if not module.check_mode: - try: - connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't remove user {user} from group {params['GroupName']}") - # If there are users to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(users) - set(current_group_members_list): - changed = True - # Skip actions for check mode - if users != [None] and not module.check_mode: - for user in users: - try: - connection.add_user_to_group(GroupName=params["GroupName"], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't add user {user} to group {params['GroupName']}") - if module.check_mode: - module.exit_json(changed=changed) - - # Get the group again - try: - group = get_group(connection, module, params["GroupName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") - - module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) - - -def destroy_group(connection, module): - params = dict() - params["GroupName"] = module.params.get("name") - - try: - group = get_group(connection, module, params["GroupName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") - if group: - # Check mode means we would remove this group - if module.check_mode: - module.exit_json(changed=True) - - # Remove any attached policies otherwise deletion fails - try: - for policy in get_attached_policy_list(connection, module, params["GroupName"]): - connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy["PolicyArn"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't remove policy from group {params['GroupName']}") - - # Remove any users in the group otherwise deletion fails - current_group_members_list = [] - try: - current_group_members = get_group(connection, module, params["GroupName"])["Users"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") - for member in current_group_members: - current_group_members_list.append(member["UserName"]) - for user in current_group_members_list: - try: - 
connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't remove user {user} from group {params['GroupName']}") - - try: - connection.delete_group(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, f"Couldn't delete group {params['GroupName']}") - - else: - module.exit_json(changed=False) - - module.exit_json(changed=True) - - -@AWSRetry.exponential_backoff() -def get_group(connection, module, name): - try: - paginator = connection.get_paginator("get_group") - return paginator.paginate(GroupName=name).build_full_result() - except is_boto3_error_code("NoSuchEntity"): - return None - - -@AWSRetry.exponential_backoff() -def get_attached_policy_list(connection, module, name): - try: - paginator = connection.get_paginator("list_attached_group_policies") - return paginator.paginate(GroupName=name).build_full_result()["AttachedPolicies"] - except is_boto3_error_code("NoSuchEntity"): - return None - - -def main(): - argument_spec = dict( - name=dict(required=True), - managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"), - users=dict(default=[], type="list", elements="str"), - state=dict(choices=["present", "absent"], required=True), - purge_users=dict(default=False, type="bool"), - purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - connection = module.client("iam") - - state = module.params.get("state") - - if state == "present": - create_or_update_group(connection, module) - else: - destroy_group(connection, module) - - -if __name__ == "__main__": - main() diff --git a/iam_managed_policy.py b/iam_managed_policy.py deleted file mode 100644 index cc7fd8450e5..00000000000 --- a/iam_managed_policy.py +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_managed_policy -version_added: 1.0.0 -short_description: Manage User Managed IAM policies -description: - - Allows creating and removing managed IAM policies. -options: - policy_name: - description: - - The name of the managed policy. - required: True - type: str - policy_description: - description: - - A helpful description of this policy; this value is immutable and is only set when creating a new policy. - default: '' - type: str - policy: - description: - - A properly JSON-formatted policy. - type: json - make_default: - description: - - Make this revision the default revision. - default: True - type: bool - only_version: - description: - - Remove all other non-default revisions. If this is used with C(make_default), it will result in all other versions of this policy being deleted. - type: bool - default: false - state: - description: - - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found. 
- default: present - choices: [ "present", "absent" ] - type: str - -author: - - "Dan Kozlowski (@dkhenry)" -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Create a policy -- name: Create IAM Managed Policy - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy_description: "A Helpful managed policy" - policy: "{{ lookup('template', 'managed_policy.json.j2') }}" - state: present - -# Update a policy with a new default version -- name: Update an IAM Managed Policy with new default version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: "{{ lookup('file', 'managed_policy_update.json') }}" - state: present - -# Update a policy with a new non default version -- name: Update an IAM Managed Policy with a non default version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: - Version: "2012-10-17" - Statement: - - Effect: "Allow" - Action: "logs:CreateLogGroup" - Resource: "*" - make_default: false - state: present - -# Update a policy and make it the only version and the default version -- name: Update an IAM Managed Policy with default version as the only version - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - policy: | - { - "Version": "2012-10-17", - "Statement":[{ - "Effect": "Allow", - "Action": "logs:PutRetentionPolicy", - "Resource": "*" - }] - } - only_version: true - state: present - -# Remove a policy -- name: Remove an existing IAM Managed Policy - community.aws.iam_managed_policy: - policy_name: "ManagedPolicy" - state: absent -""" - -RETURN = r""" -policy: - description: Returns the policy JSON structure. When state == absent this will return the value of the removed policy. 
- returned: success - type: complex - contains: {} - sample: '{ - "arn": "arn:aws:iam::aws:policy/AdministratorAccess", - "attachment_count": 0, - "create_date": "2017-03-01T15:42:55.981000+00:00", - "default_version_id": "v1", - "is_attachable": true, - "path": "/", - "policy_id": "ANPA1245EXAMPLE54321", - "policy_name": "AdministratorAccess", - "update_date": "2017-03-01T15:42:55.981000+00:00" - }' -""" - -import json - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_policies_with_backoff(): - paginator = client.get_paginator("list_policies") - return paginator.paginate(Scope="Local").build_full_result() - - -def get_policy_by_name(name): - try: - response = list_policies_with_backoff() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policies") - for policy in response["Policies"]: - if policy["PolicyName"] == name: - return policy - return None - - -def delete_oldest_non_default_version(policy): - try: - versions = [ - v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - versions.sort(key=lambda v: v["CreateDate"], reverse=True) - for v in versions[-1:]: - try: - client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version") - - -# This needs to return policy_version, changed -def get_or_create_policy_version(policy, policy_document): - try: - versions = client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - - for v in versions: - try: - document = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"])["PolicyVersion"][ - "Document" - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't get policy version {v['VersionId']}") - - if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): - return v, True - - # If the current policy matches the existing one - if not compare_policies(document, json.loads(to_native(policy_document))): - return v, False - - # No existing version so create one - # There is a service limit (typically 5) of policy versions. - # - # Rather than assume that it is 5, we'll try to create the policy - # and if that doesn't work, delete the oldest non-default policy version - # and try again. 
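# A hypothetical walkthrough with the default limit of five versions: if v1-v5 are stored
# and v3 is the default, create_policy_version raises LimitExceeded; delete_oldest_non_default_version
# then removes v1 (the oldest non-default version), and the retry stores the new document
# as v6, with v3 remaining the default until set_if_default runs.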
- try: - version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)["PolicyVersion"] - return version, True - except is_boto3_error_code("LimitExceeded"): - delete_oldest_non_default_version(policy) - try: - version = client.create_policy_version(PolicyArn=policy["Arn"], PolicyDocument=policy_document)[ - "PolicyVersion" - ] - return version, True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: - module.fail_json_aws(second_e, msg="Couldn't create policy version") - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create policy version") - - -def set_if_default(policy, policy_version, is_default): - if is_default and not policy_version["IsDefaultVersion"]: - try: - client.set_default_policy_version(PolicyArn=policy["Arn"], VersionId=policy_version["VersionId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't set default policy version") - return True - return False - - -def set_if_only(policy, policy_version, is_only): - if is_only: - try: - versions = [ - v for v in client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"] if not v["IsDefaultVersion"] - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - try: - client.delete_policy_version(PolicyArn=policy["Arn"], VersionId=v["VersionId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version") - return len(versions) > 0 - return False - - -def detach_all_entities(policy, **kwargs): - try: - entities = client.list_entities_for_policy(PolicyArn=policy["Arn"], **kwargs) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't detach list entities for policy {policy['PolicyName']}") - - for g in entities["PolicyGroups"]: - try: - client.detach_group_policy(PolicyArn=policy["Arn"], GroupName=g["GroupName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't detach group policy {g['GroupName']}") - for u in entities["PolicyUsers"]: - try: - client.detach_user_policy(PolicyArn=policy["Arn"], UserName=u["UserName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't detach user policy {u['UserName']}") - for r in entities["PolicyRoles"]: - try: - client.detach_role_policy(PolicyArn=policy["Arn"], RoleName=r["RoleName"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't detach role policy {r['RoleName']}") - if entities["IsTruncated"]: - detach_all_entities(policy, marker=entities["Marker"]) - - -def create_or_update_policy(existing_policy): - name = module.params.get("policy_name") - description = module.params.get("policy_description") - default = module.params.get("make_default") - only = module.params.get("only_version") - - policy = None - - if module.params.get("policy") is not None: - policy = json.dumps(json.loads(module.params.get("policy"))) - - if existing_policy is None: - if module.check_mode: - module.exit_json(changed=True) - - # Create policy when none already 
exists - try: - rvalue = client.create_policy(PolicyName=name, Path="/", PolicyDocument=policy, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't create policy {name}") - - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue["Policy"])) - else: - policy_version, changed = get_or_create_policy_version(existing_policy, policy) - changed = set_if_default(existing_policy, policy_version, default) or changed - changed = set_if_only(existing_policy, policy_version, only) or changed - - # If anything has changed we need to refresh the policy - if changed: - try: - updated_policy = client.get_policy(PolicyArn=existing_policy["Arn"])["Policy"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Couldn't get policy") - - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy)) - else: - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy)) - - -def delete_policy(existing_policy): - # Check for existing policy - if existing_policy: - if module.check_mode: - module.exit_json(changed=True) - - # Detach policy - detach_all_entities(existing_policy) - # Delete Versions - try: - versions = client.list_policy_versions(PolicyArn=existing_policy["Arn"])["Versions"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - if not v["IsDefaultVersion"]: - try: - client.delete_policy_version(PolicyArn=existing_policy["Arn"], VersionId=v["VersionId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete policy version {v['VersionId']}") - # Delete policy - try: - client.delete_policy(PolicyArn=existing_policy["Arn"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete policy {existing_policy['PolicyName']}") - - # This is the one case where we will return the old policy - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy)) - else: - module.exit_json(changed=False, policy=None) - - -def main(): - global module - global client - - argument_spec = dict( - policy_name=dict(required=True), - policy_description=dict(default=""), - policy=dict(type="json"), - make_default=dict(type="bool", default=True), - only_version=dict(type="bool", default=False), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["policy"]]], - supports_check_mode=True, - ) - - name = module.params.get("policy_name") - state = module.params.get("state") - - try: - client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - existing_policy = get_policy_by_name(name) - - if state == "present": - create_or_update_policy(existing_policy) - else: - delete_policy(existing_policy) - - -if __name__ == "__main__": - main() diff --git a/iam_mfa_device_info.py b/iam_mfa_device_info.py deleted file mode 100644 index 44b38ab90f4..00000000000 --- a/iam_mfa_device_info.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible 
Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_mfa_device_info -version_added: 1.0.0 -short_description: List the MFA (Multi-Factor Authentication) devices registered for a user -description: - - List the MFA (Multi-Factor Authentication) devices registered for a user -author: - - Victor Costan (@pwnall) -options: - user_name: - description: - - The name of the user whose MFA devices will be listed - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -mfa_devices: - description: The MFA devices registered for the given user - returned: always - type: list - sample: - - enable_date: "2016-03-11T23:25:36+00:00" - serial_number: arn:aws:iam::123456789012:mfa/example - user_name: example - - enable_date: "2016-03-11T23:25:37+00:00" - serial_number: arn:aws:iam::123456789012:mfa/example - user_name: example -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html -- name: List MFA devices - community.aws.iam_mfa_device_info: - register: mfa_devices - -# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html -- name: Assume an existing role - community.aws.sts_assume_role: - mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}" - role_arn: "arn:aws:iam::123456789012:role/someRole" - role_session_name: "someRoleSession" - register: assumed_role -""" - -try: - import botocore - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def list_mfa_devices(connection, module): - user_name = module.params.get("user_name") - changed = False - - args = {} - if user_name is not None: - args["UserName"] = user_name - try: - response = connection.list_mfa_devices(**args) - except ClientError as e: - module.fail_json_aws(e, msg="Failed to list MFA devices") - - module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) - - -def main(): - argument_spec = dict( - user_name=dict(required=False, default=None), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - connection = module.client("iam") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - list_mfa_devices(connection, module) - - -if __name__ == "__main__": - main() diff --git a/iam_password_policy.py b/iam_password_policy.py deleted file mode 100644 index 5c65f7ebaec..00000000000 --- a/iam_password_policy.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_password_policy -version_added: 1.0.0 -short_description: Update an IAM Password Policy -description: - - Module updates an IAM Password Policy on a given AWS account -author: - - "Aaron Smith (@slapula)" -options: - state: - description: - - Specifies the overall state of the password policy. 
- required: true - choices: ['present', 'absent'] - type: str - min_pw_length: - description: - - Minimum password length. - default: 6 - aliases: [minimum_password_length] - type: int - require_symbols: - description: - - Require symbols in password. - default: false - type: bool - require_numbers: - description: - - Require numbers in password. - default: false - type: bool - require_uppercase: - description: - - Require uppercase letters in password. - default: false - type: bool - require_lowercase: - description: - - Require lowercase letters in password. - default: false - type: bool - allow_pw_change: - description: - - Allow users to change their password. - default: false - type: bool - aliases: [allow_password_change] - pw_max_age: - description: - - Maximum age for a password in days. When this option is 0 then passwords - do not expire automatically. - default: 0 - aliases: [password_max_age] - type: int - pw_reuse_prevent: - description: - - Prevent re-use of passwords. - default: 0 - aliases: [password_reuse_prevent, prevent_reuse] - type: int - pw_expire: - description: - - Prevents users from changing an expired password. - default: false - type: bool - aliases: [password_expire, expire] -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Password policy for AWS account - community.aws.iam_password_policy: - state: present - min_pw_length: 8 - require_symbols: false - require_numbers: true - require_uppercase: true - require_lowercase: true - allow_pw_change: true - pw_max_age: 60 - pw_reuse_prevent: 5 - pw_expire: false -""" - -RETURN = r""" # """ - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class IAMConnection(object): - def __init__(self, module): - try: - self.connection = module.resource("iam") - self.module = module - except Exception as e: - module.fail_json(msg=f"Failed to connect to AWS: {str(e)}") - - def policy_to_dict(self, policy): - policy_attributes = [ - "allow_users_to_change_password", - "expire_passwords", - "hard_expiry", - "max_password_age", - "minimum_password_length", - "password_reuse_prevention", - "require_lowercase_characters", - "require_numbers", - "require_symbols", - "require_uppercase_characters", - ] - ret = {} - for attr in policy_attributes: - ret[attr] = getattr(policy, attr) - return ret - - def update_password_policy(self, module, policy): - min_pw_length = module.params.get("min_pw_length") - require_symbols = module.params.get("require_symbols") - require_numbers = module.params.get("require_numbers") - require_uppercase = module.params.get("require_uppercase") - require_lowercase = module.params.get("require_lowercase") - allow_pw_change = module.params.get("allow_pw_change") - pw_max_age = module.params.get("pw_max_age") - pw_reuse_prevent = module.params.get("pw_reuse_prevent") - pw_expire = module.params.get("pw_expire") - - update_parameters = dict( - MinimumPasswordLength=min_pw_length, - RequireSymbols=require_symbols, - RequireNumbers=require_numbers, - RequireUppercaseCharacters=require_uppercase, - RequireLowercaseCharacters=require_lowercase, - 
AllowUsersToChangePassword=allow_pw_change, - HardExpiry=pw_expire, - ) - if pw_reuse_prevent: - update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) - if pw_max_age: - update_parameters.update(MaxPasswordAge=pw_max_age) - - try: - original_policy = self.policy_to_dict(policy) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - original_policy = {} - - try: - results = policy.update(**update_parameters) - policy.reload() - updated_policy = self.policy_to_dict(policy) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") - - changed = original_policy != updated_policy - return (changed, updated_policy, camel_dict_to_snake_dict(results)) - - def delete_password_policy(self, policy): - try: - results = policy.delete() - except is_boto3_error_code("NoSuchEntity"): - self.module.exit_json(changed=False, task_status={"IAM": "Couldn't find IAM Password Policy"}) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") - return camel_dict_to_snake_dict(results) - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "state": dict(choices=["present", "absent"], required=True), - "min_pw_length": dict(type="int", aliases=["minimum_password_length"], default=6), - "require_symbols": dict(type="bool", default=False), - "require_numbers": dict(type="bool", default=False), - "require_uppercase": dict(type="bool", default=False), - "require_lowercase": dict(type="bool", default=False), - "allow_pw_change": dict(type="bool", aliases=["allow_password_change"], default=False), - "pw_max_age": dict(type="int", aliases=["password_max_age"], default=0), - "pw_reuse_prevent": dict(type="int", aliases=["password_reuse_prevent", "prevent_reuse"], default=0), - "pw_expire": dict(type="bool", aliases=["password_expire", "expire"], default=False), - }, - supports_check_mode=True, - ) - - resource = IAMConnection(module) - policy = resource.connection.AccountPasswordPolicy() - - state = module.params.get("state") - - if state == "present": - (changed, new_policy, update_result) = resource.update_password_policy(module, policy) - module.exit_json(changed=changed, task_status={"IAM": update_result}, policy=new_policy) - - if state == "absent": - delete_result = resource.delete_password_policy(policy) - module.exit_json(changed=True, task_status={"IAM": delete_result}) - - -if __name__ == "__main__": - main() diff --git a/iam_saml_federation.py b/iam_saml_federation.py deleted file mode 100644 index acaaa38fc37..00000000000 --- a/iam_saml_federation.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: iam_saml_federation -version_added: 1.0.0 -short_description: Maintain IAM SAML federation configuration. -description: - - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata). -options: - name: - description: - - The name of the provider to create. - required: true - type: str - saml_metadata_document: - description: - - The XML document generated by an identity provider (IdP) that supports SAML 2.0. 
- type: str - state: - description: - - Whether to create or delete identity provider. If 'present' is specified it will attempt to update the identity provider matching the name field. - default: present - choices: [ "present", "absent" ] - type: str - -author: - - Tony (@axc450) - - Aidan Rowe (@aidan-) - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. -# It is assumed that their matching environment variables are set. -# Creates a new iam saml identity provider if not present -- name: saml provider - community.aws.iam_saml_federation: - name: example1 - # the > below opens an indented block, so no escaping/quoting is needed when in the indentation level under this key - saml_metadata_document: > - ... - >> import boto3 - >>> iam = boto3.client('iam') - >>> name = "server-cert-name" - >>> results = get_server_certs(iam, name) - { - "upload_date": "2015-04-25T00:36:40+00:00", - "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO", - "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----", - "server_certificate_name": "server-cert-name", - "expiration": "2017-06-15T12:00:00+00:00", - "path": "/", - "arn": "arn:aws:iam::123456789012:server-certificate/server-cert-name" - } - """ - results = dict() - try: - if name: - server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]] - else: - server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"] - - for server_cert in server_certs: - if not name: - server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[ - "ServerCertificate" - ] - cert_md = server_cert["ServerCertificateMetadata"] - results[cert_md["ServerCertificateName"]] = { - "certificate_body": server_cert["CertificateBody"], - "server_certificate_id": cert_md["ServerCertificateId"], - "server_certificate_name": cert_md["ServerCertificateName"], - "arn": cert_md["Arn"], - "path": cert_md["Path"], - "expiration": cert_md["Expiration"].isoformat(), - "upload_date": cert_md["UploadDate"].isoformat(), - } - - except botocore.exceptions.ClientError: - pass - - return results - - -def main(): - argument_spec = dict( - name=dict(type="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - iam = module.client("iam") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - cert_name = module.params.get("name") - results = get_server_certs(iam, cert_name) - module.exit_json(results=results) - - -if __name__ == "__main__": - main() diff --git a/inspector_target.py b/inspector_target.py deleted file mode 100644 index f9ec6d53a84..00000000000 --- a/inspector_target.py +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2018 Dennis Conrad for Sainsbury's -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: inspector_target -version_added: 1.0.0 -short_description: Create, Update and Delete Amazon Inspector Assessment Targets -description: - - Creates, updates, or deletes Amazon Inspector Assessment Targets and manages - the required Resource Groups. 
- - Prior to release 5.0.0 this module was called C(community.aws.aws_inspector_target). - The usage did not change. -author: - - "Dennis Conrad (@dennisconrad)" -options: - name: - description: - - The user-defined name that identifies the assessment target. - - The name must be unique within the AWS account. - required: true - type: str - state: - description: - - The state of the assessment target. - choices: - - absent - - present - default: present - type: str - tags: - description: - - Tags of the EC2 instances to be added to the assessment target. - - Required if I(state=present). - type: dict -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create my_target Assessment Target - community.aws.inspector_target: - name: my_target - tags: - role: scan_target - -- name: Update Existing my_target Assessment Target with Additional Tags - community.aws.inspector_target: - name: my_target - tags: - env: dev - role: scan_target - -- name: Delete my_target Assessment Target - community.aws.inspector_target: - name: my_target - state: absent -""" - -RETURN = r""" -arn: - description: The ARN that specifies the Amazon Inspector assessment target. - returned: success - type: str - sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1" -created_at: - description: The time at which the assessment target was created. - returned: success - type: str - sample: "2018-01-29T13:48:51.958000+00:00" -name: - description: The name of the Amazon Inspector assessment target. - returned: success - type: str - sample: "my_target" -resource_group_arn: - description: The ARN that specifies the resource group that is associated - with the assessment target. - returned: success - type: str - sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8" -tags: - description: The tags of the resource group that is associated with the - assessment target. - returned: success - type: list - sample: {"role": "scan_target", "env": "dev"} -updated_at: - description: The time at which the assessment target was last updated. 
- returned: success - type: str - sample: "2018-01-29T13:48:51.958000+00:00" -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def main(): - argument_spec = dict( - name=dict(required=True), - state=dict(choices=["absent", "present"], default="present"), - tags=dict(type="dict"), - ) - - required_if = [["state", "present", ["tags"]]] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=False, - required_if=required_if, - ) - - name = module.params.get("name") - state = module.params.get("state").lower() - tags = module.params.get("tags") - if tags: - tags = ansible_dict_to_boto3_tag_list(tags, "key", "value") - - client = module.client("inspector") - - try: - existing_target_arn = client.list_assessment_targets( - filter={"assessmentTargetNamePattern": name}, - ).get( - "assessmentTargetArns" - )[0] - - existing_target = camel_dict_to_snake_dict( - client.describe_assessment_targets( - assessmentTargetArns=[existing_target_arn], - ).get( - "assessmentTargets" - )[0] - ) - - existing_resource_group_arn = existing_target.get("resource_group_arn") - existing_resource_group_tags = ( - client.describe_resource_groups( - resourceGroupArns=[existing_resource_group_arn], - ) - .get("resourceGroups")[0] - .get("tags") - ) - - target_exists = True - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, msg="trying to retrieve targets") - except IndexError: - target_exists = False - - if state == "present" and target_exists: - ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags) - ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(existing_resource_group_tags) - tags_to_add, tags_to_remove = compare_aws_tags(ansible_dict_tags, ansible_dict_existing_tags) - if not (tags_to_add or tags_to_remove): - existing_target.update({"tags": ansible_dict_existing_tags}) - module.exit_json(changed=False, **existing_target) - else: - try: - updated_resource_group_arn = client.create_resource_group( - resourceGroupTags=tags, - ).get("resourceGroupArn") - - client.update_assessment_target( - assessmentTargetArn=existing_target_arn, - assessmentTargetName=name, - resourceGroupArn=updated_resource_group_arn, - ) - - updated_target = camel_dict_to_snake_dict( - client.describe_assessment_targets( - assessmentTargetArns=[existing_target_arn], - ).get( - "assessmentTargets" - )[0] - ) - - updated_target.update({"tags": ansible_dict_tags}) - module.exit_json(changed=True, **updated_target) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, msg="trying to update target") - - elif state == "present" and not target_exists: - try: - new_resource_group_arn = client.create_resource_group( - resourceGroupTags=tags, - ).get("resourceGroupArn") - - new_target_arn = 
client.create_assessment_target( - assessmentTargetName=name, - resourceGroupArn=new_resource_group_arn, - ).get("assessmentTargetArn") - - new_target = camel_dict_to_snake_dict( - client.describe_assessment_targets( - assessmentTargetArns=[new_target_arn], - ).get( - "assessmentTargets" - )[0] - ) - - new_target.update({"tags": boto3_tag_list_to_ansible_dict(tags)}) - module.exit_json(changed=True, **new_target) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, msg="trying to create target") - - elif state == "absent" and target_exists: - try: - client.delete_assessment_target( - assessmentTargetArn=existing_target_arn, - ) - module.exit_json(changed=True) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, msg="trying to delete target") - - elif state == "absent" and not target_exists: - module.exit_json(changed=False) - - -if __name__ == "__main__": - main() diff --git a/kinesis_stream.py b/kinesis_stream.py deleted file mode 100644 index d1ba65c86b2..00000000000 --- a/kinesis_stream.py +++ /dev/null @@ -1,1144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: kinesis_stream -version_added: 1.0.0 -short_description: Manage a Kinesis Stream. -description: - - Create or Delete a Kinesis Stream. - - Update the retention period of a Kinesis Stream. - - Update Tags on a Kinesis Stream. - - Enable/disable server side encryption on a Kinesis Stream. -author: - - Allen Sanabria (@linuxdynasty) -options: - name: - description: - - The name of the Kinesis Stream you are managing. - required: true - type: str - shards: - description: - - The number of shards you want to have with this stream. - - This is required when I(state=present) - type: int - retention_period: - description: - - The length of time (in hours) data records are accessible after they are added to - the stream. - - The default retention period is 24 hours and can not be less than 24 hours. - - The maximum retention period is 168 hours. - - The retention period can be modified during any point in time. - type: int - state: - description: - - Create or Delete the Kinesis Stream. - default: present - choices: [ 'present', 'absent' ] - type: str - wait: - description: - - Wait for operation to complete before returning. - default: true - type: bool - wait_timeout: - description: - - How many seconds to wait for an operation to complete before timing out. - default: 300 - type: int - tags: - description: - - "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })." - aliases: [ "resource_tags" ] - type: dict - encryption_state: - description: - - Enable or Disable encryption on the Kinesis Stream. - choices: [ 'enabled', 'disabled' ] - type: str - encryption_type: - description: - - The type of encryption. - - Defaults to C(KMS) - choices: ['KMS', 'NONE'] - type: str - key_id: - description: - - The GUID or alias for the KMS key. - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -# Basic creation example: -- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE - community.aws.kinesis_stream: - name: test-stream - shards: 10 - wait: true - wait_timeout: 600 - register: test_stream - -# Basic creation example with tags: -- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE - community.aws.kinesis_stream: - name: test-stream - shards: 10 - tags: - Env: development - wait: true - wait_timeout: 600 - register: test_stream - -# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours: -- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE - community.aws.kinesis_stream: - name: test-stream - retention_period: 48 - shards: 10 - tags: - Env: development - wait: true - wait_timeout: 600 - register: test_stream - -# Basic delete example: -- name: Delete Kinesis Stream test-stream and wait for it to finish deleting. - community.aws.kinesis_stream: - name: test-stream - state: absent - wait: true - wait_timeout: 600 - register: test_stream - -# Basic enable encryption example: -- name: Encrypt Kinesis Stream test-stream. - community.aws.kinesis_stream: - name: test-stream - state: present - shards: 1 - encryption_state: enabled - encryption_type: KMS - key_id: alias/aws/kinesis - wait: true - wait_timeout: 600 - register: test_stream - -# Basic disable encryption example: -- name: Disable encryption on Kinesis Stream test-stream. - community.aws.kinesis_stream: - name: test-stream - state: present - shards: 1 - encryption_state: disabled - encryption_type: KMS - key_id: alias/aws/kinesis - wait: true - wait_timeout: 600 - register: test_stream -""" - -RETURN = r""" -stream_name: - description: The name of the Kinesis Stream. - returned: when state == present. - type: str - sample: "test-stream" -stream_arn: - description: The Amazon Resource Name (ARN) of the Kinesis Stream. - returned: when state == present. - type: str - sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream" -stream_status: - description: The current state of the Kinesis Stream. - returned: when state == present. - type: str - sample: "ACTIVE" -retention_period_hours: - description: Number of hours messages will be kept for a Kinesis Stream. - returned: when state == present. - type: int - sample: 24 -tags: - description: Dictionary containing all the tags associated with the Kinesis stream. - returned: when state == present. - type: dict - sample: { - "Name": "Splunk", - "Env": "development" - } -""" - -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_tags(client, stream_name): - """Retrieve the tags for a Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): Name of the Kinesis stream.
- - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> get_tags(client, stream_name) - - Returns: - Tuple (bool, str, dict) - """ - err_msg = "" - success = False - params = { - "StreamName": stream_name, - } - results = dict() - try: - results = client.list_tags_for_stream(**params)["Tags"] - success = True - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg, boto3_tag_list_to_ansible_dict(results) - - -def find_stream(client, stream_name): - """Retrieve a Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): Name of the Kinesis stream. - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> find_stream(client, stream_name) - - Returns: - Tuple (bool, str, dict) - """ - err_msg = "" - success = False - params = { - "StreamName": stream_name, - } - results = dict() - has_more_shards = True - shards = list() - try: - while has_more_shards: - results = client.describe_stream(**params)["StreamDescription"] - shards.extend(results.pop("Shards")) - has_more_shards = results["HasMoreShards"] - if has_more_shards: - params["ExclusiveStartShardId"] = shards[-1]["ShardId"] - results["Shards"] = shards - num_closed_shards = len([s for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"]]) - results["OpenShardsCount"] = len(shards) - num_closed_shards - results["ClosedShardsCount"] = num_closed_shards - results["ShardsCount"] = len(shards) - success = True - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg, results - - -def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=False): - """Wait for the status to change for a Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client - stream_name (str): The name of the kinesis stream. - status (str): The status to wait for. - examples: status=ACTIVE, status=DELETING - - Kwargs: - wait_timeout (int): Number of seconds to wait, until this timeout is reached. - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> wait_for_status(client, stream_name, 'ACTIVE', 300) - - Returns: - Tuple (bool, str, dict) - """ - polling_increment_secs = 5 - wait_timeout = time.time() + wait_timeout - status_achieved = False - stream = dict() - err_msg = "" - - while wait_timeout > time.time(): - try: - find_success, find_msg, stream = find_stream(client, stream_name) - if check_mode: - status_achieved = True - break - - elif status != "DELETING": - if find_success and stream: - if stream.get("StreamStatus") == status: - status_achieved = True - break - - else: - if not find_success: - status_achieved = True - break - - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - time.sleep(polling_increment_secs) - - if not status_achieved: - err_msg = "Wait timeout reached while waiting for results" - else: - err_msg = f"Status {status} achieved successfully" - - return status_achieved, err_msg, stream - - -def tags_action(client, stream_name, tags, action="create", check_mode=False): - """Create or delete multiple tags from a Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the Kinesis stream. - tags (dict or list): A dict of tags to add when action=create, - or a list of tag keys to remove when action=delete. - - Kwargs: - action (str): The action to perform.
- valid actions == create and delete - default=create - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('ec2') - >>> resource_id = 'pcx-123345678' - >>> tags = {'env': 'development'} - >>> update_tags(client, resource_id, tags) - [True, ''] - - Returns: - List (bool, str) - """ - success = False - err_msg = "" - params = {"StreamName": stream_name} - try: - if not check_mode: - if action == "create": - params["Tags"] = tags - client.add_tags_to_stream(**params) - success = True - elif action == "delete": - params["TagKeys"] = tags - client.remove_tags_from_stream(**params) - success = True - else: - err_msg = f"Invalid action {action}" - else: - if action == "create": - success = True - elif action == "delete": - success = True - else: - err_msg = f"Invalid action {action}" - - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg - - -def update_tags(client, stream_name, tags, check_mode=False): - """Update tags for an amazon resource. - Args: - resource_id (str): The Amazon resource id. - tags (dict): Dictionary of tags you want applied to the Kinesis stream. - - Kwargs: - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('ec2') - >>> stream_name = 'test-stream' - >>> tags = {'env': 'development'} - >>> update_tags(client, stream_name, tags) - [True, ''] - - Return: - Tuple (bool, str) - """ - success = False - changed = False - err_msg = "" - tag_success, tag_msg, current_tags = get_tags(client, stream_name) - - tags_to_set, tags_to_delete = compare_aws_tags( - current_tags, - tags, - purge_tags=True, - ) - if tags_to_delete: - delete_success, delete_msg = tags_action( - client, stream_name, tags_to_delete, action="delete", check_mode=check_mode - ) - if not delete_success: - return delete_success, changed, delete_msg - tag_msg = "Tags removed" - - if tags_to_set: - create_success, create_msg = tags_action( - client, stream_name, tags_to_set, action="create", check_mode=check_mode - ) - if create_success: - changed = True - return create_success, changed, create_msg - - return success, changed, err_msg - - -def stream_action(client, stream_name, shard_count=1, action="create", timeout=300, check_mode=False): - """Create or Delete an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - shard_count (int): Number of shards this stream will use. - action (str): The action to perform. - valid actions == create and delete - default=create - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
- default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> shard_count = 20 - >>> stream_action(client, stream_name, shard_count, action='create') - - Returns: - List (bool, str) - """ - success = False - err_msg = "" - params = {"StreamName": stream_name} - try: - if not check_mode: - if action == "create": - params["ShardCount"] = shard_count - client.create_stream(**params) - success = True - elif action == "delete": - client.delete_stream(**params) - success = True - else: - err_msg = f"Invalid action {action}" - else: - if action == "create": - success = True - elif action == "delete": - success = True - else: - err_msg = f"Invalid action {action}" - - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg - - -def stream_encryption_action( - client, stream_name, action="start_encryption", encryption_type="", key_id="", timeout=300, check_mode=False -): - """Start or stop encryption on an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - action (str): The action to perform. - valid actions == start_encryption and stop_encryption - default=start_encryption - encryption_type (str): NONE or KMS - key_id (str): The GUID or alias for the KMS key - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='KMS', key_id='alias/aws') - - Returns: - List (bool, str) - """ - success = False - err_msg = "" - params = {"StreamName": stream_name} - try: - if not check_mode: - if action == "start_encryption": - params["EncryptionType"] = encryption_type - params["KeyId"] = key_id - client.start_stream_encryption(**params) - success = True - elif action == "stop_encryption": - params["EncryptionType"] = encryption_type - params["KeyId"] = key_id - client.stop_stream_encryption(**params) - success = True - else: - err_msg = f"Invalid encryption action {action}" - else: - if action == "start_encryption": - success = True - elif action == "stop_encryption": - success = True - else: - err_msg = f"Invalid encryption action {action}" - - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg - - -def retention_action(client, stream_name, retention_period=24, action="increase", check_mode=False): - """Increase or Decrease the retention of messages in the Kinesis stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - retention_period (int): This is how long messages will be kept before - they are discarded. This cannot be less than 24 hours. - action (str): The action to perform. - valid actions == increase and decrease - default=increase - check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
- default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> retention_period = 48 - >>> retention_action(client, stream_name, retention_period, action='increase') - - Returns: - Tuple (bool, str) - """ - success = False - err_msg = "" - params = {"StreamName": stream_name} - try: - if not check_mode: - if action == "increase": - params["RetentionPeriodHours"] = retention_period - client.increase_stream_retention_period(**params) - success = True - err_msg = f"Retention Period increased successfully to {retention_period}" - elif action == "decrease": - params["RetentionPeriodHours"] = retention_period - client.decrease_stream_retention_period(**params) - success = True - err_msg = f"Retention Period decreased successfully to {retention_period}" - else: - err_msg = f"Invalid action {action}" - else: - if action == "increase": - success = True - elif action == "decrease": - success = True - else: - err_msg = f"Invalid action {action}" - - except botocore.exceptions.ClientError as e: - err_msg = to_native(e) - - return success, err_msg - - -def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False): - """Increase or Decrease the number of shards in the Kinesis stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - number_of_shards (int): Number of shards this stream will use. - default=1 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> number_of_shards = 3 - >>> update_shard_count(client, stream_name, number_of_shards) - - Returns: - Tuple (bool, str) - """ - success = True - err_msg = "" - params = {"StreamName": stream_name, "ScalingType": "UNIFORM_SCALING"} - if not check_mode: - params["TargetShardCount"] = number_of_shards - try: - client.update_shard_count(**params) - except botocore.exceptions.ClientError as e: - return False, str(e) - - return success, err_msg - - -def update( - client, - current_stream, - stream_name, - number_of_shards=1, - retention_period=None, - tags=None, - wait=False, - wait_timeout=300, - check_mode=False, -): - """Update an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - number_of_shards (int): Number of shards this stream will use. - default=1 - retention_period (int): This is how long messages will be kept before - they are discarded. This can not be less than 24 hours. - tags (dict): The tags you want applied. - wait (bool): Wait until Stream is ACTIVE. - default=False - wait_timeout (int): How long to wait until this operation is considered failed. - default=300 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
- default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> current_stream = { - 'ShardCount': 3, - 'HasMoreShards': True, - 'RetentionPeriodHours': 24, - 'StreamName': 'test-stream', - 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream', - 'StreamStatus': 'ACTIVE' - } - >>> stream_name = 'test-stream' - >>> retention_period = 48 - >>> number_of_shards = 10 - >>> update(client, current_stream, stream_name, - number_of_shards, retention_period) - - Returns: - Tuple (bool, bool, str) - """ - success = True - changed = False - err_msg = "" - if retention_period: - if wait: - wait_success, wait_msg, current_stream = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - if not wait_success: - return wait_success, False, wait_msg - - if current_stream.get("StreamStatus") == "ACTIVE": - retention_changed = False - if retention_period > current_stream["RetentionPeriodHours"]: - retention_changed, retention_msg = retention_action( - client, stream_name, retention_period, action="increase", check_mode=check_mode - ) - - elif retention_period < current_stream["RetentionPeriodHours"]: - retention_changed, retention_msg = retention_action( - client, stream_name, retention_period, action="decrease", check_mode=check_mode - ) - - elif retention_period == current_stream["RetentionPeriodHours"]: - retention_msg = f"Retention {retention_period} is the same as {current_stream['RetentionPeriodHours']}" - success = True - - if retention_changed: - success = True - changed = True - - err_msg = retention_msg - if changed and wait: - wait_success, wait_msg, current_stream = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - if not wait_success: - return wait_success, False, wait_msg - elif changed and not wait: - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found: - if current_stream["StreamStatus"] != "ACTIVE": - err_msg = f"Retention Period for {stream_name} is in the process of updating" - return success, changed, err_msg - else: - err_msg = ( - "StreamStatus has to be ACTIVE in order to modify the retention period." - f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" - ) - return success, changed, err_msg - - if current_stream["OpenShardsCount"] != number_of_shards: - success, err_msg = update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode) - - if not success: - return success, changed, err_msg - - changed = True - - if wait: - wait_success, wait_msg, current_stream = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - if not wait_success: - return wait_success, changed, wait_msg - else: - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found and current_stream["StreamStatus"] != "ACTIVE": - err_msg = f"Number of shards for {stream_name} is in the process of updating" - return success, changed, err_msg - - if tags: - tag_success, tag_changed, err_msg = update_tags(client, stream_name, tags, check_mode=check_mode) - changed |= tag_changed - if wait: - success, err_msg, status_stream = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - if success and changed: - err_msg = f"Kinesis Stream {stream_name} updated successfully." - elif success and not changed: - err_msg = f"Kinesis Stream {stream_name} did not change."
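The tag reconciliation above hinges on compare_aws_tags() from amazon.aws, which returns the tags to set and the tag keys to remove. For illustration, a minimal sketch of those assumed semantics (the literal tag values are placeholders, not from the module):

from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

# Given the stream's current tags and the desired tags, the helper returns
# the tags to (re)set and, with purge_tags=True, the tag keys to remove.
current = {"Env": "dev", "Stale": "remove-me"}
desired = {"Env": "prod"}
tags_to_set, tags_to_unset = compare_aws_tags(current, desired, purge_tags=True)
assert tags_to_set == {"Env": "prod"}  # changed value is re-set
assert tags_to_unset == ["Stale"]      # present on the stream but no longer desired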
- - return success, changed, err_msg - - -def create_stream( - client, - stream_name, - number_of_shards=1, - retention_period=None, - tags=None, - wait=False, - wait_timeout=300, - check_mode=False, -): - """Create an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - number_of_shards (int): Number of shards this stream will use. - default=1 - retention_period (int): Can not be less than 24 hours - default=None - tags (dict): The tags you want applied. - default=None - wait (bool): Wait until Stream is ACTIVE. - default=False - wait_timeout (int): How long to wait until this operation is considered failed. - default=300 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> number_of_shards = 10 - >>> tags = {'env': 'test'} - >>> create_stream(client, stream_name, number_of_shards, tags=tags) - - Returns: - Tuple (bool, bool, str, dict) - """ - success = False - changed = False - err_msg = "" - results = dict() - - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - - if stream_found and current_stream.get("StreamStatus") == "DELETING" and wait: - wait_success, wait_msg, current_stream = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - - if stream_found and current_stream.get("StreamStatus") != "DELETING": - success, changed, err_msg = update( - client, - current_stream, - stream_name, - number_of_shards, - retention_period, - tags, - wait, - wait_timeout, - check_mode=check_mode, - ) - else: - create_success, create_msg = stream_action( - client, stream_name, number_of_shards, action="create", check_mode=check_mode - ) - if not create_success: - changed = True - err_msg = f"Failed to create Kinesis stream: {create_msg}" - return False, True, err_msg, {} - else: - changed = True - if wait: - wait_success, wait_msg, results = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - err_msg = f"Kinesis Stream {stream_name} is in the process of being created" - if not wait_success: - return wait_success, True, wait_msg, results - else: - err_msg = f"Kinesis Stream {stream_name} created successfully" - - if tags: - changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) - if changed: - success = True - if not success: - return success, changed, err_msg, results - - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if retention_period and current_stream.get("StreamStatus") == "ACTIVE": - changed, err_msg = retention_action( - client, stream_name, retention_period, action="increase", check_mode=check_mode - ) - if changed: - success = True - if not success: - return success, changed, err_msg, results - else: - err_msg = ( - "StreamStatus has to be ACTIVE in order to modify the retention period." 
- f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" - ) - success = create_success - changed = True - - if success: - stream_found, stream_msg, results = find_stream(client, stream_name) - tag_success, tag_msg, current_tags = get_tags(client, stream_name) - if check_mode: - current_tags = tags - - if not current_tags: - current_tags = dict() - - results = camel_dict_to_snake_dict(results) - results["tags"] = current_tags - - return success, changed, err_msg, results - - -def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode=False): - """Delete an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - wait (bool): Wait until Stream is ACTIVE. - default=False - wait_timeout (int): How long to wait until this operation is considered failed. - default=300 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> delete_stream(client, stream_name) - - Returns: - Tuple (bool, bool, str, dict) - """ - success = False - changed = False - err_msg = "" - results = dict() - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found: - success, err_msg = stream_action(client, stream_name, action="delete", check_mode=check_mode) - if success: - changed = True - if wait: - success, err_msg, results = wait_for_status( - client, stream_name, "DELETING", wait_timeout, check_mode=check_mode - ) - err_msg = f"Stream {stream_name} deleted successfully" - if not success: - return success, True, err_msg, results - else: - err_msg = f"Stream {stream_name} is in the process of being deleted" - else: - success = True - changed = False - err_msg = f"Stream {stream_name} does not exist" - - return success, changed, err_msg, results - - -def start_stream_encryption( - client, stream_name, encryption_type="", key_id="", wait=False, wait_timeout=300, check_mode=False -): - """Start encryption on an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - encryption_type (str): KMS or NONE - key_id (str): KMS key GUID or alias - wait (bool): Wait until Stream is ACTIVE. - default=False - wait_timeout (int): How long to wait until this operation is considered failed. - default=300 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> key_id = 'alias/aws' - >>> encryption_type = 'KMS' - >>> start_stream_encryption(client, stream_name,encryption_type,key_id) - - Returns: - Tuple (bool, bool, str, dict) - """ - success = False - changed = False - err_msg = "" - params = {"StreamName": stream_name} - - results = dict() - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found: - if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: - changed = False - success = True - err_msg = f"Kinesis Stream {stream_name} encryption already configured." 
- else: - success, err_msg = stream_encryption_action( - client, - stream_name, - action="start_encryption", - encryption_type=encryption_type, - key_id=key_id, - check_mode=check_mode, - ) - if success: - changed = True - if wait: - success, err_msg, results = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - err_msg = f"Kinesis Stream {stream_name} encryption started successfully." - if not success: - return success, True, err_msg, results - else: - err_msg = f"Kinesis Stream {stream_name} is in the process of starting encryption." - else: - success = True - changed = False - err_msg = f"Kinesis Stream {stream_name} does not exist" - - if success: - stream_found, stream_msg, results = find_stream(client, stream_name) - tag_success, tag_msg, current_tags = get_tags(client, stream_name) - if not current_tags: - current_tags = dict() - - results = camel_dict_to_snake_dict(results) - results["tags"] = current_tags - - return success, changed, err_msg, results - - -def stop_stream_encryption( - client, stream_name, encryption_type="", key_id="", wait=True, wait_timeout=300, check_mode=False -): - """Stop encryption on an Amazon Kinesis Stream. - Args: - client (botocore.client.EC2): Boto3 client. - stream_name (str): The name of the kinesis stream. - - Kwargs: - encryption_type (str): KMS or NONE - key_id (str): KMS key GUID or alias - wait (bool): Wait until Stream is ACTIVE. - default=False - wait_timeout (int): How long to wait until this operation is considered failed. - default=300 - check_mode (bool): This will pass DryRun as one of the parameters to the aws api. - default=False - - Basic Usage: - >>> client = boto3.client('kinesis') - >>> stream_name = 'test-stream' - >>> stop_stream_encryption(client, stream_name,encryption_type, key_id) - - Returns: - Tuple (bool, bool, str, dict) - """ - success = False - changed = False - err_msg = "" - params = {"StreamName": stream_name} - - results = dict() - stream_found, stream_msg, current_stream = find_stream(client, stream_name) - if stream_found: - if current_stream.get("EncryptionType") == "KMS": - success, err_msg = stream_encryption_action( - client, - stream_name, - action="stop_encryption", - key_id=key_id, - encryption_type=encryption_type, - check_mode=check_mode, - ) - changed = success - if wait: - success, err_msg, results = wait_for_status( - client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode - ) - if not success: - return success, True, err_msg, results - err_msg = f"Kinesis Stream {stream_name} encryption stopped successfully." - else: - err_msg = f"Stream {stream_name} is in the process of stopping encryption." - elif current_stream.get("EncryptionType") == "NONE": - success = True - err_msg = f"Kinesis Stream {stream_name} encryption already stopped." - else: - success = True - changed = False - err_msg = f"Stream {stream_name} does not exist." 
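For context, the idempotency checks in the encryption helpers compare the stream's current EncryptionType and KeyId against the requested values. A hedged boto3 sketch of that lookup, assuming configured credentials and a placeholder stream name:

import boto3

client = boto3.client("kinesis")
description = client.describe_stream(StreamName="test-stream")["StreamDescription"]
# "EncryptionType" is "KMS" or "NONE"; "KeyId" identifies the key when KMS is active.
if description.get("EncryptionType") == "KMS":
    print(f"stream is encrypted with {description.get('KeyId')}")
else:
    print("stream is not encrypted")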
- - if success: - stream_found, stream_msg, results = find_stream(client, stream_name) - tag_success, tag_msg, current_tags = get_tags(client, stream_name) - if not current_tags: - current_tags = dict() - - results = camel_dict_to_snake_dict(results) - results["tags"] = current_tags - - return success, changed, err_msg, results - - -def main(): - argument_spec = dict( - name=dict(required=True), - shards=dict(default=None, required=False, type="int"), - retention_period=dict(default=None, required=False, type="int"), - tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]), - wait=dict(default=True, required=False, type="bool"), - wait_timeout=dict(default=300, required=False, type="int"), - state=dict(default="present", choices=["present", "absent"]), - encryption_type=dict(required=False, choices=["NONE", "KMS"]), - key_id=dict(required=False, type="str"), - encryption_state=dict(required=False, choices=["enabled", "disabled"]), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - retention_period = module.params.get("retention_period") - stream_name = module.params.get("name") - shards = module.params.get("shards") - state = module.params.get("state") - tags = module.params.get("tags") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - encryption_type = module.params.get("encryption_type") - key_id = module.params.get("key_id") - encryption_state = module.params.get("encryption_state") - - if state == "present" and not shards: - module.fail_json(msg="Shards is required when state == present.") - - if retention_period: - if retention_period < 24: - module.fail_json(msg="Retention period can not be less than 24 hours.") - - check_mode = module.check_mode - try: - client = module.client("kinesis") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - if state == "present": - success, changed, err_msg, results = create_stream( - client, stream_name, shards, retention_period, tags, wait, wait_timeout, check_mode - ) - if encryption_state == "enabled": - success, changed, err_msg, results = start_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) - elif encryption_state == "disabled": - success, changed, err_msg, results = stop_stream_encryption( - client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode - ) - elif state == "absent": - success, changed, err_msg, results = delete_stream(client, stream_name, wait, wait_timeout, check_mode) - - if success: - module.exit_json(success=success, changed=changed, msg=err_msg, **results) - else: - module.fail_json(success=success, changed=changed, msg=err_msg, result=results) - - -if __name__ == "__main__": - main() diff --git a/lightsail.py b/lightsail.py deleted file mode 100644 index 16b4338e7dc..00000000000 --- a/lightsail.py +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: lightsail -version_added: 1.0.0 -short_description: Manage instances in AWS Lightsail -description: - - Manage instances in AWS Lightsail. - - Instance tagging is not yet supported in this module. 
-author: - - "Nick Ball (@nickball)" - - "Prasad Katti (@prasadkatti)" -options: - state: - description: - - Indicate desired state of the target. - - I(rebooted) and I(restarted) are aliases. - default: present - choices: ['present', 'absent', 'running', 'restarted', 'rebooted', 'stopped'] - type: str - name: - description: Name of the instance. - required: true - type: str - zone: - description: - - AWS availability zone in which to launch the instance. - - Required when I(state=present) - type: str - blueprint_id: - description: - - ID of the instance blueprint image. - - Required when I(state=present) - type: str - bundle_id: - description: - - Bundle of specification info for the instance. - - Required when I(state=present). - type: str - user_data: - description: - - Launch script that can configure the instance with additional data. - type: str - default: '' - public_ports: - description: - - A list of dictionaries to describe the ports to open for the specified instance. - type: list - elements: dict - suboptions: - from_port: - description: The first port in a range of open ports on the instance. - type: int - required: true - to_port: - description: The last port in a range of open ports on the instance. - type: int - required: true - protocol: - description: The IP protocol name accepted for the defined range of open ports. - type: str - choices: ['tcp', 'all', 'udp', 'icmp'] - required: true - cidrs: - description: - - The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. - - One of I(cidrs) or I(ipv6_cidrs) must be specified. - type: list - elements: str - ipv6_cidrs: - description: - - The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. - - One of I(cidrs) or I(ipv6_cidrs) must be specified. - type: list - elements: str - version_added: 6.0.0 - key_pair_name: - description: - - Name of the key pair to use with the instance. - - If I(state=present) and a key_pair_name is not provided, the default keypair from the region will be used. - type: str - wait: - description: - - Wait for the instance to be in state 'running' before returning. - - If I(wait=false) an ip_address may not be returned. - - Has no effect when I(state=rebooted) or I(state=absent). - type: bool - default: true - wait_timeout: - description: - - How long before I(wait) gives up, in seconds. 
- default: 300 - type: int - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - - -EXAMPLES = r""" -- name: Create a new Lightsail instance - community.aws.lightsail: - state: present - name: my_instance - region: us-east-1 - zone: us-east-1a - blueprint_id: ubuntu_16_04 - bundle_id: nano_1_0 - key_pair_name: id_rsa - user_data: " echo 'hello world' > /home/ubuntu/test.txt" - public_ports: - - from_port: 22 - to_port: 22 - protocol: "tcp" - cidrs: ["0.0.0.0/0"] - ipv6_cidrs: ["::/0"] - register: my_instance - -- name: Delete an instance - community.aws.lightsail: - state: absent - region: us-east-1 - name: my_instance -""" - -RETURN = r""" -changed: - description: if the instance has been modified or created - returned: always - type: bool - sample: - changed: true -instance: - description: instance data - returned: always - type: dict - sample: - arn: "arn:aws:lightsail:us-east-1:123456789012:Instance/1fef0175-d6c8-480e-84fa-214f969cda87" - blueprint_id: "ubuntu_16_04" - blueprint_name: "Ubuntu" - bundle_id: "nano_1_0" - created_at: "2017-03-27T08:38:59.714000-04:00" - hardware: - cpu_count: 1 - ram_size_in_gb: 0.5 - is_static_ip: false - location: - availability_zone: "us-east-1a" - region_name: "us-east-1" - name: "my_instance" - networking: - monthly_transfer: - gb_per_month_allocated: 1024 - ports: - - access_direction: "inbound" - access_from: "Anywhere (0.0.0.0/0)" - access_type: "public" - common_name: "" - from_port: 80 - protocol: tcp - to_port: 80 - - access_direction: "inbound" - access_from: "Anywhere (0.0.0.0/0)" - access_type: "public" - common_name: "" - from_port: 22 - protocol: tcp - to_port: 22 - private_ip_address: "172.26.8.14" - public_ip_address: "34.207.152.202" - resource_type: "Instance" - ssh_key_name: "keypair" - state: - code: 16 - name: running - support_code: "123456789012/i-0997c97831ee21e33" - username: "ubuntu" -""" - -import time - -try: - import botocore -except ImportError: - # will be caught by AnsibleAWSModule - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def find_instance_info(module, client, instance_name, fail_if_not_found=False): - try: - res = client.get_instance(instanceName=instance_name) - except is_boto3_error_code("NotFoundException") as e: - if fail_if_not_found: - module.fail_json_aws(e) - return None - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - return res["instance"] - - -def wait_for_instance_state(module, client, instance_name, states): - """ - `states` is a list of instance states that we are waiting for.
- """ - - wait_timeout = module.params.get("wait_timeout") - wait_max = time.time() + wait_timeout - while wait_max > time.time(): - try: - instance = find_instance_info(module, client, instance_name) - if instance["state"]["name"] in states: - break - time.sleep(5) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - else: - module.fail_json( - msg=f'Timed out waiting for instance "{instance_name}" to get to one of the following states - {states}' - ) - - -def update_public_ports(module, client, instance_name): - try: - client.put_instance_public_ports( - portInfos=snake_dict_to_camel_dict(module.params.get("public_ports")), - instanceName=instance_name, - ) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - -def create_or_update_instance(module, client, instance_name): - inst = find_instance_info(module, client, instance_name) - - if not inst: - create_params = { - "instanceNames": [instance_name], - "availabilityZone": module.params.get("zone"), - "blueprintId": module.params.get("blueprint_id"), - "bundleId": module.params.get("bundle_id"), - "userData": module.params.get("user_data"), - } - - key_pair_name = module.params.get("key_pair_name") - if key_pair_name: - create_params["keyPairName"] = key_pair_name - - try: - client.create_instances(**create_params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - wait = module.params.get("wait") - if wait: - desired_states = ["running"] - wait_for_instance_state(module, client, instance_name, desired_states) - - if module.params.get("public_ports") is not None: - update_public_ports(module, client, instance_name) - after_update_inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - - module.exit_json( - changed=after_update_inst != inst, - instance=camel_dict_to_snake_dict(after_update_inst), - ) - - -def delete_instance(module, client, instance_name): - changed = False - - inst = find_instance_info(module, client, instance_name) - if inst is None: - module.exit_json(changed=changed, instance={}) - - # Wait for instance to exit transition state before deleting - desired_states = ["running", "stopped"] - wait_for_instance_state(module, client, instance_name, desired_states) - - try: - client.delete_instance(instanceName=instance_name) - changed = True - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) - - -def restart_instance(module, client, instance_name): - """ - Reboot an existing instance - Wait will not apply here as this is an OS-level operation - """ - - changed = False - - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - - try: - client.reboot_instance(instanceName=instance_name) - changed = True - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) - - -def start_or_stop_instance(module, client, instance_name, state): - """ - Start or stop an existing instance - """ - - changed = False - - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - - # Wait for instance to exit transition state before state change - desired_states = ["running", "stopped"] - wait_for_instance_state(module, client, instance_name, desired_states) - - # Try state change - if inst and inst["state"]["name"] != state: - try: - if state == "running": - client.start_instance(instanceName=instance_name) - 
else: - client.stop_instance(instanceName=instance_name) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - changed = True - # Grab current instance info - inst = find_instance_info(module, client, instance_name) - - wait = module.params.get("wait") - if wait: - desired_states = [state] - wait_for_instance_state(module, client, instance_name, desired_states) - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - - module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst)) - - -def main(): - argument_spec = dict( - name=dict(type="str", required=True), - state=dict( - type="str", default="present", choices=["present", "absent", "stopped", "running", "restarted", "rebooted"] - ), - zone=dict(type="str"), - blueprint_id=dict(type="str"), - bundle_id=dict(type="str"), - key_pair_name=dict(type="str"), - user_data=dict(type="str", default=""), - wait=dict(type="bool", default=True), - wait_timeout=dict(default=300, type="int"), - public_ports=dict( - type="list", - elements="dict", - options=dict( - from_port=dict(type="int", required=True), - to_port=dict(type="int", required=True), - protocol=dict(type="str", choices=["tcp", "all", "udp", "icmp"], required=True), - cidrs=dict(type="list", elements="str"), - ipv6_cidrs=dict(type="list", elements="str"), - ), - required_one_of=[("cidrs", "ipv6_cidrs")], - ), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, required_if=[["state", "present", ("zone", "blueprint_id", "bundle_id")]] - ) - - client = module.client("lightsail") - - name = module.params.get("name") - state = module.params.get("state") - - if state == "present": - create_or_update_instance(module, client, name) - elif state == "absent": - delete_instance(module, client, name) - elif state in ("running", "stopped"): - start_or_stop_instance(module, client, name, state) - elif state in ("restarted", "rebooted"): - restart_instance(module, client, name) - - -if __name__ == "__main__": - main() diff --git a/lightsail_snapshot.py b/lightsail_snapshot.py deleted file mode 100644 index 1d0d178aa49..00000000000 --- a/lightsail_snapshot.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: lightsail_snapshot -version_added: "6.0.0" -short_description: Creates snapshots of AWS Lightsail instances -description: - - Creates snapshots of AWS Lightsail instances. -author: - - "Nuno Saavedra (@Nfsaavedra)" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - type: str - snapshot_name: - description: Name of the new instance snapshot. - required: true - type: str - instance_name: - description: - - Name of the instance to create the snapshot. - - Required when I(state=present). - type: str - wait: - description: - - Wait for the instance snapshot to be created before returning. - type: bool - default: true - wait_timeout: - description: - - How long before I(wait) gives up, in seconds. 
- default: 300 - type: int - -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create AWS Lightsail snapshot - lightsail_snapshot: - region: us-east-1 - snapshot_name: "my_instance_snapshot" - instance_name: "my_instance" - -- name: Delete AWS Lightsail snapshot - lightsail_snapshot: - region: us-east-1 - snapshot_name: "my_instance_snapshot" - state: absent -""" - -RETURN = r""" -changed: - description: if a snapshot has been modified/created - returned: always - type: bool - sample: - changed: true -snapshot: - description: instance snapshot data - type: dict - returned: always - sample: - arn: "arn:aws:lightsail:us-east-1:070807442430:InstanceSnapshot/54b0f785-7132-443d-9e32-95a6825636a4" - created_at: "2023-02-23T18:46:11.183000+00:00" - from_attached_disks: [] - from_blueprint_id: "amazon_linux_2" - from_bundle_id: "nano_2_0" - from_instance_arn: "arn:aws:lightsail:us-east-1:070807442430:Instance/5ca1e7ca-a994-4e19-bb82-deb9d79e9ca3" - from_instance_name: "my_instance" - is_from_auto_snapshot: false - location: - availability_zone: "all" - region_name: "us-east-1" - name: "my_instance_snapshot" - resource_type: "InstanceSnapshot" - size_in_gb: 20 - state: "available" - support_code: "351201681302/ami-06b48e5589f1e248b" - tags: [] -""" - -import time - -try: - import botocore -except ImportError: - # will be caught by AnsibleAWSModule - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def find_instance_snapshot_info(module, client, instance_snapshot_name, fail_if_not_found=False): - try: - res = client.get_instance_snapshot(instanceSnapshotName=instance_snapshot_name) - except is_boto3_error_code("NotFoundException") as e: - if fail_if_not_found: - module.fail_json_aws(e) - return None - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - return res["instanceSnapshot"] - - -def wait_for_instance_snapshot(module, client, instance_snapshot_name): - wait_timeout = module.params.get("wait_timeout") - wait_max = time.time() + wait_timeout - snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) - - while wait_max > time.time(): - snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) - current_state = snapshot["state"] - if current_state != "pending": - break - time.sleep(5) - else: - module.fail_json(msg=f'Timed out waiting for instance snapshot "{instance_snapshot_name}" to be created.') - - return snapshot - - -def create_snapshot(module, client): - snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) - new_instance = snapshot is None - - if module.check_mode or not new_instance: - snapshot = snapshot if snapshot is not None else {} - module.exit_json( - changed=new_instance, - instance_snapshot=camel_dict_to_snake_dict(snapshot), - ) - - try: - snapshot = client.create_instance_snapshot( - instanceSnapshotName=module.params.get("snapshot_name"), - instanceName=module.params.get("instance_name"), - ) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - if module.params.get("wait"): - snapshot = wait_for_instance_snapshot(module, client, module.params.get("snapshot_name")) 
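Both wait helpers here (wait_for_instance_snapshot(), and wait_for_instance_state() in lightsail.py above) lean on Python's while/else: the else suite runs only when the loop condition goes false without a break, which is what turns an exhausted polling loop into a timeout failure. A self-contained sketch of the pattern (condition_met() is a hypothetical stand-in for the snapshot state probe):

import time

def condition_met():
    # Hypothetical probe; stands in for checking snapshot["state"] != "pending".
    return False

deadline = time.time() + 5  # illustrative 5-second budget
while time.time() < deadline:
    if condition_met():
        break
    time.sleep(1)
else:
    # Reached only when the while condition failed (deadline passed) without a break.
    raise TimeoutError("condition not met before the deadline")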
- - module.exit_json( - changed=new_instance, - instance_snapshot=camel_dict_to_snake_dict(snapshot), - ) - - -def delete_snapshot(module, client): - snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) - if module.check_mode or snapshot is None: - changed = not (snapshot is None) - instance = snapshot if changed else {} - module.exit_json(changed=changed, instance=instance) - - try: - client.delete_instance_snapshot(instanceSnapshotName=module.params.get("snapshot_name")) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - module.exit_json(changed=True, instance=camel_dict_to_snake_dict(snapshot)) - - -def main(): - argument_spec = dict( - state=dict(type="str", default="present", choices=["present", "absent"]), - snapshot_name=dict(type="str", required=True), - instance_name=dict(type="str"), - wait=dict(type="bool", default=True), - wait_timeout=dict(default=300, type="int"), - ) - required_if = [ - ["state", "present", ("instance_name",)], - ] - - module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) - client = module.client("lightsail") - - state = module.params.get("state") - - if state == "present": - create_snapshot(module, client) - elif state == "absent": - delete_snapshot(module, client) - - -if __name__ == "__main__": - main() diff --git a/lightsail_static_ip.py b/lightsail_static_ip.py deleted file mode 100644 index 40d10a86bb1..00000000000 --- a/lightsail_static_ip.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: lightsail_static_ip -version_added: 4.1.0 -short_description: Manage static IP addresses in AWS Lightsail -description: - - Manage static IP addresses in AWS Lightsail. -author: - - "Daniel Cotton (@danielcotton)" -options: - state: - description: - - Describes the desired state. - default: present - choices: ['present', 'absent'] - type: str - name: - description: Name of the static IP. 
- required: true - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - - -EXAMPLES = r""" -- name: Provision a Lightsail static IP - community.aws.lightsail_static_ip: - state: present - name: my_static_ip - register: my_ip - -- name: Remove a static IP - community.aws.lightsail_static_ip: - state: absent - name: my_static_ip -""" - -RETURN = r""" -static_ip: - description: static IP instance data - returned: always - type: dict - sample: - arn: "arn:aws:lightsail:ap-southeast-2:123456789012:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a" - created_at: "2021-02-28T00:04:05.202000+10:30" - ip_address: "192.0.2.5" - is_attached: false - location: - availability_zone: all - region_name: ap-southeast-2 - name: "static_ip" - resource_type: StaticIp - support_code: "123456789012/192.0.2.5" -""" - -try: - import botocore -except ImportError: - # will be caught by AnsibleAWSModule - pass - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): - try: - res = client.get_static_ip(staticIpName=static_ip_name) - except is_boto3_error_code("NotFoundException") as e: - if fail_if_not_found: - module.fail_json_aws(e) - return None - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - return res["staticIp"] - - -def create_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) - if inst: - module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst)) - else: - create_params = {"staticIpName": static_ip_name} - - try: - client.allocate_static_ip(**create_params) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - inst = find_static_ip_info(module, client, static_ip_name, fail_if_not_found=True) - - module.exit_json(changed=True, static_ip=camel_dict_to_snake_dict(inst)) - - -def delete_static_ip(module, client, static_ip_name): - inst = find_static_ip_info(module, client, static_ip_name) - if inst is None: - module.exit_json(changed=False, static_ip={}) - - changed = False - try: - client.release_static_ip(staticIpName=static_ip_name) - changed = True - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - module.exit_json(changed=changed, static_ip=camel_dict_to_snake_dict(inst)) - - -def main(): - argument_spec = dict( - name=dict(type="str", required=True), - state=dict(type="str", default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - client = module.client("lightsail") - - name = module.params.get("name") - state = module.params.get("state") - - if state == "present": - create_static_ip(module, client, name) - elif state == "absent": - delete_static_ip(module, client, name) - - -if __name__ == "__main__": - main() diff --git a/mq_broker.py b/mq_broker.py deleted file mode 100644 index 5fda006b8b0..00000000000 --- a/mq_broker.py +++ /dev/null @@ -1,547 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r"""
---- -module: mq_broker -version_added: 6.0.0 -short_description: MQ broker management -description: - - Create/update/delete a broker. - - Reboot a broker. -author: - - FCO (@fotto) -options: - broker_name: - description: - - The name of the MQ broker to work on. - type: str - required: true - state: - description: - - "C(present): Create/update broker." - - "C(absent): Delete broker." - - "C(restarted): Reboot broker." - choices: [ 'present', 'absent', 'restarted' ] - default: present - type: str - deployment_mode: - description: - - Set broker deployment type. - - Can be used only during creation. - - Defaults to C(SINGLE_INSTANCE). - choices: [ 'SINGLE_INSTANCE', 'ACTIVE_STANDBY_MULTI_AZ', 'CLUSTER_MULTI_AZ' ] - type: str - use_aws_owned_key: - description: - - Must be set to C(false) if I(kms_key_id) is provided as well. - - Can be used only during creation. - - Defaults to C(true). - type: bool - kms_key_id: - description: - - Use referenced key to encrypt broker data at rest. - - Can be used only during creation. - type: str - engine_type: - description: - - Set broker engine type. - - Can be used only during creation. - - Defaults to C(ACTIVEMQ). - choices: [ 'ACTIVEMQ', 'RABBITMQ' ] - type: str - maintenance_window_start_time: - description: - - Set maintenance window for automatic minor upgrades. - - Can be used only during creation. - - Not providing any value means "no maintenance window". - type: dict - publicly_accessible: - description: - - Allow/disallow public access. - - Can be used only during creation. - - Defaults to C(false). - type: bool - storage_type: - description: - - Set underlying storage type. - - Can be used only during creation. - - Defaults to C(EFS). - choices: [ 'EBS', 'EFS' ] - type: str - subnet_ids: - description: - - Defines where to deploy broker instances. - - Minimum required number depends on deployment type. - - Can be used only during creation. - type: list - elements: str - users: - description: - - This parameter allows using a custom set of initial user(s). - - M(community.aws.mq_user) is the preferred way to manage (local) users - however a broker cannot be created without any user. - - If nothing is specified, a default C(admin) user will be created along with the broker. - - Can be used only during creation. Use M(community.aws.mq_user) module for updates. - type: list - elements: dict - tags: - description: - - Tag newly created brokers. - - Can be used only during creation. - type: dict - authentication_strategy: - description: Choose between locally and remotely managed users. - choices: [ 'SIMPLE', 'LDAP' ] - type: str - auto_minor_version_upgrade: - description: Allow/disallow automatic minor version upgrades. - type: bool - default: true - engine_version: - description: - - Set engine version of broker. - - The special value C(latest) will pick the latest available version. - - The special value C(latest) is ignored on update. - type: str - host_instance_type: - description: Instance type of broker instances. - type: str - enable_audit_log: - description: Enable/disable pushing audit logs to AWS CloudWatch. - type: bool - default: false - enable_general_log: - description: Enable/disable pushing general logs to AWS CloudWatch. - type: bool - default: false - security_groups: - description: - - Associate security groups with broker. - - At least one must be provided during creation.
-    type: list
-    elements: str
-
-extends_documentation_fragment:
-  - amazon.aws.boto3
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-"""
-
-
-EXAMPLES = r"""
-- name: create broker (if missing) with minimal required parameters
-  community.aws.mq_broker:
-    broker_name: "{{ broker_name }}"
-    security_groups:
-      - sg_xxxxxxx
-    subnet_ids:
-      - subnet_xxx
-      - subnet_yyy
-  register: result
-
-- set_fact:
-    broker_id: "{{ result.broker['BrokerId'] }}"
-
-- name: use mq_broker_info to wait until broker is ready
-  community.aws.mq_broker_info:
-    broker_id: "{{ broker_id }}"
-  register: result
-  until: "result.broker['BrokerState'] == 'RUNNING'"
-  retries: 15
-  delay: 60
-
-- name: create or update broker with almost all parameters set, including credentials
-  community.aws.mq_broker:
-    broker_name: "my_broker_2"
-    state: present
-    deployment_mode: 'ACTIVE_STANDBY_MULTI_AZ'
-    use_aws_owned_key: false
-    kms_key_id: 'my-precreated-key-id'
-    engine_type: 'ACTIVEMQ'
-    maintenance_window_start_time:
-      DayOfWeek: 'MONDAY'
-      TimeOfDay: '03:15'
-      TimeZone: 'Europe/Berlin'
-    publicly_accessible: true
-    storage_type: 'EFS'
-    security_groups:
-      - sg_xxxxxxx
-    subnet_ids:
-      - subnet_xxx
-      - subnet_yyy
-    users:
-      - Username: 'initial-user'
-        Password: 'plain-text-password'
-        ConsoleAccess: true
-    tags:
-      env: Test
-      creator: ansible
-    authentication_strategy: 'SIMPLE'
-    auto_minor_version_upgrade: true
-    engine_version: "5.15.13"
-    host_instance_type: 'mq.t3.micro'
-    enable_audit_log: true
-    enable_general_log: true
-
-- name: reboot a broker
-  community.aws.mq_broker:
-    broker_name: "my_broker_2"
-    state: restarted
-
-- name: delete a broker
-  community.aws.mq_broker:
-    broker_name: "my_broker_2"
-    state: absent
-"""
-
-RETURN = r"""
-broker:
-  description:
-    - "All API responses are converted to snake case, except 'Tags'."
-    - "'state=present': API response of the create_broker() or update_broker() call."
-    - "'state=absent': result of the describe_broker() call before delete_broker() is triggered."
-    - "'state=restarted': result of describe_broker() after the reboot has been triggered."
-  type: dict
-  returned: success
-"""
-
-try:
-    import botocore
-except ImportError:
-    # handled by AnsibleAWSModule
-    pass
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
-
-
-PARAMS_MAP = {
-    "authentication_strategy": "AuthenticationStrategy",
-    "auto_minor_version_upgrade": "AutoMinorVersionUpgrade",
-    "broker_name": "BrokerName",
-    "deployment_mode": "DeploymentMode",
-    "use_aws_owned_key": "EncryptionOptions/UseAwsOwnedKey",
-    "kms_key_id": "EncryptionOptions/KmsKeyId",
-    "engine_type": "EngineType",
-    "engine_version": "EngineVersion",
-    "host_instance_type": "HostInstanceType",
-    "enable_audit_log": "Logs/Audit",
-    "enable_general_log": "Logs/General",
-    "maintenance_window_start_time": "MaintenanceWindowStartTime",
-    "publicly_accessible": "PubliclyAccessible",
-    "security_groups": "SecurityGroups",
-    "storage_type": "StorageType",
-    "subnet_ids": "SubnetIds",
-    "users": "Users",
-}
-
-
-DEFAULTS = {
-    "authentication_strategy": "SIMPLE",
-    "auto_minor_version_upgrade": False,
-    "deployment_mode": "SINGLE_INSTANCE",
-    "use_aws_owned_key": True,
-    "engine_type": "ACTIVEMQ",
-    "engine_version": "latest",
-    "host_instance_type": "mq.t3.micro",
-    "enable_audit_log": False,
-    "enable_general_log": False,
-    "publicly_accessible": False,
-    "storage_type": "EFS",
-}
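PARAMS_MAP above encodes nested boto3 fields as slash-separated paths (e.g. "EncryptionOptions/UseAwsOwnedKey"). A minimal standalone sketch of how such a path expands into nested kwargs, the same idea as the module's _set_kwarg() defined further below (expand_path is an illustrative name, not the module's function):

def expand_path(kwargs, mapped_key, value):
    # Walk/create intermediate dicts for every path segment but the last.
    keys = mapped_key.split("/")
    data = kwargs
    for key in keys[:-1]:
        data = data.setdefault(key, {})
    data[keys[-1]] = value

demo = {}
expand_path(demo, "EncryptionOptions/UseAwsOwnedKey", False)
assert demo == {"EncryptionOptions": {"UseAwsOwnedKey": False}}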
"deployment_mode", - "use_aws_owned_key", - "kms_key_id", - "engine_type", - "maintenance_window_start_time", - "publicly_accessible", - "storage_type", - "subnet_ids", - "users", - "tags", -] - - -def _set_kwarg(kwargs, key, value): - mapped_key = PARAMS_MAP[key] - if "/" in mapped_key: - key_list = mapped_key.split("/") - key_list.reverse() - else: - key_list = [mapped_key] - data = kwargs - while len(key_list) > 1: - this_key = key_list.pop() - if this_key not in data: - data[this_key] = {} - # - data = data[this_key] - data[key_list[0]] = value - - -def _fill_kwargs(module, apply_defaults=True, ignore_create_params=False): - kwargs = {} - if apply_defaults: - for p_name, p_value in DEFAULTS.items(): - _set_kwarg(kwargs, p_name, p_value) - for p_name in module.params: - if ignore_create_params and p_name in CREATE_ONLY_PARAMS: - # silently ignore CREATE_ONLY_PARAMS on update to - # make playbooks idempotent - continue - if p_name in PARAMS_MAP and module.params[p_name] is not None: - _set_kwarg(kwargs, p_name, module.params[p_name]) - else: - # ignore - pass - return kwargs - - -def __list_needs_change(current, desired): - if len(current) != len(desired): - return True - # equal length: - c_sorted = sorted(current) - d_sorted = sorted(desired) - for index, value in enumerate(current): - if value != desired[index]: - return True - # - return False - - -def __dict_needs_change(current, desired): - # values contained in 'current' but not specified in 'desired' are ignored - # value contained in 'desired' but not in 'current' (unsupported attributes) are ignored - for key in desired: - if key in current: - if desired[key] != current[key]: - return True - # - return False - - -def _needs_change(current, desired): - needs_change = False - for key in desired: - current_value = current[key] - desired_value = desired[key] - if isinstance(current_value, (int, str, bool)): - if current_value != desired_value: - needs_change = True - break - elif isinstance(current_value, list): - # assumption: all 'list' type settings we allow changes for have scalar values - if __list_needs_change(current_value, desired_value): - needs_change = True - break - elif isinstance(current_value, dict): - # assumption: all 'dict' type settings we allow changes for have scalar values - if __dict_needs_change(current_value, desired_value): - needs_change = True - break - else: - # unexpected type - needs_change = True - break - # - return needs_change - - -def get_latest_engine_version(conn, module, engine_type): - try: - response = conn.describe_broker_engine_types(EngineType=engine_type) - return response["BrokerEngineTypes"][0]["EngineVersions"][0]["Name"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list engine versions") - - -def get_broker_id(conn, module): - try: - broker_name = module.params["broker_name"] - broker_id = None - response = conn.list_brokers(MaxResults=100) - for broker in response["BrokerSummaries"]: - if broker["BrokerName"] == broker_name: - broker_id = broker["BrokerId"] - break - return broker_id - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list broker brokers.") - - -def get_broker_info(conn, module, broker_id): - try: - return conn.describe_broker(BrokerId=broker_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get broker details.") - - -def reboot_broker(conn, 
module, broker_id): - try: - return conn.reboot_broker(BrokerId=broker_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't reboot broker.") - - -def delete_broker(conn, module, broker_id): - try: - return conn.delete_broker(BrokerId=broker_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete broker.") - - -def create_broker(conn, module): - kwargs = _fill_kwargs(module) - if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": - kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"]) - if kwargs["AuthenticationStrategy"] == "LDAP": - module.fail_json(msg="'AuthenticationStrategy=LDAP' not supported, yet") - if "Users" not in kwargs: - # add some stupid default (cannot create broker without any users) - kwargs["Users"] = [{"Username": "admin", "Password": "adminPassword", "ConsoleAccess": True, "Groups": []}] - if "EncryptionOptions" in kwargs and "UseAwsOwnedKey" in kwargs["EncryptionOptions"]: - kwargs["EncryptionOptions"]["UseAwsOwnedKey"] = False - # - if "SecurityGroups" not in kwargs or len(kwargs["SecurityGroups"]) == 0: - module.fail_json(msg="At least one security group must be specified on broker creation") - # - changed = True - result = conn.create_broker(**kwargs) - # - return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed} - - -def update_broker(conn, module, broker_id): - kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True) - # replace name with id - broker_name = kwargs["BrokerName"] - del kwargs["BrokerName"] - kwargs["BrokerId"] = broker_id - # get current state for comparison: - api_result = get_broker_info(conn, module, broker_id) - if api_result["BrokerState"] != "RUNNING": - module.fail_json( - msg=f"Cannot trigger update while broker ({broker_id}) is in state {api_result['BrokerState']}", - ) - # engine version of 'latest' is taken as "keep current one" - # i.e. 
do not request upgrade on playbook rerun
-    if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest":
-        kwargs["EngineVersion"] = api_result["EngineVersion"]
-    result = {"broker_id": broker_id, "broker_name": broker_name}
-    changed = False
-    if _needs_change(api_result, kwargs):
-        changed = True
-        if not module.check_mode:
-            api_result = conn.update_broker(**kwargs)
-        #
-    #
-    return {"broker": result, "changed": changed}
-
-
-def ensure_absent(conn, module):
-    result = {"broker_name": module.params["broker_name"], "broker_id": None}
-    if module.check_mode:
-        return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": True}
-    broker_id = get_broker_id(conn, module)
-    result["broker_id"] = broker_id
-
-    if not broker_id:
-        # silently ignore delete of unknown broker (to make it idempotent)
-        return {"broker": result, "changed": False}
-
-    try:
-        # check for pending delete (small race condition possible here)
-        api_result = get_broker_info(conn, module, broker_id)
-        if api_result["BrokerState"] == "DELETION_IN_PROGRESS":
-            return {"broker": result, "changed": False}
-        delete_broker(conn, module, broker_id)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json_aws(e)
-
-    return {"broker": result, "changed": True}
-
-
-def ensure_present(conn, module):
-    if module.check_mode:
-        return {"broker": {"broker_arn": "fakeArn", "broker_id": "fakeId"}, "changed": True}
-
-    broker_id = get_broker_id(conn, module)
-    if broker_id:
-        return update_broker(conn, module, broker_id)
-
-    return create_broker(conn, module)
-
-
-def main():
-    argument_spec = dict(
-        broker_name=dict(required=True, type="str"),
-        state=dict(default="present", choices=["present", "absent", "restarted"]),
-        # parameters only allowed on create
-        deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]),
-        use_aws_owned_key=dict(type="bool"),
-        kms_key_id=dict(type="str"),
-        engine_type=dict(choices=["ACTIVEMQ", "RABBITMQ"], type="str"),
-        maintenance_window_start_time=dict(type="dict"),
-        publicly_accessible=dict(type="bool"),
-        storage_type=dict(choices=["EBS", "EFS"]),
-        subnet_ids=dict(type="list", elements="str"),
-        users=dict(type="list", elements="dict"),
-        tags=dict(type="dict"),
-        # parameters allowed on update as well
-        authentication_strategy=dict(choices=["SIMPLE", "LDAP"]),
-        auto_minor_version_upgrade=dict(default=True, type="bool"),
-        engine_version=dict(type="str"),
-        host_instance_type=dict(type="str"),
-        enable_audit_log=dict(default=False, type="bool"),
-        enable_general_log=dict(default=False, type="bool"),
-        security_groups=dict(type="list", elements="str"),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    connection = module.client("mq")
-
-    if module.params["state"] == "present":
-        try:
-            compound_result = ensure_present(connection, module)
-        except botocore.exceptions.ClientError as e:
-            module.fail_json_aws(e)
-        #
-        module.exit_json(**compound_result)
-
-    if module.params["state"] == "absent":
-        try:
-            compound_result = ensure_absent(connection, module)
-        except botocore.exceptions.ClientError as e:
-            module.fail_json_aws(e)
-        #
-        module.exit_json(**compound_result)
-
-    if module.params["state"] == "restarted":
-        broker_id = get_broker_id(connection, module)
-        if module.check_mode:
-            module.exit_json(broker={"broker_id": broker_id if broker_id else "fakeId"}, changed=True)
-        if not broker_id:
-            module.fail_json(
-                msg=f"Cannot find broker with name {module.params['broker_name']}.",
-            )
-        try:
-            changed = True
-            if not module.check_mode:
-                reboot_broker(connection, module, broker_id)
-            #
-            result = get_broker_info(connection, module, broker_id)
-        except botocore.exceptions.ClientError as e:
-            module.fail_json_aws(e)
-        module.exit_json(broker=result, changed=changed)
-
-
-if __name__ == "__main__":
-    main()
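The restarted flow above only triggers reboot_broker() and returns; the broker keeps restarting in the background for several minutes. A standalone sketch (not part of the module; assumes configured credentials/region and an existing broker id) of polling describe_broker() until the broker is RUNNING again, which is what the mq_broker_info retry loop in the EXAMPLES section does on the playbook side:

import time
import boto3

def wait_for_running(broker_id, timeout=900, interval=30):
    # Poll DescribeBroker until the broker reports RUNNING or we time out.
    client = boto3.client("mq")
    deadline = time.time() + timeout
    while time.time() < deadline:
        if client.describe_broker(BrokerId=broker_id)["BrokerState"] == "RUNNING":
            return
        time.sleep(interval)
    raise TimeoutError(f"broker {broker_id} not RUNNING after {timeout}s")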
diff --git a/mq_broker_config.py b/mq_broker_config.py
deleted file mode 100644
index 84f1b4dff3b..00000000000
--- a/mq_broker_config.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Contributors to the Ansible project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: mq_broker_config
-version_added: 6.0.0
-short_description: Update Amazon MQ broker configuration
-description:
-  - Update the configuration for an MQ broker.
-  - If the new configuration differs from the current one, a new configuration
-    is created and the new version is assigned to the broker.
-  - Optionally allows a broker reboot to make changes effective immediately.
-author:
-  - FCO (@fotto)
-options:
-  broker_id:
-    description:
-      - The ID of the MQ broker to work on.
-    type: str
-    required: true
-  config_xml:
-    description:
-      - The new configuration for the broker, in XML format.
-    type: str
-    required: true
-  config_description:
-    description:
-      - Description to set on the new configuration revision.
-    type: str
-  reboot:
-    description:
-      - Reboot the broker after the new config has been applied.
-    type: bool
-    default: false
-extends_documentation_fragment:
-  - amazon.aws.boto3
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-"""

-EXAMPLES = r"""
-- name: send new XML config to broker relying on credentials from environment
-  community.aws.mq_broker_config:
-    broker_id: "aws-mq-broker-id"
-    config_xml: "{{ lookup('file', 'activemq.xml') }}"
-    region: "{{ aws_region }}"
-
-- name: send new XML config to broker and reboot if necessary
-  community.aws.mq_broker_config:
-    broker_id: "aws-mq-broker-id"
-    config_xml: "{{ lookup('file', 'activemq2.xml') }}"
-    reboot: true
-
-- name: send new broker config and set all credentials explicitly
-  community.aws.mq_broker_config:
-    broker_id: "{{ broker_id }}"
-    config_xml: "{{ lookup('file', 'activemq3.xml') }}"
-    config_description: "custom description for configuration object"
-  register: result
-"""
-
-RETURN = r"""
-broker:
-  description: API response of describe_broker() converted to snake case after changes have been applied.
-  type: dict
-  returned: success
-configuration:
-  description: Details about the new configuration object.
-  returned: I(changed=true)
-  type: complex
-  contains:
-    id:
-      description: Configuration ID of the broker configuration.
-      type: str
-      example: c-386541b8-3139-42c2-9c2c-a4c267c1714f
-    revision:
-      description: Revision of the configuration that will be active after the next reboot.
-      type: int
-      example: 4
-"""
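The module decides whether a new revision is needed with a whitespace-insensitive string comparison, since AWS normalizes the stored XML and a byte-for-byte comparison would report false changes. A standalone sketch of that normalization, mirroring is_same_config() below:

import re

def normalized(xml_text):
    # Collapse all whitespace runs to single spaces before comparing.
    return re.sub(r"\s+", " ", xml_text, flags=re.S).rstrip()

old = "<broker>\n  <plugins/>\n</broker>\n"
new = "<broker> <plugins/> </broker>"
assert normalized(old) == normalized(new)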
-import base64
-import re
-
-try:
-    import botocore
-except ImportError:
-    # handled by AnsibleAWSModule
-    pass
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
-
-
-DEFAULTS = {"reboot": False}
-FULL_DEBUG = False
-
-
-def is_same_config(old, new):
-    # we do a simple comparison here: collapse whitespace runs and compare the rest
-    # TODO: use the same XML normalizer on 'new' as used by AWS before comparing strings
-    old_stripped = re.sub(r"\s+", " ", old, flags=re.S).rstrip()
-    new_stripped = re.sub(r"\s+", " ", new, flags=re.S).rstrip()
-    return old_stripped == new_stripped
-
-
-def get_broker_info(conn, module):
-    try:
-        return conn.describe_broker(BrokerId=module.params["broker_id"])
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        if module.check_mode:
-            return {
-                "broker_id": module.params["broker_id"],
-            }
-        module.fail_json_aws(e, msg="Couldn't get broker details.")
-
-
-def get_current_configuration(conn, module, cfg_id, cfg_revision):
-    try:
-        return conn.describe_configuration_revision(ConfigurationId=cfg_id, ConfigurationRevision=str(cfg_revision))
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't get configuration revision.")
-
-
-def create_and_assign_config(conn, module, broker_id, cfg_id, cfg_xml_encoded):
-    kwargs = {"ConfigurationId": cfg_id, "Data": cfg_xml_encoded}
-    if "config_description" in module.params and module.params["config_description"]:
-        kwargs["Description"] = module.params["config_description"]
-    else:
-        kwargs["Description"] = "Updated through community.aws.mq_broker_config ansible module"
-    #
-    try:
-        c_response = conn.update_configuration(**kwargs)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't create new configuration revision.")
-    #
-    new_config_revision = c_response["LatestRevision"]["Revision"]
-    try:
-        b_response = conn.update_broker(
-            BrokerId=broker_id, Configuration={"Id": cfg_id, "Revision": new_config_revision}
-        )
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't assign new configuration revision to broker.")
-    #
-    return (c_response, b_response)
-
-
-def reboot_broker(conn, module, broker_id):
-    try:
-        return conn.reboot_broker(BrokerId=broker_id)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't reboot broker.")
-
-
-def ensure_config(conn, module):
-    broker_id = module.params["broker_id"]
-    broker_info = get_broker_info(conn, module)
-    changed = False
-    if module.check_mode and "Configurations" not in broker_info:
-        # no result from get_broker_info(); use the requested config
-        current_cfg_decoded = module.params["config_xml"]
-    else:
-        current_cfg = broker_info["Configurations"]["Current"]
-        if "Pending" in broker_info["Configurations"]:
-            current_cfg = broker_info["Configurations"]["Pending"]
-        current_cfg_encoded = get_current_configuration(conn, module, current_cfg["Id"], current_cfg["Revision"])[
-            "Data"
-        ]
-        current_cfg_decoded = base64.b64decode(current_cfg_encoded.encode()).decode()
-
-    if is_same_config(current_cfg_decoded, module.params["config_xml"]):
-        return {"changed": changed, "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"])}
-
-    (c_response, _b_response) = (None, None)
-    if not module.check_mode:
-        new_cfg_encoded = base64.b64encode(module.params["config_xml"].encode()).decode()
-        (c_response, _b_response) = create_and_assign_config(
-            conn, module, broker_id, current_cfg["Id"], new_cfg_encoded
-        )
-    #
-    changed = True
-
-    if changed and module.params["reboot"] and not module.check_mode:
-        reboot_broker(conn, module, broker_id)
-    #
-    broker_info = get_broker_info(conn, module)
-    return_struct = {
-        "changed": changed,
-        "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"]),
-    }
-    if c_response:
-        # only available when the API was actually called (not in check mode)
-        return_struct["configuration"] = {"id": c_response["Id"], "revision": c_response["LatestRevision"]["Revision"]}
-    if FULL_DEBUG:
-        return_struct["old_config_xml"] = base64.b64decode(current_cfg_encoded)
-        return_struct["new_config_xml"] = module.params["config_xml"]
-        return_struct["old_config_revision"] = current_cfg["Revision"]
-    return return_struct
-
-
-def main():
-    argument_spec = dict(
-        broker_id=dict(required=True, type="str"),
-        config_xml=dict(required=True, type="str"),
-        config_description=dict(required=False, type="str"),
-        reboot=dict(required=False, type="bool", default=DEFAULTS["reboot"]),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    connection = module.client("mq")
-
-    try:
-        result = ensure_config(connection, module)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json_aws(e)
-
-    module.exit_json(**result)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/mq_broker_info.py b/mq_broker_info.py
deleted file mode 100644
index c96e327cd02..00000000000
--- a/mq_broker_info.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Contributors to the Ansible project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: mq_broker_info
-version_added: 6.0.0
-short_description: Retrieve MQ Broker details
-description:
-  - Get details about a broker.
-author:
-  - FCO (@fotto)
-options:
-  broker_id:
-    description: Get details for the broker with the specified ID.
-    type: str
-  broker_name:
-    description:
-      - Get details for the broker with the specified name.
-      - Is ignored if I(broker_id) is specified.
-    type: str
-extends_documentation_fragment:
-  - amazon.aws.boto3
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-"""
-
-
-EXAMPLES = r"""
-- name: get current broker settings by id
-  community.aws.mq_broker_info:
-    broker_id: "aws-mq-broker-id"
-  register: broker_info
-
-- name: get current broker settings by name, setting all credential parameters explicitly
-  community.aws.mq_broker_info:
-    broker_name: "aws-mq-broker-name"
-  register: broker_info
-"""
-
-RETURN = r"""
-broker:
-  description: API response of describe_broker() converted to snake case.
-  type: dict
-  returned: success
-"""
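The lookup helper below resolves a broker name to an id via a single list_brokers() page capped at MaxResults=100. A standalone sketch (assumed credentials/region) of the same name-to-id resolution using the boto3 paginator instead, which also covers accounts with more than 100 brokers:

import boto3

def broker_id_by_name(name):
    client = boto3.client("mq")
    # ListBrokers is paginated; walk every page rather than one MaxResults window.
    for page in client.get_paginator("list_brokers").paginate():
        for summary in page["BrokerSummaries"]:
            if summary["BrokerName"] == name:
                return summary["BrokerId"]
    return None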
-try:
-    import botocore
-except ImportError:
-    # handled by AnsibleAWSModule
-    pass
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
-
-
-def get_broker_id(conn, module):
-    try:
-        broker_name = module.params["broker_name"]
-        broker_id = None
-        response = conn.list_brokers(MaxResults=100)
-        for broker in response["BrokerSummaries"]:
-            if broker["BrokerName"] == broker_name:
-                broker_id = broker["BrokerId"]
-                break
-        return broker_id
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't list brokers.")
-
-
-def get_broker_info(conn, module, broker_id):
-    try:
-        return conn.describe_broker(BrokerId=broker_id)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        if module.check_mode:
-            module.exit_json(broker={"broker_id": broker_id, "broker_name": "fakeName"})
-        else:
-            module.fail_json_aws(e, msg="Couldn't get broker details.")
-
-
-def main():
-    argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str"))
-    required_one_of = (
-        (
-            "broker_name",
-            "broker_id",
-        ),
-    )
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        required_one_of=required_one_of,
-        supports_check_mode=True,
-    )
-    broker_id = module.params["broker_id"]
-    broker_name = module.params["broker_name"]
-
-    connection = module.client("mq")
-
-    try:
-        if not broker_id:
-            broker_id = get_broker_id(connection, module)
-        if not broker_id:
-            if module.check_mode:
-                module.exit_json(
-                    broker={"broker_id": "fakeId", "broker_name": broker_name if broker_name else "fakeName"}
-                )
-            module.fail_json(msg=f"Cannot find broker with name {broker_name}.")
-        result = get_broker_info(connection, module, broker_id)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json_aws(e)
-    #
-    module.exit_json(broker=camel_dict_to_snake_dict(result, ignore_list=["Tags"]))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/mq_user.py b/mq_user.py
deleted file mode 100644
index 898212cbcba..00000000000
--- a/mq_user.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: mq_user
-version_added: 6.0.0
-short_description: Manage users in an existing Amazon MQ broker
-description:
-  - Manage Amazon MQ users.
-  - Pending changes are taken into account for idempotency.
-author:
-  - FCO (@fotto)
-options:
-  broker_id:
-    description:
-      - The ID of the MQ broker to work on.
-    type: str
-    required: true
-  username:
-    description:
-      - The name of the user to create/update/delete.
-    type: str
-    required: true
-  state:
-    description:
-      - Whether to create/update (C(present)) or delete (C(absent)) the user.
-    default: present
-    choices: [ 'present', 'absent' ]
-    type: str
-  console_access:
-    description:
-      - Whether the user can access the MQ Console.
-      - Defaults to C(false) on creation.
-    type: bool
-  groups:
-    description:
-      - Set group memberships for the user.
-      - Defaults to C([]) on creation.
-    type: list
-    elements: str
-  password:
-    description:
-      - Set password for the user.
-      - Defaults to a random password on creation.
-      - Ignored unless I(allow_pw_update=true).
-    type: str
-  allow_pw_update:
-    description:
-      - When I(allow_pw_update=true) and I(password) is set, the password
-        will always be updated for the user.
-    default: false
-    type: bool
-extends_documentation_fragment:
-  - amazon.aws.boto3
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-"""
-
-EXAMPLES = r"""
-- name: create/update user - set provided password if user doesn't exist, yet
-  community.aws.mq_user:
-    state: present
-    broker_id: "aws-mq-broker-id"
-    username: "sample_user1"
-    console_access: false
-    groups: [ "g1", "g2" ]
-    password: "plain-text-password"
-
-- name: allow console access and update group list - relying on default state
-  community.aws.mq_user:
-    broker_id: "aws-mq-broker-id"
-    username: "sample_user1"
-    region: "{{ aws_region }}"
-    console_access: true
-    groups: [ "g1", "g2", "g3" ]
-
-- name: remove user - setting all credentials explicitly
-  community.aws.mq_user:
-    state: absent
-    broker_id: "aws-mq-broker-id"
-    username: "other_user"
-"""
-
-RETURN = r"""
-user:
-  description:
-    - just echoes the username
-    - "only present when state=present"
-  type: str
-  returned: success
-"""
-
-import secrets
-
-try:
-    import botocore
-except ImportError:
-    # handled by AnsibleAWSModule
-    pass
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
-
-CREATE_DEFAULTS = {
-    "console_access": False,
-    "groups": [],
-}
-
-
-def _group_change_required(user_response, requested_groups):
-    current_groups = []
-    if "Groups" in user_response:
-        current_groups = user_response["Groups"]
-    elif "Pending" in user_response:
-        # to support automatic testing without broker reboot
-        current_groups = user_response["Pending"]["Groups"]
-    if len(current_groups) != len(requested_groups):
-        return True
-    if len(current_groups) != len(set(current_groups) & set(requested_groups)):
-        return True
-    #
-    return False
-
-
-def _console_access_change_required(user_response, requested_boolean):
-    current_boolean = CREATE_DEFAULTS["console_access"]
-    if "ConsoleAccess" in user_response:
-        current_boolean = user_response["ConsoleAccess"]
-    elif "Pending" in user_response:
-        # to support automatic testing without broker reboot
-        current_boolean = user_response["Pending"]["ConsoleAccess"]
-    #
-    return current_boolean != requested_boolean
-
-
-def generate_password():
-    return secrets.token_hex(20)
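The set intersection in _group_change_required() above makes the comparison order-insensitive while still catching added or removed memberships. A standalone sketch of that check with the broker-specific plumbing stripped away:

def groups_differ(current, requested):
    # Same length plus full overlap means no update is needed, regardless of order.
    if len(current) != len(requested):
        return True
    return len(current) != len(set(current) & set(requested))

assert not groups_differ(["g1", "g2"], ["g2", "g1"])
assert groups_differ(["g1"], ["g1", "g2"])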
-# returns API response object
-def _create_user(conn, module):
-    kwargs = {"BrokerId": module.params["broker_id"], "Username": module.params["username"]}
-    if "groups" in module.params and module.params["groups"] is not None:
-        kwargs["Groups"] = module.params["groups"]
-    else:
-        kwargs["Groups"] = CREATE_DEFAULTS["groups"]
-    if "password" in module.params and module.params["password"]:
-        kwargs["Password"] = module.params["password"]
-    else:
-        kwargs["Password"] = generate_password()
-    if "console_access" in module.params and module.params["console_access"] is not None:
-        kwargs["ConsoleAccess"] = module.params["console_access"]
-    else:
-        kwargs["ConsoleAccess"] = CREATE_DEFAULTS["console_access"]
-    try:
-        response = conn.create_user(**kwargs)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Couldn't create user")
-    return response
-
-
-# returns API response object
-def _update_user(conn, module, kwargs):
-    try:
-        response = conn.update_user(**kwargs)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e,
msg="Couldn't update user") - return response - - -def get_matching_user(conn, module, broker_id, username): - try: - response = conn.describe_user(BrokerId=broker_id, Username=username) - except is_boto3_error_code("NotFoundException"): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get user details") - return response - - -def ensure_user_present(conn, module): - user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) - changed = False - - if user is None: - if not module.check_mode: - _response = _create_user(conn, module) - changed = True - else: - kwargs = {} - if "groups" in module.params and module.params["groups"] is not None: - if _group_change_required(user, module.params["groups"]): - kwargs["Groups"] = module.params["groups"] - if "console_access" in module.params and module.params["console_access"] is not None: - if _console_access_change_required(user, module.params["console_access"]): - kwargs["ConsoleAccess"] = module.params["console_access"] - if "password" in module.params and module.params["password"]: - if "allow_pw_update" in module.params and module.params["allow_pw_update"]: - kwargs["Password"] = module.params["password"] - if len(kwargs) == 0: - changed = False - else: - if not module.check_mode: - kwargs["BrokerId"] = module.params["broker_id"] - kwargs["Username"] = module.params["username"] - response = _update_user(conn, module, kwargs) - # - changed = True - # - user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) - - return {"changed": changed, "user": camel_dict_to_snake_dict(user, ignore_list=["Tags"])} - - -def ensure_user_absent(conn, module): - user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"]) - result = {"changed": False} - if user is None: - return result - # better support for testing - if "Pending" in user and "PendingChange" in user["Pending"] and user["Pending"]["PendingChange"] == "DELETE": - return result - - result = {"changed": True} - if module.check_mode: - return result - - try: - conn.delete_user(BrokerId=user["BrokerId"], Username=user["Username"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete user") - - return result - - -def main(): - argument_spec = dict( - broker_id=dict(required=True, type="str"), - username=dict(required=True, type="str"), - console_access=dict(required=False, type="bool"), - groups=dict(required=False, type="list", elements="str"), - password=dict(required=False, type="str", no_log=True), - allow_pw_update=dict(default=False, required=False, type="bool"), - state=dict(default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client("mq") - - state = module.params.get("state") - - try: - if state == "present": - result = ensure_user_present(connection, module) - elif state == "absent": - result = ensure_user_absent(connection, module) - except botocore.exceptions.ClientError as e: - module.fail_json_aws(e) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/mq_user_info.py b/mq_user_info.py deleted file mode 100644 index 8c63f829188..00000000000 --- a/mq_user_info.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible 
project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: mq_user_info
-version_added: 6.0.0
-short_description: List users of an Amazon MQ broker
-description:
-  - List users for the specified broker ID.
-  - Pending creations and deletions can be skipped by options.
-author:
-  - FCO (@fotto)
-options:
-  broker_id:
-    description:
-      - The ID of the MQ broker to work on.
-    type: str
-    required: true
-  max_results:
-    description:
-      - The maximum number of results to return.
-    type: int
-    default: 100
-  skip_pending_create:
-    description:
-      - Will skip pending creates from the result set.
-    type: bool
-    default: false
-  skip_pending_delete:
-    description:
-      - Will skip pending deletes from the result set.
-    type: bool
-    default: false
-  as_dict:
-    description:
-      - Convert the result into a lookup table keyed by username.
-    type: bool
-    default: false
-
-extends_documentation_fragment:
-  - amazon.aws.boto3
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-"""
-
-
-EXAMPLES = r"""
-- name: get all users as list - relying on environment for API credentials
-  community.aws.mq_user_info:
-    broker_id: "aws-mq-broker-id"
-    max_results: 50
-  register: result
-
-- name: get users as dict - explicitly specifying all credentials
-  community.aws.mq_user_info:
-    broker_id: "aws-mq-broker-id"
-  register: result
-
-- name: get list of users to decide which may need to be deleted
-  community.aws.mq_user_info:
-    broker_id: "aws-mq-broker-id"
-    skip_pending_delete: true
-
-- name: get list of users to decide which may need to be created
-  community.aws.mq_user_info:
-    broker_id: "aws-mq-broker-id"
-    skip_pending_create: true
-"""
-
-RETURN = r"""
-users:
-  type: dict
-  returned: success
-  description:
-    - dict keyed by username when I(as_dict=true), otherwise a list of records
-    - each entry is the record for a user as returned by the API, converted to snake case
-"""
-
-try:
-    import botocore
-except ImportError:
-    # handled by AnsibleAWSModule
-    pass
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-
-DEFAULTS = {"max_results": 100, "skip_pending_create": False, "skip_pending_delete": False}
-
-
-def get_user_info(conn, module):
-    try:
-        response = conn.list_users(BrokerId=module.params["broker_id"], MaxResults=module.params["max_results"])
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        if module.check_mode:
-            # return an empty result for an unknown broker in check mode
-            if module.params["as_dict"]:
-                return {}
-            return []
-        module.fail_json_aws(e, msg="Failed to describe users")
-    #
-    if not module.params["skip_pending_create"] and not module.params["skip_pending_delete"]:
-        # we can simply return the sub-object from the response
-        records = response["Users"]
-    else:
-        records = []
-        for record in response["Users"]:
-            if "PendingChange" in record:
-                if record["PendingChange"] == "CREATE" and module.params["skip_pending_create"]:
-                    continue
-                if record["PendingChange"] == "DELETE" and module.params["skip_pending_delete"]:
-                    continue
-            #
-            records.append(record)
-    #
-    if module.params["as_dict"]:
-        user_records = {}
-        for record in records:
-            user_records[record["Username"]] = record
-        #
-        return camel_dict_to_snake_dict(user_records, ignore_list=["Tags"])
-
-    return camel_dict_to_snake_dict(records, ignore_list=["Tags"])
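A standalone sketch of the skip_pending_* filtering that get_user_info() above applies to list_users() records (record shape assumed from the API: entries may carry a PendingChange marker):

def filter_users(records, skip_create=False, skip_delete=False):
    kept = []
    for record in records:
        change = record.get("PendingChange")
        if change == "CREATE" and skip_create:
            continue  # user not effective yet
        if change == "DELETE" and skip_delete:
            continue  # user about to disappear
        kept.append(record)
    return kept

users = [{"Username": "a"}, {"Username": "b", "PendingChange": "DELETE"}]
assert filter_users(users, skip_delete=True) == [{"Username": "a"}]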
-def main():
-    argument_spec = dict(
-        broker_id=dict(required=True, type="str"),
-        max_results=dict(required=False, type="int", default=DEFAULTS["max_results"]),
-        skip_pending_create=dict(required=False, type="bool", default=DEFAULTS["skip_pending_create"]),
-        skip_pending_delete=dict(required=False, type="bool", default=DEFAULTS["skip_pending_delete"]),
-        as_dict=dict(required=False, type="bool", default=False),
-    )
-
-    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
-    connection = module.client("mq")
-
-    try:
-        user_records = get_user_info(connection, module)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json_aws(e)
-
-    module.exit_json(users=user_records)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/msk_cluster.py b/msk_cluster.py
deleted file mode 100644
index aa0383294b2..00000000000
--- a/msk_cluster.py
+++ /dev/null
@@ -1,795 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: msk_cluster
-short_description: Manage Amazon MSK clusters
-version_added: "2.0.0"
-description:
-  - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) clusters.
-  - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_cluster).
-    The usage did not change.
-author:
-  - Daniil Kupchenko (@oukooveu)
-options:
-  state:
-    description: Create (C(present)) or delete (C(absent)) cluster.
-    choices: ['present', 'absent']
-    type: str
-    default: 'present'
-  name:
-    description: The name of the cluster.
-    required: true
-    type: str
-  version:
-    description:
-      - The version of Apache Kafka.
-      - This version should exist in the given configuration.
-      - This parameter is required when I(state=present).
-    type: str
-  configuration_arn:
-    description:
-      - ARN of the configuration to use.
-      - This parameter is required when I(state=present).
-    type: str
-  configuration_revision:
-    description:
-      - The revision of the configuration to use.
-      - This parameter is required when I(state=present).
-    type: int
-  nodes:
-    description: The number of broker nodes in the cluster. Must be greater than or equal to two.
-    type: int
-    default: 3
-  instance_type:
-    description:
-      - The type of Amazon EC2 instances to use for Kafka brokers.
-    choices:
-      - kafka.t3.small
-      - kafka.m5.large
-      - kafka.m5.xlarge
-      - kafka.m5.2xlarge
-      - kafka.m5.4xlarge
-    default: kafka.t3.small
-    type: str
-  ebs_volume_size:
-    description: The size in GiB of the EBS volume for the data drive on each broker node.
-    type: int
-    default: 100
-  subnets:
-    description:
-      - The list of subnets to connect to in the client virtual private cloud (VPC).
-        AWS creates elastic network interfaces inside these subnets. Client applications use
-        elastic network interfaces to produce and consume data.
-      - Client subnets can't be in Availability Zone us-east-1e.
-      - This parameter is required when I(state=present).
-    type: list
-    elements: str
-  security_groups:
-    description:
-      - The AWS security groups to associate with the elastic network interfaces in order to specify
-        who can connect to and communicate with the Amazon MSK cluster.
-        If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC.
-    type: list
-    elements: str
-  encryption:
-    description:
-      - Includes all encryption-related information.
-      - Effective only for a new cluster and cannot be updated.
-    type: dict
-    suboptions:
-      kms_key_id:
-        description:
-          - The ARN of the AWS KMS key for encrypting data at rest. If you don't specify a KMS key, MSK creates one for you and uses it.
-        default: Null
-        type: str
-      in_transit:
-        description: The details for encryption in transit.
-        type: dict
-        suboptions:
-          in_cluster:
-            description:
-              - When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted.
-                When set to false, the communication happens in plaintext.
-            type: bool
-            default: True
-          client_broker:
-            description:
-              - Indicates the encryption setting for data in transit between clients and brokers. The following are the possible values.
-                TLS means that client-broker communication is enabled with TLS only.
-                TLS_PLAINTEXT means that client-broker communication is enabled for both TLS-encrypted, as well as plaintext data.
-                PLAINTEXT means that client-broker communication is enabled in plaintext only.
-            choices:
-              - TLS
-              - TLS_PLAINTEXT
-              - PLAINTEXT
-            type: str
-            default: TLS
-  authentication:
-    description:
-      - Includes all client authentication related information.
-      - Effective only for a new cluster and cannot be updated.
-    type: dict
-    suboptions:
-      tls_ca_arn:
-        description: List of ACM Certificate Authority ARNs.
-        type: list
-        elements: str
-      sasl_scram:
-        description: Whether SASL/SCRAM authentication is enabled.
-        type: bool
-      sasl_iam:
-        version_added: 5.5.0
-        description: Whether IAM authentication is enabled.
-        type: bool
-      unauthenticated:
-        version_added: 5.5.0
-        description: Option to explicitly turn authentication on or off.
-        type: bool
-        default: True
-  enhanced_monitoring:
-    description: Specifies the level of monitoring for the MSK cluster.
-    choices:
-      - DEFAULT
-      - PER_BROKER
-      - PER_TOPIC_PER_BROKER
-      - PER_TOPIC_PER_PARTITION
-    default: DEFAULT
-    type: str
-  open_monitoring:
-    description: The settings for open monitoring.
-    type: dict
-    suboptions:
-      jmx_exporter:
-        description: Indicates whether you want to enable or disable the JMX Exporter.
-        type: bool
-        default: False
-      node_exporter:
-        description: Indicates whether you want to enable or disable the Node Exporter.
-        type: bool
-        default: False
-  logging:
-    description: Logging configuration.
-    type: dict
-    suboptions:
-      cloudwatch:
-        description: Details of the CloudWatch Logs destination for broker logs.
-        type: dict
-        suboptions:
-          enabled:
-            description: Specifies whether broker logs get sent to the specified CloudWatch Logs destination.
-            type: bool
-            default: False
-          log_group:
-            description: The CloudWatch log group that is the destination for broker logs.
-            type: str
-            required: False
-      firehose:
-        description: Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs.
-        type: dict
-        suboptions:
-          enabled:
-            description: Specifies whether broker logs get sent to the specified Kinesis Data Firehose delivery stream.
-            type: bool
-            default: False
-          delivery_stream:
-            description: The Kinesis Data Firehose delivery stream that is the destination for broker logs.
-            type: str
-            required: False
-      s3:
-        description: Details of the Amazon S3 destination for broker logs.
-        type: dict
-        suboptions:
-          enabled:
-            description: Specifies whether broker logs get sent to the specified Amazon S3 destination.
-            type: bool
-            default: False
-          bucket:
-            description: The name of the S3 bucket that is the destination for broker logs.
-            type: str
-            required: False
-          prefix:
-            description: The S3 prefix that is the destination for broker logs.
- type: str - required: False - wait: - description: Whether to wait for the cluster to be available or deleted. - type: bool - default: false - wait_timeout: - description: How many seconds to wait. Cluster creation can take up to 20-30 minutes. - type: int - default: 3600 -notes: - - All operations are time consuming, for example create takes 20-30 minutes, - update kafka version -- more than one hour, update configuration -- 10-15 minutes; - - Cluster's brokers get evenly distributed over a number of availability zones - that's equal to the number of subnets. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- community.aws.msk_cluster: - name: kafka-cluster - state: present - version: 2.6.1 - nodes: 6 - ebs_volume_size: "{{ aws_msk_options.ebs_volume_size }}" - subnets: - - subnet-e3b48ce7c25861eeb - - subnet-2990c8b25b07ddd43 - - subnet-d9fbeaf46c54bfab6 - wait: true - wait_timeout: 1800 - configuration_arn: arn:aws:kafka:us-east-1:123456789012:configuration/kafka-cluster-configuration/aaaaaaaa-bbbb-4444-3333-ccccccccc-1 - configuration_revision: 1 - -- community.aws.msk_cluster: - name: kafka-cluster - state: absent -""" - -RETURN = r""" -# These are examples of possible return values, and in general should use other names for return values. - -bootstrap_broker_string: - description: A list of brokers that a client application can use to bootstrap. - type: complex - contains: - plain: - description: A string containing one or more hostname:port pairs. - type: str - tls: - description: A string containing one or more DNS names (or IP) and TLS port pairs. - type: str - returned: I(state=present) and cluster state is I(ACTIVE) -cluster_info: - description: Description of the MSK cluster. - type: dict - returned: I(state=present) -response: - description: The response from actual API call. 
- type: dict - returned: always - sample: {} -""" - -import time - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -@AWSRetry.jittered_backoff(retries=5, delay=5) -def list_clusters_with_backoff(client, cluster_name): - paginator = client.get_paginator("list_clusters") - return paginator.paginate(ClusterNameFilter=cluster_name).build_full_result() - - -@AWSRetry.jittered_backoff(retries=5, delay=5) -def list_nodes_with_backoff(client, cluster_arn): - paginator = client.get_paginator("list_nodes") - return paginator.paginate(ClusterArn=cluster_arn).build_full_result() - - -def find_cluster_by_name(client, module, cluster_name): - try: - cluster_list = list_clusters_with_backoff(client, cluster_name).get("ClusterInfoList", []) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "Failed to find kafka cluster by name") - if cluster_list: - if len(cluster_list) != 1: - module.fail_json(msg=f"Found more than one cluster with name '{cluster_name}'") - return cluster_list[0] - return {} - - -def get_cluster_state(client, module, arn): - try: - response = client.describe_cluster(ClusterArn=arn, aws_retry=True) - except client.exceptions.NotFoundException: - return "DELETED" - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "Failed to get kafka cluster state") - return response["ClusterInfo"]["State"] - - -def get_cluster_version(client, module, arn): - try: - response = client.describe_cluster(ClusterArn=arn, aws_retry=True) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "Failed to get kafka cluster version") - return response["ClusterInfo"]["CurrentVersion"] - - -def wait_for_cluster_state(client, module, arn, state="ACTIVE"): - # As of 2021-06 boto3 doesn't offer any built in waiters - start = time.time() - timeout = int(module.params.get("wait_timeout")) - check_interval = 60 - while True: - current_state = get_cluster_state(client, module, arn) - if current_state == state: - return - if time.time() - start > timeout: - module.fail_json(msg=f"Timeout waiting for cluster {current_state} (desired state is '{state}')") - time.sleep(check_interval) - - -def prepare_create_options(module): - """ - Return data structure for cluster create operation - """ - - c_params = { - "ClusterName": module.params["name"], - "KafkaVersion": module.params["version"], - "ConfigurationInfo": { - "Arn": module.params["configuration_arn"], - "Revision": module.params["configuration_revision"], - }, - "NumberOfBrokerNodes": module.params["nodes"], - "BrokerNodeGroupInfo": { - "ClientSubnets": module.params["subnets"], - "InstanceType": module.params["instance_type"], - }, - } - - if module.params["security_groups"] and len(module.params["security_groups"]) != 0: - c_params["BrokerNodeGroupInfo"]["SecurityGroups"] = module.params.get("security_groups") - - if module.params["ebs_volume_size"]: - c_params["BrokerNodeGroupInfo"]["StorageInfo"] = { - "EbsStorageInfo": {"VolumeSize": 
module.params.get("ebs_volume_size")} - } - - if module.params["encryption"]: - c_params["EncryptionInfo"] = {} - if module.params["encryption"].get("kms_key_id"): - c_params["EncryptionInfo"]["EncryptionAtRest"] = { - "DataVolumeKMSKeyId": module.params["encryption"]["kms_key_id"] - } - c_params["EncryptionInfo"]["EncryptionInTransit"] = { - "ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"), - "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True), - } - - if module.params["authentication"]: - c_params["ClientAuthentication"] = {} - if module.params["authentication"].get("sasl_scram") or module.params["authentication"].get("sasl_iam"): - sasl = {} - if module.params["authentication"].get("sasl_scram"): - sasl["Scram"] = {"Enabled": True} - if module.params["authentication"].get("sasl_iam"): - sasl["Iam"] = {"Enabled": True} - c_params["ClientAuthentication"]["Sasl"] = sasl - if module.params["authentication"].get("tls_ca_arn"): - c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], - "Enabled": True, - } - if module.params["authentication"].get("unauthenticated"): - c_params["ClientAuthentication"] = { - "Unauthenticated": {"Enabled": True}, - } - - c_params.update(prepare_enhanced_monitoring_options(module)) - c_params.update(prepare_open_monitoring_options(module)) - c_params.update(prepare_logging_options(module)) - - return c_params - - -def prepare_enhanced_monitoring_options(module): - m_params = {} - m_params["EnhancedMonitoring"] = module.params["enhanced_monitoring"] or "DEFAULT" - return m_params - - -def prepare_open_monitoring_options(module): - m_params = {} - open_monitoring = module.params["open_monitoring"] or {} - m_params["OpenMonitoring"] = { - "Prometheus": { - "JmxExporter": {"EnabledInBroker": open_monitoring.get("jmx_exporter", False)}, - "NodeExporter": {"EnabledInBroker": open_monitoring.get("node_exporter", False)}, - } - } - return m_params - - -def prepare_logging_options(module): - l_params = {} - logging = module.params["logging"] or {} - if logging.get("cloudwatch"): - l_params["CloudWatchLogs"] = { - "Enabled": module.params["logging"]["cloudwatch"].get("enabled"), - "LogGroup": module.params["logging"]["cloudwatch"].get("log_group"), - } - else: - l_params["CloudWatchLogs"] = {"Enabled": False} - if logging.get("firehose"): - l_params["Firehose"] = { - "Enabled": module.params["logging"]["firehose"].get("enabled"), - "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream"), - } - else: - l_params["Firehose"] = {"Enabled": False} - if logging.get("s3"): - l_params["S3"] = { - "Enabled": module.params["logging"]["s3"].get("enabled"), - "Bucket": module.params["logging"]["s3"].get("bucket"), - "Prefix": module.params["logging"]["s3"].get("prefix"), - } - else: - l_params["S3"] = {"Enabled": False} - return {"LoggingInfo": {"BrokerLogs": l_params}} - - -def create_or_update_cluster(client, module): - """ - Create new or update existing cluster - """ - - changed = False - response = {} - - cluster = find_cluster_by_name(client, module, module.params["name"]) - - if not cluster: - changed = True - - if module.check_mode: - return True, {} - - create_params = prepare_create_options(module) - - try: - response = client.create_cluster(aws_retry=True, **create_params) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "Failed to create kafka 
cluster") - - if module.params.get("wait"): - wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE") - - else: - response["ClusterArn"] = cluster["ClusterArn"] - response["changes"] = {} - - # prepare available update methods definitions with current/target values and options - msk_cluster_changes = { - "broker_count": { - "current_value": cluster["NumberOfBrokerNodes"], - "target_value": module.params.get("nodes"), - "update_params": {"TargetNumberOfBrokerNodes": module.params.get("nodes")}, - }, - "broker_storage": { - "current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"], - "target_value": module.params.get("ebs_volume_size"), - "update_params": { - "TargetBrokerEBSVolumeInfo": [ - {"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")} - ] - }, - }, - "broker_type": { - "current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"], - "target_value": module.params.get("instance_type"), - "update_params": {"TargetInstanceType": module.params.get("instance_type")}, - }, - "cluster_configuration": { - "current_value": { - "arn": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationArn"], - "revision": cluster["CurrentBrokerSoftwareInfo"]["ConfigurationRevision"], - }, - "target_value": { - "arn": module.params.get("configuration_arn"), - "revision": module.params.get("configuration_revision"), - }, - "update_params": { - "ConfigurationInfo": { - "Arn": module.params.get("configuration_arn"), - "Revision": module.params.get("configuration_revision"), - } - }, - }, - "cluster_kafka_version": { - "current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"], - "target_value": module.params.get("version"), - "update_params": {"TargetKafkaVersion": module.params.get("version")}, - }, - "enhanced_monitoring": { - "current_value": cluster["EnhancedMonitoring"], - "target_value": module.params.get("enhanced_monitoring"), - "update_method": "update_monitoring", - "update_params": prepare_enhanced_monitoring_options(module), - }, - "open_monitoring": { - "current_value": {"OpenMonitoring": cluster["OpenMonitoring"]}, - "target_value": prepare_open_monitoring_options(module), - "update_method": "update_monitoring", - "update_params": prepare_open_monitoring_options(module), - }, - "logging": { - "current_value": {"LoggingInfo": cluster["LoggingInfo"]}, - "target_value": prepare_logging_options(module), - "update_method": "update_monitoring", - "update_params": prepare_logging_options(module), - }, - } - - for method, options in msk_cluster_changes.items(): - if "botocore_version" in options: - if not module.botocore_at_least(options["botocore_version"]): - continue - - try: - update_method = getattr(client, options.get("update_method", "update_" + method)) - except AttributeError as e: - module.fail_json_aws(e, f"There is no update method 'update_{method}'") - - if options["current_value"] != options["target_value"]: - changed = True - if module.check_mode: - return True, {} - - # need to get cluster version and check for the state because - # there can be several updates requested but only one in time can be performed - version = get_cluster_version(client, module, cluster["ClusterArn"]) - state = get_cluster_state(client, module, cluster["ClusterArn"]) - if state != "ACTIVE": - if module.params["wait"]: - wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") - else: - module.fail_json( - msg=f"Cluster can be updated only in active state, current state is '{state}'. 
 Check the cluster state or use the wait option."
-                    )
-                try:
-                    response["changes"][method] = update_method(
-                        ClusterArn=cluster["ClusterArn"], CurrentVersion=version, **options["update_params"]
-                    )
-                except (
-                    botocore.exceptions.BotoCoreError,
-                    botocore.exceptions.ClientError,
-                ) as e:
-                    module.fail_json_aws(e, f"Failed to update cluster via 'update_{method}'")
-
-                if module.params["wait"]:
-                    wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
-
-        changed |= update_cluster_tags(client, module, response["ClusterArn"])
-
-    return changed, response
-
-
-def update_cluster_tags(client, module, arn):
-    new_tags = module.params.get("tags")
-    if new_tags is None:
-        return False
-    purge_tags = module.params.get("purge_tags")
-
-    try:
-        existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"]
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg=f"Unable to retrieve tags for cluster '{arn}'")
-
-    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
-
-    if not module.check_mode:
-        try:
-            if tags_to_remove:
-                client.untag_resource(ResourceArn=arn, TagKeys=tags_to_remove, aws_retry=True)
-            if tags_to_add:
-                client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True)
-        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg=f"Unable to set tags for cluster '{arn}'")
-
-    changed = bool(tags_to_add) or bool(tags_to_remove)
-    return changed
-
-
-def delete_cluster(client, module):
-    cluster = find_cluster_by_name(client, module, module.params["name"])
-
-    if module.check_mode:
-        if cluster:
-            return True, cluster
-        else:
-            return False, {}
-
-    if not cluster:
-        return False, {}
-
-    try:
-        response = client.delete_cluster(
-            ClusterArn=cluster["ClusterArn"],
-            CurrentVersion=cluster["CurrentVersion"],
-        )
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-        module.fail_json_aws(e, "Failed to delete kafka cluster")
-
-    if module.params["wait"]:
-        wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="DELETED")
-
-    response["bootstrap_broker_string"] = {}
-
-    return True, response
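update_cluster_tags() above delegates the diffing to amazon.aws's compare_aws_tags(). A dependency-free standalone sketch of the same reconciliation semantics (illustrative only; the module keeps using the shared helper):

def tag_diff(existing, desired, purge=True):
    # Tags to (re)apply: missing keys or keys whose value changed.
    to_add = {k: v for k, v in desired.items() if existing.get(k) != v}
    # Tags to drop: only when purging and the key is no longer desired.
    to_remove = [k for k in existing if purge and k not in desired]
    return to_add, to_remove

add, remove = tag_diff({"env": "dev", "team": "x"}, {"env": "prod"})
assert add == {"env": "prod"} and remove == ["team"]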
"PER_BROKER", - "PER_TOPIC_PER_BROKER", - "PER_TOPIC_PER_PARTITION", - ], - default="DEFAULT", - required=False, - ), - open_monitoring=dict( - type="dict", - options=dict( - jmx_exporter=dict(type="bool", default=False), - node_exporter=dict(type="bool", default=False), - ), - ), - logging=dict( - type="dict", - options=dict( - cloudwatch=dict( - type="dict", - options=dict( - enabled=dict(type="bool", default=False), - log_group=dict(type="str", required=False), - ), - ), - firehose=dict( - type="dict", - options=dict( - enabled=dict(type="bool", default=False), - delivery_stream=dict(type="str", required=False), - ), - ), - s3=dict( - type="dict", - options=dict( - enabled=dict(type="bool", default=False), - bucket=dict(type="str", required=False), - prefix=dict(type="str", required=False), - ), - ), - ), - ), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=3600), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - ) - - module = AnsibleAWSModule( - argument_spec=module_args, - required_if=[["state", "present", ["version", "configuration_arn", "configuration_revision", "subnets"]]], - supports_check_mode=True, - ) - - client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) - - if module.params["state"] == "present": - if len(module.params["subnets"]) < 2: - module.fail_json(msg="At least two client subnets should be provided") - if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0: - module.fail_json( - msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter" - ) - if len(module.params["name"]) > 64: - module.fail_json( - module.fail_json(msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit") - ) - changed, response = create_or_update_cluster(client, module) - elif module.params["state"] == "absent": - changed, response = delete_cluster(client, module) - - cluster_info = {} - bootstrap_broker_string = {} - if response.get("ClusterArn") and module.params["state"] == "present": - try: - cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)["ClusterInfo"] - if cluster_info.get("State") == "ACTIVE": - brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True) - if brokers.get("BootstrapBrokerString"): - bootstrap_broker_string["plain"] = brokers["BootstrapBrokerString"] - if brokers.get("BootstrapBrokerStringTls"): - bootstrap_broker_string["tls"] = brokers["BootstrapBrokerStringTls"] - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws( - e, - f"Can not obtain information about cluster {response['ClusterArn']}", - ) - - module.exit_json( - changed=changed, - bootstrap_broker_string=bootstrap_broker_string, - cluster_info=camel_dict_to_snake_dict(cluster_info), - response=camel_dict_to_snake_dict(response), - ) - - -if __name__ == "__main__": - main() diff --git a/msk_config.py b/msk_config.py deleted file mode 100644 index 2469f95984b..00000000000 --- a/msk_config.py +++ /dev/null @@ -1,306 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: msk_config -short_description: Manage Amazon MSK cluster configurations -version_added: "2.0.0" -description: - - Create, delete and modify Amazon MSK 
(Managed Streaming for Apache Kafka) cluster configurations. - - Prior to release 5.0.0 this module was called C(community.aws.aws_msk_config). - The usage did not change. -author: - - Daniil Kupchenko (@oukooveu) -options: - state: - description: Create (C(present)) or delete (C(absent)) cluster configuration. - choices: ['present', 'absent'] - default: 'present' - type: str - name: - description: The name of the configuration. - required: true - type: str - description: - description: The description of the configuration. - type: str - default: '' - config: - description: Contents of the server.properties file. - type: dict - default: {} - aliases: ['configuration'] - kafka_versions: - description: - - The versions of Apache Kafka with which you can use this MSK configuration. - - Required when I(state=present). - type: list - elements: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- community.aws.msk_config: - name: kafka-cluster-configuration - state: present - kafka_versions: - - 2.6.0 - - 2.6.1 - config: - auto.create.topics.enable: false - num.partitions: 1 - default.replication.factor: 3 - zookeeper.session.timeout.ms: 18000 - -- community.aws.msk_config: - name: kafka-cluster-configuration - state: absent -""" - -RETURN = r""" -# These are examples of possible return values, and in general should use other names for return values. - -arn: - description: The Amazon Resource Name (ARN) of the configuration. - type: str - returned: I(state=present) - sample: "arn:aws:kafka:::configuration//" -revision: - description: The revision number. - type: int - returned: I(state=present) - sample: 1 -server_properties: - description: Contents of the server.properties file. - type: str - returned: I(state=present) - sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000" -response: - description: The response from actual API call. 
- type: dict - returned: always - sample: {} -""" -
-try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule -
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -
-from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - -
-def dict_to_prop(d): - """convert dictionary to multi-line properties""" - if len(d) == 0: - return "" - return "\n".join(f"{k}={v}" for k, v in d.items()) - -
-def prop_to_dict(p): - """convert properties to dictionary""" - if len(p) == 0: - return {} - r_dict = {} - for s in p.decode().split("\n"): - kv = s.split("=", 1) - r_dict[kv[0].strip()] = kv[1].strip() - return r_dict - -
-@AWSRetry.jittered_backoff(retries=5, delay=5) -def get_configurations_with_backoff(client): - paginator = client.get_paginator("list_configurations") - return paginator.paginate().build_full_result() - -
-def find_active_config(client, module): - """ - looking for configuration by name - """ - - name = module.params["name"] -
- try: - all_configs = get_configurations_with_backoff(client)["Configurations"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="failed to obtain kafka configurations") -
- active_configs = list(item for item in all_configs if item["Name"] == name and item["State"] == "ACTIVE") -
- if active_configs: - if len(active_configs) == 1: - return active_configs[0] - else: - module.fail_json(msg=f"found more than one active config with name '{name}'") -
- return None - -
-def get_configuration_revision(client, module, arn, revision): - try: - return client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "failed to describe kafka configuration revision") - -
-def is_configuration_changed(module, current): - """compare the configuration's description and properties against the module parameters""" - prop_module = {} - for k, v in module.params.get("config").items(): - prop_module[str(k)] = str(v) - if prop_to_dict(current.get("ServerProperties", "")) == prop_module: - if current.get("Description", "") == module.params.get("description"): - return False - return True - -
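-# A quick sketch of the helpers above (illustrative values):
-#   dict_to_prop({"num.partitions": 1, "num.io.threads": 8})
-#   returns "num.partitions=1\nnum.io.threads=8", while
-#   prop_to_dict(b"num.partitions=1\nnum.io.threads=8")
-#   returns {"num.partitions": "1", "num.io.threads": "8"}.
-# is_configuration_changed() relies on this round-trip: the module's 'config'
-# values are stringified and compared against the decoded ServerProperties of
-# the latest active revision.
-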
-def create_config(client, module): - """create new or update existing configuration""" - - config = find_active_config(client, module) -
- # create new configuration - if not config: - if module.check_mode: - return True, {} -
- try: - response = client.create_configuration( - Name=module.params.get("name"), - Description=module.params.get("description"), - KafkaVersions=module.params.get("kafka_versions"), - ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True, - )
- except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "failed to create kafka configuration") -
- # update existing configuration (creates new revision) - else: - # fetch the latest revision, since the configuration listing doesn't include 'ServerProperties' - response = get_configuration_revision( - client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"] - ) -
- if not is_configuration_changed(module, response): - return False, response -
- if module.check_mode: - return True, {} -
- try: - response = client.update_configuration( - Arn=config["Arn"], - Description=module.params.get("description"), - ServerProperties=dict_to_prop(module.params.get("config")).encode(), - aws_retry=True, - )
- except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "failed to update kafka configuration") -
- arn = response["Arn"] - revision = response["LatestRevision"]["Revision"] -
- result = get_configuration_revision(client, module, arn=arn, revision=revision) -
- return True, result - -
-def delete_config(client, module): - """delete configuration""" - - config = find_active_config(client, module) -
- if module.check_mode: - if config: - return True, config - else: - return False, {} -
- if config: - try: - response = client.delete_configuration(Arn=config["Arn"], aws_retry=True) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: - module.fail_json_aws(e, "failed to delete the kafka configuration") - return True, response -
- return False, {} - -
-def main(): - module_args = dict( - name=dict(type="str", required=True), - description=dict(type="str", default=""), - state=dict(choices=["present", "absent"], default="present"), - config=dict(type="dict", aliases=["configuration"], default={}), - kafka_versions=dict(type="list", elements="str"), - ) -
- module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True) -
- client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff()) -
- if module.params["state"] == "present": - changed, response = create_config(client, module) -
- elif module.params["state"] == "absent": - changed, response = delete_config(client, module) -
- # return placeholder values in check mode if the configuration doesn't exist; - # they can be useful when these values are referenced by other modules during a check mode run
- if module.check_mode and not response.get("Arn"): - account_id, partition = get_aws_account_info(module) - arn = f"arn:{partition}:kafka:{module.region}:{account_id}:configuration/{module.params['name']}/id" - revision = 1 - server_properties = ""
- else: - arn = response.get("Arn") - revision = response.get("Revision") - server_properties = response.get("ServerProperties", "") -
- module.exit_json( - changed=changed, - arn=arn, - revision=revision, - server_properties=server_properties, - response=camel_dict_to_snake_dict(response), - ) - -
-if __name__ == "__main__": - main() diff --git a/networkfirewall.py b/networkfirewall.py deleted file mode 100644 index 2cab7e26dfc..00000000000 --- a/networkfirewall.py +++ /dev/null @@ -1,348 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -
-# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -
-DOCUMENTATION = r""" -module: networkfirewall -short_description: manage AWS Network Firewall firewalls -version_added: 4.0.0 -description: - - A module for creating, updating and deleting AWS Network Firewall firewalls. -options: - arn: - description: - - The ARN of the firewall. - - Exactly one of I(arn) or I(name) must be provided. - required: false - type: str - aliases: ['firewall_arn'] - name: - description: - - The name of the firewall.
- - Cannot be updated after creation. - - Exactly one of I(arn) or I(name) must be provided. - required: false - type: str - aliases: ['firewall_name']
- state: - description: - - Create or remove the firewall. - required: false - choices: ['present', 'absent'] - default: 'present' - type: str
- description: - description: - - A description for the firewall. - required: false - type: str
- delete_protection: - description: - - When I(delete_protection=True), the firewall is protected from deletion. - - Defaults to C(false) when not provided on creation. - type: bool - required: false
- policy_change_protection: - description: - - When I(policy_change_protection=True), the firewall is protected from - changes to which policy is attached to the firewall. - - Defaults to C(false) when not provided on creation. - type: bool - required: false - aliases: ['firewall_policy_change_protection']
- subnet_change_protection: - description: - - When I(subnet_change_protection=True), the firewall is protected from - changes to which subnets are attached to the firewall. - - Defaults to C(false) when not provided on creation. - type: bool - required: false
- wait: - description: - - On creation, whether to wait for the firewall to reach the C(READY) - state. - - On deletion, whether to wait for the firewall to reach the C(DELETED) - state. - - On update, whether to wait for the firewall to reach the C(IN_SYNC) - configuration synchronization state. - type: bool - required: false - default: true
- wait_timeout: - description: - - Maximum time, in seconds, to wait for the firewall to reach the - expected state. - - Defaults to 600 seconds. - type: int - required: false
- subnets: - description: - - The IDs of the subnets to which the firewall will be associated. - - Required when creating a new firewall. - type: list - elements: str - required: false
- purge_subnets: - description: - - If I(purge_subnets=true), existing subnets will be removed from the - firewall as necessary to match exactly what is defined by I(subnets). - type: bool - required: false - default: true
- policy: - description: - - The ARN of the Network Firewall policy to use for the firewall. - - Required when creating a new firewall. - type: str - required: false - aliases: ['firewall_policy_arn'] -
-author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" -
-EXAMPLES = r""" -# Create an AWS Network Firewall -- community.aws.networkfirewall: - name: 'ExampleFirewall' - state: present - policy: 'ExamplePolicy' - subnets: - - 'subnet-123456789abcdef01' -
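-# Add an extra subnet to an existing firewall, keeping the subnets already
-# attached (a sketch; the subnet ID is illustrative).
-- community.aws.networkfirewall:
-    name: 'ExampleFirewall'
-    state: present
-    subnets:
-      - 'subnet-abcdef0123456789a'
-    purge_subnets: false
-
-# Create an AWS Network Firewall with various options, don't wait for creation
-# to finish.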
-- community.aws.networkfirewall: - name: 'ExampleFirewall' - state: present - delete_protection: True - description: "An example Description" - policy: 'ExamplePolicy' - policy_change_protection: True - subnets: - - 'subnet-123456789abcdef01' - - 'subnet-abcdef0123456789a' - subnet_change_protection: True - tags: - ExampleTag: Example Value - another_tag: another_example - wait: false - -
-# Delete an AWS Network Firewall -- community.aws.networkfirewall: - state: absent - name: 'ExampleFirewall' -""" -
-RETURN = r""" -firewall: - description: The full details of the firewall - returned: success - type: dict - contains: - firewall: - description: The details of the firewall - type: dict - returned: success - contains:
- delete_protection: - description: A flag indicating whether it is possible to delete the firewall. - type: bool - returned: success - example: true
- description: - description: A description of the firewall. - type: str - returned: success - example: "Description"
- firewall_arn: - description: The ARN of the firewall. - type: str - returned: success - example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall"
- firewall_id: - description: A unique ID for the firewall. - type: str - returned: success - example: "12345678-abcd-1234-abcd-123456789abc"
- firewall_name: - description: The name of the firewall. - type: str - returned: success - example: "ExampleFirewall"
- firewall_policy_arn: - description: The ARN of the firewall policy used by the firewall. - type: str - returned: success - example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy"
- firewall_policy_change_protection: - description: - - A flag indicating whether it is possible to change which firewall - policy is used by the firewall. - type: bool - returned: success - example: false
- subnet_change_protection: - description: - - A flag indicating whether it is possible to change which subnets - the firewall endpoints are in. - type: bool - returned: success - example: true
- subnets: - description: A list of the subnets the firewall endpoints are in. - type: list - elements: str - example: ["subnet-12345678", "subnet-87654321"]
- subnet_mappings: - description: A list representing the subnets the firewall endpoints are in. - type: list - elements: dict - contains: - subnet_id: - description: The ID of the subnet. - type: str - returned: success - example: "subnet-12345678"
- tags: - description: The tags associated with the firewall. - type: dict - returned: success - example: '{"SomeTag": "SomeValue"}'
- vpc_id: - description: The ID of the VPC that the firewall is used by. - type: str - returned: success - example: "vpc-0123456789abcdef0"
- firewall_metadata: - description: Metadata about the firewall - type: dict - returned: success - contains: - configuration_sync_state_summary: - description: - - A short summary of the synchronization status of the - policy and rule groups. - type: str - returned: success - example: "IN_SYNC"
- status: - description: - - A short summary of the status of the firewall endpoints. - type: str - returned: success - example: "READY"
- sync_states: - description: - - A description, broken down by availability zone, of the status - of the firewall endpoints as well as the synchronization status - of the policies and rule groups.
- type: dict - returned: success - example: - { - "us-east-1a": { - "attachment": { - "endpoint_id": "vpce-123456789abcdef01", - "status": "READY", - "subnet_id": "subnet-12345678" - }, - "config": { - "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": { - "sync_status": "IN_SYNC", - "update_token": "abcdef01-0000-0000-0000-123456789abc" - }, - "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": { - "sync_status": "IN_SYNC", - "update_token": "12345678-0000-0000-0000-abcdef012345" - } - } - } - } -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager - - -def main(): - argument_spec = dict( - name=dict(type="str", required=False, aliases=["firewall_name"]), - arn=dict(type="str", required=False, aliases=["firewall_arn"]), - state=dict(type="str", required=False, default="present", choices=["present", "absent"]), - description=dict(type="str", required=False), - tags=dict(type="dict", required=False, aliases=["resource_tags"]), - purge_tags=dict(type="bool", required=False, default=True), - wait=dict(type="bool", required=False, default=True), - wait_timeout=dict(type="int", required=False), - subnet_change_protection=dict(type="bool", required=False), - policy_change_protection=dict(type="bool", required=False, aliases=["firewall_policy_change_protection"]), - delete_protection=dict(type="bool", required=False), - subnets=dict(type="list", elements="str", required=False), - purge_subnets=dict(type="bool", required=False, default=True), - policy=dict(type="str", required=False, aliases=["firewall_policy_arn"]), - ) - - mutually_exclusive = [ - ["arn", "name"], - ] - required_one_of = [ - ["arn", "name"], - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - ) - - arn = module.params.get("arn") - name = module.params.get("name") - state = module.params.get("state") - - manager = NetworkFirewallManager(module, name=name, arn=arn) - manager.set_wait(module.params.get("wait", None)) - manager.set_wait_timeout(module.params.get("wait_timeout", None)) - - if state == "absent": - manager.set_delete_protection(module.params.get("delete_protection", None)) - manager.delete() - else: - if not manager.original_resource: - if not module.params.get("subnets", None): - module.fail_json("The subnets parameter must be provided on creation.") - if not module.params.get("policy", None): - module.fail_json("The policy parameter must be provided on creation.") - manager.set_description(module.params.get("description", None)) - manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None)) - manager.set_subnet_change_protection(module.params.get("subnet_change_protection", None)) - manager.set_policy_change_protection(module.params.get("policy_change_protection", None)) - manager.set_delete_protection(module.params.get("delete_protection", None)) - manager.set_subnets(module.params.get("subnets", None), module.params.get("purge_subnets", None)) - manager.set_policy(module.params.get("policy", None)) - manager.flush_changes() - - results = dict( - changed=manager.changed, - firewall=manager.updated_resource, - ) - if manager.changed: - diff = dict( - before=manager.original_resource, - after=manager.updated_resource, - ) - 
results["diff"] = diff - module.exit_json(**results) - -
-if __name__ == "__main__": - main() diff --git a/networkfirewall_info.py b/networkfirewall_info.py deleted file mode 100644 index 262a31067b8..00000000000 --- a/networkfirewall_info.py +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -
-# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -
-DOCUMENTATION = r""" -module: networkfirewall_info -short_description: describe AWS Network Firewall firewalls -version_added: 4.0.0 -description: - - A module for describing AWS Network Firewall firewalls. -options:
- arn: - description: - - The ARN of the Network Firewall. - - Mutually exclusive with I(name) and I(vpc_ids). - required: false - type: str
- name: - description: - - The name of the Network Firewall. - - Mutually exclusive with I(arn) and I(vpc_ids). - required: false - type: str
- vpc_ids: - description: - - A list of VPCs to retrieve the firewalls for. - - Mutually exclusive with I(name) and I(arn). - required: false - type: list - elements: str - aliases: ['vpcs', 'vpc_id'] -
-author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" -
-EXAMPLES = r""" - -# Describe all firewalls in an account -- community.aws.networkfirewall_info: {} -
-# Describe a firewall by ARN -- community.aws.networkfirewall_info: - arn: arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall -
-# Describe a firewall by name -- community.aws.networkfirewall_info: - name: ExampleFirewall -""" -
-RETURN = r""" -firewall_list: - description: A list of ARNs of the matching firewalls. - type: list - elements: str - returned: When a firewall name isn't specified - example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall/Example1', - 'arn:aws:network-firewall:us-east-1:123456789012:firewall/Example2'] -
-firewalls: - description: The details of the firewalls - returned: success - type: list - elements: dict - contains: - firewall: - description: The details of the firewall - type: dict - returned: success - contains:
- delete_protection: - description: A flag indicating whether it is possible to delete the firewall. - type: bool - returned: success - example: true
- description: - description: A description of the firewall. - type: str - returned: success - example: "Description"
- firewall_arn: - description: The ARN of the firewall. - type: str - returned: success - example: "arn:aws:network-firewall:us-east-1:123456789012:firewall/ExampleFirewall"
- firewall_id: - description: A unique ID for the firewall. - type: str - returned: success - example: "12345678-abcd-1234-abcd-123456789abc"
- firewall_name: - description: The name of the firewall. - type: str - returned: success - example: "ExampleFirewall"
- firewall_policy_arn: - description: The ARN of the firewall policy used by the firewall. - type: str - returned: success - example: "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy"
- firewall_policy_change_protection: - description: - - A flag indicating whether it is possible to change which firewall - policy is used by the firewall. - type: bool - returned: success - example: false
- subnet_change_protection: - description: - - A flag indicating whether it is possible to change which subnets - the firewall endpoints are in.
- type: bool - returned: success - example: true - subnet_mappings: - description: A list of the subnets the firewall endpoints are in. - type: list - elements: dict - contains: - subnet_id: - description: The ID of the subnet. - type: str - returned: success - example: "subnet-12345678" - tags: - description: The tags associated with the firewall. - type: dict - returned: success - example: '{"SomeTag": "SomeValue"}' - vpc_id: - description: The ID of the VPC that the firewall is used by. - type: str - returned: success - example: "vpc-0123456789abcdef0" - firewall_metadata: - description: Metadata about the firewall - type: dict - returned: success - contains: - configuration_sync_state_summary: - description: - - A short summary of the synchronization status of the - policy and rule groups. - type: str - returned: success - example: "IN_SYNC" - status: - description: - - A short summary of the status of the firewall endpoints. - type: str - returned: success - example: "READY" - sync_states: - description: - - A description, broken down by availability zone, of the status - of the firewall endpoints as well as the synchronization status - of the policies and rule groups. - type: dict - returned: success - example: - { - "us-east-1a": { - "attachment": { - "endpoint_id": "vpce-123456789abcdef01", - "status": "READY", - "subnet_id": "subnet-12345678" - }, - "config": { - "arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Ansible-Example": { - "sync_status": "IN_SYNC", - "update_token": "abcdef01-0000-0000-0000-123456789abc" - }, - "arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleDomainList": { - "sync_status": "IN_SYNC", - "update_token": "12345678-0000-0000-0000-abcdef012345" - } - } - } - } -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager - - -def main(): - argument_spec = dict( - name=dict(type="str", required=False), - arn=dict(type="str", required=False), - vpc_ids=dict(type="list", required=False, elements="str", aliases=["vpcs", "vpc_id"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ["arn", "name", "vpc_ids"], - ], - ) - - arn = module.params.get("arn") - name = module.params.get("name") - vpcs = module.params.get("vpc_ids") - - manager = NetworkFirewallManager(module) - - results = dict(changed=False) - - if name or arn: - firewall = manager.get_firewall(name=name, arn=arn) - if firewall: - results["firewalls"] = [firewall] - else: - results["firewalls"] = [] - else: - if vpcs: - firewall_list = manager.list(vpc_ids=vpcs) - else: - firewall_list = manager.list() - results["firewall_list"] = firewall_list - firewalls = [manager.get_firewall(arn=f) for f in firewall_list] - results["firewalls"] = firewalls - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/networkfirewall_policy.py b/networkfirewall_policy.py deleted file mode 100644 index a1d389fe732..00000000000 --- a/networkfirewall_policy.py +++ /dev/null @@ -1,439 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: networkfirewall_policy -short_description: manage AWS Network Firewall policies -version_added: 4.0.0 -description: - - A 
module for creating, updating and deleting AWS Network Firewall policies. -options:
- arn: - description: - - The ARN of the Network Firewall policy. - - Exactly one of I(arn) or I(name) must be provided. - required: false - type: str
- name: - description: - - The name of the Network Firewall policy. - - Cannot be updated after creation. - - Exactly one of I(arn) or I(name) must be provided. - required: false - type: str
- state: - description: - - Create or remove the Network Firewall policy. - required: false - choices: ['present', 'absent'] - default: 'present' - type: str
- description: - description: - - A description for the Network Firewall policy. - required: false - type: str
- stateful_rule_groups: - description: - - A list of names or ARNs of stateful firewall rule groups. - required: false - type: list - elements: str - aliases: ['stateful_groups']
- stateless_rule_groups: - description: - - A list of names or ARNs of stateless firewall rule groups. - required: false - type: list - elements: str - aliases: ['stateless_groups']
- stateless_default_actions: - description: - - Actions to take on a packet if it doesn't match any of the stateless - rules in the policy. - - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe). - - When creating a new policy defaults to C(aws:forward_to_sfe). - required: false - type: list - elements: str
- stateless_fragment_default_actions: - description: - - Actions to take on a fragmented UDP packet if it doesn't match any - of the stateless rules in the policy. - - Common actions are C(aws:pass), C(aws:drop) and C(aws:forward_to_sfe). - - When creating a new policy defaults to C(aws:forward_to_sfe). - required: false - type: list - elements: str
- stateful_default_actions: - description: - - Actions to take on a packet if it doesn't match any of the stateful - rules in the policy. - - Common actions are C(aws:drop_strict), C(aws:drop_established), - C(aws:alert_strict) and C(aws:alert_established). - - Only valid for policies where I(stateful_rule_order='strict'). - - When creating a new policy defaults to C(aws:drop_strict). - - I(stateful_default_actions) requires botocore>=1.21.52. - required: false - type: list - elements: str
- stateful_rule_order: - description: - - Indicates how to manage the order of stateful rule evaluation for the policy. - - When I(stateful_rule_order='strict') rules and rule groups are evaluated in - the order that they're defined. - - Cannot be updated after creation. - - I(stateful_rule_order) requires botocore>=1.21.52. - required: false - type: str - choices: ['default', 'strict'] - aliases: ['rule_order']
- stateless_custom_actions: - description: - - A list of dictionaries defining custom actions which can be used in - I(stateless_default_actions) and I(stateless_fragment_default_actions). - required: false - type: list - elements: dict - aliases: ['custom_stateless_actions'] - suboptions:
- name: - description: - - The name of the custom action. - required: true - type: str
- publish_metric_dimension_value: - description: - - When the custom action is used, metrics will have a C(CustomAction) - dimension whose value is set to I(publish_metric_dimension_value). - required: false - type: str - aliases: ['publish_metric_dimension_values']
- purge_stateless_custom_actions: - description: - - If I(purge_stateless_custom_actions=true), existing custom actions will - be purged from the resource to match exactly what is defined by - the I(stateless_custom_actions) parameter.
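- # A sketch of defining a custom action and then consuming it by name
- # (names are illustrative):
- #   stateless_custom_actions:
- #     - name: 'ExampleMetricAction'
- #       publish_metric_dimension_value: 'ExampleDimension'
- #   stateless_default_actions: ['aws:pass', 'ExampleMetricAction']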
- type: bool - required: false - default: True - aliases: ['purge_custom_stateless_actions'] - wait: - description: - - Whether to wait for the firewall policy to reach the - C(ACTIVE) or C(DELETED) state before the module returns. - type: bool - required: false - default: true - wait_timeout: - description: - - Maximum time, in seconds, to wait for the firewall policy - to reach the expected state. - - Defaults to 600 seconds. - type: int - required: false - -author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 - - amazon.aws.tags -""" - -EXAMPLES = r""" -# Create an AWS Network Firewall Policy with default rule order -- community.aws.networkfirewall_policy: - stateful_rule_order: 'default' - state: present - name: 'ExamplePolicy' - -# Create an AWS Network Firewall Policy with strict rule order -- community.aws.networkfirewall_policy: - stateful_rule_order: 'strict' - state: present - name: 'ExampleStrictPolicy' - - -# Create an AWS Network Firewall Policy that defaults to dropping all packets -- community.aws.networkfirewall_policy: - stateful_rule_order: 'strict' - state: present - name: 'ExampleDropPolicy' - stateful_default_actions: - - 'aws:drop_strict' - stateful_rule_groups: - - 'ExampleStrictRuleGroup' - - 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/BotNetCommandAndControlDomainsStrictOrder' - -# Delete an AWS Network Firewall Policy -- community.aws.networkfirewall_policy: - state: absent - name: 'ExampleDropPolicy' -""" - -RETURN = r""" -policy: - description: The details of the policy - type: dict - returned: success - contains: - policy: - description: The details of the policy - type: dict - returned: success - contains: - stateful_engine_options: - description: - - Extra options describing how the stateful rules should be handled. - type: dict - returned: success - contains: - rule_order: - description: - - How rule group evaluation will be ordered. - - For more information on rule evaluation ordering see the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). - type: str - returned: success - example: 'DEFAULT_ACTION_ORDER' - stateful_rule_group_references: - description: Information about the stateful rule groups attached to the policy. - type: list - elements: dict - returned: success - contains: - resource_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder' - priority: - description: - - An integer that indicates the order in which to run the stateful rule groups in a single policy. - - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings. - type: int - returned: success - example: 1234 - stateless_custom_actions: - description: - - A description of additional custom actions available for use as - default rules to apply to stateless packets. - type: list - elements: dict - returned: success - contains: - action_name: - description: A name for the action. - type: str - returned: success - example: 'ExampleAction' - action_definition: - description: The action to perform. - type: dict - returned: success - contains: - publish_metric_action: - description: - - Definition of a custom metric to be published to CloudWatch. 
- - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html) - type: dict - returned: success - contains:
- dimensions: - description: - - The values of the CustomAction dimension to set on the metrics. - - The dimensions of a metric are used to identify unique - streams of data. - type: list - elements: dict - returned: success - contains: - value: - description: A value of the CustomAction dimension to set on the metrics. - type: str - returned: success - example: 'ExampleRule'
- stateless_default_actions: - description: The default actions to take on a packet that doesn't match any stateless rules. - type: list - elements: str - returned: success - example: ['aws:alert_strict']
- stateless_fragment_default_actions: - description: The actions to take on a packet if it doesn't match any of the stateless rules in the policy. - type: list - elements: str - returned: success - example: ['aws:pass']
- stateless_rule_group_references: - description: Information about the stateless rule groups attached to the policy. - type: list - elements: dict - returned: success - contains: - resource_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup' - priority: - description: - - An integer that indicates the order in which to run the stateless rule groups in a single policy. - type: int - returned: success - example: 12345
- policy_metadata: - description: Metadata about the policy - type: dict - returned: success - contains: - consumed_stateful_rule_capacity: - description: The total number of capacity units used by the stateful rule groups. - type: int - returned: success - example: 165
- consumed_stateless_rule_capacity: - description: The total number of capacity units used by the stateless rule groups. - type: int - returned: success - example: 2010
- firewall_policy_arn: - description: The ARN of the policy. - type: str - returned: success - example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy
- firewall_policy_id: - description: The unique ID of the policy. - type: str - returned: success - example: 12345678-abcd-1234-5678-123456789abc
- firewall_policy_name: - description: The name of the policy. - type: str - returned: success - example: ExamplePolicy
- firewall_policy_status: - description: The current status of the policy. - type: str - returned: success - example: ACTIVE
- number_of_associations: - description: The number of firewalls the policy is associated to. - type: int - returned: success - example: 1
- tags: - description: A dictionary representing the tags associated with the policy. - type: dict - returned: success - example: {'tagName': 'Some Value'} -""" -
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager - -
-def main(): - custom_action_options = dict( - name=dict(type="str", required=True), - # Poorly documented, but "publishMetricAction.dimensions ... must have length less than or equal to 1"
- publish_metric_dimension_value=dict(type="str", required=False, aliases=["publish_metric_dimension_values"]), - # NetworkFirewallPolicyManager can cope with a list for future-proofing - # publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']), - ) -
- argument_spec = dict( - name=dict(type="str", required=False), - arn=dict(type="str", required=False), - state=dict(type="str", required=False, default="present", choices=["present", "absent"]), - description=dict(type="str", required=False), - tags=dict(type="dict", required=False, aliases=["resource_tags"]), - purge_tags=dict(type="bool", required=False, default=True),
- stateful_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateful_groups"]), - stateless_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateless_groups"]), - stateful_default_actions=dict(type="list", elements="str", required=False), - stateless_default_actions=dict(type="list", elements="str", required=False), - stateless_fragment_default_actions=dict(type="list", elements="str", required=False),
- stateful_rule_order=dict(type="str", required=False, choices=["strict", "default"], aliases=["rule_order"]), - stateless_custom_actions=dict( - type="list", - elements="dict", - required=False, - options=custom_action_options, - aliases=["custom_stateless_actions"], - ),
- purge_stateless_custom_actions=dict( - type="bool", required=False, default=True, aliases=["purge_custom_stateless_actions"] - ), - wait=dict(type="bool", required=False, default=True), - wait_timeout=dict(type="int", required=False), - ) -
- mutually_exclusive = [ - ["arn", "name"], - ] - required_one_of = [ - ["arn", "name"], - ] -
- module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - ) -
- arn = module.params.get("arn") - name = module.params.get("name") - state = module.params.get("state") -
- manager = NetworkFirewallPolicyManager(module, name=name, arn=arn) - manager.set_wait(module.params.get("wait", None)) - manager.set_wait_timeout(module.params.get("wait_timeout", None)) -
- rule_order = module.params.get("stateful_rule_order") - if rule_order and rule_order != "default": - module.require_botocore_at_least("1.21.52", reason="to set the rule order") - if module.params.get("stateful_default_actions"): - module.require_botocore_at_least("1.21.52", reason="to set the default actions for stateful flows") -
- if state == "absent": - manager.delete() - else: - manager.set_description(module.params.get("description", None)) - manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None))
- # Actions need to be defined before potentially consuming them - manager.set_custom_stateless_actions( - module.params.get("stateless_custom_actions", None), - module.params.get("purge_stateless_custom_actions", True), - )
- manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None)) - manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None)) - manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None)) - manager.set_stateful_default_actions(module.params.get("stateful_default_actions", None)) - manager.set_stateless_default_actions(module.params.get("stateless_default_actions", None)) -
manager.set_stateless_fragment_default_actions(module.params.get("stateless_fragment_default_actions", None)) - - manager.flush_changes() - - results = dict( - changed=manager.changed, - policy=manager.updated_resource, - ) - if manager.changed: - diff = dict( - before=manager.original_resource, - after=manager.updated_resource, - ) - results["diff"] = diff - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/networkfirewall_policy_info.py b/networkfirewall_policy_info.py deleted file mode 100644 index 3bb92174513..00000000000 --- a/networkfirewall_policy_info.py +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: networkfirewall_policy_info -short_description: describe AWS Network Firewall policies -version_added: 4.0.0 -description: - - A module for describing AWS Network Firewall policies. -options: - arn: - description: - - The ARN of the Network Firewall policy. - - Mutually exclusive with I(name). - required: false - type: str - name: - description: - - The name of the Network Firewall policy. - - Mutually exclusive with I(arn). - required: false - type: str - -author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -# Describe all Firewall policies in an account -- community.aws.networkfirewall_policy_info: {} - -# Describe a Firewall policy by ARN -- community.aws.networkfirewall_policy_info: - arn: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy - -# Describe a Firewall policy by name -- community.aws.networkfirewall_policy_info: - name: ExamplePolicy -""" - -RETURN = r""" -policy_list: - description: A list of ARNs of the matching policies. - type: list - elements: str - returned: When a policy name isn't specified - example: ['arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example1', - 'arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/Example2'] - -policies: - description: The details of the policies - returned: success - type: list - elements: dict - contains: - policy: - description: The details of the policy - type: dict - returned: success - contains: - stateful_engine_options: - description: - - Extra options describing how the stateful rules should be handled. - type: dict - returned: success - contains: - rule_order: - description: - - How rule group evaluation will be ordered. - - For more information on rule evaluation ordering see the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). - type: str - returned: success - example: 'DEFAULT_ACTION_ORDER' - stateful_rule_group_references: - description: Information about the stateful rule groups attached to the policy. - type: list - elements: dict - returned: success - contains: - resource_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:aws-managed:stateful-rulegroup/AbusedLegitMalwareDomainsActionOrder' - priority: - description: - - An integer that indicates the order in which to run the stateful rule groups in a single policy. - - This only applies to policies that specify the STRICT_ORDER rule order in the stateful engine options settings. 
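- # (With STRICT_ORDER, rule groups with lower priority values are evaluated
- # first, e.g. priority 1 runs before priority 100.)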
- type: int - returned: success - example: 1234
- stateless_custom_actions: - description: - - A description of additional custom actions available for use as - default rules to apply to stateless packets. - type: list - elements: dict - returned: success - contains:
- action_name: - description: A name for the action. - type: str - returned: success - example: 'ExampleAction'
- action_definition: - description: The action to perform. - type: dict - returned: success - contains: - publish_metric_action: - description: - - Definition of a custom metric to be published to CloudWatch. - - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/monitoring-cloudwatch.html) - type: dict - returned: success - contains:
- dimensions: - description: - - The values of the CustomAction dimension to set on the metrics. - - The dimensions of a metric are used to identify unique - streams of data. - type: list - elements: dict - returned: success - contains: - value: - description: A value of the CustomAction dimension to set on the metrics. - type: str - returned: success - example: 'ExampleRule'
- stateless_default_actions: - description: The default actions to take on a packet that doesn't match any stateless rules. - type: list - elements: str - returned: success - example: ['aws:alert_strict']
- stateless_fragment_default_actions: - description: The actions to take on a packet if it doesn't match any of the stateless rules in the policy. - type: list - elements: str - returned: success - example: ['aws:pass']
- stateless_rule_group_references: - description: Information about the stateless rule groups attached to the policy. - type: list - elements: dict - returned: success - contains: - resource_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/ExampleGroup' - priority: - description: - - An integer that indicates the order in which to run the stateless rule groups in a single policy. - type: int - returned: success - example: 12345
- policy_metadata: - description: Metadata about the policy - type: dict - returned: success - contains: - consumed_stateful_rule_capacity: - description: The total number of capacity units used by the stateful rule groups. - type: int - returned: success - example: 165
- consumed_stateless_rule_capacity: - description: The total number of capacity units used by the stateless rule groups. - type: int - returned: success - example: 2010
- firewall_policy_arn: - description: The ARN of the policy. - type: str - returned: success - example: arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/ExamplePolicy
- firewall_policy_id: - description: The unique ID of the policy. - type: str - returned: success - example: 12345678-abcd-1234-5678-123456789abc
- firewall_policy_name: - description: The name of the policy. - type: str - returned: success - example: ExamplePolicy
- firewall_policy_status: - description: The current status of the policy. - type: str - returned: success - example: ACTIVE
- number_of_associations: - description: The number of firewalls the policy is associated to. - type: int - returned: success - example: 1
- tags: - description: A dictionary representing the tags associated with the policy.
- type: dict - returned: success - example: {'tagName': 'Some Value'} -""" -
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager - -
-def main(): - argument_spec = dict( - name=dict(type="str", required=False), - arn=dict(type="str", required=False), - ) -
- module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ["arn", "name"], - ], - ) -
- arn = module.params.get("arn") - name = module.params.get("name") -
- manager = NetworkFirewallPolicyManager(module) -
- results = dict(changed=False) -
- if name or arn: - policy = manager.get_policy(name=name, arn=arn) - if policy: - results["policies"] = [policy] - else: - results["policies"] = []
- else: - policy_list = manager.list() - results["policy_list"] = policy_list - policies = [manager.get_policy(arn=p) for p in policy_list] - results["policies"] = policies -
- module.exit_json(**results) - -
-if __name__ == "__main__": - main() diff --git a/networkfirewall_rule_group.py b/networkfirewall_rule_group.py deleted file mode 100644 index a7800568619..00000000000 --- a/networkfirewall_rule_group.py +++ /dev/null @@ -1,828 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -
-# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -
-DOCUMENTATION = r""" -module: networkfirewall_rule_group -short_description: create, delete and modify AWS Network Firewall rule groups -version_added: 4.0.0 -description: - - A module for managing AWS Network Firewall rule groups. - - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/index.html) - - Currently only supports C(stateful) firewall groups. -options:
- arn: - description: - - The ARN of the Network Firewall rule group. - - Exactly one of I(arn) and I(name) must be provided. - required: false - type: str
- name: - description: - - The name of the Network Firewall rule group. - - When I(name) is set, I(rule_type) must also be set. - required: false - type: str
- rule_type: - description: - - Indicates whether the rule group is stateless or stateful. - - Stateless rulesets are currently not supported. - - Required if I(name) is set. - required: false - aliases: ['type'] - choices: ['stateful'] -# choices: ['stateful', 'stateless'] - type: str
- state: - description: - - Create or remove the Network Firewall rule group. - required: false - choices: ['present', 'absent'] - default: 'present' - type: str
- capacity: - description: - - The maximum operating resources that this rule group can use. - - Once a rule group is created this parameter is immutable. - - See also the AWS documentation about how capacity is calculated - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/nwfw-rule-group-capacity.html) - - This option is mandatory when creating a new rule group. - type: int - required: false
- rule_order: - description: - - Indicates how to manage the order of the rule evaluation for the rule group. - - Once a rule group is created this parameter is immutable. - - Mutually exclusive with I(rule_type=stateless). - - For more information on how rules are evaluated read the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html). - - I(rule_order) requires botocore>=1.23.23.
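- # (The choices map onto the API's rule order constants, 'default' to
- # DEFAULT_ACTION_ORDER and 'strict' to STRICT_ORDER, as reflected in the
- # return values below.)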
- type: str - required: false - choices: ['default', 'strict'] - aliases: ['stateful_rule_order']
- description: - description: - - A description of the AWS Network Firewall rule group. - type: str
- ip_variables: - description: - - A dictionary mapping variable names to a list of IP addresses and address ranges, in CIDR notation. - - For example C({EXAMPLE_HOSTS:["192.0.2.0/24", "203.0.113.42"]}). - - Mutually exclusive with I(domain_list). - type: dict - required: false - aliases: ['ip_set_variables']
- purge_ip_variables: - description: - - Whether to purge variable names not mentioned in the I(ip_variables) - dictionary. - - To remove all IP Set Variables it is necessary to explicitly set I(ip_variables={}) - and I(purge_ip_variables=true). - type: bool - default: true - required: false - aliases: ['purge_ip_set_variables']
- port_variables: - description: - - A dictionary mapping variable names to a list of ports. - - For example C({SECURE_PORTS:["22", "443"]}). - type: dict - required: false - aliases: ['port_set_variables']
- purge_port_variables: - description: - - Whether to purge variable names not mentioned in the I(port_variables) - dictionary. - - To remove all Port Set Variables it is necessary to explicitly set I(port_variables={}) - and I(purge_port_variables=true). - type: bool - required: false - default: true - aliases: ['purge_port_set_variables']
- rule_strings: - description: - - Rules in Suricata format. - - If I(rule_strings) is specified, it must include at least one entry. - - For more information read the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html) - and the Suricata documentation - U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html). - - Mutually exclusive with I(rule_type=stateless). - - Mutually exclusive with I(domain_list) and I(rule_list). - - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be - specified at creation time. - type: list - elements: str - required: false
- domain_list: - description: - - Inspection criteria for a domain list rule group. - - When set overwrites all Domain List settings with the new configuration. - - For more information about domain name based filtering - read the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-domain-names.html). - - Mutually exclusive with I(rule_type=stateless). - - Mutually exclusive with I(ip_variables), I(rule_list) and I(rule_strings). - - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be - specified at creation time. - type: dict - required: false - suboptions:
- domain_names: - description: - - A list of domain names to look for in the traffic flow. - type: list - elements: str - required: true
- filter_http: - description: - - Whether HTTP traffic should be inspected (uses the host header). - type: bool - required: false - default: false
- filter_https: - description: - - Whether HTTPS traffic should be inspected (uses the SNI). - type: bool - required: false - default: false
- action: - description: - - Action to perform on traffic that matches the rule match settings. - type: str - required: true - choices: ['allow', 'deny']
- source_ips: - description: - - Used to expand the local network definition beyond the CIDR range - of the VPC where you deploy Network Firewall.
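- # (For example, C(source_ips: ['10.0.0.0/8']) widens the local HOME_NET
- # definition when on-premises ranges reach AWS through the inspected VPC;
- # the value is illustrative.)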
- type: list - elements: str - required: false
- rule_list: - description: - - Inspection criteria to be used for a 5-tuple based rule group. - - When set overwrites all existing 5-tuple rules with the new configuration. - - Mutually exclusive with I(domain_list) and I(rule_strings). - - Mutually exclusive with I(rule_type=stateless). - - Exactly one of I(rule_strings), I(domain_list) or I(rule_list) must be - specified at creation time. - - For more information about valid values see the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_StatefulRule.html) - and - U(https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_Header.html). - - 'Note: Idempotency when comparing AWS Web UI and Ansible managed rules cannot be guaranteed' - type: list - elements: dict - required: false - aliases: ['stateful_rule_list'] - suboptions:
- action: - description: - - What Network Firewall should do with the packets in a traffic flow when the flow matches. - type: str - required: true - choices: ['pass', 'drop', 'alert']
- protocol: - description: - - The protocol to inspect for. To specify all, you can use C(IP), because all traffic on AWS is C(IP). - type: str - required: true
- source: - description: - - The source IP address or address range to inspect for, in CIDR notation. - - To match with any address, specify C(ANY). - type: str - required: true
- source_port: - description: - - The source port to inspect for. - - To match with any port, specify C(ANY). - type: str - required: true
- direction: - description: - - The direction of traffic flow to inspect. - - If set to C(any), the inspection matches both traffic going from the - I(source) to the I(destination) and from the I(destination) to the - I(source). - - If set to C(forward), the inspection only matches traffic going from the - I(source) to the I(destination). - type: str - required: false - default: 'forward' - choices: ['forward', 'any']
- destination: - description: - - The destination IP address or address range to inspect for, in CIDR notation. - - To match with any address, specify C(ANY). - type: str - required: true
- destination_port: - description: - - The destination port to inspect for. - - To match with any port, specify C(ANY). - type: str - required: true
- sid: - description: - - The signature ID of the rule. - - A unique I(sid) must be passed for all rules. - type: int - required: true
- rule_options: - description: - - Additional options for the rule. - - 5-tuple based rules are converted by AWS into Suricata rules, for more - complex options requirements where order matters consider using I(rule_strings). - - A dictionary mapping Suricata RuleOptions names to a list of values. - - The examples section contains some examples of using rule_options. - - For more information read the AWS documentation - U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-limitations-caveats.html) - and the Suricata documentation - U(https://suricata.readthedocs.io/en/suricata-6.0.0/rules/intro.html). - type: dict - required: false
- wait: - description: - - Whether to wait for the firewall rule group to reach the - C(ACTIVE) or C(DELETED) state before the module returns. - type: bool - required: false - default: true
- wait_timeout: - description: - - Maximum time, in seconds, to wait for the firewall rule group - to reach the expected state. - - Defaults to 600 seconds.
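- # (For example, C(wait: true) with C(wait_timeout: 900) blocks for up to
- # 15 minutes instead of the default 600 seconds; values are illustrative.)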
- type: int - required: false - -author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Create a rule group -- name: Create a minimal AWS Network Firewall Rule Group - community.aws.networkfirewall_rule_group: - name: 'MinimalGroup' - type: 'stateful' - capacity: 200 - rule_strings: - - 'pass tcp any any -> any any (sid:1000001;)' - -# Create an example rule group using rule_list -- name: Create 5-tuple Rule List based rule group - community.aws.networkfirewall_rule_group: - name: 'ExampleGroup' - type: 'stateful' - description: 'My description' - rule_order: default - capacity: 100 - rule_list: - - sid: 1 - direction: forward - action: pass - protocol: IP - source: any - source_port: any - destination: any - destination_port: any - -# Create an example rule group using rule_list and rule variables -- name: Create 5-tuple Rule List based rule group - community.aws.networkfirewall_rule_group: - name: 'ExampleGroup' - type: 'stateful' - description: 'My description' - ip_variables: - SOURCE_IPS: ['203.0.113.0/24', '198.51.100.42'] - DESTINATION_IPS: ['192.0.2.0/24', '198.51.100.48'] - port_variables: - HTTP_PORTS: [80, 8080] - rule_order: default - capacity: 100 - rule_list: - # Allow 'Destination Unreachable' traffic - - sid: 1 - action: pass - protocol: icmp - source: any - source_port: any - destination: any - destination_port: any - rule_options: - itype: 3 - - sid: 2 - action: drop - protocol: tcp - source: "$SOURCE_IPS" - source_port: any - destination: "$DESTINATION_IPS" - destination_port: "$HTTP_PORTS" - rule_options: - urilen: ["20<>40"] - # Where only a keyword is needed, add the keyword, but no value - http_uri: - # Settings where Suricata expects raw strings (like the content - # keyword) need the double-quotes explicitly escaped and passed - # through, because there's no practical way to distinguish between them - # and flags.
- content: '"index.php"' - -# Create an example rule group using Suricata rule strings -- name: Create Suricata rule string based rule group - community.aws.networkfirewall_rule_group: - name: 'ExampleSuricata' - type: 'stateful' - description: 'My description' - capacity: 200 - ip_variables: - EXAMPLE_IP: ['203.0.113.0/24', '198.51.100.42'] - ANOTHER_EXAMPLE: ['192.0.2.0/24', '198.51.100.48'] - port_variables: - EXAMPLE_PORT: [443, 22] - rule_strings: - - 'pass tcp any any -> $EXAMPLE_IP $EXAMPLE_PORT (sid:1000001;)' - - 'pass udp any any -> $ANOTHER_EXAMPLE any (sid:1000002;)' - -# Create an example Domain List based rule group -- name: Create Domain List based rule group - community.aws.networkfirewall_rule_group: - name: 'ExampleDomainList' - type: 'stateful' - description: 'My description' - capacity: 100 - domain_list: - domain_names: - - 'example.com' - - '.example.net' - filter_https: True - filter_http: True - action: allow - source_ips: '192.0.2.0/24' - -# Update the description of a rule group -- name: Update the description of a rule group - community.aws.networkfirewall_rule_group: - name: 'MinimalGroup' - type: 'stateful' - description: 'Another description' - -# Update IP Variables for a rule group -- name: Update IP Variables - community.aws.networkfirewall_rule_group: - name: 'ExampleGroup' - type: 'stateful' - ip_variables: - EXAMPLE_IP: ['192.0.2.0/24', '203.0.113.0/24', '198.51.100.42'] - purge_ip_variables: false - -# Delete a rule group -- name: Delete a rule group - community.aws.networkfirewall_rule_group: - name: 'MinimalGroup' - type: 'stateful' - state: absent - -""" - -RETURN = r""" -rule_group: - description: Details of the rules in the rule group - type: dict - returned: success - contains: - rule_group: - description: Details of the rules in the rule group - type: dict - returned: success - contains: - rule_variables: - description: Settings that are available for use in the rules in the rule group. - returned: When rule variables are attached to the rule group. - type: complex - contains: - ip_sets: - description: A dictionary mapping variable names to IP addresses in CIDR format. - returned: success - type: dict - example: ['192.0.2.0/24'] - port_sets: - description: A dictionary mapping variable names to ports - returned: success - type: dict - example: ['42'] - stateful_rule_options: - description: Additional options governing how Network Firewall handles stateful rules. - returned: When the rule group is either "rules string" or "rules list" based. - type: dict - contains: - rule_order: - description: The order in which rules will be evaluated. - returned: success - type: str - example: 'DEFAULT_ACTION_ORDER' - rules_source: - description: Inspection criteria used for a 5-tuple based rule group. - returned: success - type: dict - contains: - stateful_rules: - description: A list of dictionaries describing the rules that the rule group is comprised of. - returned: When the rule group is "rules list" based. - type: list - elements: dict - contains: - action: - description: What action to perform when a flow matches the rule criteria. - returned: success - type: str - example: 'PASS' - header: - description: A description of the criteria used for the rule. - returned: success - type: dict - contains: - protocol: - description: The protocol to inspect for. - returned: success - type: str - example: 'IP' - source: - description: The source address or range of addresses to inspect for. 
- returned: success - type: str - example: '203.0.113.98' - source_port: - description: The source port to inspect for. - returned: success - type: str - example: '42' - destination: - description: The destination address or range of addresses to inspect for. - returned: success - type: str - example: '198.51.100.0/24' - destination_port: - description: The destination port to inspect for. - returned: success - type: str - example: '6666:6667' - direction: - description: The direction of traffic flow to inspect. - returned: success - type: str - example: 'FORWARD' - rule_options: - description: Additional Suricata RuleOptions settings for the rule. - returned: success - type: list - elements: dict - contains: - keyword: - description: The keyword for the setting. - returned: success - type: str - example: 'sid:1' - settings: - description: A list of values passed to the setting. - returned: When values are available - type: list - elements: str - rules_string: - description: A string describing the rules that the rule group is comprised of. - returned: When the rule group is "rules string" based. - type: str - rules_source_list: - description: A description of the criteria for a domain list rule group. - returned: When the rule group is "domain list" based. - type: dict - contains: - targets: - description: A list of domain names to be inspected for. - returned: success - type: list - elements: str - example: ['abc.example.com', '.example.net'] - target_types: - description: The protocols to be inspected by the rule group. - returned: success - type: list - elements: str - example: ['TLS_SNI', 'HTTP_HOST'] - generated_rules_type: - description: Whether the rule group allows or denies access to the domains in the list. - returned: success - type: str - example: 'ALLOWLIST' - stateless_rules_and_custom_actions: - description: A description of the criteria for a stateless rule group. - returned: When the rule group is a stateless rule group. - type: dict - contains: - stateless_rules: - description: A list of stateless rules for use in a stateless rule group. - type: list - elements: dict - contains: - rule_definition: - description: Describes the stateless 5-tuple inspection criteria and actions for the rule. - returned: success - type: dict - contains: - match_attributes: - description: Describes the stateless 5-tuple inspection criteria for the rule. - returned: success - type: dict - contains: - sources: - description: The source IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - destinations: - description: The destination IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - source_ports: - description: The source port ranges to inspect for. - returned: success - type: list - elements: dict - contains: - from_port: - description: The lower limit of the port range. - returned: success - type: int - to_port: - description: The upper limit of the port range. - returned: success - type: int - destination_ports: - description: The destination port ranges to inspect for. 
- returned: success - type: list - elements: dict - contains: - from_port: - description: The lower limit of the port range. - returned: success - type: int - to_port: - description: The upper limit of the port range. - returned: success - type: int - protocols: - description: The IANA protocol numbers of the protocols to inspect for. - returned: success - type: list - elements: int - example: [6] - tcp_flags: - description: The TCP flags and masks to inspect for. - returned: success - type: list - elements: dict - contains: - flags: - description: Used with masks to define the TCP flags that flows are inspected for. - returned: success - type: list - elements: str - masks: - description: The set of flags considered during inspection. - returned: success - type: list - elements: str - actions: - description: The actions to take when a flow matches the rule. - returned: success - type: list - elements: str - example: ['aws:pass', 'CustomActionName'] - priority: - description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. - returned: success - type: int - custom_actions: - description: A list of individual custom action definitions that are available for use in stateless rules. - type: list - elements: dict - contains: - action_name: - description: The name for the custom action. - returned: success - type: str - action_definition: - description: The custom action associated with the action name. - returned: success - type: dict - contains: - publish_metric_action: - description: The description of an action which publishes to CloudWatch. - returned: When the action publishes to CloudWatch. - type: dict - contains: - dimensions: - description: The value to use in an Amazon CloudWatch custom metric dimension. - returned: success - type: list - elements: dict - contains: - value: - description: The value to use in the custom metric dimension. - returned: success - type: str - rule_group_metadata: - description: Metadata describing the rule group. - type: dict - returned: success - contains: - capacity: - description: The maximum operating resources that this rule group can use. - type: int - returned: success - consumed_capacity: - description: The number of capacity units currently consumed by the rule group rules. - type: int - returned: success - description: - description: A description of the rule group. - type: str - returned: success - number_of_associations: - description: The number of firewall policies that use this rule group. - type: int - returned: success - rule_group_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup' - rule_group_id: - description: A unique identifier for the rule group. - type: str - returned: success - example: '12345678-abcd-1234-abcd-123456789abc' - rule_group_name: - description: The name of the rule group. - type: str - returned: success - rule_group_status: - description: The current status of a rule group. - type: str - returned: success - example: 'DELETING' - tags: - description: A dictionary representing the tags associated with the rule group. - type: dict - returned: success - type: - description: Whether the rule group is stateless or stateful.
- type: str - returned: success - example: 'STATEFUL' -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager - - -def main(): - domain_list_spec = dict( - domain_names=dict(type="list", elements="str", required=True), - filter_http=dict(type="bool", required=False, default=False), - filter_https=dict(type="bool", required=False, default=False), - action=dict(type="str", required=True, choices=["allow", "deny"]), - source_ips=dict(type="list", elements="str", required=False), - ) - - rule_list_spec = dict( - action=dict(type="str", required=True, choices=["pass", "drop", "alert"]), - protocol=dict(type="str", required=True), - source=dict(type="str", required=True), - source_port=dict(type="str", required=True), - direction=dict(type="str", required=False, default="forward", choices=["forward", "any"]), - destination=dict(type="str", required=True), - destination_port=dict(type="str", required=True), - sid=dict(type="int", required=True), - rule_options=dict(type="dict", required=False), - ) - - argument_spec = dict( - arn=dict(type="str", required=False), - name=dict(type="str", required=False), - rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateful"]), - # rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']), - state=dict(type="str", required=False, choices=["present", "absent"], default="present"), - capacity=dict(type="int", required=False), - rule_order=dict(type="str", required=False, aliases=["stateful_rule_order"], choices=["default", "strict"]), - description=dict(type="str", required=False), - ip_variables=dict(type="dict", required=False, aliases=["ip_set_variables"]), - purge_ip_variables=dict(type="bool", required=False, aliases=["purge_ip_set_variables"], default=True), - port_variables=dict(type="dict", required=False, aliases=["port_set_variables"]), - purge_port_variables=dict(type="bool", required=False, aliases=["purge_port_set_variables"], default=True), - rule_strings=dict(type="list", elements="str", required=False), - domain_list=dict(type="dict", options=domain_list_spec, required=False), - rule_list=dict( - type="list", elements="dict", aliases=["stateful_rule_list"], options=rule_list_spec, required=False - ), - tags=dict(type="dict", required=False, aliases=["resource_tags"]), - purge_tags=dict(type="bool", required=False, default=True), - wait=dict(type="bool", required=False, default=True), - wait_timeout=dict(type="int", required=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ["name", "arn"], - ["rule_strings", "domain_list", "rule_list"], - ["domain_list", "ip_variables"], - ], - required_together=[ - ["name", "rule_type"], - ], - required_one_of=[ - ["name", "arn"], - ], - ) - - module.require_botocore_at_least("1.19.20") - - state = module.params.get("state") - name = module.params.get("name") - arn = module.params.get("arn") - rule_type = module.params.get("rule_type") - - if rule_type == "stateless": - if module.params.get("rule_order"): - module.fail_json("rule_order can not be set for stateless rule groups") - if module.params.get("rule_strings"): - module.fail_json("rule_strings can only be used for stateful rule groups") - if module.params.get("rule_list"): - module.fail_json("rule_list can only be used for stateful rule 
groups") - if module.params.get("domain_list"): - module.fail_json("domain_list can only be used for stateful rule groups") - - if module.params.get("rule_order"): - module.require_botocore_at_least("1.23.23", reason="to set the rule order") - - manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type) - manager.set_wait(module.params.get("wait", None)) - manager.set_wait_timeout(module.params.get("wait_timeout", None)) - - if state == "absent": - manager.delete() - else: - manager.set_description(module.params.get("description")) - manager.set_capacity(module.params.get("capacity")) - manager.set_rule_order(module.params.get("rule_order")) - manager.set_ip_variables(module.params.get("ip_variables"), module.params.get("purge_ip_variables")) - manager.set_port_variables(module.params.get("port_variables"), module.params.get("purge_port_variables")) - manager.set_rule_string(module.params.get("rule_strings")) - manager.set_domain_list(module.params.get("domain_list")) - manager.set_rule_list(module.params.get("rule_list")) - manager.set_tags(module.params.get("tags"), module.params.get("purge_tags")) - - manager.flush_changes() - - results = dict( - changed=manager.changed, - rule_group=manager.updated_resource, - ) - if manager.changed: - diff = dict( - before=manager.original_resource, - after=manager.updated_resource, - ) - results["diff"] = diff - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/networkfirewall_rule_group_info.py b/networkfirewall_rule_group_info.py deleted file mode 100644 index 6d2dabe31c5..00000000000 --- a/networkfirewall_rule_group_info.py +++ /dev/null @@ -1,446 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: networkfirewall_rule_group_info -short_description: describe AWS Network Firewall rule groups -version_added: 4.0.0 -description: - - A module for describing AWS Network Firewall rule groups. -options: - arn: - description: - - The ARN of the Network Firewall rule group. - - At time of writing AWS does not support describing Managed Rules. - required: false - type: str - name: - description: - - The name of the Network Firewall rule group. - required: false - type: str - rule_type: - description: - - Indicates whether the rule group is stateless or stateful. - - Required if I(name) is provided. - required: false - aliases: ['type' ] - choices: ['stateful', 'stateless'] - type: str - scope: - description: - - The scope of the request. - - When I(scope='account') returns a description of all rule groups in the account. - - When I(scope='managed') returns a list of available managed rule group arns. - - By default searches only at the account scope. - - I(scope='managed') requires botocore>=1.23.23. 
- required: false - choices: ['managed', 'account'] - type: str - -author: - - Mark Chappell (@tremble) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -# Describe all Rule Groups in an account (excludes managed groups) -- community.aws.networkfirewall_rule_group_info: {} - -# List the available Managed Rule groups (AWS doesn't support describing the -# groups) -- community.aws.networkfirewall_rule_group_info: - scope: managed - -# Describe a Rule Group by ARN -- community.aws.networkfirewall_rule_group_info: - arn: arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleRuleGroup - -# Describe a Rule Group by name -- community.aws.networkfirewall_rule_group_info: - name: ExampleRuleGroup - type: stateful - -""" - -RETURN = r""" -rule_list: - description: A list of ARNs of the matching rule groups. - type: list - elements: str - returned: When a rule name isn't specified - -rule_groups: - description: The details of the rule groups - returned: success - type: list - elements: dict - contains: - rule_group: - description: Details of the rules in the rule group - type: dict - returned: success - contains: - rule_variables: - description: Settings that are available for use in the rules in the rule group. - returned: When rule variables are attached to the rule group. - type: complex - contains: - ip_sets: - description: A dictionary mapping variable names to IP addresses in CIDR format. - returned: success - type: dict - example: ['192.0.2.0/24'] - port_sets: - description: A dictionary mapping variable names to ports - returned: success - type: dict - example: ['42'] - stateful_rule_options: - description: Additional options governing how Network Firewall handles stateful rules. - returned: When the rule group is either "rules string" or "rules list" based. - type: dict - contains: - rule_order: - description: The order in which rules will be evaluated. - returned: success - type: str - example: 'DEFAULT_ACTION_ORDER' - rules_source: - description: Inspection criteria used for a 5-tuple based rule group. - returned: success - type: dict - contains: - stateful_rules: - description: A list of dictionaries describing the rules that the rule group is comprised of. - returned: When the rule group is "rules list" based. - type: list - elements: dict - contains: - action: - description: What action to perform when a flow matches the rule criteria. - returned: success - type: str - example: 'PASS' - header: - description: A description of the criteria used for the rule. - returned: success - type: dict - contains: - protocol: - description: The protocol to inspect for. - returned: success - type: str - example: 'IP' - source: - description: The source address or range of addresses to inspect for. - returned: success - type: str - example: '203.0.113.98' - source_port: - description: The source port to inspect for. - returned: success - type: str - example: '42' - destination: - description: The destination address or range of addresses to inspect for. - returned: success - type: str - example: '198.51.100.0/24' - destination_port: - description: The destination port to inspect for. - returned: success - type: str - example: '6666:6667' - direction: - description: The direction of traffic flow to inspect. - returned: success - type: str - example: 'FORWARD' - rule_options: - description: Additional Suricata RuleOptions settings for the rule.
- returned: success - type: list - elements: dict - contains: - keyword: - description: The keyword for the setting. - returned: success - type: str - example: 'sid:1' - settings: - description: A list of values passed to the setting. - returned: When values are available - type: list - elements: str - rules_string: - description: A string describing the rules that the rule group is comprised of. - returned: When the rule group is "rules string" based. - type: str - rules_source_list: - description: A description of the criteria for a domain list rule group. - returned: When the rule group is "domain list" based. - type: dict - contains: - targets: - description: A list of domain names to be inspected for. - returned: success - type: list - elements: str - example: ['abc.example.com', '.example.net'] - target_types: - description: The protocols to be inspected by the rule group. - returned: success - type: list - elements: str - example: ['TLS_SNI', 'HTTP_HOST'] - generated_rules_type: - description: Whether the rule group allows or denies access to the domains in the list. - returned: success - type: str - example: 'ALLOWLIST' - stateless_rules_and_custom_actions: - description: A description of the criteria for a stateless rule group. - returned: When the rule group is a stateless rule group. - type: dict - contains: - stateless_rules: - description: A list of stateless rules for use in a stateless rule group. - type: list - elements: dict - contains: - rule_definition: - description: Describes the stateless 5-tuple inspection criteria and actions for the rule. - returned: success - type: dict - contains: - match_attributes: - description: Describes the stateless 5-tuple inspection criteria for the rule. - returned: success - type: dict - contains: - sources: - description: The source IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - destinations: - description: The destination IP addresses and address ranges to inspect for. - returned: success - type: list - elements: dict - contains: - address_definition: - description: An IP address or a block of IP addresses in CIDR notation. - returned: success - type: str - example: '192.0.2.3' - source_ports: - description: The source port ranges to inspect for. - returned: success - type: list - elements: dict - contains: - from_port: - description: The lower limit of the port range. - returned: success - type: int - to_port: - description: The upper limit of the port range. - returned: success - type: int - destination_ports: - description: The destination port ranges to inspect for. - returned: success - type: list - elements: dict - contains: - from_port: - description: The lower limit of the port range. - returned: success - type: int - to_port: - description: The upper limit of the port range. - returned: success - type: int - protocols: - description: The IANA protocol numbers of the protocols to inspect for. - returned: success - type: list - elements: int - example: [6] - tcp_flags: - description: The TCP flags and masks to inspect for. - returned: success - type: list - elements: dict - contains: - flags: - description: Used with masks to define the TCP flags that flows are inspected for. - returned: success - type: list - elements: str - masks: - description: The set of flags considered during inspection. 
- returned: success - type: list - elements: str - actions: - description: The actions to take when a flow matches the rule. - returned: success - type: list - elements: str - example: ['aws:pass', 'CustomActionName'] - priority: - description: Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. - returned: success - type: int - custom_actions: - description: A list of individual custom action definitions that are available for use in stateless rules. - type: list - elements: dict - contains: - action_name: - description: The name for the custom action. - returned: success - type: str - action_definition: - description: The custom action associated with the action name. - returned: success - type: dict - contains: - publish_metric_action: - description: The description of an action which publishes to CloudWatch. - returned: When the action publishes to CloudWatch. - type: dict - contains: - dimensions: - description: The value to use in an Amazon CloudWatch custom metric dimension. - returned: success - type: list - elements: dict - contains: - value: - description: The value to use in the custom metric dimension. - returned: success - type: str - rule_group_metadata: - description: Metadata describing the rule group. - type: dict - returned: success - contains: - capacity: - description: The maximum operating resources that this rule group can use. - type: int - returned: success - consumed_capacity: - description: The number of capacity units currently consumed by the rule group rules. - type: int - returned: success - description: - description: A description of the rule group. - type: str - returned: success - number_of_associations: - description: The number of firewall policies that use this rule group. - type: int - returned: success - rule_group_arn: - description: The ARN of the rule group. - type: str - returned: success - example: 'arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/ExampleGroup' - rule_group_id: - description: A unique identifier for the rule group. - type: str - returned: success - example: '12345678-abcd-1234-abcd-123456789abc' - rule_group_name: - description: The name of the rule group. - type: str - returned: success - rule_group_status: - description: The current status of a rule group. - type: str - returned: success - example: 'DELETING' - tags: - description: A dictionary representing the tags associated with the rule group. - type: dict - returned: success - type: - description: Whether the rule group is stateless or stateful.
- type: str - returned: success - example: 'STATEFUL' -""" - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager - - -def main(): - argument_spec = dict( - name=dict(type="str", required=False), - rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateless", "stateful"]), - arn=dict(type="str", required=False), - scope=dict(type="str", required=False, choices=["managed", "account"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ["arn", "name"], - ["arn", "rule_type"], - ], - required_together=[ - ["name", "rule_type"], - ], - ) - - module.require_botocore_at_least("1.19.20") - - arn = module.params.get("arn") - name = module.params.get("name") - rule_type = module.params.get("rule_type") - scope = module.params.get("scope") - - if module.params.get("scope") == "managed": - module.require_botocore_at_least("1.23.23", reason="to list managed rules") - - manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type) - - results = dict(changed=False) - - if name or arn: - rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn) - if rule: - results["rule_groups"] = [rule] - else: - results["rule_groups"] = [] - else: - rule_list = manager.list(scope=scope) - results["rule_list"] = rule_list - if scope != "managed": - rules = [manager.get_rule_group(arn=r) for r in rule_list] - results["rule_groups"] = rules - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/opensearch.py b/opensearch.py deleted file mode 100644 index 967f0c98d01..00000000000 --- a/opensearch.py +++ /dev/null @@ -1,1329 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: opensearch -short_description: Creates OpenSearch or ElasticSearch domain -description: - - Creates or modifies an Amazon OpenSearch Service domain. -version_added: 4.0.0 -author: - - "Sebastien Rosset (@sebastien-rosset)" -options: - state: - description: - - Creates or modifies an existing OpenSearch domain. - - Deletes an OpenSearch domain. - required: false - type: str - choices: ['present', 'absent'] - default: present - domain_name: - description: - - The name of the Amazon OpenSearch/ElasticSearch Service domain. - - Domain names are unique across the domains owned by an account within an AWS region. - required: true - type: str - engine_version: - description: - - > - The engine version to use. For example, 'ElasticSearch_7.10' or 'OpenSearch_1.1'. - - > - If the currently running version is not equal to I(engine_version), - a cluster upgrade is triggered. - - > - It may not be possible to upgrade directly from the currently running version - to I(engine_version). In that case, the upgrade is performed incrementally by - upgrading to the highest compatible version, then repeating the operation until - the cluster is running at the target version. - - > - The upgrade operation fails if there is no path from the current version to I(engine_version). - - > - See OpenSearch documentation for upgrade compatibility.
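- # Illustrative (assumed) upgrade path: with a domain running Elasticsearch_6.8 and - # engine_version: OpenSearch_1.1, the module would step through e.g. Elasticsearch_7.10 - # before reaching OpenSearch_1.1, as reported by the compatible-versions API.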
- required: false - type: str - allow_intermediate_upgrades: - description: - - > - If true, allow OpenSearch domain to be upgraded through one or more intermediate versions. - - > - If false, do not allow OpenSearch domain to be upgraded through intermediate versions. - The upgrade operation fails if it's not possible to upgrade to I(engine_version) directly. - required: false - type: bool - default: true - cluster_config: - description: - - Parameters for the cluster configuration of an OpenSearch Service domain. - type: dict - suboptions: - instance_type: - description: - - Type of the instances to use for the domain. - required: false - type: str - instance_count: - description: - - Number of instances for the domain. - required: false - type: int - zone_awareness: - description: - - A boolean value to indicate whether zone awareness is enabled. - required: false - type: bool - availability_zone_count: - description: - - > - An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. - This should be equal to the number of subnets if VPC endpoints are enabled. - required: false - type: int - dedicated_master: - description: - - A boolean value to indicate whether a dedicated master node is enabled. - required: false - type: bool - dedicated_master_instance_type: - description: - - The instance type for a dedicated master node. - required: false - type: str - dedicated_master_instance_count: - description: - - Total number of dedicated master nodes, active and on standby, for the domain. - required: false - type: int - warm_enabled: - description: - - True to enable UltraWarm storage. - required: false - type: bool - warm_type: - description: - - The instance type for the OpenSearch domain's warm nodes. - required: false - type: str - warm_count: - description: - - The number of UltraWarm nodes in the domain. - required: false - type: int - cold_storage_options: - description: - - Specifies the ColdStorageOptions config for a domain. - type: dict - suboptions: - enabled: - description: - - True to enable cold storage. Supported on Elasticsearch 7.9 or above. - required: false - type: bool - ebs_options: - description: - - Parameters to configure EBS-based storage for an OpenSearch Service domain. - type: dict - suboptions: - ebs_enabled: - description: - - Specifies whether EBS-based storage is enabled. - required: false - type: bool - volume_type: - description: - - Specifies the volume type for EBS-based storage, one of "standard", "gp2" or "io1". - required: false - type: str - volume_size: - description: - - Integer to specify the size of an EBS volume. - required: false - type: int - iops: - description: - - The IOPS for a Provisioned IOPS EBS volume (SSD). - required: false - type: int - vpc_options: - description: - - Options to specify the subnets and security groups for a VPC endpoint. - type: dict - suboptions: - subnets: - description: - - Specifies the subnet IDs for the VPC endpoint. - required: false - type: list - elements: str - security_groups: - description: - - Specifies the security group IDs for the VPC endpoint. - required: false - type: list - elements: str - snapshot_options: - description: - - Option to set time, in UTC format, of the daily automated snapshot. - type: dict - suboptions: - automated_snapshot_start_hour: - description: - - > - Integer value from 0 to 23 specifying when the service takes a daily automated snapshot - of the specified Elasticsearch domain.
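- # For example, automated_snapshot_start_hour: 13 takes the daily snapshot at 13:00 UTC (illustrative).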
- required: false - type: int - access_policies: - description: - - IAM access policy as a JSON-formatted string. - required: false - type: dict - encryption_at_rest_options: - description: - - Parameters to enable encryption at rest. - type: dict - suboptions: - enabled: - description: - - Should data be encrypted while at rest. - required: false - type: bool - kms_key_id: - description: - - If encryption at rest is enabled, this identifies the encryption key to use. - - The value should be a KMS key ARN. It can also be the KMS key ID. - required: false - type: str - node_to_node_encryption_options: - description: - - Node-to-node encryption options. - type: dict - suboptions: - enabled: - description: - - True to enable node-to-node encryption. - required: false - type: bool - cognito_options: - description: - - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards. - type: dict - suboptions: - enabled: - description: - - The option to enable Cognito for OpenSearch Dashboards authentication. - required: false - type: bool - user_pool_id: - description: - - The Cognito user pool ID for OpenSearch Dashboards authentication. - required: false - type: str - identity_pool_id: - description: - - The Cognito identity pool ID for OpenSearch Dashboards authentication. - required: false - type: str - role_arn: - description: - - The role ARN that provides OpenSearch permissions for accessing Cognito resources. - required: false - type: str - domain_endpoint_options: - description: - - Options to specify configuration that will be applied to the domain endpoint. - type: dict - suboptions: - enforce_https: - description: - - Whether only the HTTPS endpoint should be enabled for the domain. - type: bool - tls_security_policy: - description: - - Specify the TLS security policy to apply to the HTTPS endpoint of the domain. - type: str - custom_endpoint_enabled: - description: - - Whether to enable a custom endpoint for the domain. - type: bool - custom_endpoint: - description: - - The fully qualified domain for your custom endpoint. - type: str - custom_endpoint_certificate_arn: - description: - - The ACM certificate ARN for your custom endpoint. - type: str - advanced_security_options: - description: - - Specifies advanced security options. - type: dict - suboptions: - enabled: - description: - - True if advanced security is enabled. - - You must enable node-to-node encryption to use advanced security options. - type: bool - internal_user_database_enabled: - description: - - True if the internal user database is enabled. - type: bool - master_user_options: - description: - - Credentials for the master user, username and password, ARN, or both. - type: dict - suboptions: - master_user_arn: - description: - - ARN for the master user (if IAM is enabled). - type: str - master_user_name: - description: - - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - master_user_password: - description: - - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - saml_options: - description: - - The SAML application configuration for the domain. - type: dict - suboptions: - enabled: - description: - - True if SAML is enabled. - - To use SAML authentication, you must enable fine-grained access control. - - You can only enable SAML authentication for OpenSearch Dashboards on existing domains, - not during the creation of new ones.
- - Domains only support one Dashboards authentication method at a time. - If you have Amazon Cognito authentication for OpenSearch Dashboards enabled, - you must disable it before you can enable SAML. - type: bool - idp: - description: - - The SAML Identity Provider's information. - type: dict - suboptions: - metadata_content: - description: - - The metadata of the SAML application in XML format. - type: str - entity_id: - description: - - The unique entity ID of the application in SAML identity provider. - type: str - master_user_name: - description: - - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - master_backend_role: - description: - - The backend role that the SAML master user is mapped to. - type: str - subject_key: - description: - - Element of the SAML assertion to use for username. Default is NameID. - type: str - roles_key: - description: - - Element of the SAML assertion to use for backend roles. Default is roles. - type: str - session_timeout_minutes: - description: - - The duration, in minutes, after which a user session becomes inactive. Acceptable values are between 1 and 1440, and the default value is 60. - type: int - auto_tune_options: - description: - - Specifies Auto-Tune options. - type: dict - suboptions: - desired_state: - description: - - The Auto-Tune desired state. Valid values are ENABLED and DISABLED. - type: str - choices: ['ENABLED', 'DISABLED'] - maintenance_schedules: - description: - - A list of maintenance schedules. - type: list - elements: dict - suboptions: - start_at: - description: - - The timestamp at which the Auto-Tune maintenance schedule starts. - type: str - duration: - description: - - Specifies the maintenance schedule duration, as a duration value and duration unit. - type: dict - suboptions: - value: - description: - - Integer to specify the value of a maintenance schedule duration. - type: int - unit: - description: - - The unit of a maintenance schedule duration. Valid value is HOURS. - choices: ['HOURS'] - type: str - cron_expression_for_recurrence: - description: - - A cron expression for a recurring maintenance schedule. - type: str - wait: - description: - - Whether or not to wait for completion of OpenSearch creation, modification or deletion. - type: bool - default: false - wait_timeout: - description: - - How long to wait before giving up, in seconds.
- default: 300 - type: int -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -RETURN = r""" # """ - -EXAMPLES = r""" - -- name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters - community.aws.opensearch: - domain_name: "dev-cluster" - engine_version: Elasticsearch_1.1 - cluster_config: - instance_type: "t2.small.search" - instance_count: 2 - zone_awareness: false - dedicated_master: false - ebs_options: - ebs_enabled: true - volume_type: "gp2" - volume_size: 10 - access_policies: "{{ lookup('file', 'policy.json') | from_json }}" - -- name: Create OpenSearch domain with dedicated masters - community.aws.opensearch: - domain_name: "my-domain" - engine_version: OpenSearch_1.1 - cluster_config: - instance_type: "t2.small.search" - instance_count: 12 - dedicated_master: true - zone_awareness: true - availability_zone_count: 2 - dedicated_master_instance_type: "t2.small.search" - dedicated_master_instance_count: 3 - warm_enabled: true - warm_type: "ultrawarm1.medium.search" - warm_count: 1 - cold_storage_options: - enabled: false - ebs_options: - ebs_enabled: true - volume_type: "io1" - volume_size: 10 - iops: 1000 - vpc_options: - subnets: - - "subnet-e537d64a" - - "subnet-e537d64b" - security_groups: - - "sg-dd2f13cb" - - "sg-dd2f13cc" - snapshot_options: - automated_snapshot_start_hour: 13 - access_policies: "{{ lookup('file', 'policy.json') | from_json }}" - encryption_at_rest_options: - enabled: false - node_to_node_encryption_options: - enabled: false - auto_tune_options: - desired_state: "ENABLED" - maintenance_schedules: - - start_at: "2025-01-12" - duration: - value: 1 - unit: "HOURS" - cron_expression_for_recurrence: "cron(0 12 * * ? *)" - - start_at: "2032-01-12" - duration: - value: 2 - unit: "HOURS" - cron_expression_for_recurrence: "cron(0 12 * * ?
*)" - tags: - Environment: Development - Application: Search - wait: true - -- name: Increase size of EBS volumes for existing cluster - community.aws.opensearch: - domain_name: "my-domain" - ebs_options: - volume_size: 5 - wait: true - -- name: Increase instance count for existing cluster - community.aws.opensearch: - domain_name: "my-domain" - cluster_config: - instance_count: 40 - wait: true - -""" - -from copy import deepcopy -import datetime -import json - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.six import string_types - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.opensearch import compare_domain_versions -from ansible_collections.community.aws.plugins.module_utils.opensearch import ensure_tags -from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status -from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config -from ansible_collections.community.aws.plugins.module_utils.opensearch import get_target_increment_version -from ansible_collections.community.aws.plugins.module_utils.opensearch import normalize_opensearch -from ansible_collections.community.aws.plugins.module_utils.opensearch import parse_version -from ansible_collections.community.aws.plugins.module_utils.opensearch import wait_for_domain_status - - -def ensure_domain_absent(client, module): - domain_name = module.params.get("domain_name") - changed = False - - domain = get_domain_status(client, module, domain_name) - if module.check_mode: - module.exit_json(changed=True, msg="Would have deleted domain if not in check mode") - try: - client.delete_domain(DomainName=domain_name) - changed = True - except is_boto3_error_code("ResourceNotFoundException"): - # The resource does not exist, or it has already been deleted - return dict(changed=False) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="trying to delete domain") - - # If we're not waiting for a delete to complete then we're all done - # so just return - if not domain or not module.params.get("wait"): - return dict(changed=changed) - try: - wait_for_domain_status(client, module, domain_name, "domain_deleted") - return dict(changed=changed) - except is_boto3_error_code("ResourceNotFoundException"): - return dict(changed=changed) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "awaiting domain deletion") - - -def upgrade_domain(client, module, source_version, target_engine_version): - domain_name = module.params.get("domain_name") - # Determine if it's possible to upgrade directly from source version - # to target version, or if it's necessary to upgrade through intermediate major versions. 
- next_version = target_engine_version - # When perform_check_only is true, indicates that an upgrade eligibility check needs - # to be performed. Does not actually perform the upgrade. - perform_check_only = False - if module.check_mode: - perform_check_only = True - current_version = source_version - while current_version != target_engine_version: - v = get_target_increment_version(client, module, domain_name, target_engine_version) - if v is None: - # There is no compatible version, according to the get_compatible_versions() API. - # The upgrade should fail, but try anyway. - next_version = target_engine_version - else: - next_version = v - if next_version != target_engine_version: - # It's not possible to upgrade directly to the target version. - # Check the module parameters to determine if this is allowed or not. - if not module.params.get("allow_intermediate_upgrades"): - module.fail_json( - msg=f"Cannot upgrade from {source_version} to version {target_engine_version}. The highest compatible version is {next_version}" - ) - - parameters = { - "DomainName": domain_name, - "TargetVersion": next_version, - "PerformCheckOnly": perform_check_only, - } - - if not module.check_mode: - # If background tasks are in progress, wait until they complete. - # This can take several hours depending on the cluster size and the type of background tasks - # (maybe an upgrade is already in progress). - # It's not possible to upgrade a domain that has background tasks in progress; - # the call to client.upgrade_domain would fail. - wait_for_domain_status(client, module, domain_name, "domain_available") - - try: - client.upgrade_domain(**parameters) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - # In check mode (=> PerformCheckOnly==True), a ValidationException may be - # raised if it's not possible to upgrade to the target version.
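- # A real run hits this same handler if AWS rejects the upgrade, so check mode and - # normal mode report eligibility problems through the same failure path.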
- module.fail_json_aws( - e, - msg=f"Couldn't upgrade domain {domain_name} from {current_version} to {next_version}", - ) - - if module.check_mode: - module.exit_json( - changed=True, - msg=f"Would have upgraded domain from {current_version} to {next_version} if not in check mode", - ) - current_version = next_version - - if module.params.get("wait"): - wait_for_domain_status(client, module, domain_name, "domain_available") - - -def set_cluster_config(module, current_domain_config, desired_domain_config, change_set): - changed = False - - cluster_config = desired_domain_config["ClusterConfig"] - cluster_opts = module.params.get("cluster_config") - if cluster_opts is not None: - if cluster_opts.get("instance_type") is not None: - cluster_config["InstanceType"] = cluster_opts.get("instance_type") - if cluster_opts.get("instance_count") is not None: - cluster_config["InstanceCount"] = cluster_opts.get("instance_count") - if cluster_opts.get("zone_awareness") is not None: - cluster_config["ZoneAwarenessEnabled"] = cluster_opts.get("zone_awareness") - if cluster_config["ZoneAwarenessEnabled"]: - if cluster_opts.get("availability_zone_count") is not None: - cluster_config["ZoneAwarenessConfig"] = { - "AvailabilityZoneCount": cluster_opts.get("availability_zone_count"), - } - - if cluster_opts.get("dedicated_master") is not None: - cluster_config["DedicatedMasterEnabled"] = cluster_opts.get("dedicated_master") - if cluster_config["DedicatedMasterEnabled"]: - if cluster_opts.get("dedicated_master_instance_type") is not None: - cluster_config["DedicatedMasterType"] = cluster_opts.get("dedicated_master_instance_type") - if cluster_opts.get("dedicated_master_instance_count") is not None: - cluster_config["DedicatedMasterCount"] = cluster_opts.get("dedicated_master_instance_count") - - if cluster_opts.get("warm_enabled") is not None: - cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled") - if cluster_config["WarmEnabled"]: - if cluster_opts.get("warm_type") is not None: - cluster_config["WarmType"] = cluster_opts.get("warm_type") - if cluster_opts.get("warm_count") is not None: - cluster_config["WarmCount"] = cluster_opts.get("warm_count") - - cold_storage_opts = None - if cluster_opts is not None: - cold_storage_opts = cluster_opts.get("cold_storage_options") - if compare_domain_versions(desired_domain_config["EngineVersion"], "Elasticsearch_7.9") < 0: - # If the engine version is ElasticSearch < 7.9, cold storage is not supported. - # When querying a domain < 7.9, the AWS API indicates cold storage is disabled (Enabled: False), - # which makes sense. However, trying to do HTTP POST with Enabled: False causes an API error. - # The 'ColdStorageOptions' attribute should not be present in HTTP POST. - if cold_storage_opts is not None and cold_storage_opts.get("enabled"): - module.fail_json(msg="Cold Storage is not supported") - cluster_config.pop("ColdStorageOptions", None) - if current_domain_config is not None and "ClusterConfig" in current_domain_config: - # Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff - # will indicate a change must be done. - current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None) - else: - # Elasticsearch 7.9 and above support ColdStorageOptions.
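- # Map the module's cold_storage_options.enabled flag onto the API's ColdStorageOptions.Enabled field.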
- if cold_storage_opts is not None and cold_storage_opts.get("enabled") is not None: - cluster_config["ColdStorageOptions"] = { - "Enabled": cold_storage_opts.get("enabled"), - } - - if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: - change_set.append(f"ClusterConfig changed from {current_domain_config['ClusterConfig']} to {cluster_config}") - changed = True - return changed - - -def set_ebs_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - ebs_config = desired_domain_config["EBSOptions"] - ebs_opts = module.params.get("ebs_options") - if ebs_opts is None: - return changed - if ebs_opts.get("ebs_enabled") is not None: - ebs_config["EBSEnabled"] = ebs_opts.get("ebs_enabled") - - if not ebs_config["EBSEnabled"]: - desired_domain_config["EBSOptions"] = { - "EBSEnabled": False, - } - else: - if ebs_opts.get("volume_type") is not None: - ebs_config["VolumeType"] = ebs_opts.get("volume_type") - if ebs_opts.get("volume_size") is not None: - ebs_config["VolumeSize"] = ebs_opts.get("volume_size") - if ebs_opts.get("iops") is not None: - ebs_config["Iops"] = ebs_opts.get("iops") - - if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: - change_set.append(f"EBSOptions changed from {current_domain_config['EBSOptions']} to {ebs_config}") - changed = True - return changed - - -def set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"] - encryption_at_rest_opts = module.params.get("encryption_at_rest_options") - if encryption_at_rest_opts is None: - return False - if encryption_at_rest_opts.get("enabled") is not None: - encryption_at_rest_config["Enabled"] = encryption_at_rest_opts.get("enabled") - if not encryption_at_rest_config["Enabled"]: - desired_domain_config["EncryptionAtRestOptions"] = { - "Enabled": False, - } - else: - if encryption_at_rest_opts.get("kms_key_id") is not None: - encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get("kms_key_id") - - if ( - current_domain_config is not None - and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config - ): - change_set.append( - f"EncryptionAtRestOptions changed from {current_domain_config['EncryptionAtRestOptions']} to" - f" {encryption_at_rest_config}" - ) - changed = True - return changed - - -def set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - node_to_node_encryption_config = desired_domain_config["NodeToNodeEncryptionOptions"] - node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options") - if node_to_node_encryption_opts is None: - return changed - if node_to_node_encryption_opts.get("enabled") is not None: - node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get("enabled") - - if ( - current_domain_config is not None - and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config - ): - change_set.append( - f"NodeToNodeEncryptionOptions changed from {current_domain_config['NodeToNodeEncryptionOptions']} to" - f" {node_to_node_encryption_config}" - ) - changed = True - return changed - - -def set_vpc_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - vpc_config = None - if "VPCOptions" in desired_domain_config: - vpc_config = 
desired_domain_config["VPCOptions"] - vpc_opts = module.params.get("vpc_options") - if vpc_opts is None: - return changed - vpc_subnets = vpc_opts.get("subnets") - if vpc_subnets is not None: - if vpc_config is None: - vpc_config = {} - desired_domain_config["VPCOptions"] = vpc_config - # OpenSearch cluster is attached to VPC - if isinstance(vpc_subnets, string_types): - vpc_subnets = [x.strip() for x in vpc_subnets.split(",")] - vpc_config["SubnetIds"] = vpc_subnets - - vpc_security_groups = vpc_opts.get("security_groups") - if vpc_security_groups is not None: - if vpc_config is None: - vpc_config = {} - desired_domain_config["VPCOptions"] = vpc_config - if isinstance(vpc_security_groups, string_types): - vpc_security_groups = [x.strip() for x in vpc_security_groups.split(",")] - vpc_config["SecurityGroupIds"] = vpc_security_groups - - if current_domain_config is not None: - # Modify existing cluster. - current_cluster_is_vpc = False - desired_cluster_is_vpc = False - if ( - "VPCOptions" in current_domain_config - and "SubnetIds" in current_domain_config["VPCOptions"] - and len(current_domain_config["VPCOptions"]["SubnetIds"]) > 0 - ): - current_cluster_is_vpc = True - if ( - "VPCOptions" in desired_domain_config - and "SubnetIds" in desired_domain_config["VPCOptions"] - and len(desired_domain_config["VPCOptions"]["SubnetIds"]) > 0 - ): - desired_cluster_is_vpc = True - if current_cluster_is_vpc != desired_cluster_is_vpc: - # AWS does not allow changing the type. Don't fail here so we return the AWS API error. - change_set.append("VPCOptions changed between Internet and VPC") - changed = True - elif desired_cluster_is_vpc is False: - # There are no VPCOptions to configure. - pass - else: - # Note the subnets may be the same but be listed in a different order. 
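- # Compare as sets so that a mere reordering of subnets or security groups - # is not reported as a change.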
- if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): - change_set.append( - f"SubnetIds changed from {current_domain_config['VPCOptions']['SubnetIds']} to" - f" {vpc_config['SubnetIds']}" - ) - changed = True - if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): - change_set.append( - f"SecurityGroup changed from {current_domain_config['VPCOptions']['SecurityGroupIds']} to" - f" {vpc_config['SecurityGroupIds']}" - ) - changed = True - return changed - - -def set_snapshot_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - snapshot_config = desired_domain_config["SnapshotOptions"] - snapshot_opts = module.params.get("snapshot_options") - if snapshot_opts is None: - return changed - if snapshot_opts.get("automated_snapshot_start_hour") is not None: - snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get("automated_snapshot_start_hour") - if current_domain_config is not None and current_domain_config["SnapshotOptions"] != snapshot_config: - change_set.append("SnapshotOptions changed") - changed = True - return changed - - -def set_cognito_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - cognito_config = desired_domain_config["CognitoOptions"] - cognito_opts = module.params.get("cognito_options") - if cognito_opts is None: - return changed - if cognito_opts.get("enabled") is not None: - cognito_config["Enabled"] = cognito_opts.get("enabled") - if not cognito_config["Enabled"]: - desired_domain_config["CognitoOptions"] = { - "Enabled": False, - } - else: - if cognito_opts.get("cognito_user_pool_id") is not None: - cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id") - if cognito_opts.get("cognito_identity_pool_id") is not None: - cognito_config["IdentityPoolId"] = cognito_opts.get("cognito_identity_pool_id") - if cognito_opts.get("cognito_role_arn") is not None: - cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") - - if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config: - change_set.append(f"CognitoOptions changed from {current_domain_config['CognitoOptions']} to {cognito_config}") - changed = True - return changed - - -def set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - advanced_security_config = desired_domain_config["AdvancedSecurityOptions"] - advanced_security_opts = module.params.get("advanced_security_options") - if advanced_security_opts is None: - return changed - if advanced_security_opts.get("enabled") is not None: - advanced_security_config["Enabled"] = advanced_security_opts.get("enabled") - if not advanced_security_config["Enabled"]: - desired_domain_config["AdvancedSecurityOptions"] = { - "Enabled": False, - } - else: - if advanced_security_opts.get("internal_user_database_enabled") is not None: - advanced_security_config["InternalUserDatabaseEnabled"] = advanced_security_opts.get( - "internal_user_database_enabled" - ) - master_user_opts = advanced_security_opts.get("master_user_options") - if master_user_opts is not None: - advanced_security_config.setdefault("MasterUserOptions", {}) - if master_user_opts.get("master_user_arn") is not None: - advanced_security_config["MasterUserOptions"]["MasterUserARN"] = master_user_opts.get("master_user_arn") - if master_user_opts.get("master_user_name") is not None: - 
advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts.get( - "master_user_name" - ) - if master_user_opts.get("master_user_password") is not None: - advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts.get( - "master_user_password" - ) - saml_opts = advanced_security_opts.get("saml_options") - if saml_opts is not None: - if saml_opts.get("enabled") is not None: - advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get("enabled") - idp_opts = saml_opts.get("idp") - if idp_opts is not None: - if idp_opts.get("metadata_content") is not None: - advanced_security_config["SamlOptions"]["Idp"]["MetadataContent"] = idp_opts.get("metadata_content") - if idp_opts.get("entity_id") is not None: - advanced_security_config["SamlOptions"]["Idp"]["EntityId"] = idp_opts.get("entity_id") - if saml_opts.get("master_user_name") is not None: - advanced_security_config["SamlOptions"]["MasterUserName"] = saml_opts.get("master_user_name") - if saml_opts.get("master_backend_role") is not None: - advanced_security_config["SamlOptions"]["MasterBackendRole"] = saml_opts.get("master_backend_role") - if saml_opts.get("subject_key") is not None: - advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get("subject_key") - if saml_opts.get("roles_key") is not None: - advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get("roles_key") - if saml_opts.get("session_timeout_minutes") is not None: - advanced_security_config["SamlOptions"]["SessionTimeoutMinutes"] = saml_opts.get( - "session_timeout_minutes" - ) - - if ( - current_domain_config is not None - and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config - ): - change_set.append( - f"AdvancedSecurityOptions changed from {current_domain_config['AdvancedSecurityOptions']} to" - f" {advanced_security_config}" - ) - changed = True - return changed - - -def set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - domain_endpoint_config = desired_domain_config["DomainEndpointOptions"] - domain_endpoint_opts = module.params.get("domain_endpoint_options") - if domain_endpoint_opts is None: - return changed - if domain_endpoint_opts.get("enforce_https") is not None: - domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get("enforce_https") - if domain_endpoint_opts.get("tls_security_policy") is not None: - domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get("tls_security_policy") - if domain_endpoint_opts.get("custom_endpoint_enabled") is not None: - domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get("custom_endpoint_enabled") - if domain_endpoint_config["CustomEndpointEnabled"]: - if domain_endpoint_opts.get("custom_endpoint") is not None: - domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get("custom_endpoint") - if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None: - domain_endpoint_config["CustomEndpointCertificateArn"] = domain_endpoint_opts.get( - "custom_endpoint_certificate_arn" - ) - - if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: - change_set.append( - f"DomainEndpointOptions changed from {current_domain_config['DomainEndpointOptions']} to" - f" {domain_endpoint_config}" - ) - changed = True - return changed - - -def set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set): - changed = False - 
auto_tune_config = desired_domain_config["AutoTuneOptions"] - auto_tune_opts = module.params.get("auto_tune_options") - if auto_tune_opts is None: - return changed - schedules = auto_tune_opts.get("maintenance_schedules") - if auto_tune_opts.get("desired_state") is not None: - auto_tune_config["DesiredState"] = auto_tune_opts.get("desired_state") - if auto_tune_config["DesiredState"] != "ENABLED": - desired_domain_config["AutoTuneOptions"] = { - "DesiredState": "DISABLED", - } - elif schedules is not None: - auto_tune_config["MaintenanceSchedules"] = [] - for s in schedules: - schedule_entry = {} - start_at = s.get("start_at") - if start_at is not None: - if isinstance(start_at, datetime.datetime): - # The property was parsed from yaml to datetime, but the AWS API wants a string - start_at = start_at.strftime("%Y-%m-%d") - schedule_entry["StartAt"] = start_at - duration_opt = s.get("duration") - if duration_opt is not None: - schedule_entry["Duration"] = {} - if duration_opt.get("value") is not None: - schedule_entry["Duration"]["Value"] = duration_opt.get("value") - if duration_opt.get("unit") is not None: - schedule_entry["Duration"]["Unit"] = duration_opt.get("unit") - if s.get("cron_expression_for_recurrence") is not None: - schedule_entry["CronExpressionForRecurrence"] = s.get("cron_expression_for_recurrence") - auto_tune_config["MaintenanceSchedules"].append(schedule_entry) - if current_domain_config is not None: - if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: - change_set.append( - "AutoTuneOptions.DesiredState changed from" - f" {current_domain_config['AutoTuneOptions']['DesiredState']} to {auto_tune_config['DesiredState']}" - ) - changed = True - if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: - change_set.append( - "AutoTuneOptions.MaintenanceSchedules changed from" - f" {current_domain_config['AutoTuneOptions']['MaintenanceSchedules']} to" - f" {auto_tune_config['MaintenanceSchedules']}" - ) - changed = True - return changed - - -def set_access_policy(module, current_domain_config, desired_domain_config, change_set): - access_policy_config = None - changed = False - access_policy_opt = module.params.get("access_policies") - if access_policy_opt is None: - return changed - try: - access_policy_config = json.dumps(access_policy_opt) - except Exception as e: - module.fail_json(msg=f"Failed to convert the policy into valid JSON: {str(e)}") - if current_domain_config is not None: - # Updating existing domain - current_access_policy = json.loads(current_domain_config["AccessPolicies"]) - if not compare_policies(current_access_policy, access_policy_opt): - change_set.append(f"AccessPolicy changed from {current_access_policy} to {access_policy_opt}") - changed = True - desired_domain_config["AccessPolicies"] = access_policy_config - else: - # Creating new domain - desired_domain_config["AccessPolicies"] = access_policy_config - return changed - - -def ensure_domain_present(client, module): - domain_name = module.params.get("domain_name") - - # Create default if OpenSearch does not exist. If domain already exists, - # the data is populated by retrieving the current configuration from the API. 
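When the domain already exists, the defaults below are discarded and desired_domain_config becomes a deepcopy of the live configuration. The deep copy matters because the set_* helpers mutate nested dictionaries in place; with a shallow copy those mutations would leak back into current_domain_config and defeat the change detection. A minimal sketch of the difference (illustrative values only):

    from copy import deepcopy

    current = {"ClusterConfig": {"InstanceCount": 2}}

    shallow = dict(current)                  # copies only the top level
    shallow["ClusterConfig"]["InstanceCount"] = 3
    assert current["ClusterConfig"]["InstanceCount"] == 3   # current was mutated too

    current["ClusterConfig"]["InstanceCount"] = 2
    deep = deepcopy(current)                 # copies the nested dicts as well
    deep["ClusterConfig"]["InstanceCount"] = 3
    assert current["ClusterConfig"]["InstanceCount"] == 2   # current is untouched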
-    desired_domain_config = {
-        "DomainName": module.params.get("domain_name"),
-        "EngineVersion": "OpenSearch_1.1",
-        "ClusterConfig": {
-            "InstanceType": "t2.small.search",
-            "InstanceCount": 2,
-            "ZoneAwarenessEnabled": False,
-            "DedicatedMasterEnabled": False,
-            "WarmEnabled": False,
-        },
-        # By default, create the domain attached to the Internet.
-        # If the "VPCOptions" property is specified, even if empty, the API server
-        # interprets it as an incomplete VPC configuration.
-        # "VPCOptions": {},
-        "EBSOptions": {
-            "EBSEnabled": False,
-        },
-        "EncryptionAtRestOptions": {
-            "Enabled": False,
-        },
-        "NodeToNodeEncryptionOptions": {
-            "Enabled": False,
-        },
-        "SnapshotOptions": {
-            "AutomatedSnapshotStartHour": 0,
-        },
-        "CognitoOptions": {
-            "Enabled": False,
-        },
-        "AdvancedSecurityOptions": {
-            "Enabled": False,
-        },
-        "DomainEndpointOptions": {
-            "CustomEndpointEnabled": False,
-        },
-        "AutoTuneOptions": {
-            "DesiredState": "DISABLED",
-        },
-    }
-    # Determine if the OpenSearch domain already exists.
-    # current_domain_config may be None if the domain does not exist.
-    (current_domain_config, domain_arn) = get_domain_config(client, module, domain_name)
-    if current_domain_config is not None:
-        desired_domain_config = deepcopy(current_domain_config)
-
-    if module.params.get("engine_version") is not None:
-        # Validate the engine_version
-        v = parse_version(module.params.get("engine_version"))
-        if v is None:
-            module.fail_json("Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y")
-        desired_domain_config["EngineVersion"] = module.params.get("engine_version")
-
-    changed = False
-    change_set = []  # For check mode purposes
-
-    changed |= set_cluster_config(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_ebs_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_vpc_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_snapshot_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_cognito_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set)
-    changed |= set_access_policy(module, current_domain_config, desired_domain_config, change_set)
-
-    if current_domain_config is not None:
-        if desired_domain_config["EngineVersion"] != current_domain_config["EngineVersion"]:
-            changed = True
-            change_set.append("EngineVersion changed")
-            upgrade_domain(
-                client,
-                module,
-                current_domain_config["EngineVersion"],
-                desired_domain_config["EngineVersion"],
-            )
-
-    if changed:
-        if module.check_mode:
-            module.exit_json(
-                changed=True,
-                msg=f"Would have updated domain if not in check mode: {change_set}",
-            )
-        # Remove the "EngineVersion" attribute; the AWS API does not accept it.
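The changed |= set_*(...) accumulation above is also deliberate: unlike a short-circuiting or, the |= operator always evaluates its right-hand side, so every helper still runs, and still records its differences in change_set, even after an earlier helper has reported a change. A minimal illustration of the difference:

    def probe():
        probe.calls += 1
        return False

    probe.calls = 0
    changed = True
    changed |= probe()                      # probe() runs; |= never short-circuits
    # with: changed = changed or probe()    # probe() would be skipped here
    assert probe.calls == 1 and changed is True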
-        desired_domain_config.pop("EngineVersion", None)
-        try:
-            client.update_domain_config(**desired_domain_config)
-        except (
-            botocore.exceptions.BotoCoreError,
-            botocore.exceptions.ClientError,
-        ) as e:
-            module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}")
-
-    else:
-        # Create new OpenSearch cluster
-        if module.params.get("access_policies") is None:
-            module.fail_json("state is present but the following is missing: access_policies")
-
-        changed = True
-        if module.check_mode:
-            module.exit_json(changed=True, msg="Would have created a domain if not in check mode")
-        try:
-            response = client.create_domain(**desired_domain_config)
-            domain = response["DomainStatus"]
-            domain_arn = domain["ARN"]
-        except (
-            botocore.exceptions.BotoCoreError,
-            botocore.exceptions.ClientError,
-        ) as e:
-            module.fail_json_aws(e, msg=f"Couldn't create domain {domain_name}")
-
-    try:
-        existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"])
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, f"Couldn't get tags for domain {domain_name}")
-
-    desired_tags = module.params["tags"]
-    purge_tags = module.params["purge_tags"]
-    changed |= ensure_tags(client, module, domain_arn, existing_tags, desired_tags, purge_tags)
-
-    if module.params.get("wait") and not module.check_mode:
-        wait_for_domain_status(client, module, domain_name, "domain_available")
-
-    domain = get_domain_status(client, module, domain_name)
-
-    return dict(changed=changed, **normalize_opensearch(client, module, domain))
-
-
-def main():
-    module = AnsibleAWSModule(
-        argument_spec=dict(
-            state=dict(choices=["present", "absent"], default="present"),
-            domain_name=dict(required=True),
-            engine_version=dict(),
-            allow_intermediate_upgrades=dict(required=False, type="bool", default=True),
-            access_policies=dict(required=False, type="dict"),
-            cluster_config=dict(
-                type="dict",
-                default=None,
-                options=dict(
-                    instance_type=dict(),
-                    instance_count=dict(required=False, type="int"),
-                    zone_awareness=dict(required=False, type="bool"),
-                    availability_zone_count=dict(required=False, type="int"),
-                    dedicated_master=dict(required=False, type="bool"),
-                    dedicated_master_instance_type=dict(),
-                    dedicated_master_instance_count=dict(type="int"),
-                    warm_enabled=dict(required=False, type="bool"),
-                    warm_type=dict(required=False),
-                    warm_count=dict(required=False, type="int"),
-                    cold_storage_options=dict(
-                        type="dict",
-                        default=None,
-                        options=dict(
-                            enabled=dict(required=False, type="bool"),
-                        ),
-                    ),
-                ),
-            ),
-            snapshot_options=dict(
-                type="dict",
-                default=None,
-                options=dict(
-                    automated_snapshot_start_hour=dict(required=False, type="int"),
-                ),
-            ),
-            ebs_options=dict(
-                type="dict",
-                default=None,
-                options=dict(
-                    ebs_enabled=dict(required=False, type="bool"),
-                    volume_type=dict(required=False),
-                    volume_size=dict(required=False, type="int"),
-                    iops=dict(required=False, type="int"),
-                ),
-            ),
-            vpc_options=dict(
-                type="dict",
-                default=None,
-                options=dict(
-                    subnets=dict(type="list", elements="str", required=False),
-                    security_groups=dict(type="list", elements="str", required=False),
-                ),
-            ),
-            cognito_options=dict(
-                type="dict",
-                default=None,
-                options=dict(
-                    enabled=dict(required=False, type="bool"),
-                    user_pool_id=dict(required=False),
-                    identity_pool_id=dict(required=False),
-                    role_arn=dict(required=False, no_log=False),
-                ),
-            ),
-            encryption_at_rest_options=dict(
-                type="dict",
-                default=None,
-                options=dict(
enabled=dict(type="bool"), - kms_key_id=dict(required=False), - ), - ), - node_to_node_encryption_options=dict( - type="dict", - default=None, - options=dict( - enabled=dict(type="bool"), - ), - ), - domain_endpoint_options=dict( - type="dict", - default=None, - options=dict( - enforce_https=dict(type="bool"), - tls_security_policy=dict(), - custom_endpoint_enabled=dict(type="bool"), - custom_endpoint=dict(), - custom_endpoint_certificate_arn=dict(), - ), - ), - advanced_security_options=dict( - type="dict", - default=None, - options=dict( - enabled=dict(type="bool"), - internal_user_database_enabled=dict(type="bool"), - master_user_options=dict( - type="dict", - default=None, - options=dict( - master_user_arn=dict(), - master_user_name=dict(), - master_user_password=dict(no_log=True), - ), - ), - saml_options=dict( - type="dict", - default=None, - options=dict( - enabled=dict(type="bool"), - idp=dict( - type="dict", - default=None, - options=dict( - metadata_content=dict(), - entity_id=dict(), - ), - ), - master_user_name=dict(), - master_backend_role=dict(), - subject_key=dict(no_log=False), - roles_key=dict(no_log=False), - session_timeout_minutes=dict(type="int"), - ), - ), - ), - ), - auto_tune_options=dict( - type="dict", - default=None, - options=dict( - desired_state=dict(choices=["ENABLED", "DISABLED"]), - maintenance_schedules=dict( - type="list", - elements="dict", - default=None, - options=dict( - start_at=dict(), - duration=dict( - type="dict", - default=None, - options=dict( - value=dict(type="int"), - unit=dict(choices=["HOURS"]), - ), - ), - cron_expression_for_recurrence=dict(), - ), - ), - ), - ), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - ), - supports_check_mode=True, - ) - - module.require_botocore_at_least("1.21.38") - - try: - client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service") - - if module.params["state"] == "absent": - ret_dict = ensure_domain_absent(client, module) - else: - ret_dict = ensure_domain_present(client, module) - - module.exit_json(**ret_dict) - - -if __name__ == "__main__": - main() diff --git a/opensearch_info.py b/opensearch_info.py deleted file mode 100644 index 976ea4279f7..00000000000 --- a/opensearch_info.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: opensearch_info -short_description: obtain information about one or more OpenSearch or ElasticSearch domain -description: - - Obtain information about one Amazon OpenSearch Service domain. -version_added: 4.0.0 -author: - - "Sebastien Rosset (@sebastien-rosset)" -options: - domain_name: - description: - - The name of the Amazon OpenSearch/ElasticSearch Service domain. - required: false - type: str - tags: - description: - - > - A dict of tags that are used to filter OpenSearch domains that match - all tag key, value pairs. 
-    required: false
-    type: dict
-extends_documentation_fragment:
-  - amazon.aws.common.modules
-  - amazon.aws.region.modules
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-- name: Get information about an OpenSearch domain instance
-  community.aws.opensearch_info:
-    domain_name: my-search-cluster
-  register: new_cluster_info
-
-- name: Get all OpenSearch instances
-  community.aws.opensearch_info:
-
-- name: Get all OpenSearch instances that have the specified Key, Value tags
-  community.aws.opensearch_info:
-    tags:
-      Applications: search
-      Environment: Development
-"""
-
-RETURN = r"""
-instances:
-  description: List of OpenSearch domain instances
-  returned: always
-  type: complex
-  contains:
-    domain_status:
-      description: The current status of the OpenSearch domain.
-      returned: always
-      type: complex
-      contains:
-        arn:
-          description: The ARN of the OpenSearch domain.
-          returned: always
-          type: str
-        domain_id:
-          description: The unique identifier for the OpenSearch domain.
-          returned: always
-          type: str
-        domain_name:
-          description: The name of the OpenSearch domain.
-          returned: always
-          type: str
-        created:
-          description:
-            - >
-              The domain creation status. True if the creation of a domain is complete.
-              False if domain creation is still in progress.
-          returned: always
-          type: bool
-        deleted:
-          description:
-            - >
-              The domain deletion status.
-              True if a delete request has been received for the domain but resource cleanup is still in progress.
-              False if the domain has not been deleted.
-              Once domain deletion is complete, the status of the domain is no longer returned.
-          returned: always
-          type: bool
-        endpoint:
-          description: The domain endpoint that you use to submit index and search requests.
-          returned: always
-          type: str
-        endpoints:
-          description:
-            - >
-              Map containing the domain endpoints used to submit index and search requests.
-            - >
-              When you create a domain attached to a VPC, this property contains
-              the DNS endpoint to which service requests are submitted.
-            - >
-              If you query the opensearch_info immediately after creating the OpenSearch cluster,
-              the VPC endpoint may not be returned. It may take several minutes until the
-              endpoint is available.
-          type: dict
-        processing:
-          description:
-            - >
-              The status of the domain configuration.
-              True if Amazon OpenSearch Service is processing configuration changes.
-              False if the configuration is active.
-          returned: always
-          type: bool
-        upgrade_processing:
-          description: True if a domain upgrade operation is in progress.
-          returned: always
-          type: bool
-        engine_version:
-          description: The version of the OpenSearch domain.
-          returned: always
-          type: str
-          sample: OpenSearch_1.1
-        cluster_config:
-          description:
-            - Parameters for the cluster configuration of an OpenSearch Service domain.
-          type: complex
-          contains:
-            instance_type:
-              description:
-                - Type of the instances to use for the domain.
-              type: str
-            instance_count:
-              description:
-                - Number of instances for the domain.
-              type: int
-            zone_awareness:
-              description:
-                - A boolean value to indicate whether zone awareness is enabled.
-              type: bool
-            availability_zone_count:
-              description:
-                - >
-                  An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
-                  This should be equal to the number of subnets if the VPC endpoint is enabled.
-              type: int
-            dedicated_master_enabled:
-              description:
-                - A boolean value to indicate whether a dedicated master node is enabled.
-              type: bool
-            zone_awareness_enabled:
-              description:
-                - A boolean value to indicate whether zone awareness is enabled.
-              type: bool
-            zone_awareness_config:
-              description:
-                - The zone awareness configuration for a domain when zone awareness is enabled.
-              type: complex
-              contains:
-                availability_zone_count:
-                  description:
-                    - An integer value to indicate the number of availability zones for a domain when zone awareness is enabled.
-                  type: int
-            dedicated_master_type:
-              description:
-                - The instance type for a dedicated master node.
-              type: str
-            dedicated_master_count:
-              description:
-                - Total number of dedicated master nodes, active and on standby, for the domain.
-              type: int
-            warm_enabled:
-              description:
-                - True to enable UltraWarm storage.
-              type: bool
-            warm_type:
-              description:
-                - The instance type for the OpenSearch domain's warm nodes.
-              type: str
-            warm_count:
-              description:
-                - The number of UltraWarm nodes in the domain.
-              type: int
-            cold_storage_options:
-              description:
-                - Specifies the ColdStorageOptions config for a domain.
-              type: complex
-              contains:
-                enabled:
-                  description:
-                    - True to enable cold storage. Supported on Elasticsearch 7.9 or above.
-                  type: bool
-        ebs_options:
-          description:
-            - Parameters to configure EBS-based storage for an OpenSearch Service domain.
-          type: complex
-          contains:
-            ebs_enabled:
-              description:
-                - Specifies whether EBS-based storage is enabled.
-              type: bool
-            volume_type:
-              description:
-                - Specifies the volume type for EBS-based storage. "standard"|"gp2"|"io1"
-              type: str
-            volume_size:
-              description:
-                - Integer to specify the size of an EBS volume.
-              type: int
-            iops:
-              description:
-                - The IOPS for a Provisioned IOPS EBS volume (SSD).
-              type: int
-        vpc_options:
-          description:
-            - Options to specify the subnets and security groups for a VPC endpoint.
-          type: complex
-          contains:
-            vpc_id:
-              description: The VPC ID for the domain.
-              type: str
-            subnet_ids:
-              description:
-                - Specifies the subnet IDs for the VPC endpoint.
-              type: list
-              elements: str
-            security_group_ids:
-              description:
-                - Specifies the security group IDs for the VPC endpoint.
-              type: list
-              elements: str
-            availability_zones:
-              description:
-                - The Availability Zones for the domain.
-              type: list
-              elements: str
-        snapshot_options:
-          description:
-            - Option to set time, in UTC format, of the daily automated snapshot.
-          type: complex
-          contains:
-            automated_snapshot_start_hour:
-              description:
-                - >
-                  Integer value from 0 to 23 specifying when the service takes a daily automated snapshot
-                  of the specified Elasticsearch domain.
-              type: int
-        access_policies:
-          description:
-            - IAM access policy as a JSON-formatted string.
-          type: complex
-        encryption_at_rest_options:
-          description:
-            - Parameters to enable encryption at rest.
-          type: complex
-          contains:
-            enabled:
-              description:
-                - Should data be encrypted while at rest.
-              type: bool
-            kms_key_id:
-              description:
-                - If encryption at rest enabled, this identifies the encryption key to use.
-                - The value should be a KMS key ARN. It can also be the KMS key id.
-              type: str
-        node_to_node_encryption_options:
-          description:
-            - Node-to-node encryption options.
-          type: complex
-          contains:
-            enabled:
-              description:
-                - True to enable node-to-node encryption.
-              type: bool
-        cognito_options:
-          description:
-            - Parameters to configure OpenSearch Service to use Amazon Cognito authentication for OpenSearch Dashboards.
- type: complex - contains: - enabled: - description: - - The option to enable Cognito for OpenSearch Dashboards authentication. - type: bool - user_pool_id: - description: - - The Cognito user pool ID for OpenSearch Dashboards authentication. - type: str - identity_pool_id: - description: - - The Cognito identity pool ID for OpenSearch Dashboards authentication. - type: str - role_arn: - description: - - The role ARN that provides OpenSearch permissions for accessing Cognito resources. - type: str - domain_endpoint_options: - description: - - Options to specify configuration that will be applied to the domain endpoint. - type: complex - contains: - enforce_https: - description: - - Whether only HTTPS endpoint should be enabled for the domain. - type: bool - tls_security_policy: - description: - - Specify the TLS security policy to apply to the HTTPS endpoint of the domain. - type: str - custom_endpoint_enabled: - description: - - Whether to enable a custom endpoint for the domain. - type: bool - custom_endpoint: - description: - - The fully qualified domain for your custom endpoint. - type: str - custom_endpoint_certificate_arn: - description: - - The ACM certificate ARN for your custom endpoint. - type: str - advanced_security_options: - description: - - Specifies advanced security options. - type: complex - contains: - enabled: - description: - - True if advanced security is enabled. - - You must enable node-to-node encryption to use advanced security options. - type: bool - internal_user_database_enabled: - description: - - True if the internal user database is enabled. - type: bool - master_user_options: - description: - - Credentials for the master user, username and password, ARN, or both. - type: complex - contains: - master_user_arn: - description: - - ARN for the master user (if IAM is enabled). - type: str - master_user_name: - description: - - The username of the master user, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - master_user_password: - description: - - The password of the master user, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - saml_options: - description: - - The SAML application configuration for the domain. - type: complex - contains: - enabled: - description: - - True if SAML is enabled. - type: bool - idp: - description: - - The SAML Identity Provider's information. - type: complex - contains: - metadata_content: - description: - - The metadata of the SAML application in XML format. - type: str - entity_id: - description: - - The unique entity ID of the application in SAML identity provider. - type: str - master_user_name: - description: - - The SAML master username, which is stored in the Amazon OpenSearch Service domain internal database. - type: str - master_backend_role: - description: - - The backend role that the SAML master user is mapped to. - type: str - subject_key: - description: - - Element of the SAML assertion to use for username. Default is NameID. - type: str - roles_key: - description: - - Element of the SAML assertion to use for backend roles. Default is roles. - type: str - session_timeout_minutes: - description: - - > - The duration, in minutes, after which a user session becomes inactive. - Acceptable values are between 1 and 1440, and the default value is 60. - type: int - auto_tune_options: - description: - - Specifies Auto-Tune options. - type: complex - contains: - desired_state: - description: - - The Auto-Tune desired state. 
Valid values are ENABLED and DISABLED. - type: str - maintenance_schedules: - description: - - A list of maintenance schedules. - type: list - elements: dict - contains: - start_at: - description: - - The timestamp at which the Auto-Tune maintenance schedule starts. - type: str - duration: - description: - - Specifies maintenance schedule duration, duration value and duration unit. - type: complex - contains: - value: - description: - - Integer to specify the value of a maintenance schedule duration. - type: int - unit: - description: - - The unit of a maintenance schedule duration. Valid value is HOURS. - type: str - cron_expression_for_recurrence: - description: - - A cron expression for a recurring maintenance schedule. - type: str - domain_config: - description: The OpenSearch domain configuration - returned: always - type: complex - contains: - domain_name: - description: The name of the OpenSearch domain. - returned: always - type: str -""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config -from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status - - -def domain_info(client, module): - domain_name = module.params.get("domain_name") - filter_tags = module.params.get("tags") - - domain_list = [] - if domain_name: - domain_status = get_domain_status(client, module, domain_name) - if domain_status: - domain_list.append({"DomainStatus": domain_status}) - else: - domain_summary_list = client.list_domain_names()["DomainNames"] - for d in domain_summary_list: - domain_status = get_domain_status(client, module, d["DomainName"]) - if domain_status: - domain_list.append({"DomainStatus": domain_status}) - - # Get the domain tags - for domain in domain_list: - current_domain_tags = None - domain_arn = domain["DomainStatus"]["ARN"] - try: - current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] - domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - # This could potentially happen if a domain is deleted between the time - # its domain status was queried and the tags were queried. 
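list_tags() returns tags in the boto3 list-of-dicts shape; boto3_tag_list_to_ansible_dict flattens that into the plain mapping the tag filter below expects. A sketch of the transformation, hand-rolled here for illustration (the real helper lives in amazon.aws module_utils):

    # boto3 shape, as returned by client.list_tags()["TagList"]
    tag_list = [
        {"Key": "Environment", "Value": "Development"},
        {"Key": "Applications", "Value": "search"},
    ]

    # Ansible shape, as consumed by the tag filter
    tags = {t["Key"]: t["Value"] for t in tag_list}
    assert tags == {"Environment": "Development", "Applications": "search"}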
- domain["Tags"] = {} - - # Filter by tags - if filter_tags: - for tag_key in filter_tags: - try: - domain_list = [ - c - for c in domain_list - if ("Tags" in c) and (tag_key in c["Tags"]) and (c["Tags"][tag_key] == filter_tags[tag_key]) - ] - except (TypeError, AttributeError) as e: - module.fail_json(msg="OpenSearch tag filtering error", exception=e) - - # Get the domain config - for idx, domain in enumerate(domain_list): - domain_name = domain["DomainStatus"]["DomainName"] - (domain_config, arn) = get_domain_config(client, module, domain_name) - if domain_config: - domain["DomainConfig"] = domain_config - domain_list[idx] = camel_dict_to_snake_dict(domain, ignore_list=["AdvancedOptions", "Endpoints", "Tags"]) - - return dict(changed=False, domains=domain_list) - - -def main(): - module = AnsibleAWSModule( - argument_spec=dict( - domain_name=dict(required=False), - tags=dict(type="dict", required=False), - ), - supports_check_mode=True, - ) - module.require_botocore_at_least("1.21.38") - - try: - client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS opensearch service") - - module.exit_json(**domain_info(client, module)) - - -if __name__ == "__main__": - main() diff --git a/redshift.py b/redshift.py deleted file mode 100644 index 4463722e59e..00000000000 --- a/redshift.py +++ /dev/null @@ -1,692 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014 Jens Carl, Hothead Games Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -author: - - "Jens Carl (@j-carl), Hothead Games Inc." - - "Rafael Driutti (@rafaeldriutti)" -module: redshift -version_added: 1.0.0 -short_description: create, delete, or modify an Amazon Redshift instance -description: - - Creates, deletes, or modifies Amazon Redshift cluster instances. -options: - command: - description: - - Specifies the action to take. - required: true - choices: [ 'create', 'facts', 'delete', 'modify' ] - type: str - identifier: - description: - - Redshift cluster identifier. - required: true - type: str - node_type: - description: - - The node type of the cluster. - - Require when I(command=create). - choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large','dc2.large', - 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'] - type: str - username: - description: - - Master database username. - - Used only when I(command=create). - type: str - password: - description: - - Master database password. - - Used only when I(command=create). - type: str - cluster_type: - description: - - The type of cluster. - choices: ['multi-node', 'single-node' ] - default: 'single-node' - type: str - db_name: - description: - - Name of the database. - type: str - availability_zone: - description: - - Availability zone in which to launch cluster. - aliases: ['zone', 'aws_zone'] - type: str - number_of_nodes: - description: - - Number of nodes. - - Only used when I(cluster_type=multi-node). - type: int - cluster_subnet_group_name: - description: - - Which subnet to place the cluster. - aliases: ['subnet'] - type: str - cluster_security_groups: - description: - - In which security group the cluster belongs. 
- type: list - elements: str - aliases: ['security_groups'] - vpc_security_group_ids: - description: - - VPC security group - aliases: ['vpc_security_groups'] - type: list - elements: str - skip_final_cluster_snapshot: - description: - - Skip a final snapshot before deleting the cluster. - - Used only when I(command=delete). - aliases: ['skip_final_snapshot'] - default: false - type: bool - final_cluster_snapshot_identifier: - description: - - Identifier of the final snapshot to be created before deleting the cluster. - - If this parameter is provided, I(skip_final_cluster_snapshot) must be C(false). - - Used only when I(command=delete). - aliases: ['final_snapshot_id'] - type: str - preferred_maintenance_window: - description: - - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))' - - Times are specified in UTC. - - If not specified then a random 30 minute maintenance window is assigned. - aliases: ['maintance_window', 'maint_window'] - type: str - cluster_parameter_group_name: - description: - - Name of the cluster parameter group. - aliases: ['param_group_name'] - type: str - automated_snapshot_retention_period: - description: - - The number of days that automated snapshots are retained. - aliases: ['retention_period'] - type: int - port: - description: - - Which port the cluster is listening on. - type: int - cluster_version: - description: - - Which version the cluster should have. - aliases: ['version'] - choices: ['1.0'] - type: str - allow_version_upgrade: - description: - - When I(allow_version_upgrade=true) the cluster may be automatically - upgraded during the maintenance window. - aliases: ['version_upgrade'] - default: true - type: bool - publicly_accessible: - description: - - If the cluster is accessible publicly or not. - default: false - type: bool - encrypted: - description: - - If the cluster is encrypted or not. - default: false - type: bool - elastic_ip: - description: - - An Elastic IP to use for the cluster. - type: str - new_cluster_identifier: - description: - - Only used when command=modify. - aliases: ['new_identifier'] - type: str - wait: - description: - - When I(command=create), I(command=modify) or I(command=restore) then wait for the database to enter the 'available' state. - - When I(command=delete) wait for the database to be terminated. - type: bool - default: false - wait_timeout: - description: - - When I(wait=true) defines how long in seconds before giving up. - default: 300 - type: int - enhanced_vpc_routing: - description: - - Whether the cluster should have enhanced VPC routing enabled. - default: false - type: bool -notes: - - Support for I(tags) and I(purge_tags) was added in release 1.3.0. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Basic cluster provisioning example - community.aws.redshift: - command: create - node_type: ds1.xlarge - identifier: new_cluster - username: cluster_admin - password: 1nsecure - -- name: Cluster delete example - community.aws.redshift: - command: delete - identifier: new_cluster - skip_final_cluster_snapshot: true - wait: true -""" - -RETURN = r""" -cluster: - description: dictionary containing all the cluster information - returned: success - type: complex - contains: - identifier: - description: Id of the cluster. - returned: success - type: str - sample: "new_redshift_cluster" - create_time: - description: Time of the cluster creation as timestamp. 
- returned: success - type: float - sample: 1430158536.308 - status: - description: Status of the cluster. - returned: success - type: str - sample: "available" - db_name: - description: Name of the database. - returned: success - type: str - sample: "new_db_name" - availability_zone: - description: Amazon availability zone where the cluster is located. "None" until cluster is available. - returned: success - type: str - sample: "us-east-1b" - maintenance_window: - description: Time frame when maintenance/upgrade are done. - returned: success - type: str - sample: "sun:09:30-sun:10:00" - private_ip_address: - description: Private IP address of the main node. - returned: success - type: str - sample: "10.10.10.10" - public_ip_address: - description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled. - returned: success - type: str - sample: "0.0.0.0" - port: - description: Port of the cluster. "None" until cluster is available. - returned: success - type: int - sample: 5439 - url: - description: FQDN of the main cluster node. "None" until cluster is available. - returned: success - type: str - sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com" - enhanced_vpc_routing: - description: status of the enhanced vpc routing feature. - returned: success - type: bool - tags: - description: aws tags for cluster. - returned: success - type: dict -""" - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _ensure_tags(redshift, identifier, existing_tags, module): - """Compares and update resource tags""" - - account_id, partition = get_aws_account_info(module) - region = module.region - resource_arn = f"arn:{partition}:redshift:{region}:{account_id}:cluster:{identifier}" - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - - tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags) - - if tags_to_add: - try: - redshift.create_tags(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to add tags to cluster") - - if tags_to_remove: - try: - redshift.delete_tags(ResourceName=resource_arn, TagKeys=tags_to_remove) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete tags on cluster") - - changed = bool(tags_to_add or tags_to_remove) - return changed - - -def _collect_facts(resource): - """Transform cluster information to dict.""" - facts = { - "identifier": resource["ClusterIdentifier"], - "status": resource["ClusterStatus"], - "username": 
resource["MasterUsername"], - "db_name": resource["DBName"], - "maintenance_window": resource["PreferredMaintenanceWindow"], - "enhanced_vpc_routing": resource["EnhancedVpcRouting"], - } - - for node in resource["ClusterNodes"]: - if node["NodeRole"] in ("SHARED", "LEADER"): - facts["private_ip_address"] = node["PrivateIPAddress"] - if facts["enhanced_vpc_routing"] is False: - facts["public_ip_address"] = node["PublicIPAddress"] - else: - facts["public_ip_address"] = None - break - - # Some parameters are not ready instantly if you don't wait for available - # cluster status - facts["create_time"] = None - facts["url"] = None - facts["port"] = None - facts["availability_zone"] = None - facts["tags"] = {} - - if resource["ClusterStatus"] != "creating": - facts["create_time"] = resource["ClusterCreateTime"] - facts["url"] = resource["Endpoint"]["Address"] - facts["port"] = resource["Endpoint"]["Port"] - facts["availability_zone"] = resource["AvailabilityZone"] - facts["tags"] = boto3_tag_list_to_ansible_dict(resource["Tags"]) - - return facts - - -@AWSRetry.jittered_backoff() -def _describe_cluster(redshift, identifier): - """ - Basic wrapper around describe_clusters with a retry applied - """ - return redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] - - -@AWSRetry.jittered_backoff() -def _create_cluster(redshift, **kwargs): - """ - Basic wrapper around create_cluster with a retry applied - """ - return redshift.create_cluster(**kwargs) - - -# Simple wrapper around delete, try to avoid throwing an error if some other -# operation is in progress -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) -def _delete_cluster(redshift, **kwargs): - """ - Basic wrapper around delete_cluster with a retry applied. - Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that - we can still delete a cluster if some kind of change operation was in - progress. - """ - return redshift.delete_cluster(**kwargs) - - -@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"]) -def _modify_cluster(redshift, **kwargs): - """ - Basic wrapper around modify_cluster with a retry applied. 
- Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases - where another modification is still in progress - """ - return redshift.modify_cluster(**kwargs) - - -def create_cluster(module, redshift): - """ - Create a new cluster - - module: AnsibleAWSModule object - redshift: authenticated redshift connection object - - Returns: - """ - - identifier = module.params.get("identifier") - node_type = module.params.get("node_type") - username = module.params.get("username") - password = module.params.get("password") - d_b_name = module.params.get("db_name") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - tags = module.params.get("tags") - - changed = True - # Package up the optional parameters - params = {} - for p in ( - "cluster_type", - "cluster_security_groups", - "vpc_security_group_ids", - "cluster_subnet_group_name", - "availability_zone", - "preferred_maintenance_window", - "cluster_parameter_group_name", - "automated_snapshot_retention_period", - "port", - "cluster_version", - "allow_version_upgrade", - "number_of_nodes", - "publicly_accessible", - "encrypted", - "elastic_ip", - "enhanced_vpc_routing", - ): - # https://github.com/boto/boto3/issues/400 - if module.params.get(p) is not None: - params[p] = module.params.get(p) - - if d_b_name: - params["d_b_name"] = d_b_name - if tags: - tags = ansible_dict_to_boto3_tag_list(tags) - params["tags"] = tags - - try: - _describe_cluster(redshift, identifier) - changed = False - except is_boto3_error_code("ClusterNotFound"): - try: - _create_cluster( - redshift, - ClusterIdentifier=identifier, - NodeType=node_type, - MasterUsername=username, - MasterUserPassword=password, - **snake_dict_to_camel_dict(params, capitalize_first=True), - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to create cluster") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe cluster") - if wait: - attempts = wait_timeout // 60 - waiter = redshift.get_waiter("cluster_available") - try: - waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Timeout waiting for the cluster creation") - try: - resource = _describe_cluster(redshift, identifier) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe cluster") - - if tags: - if _ensure_tags(redshift, identifier, resource["Tags"], module): - changed = True - resource = _describe_cluster(redshift, identifier) - - return changed, _collect_facts(resource) - - -def describe_cluster(module, redshift): - """ - Collect data about the cluster. - - module: Ansible module object - redshift: authenticated redshift connection object - """ - identifier = module.params.get("identifier") - - try: - resource = _describe_cluster(redshift, identifier) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error describing cluster") - - return True, _collect_facts(resource) - - -def delete_cluster(module, redshift): - """ - Delete a cluster. 
- - module: Ansible module object - redshift: authenticated redshift connection object - """ - - identifier = module.params.get("identifier") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - - params = {} - for p in ("skip_final_cluster_snapshot", "final_cluster_snapshot_identifier"): - if p in module.params: - # https://github.com/boto/boto3/issues/400 - if module.params.get(p) is not None: - params[p] = module.params.get(p) - - try: - _delete_cluster( - redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) - ) - except is_boto3_error_code("ClusterNotFound"): - return False, {} - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete cluster") - - if wait: - attempts = wait_timeout // 60 - waiter = redshift.get_waiter("cluster_deleted") - try: - waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Timeout deleting the cluster") - - return True, {} - - -def modify_cluster(module, redshift): - """ - Modify an existing cluster. - - module: Ansible module object - redshift: authenticated redshift connection object - """ - - identifier = module.params.get("identifier") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - - # Package up the optional parameters - params = {} - for p in ( - "cluster_type", - "cluster_security_groups", - "vpc_security_group_ids", - "cluster_subnet_group_name", - "availability_zone", - "preferred_maintenance_window", - "cluster_parameter_group_name", - "automated_snapshot_retention_period", - "port", - "cluster_version", - "allow_version_upgrade", - "number_of_nodes", - "new_cluster_identifier", - ): - # https://github.com/boto/boto3/issues/400 - if module.params.get(p) is not None: - params[p] = module.params.get(p) - - # enhanced_vpc_routing parameter change needs an exclusive request - if module.params.get("enhanced_vpc_routing") is not None: - try: - _modify_cluster( - redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") - if wait: - attempts = wait_timeout // 60 - waiter = redshift.get_waiter("cluster_available") - try: - waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification") - - # change the rest - try: - _modify_cluster( - redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") - - if module.params.get("new_cluster_identifier"): - identifier = module.params.get("new_cluster_identifier") - - if wait: - attempts = wait_timeout // 60 - waiter2 = redshift.get_waiter("cluster_available") - try: - waiter2.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg="Timeout waiting for cluster modification") - try: - resource = _describe_cluster(redshift, identifier) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") - - if _ensure_tags(redshift, identifier, resource["Tags"], module): - resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] - - return True, _collect_facts(resource) - - -def main(): - argument_spec = dict( - command=dict(choices=["create", "facts", "delete", "modify"], required=True), - identifier=dict(required=True), - node_type=dict( - choices=[ - "ds1.xlarge", - "ds1.8xlarge", - "ds2.xlarge", - "ds2.8xlarge", - "dc1.large", - "dc2.large", - "dc1.8xlarge", - "dw1.xlarge", - "dw1.8xlarge", - "dw2.large", - "dw2.8xlarge", - ], - required=False, - ), - username=dict(required=False), - password=dict(no_log=True, required=False), - db_name=dict(required=False), - cluster_type=dict(choices=["multi-node", "single-node"], default="single-node"), - cluster_security_groups=dict(aliases=["security_groups"], type="list", elements="str"), - vpc_security_group_ids=dict(aliases=["vpc_security_groups"], type="list", elements="str"), - skip_final_cluster_snapshot=dict(aliases=["skip_final_snapshot"], type="bool", default=False), - final_cluster_snapshot_identifier=dict(aliases=["final_snapshot_id"], required=False), - cluster_subnet_group_name=dict(aliases=["subnet"]), - availability_zone=dict(aliases=["aws_zone", "zone"]), - preferred_maintenance_window=dict(aliases=["maintance_window", "maint_window"]), - cluster_parameter_group_name=dict(aliases=["param_group_name"]), - automated_snapshot_retention_period=dict(aliases=["retention_period"], type="int"), - port=dict(type="int"), - cluster_version=dict(aliases=["version"], choices=["1.0"]), - allow_version_upgrade=dict(aliases=["version_upgrade"], type="bool", default=True), - number_of_nodes=dict(type="int"), - publicly_accessible=dict(type="bool", default=False), - encrypted=dict(type="bool", default=False), - elastic_ip=dict(required=False), - new_cluster_identifier=dict(aliases=["new_identifier"]), - enhanced_vpc_routing=dict(type="bool", default=False), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - ) - - required_if = [ - ("command", "delete", ["skip_final_cluster_snapshot"]), - ("command", "create", ["node_type", "username", "password"]), - ] - - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - ) - - command = module.params.get("command") - skip_final_cluster_snapshot = module.params.get("skip_final_cluster_snapshot") - final_cluster_snapshot_identifier = module.params.get("final_cluster_snapshot_identifier") - # can't use module basic required_if check for this case - if command == "delete" and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: - module.fail_json( - msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False" - ) - - conn = module.client("redshift") - - changed = True - if command == "create": - (changed, cluster) = create_cluster(module, conn) - - elif command == "facts": - (changed, cluster) = describe_cluster(module, conn) - - elif command == "delete": - (changed, cluster) = delete_cluster(module, conn) - - elif command == "modify": - (changed, cluster) = 
modify_cluster(module, conn) - - module.exit_json(changed=changed, cluster=cluster) - - -if __name__ == "__main__": - main() diff --git a/redshift_cross_region_snapshots.py b/redshift_cross_region_snapshots.py deleted file mode 100644 index d2894dfcba8..00000000000 --- a/redshift_cross_region_snapshots.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, JR Kerkstra -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: redshift_cross_region_snapshots -version_added: 1.0.0 -short_description: Manage Redshift Cross Region Snapshots -description: - - Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots. - - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy) -author: - - JR Kerkstra (@captainkerk) -options: - cluster_name: - description: - - The name of the cluster to configure cross-region snapshots for. - required: true - aliases: [ "cluster" ] - type: str - state: - description: - - Create or remove the cross-region snapshot configuration. - choices: [ "present", "absent" ] - default: present - type: str - region: - description: - - "The cluster's region." - required: true - aliases: [ "source" ] - type: str - destination_region: - description: - - The region to copy snapshots to. - required: true - aliases: [ "destination" ] - type: str - snapshot_copy_grant: - description: - - A grant for Amazon Redshift to use a master key in the I(destination_region). - - See U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.create_snapshot_copy_grant) - aliases: [ "copy_grant" ] - type: str - snapshot_retention_period: - description: - - The number of days to keep cross-region snapshots for. 
- required: true - aliases: [ "retention_period" ] - type: int -extends_documentation_fragment: - - amazon.aws.region.modules - - amazon.aws.common.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: configure cross-region snapshot on cluster `johniscool` - community.aws.redshift_cross_region_snapshots: - cluster_name: johniscool - state: present - region: us-east-1 - destination_region: us-west-2 - retention_period: 1 - -- name: configure cross-region snapshot on kms-encrypted cluster - community.aws.redshift_cross_region_snapshots: - cluster_name: whatever - state: present - region: us-east-1 - destination: us-west-2 - copy_grant: 'my-grant-in-destination' - retention_period: 10 - -- name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize) - community.aws.redshift_cross_region_snapshots: - cluster_name: whatever - state: absent - region: us-east-1 - destination_region: us-west-2 -""" - -RETURN = r""" # """ - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class SnapshotController(object): - def __init__(self, client, cluster_name): - self.client = client - self.cluster_name = cluster_name - - def get_cluster_snapshot_copy_status(self): - response = self.client.describe_clusters(ClusterIdentifier=self.cluster_name) - return response["Clusters"][0].get("ClusterSnapshotCopyStatus") - - def enable_snapshot_copy(self, destination_region, grant_name, retention_period): - if grant_name: - self.client.enable_snapshot_copy( - ClusterIdentifier=self.cluster_name, - DestinationRegion=destination_region, - RetentionPeriod=retention_period, - SnapshotCopyGrantName=grant_name, - ) - else: - self.client.enable_snapshot_copy( - ClusterIdentifier=self.cluster_name, - DestinationRegion=destination_region, - RetentionPeriod=retention_period, - ) - - def disable_snapshot_copy(self): - self.client.disable_snapshot_copy(ClusterIdentifier=self.cluster_name) - - def modify_snapshot_copy_retention_period(self, retention_period): - self.client.modify_snapshot_copy_retention_period( - ClusterIdentifier=self.cluster_name, RetentionPeriod=retention_period - ) - - -def requesting_unsupported_modifications(actual, requested): - if ( - actual["SnapshotCopyGrantName"] != requested["snapshot_copy_grant"] - or actual["DestinationRegion"] != requested["destination_region"] - ): - return True - return False - - -def needs_update(actual, requested): - if actual["RetentionPeriod"] != requested["snapshot_retention_period"]: - return True - return False - - -def run_module(): - argument_spec = dict( - cluster_name=dict(type="str", required=True, aliases=["cluster"]), - state=dict(type="str", choices=["present", "absent"], default="present"), - region=dict(type="str", required=True, aliases=["source"]), - destination_region=dict(type="str", required=True, aliases=["destination"]), - snapshot_copy_grant=dict(type="str", aliases=["copy_grant"]), - snapshot_retention_period=dict(type="int", required=True, aliases=["retention_period"]), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - result = dict( - changed=False, - message="", - ) - connection = module.client("redshift") - - snapshot_controller = SnapshotController(client=connection, cluster_name=module.params.get("cluster_name")) - - current_config = snapshot_controller.get_cluster_snapshot_copy_status() - if current_config is not None: - if module.params.get("state") == "present": - if 
requesting_unsupported_modifications(current_config, module.params): - message = ( - "Cannot modify destination_region or grant_name. Please disable cross-region snapshots, and re-run." - ) - module.fail_json(msg=message, **result) - if needs_update(current_config, module.params): - result["changed"] = True - if not module.check_mode: - snapshot_controller.modify_snapshot_copy_retention_period( - module.params.get("snapshot_retention_period") - ) - else: - result["changed"] = True - if not module.check_mode: - snapshot_controller.disable_snapshot_copy() - else: - if module.params.get("state") == "present": - result["changed"] = True - if not module.check_mode: - snapshot_controller.enable_snapshot_copy( - module.params.get("destination_region"), - module.params.get("snapshot_copy_grant"), - module.params.get("snapshot_retention_period"), - ) - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == "__main__": - main() diff --git a/redshift_info.py b/redshift_info.py deleted file mode 100644 index 2a346167e24..00000000000 --- a/redshift_info.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: redshift_info -version_added: 1.0.0 -author: - - "Jens Carl (@j-carl)" -short_description: Gather information about Redshift cluster(s) -description: - - Gather information about Redshift cluster(s). -options: - cluster_identifier: - description: - - The prefix of the cluster identifier of the Redshift cluster you are searching for. - - "This is a regular expression match with implicit '^'. Append '$' for a complete match." - required: false - aliases: ['name', 'identifier'] - type: str - tags: - description: - - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } - to match against the cluster(s) you are searching for." - required: false - type: dict -extends_documentation_fragment: - - amazon.aws.region.modules - - amazon.aws.common.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS guide for details. - -- name: Find all clusters - community.aws.redshift_info: - register: redshift - -- name: Find cluster(s) with matching tags - community.aws.redshift_info: - tags: - env: prd - stack: monitoring - register: redshift_tags - -- name: Find cluster(s) with matching name/prefix and tags - community.aws.redshift_info: - tags: - env: dev - stack: web - name: user- - register: redshift_web - -- name: Fail if no cluster(s) are found - community.aws.redshift_info: - tags: - env: stg - stack: db - register: redshift_user - failed_when: "{{ redshift_user.results | length == 0 }}" -""" - -RETURN = r""" -# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters) ---- -cluster_identifier: - description: Unique key to identify the cluster. - returned: success - type: str - sample: "redshift-identifier" -node_type: - description: The node type for nodes in the cluster. - returned: success - type: str - sample: "ds2.xlarge" -cluster_status: - description: Current state of the cluster. - returned: success - type: str - sample: "available" -modify_status: - description: The status of a modify operation.
- returned: optional - type: str - sample: "" -master_username: - description: The master user name for the cluster. - returned: success - type: str - sample: "admin" -db_name: - description: The name of the initial database that was created when the cluster was created. - returned: success - type: str - sample: "dev" -endpoint: - description: The connection endpoint. - returned: success - type: str - sample: { - "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com", - "port": 5439 - } -cluster_create_time: - description: The date and time that the cluster was created. - returned: success - type: str - sample: "2016-05-10T08:33:16.629000+00:00" -automated_snapshot_retention_period: - description: The number of days that automatic cluster snapshots are retained. - returned: success - type: int - sample: 1 -cluster_security_groups: - description: A list of cluster security groups that are associated with the cluster. - returned: success - type: list - sample: [] -vpc_security_groups: - description: A list of VPC security groups that are associated with the cluster. - returned: success - type: list - sample: [ - { - "status": "active", - "vpc_security_group_id": "sg-12cghhg" - } - ] -cluster_parameter_groups: - description: The list of cluster parameter groups that are associated with this cluster. - returned: success - type: list - sample: [ - { - "cluster_parameter_status_list": [ - { - "parameter_apply_status": "in-sync", - "parameter_name": "statement_timeout" - }, - { - "parameter_apply_status": "in-sync", - "parameter_name": "require_ssl" - } - ], - "parameter_apply_status": "in-sync", - "parameter_group_name": "tuba" - } - ] -cluster_subnet_group_name: - description: The name of the subnet group that is associated with the cluster. - returned: success - type: str - sample: "redshift-subnet" -vpc_id: - description: The identifier of the VPC the cluster is in, if the cluster is in a VPC. - returned: success - type: str - sample: "vpc-1234567" -availability_zone: - description: The name of the Availability Zone in which the cluster is located. - returned: success - type: str - sample: "us-east-1b" -preferred_maintenance_window: - description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur. - returned: success - type: str - sample: "tue:07:30-tue:08:00" -pending_modified_values: - description: A value that, if present, indicates that changes to the cluster are pending. - returned: success - type: dict - sample: {} -cluster_version: - description: The version ID of the Amazon Redshift engine that is running on the cluster. - returned: success - type: str - sample: "1.0" -allow_version_upgrade: - description: > - A Boolean value that, if true, indicates that major version upgrades will be applied - automatically to the cluster during the maintenance window. - returned: success - type: bool - sample: true|false -number_of_nodes: - description: The number of compute nodes in the cluster. - returned: success - type: int - sample: 12 -publicly_accessible: - description: A Boolean value that, if true, indicates that the cluster can be accessed from a public network. - returned: success - type: bool - sample: true|false -encrypted: - description: A Boolean value that, if true, indicates that data in the cluster is encrypted at rest. - returned: success - type: bool - sample: true|false -restore_status: - description: A value that describes the status of a cluster restore action.
- returned: success - type: dict - sample: {} -hsm_status: - description: > - A value that reports whether the Amazon Redshift cluster has finished applying any hardware - security module (HSM) settings changes specified in a modify cluster command. - returned: success - type: dict - sample: {} -cluster_snapshot_copy_status: - description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy. - returned: success - type: dict - sample: {} -cluster_public_key: - description: The public key for the cluster. - returned: success - type: str - sample: "ssh-rsa anjigfam Amazon-Redshift\n" -cluster_nodes: - description: The nodes in the cluster. - returned: success - type: list - sample: [ - { - "node_role": "LEADER", - "private_ip_address": "10.0.0.1", - "public_ip_address": "x.x.x.x" - }, - { - "node_role": "COMPUTE-1", - "private_ip_address": "10.0.0.3", - "public_ip_address": "x.x.x.x" - } - ] -elastic_ip_status: - description: The status of the elastic IP (EIP) address. - returned: success - type: dict - sample: {} -cluster_revision_number: - description: The specific revision number of the database in the cluster. - returned: success - type: str - sample: "1231" -tags: - description: The list of tags for the cluster. - returned: success - type: list - sample: [] -kms_key_id: - description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster. - returned: success - type: str - sample: "" -enhanced_vpc_routing: - description: An option that specifies whether to create the cluster with enhanced VPC routing enabled. - returned: success - type: bool - sample: true|false -iam_roles: - description: List of IAM roles attached to the cluster. - returned: success - type: list - sample: [] -""" - -import re - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def match_tags(tags_to_match, cluster): - # Only report a match when every requested tag is present with the requested value - cluster_tags = dict((tag["Key"], tag["Value"]) for tag in cluster["Tags"]) - return all(cluster_tags.get(key) == value for key, value in tags_to_match.items()) - - -def find_clusters(conn, module, identifier=None, tags=None): - try: - cluster_paginator = conn.get_paginator("describe_clusters") - clusters = cluster_paginator.paginate().build_full_result() - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch clusters.") - - matched_clusters = [] - - if identifier is not None: - identifier_prog = re.compile("^" + identifier) - - for cluster in clusters["Clusters"]: - matched_identifier = True - if identifier: - matched_identifier = identifier_prog.search(cluster["ClusterIdentifier"]) - - matched_tags = True - if tags: - matched_tags = match_tags(tags, cluster) - - if matched_identifier and matched_tags: - matched_clusters.append(camel_dict_to_snake_dict(cluster)) - - return matched_clusters - - -def main(): - argument_spec = dict( - cluster_identifier=dict(type="str", aliases=["identifier", "name"]), - tags=dict(type="dict"), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - cluster_identifier = module.params.get("cluster_identifier") - cluster_tags = module.params.get("tags") - - redshift =
module.client("redshift") - - results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags) - module.exit_json(results=results) - - -if __name__ == "__main__": - main() diff --git a/redshift_subnet_group.py b/redshift_subnet_group.py deleted file mode 100644 index 3d693cc23ac..00000000000 --- a/redshift_subnet_group.py +++ /dev/null @@ -1,275 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014 Jens Carl, Hothead Games Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: redshift_subnet_group -version_added: 1.0.0 -short_description: manage Redshift cluster subnet groups -description: - - Creates, modifies, and deletes Redshift cluster subnet groups. -options: - state: - description: - - Specifies whether the subnet group should be present or absent. - default: 'present' - choices: ['present', 'absent' ] - type: str - name: - description: - - Cluster subnet group name. - required: true - aliases: ['group_name'] - type: str - description: - description: - - Cluster subnet group description. - aliases: ['group_description'] - type: str - subnets: - description: - - List of subnet IDs that make up the cluster subnet group. - - At least one subnet must be provided when creating a cluster subnet group. - aliases: ['group_subnets'] - type: list - elements: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -author: - - "Jens Carl (@j-carl), Hothead Games Inc." -""" - -EXAMPLES = r""" -- name: Create a Redshift subnet group - community.aws.redshift_subnet_group: - state: present - group_name: redshift-subnet - group_description: Redshift subnet - group_subnets: - - 'subnet-aaaaa' - - 'subnet-bbbbb' - -- name: Remove subnet group - community.aws.redshift_subnet_group: - state: absent - group_name: redshift-subnet -""" - -RETURN = r""" -cluster_subnet_group: - description: A dictionary containing information about the Redshift subnet group. - returned: success - type: dict - contains: - name: - description: Name of the Redshift subnet group. - returned: when the cluster subnet group exists - type: str - sample: "redshift_subnet_group_name" - vpc_id: - description: ID of the VPC where the subnet is located. - returned: when the cluster subnet group exists - type: str - sample: "vpc-aabb1122" - description: - description: The description of the cluster subnet group. - returned: when the cluster subnet group exists - type: str - sample: Redshift subnet - subnet_ids: - description: The IDs of the subnets belonging to the Redshift subnet group.
- returned: when the cluster subnet group exists - type: list - elements: str - sample: - - subnet-aaaaaaaa - - subnet-bbbbbbbb -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_subnet_group(name): - try: - groups = client.describe_cluster_subnet_groups( - aws_retry=True, - ClusterSubnetGroupName=name, - )["ClusterSubnetGroups"] - except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): - return None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe subnet group") - - if not groups: - return None - - if len(groups) > 1: - module.fail_json( - msg="Found multiple matches for subnet group", - cluster_subnet_groups=camel_dict_to_snake_dict(groups), - ) - - # No support for managing tags yet, but return them now so that the return - # value structure won't need to change once tag management is released. - tags = boto3_tag_list_to_ansible_dict(groups[0]["Tags"]) - - subnet_group = camel_dict_to_snake_dict(groups[0]) - - subnet_group["tags"] = tags - subnet_group["name"] = subnet_group["cluster_subnet_group_name"] - - subnet_ids = [s["subnet_identifier"] for s in subnet_group["subnets"]] - subnet_group["subnet_ids"] = subnet_ids - - return subnet_group - - -def create_subnet_group(name, description, subnets): - if not subnets: - module.fail_json(msg="At least one subnet must be provided when creating a subnet group") - - if module.check_mode: - return True - - try: - if not description: - description = name - client.create_cluster_subnet_group( - aws_retry=True, - ClusterSubnetGroupName=name, - Description=description, - SubnetIds=subnets, - ) - return True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to create subnet group") - - -def update_subnet_group(subnet_group, name, description, subnets): - update_params = dict() - if description and subnet_group["description"] != description: - update_params["Description"] = description - if subnets: - old_subnets = set(subnet_group["subnet_ids"]) - new_subnets = set(subnets) - if old_subnets != new_subnets: - update_params["SubnetIds"] = list(subnets) - - if not update_params: - return False - - if module.check_mode: - return True - - # Description is optional, SubnetIds is not - if "SubnetIds" not in update_params: - update_params["SubnetIds"] = subnet_group["subnet_ids"] - - try: - client.modify_cluster_subnet_group( - aws_retry=True, - ClusterSubnetGroupName=name, - **update_params, - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update subnet group") - - return True - - -def delete_subnet_group(name): - if module.check_mode: - return True - - try: - client.delete_cluster_subnet_group( - aws_retry=True, - ClusterSubnetGroupName=name, - ) - return True - except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"): - # AWS is
"eventually consistent", cope with the race conditions where - # deletion hadn't completed when we ran describe - return False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete subnet group") - - -def main(): - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - name=dict(required=True, aliases=["group_name"]), - description=dict(required=False, aliases=["group_description"]), - subnets=dict(required=False, aliases=["group_subnets"], type="list", elements="str"), - ) - - global module - global client - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - state = module.params.get("state") - name = module.params.get("name") - description = module.params.get("description") - subnets = module.params.get("subnets") - - client = module.client("redshift", retry_decorator=AWSRetry.jittered_backoff()) - - subnet_group = get_subnet_group(name) - changed = False - - if state == "present": - if not subnet_group: - result = create_subnet_group(name, description, subnets) - changed |= result - else: - result = update_subnet_group(subnet_group, name, description, subnets) - changed |= result - subnet_group = get_subnet_group(name) - else: - if subnet_group: - result = delete_subnet_group(name) - changed |= result - subnet_group = None - - compat_results = dict() - if subnet_group: - compat_results["group"] = dict( - name=subnet_group["name"], - vpc_id=subnet_group["vpc_id"], - ) - - module.exit_json( - changed=changed, - cluster_subnet_group=subnet_group, - **compat_results, - ) - - -if __name__ == "__main__": - main() diff --git a/route53_wait.py b/route53_wait.py deleted file mode 100644 index 6b72681d4c1..00000000000 --- a/route53_wait.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2023, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: route53_wait -version_added: 6.3.0 -short_description: wait for changes in Amazons Route 53 DNS service to propagate -description: - - When using M(amazon.aws.route53) with I(wait=false), this module allows to wait for the - module's propagation to finish at a later point of time. -options: - result: - aliases: - - results - description: - - The registered result of one or multiple M(amazon.aws.route53) invocations. - required: true - type: dict - wait_timeout: - description: - - How long to wait for the changes to be replicated, in seconds. - - This timeout will be used for every changed result in I(result). - default: 300 - type: int - region: - description: - - This setting is ignored by the module. It is only present to make it possible to - have I(region) present in the module default group. 
- type: str -author: - - Felix Fontein (@felixfontein) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -# -""" - -EXAMPLES = r""" -# Example when using a single route53 invocation: - -- name: Add new.foo.com as an A record with 3 IPs - amazon.aws.route53: - state: present - zone: foo.com - record: new.foo.com - type: A - ttl: 7200 - value: - - 1.1.1.1 - - 2.2.2.2 - - 3.3.3.3 - register: module_result - -# do something else - -- name: Wait for the changes of the above route53 invocation to propagate - community.aws.route53_wait: - result: "{{ module_result }}" - -######################################################################### -# Example when using a loop over amazon.aws.route53: - -- name: Add various A records - amazon.aws.route53: - state: present - zone: foo.com - record: "{{ item.record }}" - type: A - ttl: 300 - value: "{{ item.value }}" - loop: - - record: new.foo.com - value: 1.1.1.1 - - record: foo.foo.com - value: 2.2.2.2 - - record: bar.foo.com - value: - - 3.3.3.3 - - 4.4.4.4 - register: module_results - -# do something else - -- name: Wait for the changes of the above three route53 invocations to propagate - community.aws.route53_wait: - results: "{{ module_results }}" -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_native - -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - -WAIT_RETRY = 5 # how many seconds to wait between propagation status polls - - -def detect_task_results(results): - if "results" in results: - # This must be the registered result of a loop of route53 tasks - for key in ("changed", "msg", "skipped"): - if key not in results: - raise ValueError(f"missing {key} key") - if not isinstance(results["results"], list): - raise ValueError("results is present, but not a list") - for index, result in enumerate(results["results"]): - if not isinstance(result, dict): - raise ValueError(f"result {index + 1} is not a dictionary") - for key in ("changed", "failed", "ansible_loop_var", "invocation"): - if key not in result: - raise ValueError(f"missing {key} key for result {index + 1}") - yield f" for result #{index + 1}", result - return - # This must be a single route53 task - for key in ("changed", "failed"): - if key not in results: - raise ValueError(f"missing {key} key") - yield "", results - - -def main(): - argument_spec = dict( - result=dict(type="dict", required=True, aliases=["results"]), - wait_timeout=dict(type="int", default=300), - region=dict(type="str"), # ignored - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - result_in = module.params["result"] - wait_timeout_in = module.params.get("wait_timeout") - - changed_results = [] - try: - for id, result in detect_task_results(result_in): - if result.get("wait_id"): - changed_results.append((id, result["wait_id"])) - except ValueError as exc: - module.fail_json( - msg=f"The value passed as result does not seem to be a registered route53 result: {to_native(exc)}" - ) - - # connect to the route53 endpoint - try: - route53 = module.client("route53") - except botocore.exceptions.HTTPClientError as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - for what, wait_id in changed_results: - try: - waiter = get_waiter(route53, "resource_record_sets_changed") 
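- # Poll the change status every WAIT_RETRY (5) seconds; deriving MaxAttempts - # from wait_timeout gives each changed record set its own full wait_timeout window.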
- waiter.wait( - Id=wait_id, - WaiterConfig=dict( - Delay=WAIT_RETRY, - MaxAttempts=wait_timeout_in // WAIT_RETRY, - ), - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg=f"Timeout waiting for resource records changes{what} to be applied") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to update records") - except Exception as e: - module.fail_json(msg=f"Unhandled exception. ({to_native(e)})") - - module.exit_json(changed=False) - - -if __name__ == "__main__": - main() diff --git a/s3_bucket_info.py b/s3_bucket_info.py deleted file mode 100644 index ee4c0e2dd3f..00000000000 --- a/s3_bucket_info.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_bucket_info -version_added: 1.0.0 -author: - - "Gerben Geijteman (@hyperized)" -short_description: Lists S3 buckets in AWS -description: - - Lists S3 buckets and details about those buckets. - - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info). - The usage did not change. -options: - name: - description: - - Name of bucket to query. - type: str - default: "" - version_added: 1.4.0 - name_filter: - description: - - Limits buckets to only buckets whose name contains the string in I(name_filter). - type: str - default: "" - version_added: 1.4.0 - bucket_facts: - description: - - Retrieve requested S3 bucket detailed information. - - Each bucket_X option executes one API call, so setting many options to C(true) will slow module execution. - - You can limit buckets by using the I(name) or I(name_filter) option. - suboptions: - bucket_accelerate_configuration: - description: Retrieve S3 accelerate configuration. - type: bool - default: False - bucket_location: - description: Retrieve S3 bucket location. - type: bool - default: False - bucket_replication: - description: Retrieve S3 bucket replication. - type: bool - default: False - bucket_acl: - description: Retrieve S3 bucket ACLs. - type: bool - default: False - bucket_logging: - description: Retrieve S3 bucket logging. - type: bool - default: False - bucket_request_payment: - description: Retrieve S3 bucket request payment. - type: bool - default: False - bucket_tagging: - description: Retrieve S3 bucket tagging. - type: bool - default: False - bucket_cors: - description: Retrieve S3 bucket CORS configuration. - type: bool - default: False - bucket_notification_configuration: - description: Retrieve S3 bucket notification configuration. - type: bool - default: False - bucket_encryption: - description: Retrieve S3 bucket encryption. - type: bool - default: False - bucket_ownership_controls: - description: - - Retrieve S3 ownership controls. - type: bool - default: False - bucket_website: - description: Retrieve S3 bucket website. - type: bool - default: False - bucket_policy: - description: Retrieve S3 bucket policy. - type: bool - default: False - bucket_policy_status: - description: Retrieve S3 bucket policy status. - type: bool - default: False - bucket_lifecycle_configuration: - description: Retrieve S3 bucket lifecycle configuration. - type: bool - default: False - public_access_block: - description: Retrieve S3 bucket public access block.
- type: bool - default: False - type: dict - version_added: 1.4.0 - transform_location: - description: - - S3 bucket location for default us-east-1 is normally reported as C(null). - - Setting this option to C(true) will return C(us-east-1) instead. - - Affects only queries with I(bucket_facts=true) and I(bucket_location=true). - type: bool - default: False - version_added: 1.4.0 -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Note: Only AWS S3 is currently supported - -# Lists all S3 buckets -- community.aws.s3_bucket_info: - register: result - -# Retrieve detailed bucket information -- community.aws.s3_bucket_info: - # Show only buckets with name matching - name_filter: your.testing - # Choose facts to retrieve - bucket_facts: - # bucket_accelerate_configuration: true - bucket_acl: true - bucket_cors: true - bucket_encryption: true - # bucket_lifecycle_configuration: true - bucket_location: true - # bucket_logging: true - # bucket_notification_configuration: true - # bucket_ownership_controls: true - # bucket_policy: true - # bucket_policy_status: true - # bucket_replication: true - # bucket_request_payment: true - # bucket_tagging: true - # bucket_website: true - # public_access_block: true - transform_location: true - register: result - -# Print out result -- name: List buckets - ansible.builtin.debug: - msg: "{{ result['buckets'] }}" -""" - -RETURN = r""" -bucket_list: - description: "List of buckets" - returned: always - type: complex - contains: - name: - description: Bucket name. - returned: always - type: str - sample: a-testing-bucket-name - creation_date: - description: Bucket creation date timestamp. - returned: always - type: str - sample: "2021-01-21T12:44:10+00:00" - public_access_block: - description: Bucket public access block configuration. - returned: when I(bucket_facts=true) and I(public_access_block=true) - type: complex - contains: - PublicAccessBlockConfiguration: - description: PublicAccessBlockConfiguration data. - returned: when PublicAccessBlockConfiguration is defined for the bucket - type: complex - contains: - BlockPublicAcls: - description: BlockPublicAcls setting value. - type: bool - sample: true - BlockPublicPolicy: - description: BlockPublicPolicy setting value. - type: bool - sample: true - IgnorePublicAcls: - description: IgnorePublicAcls setting value. - type: bool - sample: true - RestrictPublicBuckets: - description: RestrictPublicBuckets setting value. - type: bool - sample: true - bucket_name_filter: - description: String used to limit buckets. See I(name_filter). - returned: when I(name_filter) is defined - type: str - sample: filter-by-this-string - bucket_acl: - description: Bucket ACL configuration. - returned: when I(bucket_facts=true) and I(bucket_acl=true) - type: complex - contains: - Grants: - description: List of ACL grants. - type: list - sample: [] - Owner: - description: Bucket owner information. - type: complex - contains: - DisplayName: - description: Bucket owner user display name. - returned: always - type: str - sample: username - ID: - description: Bucket owner user ID. - returned: always - type: str - sample: 123894e509349etc - bucket_cors: - description: Bucket CORS configuration. - returned: when I(bucket_facts=true) and I(bucket_cors=true) - type: complex - contains: - CORSRules: - description: Bucket CORS configuration. 
- returned: when CORS rules are defined for the bucket - type: list - sample: [] - bucket_encryption: - description: Bucket encryption configuration. - returned: when I(bucket_facts=true) and I(bucket_encryption=true) - type: complex - contains: - ServerSideEncryptionConfiguration: - description: ServerSideEncryptionConfiguration configuration. - returned: when encryption is enabled on the bucket - type: complex - contains: - Rules: - description: List of applied encryption rules. - returned: when encryption is enabled on the bucket - type: list - sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False } - bucket_lifecycle_configuration: - description: Bucket lifecycle configuration settings. - returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true) - type: complex - contains: - Rules: - description: List of lifecycle management rules. - returned: when lifecycle configuration is present - type: list - sample: [{ "Status": "Enabled", "ID": "example-rule" }] - bucket_location: - description: Bucket location. - returned: when I(bucket_facts=true) and I(bucket_location=true) - type: complex - contains: - LocationConstraint: - description: AWS region. - returned: always - type: str - sample: us-east-2 - bucket_logging: - description: Server access logging configuration. - returned: when I(bucket_facts=true) and I(bucket_logging=true) - type: complex - contains: - LoggingEnabled: - description: Server access logging configuration. - returned: when server access logging is defined for the bucket - type: complex - contains: - TargetBucket: - description: Target bucket name. - returned: always - type: str - sample: logging-bucket-name - TargetPrefix: - description: Prefix in target bucket. - returned: always - type: str - sample: "" - bucket_notification_configuration: - description: Bucket notification settings. - returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true) - type: complex - contains: - TopicConfigurations: - description: List of notification events configurations. - returned: when at least one notification is configured - type: list - sample: [] - bucket_ownership_controls: - description: Preferred object ownership settings. - returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true) - type: complex - contains: - OwnershipControls: - description: Object ownership settings. - returned: when ownership controls are defined for the bucket - type: complex - contains: - Rules: - description: List of ownership rules. - returned: when ownership rule is defined - type: list - sample: [{ "ObjectOwnership": "ObjectWriter" }] - bucket_policy: - description: Bucket policy contents. - returned: when I(bucket_facts=true) and I(bucket_policy=true) - type: str - sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}}]}' - bucket_policy_status: - description: Status of bucket policy. - returned: when I(bucket_facts=true) and I(bucket_policy_status=true) - type: complex - contains: - PolicyStatus: - description: Status of bucket policy. - returned: when bucket policy is present - type: complex - contains: - IsPublic: - description: Report bucket policy public status. - returned: when bucket policy is present - type: bool - sample: True - bucket_replication: - description: Replication configuration settings. - returned: when I(bucket_facts=true) and I(bucket_replication=true) - type: complex - contains: - Role: - description: IAM role used for replication.
- returned: when replication rule is defined - type: str - sample: "arn:aws:iam::123:role/example-role" - Rules: - description: List of replication rules. - returned: when replication rule is defined - type: list - sample: [{ "ID": "rule-1", "Filter": "{}" }] - bucket_request_payment: - description: Requester pays setting. - returned: when I(bucket_facts=true) and I(bucket_request_payment=true) - type: complex - contains: - Payer: - description: Current payer. - returned: always - type: str - sample: BucketOwner - bucket_tagging: - description: Bucket tags. - returned: when I(bucket_facts=true) and I(bucket_tagging=true) - type: dict - sample: { "Tag1": "Value1", "Tag2": "Value2" } - bucket_website: - description: Static website hosting. - returned: when I(bucket_facts=true) and I(bucket_website=true) - type: complex - contains: - ErrorDocument: - description: Object serving as HTTP error page. - returned: when static website hosting is enabled - type: dict - sample: { "Key": "error.html" } - IndexDocument: - description: Object serving as HTTP index page. - returned: when static website hosting is enabled - type: dict - sample: { "Suffix": "index.html" } - RedirectAllRequestsTo: - description: Website redirect settings. - returned: when redirect requests is configured - type: complex - contains: - HostName: - description: Hostname to redirect. - returned: always - type: str - sample: www.example.com - Protocol: - description: Protocol used for redirect. - returned: always - type: str - sample: https -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_bucket_list(module, connection, name="", name_filter=""): - """ - Return the result of list_buckets, JSON encoded. - Filter only buckets matching 'name' or name_filter if defined - :param module: - :param connection: - :return: - """ - buckets = [] - filtered_buckets = [] - final_buckets = [] - - # Get all buckets - try: - buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: - module.fail_json_aws(err_code, msg="Failed to list buckets") - - # Filter buckets if requested - if name_filter: - for bucket in buckets: - if name_filter in bucket["name"]: - filtered_buckets.append(bucket) - elif name: - for bucket in buckets: - if name == bucket["name"]: - filtered_buckets.append(bucket) - - # Return proper list (filtered or all) - if name or name_filter: - final_buckets = filtered_buckets - else: - final_buckets = buckets - return final_buckets - - -def get_buckets_facts(connection, buckets, requested_facts, transform_location): - """ - Retrieve additional information about S3 buckets - """ - full_bucket_list = [] - # Iterate over all buckets and append retrieved facts to bucket - for bucket in buckets: - bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location)) - full_bucket_list.append(bucket) - - return full_bucket_list - - -def get_bucket_details(connection, name, requested_facts, transform_location): - """ - Execute all enabled S3API get calls for the selected bucket
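- Each truthy key in requested_facts triggers one S3 API call. The bucket_location - and bucket_tagging keys use dedicated helpers below; every other key is dispatched - generically via get_bucket_property, which calls the client method named - "get_" + key (for example, bucket_acl -> get_bucket_acl).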
- """ - all_facts = {} - - for key in requested_facts: - if requested_facts[key]: - if key == "bucket_location": - all_facts[key] = {} - try: - all_facts[key] = get_bucket_location(name, connection, transform_location) - # we just pass on error - error means that resources is undefined - except botocore.exceptions.ClientError: - pass - elif key == "bucket_tagging": - all_facts[key] = {} - try: - all_facts[key] = get_bucket_tagging(name, connection) - # we just pass on error - error means that resources is undefined - except botocore.exceptions.ClientError: - pass - else: - all_facts[key] = {} - try: - all_facts[key] = get_bucket_property(name, connection, key) - # we just pass on error - error means that resources is undefined - except botocore.exceptions.ClientError: - pass - - return all_facts - - -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) -def get_bucket_location(name, connection, transform_location=False): - """ - Get bucket location and optionally transform 'null' to 'us-east-1' - """ - data = connection.get_bucket_location(Bucket=name) - - # Replace 'null' with 'us-east-1'? - if transform_location: - try: - if not data["LocationConstraint"]: - data["LocationConstraint"] = "us-east-1" - except KeyError: - pass - # Strip response metadata (not needed) - data.pop("ResponseMetadata", None) - return data - - -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) -def get_bucket_tagging(name, connection): - """ - Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function - """ - data = connection.get_bucket_tagging(Bucket=name) - - try: - bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"]) - return bucket_tags - except KeyError: - # Strip response metadata (not needed) - data.pop("ResponseMetadata", None) - return data - - -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) -def get_bucket_property(name, connection, get_api_name): - """ - Get bucket property - """ - api_call = "get_" + get_api_name - api_function = getattr(connection, api_call) - data = api_function(Bucket=name) - - # Strip response metadata (not needed) - data.pop("ResponseMetadata", None) - return data - - -def main(): - """ - Get list of S3 buckets - :return: - """ - argument_spec = dict( - name=dict(type="str", default=""), - name_filter=dict(type="str", default=""), - bucket_facts=dict( - type="dict", - options=dict( - bucket_accelerate_configuration=dict(type="bool", default=False), - bucket_acl=dict(type="bool", default=False), - bucket_cors=dict(type="bool", default=False), - bucket_encryption=dict(type="bool", default=False), - bucket_lifecycle_configuration=dict(type="bool", default=False), - bucket_location=dict(type="bool", default=False), - bucket_logging=dict(type="bool", default=False), - bucket_notification_configuration=dict(type="bool", default=False), - bucket_ownership_controls=dict(type="bool", default=False), - bucket_policy=dict(type="bool", default=False), - bucket_policy_status=dict(type="bool", default=False), - bucket_replication=dict(type="bool", default=False), - bucket_request_payment=dict(type="bool", default=False), - bucket_tagging=dict(type="bool", default=False), - bucket_website=dict(type="bool", default=False), - public_access_block=dict(type="bool", default=False), - ), - ), - transform_location=dict(type="bool", default=False), - ) - - # Ensure we have an empty dict - result = {} - - # 
Define mutually exclusive options - mutually_exclusive = [ - ["name", "name_filter"], - ] - - # Including ec2 argument spec - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - ) - - # Get parameters - name = module.params.get("name") - name_filter = module.params.get("name_filter") - requested_facts = module.params.get("bucket_facts") - transform_location = module.params.get("transform_location") - - # Set up connection - try: - connection = module.client("s3") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code: - module.fail_json_aws(err_code, msg="Failed to connect to AWS") - - # Get basic bucket list (name + creation date) - bucket_list = get_bucket_list(module, connection, name, name_filter) - - # Add information about name/name_filter to result - if name: - result["bucket_name"] = name - elif name_filter: - result["bucket_name_filter"] = name_filter - - # Gather detailed information about buckets if requested - bucket_facts = module.params.get("bucket_facts") - if bucket_facts: - result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) - else: - result["buckets"] = bucket_list - - module.exit_json(msg="Retrieved s3 info.", **result) - - -# MAIN -if __name__ == "__main__": - main() diff --git a/s3_bucket_notification.py b/s3_bucket_notification.py deleted file mode 100644 index 1045164dce3..00000000000 --- a/s3_bucket_notification.py +++ /dev/null @@ -1,391 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Ansible Project -# (c) 2019, XLAB d.o.o -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_bucket_notification -version_added: 1.0.0 -short_description: Creates, updates or deletes S3 Bucket notifications targeting Lambda functions, SNS or SQS. -description: - - This module supports the creation, updates and deletions of S3 bucket notification profiles targeting - either Lambda functions, SNS topics or SQS queues. - - The target for the notifications must already exist. For Lambdas, use module M(community.aws.lambda) - to manage the Lambda function itself, M(community.aws.lambda_alias) - to manage function aliases and M(community.aws.lambda_policy) to modify Lambda permissions. - For SNS or SQS, use M(community.aws.sns_topic) or M(community.aws.sqs_queue). -notes: - - If using a Lambda function as the target, a Lambda policy is also needed; use - M(community.aws.lambda_policy) to allow C(lambda:InvokeFunction) for the notification. -author: - - XLAB d.o.o. (@xlab-si) - - Aljaz Kosir (@aljazkosir) - - Miha Plesko (@miha-plesko) - - Mark Woolley (@marknet15) -options: - event_name: - description: - - Unique name for event notification on bucket. - required: true - type: str - bucket_name: - description: - - S3 bucket name. - required: true - type: str - state: - description: - - Describes the desired state. - default: "present" - choices: ["present", "absent"] - type: str - queue_arn: - description: - - The ARN of the SQS queue. - - Mutually exclusive with I(topic_arn) and I(lambda_function_arn). - type: str - version_added: 3.2.0 - topic_arn: - description: - - The ARN of the SNS topic. - - Mutually exclusive with I(queue_arn) and I(lambda_function_arn). - type: str - version_added: 3.2.0 - lambda_function_arn: - description: - - The ARN of the lambda function.
- - Mutually exclusive with I(queue_arn) and I(topic_arn). - aliases: ['function_arn'] - type: str - lambda_alias: - description: - - Name of the Lambda function alias. - - Mutually exclusive with I(lambda_version). - type: str - lambda_version: - description: - - Version of the Lambda function. - - Mutually exclusive with I(lambda_alias). - type: int - default: 0 - events: - description: - - Events that will be triggering a notification. You can select multiple events to send - to the same destination, you can set up different events to send to different destinations, - and you can set up a prefix or suffix for an event. However, for each bucket, - individual events cannot have multiple configurations with overlapping prefixes or - suffixes that could match the same object key. - - Required when I(state=present). - choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', - 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', - 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', - 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post', - 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'] - type: list - elements: str - default: [] - prefix: - description: - - Optional prefix to limit the notifications to objects with keys that start with matching - characters. - type: str - default: '' - suffix: - description: - - Optional suffix to limit the notifications to objects with keys that end with matching - characters. - type: str - default: '' -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" ---- -# Examples adding notification target configs to a S3 bucket -- name: Setup bucket event notification to a Lambda function - community.aws.s3_bucket_notification: - state: present - event_name: on_file_add_or_remove - bucket_name: test-bucket - lambda_function_arn: arn:aws:lambda:us-east-2:123456789012:function:test-lambda - events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] - prefix: images/ - suffix: .jpg - -- name: Setup bucket event notification to SQS - community.aws.s3_bucket_notification: - state: present - event_name: on_file_add_or_remove - bucket_name: test-bucket - queue_arn: arn:aws:sqs:us-east-2:123456789012:test-queue - events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] - prefix: images/ - suffix: .jpg - -# Example removing an event notification -- name: Remove event notification - community.aws.s3_bucket_notification: - state: absent - event_name: on_file_add_or_remove - bucket_name: test-bucket -""" - -RETURN = r""" -notification_configuration: - description: dictionary of currently applied notifications - returned: success - type: complex - contains: - lambda_function_configurations: - description: - - List of current Lambda function notification configurations applied to the bucket. - type: list - queue_configurations: - description: - - List of current SQS notification configurations applied to the bucket. - type: list - topic_configurations: - description: - - List of current SNS notification configurations applied to the bucket. 
- type: list -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # will be protected by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class AmazonBucket: - def __init__(self, module, client): - self.module = module - self.client = client - self.bucket_name = module.params["bucket_name"] - self.check_mode = module.check_mode - self._full_config_cache = None - - def full_config(self): - if self._full_config_cache is None: - self._full_config_cache = dict( - QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[] - ) - - try: - config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) - except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg=f"{e}") - - # Handle different event targets - if config_lookup.get("QueueConfigurations"): - for queue_config in config_lookup.get("QueueConfigurations"): - self._full_config_cache["QueueConfigurations"].append(Config.from_api(queue_config)) - - if config_lookup.get("TopicConfigurations"): - for topic_config in config_lookup.get("TopicConfigurations"): - self._full_config_cache["TopicConfigurations"].append(Config.from_api(topic_config)) - - if config_lookup.get("LambdaFunctionConfigurations"): - for function_config in config_lookup.get("LambdaFunctionConfigurations"): - self._full_config_cache["LambdaFunctionConfigurations"].append(Config.from_api(function_config)) - - return self._full_config_cache - - def current_config(self, config_name): - # Iterate through configs and get current event config - for target_configs in self.full_config(): - for config in self.full_config()[target_configs]: - if config.raw["Id"] == config_name: - return config - - def apply_config(self, desired): - configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) - - # Iterate through existing configs then add the desired config - for target_configs in self.full_config(): - for config in self.full_config()[target_configs]: - if config.name != desired.raw["Id"]: - configs[target_configs].append(config.raw) - - if self.module.params.get("queue_arn"): - configs["QueueConfigurations"].append(desired.raw) - if self.module.params.get("topic_arn"): - configs["TopicConfigurations"].append(desired.raw) - if self.module.params.get("lambda_function_arn"): - configs["LambdaFunctionConfigurations"].append(desired.raw) - - self._upload_bucket_config(configs) - return configs - - def delete_config(self, desired): - configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) - - # Iterate through existing configs omitting specified config - for target_configs in self.full_config(): - for config in self.full_config()[target_configs]: - if config.name != desired.raw["Id"]: - configs[target_configs].append(config.raw) - - self._upload_bucket_config(configs) - return configs - - def _upload_bucket_config(self, configs): - api_params = dict(Bucket=self.bucket_name, NotificationConfiguration=dict()) - - # Iterate through available configs - for target_configs in configs: - if len(configs[target_configs]) > 0: - api_params["NotificationConfiguration"][target_configs] = configs[target_configs] - - if not self.check_mode: - try: - 
self.client.put_bucket_notification_configuration(**api_params) - except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg=f"{e}") - - -class Config: - def __init__(self, content): - self._content = content - self.name = content.get("Id") - - @property - def raw(self): - return self._content - - def __eq__(self, other): - if other: - return self.raw == other.raw - return False - - @classmethod - def from_params(cls, **params): - """Generate bucket notification params for target""" - - bucket_event_params = dict( - Id=params["event_name"], - Events=sorted(params["events"]), - Filter=dict( - Key=dict( - FilterRules=[ - dict(Name="Prefix", Value=params["prefix"]), - dict(Name="Suffix", Value=params["suffix"]), - ] - ) - ), - ) - - # Handle different event targets - if params.get("queue_arn"): - bucket_event_params["QueueArn"] = params["queue_arn"] - if params.get("topic_arn"): - bucket_event_params["TopicArn"] = params["topic_arn"] - if params.get("lambda_function_arn"): - function_arn = params["lambda_function_arn"] - - qualifier = None - if params["lambda_version"] > 0: - qualifier = str(params["lambda_version"]) - elif params["lambda_alias"]: - qualifier = str(params["lambda_alias"]) - if qualifier: - params["lambda_function_arn"] = f"{function_arn}:{qualifier}" - - bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] - - return cls(bucket_event_params) - - @classmethod - def from_api(cls, config): - return cls(config) - - -def setup_module_object(): - event_types = [ - "s3:ObjectCreated:*", - "s3:ObjectCreated:Put", - "s3:ObjectCreated:Post", - "s3:ObjectCreated:Copy", - "s3:ObjectCreated:CompleteMultipartUpload", - "s3:ObjectRemoved:*", - "s3:ObjectRemoved:Delete", - "s3:ObjectRemoved:DeleteMarkerCreated", - "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed", - "s3:ReducedRedundancyLostObject", - ] - - argument_spec = dict( - state=dict(default="present", choices=["present", "absent"]), - event_name=dict(required=True), - lambda_function_arn=dict(aliases=["function_arn"]), - queue_arn=dict(type="str"), - topic_arn=dict(type="str"), - bucket_name=dict(required=True), - events=dict(type="list", default=[], choices=event_types, elements="str"), - prefix=dict(default=""), - suffix=dict(default=""), - lambda_alias=dict(), - lambda_version=dict(type="int", default=0), - ) - - mutually_exclusive = [ - ["queue_arn", "topic_arn", "lambda_function_arn"], - ["lambda_alias", "lambda_version"], - ] - - return AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_if=[["state", "present", ["events"]]], - ) - - -def main(): - module = setup_module_object() - - client = module.client("s3") - bucket = AmazonBucket(module, client) - current = bucket.current_config(module.params["event_name"]) - desired = Config.from_params(**module.params) - - notification_configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]) - - for target_configs in bucket.full_config(): - for cfg in bucket.full_config()[target_configs]: - notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw)) - - state = module.params["state"] - updated_configuration = dict() - changed = False - - if state == "present": - if current != desired: - updated_configuration = bucket.apply_config(desired) - changed = True - elif state == "absent": - if current: - updated_configuration = bucket.delete_config(desired) - changed = True - - for target_configs in updated_configuration: - 
notification_configs[target_configs] = [] - for cfg in updated_configuration.get(target_configs, list()): - notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg)) - - module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(notification_configs)) - - -if __name__ == "__main__": - main() diff --git a/s3_cors.py b/s3_cors.py deleted file mode 100644 index d153c7df823..00000000000 --- a/s3_cors.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_cors -version_added: 1.0.0 -short_description: Manage CORS for S3 buckets in AWS -description: - - Manage CORS for S3 buckets in AWS. - - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_cors). - The usage did not change. -author: - - "Oyvind Saltvik (@fivethreeo)" -options: - name: - description: - - Name of the S3 bucket. - required: true - type: str - rules: - description: - - CORS rules to put on the S3 bucket. - type: list - elements: dict - state: - description: - - Create or remove CORS configuration on the S3 bucket. - required: true - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Create a simple CORS configuration for an S3 bucket -- community.aws.s3_cors: - name: mys3bucket - state: present - rules: - - allowed_origins: - - http://www.example.com/ - allowed_methods: - - GET - - POST - allowed_headers: - - Authorization - expose_headers: - - x-amz-server-side-encryption - - x-amz-request-id - max_age_seconds: 30000 - -# Remove the CORS configuration from an S3 bucket -- community.aws.s3_cors: - name: mys3bucket - state: absent -""" - -RETURN = r""" -changed: - description: whether a change was made to the rules - returned: always - type: bool - sample: true -name: - description: name of the bucket - returned: always - type: str - sample: 'bucket-name' -rules: - description: list of current rules - returned: always - type: list - sample: [ - { - "allowed_headers": [ - "Authorization" - ], - "allowed_methods": [ - "GET" - ], - "allowed_origins": [ - "*" - ], - "max_age_seconds": 30000 - } - ] -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def create_or_update_bucket_cors(connection, module): - name = module.params.get("name") - rules = module.params.get("rules", []) - changed = False - - try: - current_camel_rules = connection.get_bucket_cors(Bucket=name)["CORSRules"] - except ClientError: - current_camel_rules = [] - - new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True) - # compare_policies() returns True when the two rule sets differ - if compare_policies(new_camel_rules, current_camel_rules): - changed = True - - if changed: - try: - cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) - except 
(BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}") - - module.exit_json(changed=changed, name=name, rules=rules) - - -def destroy_bucket_cors(connection, module): - name = module.params.get("name") - changed = False - - try: - cors = connection.delete_bucket_cors(Bucket=name) - changed = True - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}") - - module.exit_json(changed=changed) - - -def main(): - argument_spec = dict( - name=dict(required=True, type="str"), - rules=dict(type="list", elements="dict"), - state=dict(type="str", choices=["present", "absent"], required=True), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - client = module.client("s3") - - state = module.params.get("state") - - if state == "present": - create_or_update_bucket_cors(client, module) - elif state == "absent": - destroy_bucket_cors(client, module) - - -if __name__ == "__main__": - main() diff --git a/s3_lifecycle.py b/s3_lifecycle.py deleted file mode 100644 index 27f1179688d..00000000000 --- a/s3_lifecycle.py +++ /dev/null @@ -1,690 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_lifecycle -version_added: 1.0.0 -short_description: Manage S3 bucket lifecycle rules in AWS -description: - - Manage S3 bucket lifecycle rules in AWS. -author: - - "Rob White (@wimnat)" -notes: - - If specifying expiration time as days then transition time must also be specified in days. - - If specifying expiration time as a date then transition time must also be specified as a date. -options: - name: - description: - - Name of the S3 bucket. - required: true - type: str - abort_incomplete_multipart_upload_days: - description: - - Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. - type: int - version_added: 2.2.0 - expiration_date: - description: - - Indicates the lifetime of the objects that are subject to the rule by the date they will expire. - - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. - - This cannot be specified with I(expire_object_delete_marker). - type: str - expiration_days: - description: - - Indicates the lifetime, in days, of the objects that are subject to the rule. - - The value must be a non-zero positive integer. - - This cannot be specified with I(expire_object_delete_marker). - type: int - expire_object_delete_marker: - description: - - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. - - If set to C(true), the delete marker will be expired; if set to C(false) the policy takes no action. - - This cannot be specified with I(expiration_days) or I(expiration_date). - type: bool - version_added: 2.2.0 - prefix: - description: - - Prefix identifying one or more objects to which the rule applies. - - If no prefix is specified, the rule will apply to the whole bucket. - type: str - purge_transitions: - description: - - Whether to replace all the current transition(s) with the new transition(s). - - When C(false), the provided transition(s) will be added, replacing transitions - with the same storage_class. 
When C(true), existing transitions will be removed - and replaced with the new transition(s). - default: true - type: bool - noncurrent_version_expiration_days: - description: - - The number of days after which non-current versions should be deleted. - - Must be set if I(noncurrent_version_keep_newer) is set. - required: false - type: int - noncurrent_version_keep_newer: - description: - - The minimum number of non-current versions to retain. - - Requires C(botocore >= 1.23.12). - - Requires I(noncurrent_version_expiration_days). - required: false - type: int - version_added: 5.3.0 - noncurrent_version_storage_class: - description: - - The storage class to which non-current versions are transitioned. - default: glacier - choices: ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] - required: false - type: str - noncurrent_version_transition_days: - description: - - The number of days after which non-current versions will be transitioned - to the storage class specified in I(noncurrent_version_storage_class). - required: false - type: int - noncurrent_version_transitions: - description: - - A list of transition behaviors to be applied to noncurrent versions for the rule. - - Each storage class may be used only once. Each transition behavior contains these elements: - I(transition_days) - I(storage_class) - type: list - elements: dict - rule_id: - description: - - Unique identifier for the rule. - - The value cannot be longer than 255 characters. - - A unique value for the rule will be generated if no value is provided. - type: str - state: - description: - - Create or remove the lifecycle rule. - default: present - choices: [ 'present', 'absent' ] - type: str - status: - description: - - If C(enabled), the rule is currently being applied. - - If C(disabled), the rule is not currently being applied. - default: enabled - choices: [ 'enabled', 'disabled' ] - type: str - storage_class: - description: - - The storage class to transition to. - default: glacier - choices: [ 'glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive' ] - type: str - transition_date: - description: - - Indicates the lifetime of the objects that are subject to the rule by the date they - will transition to a different storage class. - - The value must be ISO-8601 format, the time must be midnight and a GMT timezone must - be specified. - - If I(transition_days) is not specified, this parameter is required. - type: str - transition_days: - description: - - Indicates when, in days, an object transitions to a different storage class. - - If I(transition_date) is not specified, this parameter is required. - type: int - transitions: - description: - - A list of transition behaviors to be applied to the rule. - - Each storage class may be used only once. Each transition behavior may contain these elements: - I(transition_days) - I(transition_date) - I(storage_class) - type: list - elements: dict - wait: - description: - - Wait for the configuration to complete before returning. - version_added: 1.5.0 - type: bool - default: false -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" # """ - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
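- # The bucket itself must already exist; this module only manages its lifecycle rules.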
- -- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of logs/ after 30 days - community.aws.s3_lifecycle: - name: mybucket - expiration_days: 30 - prefix: logs/ - status: enabled - state: present - -- name: Configure a lifecycle rule to transition all items with a prefix of logs/ to glacier after 7 days and then delete after 90 days - community.aws.s3_lifecycle: - name: mybucket - transition_days: 7 - expiration_days: 90 - prefix: logs/ - status: enabled - state: present - -# Note that midnight GMT must be specified. -# Be sure to quote your date strings -- name: Configure a lifecycle rule to transition all items with a prefix of logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030. - community.aws.s3_lifecycle: - name: mybucket - transition_date: "2020-12-30T00:00:00.000Z" - expiration_date: "2030-12-30T00:00:00.000Z" - prefix: logs/ - status: enabled - state: present - -- name: Disable the rule created above - community.aws.s3_lifecycle: - name: mybucket - prefix: logs/ - status: disabled - state: present - -- name: Delete the lifecycle rule created above - community.aws.s3_lifecycle: - name: mybucket - prefix: logs/ - state: absent - -- name: Configure a lifecycle rule to transition all backup files older than 31 days in backups/ to standard infrequent access class. - community.aws.s3_lifecycle: - name: mybucket - prefix: backups/ - storage_class: standard_ia - transition_days: 31 - state: present - status: enabled - -- name: Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90 - community.aws.s3_lifecycle: - name: mybucket - prefix: logs/ - state: present - status: enabled - transitions: - - transition_days: 30 - storage_class: standard_ia - - transition_days: 90 - storage_class: glacier -""" - -from copy import deepcopy -import datetime -import time - -try: - from dateutil import parser as date_parser - - HAS_DATEUTIL = True -except ImportError: - HAS_DATEUTIL = False - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def parse_date(date):
 - if date is None: - return None - try: - if HAS_DATEUTIL: - return date_parser.parse(date) - else: - # Very simplistic - return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z") - except ValueError: - return None - - -def fetch_rules(client, module, name): - # Get the bucket's current lifecycle rules - try: - current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name) - current_lifecycle_rules = normalize_boto3_result(current_lifecycle["Rules"]) - except is_boto3_error_code("NoSuchLifecycleConfiguration"): - current_lifecycle_rules = [] - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - return current_lifecycle_rules - - -def build_rule(client, module): - name = module.params.get("name") - abort_incomplete_multipart_upload_days = 
module.params.get("abort_incomplete_multipart_upload_days") - expiration_date = parse_date(module.params.get("expiration_date")) - expiration_days = module.params.get("expiration_days") - expire_object_delete_marker = module.params.get("expire_object_delete_marker") - noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days") - noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days") - noncurrent_version_transitions = module.params.get("noncurrent_version_transitions") - noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class") - noncurrent_version_keep_newer = module.params.get("noncurrent_version_keep_newer") - prefix = module.params.get("prefix") or "" - rule_id = module.params.get("rule_id") - status = module.params.get("status") - storage_class = module.params.get("storage_class") - transition_date = parse_date(module.params.get("transition_date")) - transition_days = module.params.get("transition_days") - transitions = module.params.get("transitions") - purge_transitions = module.params.get("purge_transitions") - - rule = dict(Filter=dict(Prefix=prefix), Status=status.title()) - if rule_id is not None: - rule["ID"] = rule_id - - if abort_incomplete_multipart_upload_days: - rule["AbortIncompleteMultipartUpload"] = {"DaysAfterInitiation": abort_incomplete_multipart_upload_days} - - # Create expiration - if expiration_days is not None: - rule["Expiration"] = dict(Days=expiration_days) - elif expiration_date is not None: - rule["Expiration"] = dict(Date=expiration_date.isoformat()) - elif expire_object_delete_marker is not None: - rule["Expiration"] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker) - if noncurrent_version_expiration_days or noncurrent_version_keep_newer: - rule["NoncurrentVersionExpiration"] = dict() - if noncurrent_version_expiration_days is not None: - rule["NoncurrentVersionExpiration"]["NoncurrentDays"] = noncurrent_version_expiration_days - if noncurrent_version_keep_newer is not None: - rule["NoncurrentVersionExpiration"]["NewerNoncurrentVersions"] = noncurrent_version_keep_newer - if transition_days is not None: - rule["Transitions"] = [ - dict(Days=transition_days, StorageClass=storage_class.upper()), - ] - - elif transition_date is not None: - rule["Transitions"] = [ - dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), - ] - - if transitions is not None: - if not rule.get("Transitions"): - rule["Transitions"] = [] - for transition in transitions: - t_out = dict() - if transition.get("transition_date"): - t_out["Date"] = transition["transition_date"] - elif transition.get("transition_days") is not None: - t_out["Days"] = int(transition["transition_days"]) - if transition.get("storage_class"): - t_out["StorageClass"] = transition["storage_class"].upper() - rule["Transitions"].append(t_out) - - if noncurrent_version_transition_days is not None: - rule["NoncurrentVersionTransitions"] = [ - dict( - NoncurrentDays=noncurrent_version_transition_days, StorageClass=noncurrent_version_storage_class.upper() - ), - ] - - if noncurrent_version_transitions is not None: - if not rule.get("NoncurrentVersionTransitions"): - rule["NoncurrentVersionTransitions"] = [] - for noncurrent_version_transition in noncurrent_version_transitions: - t_out = dict() - t_out["NoncurrentDays"] = noncurrent_version_transition["transition_days"] - if noncurrent_version_transition.get("storage_class"): - t_out["StorageClass"] = 
noncurrent_version_transition["storage_class"].upper() - rule["NoncurrentVersionTransitions"].append(t_out) - - return rule - - -def compare_and_update_configuration(client, module, current_lifecycle_rules, rule): - purge_transitions = module.params.get("purge_transitions") - rule_id = module.params.get("rule_id") - - lifecycle_configuration = dict(Rules=[]) - changed = False - appended = False - - # If current_lifecycle_rules is not empty then we have rules to compare, otherwise just add the rule - if current_lifecycle_rules: - # If rule ID exists, use that for comparison; otherwise compare based on prefix - for existing_rule in current_lifecycle_rules: - if rule.get("ID") == existing_rule.get("ID") and rule["Filter"].get("Prefix", "") != existing_rule.get( - "Filter", {} - ).get("Prefix", ""): - existing_rule.pop("ID") - elif rule_id is None and rule["Filter"].get("Prefix", "") == existing_rule.get("Filter", {}).get( - "Prefix", "" - ): - existing_rule.pop("ID") - if rule.get("ID") == existing_rule.get("ID"): - changed_, appended_ = update_or_append_rule( - rule, existing_rule, purge_transitions, lifecycle_configuration - ) - changed = changed_ or changed - appended = appended_ or appended - else: - lifecycle_configuration["Rules"].append(existing_rule) - - # If nothing appended then append now as the rule must not exist - if not appended: - lifecycle_configuration["Rules"].append(rule) - changed = True - else: - lifecycle_configuration["Rules"].append(rule) - changed = True - - return changed, lifecycle_configuration - - -def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj): - changed = False - if existing_rule["Status"] != new_rule["Status"]: - if not new_rule.get("Transitions") and existing_rule.get("Transitions"): - new_rule["Transitions"] = existing_rule["Transitions"] - if not new_rule.get("Expiration") and existing_rule.get("Expiration"): - new_rule["Expiration"] = existing_rule["Expiration"] - if not new_rule.get("NoncurrentVersionExpiration") and existing_rule.get("NoncurrentVersionExpiration"): - new_rule["NoncurrentVersionExpiration"] = existing_rule["NoncurrentVersionExpiration"] - lifecycle_obj["Rules"].append(new_rule) - changed = True - appended = True - else: - if not purge_transitions: - merge_transitions(new_rule, existing_rule) - if compare_rule(new_rule, existing_rule, purge_transitions): - lifecycle_obj["Rules"].append(new_rule) - appended = True - else: - lifecycle_obj["Rules"].append(new_rule) - changed = True - appended = True - return changed, appended - - -def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None): - changed = False - lifecycle_configuration = dict(Rules=[]) - - # Check if rule exists - # If an ID exists, use that; otherwise compare based on prefix - if rule_id is not None: - for existing_rule in current_lifecycle_rules: - if rule_id == existing_rule["ID"]: - # We're not keeping the rule (i.e. deleting), so mark as changed - changed = True - else: - lifecycle_configuration["Rules"].append(existing_rule) - else: - for existing_rule in current_lifecycle_rules: - if prefix == existing_rule["Filter"].get("Prefix", ""): - # We're not keeping the rule (i.e. 
deleting), so mark as changed - changed = True - else: - lifecycle_configuration["Rules"].append(existing_rule) - - return changed, lifecycle_configuration - - -def compare_rule(new_rule, old_rule, purge_transitions): - # Copy objects - rule1 = deepcopy(new_rule) - rule2 = deepcopy(old_rule) - - if purge_transitions: - return rule1 == rule2 - else: - transitions1 = rule1.pop("Transitions", []) - transitions2 = rule2.pop("Transitions", []) - noncurrent_transitions1 = rule1.pop("NoncurrentVersionTransitions", []) - noncurrent_transitions2 = rule2.pop("NoncurrentVersionTransitions", []) - if rule1 != rule2: - return False - for transition in transitions1: - if transition not in transitions2: - return False - for transition in noncurrent_transitions1: - if transition not in noncurrent_transitions2: - return False - return True - - -def merge_transitions(updated_rule, updating_rule): - # because of the legal S3 transitions, we know only one can exist for each storage class. - # So, our strategy is to build some dicts, keyed on storage class, and add the storage class transitions - # that are only in updating_rule to updated_rule - updated_transitions = {} - updating_transitions = {} - for transition in updated_rule.get("Transitions", []): - updated_transitions[transition["StorageClass"]] = transition - for transition in updating_rule.get("Transitions", []): - updating_transitions[transition["StorageClass"]] = transition - for storage_class, transition in updating_transitions.items(): - if updated_transitions.get(storage_class) is None: - updated_rule["Transitions"].append(transition) - - -def create_lifecycle_rule(client, module): - name = module.params.get("name") - wait = module.params.get("wait") - changed = False - - old_lifecycle_rules = fetch_rules(client, module, name) - new_rule = build_rule(client, module) - (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) - if changed: - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration( - aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration - ) - except is_boto3_error_message("At least one action needs to be specified in a rule"): - # Amazon interpreted this as not changing anything - changed = False - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules - ) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again, a couple of times. - # Thus try to read the rule a few times in a row to check if it has changed. 
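- # In short: poll at most 10 times (_retries), and only trust the result once six - # consecutive reads (_not_changed_cnt) already match the desired configuration.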
- time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 - else: - _retries = 0 - - new_rules = fetch_rules(client, module, name) - - module.exit_json( - changed=changed, - new_rule=new_rule, - rules=new_rules, - old_rules=old_lifecycle_rules, - _retries=_retries, - _config=lifecycle_configuration, - ) - - -def destroy_lifecycle_rule(client, module): - name = module.params.get("name") - prefix = module.params.get("prefix") - rule_id = module.params.get("rule_id") - wait = module.params.get("wait") - changed = False - - if prefix is None: - prefix = "" - - current_lifecycle_rules = fetch_rules(client, module, name) - changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) - - if changed: - # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration - try: - if lifecycle_obj["Rules"]: - client.put_bucket_lifecycle_configuration( - aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_obj - ) - elif current_lifecycle_rules: - changed = True - client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again, a couple of times. - # Thus try to read the rule a few times in a row to check if it has changed. 
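- # Same eventual-consistency polling as in create_lifecycle_rule(), this time waiting - # for consecutive reads that agree the rule has been removed.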
- time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 - else: - _retries = 0 - - new_rules = fetch_rules(client, module, name) - - module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, _retries=_retries) - - -def main(): - s3_storage_class = ["glacier", "onezone_ia", "standard_ia", "intelligent_tiering", "deep_archive"] - argument_spec = dict( - name=dict(required=True, type="str"), - abort_incomplete_multipart_upload_days=dict(type="int"), - expiration_days=dict(type="int"), - expiration_date=dict(), - expire_object_delete_marker=dict(type="bool"), - noncurrent_version_expiration_days=dict(type="int"), - noncurrent_version_keep_newer=dict(type="int"), - noncurrent_version_storage_class=dict(default="glacier", type="str", choices=s3_storage_class), - noncurrent_version_transition_days=dict(type="int"), - noncurrent_version_transitions=dict(type="list", elements="dict"), - prefix=dict(), - rule_id=dict(), - state=dict(default="present", choices=["present", "absent"]), - status=dict(default="enabled", choices=["enabled", "disabled"]), - storage_class=dict(default="glacier", type="str", choices=s3_storage_class), - transition_days=dict(type="int"), - transition_date=dict(), - transitions=dict(type="list", elements="dict"), - purge_transitions=dict(default=True, type="bool"), - wait=dict(type="bool", default=False), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ["expiration_days", "expiration_date", "expire_object_delete_marker"], - ["expiration_days", "transition_date"], - ["transition_days", "transition_date"], - ["transition_days", "expiration_date"], - ["transition_days", "transitions"], - ["transition_date", "transitions"], - ["noncurrent_version_transition_days", "noncurrent_version_transitions"], - ], - required_by={ - "noncurrent_version_keep_newer": ["noncurrent_version_expiration_days"], - }, - ) - - client = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) - - expiration_date = module.params.get("expiration_date") - transition_date = module.params.get("transition_date") - state = module.params.get("state") - - if module.params.get("noncurrent_version_keep_newer"): - module.require_botocore_at_least( - "1.23.12", reason="to set number of versions to keep with noncurrent_version_keep_newer" - ) - - if state == "present" and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix - required_when_present = ( - "abort_incomplete_multipart_upload_days", - "expiration_date", - "expiration_days", - "expire_object_delete_marker", - "transition_date", - "transition_days", - "transitions", - "noncurrent_version_expiration_days", - "noncurrent_version_keep_newer", - "noncurrent_version_transition_days", - "noncurrent_version_transitions", - ) - for param in required_when_present: - if module.params.get(param) is not None: - break - # for/else: the else branch only runs when no break occurred, i.e. none of the parameters above were provided - else: - msg = f"one of the following is required when 'state' is 'present': {', '.join(required_when_present)}" - module.fail_json(msg=msg) - - # If dates have been set, make sure they're in a valid format - if expiration_date: - expiration_date = parse_date(expiration_date) - if expiration_date is None: - module.fail_json( - msg="expiration_date is not a valid ISO-8601 format." 
- " The time must be midnight and a timezone of GMT must be included" - ) - if transition_date: - transition_date = parse_date(transition_date) - if transition_date is None: - module.fail_json( - msg="transition_date is not a valid ISO-8601 format." - " The time must be midnight and a timezone of GMT must be included" - ) - - if state == "present": - create_lifecycle_rule(client, module) - elif state == "absent": - destroy_lifecycle_rule(client, module) - - -if __name__ == "__main__": - main() diff --git a/s3_logging.py b/s3_logging.py deleted file mode 100644 index 193455a4be2..00000000000 --- a/s3_logging.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_logging -version_added: 1.0.0 -short_description: Manage logging facility of an s3 bucket in AWS -description: - - Manage logging facility of an s3 bucket in AWS -author: - - Rob White (@wimnat) -options: - name: - description: - - "Name of the s3 bucket." - required: true - type: str - state: - description: - - "Enable or disable logging." - default: present - choices: [ 'present', 'absent' ] - type: str - target_bucket: - description: - - "The bucket to log to. Required when state=present." - type: str - target_prefix: - description: - - "The prefix that should be prepended to the generated log files written to the target_bucket." - default: "" - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" # """ - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs - community.aws.s3_logging: - name: mywebsite.com - target_bucket: mylogs - target_prefix: logs/mywebsite.com - state: present - -- name: Remove logging on an s3 bucket - community.aws.s3_logging: - name: mywebsite.com - state: absent - -""" - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - if not bucket_logging.get("LoggingEnabled", False): - if target_bucket: - return True - return False - - logging = bucket_logging["LoggingEnabled"] - if logging["TargetBucket"] != target_bucket: - return True - if logging["TargetPrefix"] != target_prefix: - return True - return False - - -def verify_acls(connection, module, target_bucket): - try: - current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) - current_grants = current_acl["Grants"] - except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg=f"Target Bucket '{target_bucket}' not found") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to fetch target bucket ACL") - - required_grant = { - "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"}, - "Permission": "FULL_CONTROL", - } - - for grant in current_grants: - if grant == required_grant: - return False - - if module.check_mode: - return True - - updated_acl = dict(current_acl) - updated_grants = list(current_grants) - updated_grants.append(required_grant) - updated_acl["Grants"] = updated_grants - del updated_acl["ResponseMetadata"] - try: - connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to update target bucket ACL to allow log delivery") - - return True - - -def enable_bucket_logging(connection, module): - bucket_name = module.params.get("name") - target_bucket = module.params.get("target_bucket") - target_prefix = module.params.get("target_prefix") - changed = False - - try: - bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) - except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg=f"Bucket '{bucket_name}' not found") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to fetch current logging status") - - try: - changed |= verify_acls(connection, module, target_bucket) - - if not compare_bucket_logging(bucket_logging, target_bucket, target_prefix): - bucket_logging = camel_dict_to_snake_dict(bucket_logging) - module.exit_json(changed=changed, **bucket_logging) - - if module.check_mode: - module.exit_json(changed=True) - - result = connection.put_bucket_logging( - aws_retry=True, - Bucket=bucket_name, - BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": target_bucket, - "TargetPrefix": target_prefix, - } - }, 
- ) - - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to enable bucket logging") - - result = camel_dict_to_snake_dict(result) - module.exit_json(changed=True, **result) - - -def disable_bucket_logging(connection, module): - bucket_name = module.params.get("name") - changed = False - - try: - bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch current logging status") - - if not compare_bucket_logging(bucket_logging, None, None): - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - try: - response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( - connection.put_bucket_logging - )(Bucket=bucket_name, BucketLoggingStatus={}) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to disable bucket logging") - - module.exit_json(changed=True) - - -def main(): - argument_spec = dict( - name=dict(required=True), - target_bucket=dict(required=False, default=None), - target_prefix=dict(required=False, default=""), - state=dict(required=False, default="present", choices=["present", "absent"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff()) - state = module.params.get("state") - - if state == "present": - enable_bucket_logging(connection, module) - elif state == "absent": - disable_bucket_logging(connection, module) - - -if __name__ == "__main__": - main() diff --git a/s3_metrics_configuration.py b/s3_metrics_configuration.py deleted file mode 100644 index d90e7d0e603..00000000000 --- a/s3_metrics_configuration.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_metrics_configuration -version_added: 1.3.0 -short_description: Manage s3 bucket metrics configuration in AWS -description: - - Manage s3 bucket metrics configuration in AWS, which allows you to get the CloudWatch request metrics for the objects in a bucket -author: - - Dmytro Vorotyntsev (@vorotech) -notes: - - This module manages a single metrics configuration; an s3 bucket might have up to 1,000 metrics configurations - - To request metrics for the entire bucket, create a metrics configuration without a filter - - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on -options: - bucket_name: - description: - - "Name of the s3 bucket" - required: true - type: str - id: - description: - - "The ID used to identify the metrics configuration" - required: true - type: str - filter_prefix: - description: - - "A prefix used when evaluating a metrics filter" - required: false - type: str - filter_tags: - description: - - "A dictionary of one or more tags used when evaluating a metrics filter" - required: false - aliases: ['filter_tag'] - type: dict - default: {} - state: - description: - - "Create or delete metrics configuration" - default: present - choices: ['present', 'absent'] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - 
amazon.aws.boto3 -""" - -RETURN = r""" # """ - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create a metrics configuration that enables metrics for an entire bucket - community.aws.s3_metrics_configuration: - bucket_name: my-bucket - id: EntireBucket - state: present - -- name: Put a metrics configuration that enables metrics for objects starting with a prefix - community.aws.s3_metrics_configuration: - bucket_name: my-bucket - id: Assets - filter_prefix: assets - state: present - -- name: Put a metrics configuration that enables metrics for objects with a specific tag - community.aws.s3_metrics_configuration: - bucket_name: my-bucket - id: Assets - filter_tag: - kind: asset - state: present - -- name: Put a metrics configuration that enables metrics for objects that start with a particular prefix and have specific tags applied - community.aws.s3_metrics_configuration: - bucket_name: my-bucket - id: ImportantBlueDocuments - filter_prefix: documents - filter_tags: - priority: high - class: blue - state: present - -- name: Delete metrics configuration - community.aws.s3_metrics_configuration: - bucket_name: my-bucket - id: EntireBucket - state: absent - -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _create_metrics_configuration(mc_id, filter_prefix, filter_tags): - payload = {"Id": mc_id} - # Just a filter_prefix or just a single tag filter is a special case - if filter_prefix and not filter_tags: - payload["Filter"] = {"Prefix": filter_prefix} - elif not filter_prefix and len(filter_tags) == 1: - payload["Filter"] = {"Tag": ansible_dict_to_boto3_tag_list(filter_tags)[0]} - # Otherwise we need to use 'And' - elif filter_tags: - payload["Filter"] = {"And": {"Tags": ansible_dict_to_boto3_tag_list(filter_tags)}} - if filter_prefix: - payload["Filter"]["And"]["Prefix"] = filter_prefix - - return payload - - -def create_or_update_metrics_configuration(client, module): - bucket_name = module.params.get("bucket_name") - mc_id = module.params.get("id") - filter_prefix = module.params.get("filter_prefix") - filter_tags = module.params.get("filter_tags") - - try: - response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - metrics_configuration = response["MetricsConfiguration"] - except is_boto3_error_code("NoSuchConfiguration"): - metrics_configuration = None - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") - - new_configuration = _create_metrics_configuration(mc_id, filter_prefix, filter_tags) - - if metrics_configuration: - if metrics_configuration == new_configuration: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - try: - client.put_bucket_metrics_configuration( - aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration - ) - except (BotoCoreError, ClientError) as e: # pylint: 
disable=duplicate-except - module.fail_json_aws(e, msg=f"Failed to put bucket metrics configuration '{mc_id}'") - - module.exit_json(changed=True) - - -def delete_metrics_configuration(client, module): - bucket_name = module.params.get("bucket_name") - mc_id = module.params.get("id") - - try: - client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code("NoSuchConfiguration"): - module.exit_json(changed=False) - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get bucket metrics configuration") - - if module.check_mode: - module.exit_json(changed=True) - - try: - client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id) - except is_boto3_error_code("NoSuchConfiguration"): - module.exit_json(changed=False) - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Failed to delete bucket metrics configuration '{mc_id}'") - - module.exit_json(changed=True) - - -def main(): - argument_spec = dict( - bucket_name=dict(type="str", required=True), - id=dict(type="str", required=True), - filter_prefix=dict(type="str", required=False), - filter_tags=dict(default={}, type="dict", required=False, aliases=["filter_tag"]), - state=dict(default="present", type="str", choices=["present", "absent"]), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - state = module.params.get("state") - - try: - client = module.client("s3", retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3)) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - if state == "present": - create_or_update_metrics_configuration(client, module) - elif state == "absent": - delete_metrics_configuration(client, module) - - -if __name__ == "__main__": - main() diff --git a/s3_sync.py b/s3_sync.py deleted file mode 100644 index 36809ed2f75..00000000000 --- a/s3_sync.py +++ /dev/null @@ -1,556 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_sync -version_added: 1.0.0 -short_description: Efficiently upload multiple files to S3 -description: -- The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing, - inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping. -options: - mode: - description: - - sync direction. - default: 'push' - choices: [ 'push' ] - type: str - file_change_strategy: - description: - - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched; they are fully skipped or fully uploaded. - - date_size will upload if file sizes don't match or if the local file's modified date is newer than s3's version. - - checksum will compare etag values based on s3's implementation of chunked md5s. - - force will always upload all files. - required: false - default: 'date_size' - choices: [ 'force', 'checksum', 'date_size' ] - type: str - bucket: - description: - - Bucket name. - required: true - type: str - key_prefix: - description: - - In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary. 
- required: false - type: str - default: '' - file_root: - description: - - File/directory path for synchronization. This is a local path. - - This root path is scrubbed from the key name, so subdirectories will remain as keys. - required: true - type: path - permission: - description: - - Canned ACL to apply to synced files. - - Changing this ACL only changes newly synced files, it does not trigger a full reupload. - required: false - choices: - - 'private' - - 'public-read' - - 'public-read-write' - - 'authenticated-read' - - 'aws-exec-read' - - 'bucket-owner-read' - - 'bucket-owner-full-control' - type: str - mime_map: - description: - - > - Dict entry from extension to MIME type. This will override any default/sniffed MIME type. - For example C({".txt": "application/text", ".yml": "application/text"}) - required: false - type: dict - include: - description: - - Shell pattern-style file matching. - - Used before exclude to determine eligible files (for instance, only C("*.gif")) - - For multiple patterns, comma-separate them. - required: false - default: "*" - type: str - exclude: - description: - - Shell pattern-style file matching. - - Used after include to remove files (for instance, skip C("*.txt")) - - For multiple patterns, comma-separate them. - required: false - default: ".*" - type: str - cache_control: - description: - - Cache-Control header set on uploaded objects. - - Directives are separated by commas. - required: false - type: str - default: '' - storage_class: - description: - - Storage class to be associated to each object added to the S3 bucket. - required: false - choices: - - 'STANDARD' - - 'REDUCED_REDUNDANCY' - - 'STANDARD_IA' - - 'ONEZONE_IA' - - 'INTELLIGENT_TIERING' - - 'GLACIER' - - 'DEEP_ARCHIVE' - - 'OUTPOSTS' - default: 'STANDARD' - type: str - version_added: 1.5.0 - delete: - description: - - Remove remote files that exist in bucket but are not present in the file root. 
- required: false - default: false - type: bool - -author: -- Ted Timmons (@tedder) -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: basic upload - community.aws.s3_sync: - bucket: tedder - file_root: roles/s3/files/ - -- name: basic upload using the glacier storage class - community.aws.s3_sync: - bucket: tedder - file_root: roles/s3/files/ - storage_class: GLACIER - -- name: basic individual file upload - community.aws.s3_sync: - bucket: tedder - file_root: roles/s3/files/file_name - -- name: all the options - community.aws.s3_sync: - bucket: tedder - file_root: roles/s3/files - mime_map: - .yml: application/text - .json: application/text - key_prefix: config_files/web - file_change_strategy: force - permission: public-read - cache_control: "public, max-age=31536000" - storage_class: "GLACIER" - include: "*" - exclude: "*.txt,.*" -""" - -RETURN = r""" -filelist_initial: - description: file listing (dicts) from initial globbing - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "modified_epoch": 1477416706 - }] -filelist_local_etag: - description: file listing (dicts) including calculated local etag - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "mime_type": "application/json", - "modified_epoch": 1477416706, - "s3_path": "s3sync/policy.json" - }] -filelist_s3: - description: file listing (dicts) including information about previously-uploaded versions - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "mime_type": "application/json", - "modified_epoch": 1477416706, - "s3_path": "s3sync/policy.json" - }] -filelist_typed: - description: file listing (dicts) with calculated or overridden mime types - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "mime_type": "application/json", - "modified_epoch": 1477416706 - }] -filelist_actionable: - description: file listing (dicts) of files that will be uploaded after the strategy decision - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "mime_type": "application/json", - "modified_epoch": 1477931256, - "s3_path": "s3sync/policy.json", - "whysize": "151 / 151", - "whytime": "1477931256 / 1477929260" - }] -uploads: - description: file listing (dicts) of files that were actually uploaded - returned: always - type: list - sample: [{ - "bytes": 151, - "chopped_path": "policy.json", - "fullpath": "roles/cf/files/policy.json", - "s3_path": "s3sync/policy.json", - "whysize": "151 / 151", - "whytime": "1477931637 / 1477931489" - }] - -""" - -import datetime -import fnmatch -import mimetypes -import os -import stat as osstat # os.stat constants - -try: - from dateutil import tz - - HAS_DATEUTIL = True -except ImportError: - HAS_DATEUTIL = False - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils._text import to_text - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag -from 
ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def gather_files(fileroot, include=None, exclude=None): - ret = [] - - if os.path.isfile(fileroot): - fullpath = fileroot - fstat = os.stat(fullpath) - path_array = fileroot.split("/") - chopped_path = path_array[-1] - f_size = fstat[osstat.ST_SIZE] - f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append( - { - "fullpath": fullpath, - "chopped_path": chopped_path, - "modified_epoch": f_modified_epoch, - "bytes": f_size, - } - ) - - else: - for dirpath, dirnames, filenames in os.walk(fileroot): - for fn in filenames: - fullpath = os.path.join(dirpath, fn) - # include/exclude - if include: - found = False - for x in include.split(","): - if fnmatch.fnmatch(fn, x): - found = True - if not found: - # not on the include list, so we don't want it. - continue - - if exclude: - found = False - for x in exclude.split(","): - if fnmatch.fnmatch(fn, x): - found = True - if found: - # skip it, even if previously included. - continue - - chopped_path = os.path.relpath(fullpath, start=fileroot) - fstat = os.stat(fullpath) - f_size = fstat[osstat.ST_SIZE] - f_modified_epoch = fstat[osstat.ST_MTIME] - ret.append( - { - "fullpath": fullpath, - "chopped_path": chopped_path, - "modified_epoch": f_modified_epoch, - "bytes": f_size, - } - ) - # dirpath = path *to* the directory - # dirnames = subdirs *in* our directory - # filenames - return ret - - -def calculate_s3_path(filelist, key_prefix=""): - ret = [] - for fileentry in filelist: - # don't modify the input dict - retentry = fileentry.copy() - retentry["s3_path"] = os.path.join(key_prefix, fileentry["chopped_path"]) - ret.append(retentry) - return ret - - -def calculate_local_etag(filelist, key_prefix=""): - """Really, "calculate md5", but since AWS uses their own format, we'll just call - it a "local etag". TODO optimization: only calculate if remote key exists.""" - ret = [] - for fileentry in filelist: - # don't modify the input dict - retentry = fileentry.copy() - retentry["local_etag"] = calculate_multipart_etag(fileentry["fullpath"]) - ret.append(retentry) - return ret - - -def determine_mimetypes(filelist, override_map): - ret = [] - for fileentry in filelist: - retentry = fileentry.copy() - localfile = fileentry["fullpath"] - - # reminder: file extension is '.txt', not 'txt'. - file_extension = os.path.splitext(localfile)[1] - if override_map and override_map.get(file_extension): - # override? use it. - retentry["mime_type"] = override_map[file_extension] - else: - # else sniff it - retentry["mime_type"], retentry["encoding"] = mimetypes.guess_type(localfile, strict=False) - - # might be None or '' from one of the above. Not a great type but better than nothing. 
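- # mimetypes.guess_type() returns a (type, encoding) tuple and yields (None, None) - # for unknown extensions, hence the generic binary fallback below.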
- if not retentry["mime_type"]: - retentry["mime_type"] = "application/octet-stream" - - ret.append(retentry) - - return ret - - -def head_s3(s3, bucket, s3keys): - retkeys = [] - for entry in s3keys: - retentry = entry.copy() - try: - retentry["s3_head"] = s3.head_object(Bucket=bucket, Key=entry["s3_path"]) - # 404 (Missing) - File doesn't exist, we'll need to upload - # 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload - except is_boto3_error_code(["404", "403"]): - pass - retkeys.append(retentry) - return retkeys - - -def filter_list(s3, bucket, s3filelist, strategy): - keeplist = list(s3filelist) - - for e in keeplist: - e["_strategy"] = strategy - - # init/fetch info from S3 if we're going to use it for comparisons - if not strategy == "force": - keeplist = head_s3(s3, bucket, s3filelist) - - # now actually run the strategies - if strategy == "checksum": - for entry in keeplist: - if entry.get("s3_head"): - # since we have a remote s3 object, compare the values. - if entry["s3_head"]["ETag"] == entry["local_etag"]: - # files match, so remove the entry - entry["skip_flag"] = True - else: - # file etags don't match, keep the entry. - pass - else: # we don't have an etag, so we'll keep it. - pass - elif strategy == "date_size": - for entry in keeplist: - if entry.get("s3_head"): - # fstat = entry['stat'] - local_modified_epoch = entry["modified_epoch"] - local_size = entry["bytes"] - - # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward. - # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp() - remote_modified_datetime = entry["s3_head"]["LastModified"] - delta = remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()) - remote_modified_epoch = delta.seconds + (delta.days * 86400) - - remote_size = entry["s3_head"]["ContentLength"] - - entry["whytime"] = f"{local_modified_epoch} / {remote_modified_epoch}" - entry["whysize"] = f"{local_size} / {remote_size}" - - if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: - entry["skip_flag"] = True - else: - entry["why"] = "no s3_head" - # else: probably 'force'. Basically we don't skip with any with other strategies. - else: - pass - - # prune 'please skip' entries, if any. 
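- # Entries whose HEAD request returned 404/403 never get skip_flag set, so new - # (or unreadable) keys always stay in the upload list.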
- return [x for x in keeplist if not x.get("skip_flag")] - - -def upload_files(s3, bucket, filelist, params): - ret = [] - for entry in filelist: - args = {"ContentType": entry["mime_type"]} - if params.get("permission"): - args["ACL"] = params["permission"] - if params.get("cache_control"): - args["CacheControl"] = params["cache_control"] - if params.get("storage_class"): - args["StorageClass"] = params["storage_class"] - # if this fails exception is caught in main() - s3.upload_file(entry["fullpath"], bucket, entry["s3_path"], ExtraArgs=args, Callback=None, Config=None) - ret.append(entry) - return ret - - -def remove_files(s3, sourcelist, params): - bucket = params.get("bucket") - key_prefix = params.get("key_prefix") - paginator = s3.get_paginator("list_objects_v2") - current_keys = set( - x["Key"] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get("Contents", []) - ) - keep_keys = set(to_text(source_file["s3_path"]) for source_file in sourcelist) - delete_keys = list(current_keys - keep_keys) - - # can delete 1000 objects at a time - groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] # fmt:skip - for keys in groups_of_keys: - s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": key} for key in keys]}) - - return delete_keys - - -def main(): - argument_spec = dict( - mode=dict(choices=["push"], default="push"), - file_change_strategy=dict(choices=["force", "date_size", "checksum"], default="date_size"), - bucket=dict(required=True), - key_prefix=dict(required=False, default="", no_log=False), - file_root=dict(required=True, type="path"), - permission=dict( - required=False, - choices=[ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", - ], - ), - mime_map=dict(required=False, type="dict"), - exclude=dict(required=False, default=".*"), - include=dict(required=False, default="*"), - cache_control=dict(required=False, default=""), - delete=dict(required=False, type="bool", default=False), - storage_class=dict( - required=False, - default="STANDARD", - choices=[ - "STANDARD", - "REDUCED_REDUNDANCY", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "GLACIER", - "DEEP_ARCHIVE", - "OUTPOSTS", - ], - ), - # future options: encoding, metadata, retries - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - ) - - if not HAS_DATEUTIL: - module.fail_json(msg="dateutil required for this module") - - result = {} - mode = module.params["mode"] - - try: - s3 = module.client("s3") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - if mode == "push": - try: - result["filelist_initial"] = gather_files( - module.params["file_root"], exclude=module.params["exclude"], include=module.params["include"] - ) - result["filelist_typed"] = determine_mimetypes(result["filelist_initial"], module.params.get("mime_map")) - result["filelist_s3"] = calculate_s3_path(result["filelist_typed"], module.params["key_prefix"]) - try: - result["filelist_local_etag"] = calculate_local_etag(result["filelist_s3"]) - except ValueError as e: - if module.params["file_change_strategy"] == "checksum": - module.fail_json_aws( - e, - "Unable to calculate checksum. 
If running in FIPS mode, you may need to use another file_change_strategy", - ) - result["filelist_local_etag"] = result["filelist_s3"].copy() - result["filelist_actionable"] = filter_list( - s3, module.params["bucket"], result["filelist_local_etag"], module.params["file_change_strategy"] - ) - result["uploads"] = upload_files(s3, module.params["bucket"], result["filelist_actionable"], module.params) - - if module.params["delete"]: - result["removed"] = remove_files(s3, result["filelist_local_etag"], module.params) - - # mark changed if we actually upload something. - if result.get("uploads") or result.get("removed"): - result["changed"] = True - # result.update(filelist=actionable_filelist) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to push file") - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/s3_website.py b/s3_website.py deleted file mode 100644 index 38c411b1fe2..00000000000 --- a/s3_website.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: s3_website -version_added: 1.0.0 -short_description: Configure an s3 bucket as a website -description: - - Configure an s3 bucket as a website -author: - - Rob White (@wimnat) -options: - name: - description: - - "Name of the s3 bucket" - required: true - type: str - error_key: - description: - - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None." - type: str - redirect_all_requests: - description: - - "Describes the redirect behavior for every request to this s3 bucket website endpoint" - type: str - state: - description: - - "Add or remove s3 website configuration" - choices: [ 'present', 'absent' ] - required: true - type: str - suffix: - description: - - > - Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to - samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash - character. - default: index.html - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: Configure an s3 bucket to redirect all requests to example.com - community.aws.s3_website: - name: mybucket.com - redirect_all_requests: example.com - state: present - -- name: Remove website configuration from an s3 bucket - community.aws.s3_website: - name: mybucket.com - state: absent - -- name: Configure an s3 bucket as a website with index and error pages - community.aws.s3_website: - name: mybucket.com - suffix: home.htm - error_key: errors/404.htm - state: present - -""" - -RETURN = r""" -index_document: - description: index document - type: complex - returned: always - contains: - suffix: - description: suffix that is appended to a request that is for a directory on the website endpoint - returned: success - type: str - sample: index.html -error_document: - description: error document - type: complex - returned: always - contains: - key: - description: object key name to use when a 4XX class error occurs - returned: when error_document parameter set - type: str - sample: error.html -redirect_all_requests_to: - description: where to redirect requests - type: complex - returned: always - contains: - host_name: - description: name of the host where requests will be redirected. - returned: when redirect all requests parameter set - type: str - sample: ansible.com - protocol: - description: protocol to use when redirecting requests. - returned: when redirect all requests parameter set - type: str - sample: https -routing_rules: - description: routing rules - type: list - returned: always - contains: - condition: - type: complex - description: A container for describing a condition that must be met for the specified redirect to apply. - contains: - http_error_code_returned_equals: - description: The HTTP error code when the redirect is applied. - returned: always - type: str - key_prefix_equals: - description: object key name prefix when the redirect is applied. For example, to redirect - requests for ExamplePage.html, the key prefix will be ExamplePage.html - returned: when routing rule present - type: str - sample: docs/ - redirect: - type: complex - description: Container for redirect information. - returned: always - contains: - host_name: - description: name of the host where requests will be redirected. - returned: when host name set as part of redirect rule - type: str - sample: ansible.com - http_redirect_code: - description: The HTTP redirect code to use on the response. - returned: when routing rule present - type: str - protocol: - description: Protocol to use when redirecting requests. - returned: when routing rule present - type: str - sample: http - replace_key_prefix_with: - description: object key prefix to use in the redirect request - returned: when routing rule present - type: str - sample: documents/ - replace_key_with: - description: object key prefix to use in the redirect request - returned: when routing rule present - type: str - sample: documents/ -""" - -import time - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _create_redirect_dict(url): - redirect_dict = {} - url_split = url.split(":") - - # Did we split anything? 
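# Expected behaviour of the split below, shown with hypothetical inputs:
#
#   _create_redirect_dict("https://example.com")  -> {"Protocol": "https", "HostName": "example.com"}
#   _create_redirect_dict("example.com")          -> {"HostName": "example.com"}
#   _create_redirect_dict("a:b:c")                -> raises ValueError("Redirect URL appears invalid")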
- if len(url_split) == 2: - redirect_dict["Protocol"] = url_split[0] - redirect_dict["HostName"] = url_split[1].replace("//", "") - elif len(url_split) == 1: - redirect_dict["HostName"] = url_split[0] - else: - raise ValueError("Redirect URL appears invalid") - - return redirect_dict - - -def _create_website_configuration(suffix, error_key, redirect_all_requests): - website_configuration = {} - - if error_key is not None: - website_configuration["ErrorDocument"] = {"Key": error_key} - - if suffix is not None: - website_configuration["IndexDocument"] = {"Suffix": suffix} - - if redirect_all_requests is not None: - website_configuration["RedirectAllRequestsTo"] = _create_redirect_dict(redirect_all_requests) - - return website_configuration - - -def enable_or_update_bucket_as_website(client_connection, resource_connection, module): - bucket_name = module.params.get("name") - redirect_all_requests = module.params.get("redirect_all_requests") - # If redirect_all_requests is set then don't use the default suffix that has been set - if redirect_all_requests is not None: - suffix = None - else: - suffix = module.params.get("suffix") - error_key = module.params.get("error_key") - changed = False - - try: - bucket_website = resource_connection.BucketWebsite(bucket_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get bucket") - - try: - website_config = client_connection.get_bucket_website(Bucket=bucket_name) - except is_boto3_error_code("NoSuchWebsiteConfiguration"): - website_config = None - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get website configuration") - - if website_config is None: - try: - bucket_website.put( - WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to set bucket website configuration") - except ValueError as e: - module.fail_json(msg=str(e)) - else: - try: - if ( - (suffix is not None and website_config["IndexDocument"]["Suffix"] != suffix) - or (error_key is not None and website_config["ErrorDocument"]["Key"] != error_key) - or ( - redirect_all_requests is not None - and website_config["RedirectAllRequestsTo"] != _create_redirect_dict(redirect_all_requests) - ) - ): - try: - bucket_website.put( - WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update bucket website configuration") - except KeyError as e: - try: - bucket_website.put( - WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) - ) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update bucket website configuration") - except ValueError as e: - module.fail_json(msg=str(e)) - - # Wait 5 secs before getting the website_config again to give it time to update - time.sleep(5) - - website_config = client_connection.get_bucket_website(Bucket=bucket_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config)) - - -def disable_bucket_as_website(client_connection, module): - changed = False - 
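# The lookup below leans on is_boto3_error_code(), which builds an exception
# class matching a specific AWS error code so it can be used directly in an
# except clause.  A rough usage sketch (the bucket name is hypothetical):
#
#   try:
#       config = client_connection.get_bucket_website(Bucket="my-bucket")
#   except is_boto3_error_code("NoSuchWebsiteConfiguration"):
#       config = None  # bucket exists but has no website configuration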
-    bucket_name = module.params.get("name")
-
-    try:
-        client_connection.get_bucket_website(Bucket=bucket_name)
-    except is_boto3_error_code("NoSuchWebsiteConfiguration"):
-        module.exit_json(changed=changed)
-    except (
-        botocore.exceptions.ClientError,
-        botocore.exceptions.BotoCoreError,
-    ) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Failed to get bucket website")
-
-    try:
-        client_connection.delete_bucket_website(Bucket=bucket_name)
-        changed = True
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to delete bucket website")
-
-    module.exit_json(changed=changed)
-
-
-def main():
-    argument_spec = dict(
-        name=dict(type="str", required=True),
-        state=dict(type="str", required=True, choices=["present", "absent"]),
-        suffix=dict(type="str", required=False, default="index.html"),
-        error_key=dict(type="str", required=False, no_log=False),
-        redirect_all_requests=dict(type="str", required=False),
-    )
-
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ["redirect_all_requests", "suffix"],
-            ["redirect_all_requests", "error_key"],
-        ],
-    )
-
-    try:
-        client_connection = module.client("s3")
-        resource_connection = module.resource("s3")
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg="Failed to connect to AWS")
-
-    state = module.params.get("state")
-
-    if state == "present":
-        enable_or_update_bucket_as_website(client_connection, resource_connection, module)
-    elif state == "absent":
-        disable_bucket_as_website(client_connection, module)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/secretsmanager_secret.py b/secretsmanager_secret.py
deleted file mode 100644
index 1a1340df723..00000000000
--- a/secretsmanager_secret.py
+++ /dev/null
@@ -1,658 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, REY Remi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r"""
----
-module: secretsmanager_secret
-version_added: 1.0.0
-short_description: Manage secrets stored in AWS Secrets Manager
-description:
-  - Create, update, and delete secrets stored in AWS Secrets Manager.
-  - Prior to release 5.0.0 this module was called C(community.aws.aws_secret).
-    The usage did not change.
-author:
-  - "REY Remi (@rrey)"
-options:
-  name:
-    description:
-      - Friendly name for the secret you are creating.
-    required: true
-    type: str
-  state:
-    description:
-      - Whether the secret should exist or not.
-    default: 'present'
-    choices: ['present', 'absent']
-    type: str
-  overwrite:
-    description:
-      - Whether to overwrite an existing secret with the same name.
-      - If set to C(True), an existing secret with the same I(name) will be overwritten.
-      - If set to C(False), a secret with the given I(name) will only be created if none exists.
-    type: bool
-    default: True
-    version_added: 5.3.0
-  recovery_window:
-    description:
-      - Only used if state is absent.
-      - Specifies the number of days that Secrets Manager waits before it can delete the secret.
-      - If set to 0, the deletion is forced without recovery.
-    default: 30
-    type: int
-  description:
-    description:
-      - Specifies a user-provided description of the secret.
-    type: str
-    default: ''
-  replica:
-    description:
-      - Specifies a list of regions and kms_key_ids (optional) to replicate the secret to.
-    type: list
-    elements: dict
-    version_added: 5.3.0
-    suboptions:
-      region:
-        description:
-          - Region to replicate secret to.
-        type: str
-        required: true
-      kms_key_id:
-        description:
-          - Specifies the ARN or alias of the AWS KMS customer master key (CMK) in the
-            destination region to be used (alias/aws/secretsmanager is assumed if not specified).
-        type: str
-        required: false
-  kms_key_id:
-    description:
-      - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
-        used to encrypt the I(secret) values in the versions stored in this secret.
-    type: str
-  secret_type:
-    description:
-      - Specifies the type of data that you want to encrypt.
-    choices: ['binary', 'string']
-    default: 'string'
-    type: str
-  secret:
-    description:
-      - Specifies string or binary data that you want to encrypt and store in the new version of the secret.
-      - Mutually exclusive with the I(json_secret) option.
-    default: ""
-    type: str
-  json_secret:
-    description:
-      - Specifies JSON-formatted data that you want to encrypt and store in the new version of the
-        secret.
-      - Mutually exclusive with the I(secret) option.
-    type: json
-    version_added: 4.1.0
-  resource_policy:
-    description:
-      - Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access
-        to secrets.
-    required: false
-    type: json
-    version_added: 3.1.0
-  rotation_lambda:
-    description:
-      - Specifies the ARN of the Lambda function that can rotate the secret.
-    type: str
-  rotation_interval:
-    description:
-      - Specifies the number of days between automatic scheduled rotations of the secret.
-    default: 30
-    type: int
-notes:
-  - Support for I(purge_tags) was added in release 4.0.0.
-extends_documentation_fragment:
-  - amazon.aws.region.modules
-  - amazon.aws.common.modules
-  - amazon.aws.tags
-  - amazon.aws.boto3
-"""
-
-EXAMPLES = r"""
-- name: Add string to AWS Secrets Manager
-  community.aws.secretsmanager_secret:
-    name: 'test_secret_string'
-    state: present
-    secret_type: 'string'
-    secret: "{{ super_secret_string }}"
-
-- name: Add a secret with resource policy attached
-  community.aws.secretsmanager_secret:
-    name: 'test_secret_string'
-    state: present
-    secret_type: 'string'
-    secret: "{{ super_secret_string }}"
-    resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}"
-
-- name: Remove string from AWS Secrets Manager
-  community.aws.secretsmanager_secret:
-    name: 'test_secret_string'
-    state: absent
-    secret_type: 'string'
-    secret: "{{ super_secret_string }}"
-
-- name: Only create a new secret, but do not update if it already exists by name
-  community.aws.secretsmanager_secret:
-    name: 'random_string'
-    state: present
-    secret_type: 'string'
-    secret: "{{ lookup('community.general.random_string', length=16, special=false) }}"
-    overwrite: false
-"""
-
-RETURN = r"""
-secret:
-  description: The secret information.
-  returned: always
-  type: complex
-  contains:
-    arn:
-      description: The ARN of the secret.
-      returned: always
-      type: str
-      sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
-    description:
-      description: A description of the secret.
-      returned: when the secret has a description
-      type: str
-      sample: An example description
-    last_accessed_date:
-      description: The date the secret was last accessed.
- returned: always - type: str - sample: '2018-11-20T01:00:00+01:00' - last_changed_date: - description: The date the secret was last modified. - returned: always - type: str - sample: '2018-11-20T12:16:38.433000+01:00' - name: - description: The secret name. - returned: always - type: str - sample: my_secret - rotation_enabled: - description: The secret rotation status. - returned: always - type: bool - sample: false - version_ids_to_stages: - description: Provide the secret version ids and the associated secret stage. - returned: always - type: dict - sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] } - tags: - description: - - A list of dictionaries representing the tags associated with the secret in the standard boto3 format. - returned: when the secret has tags - type: list - elements: dict - contains: - key: - description: The name or key of the tag. - type: str - example: MyTag - returned: success - value: - description: The value of the tag. - type: str - example: Some value. - returned: success - tags_dict: - description: A dictionary representing the tags associated with the secret. - type: dict - returned: when the secret has tags - example: {'MyTagName': 'Some Value'} - version_added: 4.0.0 -""" - -from traceback import format_exc -import json - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils._text import to_bytes -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class Secret(object): - """An object representation of the Secret described by the self.module args""" - - def __init__( - self, - name, - secret_type, - secret, - resource_policy=None, - description="", - kms_key_id=None, - tags=None, - lambda_arn=None, - rotation_interval=None, - replica_regions=None, - ): - self.name = name - self.description = description - self.replica_regions = replica_regions - self.kms_key_id = kms_key_id - if secret_type == "binary": - self.secret_type = "SecretBinary" - else: - self.secret_type = "SecretString" - self.secret = secret - self.resource_policy = resource_policy - self.tags = tags or {} - self.rotation_enabled = False - if lambda_arn: - self.rotation_enabled = True - self.rotation_lambda_arn = lambda_arn - self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)} - - @property - def create_args(self): - args = {"Name": self.name} - if self.description: - args["Description"] = self.description - if self.kms_key_id: - args["KmsKeyId"] = self.kms_key_id - if self.replica_regions: - add_replica_regions = [] - for replica in self.replica_regions: - if replica["kms_key_id"]: - add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]}) - else: - add_replica_regions.append({"Region": replica["region"]}) - args["AddReplicaRegions"] = add_replica_regions - if 
self.tags:
-            args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
-        args[self.secret_type] = self.secret
-        return args
-
-    @property
-    def update_args(self):
-        args = {"SecretId": self.name}
-        if self.description:
-            args["Description"] = self.description
-        if self.kms_key_id:
-            args["KmsKeyId"] = self.kms_key_id
-        args[self.secret_type] = self.secret
-        return args
-
-    @property
-    def secret_resource_policy_args(self):
-        args = {"SecretId": self.name}
-        if self.resource_policy:
-            args["ResourcePolicy"] = self.resource_policy
-        return args
-
-    @property
-    def boto3_tags(self):
-        # Note: the attribute is the lowercase "tags" set in __init__;
-        # referencing "self.Tags" here would raise AttributeError.
-        return ansible_dict_to_boto3_tag_list(self.tags)
-
-    def as_dict(self):
-        result = self.__dict__
-        result.pop("tags")
-        return snake_dict_to_camel_dict(result)
-
-
-class SecretsManagerInterface(object):
-    """An interface with SecretsManager"""
-
-    def __init__(self, module):
-        self.module = module
-        self.client = self.module.client("secretsmanager")
-
-    def get_secret(self, name):
-        try:
-            secret = self.client.describe_secret(SecretId=name)
-        except self.client.exceptions.ResourceNotFoundException:
-            secret = None
-        except Exception as e:
-            self.module.fail_json_aws(e, msg="Failed to describe secret")
-        return secret
-
-    def get_resource_policy(self, name):
-        try:
-            resource_policy = self.client.get_resource_policy(SecretId=name)
-        except self.client.exceptions.ResourceNotFoundException:
-            resource_policy = None
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to get secret resource policy")
-        return resource_policy
-
-    def create_secret(self, secret):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            created_secret = self.client.create_secret(**secret.create_args)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to create secret")
-
-        if secret.rotation_enabled:
-            response = self.update_rotation(secret)
-            created_secret["VersionId"] = response.get("VersionId")
-        return created_secret
-
-    def update_secret(self, secret):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            response = self.client.update_secret(**secret.update_args)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to update secret")
-        return response
-
-    def put_resource_policy(self, secret):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            json.loads(secret.secret_resource_policy_args.get("ResourcePolicy"))
-        except (TypeError, ValueError) as e:
-            self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc())
-
-        try:
-            response = self.client.put_resource_policy(**secret.secret_resource_policy_args)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to update secret resource policy")
-        return response
-
-    def remove_replication(self, name, regions):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to remove replica regions from secret")
-        return response
-
-    def replicate_secret(self, name, regions):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            replica_regions = []
-            for replica in regions:
-                if replica["kms_key_id"]:
-                    replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]})
-                else:
-                    replica_regions.append({"Region": replica["region"]})
-            response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to replicate secret")
-        return response
-
-    def restore_secret(self, name):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            response = self.client.restore_secret(SecretId=name)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to restore secret")
-        return response
-
-    def delete_secret(self, name, recovery_window):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            if recovery_window == 0:
-                response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
-            else:
-                response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to delete secret")
-        return response
-
-    def delete_resource_policy(self, name):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            response = self.client.delete_resource_policy(SecretId=name)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to delete secret resource policy")
-        return response
-
-    def update_rotation(self, secret):
-        if secret.rotation_enabled:
-            try:
-                response = self.client.rotate_secret(
-                    SecretId=secret.name,
-                    RotationLambdaARN=secret.rotation_lambda_arn,
-                    RotationRules=secret.rotation_rules,
-                )
-            except (BotoCoreError, ClientError) as e:
-                self.module.fail_json_aws(e, msg="Failed to rotate secret")
-        else:
-            try:
-                response = self.client.cancel_rotate_secret(SecretId=secret.name)
-            except (BotoCoreError, ClientError) as e:
-                self.module.fail_json_aws(e, msg="Failed to cancel rotation")
-        return response
-
-    def tag_secret(self, secret_name, tags):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            self.client.tag_resource(SecretId=secret_name, Tags=tags)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")
-
-    def untag_secret(self, secret_name, tag_keys):
-        if self.module.check_mode:
-            self.module.exit_json(changed=True)
-        try:
-            self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
-        except (BotoCoreError, ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")
-
-    def secrets_match(self, desired_secret, current_secret):
-        """Compare secrets except tags and rotation
-
-        Args:
-            desired_secret: camel dict representation of the desired secret state.
-            current_secret: secret reference as returned by the secretsmanager api.
-
-        Returns: bool
-        """
-        if desired_secret.description != current_secret.get("Description", ""):
-            return False
-        if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
-            return False
-        current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
-        if desired_secret.secret_type == "SecretBinary":
-            desired_value = to_bytes(desired_secret.secret)
-        else:
-            desired_value = desired_secret.secret
-        if desired_value != current_secret_value.get(desired_secret.secret_type):
-            return False
-        return True
-
-
-def rotation_match(desired_secret, current_secret):
-    """Compare secrets rotation configuration
-
-    Args:
-        desired_secret: camel dict representation of the desired secret state.
- current_secret: secret reference as returned by the secretsmanager api. - - Returns: bool - """ - if desired_secret.rotation_enabled != current_secret.get("RotationEnabled", False): - return False - if desired_secret.rotation_enabled: - if desired_secret.rotation_lambda_arn != current_secret.get("RotationLambdaARN"): - return False - if desired_secret.rotation_rules != current_secret.get("RotationRules"): - return False - return True - - -def compare_regions(desired_secret, current_secret): - """Compare secrets replication configuration - - Args: - desired_secret: camel dict representation of the desired secret state. - current_secret: secret reference as returned by the secretsmanager api. - - Returns: bool - """ - regions_to_set_replication = [] - regions_to_remove_replication = [] - - if desired_secret.replica_regions is None: - return regions_to_set_replication, regions_to_remove_replication - - if desired_secret.replica_regions: - regions_to_set_replication = desired_secret.replica_regions - - for current_secret_region in current_secret.get("ReplicationStatus", []): - if regions_to_set_replication: - for desired_secret_region in regions_to_set_replication: - if current_secret_region["Region"] == desired_secret_region["region"]: - regions_to_set_replication.remove(desired_secret_region) - else: - regions_to_remove_replication.append(current_secret_region["Region"]) - else: - regions_to_remove_replication.append(current_secret_region["Region"]) - - return regions_to_set_replication, regions_to_remove_replication - - -def main(): - replica_args = dict( - region=dict(type="str", required=True), - kms_key_id=dict(type="str", required=False), - ) - - module = AnsibleAWSModule( - argument_spec={ - "name": dict(required=True), - "state": dict(choices=["present", "absent"], default="present"), - "overwrite": dict(type="bool", default=True), - "description": dict(default=""), - "replica": dict(type="list", elements="dict", options=replica_args), - "kms_key_id": dict(), - "secret_type": dict(choices=["binary", "string"], default="string"), - "secret": dict(default="", no_log=True), - "json_secret": dict(type="json", no_log=True), - "resource_policy": dict(type="json", default=None), - "tags": dict(type="dict", default=None, aliases=["resource_tags"]), - "purge_tags": dict(type="bool", default=True), - "rotation_lambda": dict(), - "rotation_interval": dict(type="int", default=30), - "recovery_window": dict(type="int", default=30), - }, - mutually_exclusive=[["secret", "json_secret"]], - supports_check_mode=True, - ) - - changed = False - state = module.params.get("state") - secrets_mgr = SecretsManagerInterface(module) - recovery_window = module.params.get("recovery_window") - secret = Secret( - module.params.get("name"), - module.params.get("secret_type"), - module.params.get("secret") or module.params.get("json_secret"), - description=module.params.get("description"), - replica_regions=module.params.get("replica"), - kms_key_id=module.params.get("kms_key_id"), - resource_policy=module.params.get("resource_policy"), - tags=module.params.get("tags"), - lambda_arn=module.params.get("rotation_lambda"), - rotation_interval=module.params.get("rotation_interval"), - ) - purge_tags = module.params.get("purge_tags") - - current_secret = secrets_mgr.get_secret(secret.name) - - if state == "absent": - if current_secret: - if not current_secret.get("DeletedDate"): - result = camel_dict_to_snake_dict( - secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) - ) - changed = True - elif 
current_secret.get("DeletedDate") and recovery_window == 0: - result = camel_dict_to_snake_dict( - secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window) - ) - changed = True - else: - result = "secret already scheduled for deletion" - else: - result = "secret does not exist" - if state == "present": - if current_secret is None: - result = secrets_mgr.create_secret(secret) - if secret.resource_policy and result.get("ARN"): - result = secrets_mgr.put_resource_policy(secret) - changed = True - else: - # current_secret exists; decide what to do with it - if current_secret.get("DeletedDate"): - secrets_mgr.restore_secret(secret.name) - changed = True - if not secrets_mgr.secrets_match(secret, current_secret): - overwrite = module.params.get("overwrite") - if overwrite: - result = secrets_mgr.update_secret(secret) - changed = True - if not rotation_match(secret, current_secret): - result = secrets_mgr.update_rotation(secret) - changed = True - - current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name) - current_resource_policy = current_resource_policy_response.get("ResourcePolicy") - if compare_policies(secret.resource_policy, current_resource_policy): - if secret.resource_policy is None and current_resource_policy: - result = secrets_mgr.delete_resource_policy(secret.name) - else: - result = secrets_mgr.put_resource_policy(secret) - changed = True - - if module.params.get("tags") is not None: - current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", [])) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags) - if tags_to_add: - secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add)) - changed = True - if tags_to_remove: - secrets_mgr.untag_secret(secret.name, tags_to_remove) - changed = True - - regions_to_set_replication, regions_to_remove_replication = compare_regions(secret, current_secret) - if regions_to_set_replication: - secrets_mgr.replicate_secret(secret.name, regions_to_set_replication) - changed = True - if regions_to_remove_replication: - secrets_mgr.remove_replication(secret.name, regions_to_remove_replication) - changed = True - - result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name)) - if result.get("tags", None) is not None: - result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", [])) - result.pop("response_metadata") - - module.exit_json(changed=changed, secret=result) - - -if __name__ == "__main__": - main() diff --git a/ses_identity.py b/ses_identity.py deleted file mode 100644 index e324a7e12f7..00000000000 --- a/ses_identity.py +++ /dev/null @@ -1,575 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ses_identity -version_added: 1.0.0 -short_description: Manages SES email and domain identity -description: - - This module allows the user to manage verified email and domain identity for SES. - - This covers verifying and removing identities as well as setting up complaint, bounce - and delivery notification settings. - - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity). - The usage did not change. -author: - - Ed Costello (@orthanc) -options: - identity: - description: - - This is the email address or domain to verify / delete. - - If this contains an '@' then it will be considered an email. 
Otherwise it will be considered a domain. - required: true - type: str - state: - description: Whether to create(or update) or delete the identity. - default: present - choices: [ 'present', 'absent' ] - type: str - bounce_notifications: - description: - - Setup the SNS topic used to report bounce notifications. - - If omitted, bounce notifications will not be delivered to a SNS topic. - - If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled. - suboptions: - topic: - description: - - The ARN of the topic to send notifications to. - - If omitted, notifications will not be delivered to a SNS topic. - include_headers: - description: - - Whether or not to include headers when delivering to the SNS topic. - - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. - type: bool - default: No - type: dict - complaint_notifications: - description: - - Setup the SNS topic used to report complaint notifications. - - If omitted, complaint notifications will not be delivered to a SNS topic. - - If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled. - suboptions: - topic: - description: - - The ARN of the topic to send notifications to. - - If omitted, notifications will not be delivered to a SNS topic. - include_headers: - description: - - Whether or not to include headers when delivering to the SNS topic. - - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. - type: bool - default: No - type: dict - delivery_notifications: - description: - - Setup the SNS topic used to report delivery notifications. - - If omitted, delivery notifications will not be delivered to a SNS topic. - suboptions: - topic: - description: - - The ARN of the topic to send notifications to. - - If omitted, notifications will not be delivered to a SNS topic. - include_headers: - description: - - Whether or not to include headers when delivering to the SNS topic. - - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic. - type: bool - default: No - type: dict - feedback_forwarding: - description: - - Whether or not to enable feedback forwarding. - - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics. - type: 'bool' - default: True -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: Ensure example@example.com email identity exists - community.aws.ses_identity: - identity: example@example.com - state: present - -- name: Delete example@example.com email identity - community.aws.ses_identity: - email: example@example.com - state: absent - -- name: Ensure example.com domain identity exists - community.aws.ses_identity: - identity: example.com - state: present - -# Create an SNS topic and send bounce and complaint notifications to it -# instead of emailing the identity owner -- name: Ensure complaints-topic exists - community.aws.sns_topic: - name: "complaints-topic" - state: present - purge_subscriptions: False - register: topic_info - -- name: Deliver feedback to topic instead of owner email - community.aws.ses_identity: - identity: example@example.com - state: present - complaint_notifications: - topic: "{{ topic_info.sns_arn }}" - include_headers: True - bounce_notifications: - topic: "{{ topic_info.sns_arn }}" - include_headers: False - feedback_forwarding: False - -# Create an SNS topic for delivery notifications and leave complaints -# Being forwarded to the identity owner email -- name: Ensure delivery-notifications-topic exists - community.aws.sns_topic: - name: "delivery-notifications-topic" - state: present - purge_subscriptions: False - register: topic_info - -- name: Delivery notifications to topic - community.aws.ses_identity: - identity: example@example.com - state: present - delivery_notifications: - topic: "{{ topic_info.sns_arn }}" -""" - -RETURN = r""" -identity: - description: The identity being modified. - returned: success - type: str - sample: example@example.com -identity_arn: - description: The arn of the identity being modified. - returned: success - type: str - sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com -verification_attributes: - description: The verification information for the identity. - returned: success - type: complex - sample: { - "verification_status": "Pending", - "verification_token": "...." - } - contains: - verification_status: - description: The verification status of the identity. - type: str - sample: "Pending" - verification_token: - description: The verification token for a domain identity. - type: str -notification_attributes: - description: The notification setup for the identity. - returned: success - type: complex - sample: { - "bounce_topic": "arn:aws:sns:....", - "complaint_topic": "arn:aws:sns:....", - "delivery_topic": "arn:aws:sns:....", - "forwarding_enabled": false, - "headers_in_bounce_notifications_enabled": true, - "headers_in_complaint_notifications_enabled": true, - "headers_in_delivery_notifications_enabled": true - } - contains: - bounce_topic: - description: - - The ARN of the topic bounce notifications are delivered to. - - Omitted if bounce notifications are not delivered to a topic. - type: str - complaint_topic: - description: - - The ARN of the topic complaint notifications are delivered to. - - Omitted if complaint notifications are not delivered to a topic. - type: str - delivery_topic: - description: - - The ARN of the topic delivery notifications are delivered to. - - Omitted if delivery notifications are not delivered to a topic. - type: str - forwarding_enabled: - description: Whether or not feedback forwarding is enabled. - type: bool - headers_in_bounce_notifications_enabled: - description: Whether or not headers are included in messages delivered to the bounce topic. 
-      type: bool
-    headers_in_complaint_notifications_enabled:
-      description: Whether or not headers are included in messages delivered to the complaint topic.
-      type: bool
-    headers_in_delivery_notifications_enabled:
-      description: Whether or not headers are included in messages delivered to the delivery topic.
-      type: bool
-"""
-
-import time
-
-try:
-    from botocore.exceptions import BotoCoreError
-    from botocore.exceptions import ClientError
-except ImportError:
-    pass  # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
-from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-
-
-def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
-    # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
-    # just registered it. Suspect this is an eventual consistency issue on the AWS side.
-    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
-    # a consistent return from the module.
-    # To avoid this we have an internal retry that we use only after registering the identity.
-    for attempt in range(0, retries + 1):
-        try:
-            response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
-        except (BotoCoreError, ClientError) as e:
-            module.fail_json_aws(e, msg=f"Failed to retrieve identity verification attributes for {identity}")
-        identity_verification = response["VerificationAttributes"]
-        if identity in identity_verification:
-            break
-        time.sleep(retryDelay)
-    if identity not in identity_verification:
-        return None
-    return identity_verification[identity]
-
-
-def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
-    # Unpredictably get_identity_notifications doesn't include the notifications when we've
-    # just registered the identity.
-    # We don't want this complexity exposed to users of the module as they'd have to retry to ensure
-    # a consistent return from the module.
-    # To avoid this we have an internal retry that we use only when getting the current notification
-    # status for return.
-    for attempt in range(0, retries + 1):
-        try:
-            response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
-        except (BotoCoreError, ClientError) as e:
-            module.fail_json_aws(e, msg=f"Failed to retrieve identity notification attributes for {identity}")
-        notification_attributes = response["NotificationAttributes"]
-
-        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
-        # in the notification attributes when the identity is first registered. Suspect that this is caused by
-        # eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
-        #
-        # When this occurs, just return None and we'll assume no identity notification settings have been changed
-        # from the default, which is reasonable if this is just eventual consistency on creation.
-        # See: https://github.com/ansible/ansible/issues/36065
-        if identity in notification_attributes:
-            break
-        else:
-            # Paranoia check for coding errors: we only requested one identity, so if we get a different one
-            # something has gone very wrong.
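# (The length check that follows implements the paranoia guard described
# above.)  The bounded retry used by both getters boils down to this
# pattern; fetch and found are hypothetical callables, not module API:
#
#   import time
#
#   def fetch_until(fetch, found, retries=0, delay=10):
#       for _attempt in range(retries + 1):
#           result = fetch()
#           if found(result):
#               return result
#           time.sleep(delay)
#       return None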
- if len(notification_attributes) != 0: - module.fail_json( - msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( - identity, - notification_attributes.keys(), - ) - ) - time.sleep(retryDelay) - if identity not in notification_attributes: - return None - return notification_attributes[identity] - - -def desired_topic(module, notification_type): - arg_dict = module.params.get(notification_type.lower() + "_notifications") - if arg_dict: - return arg_dict.get("topic", None) - else: - return None - - -def update_notification_topic(connection, module, identity, identity_notifications, notification_type): - # Not passing the parameter should not cause any changes. - if module.params.get(f"{notification_type.lower()}_notifications") is None: - return False - - topic_key = notification_type + "Topic" - if identity_notifications is None: - # If there is no configuration for notifications cannot be being sent to topics - # hence assume None as the current state. - current_topic = None - elif topic_key in identity_notifications: - current_topic = identity_notifications[topic_key] - else: - # If there is information on the notifications setup but no information on the - # particular notification topic it's pretty safe to assume there's no topic for - # this notification. AWS API docs suggest this information will always be - # included but best to be defensive - current_topic = None - - required_topic = desired_topic(module, notification_type) - - if current_topic != required_topic: - try: - if not module.check_mode: - request_kwargs = { - "Identity": identity, - "NotificationType": notification_type, - "aws_retry": True, - } - - # The topic has to be omitted from the request to disable the notification. - if required_topic is not None: - request_kwargs["SnsTopic"] = required_topic - - connection.set_identity_notification_topic(**request_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg=f"Failed to set identity notification topic for {identity} {notification_type}", - ) - return True - return False - - -def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type): - arg_dict = module.params.get(notification_type.lower() + "_notifications") - header_key = "HeadersIn" + notification_type + "NotificationsEnabled" - if identity_notifications is None: - # If there is no configuration for topic notifications, headers cannot be being - # forwarded, hence assume false. - current = False - elif header_key in identity_notifications: - current = identity_notifications[header_key] - else: - # AWS API doc indicates that the headers in fields are optional. Unfortunately - # it's not clear on what this means. But it's a pretty safe assumption that it means - # headers are not included since most API consumers would interpret absence as false. 
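# Illustrative resolution of `current` for notification_type "Bounce",
# with hypothetical identity_notifications values:
#
#   None                                           -> current is False
#   {} (header key absent)                         -> current is False
#   {"HeadersInBounceNotificationsEnabled": True}  -> current is True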
- current = False - - if arg_dict is not None and "include_headers" in arg_dict: - required = arg_dict["include_headers"] - else: - required = False - - if current != required: - try: - if not module.check_mode: - connection.set_identity_headers_in_notifications_enabled( - Identity=identity, NotificationType=notification_type, Enabled=required, aws_retry=True - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg=f"Failed to set identity headers in notification for {identity} {notification_type}" - ) - return True - return False - - -def update_feedback_forwarding(connection, module, identity, identity_notifications): - if identity_notifications is None: - # AWS requires feedback forwarding to be enabled unless bounces and complaints - # are being handled by SNS topics. So in the absence of identity_notifications - # information existing feedback forwarding must be on. - current = True - elif "ForwardingEnabled" in identity_notifications: - current = identity_notifications["ForwardingEnabled"] - else: - # If there is information on the notifications setup but no information on the - # forwarding state it's pretty safe to assume forwarding is off. AWS API docs - # suggest this information will always be included but best to be defensive - current = False - - required = module.params.get("feedback_forwarding") - - if current != required: - try: - if not module.check_mode: - connection.set_identity_feedback_forwarding_enabled( - Identity=identity, ForwardingEnabled=required, aws_retry=True - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to set identity feedback forwarding for {identity}") - return True - return False - - -def create_mock_notifications_response(module): - resp = { - "ForwardingEnabled": module.params.get("feedback_forwarding"), - } - for notification_type in ("Bounce", "Complaint", "Delivery"): - arg_dict = module.params.get(notification_type.lower() + "_notifications") - if arg_dict is not None and "topic" in arg_dict: - resp[notification_type + "Topic"] = arg_dict["topic"] - - header_key = "HeadersIn" + notification_type + "NotificationsEnabled" - if arg_dict is not None and "include_headers" in arg_dict: - resp[header_key] = arg_dict["include_headers"] - else: - resp[header_key] = False - return resp - - -def update_identity_notifications(connection, module): - identity = module.params.get("identity") - changed = False - identity_notifications = get_identity_notifications(connection, module, identity) - - for notification_type in ("Bounce", "Complaint", "Delivery"): - changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type) - changed |= update_notification_topic_headers( - connection, module, identity, identity_notifications, notification_type - ) - - changed |= update_feedback_forwarding(connection, module, identity, identity_notifications) - - if changed or identity_notifications is None: - if module.check_mode: - identity_notifications = create_mock_notifications_response(module) - else: - identity_notifications = get_identity_notifications(connection, module, identity, retries=4) - return changed, identity_notifications - - -def validate_params_for_identity_present(module): - if module.params.get("feedback_forwarding") is False: - if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): - module.fail_json( - msg=( - "Invalid Parameter Value 'False' for 'feedback_forwarding'. 
AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" - ) - ) - - -def create_or_update_identity(connection, module, region, account_id): - identity = module.params.get("identity") - changed = False - verification_attributes = get_verification_attributes(connection, module, identity) - if verification_attributes is None: - try: - if not module.check_mode: - if "@" in identity: - connection.verify_email_identity(EmailAddress=identity, aws_retry=True) - else: - connection.verify_domain_identity(Domain=identity, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to verify identity {identity}") - if module.check_mode: - verification_attributes = { - "VerificationStatus": "Pending", - } - else: - verification_attributes = get_verification_attributes(connection, module, identity, retries=4) - changed = True - elif verification_attributes["VerificationStatus"] not in ("Pending", "Success"): - module.fail_json( - msg="Identity " + identity + " in bad status " + verification_attributes["VerificationStatus"], - verification_attributes=camel_dict_to_snake_dict(verification_attributes), - ) - - if verification_attributes is None: - module.fail_json(msg="Unable to load identity verification attributes after registering identity.") - - notifications_changed, notification_attributes = update_identity_notifications(connection, module) - changed |= notifications_changed - - if notification_attributes is None: - module.fail_json(msg="Unable to load identity notification attributes.") - - identity_arn = "arn:aws:ses:" + region + ":" + account_id + ":identity/" + identity - - module.exit_json( - changed=changed, - identity=identity, - identity_arn=identity_arn, - verification_attributes=camel_dict_to_snake_dict(verification_attributes), - notification_attributes=camel_dict_to_snake_dict(notification_attributes), - ) - - -def destroy_identity(connection, module): - identity = module.params.get("identity") - changed = False - verification_attributes = get_verification_attributes(connection, module, identity) - if verification_attributes is not None: - try: - if not module.check_mode: - connection.delete_identity(Identity=identity, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to delete identity {identity}") - changed = True - - module.exit_json( - changed=changed, - identity=identity, - ) - - -def get_account_id(module): - sts = module.client("sts") - try: - caller_identity = sts.get_caller_identity() - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to retrieve caller identity") - return caller_identity["Account"] - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "identity": dict(required=True, type="str"), - "state": dict(default="present", choices=["present", "absent"]), - "bounce_notifications": dict(type="dict"), - "complaint_notifications": dict(type="dict"), - "delivery_notifications": dict(type="dict"), - "feedback_forwarding": dict(default=True, type="bool"), - }, - supports_check_mode=True, - ) - - for notification_type in ("bounce", "complaint", "delivery"): - param_name = notification_type + "_notifications" - arg_dict = module.params.get(param_name) - if arg_dict: - extra_keys = [x for x in arg_dict.keys() if x not in ("topic", "include_headers")] - if extra_keys: - module.fail_json( - msg="Unexpected keys " - + str(extra_keys) - + " in " - + param_name - + " valid keys are topic or 
include_headers" - ) - - # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. - # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but - # the ansible build runs multiple instances of the test in parallel that's caused throttling - # failures so apply a jittered backoff to call SES calls. - connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - - state = module.params.get("state") - - if state == "present": - region = module.region - account_id = get_account_id(module) - validate_params_for_identity_present(module) - create_or_update_identity(connection, module, region, account_id) - else: - destroy_identity(connection, module) - - -if __name__ == "__main__": - main() diff --git a/ses_identity_policy.py b/ses_identity_policy.py deleted file mode 100644 index 9b7a3d6b6fa..00000000000 --- a/ses_identity_policy.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ses_identity_policy -version_added: 1.0.0 -short_description: Manages SES sending authorization policies -description: - - This module allows the user to manage sending authorization policies associated with an SES - identity (email or domain). - - SES authorization sending policies can be used to control what actors are able to send email - on behalf of the validated identity and what conditions must be met by the sent emails. - - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_identity_policy). - The usage did not change. -author: - - Ed Costello (@orthanc) - -options: - identity: - description: | - The SES identity to attach or remove a policy from. This can be either the full ARN or just - the verified email or domain. - required: true - type: str - policy_name: - description: The name used to identify the policy within the scope of the identity it's attached to. - required: true - type: str - policy: - description: A properly formatted JSON sending authorization policy. Required when I(state=present). - type: json - state: - description: Whether to create(or update) or delete the authorization policy on the identity. - default: present - choices: [ 'present', 'absent' ] - type: str -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -- name: add sending authorization policy to domain identity - community.aws.ses_identity_policy: - identity: example.com - policy_name: ExamplePolicy - policy: "{{ lookup('template', 'policy.json.j2') }}" - state: present - -- name: add sending authorization policy to email identity - community.aws.ses_identity_policy: - identity: example@example.com - policy_name: ExamplePolicy - policy: "{{ lookup('template', 'policy.json.j2') }}" - state: present - -- name: add sending authorization policy to identity using ARN - community.aws.ses_identity_policy: - identity: "arn:aws:ses:us-east-1:12345678:identity/example.com" - policy_name: ExamplePolicy - policy: "{{ lookup('template', 'policy.json.j2') }}" - state: present - -- name: remove sending authorization policy - community.aws.ses_identity_policy: - identity: example.com - policy_name: ExamplePolicy - state: absent -""" - -RETURN = r""" -policies: - description: A list of all policies present on the identity after the operation. - returned: success - type: list - sample: [ExamplePolicy] -""" - -import json - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_identity_policy(connection, module, identity, policy_name): - try: - response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to retrieve identity policy {policy_name}") - policies = response["Policies"] - if policy_name in policies: - return policies[policy_name] - return None - - -def create_or_update_identity_policy(connection, module): - identity = module.params.get("identity") - policy_name = module.params.get("policy_name") - required_policy = module.params.get("policy") - required_policy_dict = json.loads(required_policy) - - changed = False - policy = get_identity_policy(connection, module, identity, policy_name) - policy_dict = json.loads(policy) if policy else None - if compare_policies(policy_dict, required_policy_dict): - changed = True - try: - if not module.check_mode: - connection.put_identity_policy( - Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to put identity policy {policy_name}") - - # Load the list of applied policies to include in the response. - # In principle we should be able to just return the response, but given - # eventual consistency behaviours in AWS it's plausible that we could - # end up with a list that doesn't contain the policy we just added. - # So out of paranoia check for this case and if we're missing the policy - # just make sure it's present. 
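# For instance (hypothetical state): if put_identity_policy succeeded but
# list_identity_policies still returns ["OldPolicy"], the code below appends
# the just-written name so the module returns ["OldPolicy", "ExamplePolicy"].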
- # - # As a nice side benefit this also means the return is correct in check mode - try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to list identity policies") - if policy_name is not None and policy_name not in policies_present: - policies_present = list(policies_present) - policies_present.append(policy_name) - module.exit_json( - changed=changed, - policies=policies_present, - ) - - -def delete_identity_policy(connection, module): - identity = module.params.get("identity") - policy_name = module.params.get("policy_name") - - changed = False - try: - policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"] - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to list identity policies") - if policy_name in policies_present: - try: - if not module.check_mode: - connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Failed to delete identity policy {policy_name}") - changed = True - policies_present = list(policies_present) - policies_present.remove(policy_name) - - module.exit_json( - changed=changed, - policies=policies_present, - ) - - -def main(): - module = AnsibleAWSModule( - argument_spec={ - "identity": dict(required=True, type="str"), - "state": dict(default="present", choices=["present", "absent"]), - "policy_name": dict(required=True, type="str"), - "policy": dict(type="json", default=None), - }, - required_if=[["state", "present", ["policy"]]], - supports_check_mode=True, - ) - - # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. - # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but - # the ansible build runs multiple instances of the test in parallel that's caused throttling - # failures so apply a jittered backoff to call SES calls. - connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - - state = module.params.get("state") - - if state == "present": - create_or_update_identity_policy(connection, module) - else: - delete_identity_policy(connection, module) - - -if __name__ == "__main__": - main() diff --git a/ses_rule_set.py b/ses_rule_set.py deleted file mode 100644 index cf478c0f90a..00000000000 --- a/ses_rule_set.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Ben Tomasik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ses_rule_set -version_added: 1.0.0 -short_description: Manages SES inbound receipt rule sets -description: - - This module allows you to create, delete, and manage SES receipt rule sets - - Prior to release 5.0.0 this module was called C(community.aws.aws_ses_rule_set). - The usage did not change. -author: - - "Ben Tomasik (@tomislacker)" - - "Ed Costello (@orthanc)" -options: - name: - description: - - The name of the receipt rule set. - required: True - type: str - state: - description: - - Whether to create (or update) or destroy the receipt rule set. - required: False - default: present - choices: ["absent", "present"] - type: str - active: - description: - - Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present). 
- - If omitted, the active rule set will not be changed. - If C(True) then this rule set will be made active and all others inactive. - If C(False) then this rule set will be deactivated. Be careful with this as you can end up with no active rule set. - type: bool - required: False - force: - description: - - When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set). - type: bool - required: False - default: False -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: Create default rule set and activate it if not already - community.aws.ses_rule_set: - name: default-rule-set - state: present - active: true - -- name: Create some arbitrary rule set but do not activate it - community.aws.ses_rule_set: - name: arbitrary-rule-set - state: present - -- name: Explicitly deactivate the default rule set leaving no active rule set - community.aws.ses_rule_set: - name: default-rule-set - state: present - active: false - -- name: Remove an arbitrary inactive rule set - community.aws.ses_rule_set: - name: arbitrary-rule-set - state: absent - -- name: Remove a rule set even if we have to first deactivate it to remove it - community.aws.ses_rule_set: - name: default-rule-set - state: absent - force: true -""" - -RETURN = r""" -active: - description: Whether the SES rule set is active. - returned: success if I(state) is C(present) - type: bool - sample: true -rule_sets: - description: The list of SES receipt rule sets that exist after any changes. - returned: success - type: list - sample: [{ - "created_timestamp": "2018-02-25T01:20:32.690000+00:00", - "name": "default-rule-set" - }] -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def list_rule_sets(client, module): - try: - response = client.list_receipt_rule_sets(aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't list rule sets.") - return response["RuleSets"] - - -def rule_set_in(name, rule_sets): - return any(s for s in rule_sets if s["Name"] == name) - - -def ruleset_active(client, module, name): - try: - active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't get the active rule set.") - if active_rule_set is not None and "Metadata" in active_rule_set: - return name == active_rule_set["Metadata"]["Name"] - else: - # Metadata was not set meaning there is no active rule set - return False - - -def deactivate_rule_set(client, module): - try: - # No ruleset name deactivates all rulesets - client.set_active_receipt_rule_set(aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't set active rule set to None.") - - -def update_active_rule_set(client, module, name, desired_active): - check_mode = module.check_mode - - active = ruleset_active(client, module, name) - - changed = False - if desired_active is not None: - if desired_active and
not active: - if not check_mode: - try: - client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't set active rule set to {name}.") - changed = True - active = True - elif not desired_active and active: - if not check_mode: - deactivate_rule_set(client, module) - changed = True - active = False - return changed, active - - -def create_or_update_rule_set(client, module): - name = module.params.get("name") - check_mode = module.check_mode - changed = False - - rule_sets = list_rule_sets(client, module) - if not rule_set_in(name, rule_sets): - if not check_mode: - try: - client.create_receipt_rule_set(RuleSetName=name, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't create rule set {name}.") - changed = True - rule_sets = list(rule_sets) - rule_sets.append( - { - "Name": name, - } - ) - - (active_changed, active) = update_active_rule_set(client, module, name, module.params.get("active")) - changed |= active_changed - - module.exit_json( - changed=changed, - active=active, - rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets], - ) - - -def remove_rule_set(client, module): - name = module.params.get("name") - check_mode = module.check_mode - changed = False - - rule_sets = list_rule_sets(client, module) - if rule_set_in(name, rule_sets): - active = ruleset_active(client, module, name) - if active and not module.params.get("force"): - module.fail_json( - msg=( - f"Couldn't delete rule set {name} because it is currently active. Set force=true to delete an" - " active ruleset." - ), - error={ - "code": "CannotDelete", - "message": f"Cannot delete active rule set: {name}", - }, - ) - if not check_mode: - if active and module.params.get("force"): - deactivate_rule_set(client, module) - try: - client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg=f"Couldn't delete rule set {name}.") - changed = True - rule_sets = [x for x in rule_sets if x["Name"] != name] - - module.exit_json( - changed=changed, - rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets], - ) - - -def main(): - argument_spec = dict( - name=dict(type="str", required=True), - state=dict(type="str", default="present", choices=["present", "absent"]), - active=dict(type="bool"), - force=dict(type="bool", default=False), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - state = module.params.get("state") - - # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs. - # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but - # the ansible build runs multiple instances of the test in parallel that's caused throttling - # failures so apply a jittered backoff to call SES calls. - client = module.client("ses", retry_decorator=AWSRetry.jittered_backoff()) - - if state == "absent": - remove_rule_set(client, module) - else: - create_or_update_rule_set(client, module) - - -if __name__ == "__main__": - main() diff --git a/sns.py b/sns.py deleted file mode 100644 index 493855b76e0..00000000000 --- a/sns.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Michael J. 
Schultz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: sns -short_description: Send Amazon Simple Notification Service messages -version_added: 1.0.0 -description: - - Sends a notification to a topic on your Amazon SNS account. -author: - - Michael J. Schultz (@mjschultz) - - Paul Arthur (@flowerysong) -options: - msg: - description: - - Default message for subscriptions without a more specific message. - required: true - aliases: [ "default" ] - type: str - subject: - description: - - Message subject. - type: str - topic: - description: - - The name or ARN of the topic to publish to. - required: true - type: str - email: - description: - - Message to send to email subscriptions. - type: str - email_json: - description: - - Message to send to email-json subscriptions. - type: str - sqs: - description: - - Message to send to SQS subscriptions. - type: str - sms: - description: - - Message to send to SMS subscriptions. - type: str - http: - description: - - Message to send to HTTP subscriptions. - type: str - https: - description: - - Message to send to HTTPS subscriptions. - type: str - application: - description: - - Message to send to application subscriptions. - type: str - lambda: - description: - - Message to send to Lambda subscriptions. - type: str - message_attributes: - description: - - Dictionary of message attributes. These are optional structured data entries to be sent along to the endpoint. - - This is in AWS's distinct Name/Type/Value format; see example below. - type: dict - message_structure: - description: - - The payload format to use for the message. - - This must be C(json) to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)). - - It must be C(string) to support I(message_attributes). - default: json - choices: ['json', 'string'] - type: str - message_group_id: - description: - - A tag which is used to process messages that belong to the same group in a FIFO manner. - - Has to be included when publishing a message to a FIFO topic. - - Can contain up to 128 alphanumeric characters and punctuation. - type: str - version_added: 5.4.0 - message_deduplication_id: - description: - - Only used in connection with I(message_group_id). - - Overwrites the auto-generated MessageDeduplicationId. - - Can contain up to 128 alphanumeric characters and punctuation. - - Messages with the same deduplication ID are recognized as the same message. - - Gets overwritten by an auto-generated token if the topic has ContentBasedDeduplication set. - type: str - version_added: 5.4.0 - -extends_documentation_fragment: - - amazon.aws.region.modules - - amazon.aws.common.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Send default notification message via SNS - community.aws.sns: - msg: '{{ inventory_hostname }} has completed the play.' - subject: Deploy complete! - topic: deploy - delegate_to: localhost - -- name: Send notification messages via SNS with short message for SMS - community.aws.sns: - msg: '{{ inventory_hostname }} has completed the play.' - sms: deployed! - subject: Deploy complete! - topic: deploy - delegate_to: localhost - -- name: Send message with message_attributes - community.aws.sns: - topic: "deploy" - msg: "message with extra details!"
- message_attributes: - channel: - data_type: String - string_value: "mychannel" - color: - data_type: String - string_value: "green" - delegate_to: localhost - -- name: Send message to a fifo topic - community.aws.sns: - topic: "deploy" - msg: "Message with message group id" - subject: Deploy complete! - message_group_id: "deploy-1" - delegate_to: localhost -""" - -RETURN = r""" -msg: - description: Human-readable diagnostic information - returned: always - type: str - sample: OK -message_id: - description: The message ID of the submitted message - returned: when success - type: str - sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b -sequence_number: - description: A 128 bits long sequence number which gets assigned to the message in fifo topics - returned: when success - type: str -""" - -import json - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - protocols = [ - "http", - "https", - "email", - "email_json", - "sms", - "sqs", - "application", - "lambda", - ] - - argument_spec = dict( - msg=dict(required=True, aliases=["default"]), - subject=dict(), - topic=dict(required=True), - message_attributes=dict(type="dict"), - message_structure=dict(choices=["json", "string"], default="json"), - message_group_id=dict(), - message_deduplication_id=dict(), - ) - - for p in protocols: - argument_spec[p] = dict() - - module = AnsibleAWSModule(argument_spec=argument_spec) - - sns_kwargs = dict( - Message=module.params["msg"], - Subject=module.params["subject"], - MessageStructure=module.params["message_structure"], - ) - - if module.params["message_attributes"]: - if module.params["message_structure"] != "string": - module.fail_json(msg='message_attributes is only supported when the message_structure is "string".') - sns_kwargs["MessageAttributes"] = module.params["message_attributes"] - - if module.params["message_group_id"]: - sns_kwargs["MessageGroupId"] = module.params["message_group_id"] - if module.params["message_deduplication_id"]: - sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"] - - dict_msg = {"default": sns_kwargs["Message"]} - - for p in protocols: - if module.params[p]: - if sns_kwargs["MessageStructure"] != "json": - module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".') - dict_msg[p.replace("_", "-")] = module.params[p] - - client = module.client("sns") - - topic = module.params["topic"] - if ":" in topic: - # Short names can't contain ':' so we'll assume this is the full ARN - sns_kwargs["TopicArn"] = topic - else: - sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) - - if not sns_kwargs["TopicArn"]: - module.fail_json(msg=f"Could not find topic: {topic}") - - if sns_kwargs["MessageStructure"] == "json": - sns_kwargs["Message"] = json.dumps(dict_msg) - - try: - result = client.publish(**sns_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to publish message") - - sns_result = dict(msg="OK", message_id=result["MessageId"]) - - if module.params["message_group_id"]: - sns_result["sequence_number"] = result["SequenceNumber"] - - module.exit_json(**sns_result) - - -if __name__ == "__main__": - main() diff --git 
a/sns_topic.py b/sns_topic.py deleted file mode 100644 index 22a2c82c216..00000000000 --- a/sns_topic.py +++ /dev/null @@ -1,717 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: sns_topic -short_description: Manages AWS SNS topics and subscriptions -version_added: 1.0.0 -description: - - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. -author: - - "Joel Thompson (@joelthompson)" - - "Fernando Jose Pando (@nand0p)" - - "Will Thames (@willthames)" -options: - name: - description: - - The name or ARN of the SNS topic to manage. - required: true - type: str - topic_type: - description: - - The type of topic that should be created. Either C(standard) or C(fifo) (first-in, first-out). - - Some regions, including GovCloud regions, do not support FIFO topics. - Use a default value of 'standard' or omit the option if the region - does not support FIFO topics. - choices: ["standard", "fifo"] - default: 'standard' - type: str - version_added: 2.0.0 - state: - description: - - Whether to create or destroy an SNS topic. - default: present - choices: ["absent", "present"] - type: str - display_name: - description: - - Display name of the topic. - type: str - policy: - description: - - Policy to apply to the SNS topic. - - Policy body can be YAML or JSON. - - This is required for certain use cases for example with S3 bucket notifications. - type: dict - delivery_policy: - description: - - Delivery policy to apply to the SNS topic. - type: dict - suboptions: - http: - description: - - Delivery policy for HTTP(S) messages. - - See U(https://docs.aws.amazon.com/sns/latest/dg/sns-message-delivery-retries.html) - for more information. - type: dict - required: false - suboptions: - disableSubscriptionOverrides: - description: - - Applies this policy to all subscriptions, even if they have their own policies. - type: bool - required: false - defaultThrottlePolicy: - description: - - Throttle the rate of messages sent to subscriptions. - type: dict - suboptions: - maxReceivesPerSecond: - description: - - The maximum number of deliveries per second per subscription. - type: int - required: true - required: false - defaultHealthyRetryPolicy: - description: - - Retry policy for HTTP(S) messages. - type: dict - required: true - suboptions: - minDelayTarget: - description: - - The minimum delay for a retry. - type: int - required: true - maxDelayTarget: - description: - - The maximum delay for a retry. - type: int - required: true - numRetries: - description: - - The total number of retries. - type: int - required: true - numMaxDelayRetries: - description: - - The number of retries with the maximum delay between them. - type: int - required: true - numMinDelayRetries: - description: - - The number of retries with just the minimum delay between them. - type: int - required: true - numNoDelayRetries: - description: - - The number of retries to be performed immediately. - type: int - required: true - backoffFunction: - description: - - The function for backoff between retries. - type: str - required: true - choices: ['arithmetic', 'exponential', 'geometric', 'linear'] - subscriptions: - description: - - List of subscriptions to apply to the topic. Note that AWS requires - subscriptions to be confirmed, so you will need to confirm any new - subscriptions.
- suboptions: - endpoint: - description: Endpoint of subscription. - required: true - protocol: - description: Protocol of subscription. - required: true - attributes: - description: Attributes of subscription. Only supports RawMessageDelivery for SQS endpoints. - default: {} - version_added: "4.1.0" - type: list - elements: dict - default: [] - purge_subscriptions: - description: - - "Whether to purge any subscriptions not listed here. NOTE: AWS does not - allow you to purge any PendingConfirmation subscriptions, so if any - exist and would be purged, they are silently skipped. This means that - somebody could come back later and confirm the subscription. Sorry. - Blame Amazon." - default: true - type: bool - content_based_deduplication: - description: - - Whether to enable content-based deduplication for this topic. - - Ignored unless I(topic_type=fifo). - - Defaults to C(disabled). - choices: ["disabled", "enabled"] - type: str - version_added: 5.3.0 -notes: - - Support for I(tags) and I(purge_tags) was added in release 5.3.0. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - -- name: Create alarm SNS topic - community.aws.sns_topic: - name: "alarms" - state: present - display_name: "alarm SNS topic" - delivery_policy: - http: - defaultHealthyRetryPolicy: - minDelayTarget: 2 - maxDelayTarget: 4 - numRetries: 9 - numMaxDelayRetries: 5 - numMinDelayRetries: 2 - numNoDelayRetries: 2 - backoffFunction: "linear" - disableSubscriptionOverrides: True - defaultThrottlePolicy: - maxReceivesPerSecond: 10 - subscriptions: - - endpoint: "my_email_address@example.com" - protocol: "email" - - endpoint: "my_mobile_number" - protocol: "sms" - -- name: Create a topic permitting S3 bucket notifications - community.aws.sns_topic: - name: "S3Notifications" - state: present - display_name: "S3 notifications SNS topic" - policy: - Id: s3-topic-policy - Version: 2012-10-17 - Statement: - - Sid: Statement-id - Effect: Allow - Resource: "arn:aws:sns:*:*:S3Notifications" - Principal: - Service: s3.amazonaws.com - Action: sns:Publish - Condition: - ArnLike: - aws:SourceArn: "arn:aws:s3:*:*:SomeBucket" - -- name: Example deleting a topic - community.aws.sns_topic: - name: "ExampleTopic" - state: absent -""" - -RETURN = r""" -sns_arn: - description: The ARN of the topic you are modifying - type: str - returned: always - sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name" -sns_topic: - description: Dict of sns topic details - type: complex - returned: always - contains: - attributes_set: - description: list of attributes set during this run - returned: always - type: list - sample: [] - check_mode: - description: whether check mode was on - returned: always - type: bool - sample: false - content_based_deduplication: - description: Whether or not content_based_deduplication was set - returned: always - type: str - sample: disabled - version_added: 5.3.0 - delivery_policy: - description: Delivery policy for the SNS topic - returned: when topic is owned by this AWS account - type: str - sample: > - {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0, - "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}} - display_name: - description: Display name for SNS topic - returned: when topic is owned by this AWS account - type: str - sample: My topic name - name: - description:
Topic name - returned: always - type: str - sample: ansible-test-dummy-topic - owner: - description: AWS account that owns the topic - returned: when topic is owned by this AWS account - type: str - sample: '123456789012' - policy: - description: Policy for the SNS topic - returned: when topic is owned by this AWS account - type: str - sample: > - {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"}, - "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} - state: - description: whether the topic is present or absent - returned: always - type: str - sample: present - subscriptions: - description: List of subscribers to the topic in this AWS account - returned: always - type: list - sample: [] - subscriptions_added: - description: List of subscribers added in this run - returned: always - type: list - sample: [] - subscriptions_confirmed: - description: Count of confirmed subscriptions - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_deleted: - description: Count of deleted subscriptions - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_existing: - description: List of existing subscriptions - returned: always - type: list - sample: [] - subscriptions_new: - description: List of new subscriptions - returned: always - type: list - sample: [] - subscriptions_pending: - description: Count of pending subscriptions - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_purge: - description: Whether or not purge_subscriptions was set - returned: always - type: bool - sample: true - topic_arn: - description: ARN of the SNS topic (equivalent to sns_arn) - returned: when topic is owned by this AWS account - type: str - sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic - topic_created: - description: Whether the topic was created - returned: always - type: bool - sample: false - topic_deleted: - description: Whether the topic was deleted - returned: always - type: bool - sample: false -""" - -import json - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.sns import list_topics -from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup -from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies -from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions -from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint -from ansible_collections.community.aws.plugins.module_utils.sns import get_info -from ansible_collections.community.aws.plugins.module_utils.sns import update_tags - - -class SnsTopicManager(object): - """Handles SNS Topic creation and 
destruction""" - - def __init__( - self, - module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode, - ): - self.connection = module.client("sns") - self.module = module - self.name = name - self.topic_type = topic_type - self.state = state - self.display_name = display_name - self.policy = policy - self.delivery_policy = scrub_none_parameters(delivery_policy) if delivery_policy else None - self.subscriptions = subscriptions - self.subscriptions_existing = [] - self.subscriptions_deleted = [] - self.subscriptions_added = [] - self.subscriptions_attributes_set = [] - self.desired_subscription_attributes = dict() - self.purge_subscriptions = purge_subscriptions - self.content_based_deduplication = content_based_deduplication - self.check_mode = check_mode - self.topic_created = False - self.topic_deleted = False - self.topic_arn = None - self.attributes_set = [] - self.tags = tags - self.purge_tags = purge_tags - - def _create_topic(self): - attributes = {} - tags = [] - - # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) - # don't support the attribute being set, even to False. - if self.topic_type == "fifo": - attributes["FifoTopic"] = "true" - if not self.name.endswith(".fifo"): - self.name = self.name + ".fifo" - - if self.tags: - tags = ansible_dict_to_boto3_tag_list(self.tags) - - if not self.check_mode: - try: - response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Couldn't create topic {self.name}") - self.topic_arn = response["TopicArn"] - return True - - def _set_topic_attrs(self): - changed = False - try: - topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Couldn't get topic attributes for topic {self.topic_arn}") - - if self.display_name and self.display_name != topic_attributes["DisplayName"]: - changed = True - self.attributes_set.append("display_name") - if not self.check_mode: - try: - self.connection.set_topic_attributes( - TopicArn=self.topic_arn, AttributeName="DisplayName", AttributeValue=self.display_name - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't set display name") - - if self.policy and compare_policies(self.policy, json.loads(topic_attributes["Policy"])): - changed = True - self.attributes_set.append("policy") - if not self.check_mode: - try: - self.connection.set_topic_attributes( - TopicArn=self.topic_arn, AttributeName="Policy", AttributeValue=json.dumps(self.policy) - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't set topic policy") - - # Set content-based deduplication attribute. Ignore if topic_type is not fifo. 
- if ( - "FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true" - ) and self.content_based_deduplication: - enabled = "true" if self.content_based_deduplication in "enabled" else "false" - if enabled != topic_attributes["ContentBasedDeduplication"]: - changed = True - self.attributes_set.append("content_based_deduplication") - if not self.check_mode: - try: - self.connection.set_topic_attributes( - TopicArn=self.topic_arn, AttributeName="ContentBasedDeduplication", AttributeValue=enabled - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication") - - if self.delivery_policy and ( - "DeliveryPolicy" not in topic_attributes - or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes["DeliveryPolicy"])) - ): - changed = True - self.attributes_set.append("delivery_policy") - if not self.check_mode: - try: - self.connection.set_topic_attributes( - TopicArn=self.topic_arn, - AttributeName="DeliveryPolicy", - AttributeValue=json.dumps(self.delivery_policy), - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") - return changed - - def _set_topic_subs(self): - changed = False - subscriptions_existing_list = set() - desired_subscriptions = [ - (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) for sub in self.subscriptions - ] - - for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub["Protocol"], sub["Endpoint"]) - subscriptions_existing_list.add(sub_key) - if ( - self.purge_subscriptions - and sub_key not in desired_subscriptions - and sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted") - ): - changed = True - self.subscriptions_deleted.append(sub_key) - if not self.check_mode: - try: - self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") - - for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list): - changed = True - self.subscriptions_added.append((protocol, endpoint)) - if not self.check_mode: - try: - self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Couldn't subscribe to topic {self.topic_arn}") - return changed - - def _init_desired_subscription_attributes(self): - for sub in self.subscriptions: - sub_key = (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) - tmp_dict = sub.get("attributes", {}) - # aws sdk expects values to be strings - # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes - for k, v in tmp_dict.items(): - tmp_dict[k] = str(v) - - self.desired_subscription_attributes[sub_key] = tmp_dict - - def _set_topic_subs_attributes(self): - changed = False - for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): - sub_key = (sub["Protocol"], sub["Endpoint"]) - sub_arn = sub["SubscriptionArn"] - if not self.desired_subscription_attributes.get(sub_key): - # subscription attributes aren't defined in desired, skipping - continue - - try: - sub_current_attributes = 
self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)[ - "Attributes" - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, f"Couldn't get subscription attributes for subscription {sub_arn}") - - raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") - if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: - if sub_current_attributes["RawMessageDelivery"].lower() != raw_message.lower(): - changed = True - if not self.check_mode: - try: - self.connection.set_subscription_attributes( - SubscriptionArn=sub_arn, AttributeName="RawMessageDelivery", AttributeValue=raw_message - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute") - - return changed - - def _delete_subscriptions(self): - # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days - # https://forums.aws.amazon.com/thread.jspa?threadID=85993 - subscriptions = list_topic_subscriptions(self.connection, self.module, self.topic_arn) - if not subscriptions: - return False - for sub in subscriptions: - if sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted"): - self.subscriptions_deleted.append(sub["SubscriptionArn"]) - if not self.check_mode: - try: - self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic") - return True - - def _delete_topic(self): - self.topic_deleted = True - if not self.check_mode: - try: - self.connection.delete_topic(TopicArn=self.topic_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Couldn't delete topic {self.topic_arn}") - return True - - def _name_is_arn(self): - return bool(parse_aws_arn(self.name)) - - def ensure_ok(self): - changed = False - self.populate_topic_arn() - if not self.topic_arn: - changed = self._create_topic() - if self.topic_arn in list_topics(self.connection, self.module): - changed |= self._set_topic_attrs() - elif self.display_name or self.policy or self.delivery_policy: - self.module.fail_json( - msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account" - ) - changed |= self._set_topic_subs() - self._init_desired_subscription_attributes() - if self.topic_arn in list_topics(self.connection, self.module): - changed |= self._set_topic_subs_attributes() - elif any(self.desired_subscription_attributes.values()): - self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account") - # Check tagging - changed |= update_tags(self.connection, self.module, self.topic_arn) - - return changed - - def ensure_gone(self): - changed = False - self.populate_topic_arn() - if self.topic_arn: - if self.topic_arn not in list_topics(self.connection, self.module): - self.module.fail_json( - msg="Cannot use state=absent with third party ARN. 
Use subscribers=[] to unsubscribe" - ) - changed = self._delete_subscriptions() - changed |= self._delete_topic() - return changed - - def populate_topic_arn(self): - if self._name_is_arn(): - self.topic_arn = self.name - return - - name = self.name - if self.topic_type == "fifo" and not name.endswith(".fifo"): - name += ".fifo" - self.topic_arn = topic_arn_lookup(self.connection, self.module, name) - - -def main(): - # We're kinda stuck with CamelCase here, it would be nice to switch to - # snake_case, but we'd need to purge out the alias entries - http_retry_args = dict( - minDelayTarget=dict(type="int", required=True), - maxDelayTarget=dict(type="int", required=True), - numRetries=dict(type="int", required=True), - numMaxDelayRetries=dict(type="int", required=True), - numMinDelayRetries=dict(type="int", required=True), - numNoDelayRetries=dict(type="int", required=True), - backoffFunction=dict(type="str", required=True, choices=["arithmetic", "exponential", "geometric", "linear"]), - ) - http_delivery_args = dict( - defaultHealthyRetryPolicy=dict(type="dict", required=True, options=http_retry_args), - disableSubscriptionOverrides=dict(type="bool", required=False), - defaultThrottlePolicy=dict( - type="dict", - required=False, - options=dict( - maxReceivesPerSecond=dict(type="int", required=True), - ), - ), - ) - delivery_args = dict( - http=dict(type="dict", required=False, options=http_delivery_args), - ) - - argument_spec = dict( - name=dict(required=True), - topic_type=dict(type="str", default="standard", choices=["standard", "fifo"]), - state=dict(default="present", choices=["present", "absent"]), - display_name=dict(), - policy=dict(type="dict"), - delivery_policy=dict(type="dict", options=delivery_args), - subscriptions=dict(default=[], type="list", elements="dict"), - purge_subscriptions=dict(type="bool", default=True), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - content_based_deduplication=dict(choices=["enabled", "disabled"]), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - name = module.params.get("name") - topic_type = module.params.get("topic_type") - state = module.params.get("state") - display_name = module.params.get("display_name") - policy = module.params.get("policy") - delivery_policy = module.params.get("delivery_policy") - subscriptions = module.params.get("subscriptions") - purge_subscriptions = module.params.get("purge_subscriptions") - content_based_deduplication = module.params.get("content_based_deduplication") - check_mode = module.check_mode - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - - sns_topic = SnsTopicManager( - module, - name, - topic_type, - state, - display_name, - policy, - delivery_policy, - subscriptions, - purge_subscriptions, - tags, - purge_tags, - content_based_deduplication, - check_mode, - ) - - if state == "present": - changed = sns_topic.ensure_ok() - elif state == "absent": - changed = sns_topic.ensure_gone() - - sns_facts = dict( - changed=changed, - sns_arn=sns_topic.topic_arn, - sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn), - ) - - module.exit_json(**sns_facts) - - -if __name__ == "__main__": - main() diff --git a/sns_topic_info.py b/sns_topic_info.py deleted file mode 100644 index 51ec8372eac..00000000000 --- a/sns_topic_info.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ 
(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: sns_topic_info -short_description: Gather information about AWS SNS topics -version_added: 3.2.0 -description: -- The M(community.aws.sns_topic_info) module allows you to get all AWS SNS topics or the properties of a specific AWS SNS topic. -author: -- "Alina Buzachis (@alinabuzachis)" -options: - topic_arn: - description: The ARN of the AWS SNS topic for which you wish to find subscriptions or list attributes. - required: false - type: str -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: list all the topics - community.aws.sns_topic_info: - register: sns_topic_list - -- name: get info on specific topic - community.aws.sns_topic_info: - topic_arn: "{{ sns_arn }}" - register: sns_topic_info -""" - -RETURN = r""" -result: - description: - - The result containing the details of one or all AWS SNS topics. - returned: success - type: list - contains: - sns_arn: - description: The ARN of the topic. - type: str - returned: always - sample: "arn:aws:sns:us-east-2:123456789012:my_topic_name" - sns_topic: - description: Dict of sns topic details. - type: complex - returned: always - contains: - content_based_deduplication: - description: Whether or not content_based_deduplication was set - returned: always - type: str - sample: "true" - delivery_policy: - description: Delivery policy for the SNS topic. - returned: when topic is owned by this AWS account - type: str - sample: > - {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0, - "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}} - display_name: - description: Display name for SNS topic. - returned: when topic is owned by this AWS account - type: str - sample: My topic name - owner: - description: AWS account that owns the topic. - returned: when topic is owned by this AWS account - type: str - sample: '123456789012' - policy: - description: Policy for the SNS topic. - returned: when topic is owned by this AWS account - type: str - sample: > - {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"}, - "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]} - subscriptions: - description: List of subscribers to the topic in this AWS account. - returned: always - type: list - sample: [] - subscriptions_added: - description: List of subscribers added in this run. - returned: always - type: list - sample: [] - subscriptions_confirmed: - description: Count of confirmed subscriptions. - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_deleted: - description: Count of deleted subscriptions. - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_existing: - description: List of existing subscriptions. - returned: always - type: list - sample: [] - subscriptions_new: - description: List of new subscriptions. - returned: always - type: list - sample: [] - subscriptions_pending: - description: Count of pending subscriptions. - returned: when topic is owned by this AWS account - type: str - sample: '0' - subscriptions_purge: - description: Whether or not purge_subscriptions was set.
- returned: always - type: bool - sample: true - topic_arn: - description: ARN of the SNS topic (equivalent to sns_arn). - returned: when topic is owned by this AWS account - type: str - sample: arn:aws:sns:us-east-2:123456789012:ansible-test-dummy-topic - topic_type: - description: The type of topic. - type: str - sample: "standard" -""" - - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.community.aws.plugins.module_utils.sns import list_topics -from ansible_collections.community.aws.plugins.module_utils.sns import get_info - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - topic_arn=dict(type="str", required=False), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - topic_arn = module.params.get("topic_arn") - - try: - connection = module.client("sns", retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS.") - - if topic_arn: - results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn)) - else: - results = list_topics(connection, module) - - module.exit_json(result=results) - - -if __name__ == "__main__": - main() diff --git a/sqs_queue.py b/sqs_queue.py deleted file mode 100644 index ad3ce68a7ce..00000000000 --- a/sqs_queue.py +++ /dev/null @@ -1,520 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: sqs_queue -version_added: 1.0.0 -short_description: Creates or deletes AWS SQS queues -description: - - Create or delete AWS SQS queues. - - Update attributes on existing queues. -author: - - Alan Loi (@loia) - - Fernando Jose Pando (@nand0p) - - Nadir Lloret (@nadirollo) - - Dennis Podkovyrin (@sbj-ss) -options: - state: - description: - - Create or delete the queue. - choices: ['present', 'absent'] - default: 'present' - type: str - name: - description: - - Name of the queue. - required: true - type: str - queue_type: - description: - - Standard or FIFO queue. - - I(queue_type) can only be set at queue creation and will otherwise be - ignored. - choices: ['standard', 'fifo'] - default: 'standard' - type: str - deduplication_scope: - description: - - Deduplication scope for FIFO queues. - - C(messageGroup) is required for high throughput FIFO. - - Defaults to C(queue) on creation. - choices: ['queue', 'messageGroup'] - type: str - version_added: 5.3.0 - fifo_throughput_limit: - description: - - Throughput limit for FIFO queues. - - C(perMessageGroupId) is required for high throughput FIFO. - - Defaults to C(perQueue) on creation. - choices: ['perQueue', 'perMessageGroupId'] - type: str - version_added: 5.3.0 - visibility_timeout: - description: - - The default visibility timeout in seconds. - aliases: [default_visibility_timeout] - type: int - message_retention_period: - description: - - The message retention period in seconds. - type: int - maximum_message_size: - description: - - The maximum message size in bytes. - type: int - delay_seconds: - description: - - The delivery delay in seconds. 
- aliases: [delivery_delay] - type: int - receive_message_wait_time_seconds: - description: - - The receive message wait time in seconds. - aliases: [receive_message_wait_time] - type: int - policy: - description: - - Policy to attach to the queue. - - Policy body can be YAML or JSON. - - This is required for certain use cases for example with S3 bucket notifications. - type: dict - redrive_policy: - description: - - JSON dict with the redrive_policy (see example). - type: dict - kms_master_key_id: - description: - - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. - - Specifying a valid I(kms_master_key_id) will enable encryption automatically. - type: str - kms_data_key_reuse_period_seconds: - description: - - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. - aliases: [kms_data_key_reuse_period] - type: int - content_based_deduplication: - type: bool - description: - - Enables content-based deduplication. Used for FIFOs only. - - Defaults to C(false). -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -RETURN = r""" -content_based_deduplication: - description: Enables content-based deduplication. Used for FIFOs only. - type: bool - returned: always - sample: True -fifo_throughput_limit: - description: Which throughput limit strategy is applied. - type: str - returned: always - sample: perQueue -deduplication_scope: - description: The deduplication setting. - type: str - returned: always - sample: messageGroup -visibility_timeout: - description: The default visibility timeout in seconds. - type: int - returned: always - sample: 30 -delay_seconds: - description: The delivery delay in seconds. - type: int - returned: always - sample: 0 -kms_master_key_id: - description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. - type: str - returned: if value exists - sample: alias/MyAlias -kms_data_key_reuse_period_seconds: - description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. - type: int - returned: always - sample: 300 -maximum_message_size: - description: The maximum message size in bytes. - type: int - returned: always - sample: 262144 -message_retention_period: - description: The message retention period in seconds. - type: int - returned: always - sample: 345600 -name: - description: Name of the SQS Queue - type: str - returned: always - sample: "queuename-987d2de0" -queue_arn: - description: The queue's Amazon resource name (ARN). - type: str - returned: on success - sample: 'arn:aws:sqs:us-east-1:123456789012:queuename-987d2de0' -queue_url: - description: URL to access the queue - type: str - returned: on success - sample: 'https://queue.amazonaws.com/123456789012/MyQueue' -receive_message_wait_time_seconds: - description: The receive message wait time in seconds. 
- type: int - returned: always - sample: 0 -region: - description: Region that the queue was created within - type: str - returned: always - sample: 'us-east-1' -tags: - description: Dictionary of queue tags - type: dict - returned: always - sample: '{"Env": "prod"}' -""" - -EXAMPLES = r""" -- name: Create SQS queue with redrive policy - community.aws.sqs_queue: - name: my-queue - region: ap-southeast-2 - default_visibility_timeout: 120 - message_retention_period: 86400 - maximum_message_size: 1024 - delivery_delay: 30 - receive_message_wait_time: 20 - policy: "{{ json_dict }}" - redrive_policy: - maxReceiveCount: 5 - deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue - -- name: Drop redrive policy - community.aws.sqs_queue: - name: my-queue - region: ap-southeast-2 - redrive_policy: {} - -- name: Create FIFO queue - community.aws.sqs_queue: - name: fifo-queue - region: ap-southeast-2 - queue_type: fifo - content_based_deduplication: true - -- name: Tag queue - community.aws.sqs_queue: - name: fifo-queue - region: ap-southeast-2 - tags: - example: SomeValue - -- name: Configure Encryption, automatically uses a new data key every hour - community.aws.sqs_queue: - name: fifo-queue - region: ap-southeast-2 - kms_master_key_id: alias/MyQueueKey - kms_data_key_reuse_period_seconds: 3600 - -- name: Example queue allowing s3 bucket notifications - community.aws.sqs_queue: - name: "S3Notifications" - default_visibility_timeout: 120 - message_retention_period: 86400 - maximum_message_size: 1024 - delivery_delay: 30 - receive_message_wait_time: 20 - policy: - Version: 2012-10-17 - Id: s3-queue-policy - Statement: - - Sid: allowNotifications - Effect: Allow - Principal: - Service: s3.amazonaws.com - Action: - - SQS:SendMessage - Resource: "arn:aws:sqs:*:*:S3Notifications" - Condition: - ArnLike: - aws:SourceArn: "arn:aws:s3:*:*:SomeBucket" - -- name: Delete SQS queue - community.aws.sqs_queue: - name: my-queue - region: ap-southeast-2 - state: absent -""" - -import json - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -  -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_queue_name(module, is_fifo=False): - name = module.params.get("name") - if not is_fifo or name.endswith(".fifo"): - return name - return name + ".fifo" - - -# NonExistentQueue is explicitly expected when a queue doesn't exist -@AWSRetry.jittered_backoff() -def get_queue_url(client, name): - try: - return client.get_queue_url(QueueName=name)["QueueUrl"] - except is_boto3_error_code("AWS.SimpleQueueService.NonExistentQueue"): - return None - - -def describe_queue(client, queue_url): - """ - Describe a queue's attributes in snake_case format. - """ - attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)["Attributes"] - description = dict(attributes) - description.pop("Policy", None) - description.pop("RedrivePolicy", None) - description =
camel_dict_to_snake_dict(description) - description["policy"] = attributes.get("Policy", None) - description["redrive_policy"] = attributes.get("RedrivePolicy", None) - - # Boto3 returns everything as a string, convert them back to integers/dicts if - # that's what we expected. - for key, value in description.items(): - if value is None: - continue - - if key in ["policy", "redrive_policy"]: - policy = json.loads(value) - description[key] = policy - continue - - if key == "content_based_deduplication": - try: - description[key] = bool(value) - except (TypeError, ValueError): - pass - - try: - if value == str(int(value)): - description[key] = int(value) - except (TypeError, ValueError): - pass - - return description - - -def create_or_update_sqs_queue(client, module): - is_fifo = module.params.get("queue_type") == "fifo" - kms_master_key_id = module.params.get("kms_master_key_id") - queue_name = get_queue_name(module, is_fifo) - result = dict( - name=queue_name, - region=module.params.get("region"), - changed=False, - ) - - queue_url = get_queue_url(client, queue_name) - result["queue_url"] = queue_url - - # Create a dict() to hold attributes that will be passed to boto3 - create_attributes = {} - - if not queue_url: - if is_fifo: - create_attributes["FifoQueue"] = "True" - if kms_master_key_id: - create_attributes["KmsMasterKeyId"] = kms_master_key_id - result["changed"] = True - if module.check_mode: - return result - queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)["QueueUrl"] - - changed, arn = update_sqs_queue(module, client, queue_url) - result["changed"] |= changed - result["queue_arn"] = arn - - changed, tags = update_tags(client, queue_url, module) - result["changed"] |= changed - result["tags"] = tags - - result.update(describe_queue(client, queue_url)) - - COMPATABILITY_KEYS = dict( - delay_seconds="delivery_delay", - receive_message_wait_time_seconds="receive_message_wait_time", - visibility_timeout="default_visibility_timeout", - kms_data_key_reuse_period_seconds="kms_data_key_reuse_period", - ) - for key in list(result.keys()): - # The return values changed between boto and boto3, add the old keys too - # for backwards compatibility - return_name = COMPATABILITY_KEYS.get(key) - if return_name: - result[return_name] = result.get(key) - - return result - - -def update_sqs_queue(module, client, queue_url): - check_mode = module.check_mode - changed = False - existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)[ - "Attributes" - ] - new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True) - attributes_to_set = dict() - - # Boto3 SQS deals with policies as strings, we want to deal with them as - # dicts - if module.params.get("policy") is not None: - policy = module.params.get("policy") - current_value = existing_attributes.get("Policy", "{}") - current_policy = json.loads(current_value) - if compare_policies(current_policy, policy): - attributes_to_set["Policy"] = json.dumps(policy) - changed = True - if module.params.get("redrive_policy") is not None: - policy = module.params.get("redrive_policy") - current_value = existing_attributes.get("RedrivePolicy", "{}") - current_policy = json.loads(current_value) - if compare_policies(current_policy, policy): - attributes_to_set["RedrivePolicy"] = json.dumps(policy) - changed = True - - for attribute, value in existing_attributes.items(): - # We handle these as a special case because they're IAM policies - if 
attribute in ["Policy", "RedrivePolicy"]: - continue - - if attribute not in new_attributes.keys(): - continue - - if new_attributes.get(attribute) is None: - continue - - new_value = new_attributes[attribute] - - if isinstance(new_value, bool): - new_value = str(new_value).lower() - value = str(value).lower() - - if str(new_value) == str(value): - continue - - # Boto3 expects strings - attributes_to_set[attribute] = str(new_value) - changed = True - - if changed and not check_mode: - client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True) - - return changed, existing_attributes.get("queue_arn") - - -def delete_sqs_queue(client, module): - is_fifo = module.params.get("queue_type") == "fifo" - queue_name = get_queue_name(module, is_fifo) - result = dict(name=queue_name, region=module.params.get("region"), changed=False) - - queue_url = get_queue_url(client, queue_name) - if not queue_url: - return result - - result["changed"] = bool(queue_url) - if not module.check_mode: - AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url) - - return result - - -def update_tags(client, queue_url, module): - new_tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - if new_tags is None: - return False, {} - - try: - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: - existing_tags = {} - - tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) - - if not module.check_mode: - if tags_to_remove: - client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True) - if tags_to_add: - client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add) - existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get("Tags", {}) - else: - existing_tags = new_tags - - changed = bool(tags_to_remove) or bool(tags_to_add) - return changed, existing_tags - - -def main(): - argument_spec = dict( - state=dict(type="str", default="present", choices=["present", "absent"]), - name=dict(type="str", required=True), - queue_type=dict(type="str", default="standard", choices=["standard", "fifo"]), - delay_seconds=dict(type="int", aliases=["delivery_delay"]), - maximum_message_size=dict(type="int"), - message_retention_period=dict(type="int"), - policy=dict(type="dict"), - receive_message_wait_time_seconds=dict(type="int", aliases=["receive_message_wait_time"]), - redrive_policy=dict(type="dict"), - visibility_timeout=dict(type="int", aliases=["default_visibility_timeout"]), - kms_master_key_id=dict(type="str"), - fifo_throughput_limit=dict(type="str", choices=["perQueue", "perMessageGroupId"]), - deduplication_scope=dict(type="str", choices=["queue", "messageGroup"]), - kms_data_key_reuse_period_seconds=dict(type="int", aliases=["kms_data_key_reuse_period"], no_log=False), - content_based_deduplication=dict(type="bool"), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - state = module.params.get("state") - retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["AWS.SimpleQueueService.NonExistentQueue"]) - try: - client = module.client("sqs", retry_decorator=retry_decorator) - if state == "present": - result = create_or_update_sqs_queue(client, module) - elif state == "absent": - result = delete_sqs_queue(client, 
module) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to control sqs queue") - else: - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/ssm_inventory_info.py b/ssm_inventory_info.py deleted file mode 100644 index c5b84909738..00000000000 --- a/ssm_inventory_info.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Contributors to the Ansible project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = """ -module: ssm_inventory_info -version_added: 6.0.0 -short_description: Get SSM inventory information for EC2 instance - -description: - - Gather SSM inventory for EC2 instance configured with SSM. - -author: 'Aubin Bikouo (@abikouo)' - -options: - instance_id: - description: - - EC2 instance id. - required: true - type: str - -extends_documentation_fragment: -- amazon.aws.common.modules -- amazon.aws.region.modules -- amazon.aws.boto3 -""" - -EXAMPLES = """ -- name: Retrieve SSM inventory info for instance id 'i-012345678902' - community.aws.ssm_inventory_info: - instance_id: 'i-012345678902' -""" - - -RETURN = """ -ssm_inventory: - returned: on success - description: > - SSM inventory information. - type: dict - sample: { - 'agent_type': 'amazon-ssm-agent', - 'agent_version': '3.2.582.0', - 'computer_name': 'ip-172-31-44-166.ec2.internal', - 'instance_id': 'i-039eb9b1f55934ab6', - 'instance_status': 'Active', - 'ip_address': '172.31.44.166', - 'platform_name': 'Fedora Linux', - 'platform_type': 'Linux', - 'platform_version': '37', - 'resource_type': 'EC2Instance' - } -""" - - -try: - import botocore -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class SsmInventoryInfoFailure(Exception): - def __init__(self, exc, msg): - self.exc = exc - self.msg = msg - super().__init__(self) - - -def get_ssm_inventory(connection, filters): - try: - return connection.get_inventory(Filters=filters) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise SsmInventoryInfoFailure(exc=e, msg="get_ssm_inventory() failed.") - - -def execute_module(module, connection): - instance_id = module.params.get("instance_id") - try: - filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [instance_id]}] - - response = get_ssm_inventory(connection, filters) - entities = response.get("Entities", []) - ssm_inventory = {} - if entities: - content = entities[0].get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", []) - if content: - ssm_inventory = camel_dict_to_snake_dict(content[0]) - module.exit_json(changed=False, ssm_inventory=ssm_inventory) - except SsmInventoryInfoFailure as e: - module.fail_json_aws(exception=e.exc, msg=e.msg) - - -def main(): - argument_spec = dict( - instance_id=dict(required=True, type="str"), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - connection = module.client("ssm") - execute_module(module, connection) - - -if __name__ == "__main__": - main() diff --git a/ssm_parameter.py b/ssm_parameter.py deleted file mode 100644 index aefafca009c..00000000000 --- a/ssm_parameter.py +++ /dev/null @@ -1,594 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# 
Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: ssm_parameter -version_added: 1.0.0 -short_description: Manage key-value pairs in AWS Systems Manager Parameter Store -description: - - Manage key-value pairs in AWS Systems Manager (SSM) Parameter Store. - - Prior to release 5.0.0 this module was called C(community.aws.aws_ssm_parameter_store). - The usage did not change. -options: - name: - description: - - Parameter key name. - required: true - type: str - description: - description: - - Parameter key description. - required: false - type: str - value: - description: - - Parameter value. - required: false - type: str - state: - description: - - Creates or modifies an existing parameter. - - Deletes a parameter. - required: false - choices: ['present', 'absent'] - default: present - type: str - string_type: - description: - - Parameter String type. - required: false - choices: ['String', 'StringList', 'SecureString'] - default: String - type: str - aliases: ['type'] - decryption: - description: - - Work with SecureString type to get plain text secrets - type: bool - required: false - default: true - key_id: - description: - - AWS KMS key to decrypt the secrets. - - The default key (C(alias/aws/ssm)) is automatically generated the first - time it's requested. - required: false - default: alias/aws/ssm - type: str - overwrite_value: - description: - - Option to overwrite an existing value if it already exists. - required: false - choices: ['never', 'changed', 'always'] - default: changed - type: str - tier: - description: - - Parameter store tier type. - required: false - choices: ['Standard', 'Advanced', 'Intelligent-Tiering'] - default: Standard - type: str - version_added: 1.5.0 -seealso: - - ref: amazon.aws.aws_ssm lookup - description: The documentation for the C(amazon.aws.aws_ssm) lookup plugin. - -author: - - "Davinder Pal (@116davinder) " - - "Nathan Webster (@nathanwebsterdotme)" - - "Bill Wang (@ozbillwang) " - - "Michael De La Rue (@mikedlr)" - -notes: - - Support for I(tags) and I(purge_tags) was added in release 5.3.0. 
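# Illustrative sketch, not part of the module source: overwrite_value gates
# both value and tag updates in create_update_parameter() below -- "always"
# calls PutParameter unconditionally, "changed" first compares Type, Value and
# Description, and "never" leaves an existing parameter (tags included)
# untouched. A minimal task relying on the "never" behaviour (names are
# hypothetical):
#
#   - community.aws.ssm_parameter:
#       name: "Hello"              # created only if it does not exist yet
#       value: "World"
#       overwrite_value: "never"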
- -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create or update key/value pair in AWS SSM parameter store - community.aws.ssm_parameter: - name: "Hello" - description: "This is your first key" - value: "World" - -- name: Delete the key - community.aws.ssm_parameter: - name: "Hello" - state: absent - -- name: Create or update secure key/value pair with default KMS key (aws/ssm) - community.aws.ssm_parameter: - name: "Hello" - description: "This is your first key" - string_type: "SecureString" - value: "World" - -- name: Create or update secure key/value pair with nominated KMS key - community.aws.ssm_parameter: - name: "Hello" - description: "This is your first key" - string_type: "SecureString" - key_id: "alias/demo" - value: "World" - -- name: Always update a parameter store value and create a new version - community.aws.ssm_parameter: - name: "overwrite_example" - description: "This example will always overwrite the value" - string_type: "String" - value: "Test1234" - overwrite_value: "always" - -- name: Create or update key/value pair in AWS SSM parameter store with tier - community.aws.ssm_parameter: - name: "Hello" - description: "This is your first key" - value: "World" - tier: "Advanced" - -- name: recommend to use with aws_ssm lookup plugin - ansible.builtin.debug: - msg: "{{ lookup('amazon.aws.aws_ssm', 'Hello') }}" - -- name: Create or update key/value pair in AWS SSM parameter store w/ tags - community.aws.ssm_parameter: - name: "Hello" - description: "This is your first key" - value: "World" - tags: - Environment: "dev" - Version: "1.0" - Confidentiality: "low" - Tag With Space: "foo bar" - -- name: Add or update a tag on an existing parameter w/o removing existing tags - community.aws.ssm_parameter: - name: "Hello" - purge_tags: false - tags: - Contact: "person1" - -- name: Delete all tags on an existing parameter - community.aws.ssm_parameter: - name: "Hello" - tags: {} -""" - -RETURN = r""" -parameter_metadata: - type: dict - description: - - Information about a parameter. - - Does not include the value of the parameter as this can be sensitive - information. - returned: success - contains: - data_type: - type: str - description: Parameter Data type. - example: text - returned: success - description: - type: str - description: Parameter key description. - example: This is your first key - returned: success - last_modified_date: - type: str - description: Time and date that the parameter was last modified. - example: '2022-06-20T09:56:58.573000+00:00' - returned: success - last_modified_user: - type: str - description: ARN of the last user to modify the parameter. - example: 'arn:aws:sts::123456789012:assumed-role/example-role/session=example' - returned: success - name: - type: str - description: Parameter key name. - example: Hello - returned: success - policies: - type: list - description: A list of policies associated with a parameter. - elements: dict - returned: success - contains: - policy_text: - type: str - description: The JSON text of the policy. - returned: success - policy_type: - type: str - description: The type of policy. - example: Expiration - returned: success - policy_status: - type: str - description: The status of the policy. - example: Pending - returned: success - tier: - type: str - description: Parameter tier. 
- example: Standard - returned: success - type: - type: str - description: Parameter type - example: String - returned: success - version: - type: int - description: Parameter version number - example: 3 - returned: success - tags: - description: A dictionary representing the tags associated with the parameter. - type: dict - returned: when the parameter has tags - example: {'MyTagName': 'Some Value'} - version_added: 5.3.0 -""" - -import time - -try: - import botocore - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class ParameterWaiterFactory(BaseWaiterFactory): - def __init__(self, module): - client = module.client("ssm") - super(ParameterWaiterFactory, self).__init__(module, client) - - @property - def _waiter_model_data(self): - data = super(ParameterWaiterFactory, self)._waiter_model_data - ssm_data = dict( - parameter_exists=dict( - operation="DescribeParameters", - delay=1, - maxAttempts=20, - acceptors=[ - dict(state="retry", matcher="error", expected="ParameterNotFound"), - dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) == `0`"), - dict(state="success", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), - ], - ), - parameter_deleted=dict( - operation="DescribeParameters", - delay=1, - maxAttempts=20, - acceptors=[ - dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"), - dict(state="success", matcher="path", expected=True, argument="length(Parameters[]) == `0`"), - dict(state="success", matcher="error", expected="ParameterNotFound"), - ], - ), - ) - data.update(ssm_data) - return data - - -def _wait_exists(client, module, name): - if module.check_mode: - return - wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter("parameter_exists") - try: - waiter.wait( - ParameterFilters=[{"Key": "Name", "Values": [name]}], - ) - except botocore.exceptions.WaiterError: - module.warn("Timeout waiting for parameter to exist") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe parameter while waiting for creation") - - -def _wait_updated(client, module, name, version): - # Unfortunately we can't filter on the Version, as such we need something custom. 
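    # Illustrative sketch (comments only, not executed): the equivalent raw
    # boto3 polling loop, assuming a configured "ssm" client, a parameter
    # named "example-name", and old_version holding the version seen before
    # the update. DescribeParameters cannot filter on Version, so it is
    # compared client-side, exactly as the loop below does:
    #
    #   for _ in range(10):
    #       found = client.describe_parameters(
    #           ParameterFilters=[{"Key": "Name", "Values": ["example-name"]}]
    #       )["Parameters"]
    #       if found and found[0].get("Version", 0) > old_version:
    #           break
    #       time.sleep(1)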
- if module.check_mode: - return - for x in range(1, 10): - try: - parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}]) - if parameter.get("Version", 0) > version: - return - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update") - time.sleep(1) - - -def _wait_deleted(client, module, name): - if module.check_mode: - return - wf = ParameterWaiterFactory(module) - waiter = wf.get_waiter("parameter_deleted") - try: - waiter.wait( - ParameterFilters=[{"Key": "Name", "Values": [name]}], - ) - except botocore.exceptions.WaiterError: - module.warn("Timeout waiting for parameter to exist") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe parameter while waiting for deletion") - - -def tag_parameter(client, module, parameter_name, tags): - try: - return client.add_tags_to_resource( - aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, Tags=tags - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to add tag(s) to parameter") - - -def untag_parameter(client, module, parameter_name, tag_keys): - try: - return client.remove_tags_from_resource( - aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, TagKeys=tag_keys - ) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter") - - -def get_parameter_tags(client, module, parameter_name): - try: - tags = client.list_tags_for_resource(aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name)[ - "TagList" - ] - tags_dict = boto3_tag_list_to_ansible_dict(tags) - return tags_dict - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to retrieve parameter tags") - - -def update_parameter_tags(client, module, parameter_name, supplied_tags): - changed = False - response = {} - - if supplied_tags is None: - return False, response - - current_tags = get_parameter_tags(client, module, parameter_name) - tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, module.params.get("purge_tags")) - - if tags_to_add: - if module.check_mode: - return True, response - response = tag_parameter(client, module, parameter_name, ansible_dict_to_boto3_tag_list(tags_to_add)) - changed = True - if tags_to_remove: - if module.check_mode: - return True, response - response = untag_parameter(client, module, parameter_name, tags_to_remove) - changed = True - - return changed, response - - -def update_parameter(client, module, **args): - changed = False - response = {} - if module.check_mode: - return True, response - - try: - response = client.put_parameter(aws_retry=True, **args) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as exc: - module.fail_json_aws(exc, msg="setting parameter") - - return changed, response - - -@AWSRetry.jittered_backoff() -def describe_parameter(client, module, **args): - paginator = client.get_paginator("describe_parameters") - existing_parameter = paginator.paginate(**args).build_full_result() - - if not existing_parameter["Parameters"]: - return None - - tags_dict = get_parameter_tags(client, module, module.params.get("name")) - existing_parameter["Parameters"][0]["tags"] = tags_dict - - return existing_parameter["Parameters"][0] - - -def create_update_parameter(client, module): - changed 
= False - existing_parameter = None - response = {} - - args = dict(Name=module.params.get("name"), Type=module.params.get("string_type"), Tier=module.params.get("tier")) - - if module.params.get("overwrite_value") in ("always", "changed"): - args.update(Overwrite=True) - else: - args.update(Overwrite=False) - - if module.params.get("value") is not None: - args.update(Value=module.params.get("value")) - - if module.params.get("description"): - args.update(Description=module.params.get("description")) - - if module.params.get("string_type") == "SecureString": - args.update(KeyId=module.params.get("key_id")) - - try: - existing_parameter = client.get_parameter(aws_retry=True, Name=args["Name"], WithDecryption=True) - except botocore.exceptions.ClientError: - pass - except botocore.exceptions.BotoCoreError as e: - module.fail_json_aws(e, msg="fetching parameter") - - if existing_parameter: - original_version = existing_parameter["Parameter"]["Version"] - if "Value" not in args: - args["Value"] = existing_parameter["Parameter"]["Value"] - - if module.params.get("overwrite_value") == "always": - (changed, response) = update_parameter(client, module, **args) - - elif module.params.get("overwrite_value") == "changed": - if existing_parameter["Parameter"]["Type"] != args["Type"]: - (changed, response) = update_parameter(client, module, **args) - - elif existing_parameter["Parameter"]["Value"] != args["Value"]: - (changed, response) = update_parameter(client, module, **args) - - elif args.get("Description"): - # Description field not available from get_parameter function so get it from describe_parameters - try: - describe_existing_parameter = describe_parameter( - client, module, ParameterFilters=[{"Key": "Name", "Values": [args["Name"]]}] - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="getting description value") - - if describe_existing_parameter.get("Description") != args["Description"]: - (changed, response) = update_parameter(client, module, **args) - if changed: - _wait_updated(client, module, module.params.get("name"), original_version) - - # Handle tag updates for existing parameters - if module.params.get("overwrite_value") != "never": - tags_changed, tags_response = update_parameter_tags( - client, module, existing_parameter["Parameter"]["Name"], module.params.get("tags") - ) - - changed = changed or tags_changed - - if tags_response: - response["tag_updates"] = tags_response - - else: - # Add tags in initial creation request - if module.params.get("tags"): - args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get("tags"))) - # Overwrite=True conflicts with tags and is not needed for new param - args.update(Overwrite=False) - - (changed, response) = update_parameter(client, module, **args) - _wait_exists(client, module, module.params.get("name")) - - return changed, response - - -def delete_parameter(client, module): - response = {} - - try: - existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get("name"), WithDecryption=True) - except is_boto3_error_code("ParameterNotFound"): - return False, {} - except botocore.exceptions.ClientError: - # If we can't describe the parameter we may still be able to delete it - existing_parameter = True - except botocore.exceptions.BotoCoreError as e: - module.fail_json_aws(e, msg="setting parameter") - - if not existing_parameter: - return False, {} - if module.check_mode: - return True, {} - - try: - response = 
client.delete_parameter(aws_retry=True, Name=module.params.get("name")) - except is_boto3_error_code("ParameterNotFound"): - return False, {} - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="deleting parameter") - - _wait_deleted(client, module, module.params.get("name")) - - return True, response - - -def setup_client(module): - retry_decorator = AWSRetry.jittered_backoff() - connection = module.client("ssm", retry_decorator=retry_decorator) - return connection - - -def setup_module_object(): - argument_spec = dict( - name=dict(required=True), - description=dict(), - value=dict(required=False, no_log=True), - state=dict(default="present", choices=["present", "absent"]), - string_type=dict(default="String", choices=["String", "StringList", "SecureString"], aliases=["type"]), - decryption=dict(default=True, type="bool"), - key_id=dict(default="alias/aws/ssm"), - overwrite_value=dict(default="changed", choices=["never", "changed", "always"]), - tier=dict(default="Standard", choices=["Standard", "Advanced", "Intelligent-Tiering"]), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - ) - - return AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - -def main(): - module = setup_module_object() - state = module.params.get("state") - client = setup_client(module) - - invocations = { - "present": create_update_parameter, - "absent": delete_parameter, - } - (changed, response) = invocations[state](client, module) - - result = {"response": response} - - try: - parameter_metadata = describe_parameter( - client, module, ParameterFilters=[{"Key": "Name", "Values": [module.params.get("name")]}] - ) - except is_boto3_error_code("ParameterNotFound"): - return False, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="to describe parameter") - if parameter_metadata: - result["parameter_metadata"] = camel_dict_to_snake_dict(parameter_metadata, ignore_list=["tags"]) - - module.exit_json(changed=changed, **result) - - -if __name__ == "__main__": - main() diff --git a/stepfunctions_state_machine.py b/stepfunctions_state_machine.py deleted file mode 100644 index a2558c8085c..00000000000 --- a/stepfunctions_state_machine.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019, Tom De Keyser (@tdekeyser) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: stepfunctions_state_machine -version_added: 1.0.0 -short_description: Manage AWS Step Functions state machines -description: - - Create, update and delete state machines in AWS Step Functions. - - Calling the module in C(state=present) for an existing AWS Step Functions state machine - will attempt to update the state machine definition, IAM Role, or tags with the provided data. - - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine). - The usage did not change. - -options: - name: - description: - - Name of the state machine. - required: true - type: str - definition: - description: - - The Amazon States Language definition of the state machine. See - U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more - information on the Amazon States Language. - - Required when I(state=present). 
- type: json - role_arn: - description: - - The ARN of the IAM Role that will be used by the state machine for its executions. - - Required when I(state=present). - type: str - state: - description: - - Desired state for the state machine. - default: present - choices: [ present, absent ] - type: str - -author: - - Tom De Keyser (@tdekeyser) - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -# Create a new AWS Step Functions state machine -- name: Setup HelloWorld state machine - community.aws.stepfunctions_state_machine: - name: "HelloWorldStateMachine" - definition: "{{ lookup('file','state_machine.json') }}" - role_arn: arn:aws:iam::123456789012:role/service-role/invokeLambdaStepFunctionsRole - tags: - project: helloWorld - -# Update an existing state machine -- name: Change IAM Role and tags of HelloWorld state machine - community.aws.stepfunctions_state_machine: - name: HelloWorldStateMachine - definition: "{{ lookup('file','state_machine.json') }}" - role_arn: arn:aws:iam::123456789012:role/service-role/anotherStepFunctionsRole - tags: - otherTag: aDifferentTag - -# Remove the AWS Step Functions state machine -- name: Delete HelloWorld state machine - community.aws.stepfunctions_state_machine: - name: HelloWorldStateMachine - state: absent -""" - -RETURN = r""" -state_machine_arn: - description: ARN of the AWS Step Functions state machine - type: str - returned: always -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def manage_state_machine(state, sfn_client, module): - state_machine_arn = get_state_machine_arn(sfn_client, module) - - if state == "present": - if state_machine_arn is None: - create(sfn_client, module) - else: - update(state_machine_arn, sfn_client, module) - elif state == "absent": - if state_machine_arn is not None: - remove(state_machine_arn, sfn_client, module) - - check_mode(module, msg="State is up-to-date.") - module.exit_json(changed=False, state_machine_arn=state_machine_arn) - - -def create(sfn_client, module): - check_mode(module, msg="State machine would be created.", changed=True) - - tags = module.params.get("tags") - sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="key", tag_value_key_name="value") if tags else [] - - state_machine = sfn_client.create_state_machine( - name=module.params.get("name"), - definition=module.params.get("definition"), - roleArn=module.params.get("role_arn"), - tags=sfn_tags, - ) - module.exit_json(changed=True, state_machine_arn=state_machine.get("stateMachineArn")) - - -def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg=f"State machine would be deleted: {state_machine_arn}", changed=True) - - sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) - module.exit_json(changed=True, state_machine_arn=state_machine_arn) - - -def update(state_machine_arn, 
sfn_client, module): - tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) - - if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg=f"State machine would be updated: {state_machine_arn}", changed=True) - - sfn_client.update_state_machine( - stateMachineArn=state_machine_arn, - definition=module.params.get("definition"), - roleArn=module.params.get("role_arn"), - ) - sfn_client.untag_resource(resourceArn=state_machine_arn, tagKeys=tags_to_remove) - sfn_client.tag_resource( - resourceArn=state_machine_arn, - tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name="key", tag_value_key_name="value"), - ) - - module.exit_json(changed=True, state_machine_arn=state_machine_arn) - - -def compare_tags(state_machine_arn, sfn_client, module): - new_tags = module.params.get("tags") - current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get("tags") - return compare_aws_tags( - boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get("purge_tags") - ) - - -def params_changed(state_machine_arn, sfn_client, module): - """ - Check whether the state machine definition or IAM Role ARN is different - from the existing state machine parameters. - """ - current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn) - return current.get("definition") != module.params.get("definition") or current.get("roleArn") != module.params.get( - "role_arn" - ) - - -def get_state_machine_arn(sfn_client, module): - """ - Finds the state machine ARN based on the name parameter. Returns None if - there is no state machine with this name. - """ - target_name = module.params.get("name") - all_state_machines = sfn_client.list_state_machines(aws_retry=True).get("stateMachines") - - for state_machine in all_state_machines: - if state_machine.get("name") == target_name: - return state_machine.get("stateMachineArn") - - -def check_mode(module, msg="", changed=False): - if module.check_mode: - module.exit_json(changed=changed, output=msg) - - -def main(): - module_args = dict( - name=dict(type="str", required=True), - definition=dict(type="json"), - role_arn=dict(type="str"), - state=dict(choices=["present", "absent"], default="present"), - tags=dict(default=None, type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - ) - module = AnsibleAWSModule( - argument_spec=module_args, - required_if=[("state", "present", ["role_arn"]), ("state", "present", ["definition"])], - supports_check_mode=True, - ) - - sfn_client = module.client("stepfunctions", retry_decorator=AWSRetry.jittered_backoff(retries=5)) - state = module.params.get("state") - - try: - manage_state_machine(state, sfn_client, module) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to manage state machine") - - -if __name__ == "__main__": - main() diff --git a/stepfunctions_state_machine_execution.py b/stepfunctions_state_machine_execution.py deleted file mode 100644 index b7a9f7efba8..00000000000 --- a/stepfunctions_state_machine_execution.py +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019, Prasad Katti (@prasadkatti) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: stepfunctions_state_machine_execution -version_added: 1.0.0 - -short_description: Start or stop execution of an AWS Step Functions 
state machine - -description: - - Start or stop execution of a state machine in AWS Step Functions. - - Prior to release 5.0.0 this module was called C(community.aws.aws_step_functions_state_machine_execution). - The usage did not change. - -options: - action: - description: Desired action (C(start) or C(stop)) for a state machine execution. - default: start - choices: [ start, stop ] - type: str - name: - description: Name of the execution. - type: str - execution_input: - description: The JSON input data for the execution. - type: json - default: {} - state_machine_arn: - description: The ARN of the state machine that will be executed. - type: str - execution_arn: - description: The ARN of the execution you wish to stop. - type: str - cause: - description: A detailed explanation of the cause for stopping the execution. - type: str - default: '' - error: - description: The error code of the failure to pass in when stopping the execution. - type: str - default: '' - -author: - - Prasad Katti (@prasadkatti) - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Start an execution of a state machine - community.aws.stepfunctions_state_machine_execution: - name: an_execution_name - execution_input: '{ "IsHelloWorldExample": true }' - state_machine_arn: "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine" - -- name: Stop an execution of a state machine - community.aws.stepfunctions_state_machine_execution: - action: stop - execution_arn: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" - cause: "cause of task failure" - error: "error code of the failure" -""" - -RETURN = r""" -execution_arn: - description: ARN of the AWS Step Functions state machine execution. - type: str - returned: if action == start and changed == True - sample: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8" -start_date: - description: The date the execution is started. - type: str - returned: if action == start and changed == True - sample: "2019-11-02T22:39:49.071000-07:00" -stop_date: - description: The date the execution is stopped. - type: str - returned: if action == stop - sample: "2019-11-02T22:39:49.071000-07:00" -""" - - -try: - import botocore -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def start_execution(module, sfn_client): - """ - start_execution uses execution name to determine if a previous execution already exists. - If an execution by the provided name exists, call client.start_execution will not be called. 
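    Illustrative sketch, not module code: the existence check is a paginated
    ListExecutions call; with a raw boto3 Step Functions client and a
    hypothetical ARN/name pair it is roughly:

        pages = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=arn)
        if any(e["name"] == name for page in pages for e in page["executions"]):
            pass  # execution already exists, so nothing would be started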
- """ - - state_machine_arn = module.params.get("state_machine_arn") - name = module.params.get("name") - execution_input = module.params.get("execution_input") - - try: - # list_executions is eventually consistent - page_iterators = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=state_machine_arn) - - for execution in page_iterators.build_full_result()["executions"]: - if name == execution["name"]: - check_mode(module, msg="State machine execution already exists.", changed=False) - module.exit_json(changed=False) - - check_mode(module, msg="State machine execution would be started.", changed=True) - res_execution = sfn_client.start_execution(stateMachineArn=state_machine_arn, name=name, input=execution_input) - except is_boto3_error_code("ExecutionAlreadyExists"): - # this will never be executed anymore - module.exit_json(changed=False) - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to start execution.") - - module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution)) - - -def stop_execution(module, sfn_client): - cause = module.params.get("cause") - error = module.params.get("error") - execution_arn = module.params.get("execution_arn") - - try: - # describe_execution is eventually consistent - execution_status = sfn_client.describe_execution(executionArn=execution_arn)["status"] - if execution_status != "RUNNING": - check_mode(module, msg="State machine execution is not running.", changed=False) - module.exit_json(changed=False) - - check_mode(module, msg="State machine execution would be stopped.", changed=True) - res = sfn_client.stop_execution(executionArn=execution_arn, cause=cause, error=error) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to stop execution.") - - module.exit_json(changed=True, **camel_dict_to_snake_dict(res)) - - -def check_mode(module, msg="", changed=False): - if module.check_mode: - module.exit_json(changed=changed, output=msg) - - -def main(): - module_args = dict( - action=dict(choices=["start", "stop"], default="start"), - name=dict(type="str"), - execution_input=dict(type="json", default={}), - state_machine_arn=dict(type="str"), - cause=dict(type="str", default=""), - error=dict(type="str", default=""), - execution_arn=dict(type="str"), - ) - module = AnsibleAWSModule( - argument_spec=module_args, - required_if=[ - ("action", "start", ["name", "state_machine_arn"]), - ("action", "stop", ["execution_arn"]), - ], - supports_check_mode=True, - ) - - sfn_client = module.client("stepfunctions") - - action = module.params.get("action") - if action == "start": - start_execution(module, sfn_client) - else: - stop_execution(module, sfn_client) - - -if __name__ == "__main__": - main() diff --git a/storagegateway_info.py b/storagegateway_info.py deleted file mode 100644 index 55b7c4685d4..00000000000 --- a/storagegateway_info.py +++ /dev/null @@ -1,359 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Loic BLOT (@nerzhul) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# This module is sponsored by E.T.A.I. 
(www.etai.fr) - -DOCUMENTATION = r""" ---- -module: storagegateway_info -version_added: 1.0.0 -short_description: Fetch AWS Storage Gateway information -description: - - Fetch AWS Storage Gateway information. - - Prior to release 5.0.0 this module was called C(community.aws.aws_sgw_info). - The usage did not change. -author: - - Loic Blot (@nerzhul) -options: - gather_local_disks: - description: - - Gather local disks attached to the storage gateway. - type: bool - required: false - default: true - gather_tapes: - description: - - Gather tape information for storage gateways in tape mode. - type: bool - required: false - default: true - gather_file_shares: - description: - - Gather file share information for storage gateways in s3 mode. - type: bool - required: false - default: true - gather_volumes: - description: - - Gather volume information for storage gateways in iSCSI (cached & stored) modes. - type: bool - required: false - default: true -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -gateways: - description: list of gateway objects - returned: always - type: complex - contains: - gateway_arn: - description: "Storage Gateway ARN" - returned: always - type: str - sample: "arn:aws:storagegateway:eu-west-1:123456789012:gateway/sgw-9999F888" - gateway_id: - description: "Storage Gateway ID" - returned: always - type: str - sample: "sgw-9999F888" - gateway_name: - description: "Storage Gateway friendly name" - returned: always - type: str - sample: "my-sgw-01" - gateway_operational_state: - description: "Storage Gateway operational state" - returned: always - type: str - sample: "ACTIVE" - gateway_type: - description: "Storage Gateway type" - returned: always - type: str - sample: "FILE_S3" - file_shares: - description: "Storage gateway file shares" - returned: when gateway_type == "FILE_S3" - type: complex - contains: - file_share_arn: - description: "File share ARN" - returned: always - type: str - sample: "arn:aws:storagegateway:eu-west-1:123456789012:share/share-AF999C88" - file_share_id: - description: "File share ID" - returned: always - type: str - sample: "share-AF999C88" - file_share_status: - description: "File share status" - returned: always - type: str - sample: "AVAILABLE" - tapes: - description: "Storage Gateway tapes" - returned: when gateway_type == "VTL" - type: complex - contains: - tape_arn: - description: "Tape ARN" - returned: always - type: str - sample: "arn:aws:storagegateway:eu-west-1:123456789012:tape/tape-AF999C88" - tape_barcode: - description: "Tape barcode" - returned: always - type: str - sample: "tape-AF999C88" - tape_size_in_bytes: - description: "Tape size in bytes" - returned: always - type: int - sample: 555887569 - tape_status: - description: "Tape status" - returned: always - type: str - sample: "AVAILABLE" - local_disks: - description: "Storage gateway local disks" - returned: always - type: complex - contains: - disk_allocation_type: - description: "Disk allocation type" - returned: always - type: str - sample: "CACHE STORAGE" - disk_id: - description: "Disk ID on the system" - returned: always - type: str - sample: "pci-0000:00:1f.0" - disk_node: - description: "Disk parent block device" - returned: always - type: str - sample: "/dev/sdb" - disk_path: - description: "Disk path used for the cache" - returned: always - type: str - sample: "/dev/nvme1n1" - disk_size_in_bytes: - description: "Disk size in bytes" - returned: always - type: int - sample: 107374182400 - disk_status:
- description: "Disk status" - returned: always - type: str - sample: "present" -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -- name: "Get AWS storage gateway information" - community.aws.storagegateway_info: - -- name: "Get AWS storage gateway information for region eu-west-3" - community.aws.storagegateway_info: - region: eu-west-3 -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class SGWInformationManager(object): - def __init__(self, client, module): - self.client = client - self.module = module - self.name = self.module.params.get("name") - - def fetch(self): - gateways = self.list_gateways() - for gateway in gateways: - if self.module.params.get("gather_local_disks"): - self.list_local_disks(gateway) - # File share gateway - if gateway["gateway_type"] == "FILE_S3" and self.module.params.get("gather_file_shares"): - self.list_gateway_file_shares(gateway) - # Volume tape gateway - elif gateway["gateway_type"] == "VTL" and self.module.params.get("gather_tapes"): - self.list_gateway_vtl(gateway) - # iSCSI gateway - elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get("gather_volumes"): - self.list_gateway_volumes(gateway) - - self.module.exit_json(gateways=gateways) - - """ - List all storage gateways for the AWS endpoint. - """ - - def list_gateways(self): - try: - paginator = self.client.get_paginator("list_gateways") - response = paginator.paginate( - PaginationConfig={ - "PageSize": 100, - } - ).build_full_result() - - gateways = [] - for gw in response["Gateways"]: - gateways.append(camel_dict_to_snake_dict(gw)) - - return gateways - - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't list storage gateways") - - """ - Read file share objects from AWS API response. - Drop the gateway_arn attribute from response, as it will be duplicate with parent object. - """ - - @staticmethod - def _read_gateway_fileshare_response(fileshares, aws_reponse): - for share in aws_reponse["FileShareInfoList"]: - share_obj = camel_dict_to_snake_dict(share) - if "gateway_arn" in share_obj: - del share_obj["gateway_arn"] - fileshares.append(share_obj) - - return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None - - """ - List file shares attached to AWS storage gateway when in S3 mode. 
- """ - - def list_gateway_file_shares(self, gateway): - try: - response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100) - - gateway["file_shares"] = [] - marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) - - while marker is not None: - response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100) - - marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't list gateway file shares") - - """ - List storage gateway local disks - """ - - def list_local_disks(self, gateway): - try: - gateway["local_disks"] = [ - camel_dict_to_snake_dict(disk) - for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])["Disks"] - ] - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") - - """ - Read tape objects from AWS API response. - Drop the gateway_arn attribute from response, as it will be duplicate with parent object. - """ - - @staticmethod - def _read_gateway_tape_response(tapes, aws_response): - for tape in aws_response["TapeInfos"]: - tape_obj = camel_dict_to_snake_dict(tape) - if "gateway_arn" in tape_obj: - del tape_obj["gateway_arn"] - tapes.append(tape_obj) - - return aws_response["Marker"] if "Marker" in aws_response else None - - """ - List VTL & VTS attached to AWS storage gateway in VTL mode - """ - - def list_gateway_vtl(self, gateway): - try: - response = self.client.list_tapes(Limit=100) - - gateway["tapes"] = [] - marker = self._read_gateway_tape_response(gateway["tapes"], response) - - while marker is not None: - response = self.client.list_tapes(Marker=marker, Limit=100) - - marker = self._read_gateway_tape_response(gateway["tapes"], response) - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes") - - """ - List volumes attached to AWS storage gateway in CACHED or STORAGE mode - """ - - def list_gateway_volumes(self, gateway): - try: - paginator = self.client.get_paginator("list_volumes") - response = paginator.paginate( - GatewayARN=gateway["gateway_arn"], - PaginationConfig={ - "PageSize": 100, - }, - ).build_full_result() - - gateway["volumes"] = [] - for volume in response["VolumeInfos"]: - volume_obj = camel_dict_to_snake_dict(volume) - if "gateway_arn" in volume_obj: - del volume_obj["gateway_arn"] - if "gateway_id" in volume_obj: - del volume_obj["gateway_id"] - - gateway["volumes"].append(volume_obj) - except (BotoCoreError, ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes") - - -def main(): - argument_spec = dict( - gather_local_disks=dict(type="bool", default=True), - gather_tapes=dict(type="bool", default=True), - gather_file_shares=dict(type="bool", default=True), - gather_volumes=dict(type="bool", default=True), - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = module.client("storagegateway") - - if client is None: # this should never happen - module.fail_json(msg="Unknown error, failed to create storagegateway client, no information available.") - - SGWInformationManager(client, module).fetch() - - -if __name__ == "__main__": - main() diff --git a/sts_assume_role.py b/sts_assume_role.py deleted file mode 100644 index 4d934c2d5cd..00000000000 --- a/sts_assume_role.py +++ /dev/null @@ -1,172 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: sts_assume_role -version_added: 1.0.0 -short_description: Assume a role using AWS Security Token Service and obtain temporary credentials -description: - - Assume a role using AWS Security Token Service and obtain temporary credentials. -author: - - Boris Ekelchik (@bekelchik) - - Marek Piatek (@piontas) -options: - role_arn: - description: - - The Amazon Resource Name (ARN) of the role that the caller is - assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). - required: true - type: str - role_session_name: - description: - - Name of the role's session - will be used by CloudTrail. - required: true - type: str - policy: - description: - - Supplemental policy to use in addition to assumed role's policies. - type: str - duration_seconds: - description: - - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours). - - The max depends on the IAM role's sessions duration setting. - - By default, the value is set to 3600 seconds. - type: int - external_id: - description: - - A unique identifier that is used by third parties to assume a role in their customers' accounts. - type: str - mfa_serial_number: - description: - - The identification number of the MFA device that is associated with the user who is making the AssumeRole call. - type: str - mfa_token: - description: - - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. - type: str -notes: - - In order to use the assumed role in a following playbook task you must pass the I(access_key), - I(secret_key) and I(session_token) parameters to modules that should use the assumed credentials. 
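# Illustrative sketch, not part of the module source: the module wraps a
# single STS AssumeRole call, so the equivalent raw boto3 (reusing the role
# ARN and session name from the examples below) is roughly:
#
#   import boto3
#   creds = boto3.client("sts").assume_role(
#       RoleArn="arn:aws:iam::123456789012:role/someRole",
#       RoleSessionName="someRoleSession",
#   )["Credentials"]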
-extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -sts_creds: - description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token - returned: always - type: dict - sample: - access_key: XXXXXXXXXXXXXXXXXXXX - expiration: '2017-11-11T11:11:11+00:00' - secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -sts_user: - description: The Amazon Resource Name (ARN) and the assumed role ID - returned: always - type: dict - sample: - assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob - arn: ARO123EXAMPLE123:Bob -changed: - description: True if obtaining the credentials succeeds - type: bool - returned: always -""" - -EXAMPLES = r""" -# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) -- community.aws.sts_assume_role: - access_key: AKIA1EXAMPLE1EXAMPLE - secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE - role_arn: "arn:aws:iam::123456789012:role/someRole" - role_session_name: "someRoleSession" - register: assumed_role - -# Use the assumed role above to tag an instance in account 123456789012 -- amazon.aws.ec2_tag: - access_key: "{{ assumed_role.sts_creds.access_key }}" - secret_key: "{{ assumed_role.sts_creds.secret_key }}" - session_token: "{{ assumed_role.sts_creds.session_token }}" - resource: i-xyzxyz01 - state: present - tags: - MyNewTag: value - -""" - -try: - from botocore.exceptions import ClientError - from botocore.exceptions import ParamValidationError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def _parse_response(response): - credentials = response.get("Credentials", {}) - user = response.get("AssumedRoleUser", {}) - - sts_cred = { - "access_key": credentials.get("AccessKeyId"), - "secret_key": credentials.get("SecretAccessKey"), - "session_token": credentials.get("SessionToken"), - "expiration": credentials.get("Expiration"), - } - sts_user = camel_dict_to_snake_dict(user) - return sts_cred, sts_user - - -def assume_role_policy(connection, module): - params = { - "RoleArn": module.params.get("role_arn"), - "RoleSessionName": module.params.get("role_session_name"), - "Policy": module.params.get("policy"), - "DurationSeconds": module.params.get("duration_seconds"), - "ExternalId": module.params.get("external_id"), - "SerialNumber": module.params.get("mfa_serial_number"), - "TokenCode": module.params.get("mfa_token"), - } - changed = False - - kwargs = dict((k, v) for k, v in params.items() if v is not None) - - try: - response = connection.assume_role(**kwargs) - changed = True - except (ClientError, ParamValidationError) as e: - module.fail_json_aws(e) - - sts_cred, sts_user = _parse_response(response) - module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user) - - -def main(): - argument_spec = dict( - role_arn=dict(required=True), - role_session_name=dict(required=True), - duration_seconds=dict(required=False, default=None, type="int"), - external_id=dict(required=False, default=None), - policy=dict(required=False, default=None), - mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None, 
no_log=True), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - connection = module.client("sts") - - assume_role_policy(connection, module) - - -if __name__ == "__main__": - main() diff --git a/sts_session_token.py b/sts_session_token.py deleted file mode 100644 index 044a6367b58..00000000000 --- a/sts_session_token.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: sts_session_token -version_added: 1.0.0 -short_description: obtain a session token from the AWS Security Token Service -description: - - Obtain a session token from the AWS Security Token Service. -author: - - Victor Costan (@pwnall) -options: - duration_seconds: - description: - - The duration, in seconds, of the session token. - See U(https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters) - for acceptable and default values. - type: int - mfa_serial_number: - description: - - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call. - type: str - mfa_token: - description: - - The value provided by the MFA device, if the trust policy of the user requires MFA. - type: str -notes: - - In order to use the session token in a following playbook task you must pass the I(access_key), - I(secret_key) and I(session_token) parameters to modules that should use the session credentials. -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -RETURN = r""" -sts_creds: - description: The Credentials object returned by the AWS Security Token Service - returned: always - type: list - sample: - access_key: ASIAXXXXXXXXXXXXXXXX - expiration: "2016-04-08T11:59:47+00:00" - secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -changed: - description: True if obtaining the credentials succeeds - type: bool - returned: always -""" - - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. 
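# Illustrative sketch, not part of the module source: mfa_serial_number and
# mfa_token map directly onto GetSessionToken's SerialNumber and TokenCode
# parameters, so an MFA-protected session looks like this (values are
# hypothetical):
#
#   - community.aws.sts_session_token:
#       mfa_serial_number: "arn:aws:iam::123456789012:mfa/example-user"
#       mfa_token: "123456"
#     register: mfa_session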
- -# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html) -- name: Get a session token - community.aws.sts_session_token: - access_key: AKIA1EXAMPLE1EXAMPLE - secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE - duration_seconds: 3600 - register: session_credentials - -- name: Use the session token obtained above to tag an instance in account 123456789012 - amazon.aws.ec2_tag: - access_key: "{{ session_credentials.sts_creds.access_key }}" - secret_key: "{{ session_credentials.sts_creds.secret_key }}" - session_token: "{{ session_credentials.sts_creds.session_token }}" - resource: i-xyzxyz01 - state: present - tags: - MyNewTag: value - -""" - -try: - import botocore - from botocore.exceptions import ClientError -except ImportError: - pass # Handled by AnsibleAWSModule - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def normalize_credentials(credentials): - access_key = credentials.get("AccessKeyId", None) - secret_key = credentials.get("SecretAccessKey", None) - session_token = credentials.get("SessionToken", None) - expiration = credentials.get("Expiration", None) - return { - "access_key": access_key, - "secret_key": secret_key, - "session_token": session_token, - "expiration": expiration, - } - - -def get_session_token(connection, module): - duration_seconds = module.params.get("duration_seconds") - mfa_serial_number = module.params.get("mfa_serial_number") - mfa_token = module.params.get("mfa_token") - changed = False - - args = {} - if duration_seconds is not None: - args["DurationSeconds"] = duration_seconds - if mfa_serial_number is not None: - args["SerialNumber"] = mfa_serial_number - if mfa_token is not None: - args["TokenCode"] = mfa_token - - try: - response = connection.get_session_token(**args) - changed = True - except ClientError as e: - module.fail_json_aws(e, msg="Could not get session token") - - credentials = normalize_credentials(response.get("Credentials", {})) - module.exit_json(changed=changed, sts_creds=credentials) - - -def main(): - argument_spec = dict( - duration_seconds=dict(required=False, default=None, type="int"), - mfa_serial_number=dict(required=False, default=None), - mfa_token=dict(required=False, default=None, no_log=True), - ) - - module = AnsibleAWSModule(argument_spec=argument_spec) - - try: - connection = module.client("sts") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to connect to AWS") - - get_session_token(connection, module) - - -if __name__ == "__main__": - main() diff --git a/waf_condition.py b/waf_condition.py deleted file mode 100644 index b1baae378e8..00000000000 --- a/waf_condition.py +++ /dev/null @@ -1,770 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Will Thames -# Copyright (c) 2015 Mike Mochan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: waf_condition -short_description: Create and delete WAF Conditions -version_added: 1.0.0 -description: - - Read the AWS documentation for WAF - U(https://aws.amazon.com/documentation/waf/). - - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_condition). - The usage did not change. - -author: - - Will Thames (@willthames) - - Mike Mochan (@mmochan) - -options: - name: - description: Name of the Web Application Firewall condition to manage. 
- required: true - type: str - type: - description: The type of matching to perform. - choices: - - byte - - geo - - ip - - regex - - size - - sql - - xss - type: str - required: true - filters: - description: - - A list of the filters against which to match. - - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string). - - For I(type=geo), the only valid key is I(country). - - For I(type=ip), the only valid key is I(ip_address). - - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern). - - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size). - - For I(type=sql), valid keys are I(field_to_match) and I(transformation). - - For I(type=xss), valid keys are I(field_to_match) and I(transformation). - - Required when I(state=present). - type: list - elements: dict - suboptions: - field_to_match: - description: - - The field upon which to perform the match. - - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). - type: str - choices: ['uri', 'query_string', 'header', 'method', 'body'] - position: - description: - - Where in the field the match needs to occur. - - Only valid when I(type=byte). - type: str - choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word'] - header: - description: - - Which specific header should be matched. - - Required when I(field_to_match=header). - - Valid when I(type=byte). - type: str - transformation: - description: - - A transform to apply on the field prior to performing the match. - - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss). - type: str - choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode'] - country: - description: - - Value of geo constraint (typically a two letter country code). - - The only valid key when I(type=geo). - type: str - ip_address: - description: - - An IP Address or CIDR to match. - - The only valid key when I(type=ip). - type: str - regex_pattern: - description: - - A dict describing the regular expressions used to perform the match. - - Only valid when I(type=regex). - type: dict - suboptions: - name: - description: A name to describe the set of patterns. - type: str - regex_strings: - description: A list of regular expressions to match. - type: list - elements: str - comparison: - description: - - What type of comparison to perform. - - Only valid key when I(type=size). - type: str - choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT'] - size: - description: - - The size of the field (in bytes). - - Only valid key when I(type=size). - type: int - target_string: - description: - - The string to search for. - - May be up to 50 bytes. - - Valid when I(type=byte). - type: str - purge_filters: - description: - - Whether to remove existing filters from a condition if not passed in I(filters). - default: false - type: bool - waf_regional: - description: Whether to use C(waf-regional) module. - default: false - required: false - type: bool - state: - description: Whether the condition should be C(present) or C(absent). 
- choices: - - present - - absent - default: present - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - - name: create WAF byte condition - community.aws.waf_condition: - name: my_byte_condition - filters: - - field_to_match: header - position: STARTS_WITH - target_string: Hello - header: Content-type - type: byte - - - name: create WAF geo condition - community.aws.waf_condition: - name: my_geo_condition - filters: - - country: US - - country: AU - - country: AT - type: geo - - - name: create IP address condition - community.aws.waf_condition: - name: "{{ resource_prefix }}_ip_condition" - filters: - - ip_address: "10.0.0.0/8" - - ip_address: "192.168.0.0/24" - type: ip - - - name: create WAF regex condition - community.aws.waf_condition: - name: my_regex_condition - filters: - - field_to_match: query_string - regex_pattern: - name: greetings - regex_strings: - - '[hH]ello' - - '^Hi there' - - '.*Good Day to You' - type: regex - - - name: create WAF size condition - community.aws.waf_condition: - name: my_size_condition - filters: - - field_to_match: query_string - size: 300 - comparison: GT - type: size - - - name: create WAF sql injection condition - community.aws.waf_condition: - name: my_sql_condition - filters: - - field_to_match: query_string - transformation: url_decode - type: sql - - - name: create WAF xss condition - community.aws.waf_condition: - name: my_xss_condition - filters: - - field_to_match: query_string - transformation: url_decode - type: xss - -""" - -RETURN = r""" -condition: - description: Condition returned by operation. - returned: always - type: complex - contains: - condition_id: - description: Type-agnostic ID for the condition. - returned: when state is present - type: str - sample: dd74b1ff-8c06-4a4f-897a-6b23605de413 - byte_match_set_id: - description: ID for byte match set. - returned: always - type: str - sample: c4882c96-837b-44a2-a762-4ea87dbf812b - byte_match_tuples: - description: List of byte match tuples. - returned: always - type: complex - contains: - field_to_match: - description: Field to match. - returned: always - type: complex - contains: - data: - description: Which specific header (if type is header). - type: str - sample: content-type - type: - description: Type of field - type: str - sample: HEADER - positional_constraint: - description: Position in the field to match. - type: str - sample: STARTS_WITH - target_string: - description: String to look for. - type: str - sample: Hello - text_transformation: - description: Transformation to apply to the field before matching. - type: str - sample: NONE - geo_match_constraints: - description: List of geographical constraints. - returned: when type is geo and state is present - type: complex - contains: - type: - description: Type of geo constraint. - type: str - sample: Country - value: - description: Value of geo constraint (typically a country code). - type: str - sample: AT - geo_match_set_id: - description: ID of the geo match set. - returned: when type is geo and state is present - type: str - sample: dd74b1ff-8c06-4a4f-897a-6b23605de413 - ip_set_descriptors: - description: list of IP address filters - returned: when type is ip and state is present - type: complex - contains: - type: - description: Type of IP address (IPV4 or IPV6). - returned: always - type: str - sample: IPV4 - value: - description: IP address. 
- returned: always - type: str - sample: 10.0.0.0/8 - ip_set_id: - description: ID of condition. - returned: when type is ip and state is present - type: str - sample: 78ad334a-3535-4036-85e6-8e11e745217b - name: - description: Name of condition. - returned: when state is present - type: str - sample: my_waf_condition - regex_match_set_id: - description: ID of the regex match set. - returned: when type is regex and state is present - type: str - sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c - regex_match_tuples: - description: List of regex matches. - returned: when type is regex and state is present - type: complex - contains: - field_to_match: - description: Field on which the regex match is applied. - type: complex - contains: - type: - description: The field name. - returned: when type is regex and state is present - type: str - sample: QUERY_STRING - regex_pattern_set_id: - description: ID of the regex pattern. - type: str - sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e - text_transformation: - description: transformation applied to the text before matching - type: str - sample: NONE - size_constraint_set_id: - description: ID of the size constraint set. - returned: when type is size and state is present - type: str - sample: de84b4b3-578b-447e-a9a0-0db35c995656 - size_constraints: - description: List of size constraints to apply. - returned: when type is size and state is present - type: complex - contains: - comparison_operator: - description: Comparison operator to apply. - type: str - sample: GT - field_to_match: - description: Field on which the size constraint is applied. - type: complex - contains: - type: - description: Field name. - type: str - sample: QUERY_STRING - size: - description: Size to compare against the field. - type: int - sample: 300 - text_transformation: - description: Transformation applied to the text before matching. - type: str - sample: NONE - sql_injection_match_set_id: - description: ID of the SQL injection match set. - returned: when type is sql and state is present - type: str - sample: de84b4b3-578b-447e-a9a0-0db35c995656 - sql_injection_match_tuples: - description: List of SQL injection match sets. - returned: when type is sql and state is present - type: complex - contains: - field_to_match: - description: Field on which the SQL injection match is applied. - type: complex - contains: - type: - description: Field name. - type: str - sample: QUERY_STRING - text_transformation: - description: Transformation applied to the text before matching. - type: str - sample: URL_DECODE - xss_match_set_id: - description: ID of the XSS match set. - returned: when type is xss and state is present - type: str - sample: de84b4b3-578b-447e-a9a0-0db35c995656 - xss_match_tuples: - description: List of XSS match sets. - returned: when type is xss and state is present - type: complex - contains: - field_to_match: - description: Field on which the XSS match is applied. - type: complex - contains: - type: - description: Field name - type: str - sample: QUERY_STRING - text_transformation: - description: transformation applied to the text before matching. 
- type: str - sample: URL_DECODE -""" - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP -from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -class Condition(object): - def __init__(self, client, module): - self.client = client - self.module = module - self.type = module.params["type"] - self.method_suffix = MATCH_LOOKUP[self.type]["method"] - self.conditionset = MATCH_LOOKUP[self.type]["conditionset"] - self.conditionsets = MATCH_LOOKUP[self.type]["conditionset"] + "s" - self.conditionsetid = MATCH_LOOKUP[self.type]["conditionset"] + "Id" - self.conditiontuple = MATCH_LOOKUP[self.type]["conditiontuple"] - self.conditiontuples = MATCH_LOOKUP[self.type]["conditiontuple"] + "s" - self.conditiontype = MATCH_LOOKUP[self.type]["type"] - - def format_for_update(self, condition_set_id): - # Prep kwargs - kwargs = dict() - kwargs["Updates"] = list() - - for filtr in self.module.params.get("filters"): - # Only for ip_set - if self.type == "ip": - # there might be a better way of detecting an IPv6 address - if ":" in filtr.get("ip_address"): - ip_type = "IPV6" - else: - ip_type = "IPV4" - condition_insert = {"Type": ip_type, "Value": filtr.get("ip_address")} - - # Specific for geo_match_set - if self.type == "geo": - condition_insert = dict(Type="Country", Value=filtr.get("country")) - - # Common For everything but ip_set and geo_match_set - if self.type not in ("ip", "geo"): - condition_insert = dict( - FieldToMatch=dict(Type=filtr.get("field_to_match").upper()), - TextTransformation=filtr.get("transformation", "none").upper(), - ) - - if filtr.get("field_to_match").upper() == "HEADER": - if filtr.get("header"): - condition_insert["FieldToMatch"]["Data"] = filtr.get("header").lower() - else: - self.module.fail_json(msg=str("DATA required when HEADER requested")) - - # Specific for byte_match_set - if self.type == "byte": - condition_insert["TargetString"] = filtr.get("target_string") - condition_insert["PositionalConstraint"] = filtr.get("position") - - # Specific for size_constraint_set - if self.type == "size": - condition_insert["ComparisonOperator"] = filtr.get("comparison") - condition_insert["Size"] = filtr.get("size") - - # Specific for regex_match_set - if self.type == "regex": - condition_insert["RegexPatternSetId"] = self.ensure_regex_pattern_present(filtr.get("regex_pattern"))[ - "RegexPatternSetId" - ] - - kwargs["Updates"].append({"Action": "INSERT", self.conditiontuple: condition_insert}) - - kwargs[self.conditionsetid] = condition_set_id - return kwargs - - def format_for_deletion(self, condition): - return { - "Updates": [ - {"Action": "DELETE", 
self.conditiontuple: current_condition_tuple} - for current_condition_tuple in condition[self.conditiontuples] - ], - self.conditionsetid: condition[self.conditionsetid], - } - - @AWSRetry.exponential_backoff() - def list_regex_patterns_with_backoff(self, **params): - return self.client.list_regex_pattern_sets(**params) - - @AWSRetry.exponential_backoff() - def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id): - return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id) - - def list_regex_patterns(self): - # at time of writing(2017-11-20) no regex pattern paginator exists - regex_patterns = [] - params = {} - while True: - try: - response = self.list_regex_patterns_with_backoff(**params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not list regex patterns") - regex_patterns.extend(response["RegexPatternSets"]) - if "NextMarker" in response: - params["NextMarker"] = response["NextMarker"] - else: - break - return regex_patterns - - def get_regex_pattern_by_name(self, name): - existing_regex_patterns = self.list_regex_patterns() - regex_lookup = dict((item["Name"], item["RegexPatternSetId"]) for item in existing_regex_patterns) - if name in regex_lookup: - return self.get_regex_pattern_set_with_backoff(regex_lookup[name])["RegexPatternSet"] - else: - return None - - def ensure_regex_pattern_present(self, regex_pattern): - name = regex_pattern["name"] - - pattern_set = self.get_regex_pattern_by_name(name) - if not pattern_set: - pattern_set = run_func_with_change_token_backoff( - self.client, self.module, {"Name": name}, self.client.create_regex_pattern_set - )["RegexPatternSet"] - missing = set(regex_pattern["regex_strings"]) - set(pattern_set["RegexPatternStrings"]) - extra = set(pattern_set["RegexPatternStrings"]) - set(regex_pattern["regex_strings"]) - if not missing and not extra: - return pattern_set - updates = [{"Action": "INSERT", "RegexPatternString": pattern} for pattern in missing] - updates.extend([{"Action": "DELETE", "RegexPatternString": pattern} for pattern in extra]) - run_func_with_change_token_backoff( - self.client, - self.module, - {"RegexPatternSetId": pattern_set["RegexPatternSetId"], "Updates": updates}, - self.client.update_regex_pattern_set, - wait=True, - ) - return self.get_regex_pattern_set_with_backoff(pattern_set["RegexPatternSetId"])["RegexPatternSet"] - - def delete_unused_regex_pattern(self, regex_pattern_set_id): - try: - regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)[ - "RegexPatternSet" - ] - updates = list() - for regex_pattern_string in regex_pattern_set["RegexPatternStrings"]: - updates.append({"Action": "DELETE", "RegexPatternString": regex_pattern_string}) - run_func_with_change_token_backoff( - self.client, - self.module, - {"RegexPatternSetId": regex_pattern_set_id, "Updates": updates}, - self.client.update_regex_pattern_set, - ) - - run_func_with_change_token_backoff( - self.client, - self.module, - {"RegexPatternSetId": regex_pattern_set_id}, - self.client.delete_regex_pattern_set, - wait=True, - ) - except is_boto3_error_code("WAFNonexistentItemException"): - return - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not delete regex pattern") - - def get_condition_by_name(self, name): - all_conditions = [d for d in self.list_conditions() if d["Name"] == name] - if 
all_conditions: - return all_conditions[0][self.conditionsetid] - - @AWSRetry.exponential_backoff() - def get_condition_by_id_with_backoff(self, condition_set_id): - params = dict() - params[self.conditionsetid] = condition_set_id - func = getattr(self.client, "get_" + self.method_suffix) - return func(**params)[self.conditionset] - - def get_condition_by_id(self, condition_set_id): - try: - return self.get_condition_by_id_with_backoff(condition_set_id) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not get condition") - - def list_conditions(self): - method = "list_" + self.method_suffix + "s" - try: - paginator = self.client.get_paginator(method) - func = paginator.paginate().build_full_result - except botocore.exceptions.OperationNotPageableError: - # list_geo_match_sets and list_regex_match_sets do not have a paginator - func = getattr(self.client, method) - try: - return func()[self.conditionsets] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg=f"Could not list {self.type} conditions") - - def tidy_up_regex_patterns(self, regex_match_set): - all_regex_match_sets = self.list_conditions() - all_match_set_patterns = list() - for rms in all_regex_match_sets: - all_match_set_patterns.extend( - conditiontuple["RegexPatternSetId"] - for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples] - ) - for filtr in regex_match_set[self.conditiontuples]: - if filtr["RegexPatternSetId"] not in all_match_set_patterns: - self.delete_unused_regex_pattern(filtr["RegexPatternSetId"]) - - def find_condition_in_rules(self, condition_set_id): - rules_in_use = [] - try: - if self.client.__class__.__name__ == "WAF": - all_rules = list_rules_with_backoff(self.client) - elif self.client.__class__.__name__ == "WAFRegional": - all_rules = list_regional_rules_with_backoff(self.client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not list rules") - for rule in all_rules: - try: - rule_details = get_rule_with_backoff(self.client, rule["RuleId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not get rule details") - if condition_set_id in [predicate["DataId"] for predicate in rule_details["Predicates"]]: - rules_in_use.append(rule_details["Name"]) - return rules_in_use - - def find_and_delete_condition(self, condition_set_id): - current_condition = self.get_condition_by_id(condition_set_id) - in_use_rules = self.find_condition_in_rules(condition_set_id) - if in_use_rules: - rulenames = ", ".join(in_use_rules) - self.module.fail_json(msg=f"Condition {current_condition['Name']} is in use by {rulenames}") - if current_condition[self.conditiontuples]: - # Filters are deleted using update with the DELETE action - func = getattr(self.client, "update_" + self.method_suffix) - params = self.format_for_deletion(current_condition) - try: - # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call - run_func_with_change_token_backoff(self.client, self.module, params, func) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not delete filters from condition") - func = getattr(self.client, "delete_" + self.method_suffix) - params = dict() - params[self.conditionsetid] = 
condition_set_id - try: - run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not delete condition") - # tidy up regex patterns - if self.type == "regex": - self.tidy_up_regex_patterns(current_condition) - return True, {} - - def find_missing(self, update, current_condition): - missing = [] - for desired in update["Updates"]: - found = False - desired_condition = desired[self.conditiontuple] - current_conditions = current_condition[self.conditiontuples] - for condition in current_conditions: - if not compare_policies(condition, desired_condition): - found = True - if not found: - missing.append(desired) - return missing - - def find_and_update_condition(self, condition_set_id): - current_condition = self.get_condition_by_id(condition_set_id) - update = self.format_for_update(condition_set_id) - missing = self.find_missing(update, current_condition) - if self.module.params.get("purge_filters"): - extra = [ - {"Action": "DELETE", self.conditiontuple: current_tuple} - for current_tuple in current_condition[self.conditiontuples] - if current_tuple not in [desired[self.conditiontuple] for desired in update["Updates"]] - ] - else: - extra = [] - changed = bool(missing or extra) - if changed: - update["Updates"] = missing + extra - func = getattr(self.client, "update_" + self.method_suffix) - try: - result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not update condition") - return changed, self.get_condition_by_id(condition_set_id) - - def ensure_condition_present(self): - name = self.module.params["name"] - condition_set_id = self.get_condition_by_name(name) - if condition_set_id: - return self.find_and_update_condition(condition_set_id) - else: - params = dict() - params["Name"] = name - func = getattr(self.client, "create_" + self.method_suffix) - try: - condition = run_func_with_change_token_backoff(self.client, self.module, params, func) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not create condition") - return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid]) - - def ensure_condition_absent(self): - condition_set_id = self.get_condition_by_name(self.module.params["name"]) - if condition_set_id: - return self.find_and_delete_condition(condition_set_id) - return False, {} - - -def main(): - filters_subspec = dict( - country=dict(), - field_to_match=dict(choices=["uri", "query_string", "header", "method", "body"]), - header=dict(), - transformation=dict( - choices=["none", "compress_white_space", "html_entity_decode", "lowercase", "cmd_line", "url_decode"] - ), - position=dict(choices=["exactly", "starts_with", "ends_with", "contains", "contains_word"]), - comparison=dict(choices=["EQ", "NE", "LE", "LT", "GE", "GT"]), - target_string=dict(), # Bytes - size=dict(type="int"), - ip_address=dict(), - regex_pattern=dict(), - ) - argument_spec = dict( - name=dict(required=True), - type=dict(required=True, choices=["byte", "geo", "ip", "regex", "size", "sql", "xss"]), - filters=dict(type="list", elements="dict"), - purge_filters=dict(type="bool", default=False), - waf_regional=dict(type="bool", default=False), - state=dict(default="present", choices=["present", 
"absent"]), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["filters"]]], - ) - state = module.params.get("state") - - resource = "waf" if not module.params["waf_regional"] else "waf-regional" - client = module.client(resource) - - condition = Condition(client, module) - - if state == "present": - (changed, results) = condition.ensure_condition_present() - # return a condition agnostic ID for use by waf_rule - results["ConditionId"] = results[condition.conditionsetid] - else: - (changed, results) = condition.ensure_condition_absent() - - module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results)) - - -if __name__ == "__main__": - main() diff --git a/waf_info.py b/waf_info.py deleted file mode 100644 index 711d1d8de74..00000000000 --- a/waf_info.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: waf_info -short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters -version_added: 1.0.0 -description: - - Retrieve information for WAF ACLs, Rules, Conditions and Filters. - - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_info). - The usage did not change. -options: - name: - description: - - The name of a Web Application Firewall. - type: str - waf_regional: - description: Whether to use the C(waf-regional) module. - default: false - required: false - type: bool - -author: - - Mike Mochan (@mmochan) - - Will Thames (@willthames) -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: obtain all WAF information - community.aws.waf_info: - -- name: obtain all information for a single WAF - community.aws.waf_info: - name: test_waf - -- name: obtain all information for a single WAF Regional - community.aws.waf_info: - name: test_waf - waf_regional: true -""" - -RETURN = r""" -wafs: - description: The WAFs that match the passed arguments. - returned: success - type: complex - contains: - name: - description: A friendly name or description of the WebACL. - returned: always - type: str - sample: test_waf - default_action: - description: The action to perform if none of the Rules contained in the WebACL match. - returned: always - type: int - sample: BLOCK - metric_name: - description: A friendly name or description for the metrics for this WebACL. - returned: always - type: str - sample: test_waf_metric - rules: - description: An array that contains the action for each Rule in a WebACL , the priority of the Rule. - returned: always - type: complex - contains: - action: - description: The action to perform if the Rule matches. - returned: always - type: str - sample: BLOCK - metric_name: - description: A friendly name or description for the metrics for this Rule. - returned: always - type: str - sample: ipblockrule - name: - description: A friendly name or description of the Rule. - returned: always - type: str - sample: ip_block_rule - predicates: - description: The Predicates list contains a Predicate for each - ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet - object in a Rule. 
- returned: always - type: list - sample: - [ - { - "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890", - "byte_match_tuples": [ - { - "field_to_match": { - "type": "QUERY_STRING" - }, - "positional_constraint": "STARTS_WITH", - "target_string": "bobbins", - "text_transformation": "NONE" - } - ], - "name": "bobbins", - "negated": false, - "type": "ByteMatch" - } - ] -""" - -from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def main(): - argument_spec = dict( - name=dict(required=False), - waf_regional=dict(type="bool", default=False), - ) - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - - resource = "waf" if not module.params["waf_regional"] else "waf-regional" - client = module.client(resource) - web_acls = list_web_acls(client, module) - name = module.params["name"] - if name: - web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name] - if not web_acls: - module.fail_json(msg=f"WAF named {name} not found") - module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls]) - - -if __name__ == "__main__": - main() diff --git a/waf_rule.py b/waf_rule.py deleted file mode 100644 index 28ff981623d..00000000000 --- a/waf_rule.py +++ /dev/null @@ -1,364 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Will Thames -# Copyright (c) 2015 Mike Mochan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: waf_rule -short_description: Create and delete WAF Rules -version_added: 1.0.0 -description: - - Read the AWS documentation for WAF - U(https://aws.amazon.com/documentation/waf/). - - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_rule). - The usage did not change. - -author: - - Mike Mochan (@mmochan) - - Will Thames (@willthames) - -options: - name: - description: Name of the Web Application Firewall rule. - required: true - type: str - metric_name: - description: - - A friendly name or description for the metrics for the rule. - - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name may not contain whitespace. - - You can't change I(metric_name) after you create the rule. - - Defaults to the same as I(name) with disallowed characters removed. - type: str - state: - description: Whether the rule should be present or absent. - choices: ['present', 'absent'] - default: present - type: str - conditions: - description: > - List of conditions used in the rule. M(community.aws.waf_condition) can be used to create new conditions. - type: list - elements: dict - suboptions: - type: - required: true - type: str - choices: ['byte','geo','ip','regex','size','sql','xss'] - description: The type of rule to match. - negated: - required: true - type: bool - description: Whether the condition should be negated. - name: - required: true - type: str - description: The name of the condition. The condition must already exist. - purge_conditions: - description: - - Whether or not to remove conditions that are not passed when updating I(conditions). - default: false - type: bool - waf_regional: - description: Whether to use C(waf-regional) module. 
- default: false - required: false - type: bool - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - - name: create WAF rule - community.aws.waf_rule: - name: my_waf_rule - conditions: - - name: my_regex_condition - type: regex - negated: false - - name: my_geo_condition - type: geo - negated: false - - name: my_byte_condition - type: byte - negated: true - - - name: remove WAF rule - community.aws.waf_rule: - name: "my_waf_rule" - state: absent -""" - -RETURN = r""" -rule: - description: WAF rule contents - returned: always - type: complex - contains: - metric_name: - description: Metric name for the rule. - returned: always - type: str - sample: ansibletest1234rule - name: - description: Friendly name for the rule. - returned: always - type: str - sample: ansible-test-1234_rule - predicates: - description: List of conditions used in the rule. - returned: always - type: complex - contains: - data_id: - description: ID of the condition. - returned: always - type: str - sample: 8251acdb-526c-42a8-92bc-d3d13e584166 - negated: - description: Whether the sense of the condition is negated. - returned: always - type: bool - sample: false - type: - description: type of the condition. - returned: always - type: str - sample: ByteMatch - rule_id: - description: ID of the WAF rule. - returned: always - type: str - sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261 -""" - -import re - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule - - -def get_rule_by_name(client, module, name): - rules = [d["RuleId"] for d in list_rules(client, module) if d["Name"] == name] - if rules: - return rules[0] - - -def get_rule(client, module, rule_id): - try: - return client.get_rule(RuleId=rule_id)["Rule"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get WAF rule") - - -def list_rules(client, module): - if client.__class__.__name__ == "WAF": - try: - return list_rules_with_backoff(client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list WAF rules") - elif client.__class__.__name__ == "WAFRegional": - try: - return list_regional_rules_with_backoff(client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list WAF Regional rules") - - -def list_regional_rules(client, module): - try: - return list_regional_rules_with_backoff(client) - except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list WAF rules") - - -def find_and_update_rule(client, module, rule_id): - rule = get_rule(client, module, rule_id) - rule_id = rule["RuleId"] - - existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) - desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) - all_conditions = dict() - - for condition_type in MATCH_LOOKUP: - method = "list_" + MATCH_LOOKUP[condition_type]["method"] + "s" - all_conditions[condition_type] = dict() - try: - paginator = client.get_paginator(method) - func = paginator.paginate().build_full_result - except (KeyError, botocore.exceptions.OperationNotPageableError): - # list_geo_match_sets and list_regex_match_sets do not have a paginator - # and throw different exceptions - func = getattr(client, method) - try: - pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Could not list {condition_type} conditions") - for pred in pred_results: - pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] - all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) - all_conditions[condition_type][pred["DataId"]] = camel_dict_to_snake_dict(pred) - - for condition in module.params["conditions"]: - desired_conditions[condition["type"]][condition["name"]] = condition - - reverse_condition_types = dict((v["type"], k) for (k, v) in MATCH_LOOKUP.items()) - for condition in rule["Predicates"]: - existing_conditions[reverse_condition_types[condition["Type"]]][condition["DataId"]] = camel_dict_to_snake_dict( - condition - ) - - insertions = list() - deletions = list() - - for condition_type in desired_conditions: - for condition_name, condition in desired_conditions[condition_type].items(): - if condition_name not in all_conditions[condition_type]: - module.fail_json(msg=f"Condition {condition_name} of type {condition_type} does not exist") - condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"] - if condition["data_id"] not in existing_conditions[condition_type]: - insertions.append(format_for_insertion(condition)) - - if module.params["purge_conditions"]: - for condition_type in existing_conditions: - deletions.extend( - [ - format_for_deletion(condition) - for condition in existing_conditions[condition_type].values() - if not all_conditions[condition_type][condition["data_id"]]["name"] - in desired_conditions[condition_type] - ] - ) - - changed = bool(insertions or deletions) - update = {"RuleId": rule_id, "Updates": insertions + deletions} - if changed: - try: - run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not update rule conditions") - - return changed, get_rule(client, module, rule_id) - - -def format_for_insertion(condition): - return dict( - Action="INSERT", - Predicate=dict( - Negated=condition["negated"], Type=MATCH_LOOKUP[condition["type"]]["type"], DataId=condition["data_id"] - ), - ) - - -def format_for_deletion(condition): - return dict( - Action="DELETE", - Predicate=dict(Negated=condition["negated"], Type=condition["type"], DataId=condition["data_id"]), - ) - - -def remove_rule_conditions(client, module, rule_id): - conditions = get_rule(client, module, 
rule_id)["Predicates"] - updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions] - try: - run_func_with_change_token_backoff(client, module, {"RuleId": rule_id, "Updates": updates}, client.update_rule) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not remove rule conditions") - - -def ensure_rule_present(client, module): - name = module.params["name"] - rule_id = get_rule_by_name(client, module, name) - params = dict() - if rule_id: - return find_and_update_rule(client, module, rule_id) - else: - params["Name"] = module.params["name"] - metric_name = module.params["metric_name"] - if not metric_name: - metric_name = re.sub(r"[^a-zA-Z0-9]", "", module.params["name"]) - params["MetricName"] = metric_name - try: - new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)["Rule"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not create rule") - return find_and_update_rule(client, module, new_rule["RuleId"]) - - -def find_rule_in_web_acls(client, module, rule_id): - web_acls_in_use = [] - try: - if client.__class__.__name__ == "WAF": - all_web_acls = list_web_acls_with_backoff(client) - elif client.__class__.__name__ == "WAFRegional": - all_web_acls = list_regional_web_acls_with_backoff(client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list Web ACLs") - for web_acl in all_web_acls: - try: - web_acl_details = get_web_acl_with_backoff(client, web_acl["WebACLId"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get Web ACL details") - if rule_id in [rule["RuleId"] for rule in web_acl_details["Rules"]]: - web_acls_in_use.append(web_acl_details["Name"]) - return web_acls_in_use - - -def ensure_rule_absent(client, module): - rule_id = get_rule_by_name(client, module, module.params["name"]) - in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) - if in_use_web_acls: - web_acl_names = ", ".join(in_use_web_acls) - module.fail_json(msg=f"Rule {module.params['name']} is in use by Web ACL(s) {web_acl_names}") - if rule_id: - remove_rule_conditions(client, module, rule_id) - try: - return True, run_func_with_change_token_backoff( - client, module, {"RuleId": rule_id}, client.delete_rule, wait=True - ) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete rule") - return False, {} - - -def main(): - argument_spec = dict( - name=dict(required=True), - metric_name=dict(), - state=dict(default="present", choices=["present", "absent"]), - conditions=dict(type="list", elements="dict"), - purge_conditions=dict(type="bool", default=False), - waf_regional=dict(type="bool", default=False), - ) - module = AnsibleAWSModule(argument_spec=argument_spec) - state = module.params.get("state") - - resource = "waf" if not module.params["waf_regional"] else "waf-regional" - client = module.client(resource) - if state == "present": - (changed, results) = ensure_rule_present(client, module) - else: - (changed, results) = ensure_rule_absent(client, module) - - module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) - - -if __name__ == "__main__": - main() diff --git a/waf_web_acl.py b/waf_web_acl.py deleted file mode 100644 index 
dd78a2778a5..00000000000 --- a/waf_web_acl.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" -module: waf_web_acl -short_description: Create and delete WAF Web ACLs -version_added: 1.0.0 -description: - - Module for WAF classic, for WAF v2 use the I(wafv2_*) modules. - - Read the AWS documentation for WAF U(https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html). - - Prior to release 5.0.0 this module was called C(community.aws.aws_waf_web_acl). - The usage did not change. - -author: - - Mike Mochan (@mmochan) - - Will Thames (@willthames) - -options: - name: - description: Name of the Web Application Firewall ACL to manage. - required: true - type: str - default_action: - description: The action that you want AWS WAF to take when a request doesn't - match the criteria specified in any of the Rule objects that are associated with the WebACL. - choices: - - block - - allow - - count - type: str - state: - description: Whether the Web ACL should be present or absent. - choices: - - present - - absent - default: present - type: str - metric_name: - description: - - A friendly name or description for the metrics for this WebACL. - - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. - - You can't change I(metric_name) after you create the WebACL. - - Metric name will default to I(name) with disallowed characters stripped out. - type: str - rules: - description: - - A list of rules that the Web ACL will enforce. - type: list - elements: dict - suboptions: - name: - description: Name of the rule. - type: str - required: true - action: - description: The action to perform. - type: str - required: true - priority: - description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first. - type: int - required: true - type: - description: The type of rule. - choices: - - rate_based - - regular - type: str - purge_rules: - description: - - Whether to remove rules that aren't passed with I(rules). - default: False - type: bool - waf_regional: - description: Whether to use C(waf-regional) module. - default: false - required: false - type: bool - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" - - name: create web ACL - community.aws.waf_web_acl: - name: my_web_acl - rules: - - name: my_rule - priority: 1 - action: block - default_action: block - purge_rules: true - state: present - - - name: delete the web acl - community.aws.waf_web_acl: - name: my_web_acl - state: absent -""" - -RETURN = r""" -web_acl: - description: contents of the Web ACL. - returned: always - type: complex - contains: - default_action: - description: Default action taken by the Web ACL if no rules match. - returned: always - type: dict - sample: - type: BLOCK - metric_name: - description: Metric name used as an identifier. - returned: always - type: str - sample: mywebacl - name: - description: Friendly name of the Web ACL. - returned: always - type: str - sample: my web acl - rules: - description: List of rules. - returned: always - type: complex - contains: - action: - description: Action taken by the WAF when the rule matches. 
- returned: always - type: complex - sample: - type: ALLOW - priority: - description: priority number of the rule (lower numbers are run first). - returned: always - type: int - sample: 2 - rule_id: - description: Rule ID. - returned: always - type: str - sample: a6fc7ab5-287b-479f-8004-7fd0399daf75 - type: - description: Type of rule (either REGULAR or RATE_BASED). - returned: always - type: str - sample: REGULAR - web_acl_id: - description: Unique identifier of Web ACL. - returned: always - type: str - sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c -""" - -import re - -try: - import botocore -except ImportError: - pass # handled by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff -from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff - - -def get_web_acl_by_name(client, module, name): - acls = [d["WebACLId"] for d in list_web_acls(client, module) if d["Name"] == name] - if acls: - return acls[0] - else: - return acls - - -def create_rule_lookup(client, module): - if client.__class__.__name__ == "WAF": - try: - rules = list_rules_with_backoff(client) - return dict((rule["Name"], rule) for rule in rules) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list rules") - elif client.__class__.__name__ == "WAFRegional": - try: - rules = list_regional_rules_with_backoff(client) - return dict((rule["Name"], rule) for rule in rules) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list regional rules") - - -def get_web_acl(client, module, web_acl_id): - try: - return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Could not get Web ACL with id {web_acl_id}") - - -def list_web_acls( - client, - module, -): - if client.__class__.__name__ == "WAF": - try: - return list_web_acls_with_backoff(client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get Web ACLs") - elif client.__class__.__name__ == "WAFRegional": - try: - return list_regional_web_acls_with_backoff(client) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get Web ACLs") - - -def find_and_update_web_acl(client, module, web_acl_id): - acl = get_web_acl(client, module, web_acl_id) - rule_lookup = create_rule_lookup(client, module) - existing_rules = acl["Rules"] - desired_rules = [ - { - "RuleId": rule_lookup[rule["name"]]["RuleId"], - "Priority": rule["priority"], - "Action": {"Type": rule["action"].upper()}, - "Type": rule.get("type", "regular").upper(), - } - for rule in module.params["rules"] - ] - missing = [rule for rule in 
desired_rules if rule not in existing_rules] - extras = [] - if module.params["purge_rules"]: - extras = [rule for rule in existing_rules if rule not in desired_rules] - - insertions = [format_for_update(rule, "INSERT") for rule in missing] - deletions = [format_for_update(rule, "DELETE") for rule in extras] - changed = bool(insertions + deletions) - - # Purge rules before adding new ones in case a deletion shares the same - # priority as an insertion. - params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]} - change_tokens = [] - if deletions: - try: - params["Updates"] = deletions - result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result["ChangeToken"]) - get_waiter( - client, - "change_token_in_sync", - ).wait(ChangeToken=result["ChangeToken"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not update Web ACL") - if insertions: - try: - params["Updates"] = insertions - result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - change_tokens.append(result["ChangeToken"]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not update Web ACL") - if change_tokens: - for token in change_tokens: - get_waiter( - client, - "change_token_in_sync", - ).wait(ChangeToken=token) - if changed: - acl = get_web_acl(client, module, web_acl_id) - return changed, acl - - -def format_for_update(rule, action): - return dict( - Action=action, - ActivatedRule=dict( - Priority=rule["Priority"], - RuleId=rule["RuleId"], - Action=dict(Type=rule["Action"]["Type"]), - ), - ) - - -def remove_rules_from_web_acl(client, module, web_acl_id): - acl = get_web_acl(client, module, web_acl_id) - deletions = [format_for_update(rule, "DELETE") for rule in acl["Rules"]] - try: - params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"], "Updates": deletions} - run_func_with_change_token_backoff(client, module, params, client.update_web_acl) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not remove rule") - - -def ensure_web_acl_present(client, module): - changed = False - result = None - name = module.params["name"] - web_acl_id = get_web_acl_by_name(client, module, name) - if web_acl_id: - (changed, result) = find_and_update_web_acl(client, module, web_acl_id) - else: - metric_name = module.params["metric_name"] - if not metric_name: - metric_name = re.sub(r"[^A-Za-z0-9]", "", module.params["name"]) - default_action = module.params["default_action"].upper() - try: - params = {"Name": name, "MetricName": metric_name, "DefaultAction": {"Type": default_action}} - new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not create Web ACL") - (changed, result) = find_and_update_web_acl(client, module, new_web_acl["WebACL"]["WebACLId"]) - return changed, result - - -def ensure_web_acl_absent(client, module): - web_acl_id = get_web_acl_by_name(client, module, module.params["name"]) - if web_acl_id: - web_acl = get_web_acl(client, module, web_acl_id) - if web_acl["Rules"]: - remove_rules_from_web_acl(client, module, web_acl_id) - try: - run_func_with_change_token_backoff( - client, module, {"WebACLId": web_acl_id}, 
client.delete_web_acl, wait=True - ) - return True, {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete Web ACL") - return False, {} - - -def main(): - argument_spec = dict( - name=dict(required=True), - default_action=dict(choices=["block", "allow", "count"]), - metric_name=dict(), - state=dict(default="present", choices=["present", "absent"]), - rules=dict(type="list", elements="dict"), - purge_rules=dict(type="bool", default=False), - waf_regional=dict(type="bool", default=False), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=[["state", "present", ["default_action", "rules"]]], - ) - state = module.params.get("state") - - resource = "waf" if not module.params["waf_regional"] else "waf-regional" - client = module.client(resource) - if state == "present": - (changed, results) = ensure_web_acl_present(client, module) - else: - (changed, results) = ensure_web_acl_absent(client, module) - - module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results)) - - -if __name__ == "__main__": - main() diff --git a/wafv2_ip_set.py b/wafv2_ip_set.py deleted file mode 100644 index b96ba0cb1c1..00000000000 --- a/wafv2_ip_set.py +++ /dev/null @@ -1,340 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_ip_set -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: Create, modify and delete IP sets for WAFv2 -description: - - Create, modify and delete IP sets for WAFv2. -options: - state: - description: - - Whether the IP set is present or absent. - choices: ["present", "absent"] - required: true - type: str - name: - description: - - The name of the IP set. - required: true - type: str - description: - description: - - Description of the IP set. - required: false - type: str - scope: - description: - - Specifies whether this is for an AWS CloudFront distribution or for a regional application, - such as API Gateway or Application Load Balancer. - choices: ["CLOUDFRONT","REGIONAL"] - required: true - type: str - ip_address_version: - description: - - Specifies whether this is an IPv4 or an IPv6 IP set. - - Required when I(state=present). - choices: ["IPV4","IPV6"] - type: str - addresses: - description: - - Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in - Classless Inter-Domain Routing (CIDR) notation. - - Required when I(state=present). - - When I(state=absent) and I(addresses) is defined, only the given IP addresses will be removed - from the IP set. The entire IP set itself will stay present. - type: list - elements: str - purge_addresses: - description: - - When set to C(false), keep the existing addresses in place. Addresses will be modified and added, but not deleted. - default: true - type: bool - -notes: - - Support for I(purge_tags) was added in release 4.0.0. 
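  - Updates and deletions of an existing IP set are performed with the set's current C(LockToken), which the module retrieves and passes automatically.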
- -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: test ip set - wafv2_ip_set: - name: test02 - state: present - description: hallo eins - scope: REGIONAL - ip_address_version: IPV4 - addresses: - - 8.8.8.8/32 - - 8.8.4.4/32 - tags: - A: B - C: D -""" - -RETURN = r""" -addresses: - description: Current addresses of the ip set - sample: - - 8.8.8.8/32 - - 8.8.4.4/32 - returned: Always, as long as the ip set exists - type: list -arn: - description: IP set arn - sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127" - type: str - returned: Always, as long as the ip set exists -description: - description: Description of the ip set - sample: Some IP set description - returned: Always, as long as the ip set exists - type: str -ip_address_version: - description: IP version of the ip set - sample: IPV4 - type: str - returned: Always, as long as the ip set exists -name: - description: IP set name - sample: test02 - returned: Always, as long as the ip set exists - type: str -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags - - -class IpSet: - def __init__(self, wafv2, name, scope, fail_json_aws): - self.wafv2 = wafv2 - self.name = name - self.scope = scope - self.fail_json_aws = fail_json_aws - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() - - def description(self): - return self.existing_set.get("Description") - - def _format_set(self, ip_set): - if ip_set is None: - return None - return camel_dict_to_snake_dict(self.existing_set, ignore_list=["tags"]) - - def get(self): - return self._format_set(self.existing_set) - - def remove(self): - try: - response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.") - return {} - - def create(self, description, ip_address_version, addresses, tags): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "IPAddressVersion": ip_address_version, - "Addresses": addresses, - } - - if description: - req_obj["Description"] = description - - if tags: - req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) - - try: - response = self.wafv2.create_ip_set(**req_obj) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to create wafv2 ip set.") - - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() - return self._format_set(self.existing_set) - - def update(self, description, addresses): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "Id": self.id, - "Addresses": addresses, - "LockToken": self.locktoken, - } - - if description: - req_obj["Description"] = description - - try: - response = self.wafv2.update_ip_set(**req_obj) - except (BotoCoreError, ClientError) as 
e: - self.fail_json_aws(e, msg="Failed to update wafv2 ip set.") - - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() - return self._format_set(self.existing_set) - - def get_set(self): - response = self.list() - existing_set = None - id = None - arn = None - locktoken = None - for item in response.get("IPSets"): - if item.get("Name") == self.name: - id = item.get("Id") - locktoken = item.get("LockToken") - arn = item.get("ARN") - if id: - try: - existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet") - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") - tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_set["tags"] = tags - - return existing_set, id, locktoken, arn - - def list(self, Nextmarker=None): - # there is currently no paginator for wafv2 - req_obj = {"Scope": self.scope, "Limit": 100} - if Nextmarker: - req_obj["NextMarker"] = Nextmarker - - try: - response = self.wafv2.list_ip_sets(**req_obj) - if response.get("NextMarker"): - response["IPSets"] += self.list(Nextmarker=response.get("NextMarker")).get("IPSets") - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to list wafv2 ip set.") - - return response - - -def compare(existing_set, addresses, purge_addresses, state): - diff = False - new_rules = [] - existing_rules = existing_set.get("addresses") - if state == "present": - if purge_addresses: - new_rules = addresses - if sorted(addresses) != sorted(existing_set.get("addresses")): - diff = True - - else: - for requested_rule in addresses: - if requested_rule not in existing_rules: - diff = True - new_rules.append(requested_rule) - - new_rules += existing_rules - else: - if purge_addresses and addresses: - for requested_rule in addresses: - if requested_rule in existing_rules: - diff = True - existing_rules.pop(existing_rules.index(requested_rule)) - new_rules = existing_rules - - return diff, new_rules - - -def main(): - arg_spec = dict( - state=dict(type="str", required=True, choices=["present", "absent"]), - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - description=dict(type="str"), - ip_address_version=dict(type="str", choices=["IPV4", "IPV6"]), - addresses=dict(type="list", elements="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(type="bool", default=True), - purge_addresses=dict(type="bool", default=True), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - required_if=[["state", "present", ["ip_address_version", "addresses"]]], - ) - - state = module.params.get("state") - name = module.params.get("name") - scope = module.params.get("scope") - description = module.params.get("description") - ip_address_version = module.params.get("ip_address_version") - addresses = module.params.get("addresses") - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - purge_addresses = module.params.get("purge_addresses") - check_mode = module.check_mode - - wafv2 = module.client("wafv2") - - change = False - retval = {} - - ip_set = IpSet(wafv2, name, scope, module.fail_json_aws) - - if state == "present": - if ip_set.get(): - tags_updated = ensure_wafv2_tags( - wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode - ) - ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state) - description_updated = bool(description) 
and ip_set.description() != description - change = ips_updated or description_updated or tags_updated - retval = ip_set.get() - if module.check_mode: - pass - elif ips_updated or description_updated: - retval = ip_set.update(description=description, addresses=addresses) - elif tags_updated: - retval, id, locktoken, arn = ip_set.get_set() - else: - if not check_mode: - retval = ip_set.create( - description=description, ip_address_version=ip_address_version, addresses=addresses, tags=tags - ) - change = True - - if state == "absent": - if ip_set.get(): - if addresses: - if len(addresses) > 0: - change, addresses = compare(ip_set.get(), addresses, purge_addresses, state) - if change and not check_mode: - retval = ip_set.update(description=description, addresses=addresses) - else: - if not check_mode: - retval = ip_set.remove() - change = True - - module.exit_json(changed=change, **retval) - - -if __name__ == "__main__": - main() diff --git a/wafv2_ip_set_info.py b/wafv2_ip_set_info.py deleted file mode 100644 index caca5cd7081..00000000000 --- a/wafv2_ip_set_info.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_ip_set_info -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: Get information about wafv2 ip sets -description: - - Get information about existing wafv2 ip sets. -options: - name: - description: - - The name of the IP set. - required: true - type: str - scope: - description: - - Specifies whether this is for an AWS CloudFront distribution or for a regional application. - choices: ["CLOUDFRONT","REGIONAL"] - required: true - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: test ip set - wafv2_ip_set_info: - name: test02 - scope: REGIONAL -""" - -RETURN = r""" -addresses: - description: Current addresses of the ip set - sample: - - 8.8.8.8/32 - - 8.8.4.4/32 - returned: Always, as long as the ip set exists - type: list -arn: - description: IP set arn - sample: "arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/4b007330-2934-4dc5-af24-82dcb3aeb127" - type: str - returned: Always, as long as the ip set exists -description: - description: Description of the ip set - sample: Some IP set description - returned: Always, as long as the ip set exists - type: str -ip_address_version: - description: IP version of the ip set - sample: IPV4 - type: str - returned: Always, as long as the ip set exists -name: - description: IP set name - sample: test02 - returned: Always, as long as the ip set exists - type: str -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags - - -def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): - # there is currently no paginator for wafv2 - req_obj = {"Scope": scope, "Limit": 100} - if Nextmarker: - req_obj["NextMarker"] = Nextmarker - - try: - response = wafv2.list_ip_sets(**req_obj) - if 
response.get("NextMarker"): - response["IPSets"] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get("NextMarker")).get( - "IPSets" - ) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to list wafv2 ip set") - return response - - -def get_ip_set(wafv2, name, scope, id, fail_json_aws): - try: - response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 ip set") - return response - - -def main(): - arg_spec = dict( - name=dict(type="str", required=True), scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]) - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - ) - - name = module.params.get("name") - scope = module.params.get("scope") - - wafv2 = module.client("wafv2") - - # check if the ip set exists - response = list_ip_sets(wafv2, scope, module.fail_json_aws) - - id = None - - for item in response.get("IPSets"): - if item.get("Name") == name: - id = item.get("Id") - arn = item.get("ARN") - - retval = {} - existing_set = None - if id: - existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_set.get("IPSet")) - retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} - module.exit_json(**retval) - - -if __name__ == "__main__": - main() diff --git a/wafv2_resources.py b/wafv2_resources.py deleted file mode 100644 index b36f517120b..00000000000 --- a/wafv2_resources.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_resources -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: wafv2_resources -description: - - Apply or remove a WAFv2 web ACL to other AWS resources. -options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - required: true - type: str - name: - description: - - The name of the web acl.
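The recursive NextMarker chase in list_ip_sets above is needed because botocore currently ships no paginator for the wafv2 client, so each list call returns at most one page of results plus a marker. An equivalent iterative sketch, assuming a plain boto3 wafv2 client:

def list_all_ip_sets(client, scope):
    # Follow NextMarker by hand until the service stops returning one.
    kwargs = {"Scope": scope, "Limit": 100}
    ip_sets = []
    while True:
        page = client.list_ip_sets(**kwargs)
        ip_sets.extend(page.get("IPSets", []))
        if not page.get("NextMarker"):
            return ip_sets
        kwargs["NextMarker"] = page["NextMarker"]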
- type: str - scope: - description: - - Scope of waf - choices: ["CLOUDFRONT","REGIONAL"] - type: str - arn: - description: - - AWS resources (ALB, API Gateway or AppSync GraphQL API) ARN - type: str - required: true - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: add test alb to waf string03 - community.aws.wafv2_resources: - name: string03 - scope: REGIONAL - state: present - arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" -""" - -RETURN = r""" -resource_arns: - description: Current resources where the wafv2 is applied on - sample: - - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" - returned: Always, as long as the wafv2 exists - type: list -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - - -def get_web_acl(wafv2, name, scope, id, fail_json_aws): - try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 web acl.") - return response - - -def list_wafv2_resources(wafv2, arn, fail_json_aws): - try: - response = wafv2.list_resources_for_web_acl(WebACLArn=arn) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to list wafv2 web acl.") - return response - - -def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws): - try: - response = wafv2.associate_web_acl(WebACLArn=waf_arn, ResourceArn=arn) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to add wafv2 web acl.") - return response - - -def remove_resources(wafv2, arn, fail_json_aws): - try: - response = wafv2.disassociate_web_acl(ResourceArn=arn) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to remove wafv2 web acl.") - return response - - -def main(): - arg_spec = dict( - state=dict(type="str", required=True, choices=["present", "absent"]), - name=dict(type="str"), - scope=dict(type="str", choices=["CLOUDFRONT", "REGIONAL"]), - arn=dict(type="str", required=True), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - required_if=[["state", "present", ["name", "scope"]]], - ) - - state = module.params.get("state") - name = module.params.get("name") - scope = module.params.get("scope") - arn = module.params.get("arn") - check_mode = module.check_mode - - wafv2 = module.client("wafv2") - - # check if web acl exists - - response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) - - id = None - retval = {} - change = False - - for item in response.get("WebACLs"): - if item.get("Name") == name: - id = item.get("Id") - - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - waf_arn = existing_acl.get("WebACL").get("ARN") - - retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws) - - if state == "present": - if retval: - if arn not in retval.get("ResourceArns"): - change = True - if not check_mode: - retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws) 
- - elif state == "absent": - if retval: - if arn in retval.get("ResourceArns"): - change = True - if not check_mode: - retval = remove_resources(wafv2, arn, module.fail_json_aws) - - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval)) - - -if __name__ == "__main__": - main() diff --git a/wafv2_resources_info.py b/wafv2_resources_info.py deleted file mode 100644 index 5cafee1f67d..00000000000 --- a/wafv2_resources_info.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_resources_info -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: wafv2_resources_info -description: - - List web acl resources. -options: - name: - description: - - The name wafv2 acl of interest. - type: str - required: true - scope: - description: - - Scope of wafv2 web acl. - required: true - choices: ["CLOUDFRONT","REGIONAL"] - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: get web acl - community.aws.wafv2_resources_info: - name: string03 - scope: REGIONAL -""" - -RETURN = r""" -resource_arns: - description: Current resources where the wafv2 is applied on - sample: - - "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933" - returned: Always, as long as the wafv2 exists - type: list -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - - -def get_web_acl(wafv2, name, scope, id, fail_json_aws): - try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 web acl.") - return response - - -def list_web_acls(wafv2, scope, fail_json_aws): - return wafv2_list_web_acls(wafv2, scope, fail_json_aws) - - -def list_wafv2_resources(wafv2, arn, fail_json_aws): - try: - response = wafv2.list_resources_for_web_acl(WebACLArn=arn) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to list wafv2 resources.") - return response - - -def main(): - arg_spec = dict( - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - ) - - name = module.params.get("name") - scope = module.params.get("scope") - - wafv2 = module.client("wafv2") - # check if web acl exists - response = list_web_acls(wafv2, scope, module.fail_json_aws) - - id = None - retval = {} - - for item in response.get("WebACLs"): - if item.get("Name") == name: - id = item.get("Id") - - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - arn = existing_acl.get("WebACL").get("ARN") - - retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) - - module.exit_json(**retval) - - -if __name__ == "__main__": - main() diff --git a/wafv2_rule_group.py 
b/wafv2_rule_group.py deleted file mode 100644 index e2a7fd1d438..00000000000 --- a/wafv2_rule_group.py +++ /dev/null @@ -1,416 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_rule_group -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: wafv2_rule_group -description: - - Create, modify and delete wafv2 rule groups. -options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - required: true - type: str - name: - description: - - The name of the rule group. - required: true - type: str - rules: - description: - - The Rule statements used to identify the web requests that you want to allow, block, or count. - type: list - elements: dict - scope: - description: - - Scope of wafv2 rule group. - required: true - choices: ["CLOUDFRONT","REGIONAL"] - type: str - description: - description: - - Description of wafv2 rule group. - type: str - sampled_requests: - description: - - Sampled requests, true or false. - type: bool - default: false - cloudwatch_metrics: - description: - - Enable cloudwatch metric for the wafv2 rule group. - type: bool - default: true - metric_name: - description: - - Name of cloudwatch metrics. - - If not given and cloudwatch_metrics is enabled, the name of the rule group itself will be taken. - type: str - capacity: - description: - - Capacity of the wafv2 rule group. - type: int - purge_rules: - description: - - When set to C(no), keep the existing rules in place. Will modify and add, but will not delete. - default: true - type: bool - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: change description - community.aws.wafv2_rule_group: - name: test02 - state: present - description: hallo eins zwei - scope: REGIONAL - capacity: 500 - rules: - - name: eins - priority: 1 - action: - allow: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: fsd - statement: - ip_set_reference_statement: - arn: "{{ IPSET.arn }}" - cloudwatch_metrics: true - tags: - A: B - C: D - register: out - -- name: add rule - community.aws.wafv2_rule_group: - name: test02 - state: present - description: hallo eins zwei - scope: REGIONAL - capacity: 500 - rules: - - name: eins - priority: 1 - action: - allow: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: fsd - statement: - ip_set_reference_statement: - arn: "{{ IPSET.arn }}" - - name: zwei - priority: 2 - action: - block: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: ddos - statement: - or_statement: - statements: - - byte_match_statement: - search_string: ansible.com - positional_constraint: CONTAINS - field_to_match: - single_header: - name: host - text_transformations: - - type: LOWERCASE - priority: 0 - - xss_match_statement: - field_to_match: - body: {} - text_transformations: - - type: NONE - priority: 0 - cloudwatch_metrics: true - tags: - A: B - C: D - register: out -""" - -RETURN = r""" -arn: - description: Rule group arn - sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 - type: str - returned: Always, as long as the web acl exists
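A rule group's capacity is fixed at creation time (note that update_rule_group below passes no Capacity), so the capacity option documented above has to be sized up front. One way to estimate it is the service's CheckCapacity call; a hedged sketch, assuming a plain boto3 wafv2 client:

def required_wcus(client, scope, rules):
    # Ask AWS how many WAF capacity units (WCUs) this rule set consumes;
    # the answer is a lower bound for the rule group's Capacity.
    return client.check_capacity(Scope=scope, Rules=rules)["Capacity"]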
-description: - description: Description of the rule group - sample: Some rule group description - returned: Always, as long as the web acl exists - type: str -capacity: - description: Current capacity of the rule group - sample: 500 - returned: Always, as long as the rule group exists - type: int -name: - description: Rule group name - sample: test02 - returned: Always, as long as the rule group exists - type: str -rules: - description: Current rules of the rule group - returned: Always, as long as the rule group exists - type: list - sample: - - action: - allow: {} - name: eins - priority: 1 - statement: - ip_set_reference_statement: - arn: arn:aws:wafv2:eu-central-1:11111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a - visibility_config: - cloud_watch_metrics_enabled: True - metric_name: fsd - sampled_requests_enabled: True -visibility_config: - description: Visibility config of the rule group - returned: Always, as long as the rule group exists - type: dict - sample: - cloud_watch_metrics_enabled: True - metric_name: blub - sampled_requests_enabled: False -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict - - -class RuleGroup: - def __init__(self, wafv2, name, scope, fail_json_aws): - self.wafv2 = wafv2 - self.id = None - self.name = name - self.scope = scope - self.fail_json_aws = fail_json_aws - self.existing_group = self.get_group() - - def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "Id": self.id, - "Rules": rules, - "LockToken": self.locktoken, - "VisibilityConfig": { - "SampledRequestsEnabled": sampled_requests, - "CloudWatchMetricsEnabled": cloudwatch_metrics, - "MetricName": metric_name, - }, - } - - if description: - req_obj["Description"] = description - - try: - response = self.wafv2.update_rule_group(**req_obj) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to update wafv2 rule group.") - return self.refresh_group() - - def get_group(self): - if self.id is None: - response = self.list() - - for item in response.get("RuleGroups"): - if item.get("Name") == self.name: - self.id = item.get("Id") - self.locktoken = item.get("LockToken") - self.arn = item.get("ARN") - - return self.refresh_group() - - def refresh_group(self): - existing_group = None - if self.id: - try: - response = self.wafv2.get_rule_group(Name=self.name, Scope=self.scope, Id=self.id) - existing_group = response.get("RuleGroup") - self.locktoken = response.get("LockToken") - 
except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to get wafv2 rule group.") - - tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws) - existing_group["tags"] = tags or {} - - return existing_group - - def list(self): - return wafv2_list_rule_groups(self.wafv2, self.scope, self.fail_json_aws) - - def get(self): - if self.existing_group: - return self.existing_group - return None - - def remove(self): - try: - response = self.wafv2.delete_rule_group( - Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken - ) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.") - return response - - def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "Capacity": capacity, - "Rules": rules, - "VisibilityConfig": { - "SampledRequestsEnabled": sampled_requests, - "CloudWatchMetricsEnabled": cloudwatch_metrics, - "MetricName": metric_name, - }, - } - - if description: - req_obj["Description"] = description - - if tags: - req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) - - try: - response = self.wafv2.create_rule_group(**req_obj) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to create wafv2 rule group.") - - self.existing_group = self.get_group() - - return self.existing_group - - -def main(): - arg_spec = dict( - state=dict(type="str", required=True, choices=["present", "absent"]), - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - capacity=dict(type="int"), - description=dict(type="str"), - rules=dict(type="list", elements="dict"), - sampled_requests=dict(type="bool", default=False), - cloudwatch_metrics=dict(type="bool", default=True), - metric_name=dict(type="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - purge_rules=dict(default=True, type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - required_if=[["state", "present", ["capacity", "rules"]]], - ) - - state = module.params.get("state") - name = module.params.get("name") - scope = module.params.get("scope") - capacity = module.params.get("capacity") - description = module.params.get("description") - rules = module.params.get("rules") - sampled_requests = module.params.get("sampled_requests") - cloudwatch_metrics = module.params.get("cloudwatch_metrics") - metric_name = module.params.get("metric_name") - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - purge_rules = module.params.get("purge_rules") - check_mode = module.check_mode - - if rules: - rules = [] - for rule in module.params.get("rules"): - rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True))) - - if not metric_name: - metric_name = name - - wafv2 = module.client("wafv2") - rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws) - - change = False - retval = {} - - if state == "present": - if rule_group.get(): - tagging_change = ensure_wafv2_tags( - wafv2, rule_group.arn, tags, purge_tags, module.fail_json_aws, module.check_mode - ) - rules_change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) - description_change = bool(description) and (rule_group.get().get("Description") != description) - change = tagging_change or 
rules_change or description_change - retval = rule_group.get() - if module.check_mode: - # In check mode nothing changes... - pass - elif rules_change or description_change: - retval = rule_group.update(description, rules, sampled_requests, cloudwatch_metrics, metric_name) - elif tagging_change: - retval = rule_group.refresh_group() - - else: - change = True - if not check_mode: - retval = rule_group.create( - capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags - ) - - elif state == "absent": - if rule_group.get(): - if rules: - if len(rules) > 0: - change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state) - if change and not check_mode: - retval = rule_group.update( - description, rules, sampled_requests, cloudwatch_metrics, metric_name - ) - else: - change = True - if not check_mode: - retval = rule_group.remove() - - module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=["tags"])) - - -if __name__ == "__main__": - main() diff --git a/wafv2_rule_group_info.py b/wafv2_rule_group_info.py deleted file mode 100644 index 58862a9a5f2..00000000000 --- a/wafv2_rule_group_info.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_rule_group_info -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: wafv2_rule_group_info -description: - - Get information about existing wafv2 rule groups. -options: - name: - description: - - The name of the rule group. - required: true - type: str - scope: - description: - - Scope of wafv2 rule group. - required: true - choices: ["CLOUDFRONT","REGIONAL"] - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: rule group info - community.aws.wafv2_rule_group_info: - name: test02 - scope: REGIONAL -""" - -RETURN = r""" -arn: - description: Rule group arn - sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7 - type: str - returned: Always, as long as the web acl exists -description: - description: Description of the rule group - sample: Some rule group description - returned: Always, as long as the web acl exists - type: str -capacity: - description: Current capacity of the rule group - sample: 500 - returned: Always, as long as the rule group exists - type: int -name: - description: Rule group name - sample: test02 - returned: Always, as long as the rule group exists - type: str -rules: - description: Current rules of the rule group - returned: Always, as long as the rule group exists - type: list - sample: - - action: - allow: {} - name: eins - priority: 1 - statement: - ip_set_reference_statement: - arn: arn:aws:wafv2:eu-central-1:111111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a - visibility_config: - cloud_watch_metrics_enabled: True - metric_name: fsd - sampled_requests_enabled: True -visibility_config: - description: Visibility config of the rule group - returned: Always, as long as the rule group exists - type: dict - sample: - cloud_watch_metrics_enabled: True - metric_name: blub - sampled_requests_enabled: False -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by
AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups - - -def get_rule_group(wafv2, name, scope, id, fail_json_aws): - try: - response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 rule group.") - return response - - -def main(): - arg_spec = dict( - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - ) - - name = module.params.get("name") - scope = module.params.get("scope") - - wafv2 = module.client("wafv2") - - # check if rule group exists - response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) - id = None - retval = {} - - for item in response.get("RuleGroups"): - if item.get("Name") == name: - id = item.get("Id") - arn = item.get("ARN") - - existing_group = None - if id: - existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup")) - tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval["tags"] = tags or {} - - module.exit_json(**retval) - - -if __name__ == "__main__": - main() diff --git a/wafv2_web_acl.py b/wafv2_web_acl.py deleted file mode 100644 index 23e8f9c6b09..00000000000 --- a/wafv2_web_acl.py +++ /dev/null @@ -1,596 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_web_acl -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: Create and delete WAF Web ACLs -description: - - Create, modify or delete AWS WAF v2 web ACLs (not for classic WAF). - - See docs at U(https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) -options: - state: - description: - - Whether the rule is present or absent. - choices: ["present", "absent"] - required: true - type: str - name: - description: - - The name of the web acl. - required: true - type: str - scope: - description: - - Geographical scope of the web acl. - required: true - choices: ["CLOUDFRONT", "REGIONAL"] - type: str - description: - description: - - Description of wafv2 web acl. - type: str - default_action: - description: - - Default action of the wafv2 web acl. - choices: ["Block", "Allow"] - type: str - sampled_requests: - description: - - Whether to store a sample of the web requests, true or false. - type: bool - default: false - cloudwatch_metrics: - description: - - Enable cloudwatch metric for wafv2 web acl. - type: bool - default: true - metric_name: - description: - - Name of cloudwatch metrics. - - If not given and cloudwatch_metrics is enabled, the name of the web acl itself will be taken. - type: str - rules: - description: - - The Rule statements used to identify the web requests that you want to allow, block, or count. - - For a list of managed rules see U(https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html). 
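The name-to-Id resolution in the main() above is the pattern all of these wafv2 *_info modules share: the service API is keyed by Id, so the human-facing name is matched against a list call before the full object can be fetched. Condensed into one hypothetical helper (pagination omitted for brevity):

def find_rule_group(client, name, scope):
    # Resolve the rule group's name to its Id, then fetch the details.
    for item in client.list_rule_groups(Scope=scope, Limit=100).get("RuleGroups", []):
        if item.get("Name") == name:
            return client.get_rule_group(Name=name, Scope=scope, Id=item["Id"])["RuleGroup"]
    return None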
- type: list - elements: dict - suboptions: - name: - description: - - The name of the wafv2 rule. - type: str - priority: - description: - - The rule priority. - type: int - action: - description: - - Whether a rule is blocked, allowed or counted. - type: dict - visibility_config: - description: - - Visibility of single wafv2 rule. - type: dict - statement: - description: - - Rule configuration. - type: dict - custom_response_bodies: - description: - - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules by providing the key of the body dictionary element. - - Each element must have a unique dict key and in the dict two keys for I(content_type) and I(content). - - Requires botocore >= 1.20.40. - type: dict - version_added: 3.1.0 - purge_rules: - description: - - When set to C(no), keep the existing rules in place. Will modify and add, but will not delete. - default: true - type: bool - -notes: - - Support for the I(purge_tags) parameter was added in release 4.0.0. - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.tags - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: Create test web acl - community.aws.wafv2_web_acl: - name: test05 - description: hallo eins - scope: REGIONAL - default_action: Allow - sampled_requests: false - cloudwatch_metrics: true - metric_name: test05-acl-metric - rules: - - name: zwei - priority: 0 - action: - block: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: ddos - statement: - xss_match_statement: - field_to_match: - body: {} - text_transformations: - - type: NONE - priority: 0 - - name: admin_protect - priority: 1 - override_action: - none: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: fsd - statement: - managed_rule_group_statement: - vendor_name: AWS - name: AWSManagedRulesAdminProtectionRuleSet - - # AWS Managed Bad Input Rule Set - # but allow PROPFIND_METHOD used e.g. by webdav - - name: bad_input_protect_whitelist_webdav - priority: 2 - override_action: - none: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: bad_input_protect - statement: - managed_rule_group_statement: - vendor_name: AWS - name: AWSManagedRulesKnownBadInputsRuleSet - excluded_rules: - - name: PROPFIND_METHOD - - # Rate Limit example. 1500 req/5min - # counted for two domains via or_statement.
login.mydomain.tld and api.mydomain.tld - - name: rate_limit_example - priority: 3 - action: - block: {} - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: mydomain-ratelimit - statement: - rate_based_statement: - limit: 1500 - aggregate_key_type: IP - scope_down_statement: - or_statement: - statements: - - byte_match_statement: - search_string: login.mydomain.tld - positional_constraint: CONTAINS - field_to_match: - single_header: - name: host - text_transformations: - - type: LOWERCASE - priority: 0 - - byte_match_statement: - search_string: api.mydomain.tld - positional_constraint: CONTAINS - field_to_match: - single_header: - name: host - text_transformations: - - type: LOWERCASE - priority: 0 - purge_rules: true - tags: - A: B - C: D - state: present - -- name: Create IP filtering web ACL - community.aws.wafv2_web_acl: - name: ip-filtering-traffic - description: ACL that filters web traffic based on rate limits and whitelists some IPs - scope: REGIONAL - default_action: Allow - sampled_requests: true - cloudwatch_metrics: true - metric_name: ip-filtering-traffic - rules: - - name: whitelist-own-IPs - priority: 0 - action: - allow: {} - statement: - ip_set_reference_statement: - arn: 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: waf-acl-rule-whitelist-own-IPs - - name: rate-limit-per-IP - priority: 1 - action: - block: - custom_response: - response_code: 429 - custom_response_body_key: too_many_requests - statement: - rate_based_statement: - limit: 5000 - aggregate_key_type: IP - visibility_config: - sampled_requests_enabled: true - cloud_watch_metrics_enabled: true - metric_name: waf-acl-rule-rate-limit-per-IP - purge_rules: true - custom_response_bodies: - too_many_requests: - content_type: APPLICATION_JSON - content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' - region: us-east-1 - state: present - -""" - -RETURN = r""" -arn: - description: web acl arn - sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 - type: str - returned: Always, as long as the web acl exists
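The custom_response_bodies mapping in the example above is written in snake_case, but the AWS API expects CamelCase keys; main() further down converts each body with snake_dict_to_camel_dict, roughly like this (values are left untouched):

from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

body = {"content_type": "APPLICATION_JSON", "content": '{"message": "blocked"}'}
print(snake_dict_to_camel_dict(body, capitalize_first=True))
# {'ContentType': 'APPLICATION_JSON', 'Content': '{"message": "blocked"}'}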
coming from your IP" }' - returned: Always, as long as the web acl exists -visibility_config: - description: Visibility config of the web acl - returned: Always, as long as the web acl exists - type: dict - sample: - cloud_watch_metrics_enabled: true - metric_name: blub - sampled_requests_enabled: false -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict - - -class WebACL: - def __init__(self, wafv2, name, scope, fail_json_aws): - self.wafv2 = wafv2 - self.name = name - self.scope = scope - self.fail_json_aws = fail_json_aws - self.existing_acl, self.id, self.locktoken = self.get_web_acl() - - def update( - self, - default_action, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - custom_response_bodies, - ): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "Id": self.id, - "DefaultAction": default_action, - "Rules": rules, - "VisibilityConfig": { - "SampledRequestsEnabled": sampled_requests, - "CloudWatchMetricsEnabled": cloudwatch_metrics, - "MetricName": metric_name, - }, - "LockToken": self.locktoken, - } - - if description: - req_obj["Description"] = description - - if custom_response_bodies: - req_obj["CustomResponseBodies"] = custom_response_bodies - - try: - response = self.wafv2.update_web_acl(**req_obj) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") - - self.existing_acl, self.id, self.locktoken = self.get_web_acl() - return self.existing_acl - - def remove(self): - try: - response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.") - return response - - def get(self): - if self.existing_acl: - return self.existing_acl - return None - - def get_web_acl(self): - id = None - locktoken = None - arn = None - existing_acl = None - response = self.list() - - for item in response.get("WebACLs"): - if item.get("Name") == self.name: - id = item.get("Id") - locktoken = item.get("LockToken") - arn = item.get("ARN") - - if id: - try: - existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") - tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) - existing_acl["tags"] = tags - return existing_acl, id, locktoken - - def list(self): - return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) - - def create( - 
self, - default_action, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - tags, - description, - custom_response_bodies, - ): - req_obj = { - "Name": self.name, - "Scope": self.scope, - "DefaultAction": default_action, - "Rules": rules, - "VisibilityConfig": { - "SampledRequestsEnabled": sampled_requests, - "CloudWatchMetricsEnabled": cloudwatch_metrics, - "MetricName": metric_name, - }, - } - - if custom_response_bodies: - req_obj["CustomResponseBodies"] = custom_response_bodies - if description: - req_obj["Description"] = description - if tags: - req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) - - try: - response = self.wafv2.create_web_acl(**req_obj) - except (BotoCoreError, ClientError) as e: - self.fail_json_aws(e, msg="Failed to create wafv2 web acl.") - - self.existing_acl, self.id, self.locktoken = self.get_web_acl() - return self.existing_acl - - -def format_result(result): - # We were returning details of the Web ACL inside a "web_acl" parameter on - # creation, keep returning it to avoid breaking existing playbooks, but also - # return what the docs said we return (and returned when no change happened) - retval = dict(result) - if "WebACL" in retval: - retval.update(retval["WebACL"]) - - return camel_dict_to_snake_dict(retval, ignore_list=["tags"]) - - -def main(): - arg_spec = dict( - state=dict(type="str", required=True, choices=["present", "absent"]), - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - description=dict(type="str"), - default_action=dict(type="str", choices=["Block", "Allow"]), - rules=dict(type="list", elements="dict"), - sampled_requests=dict(type="bool", default=False), - cloudwatch_metrics=dict(type="bool", default=True), - metric_name=dict(type="str"), - tags=dict(type="dict", aliases=["resource_tags"]), - purge_tags=dict(default=True, type="bool"), - custom_response_bodies=dict(type="dict"), - purge_rules=dict(default=True, type="bool"), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - required_if=[["state", "present", ["default_action", "rules"]]], - ) - - state = module.params.get("state") - name = module.params.get("name") - scope = module.params.get("scope") - description = module.params.get("description") - default_action = module.params.get("default_action") - rules = module.params.get("rules") - sampled_requests = module.params.get("sampled_requests") - cloudwatch_metrics = module.params.get("cloudwatch_metrics") - metric_name = module.params.get("metric_name") - tags = module.params.get("tags") - purge_tags = module.params.get("purge_tags") - purge_rules = module.params.get("purge_rules") - check_mode = module.check_mode - - custom_response_bodies = module.params.get("custom_response_bodies") - if custom_response_bodies: - module.require_botocore_at_least("1.20.40", reason="to set custom response bodies") - custom_response_bodies = {} - - for custom_name, body in module.params.get("custom_response_bodies").items(): - custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True) - - if default_action == "Block": - default_action = {"Block": {}} - elif default_action == "Allow": - default_action = {"Allow": {}} - - if rules: - rules = [] - for rule in module.params.get("rules"): - rules.append(wafv2_snake_dict_to_camel_dict(snake_dict_to_camel_dict(rule, capitalize_first=True))) - - if not metric_name: - metric_name = name - - wafv2 = module.client("wafv2") - web_acl = WebACL(wafv2, 
name, scope, module.fail_json_aws) - change = False - retval = {} - - if state == "present": - if web_acl.get(): - tags_changed = ensure_wafv2_tags( - wafv2, web_acl.get().get("WebACL").get("ARN"), tags, purge_tags, module.fail_json_aws, module.check_mode - ) - change, rules = compare_priority_rules(web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state) - change = change or (description and web_acl.get().get("WebACL").get("Description") != description) - change = change or (default_action and web_acl.get().get("WebACL").get("DefaultAction") != default_action) - - if change and not check_mode: - retval = web_acl.update( - default_action, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - custom_response_bodies, - ) - elif tags_changed: - retval, id, locktoken = web_acl.get_web_acl() - else: - retval = web_acl.get() - - change |= tags_changed - - else: - change = True - if not check_mode: - retval = web_acl.create( - default_action, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - tags, - description, - custom_response_bodies, - ) - - elif state == "absent": - if web_acl.get(): - if rules: - if len(rules) > 0: - change, rules = compare_priority_rules( - web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state - ) - if change and not check_mode: - retval = web_acl.update( - default_action, - description, - rules, - sampled_requests, - cloudwatch_metrics, - metric_name, - custom_response_bodies, - ) - else: - change = True - if not check_mode: - retval = web_acl.remove() - - module.exit_json(changed=change, **format_result(retval)) - - -if __name__ == "__main__": - main() diff --git a/wafv2_web_acl_info.py b/wafv2_web_acl_info.py deleted file mode 100644 index e3cdc46e330..00000000000 --- a/wafv2_web_acl_info.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r""" ---- -module: wafv2_web_acl_info -version_added: 1.5.0 -author: - - "Markus Bergholz (@markuman)" -short_description: wafv2_web_acl_info -description: - - Get information about an existing WAFv2 web ACL. -options: - name: - description: - - The name of the web acl. - required: true - type: str - scope: - description: - - Scope of wafv2 web acl.
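Everything the WebACL class above does is built around wafv2's optimistic locking: get_web_acl returns a LockToken alongside the ACL, every mutating call must present the token from the most recent read, and a stale token is rejected with WAFOptimisticLockException, which is why update() and create() immediately re-read the ACL. Reduced to a hedged boto3 sketch:

def delete_acl_with_fresh_token(client, name, scope, acl_id):
    # Fetch a fresh LockToken immediately before mutating.
    lock_token = client.get_web_acl(Name=name, Scope=scope, Id=acl_id)["LockToken"]
    return client.delete_web_acl(Name=name, Scope=scope, Id=acl_id, LockToken=lock_token)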
- required: true - choices: ["CLOUDFRONT", "REGIONAL"] - type: str - -extends_documentation_fragment: - - amazon.aws.common.modules - - amazon.aws.region.modules - - amazon.aws.boto3 -""" - -EXAMPLES = r""" -- name: get web acl - community.aws.wafv2_web_acl_info: - name: test05 - scope: REGIONAL - register: out -""" - -RETURN = r""" -arn: - description: web acl arn - sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61 - type: str - returned: Always, as long as the web acl exists -description: - description: Description of the web acl - sample: Some web acl description - returned: Always, as long as the web acl exists - type: str -capacity: - description: Current capacity of the web acl - sample: 140 - returned: Always, as long as the web acl exists - type: int -name: - description: Web acl name - sample: test02 - returned: Always, as long as the web acl exists - type: str -rules: - description: Current rules of the web acl - returned: Always, as long as the web acl exists - type: list - sample: - - name: admin_protect - override_action: - none: {} - priority: 1 - statement: - managed_rule_group_statement: - name: AWSManagedRulesAdminProtectionRuleSet - vendor_name: AWS - visibility_config: - cloud_watch_metrics_enabled: true - metric_name: admin_protect - sampled_requests_enabled: true -visibility_config: - description: Visibility config of the web acl - returned: Always, as long as the web acl exists - type: dict - sample: - cloud_watch_metrics_enabled: true - metric_name: blub - sampled_requests_enabled: false -""" - -try: - from botocore.exceptions import BotoCoreError - from botocore.exceptions import ClientError -except ImportError: - pass # caught by AnsibleAWSModule - -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict - -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule -from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags -from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls - - -def get_web_acl(wafv2, name, scope, id, fail_json_aws): - try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) - except (BotoCoreError, ClientError) as e: - fail_json_aws(e, msg="Failed to get wafv2 web acl.") - return response - - -def main(): - arg_spec = dict( - name=dict(type="str", required=True), - scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]), - ) - - module = AnsibleAWSModule( - argument_spec=arg_spec, - supports_check_mode=True, - ) - - state = module.params.get("state") - name = module.params.get("name") - scope = module.params.get("scope") - - wafv2 = module.client("wafv2") - # check if web acl exists - response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) - - id = None - arn = None - retval = {} - - for item in response.get("WebACLs"): - if item.get("Name") == name: - id = item.get("Id") - arn = item.get("ARN") - - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) - retval = camel_dict_to_snake_dict(existing_acl.get("WebACL")) - tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) - retval["tags"] = tags - - module.exit_json(**retval) - - -if __name__ == "__main__": - main() From 1b1acfc2a93b639ae468190753313a731c06344f Mon Sep 17 00:00:00 2001 From: abikouo Date: Mon, 18 Sep 2023 18:48:13 +0200 Subject: [PATCH 680/683] apply deprecation on iam_role and 
iam_role_info, add some unit tests and minor code refactoring --- ...915_migrate_iam_role_and_iam_role_info.yml | 7 + meta/runtime.yml | 2 + plugins/modules/iam_role.py | 160 +++-- plugins/modules/iam_role_info.py | 55 +- tests/integration/targets/iam_role/aliases | 9 + .../targets/iam_role/defaults/main.yml | 6 + .../targets/iam_role/files/deny-all-a.json | 13 + .../targets/iam_role/files/deny-all-b.json | 13 + .../targets/iam_role/files/deny-all.json | 12 + .../targets/iam_role/files/deny-assume.json | 10 + .../targets/iam_role/meta/main.yml | 1 + .../iam_role/tasks/boundary_policy.yml | 94 +++ .../iam_role/tasks/complex_role_creation.yml | 131 ++++ .../iam_role/tasks/creation_deletion.yml | 411 ++++++++++++ .../iam_role/tasks/description_update.yml | 148 +++++ .../iam_role/tasks/inline_policy_update.yml | 48 ++ .../targets/iam_role/tasks/main.yml | 119 ++++ .../iam_role/tasks/max_session_update.yml | 71 +++ .../iam_role/tasks/parameter_checks.yml | 90 +++ .../targets/iam_role/tasks/policy_update.yml | 250 ++++++++ .../targets/iam_role/tasks/role_removal.yml | 65 ++ .../targets/iam_role/tasks/tags_update.yml | 341 ++++++++++ tests/unit/plugins/modules/test_iam_role.py | 598 ++++++++++++++++++ .../plugins/modules/test_iam_role_info.py | 245 +++++++ 24 files changed, 2787 insertions(+), 112 deletions(-) create mode 100644 changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml create mode 100644 tests/integration/targets/iam_role/aliases create mode 100644 tests/integration/targets/iam_role/defaults/main.yml create mode 100644 tests/integration/targets/iam_role/files/deny-all-a.json create mode 100644 tests/integration/targets/iam_role/files/deny-all-b.json create mode 100644 tests/integration/targets/iam_role/files/deny-all.json create mode 100644 tests/integration/targets/iam_role/files/deny-assume.json create mode 100644 tests/integration/targets/iam_role/meta/main.yml create mode 100644 tests/integration/targets/iam_role/tasks/boundary_policy.yml create mode 100644 tests/integration/targets/iam_role/tasks/complex_role_creation.yml create mode 100644 tests/integration/targets/iam_role/tasks/creation_deletion.yml create mode 100644 tests/integration/targets/iam_role/tasks/description_update.yml create mode 100644 tests/integration/targets/iam_role/tasks/inline_policy_update.yml create mode 100644 tests/integration/targets/iam_role/tasks/main.yml create mode 100644 tests/integration/targets/iam_role/tasks/max_session_update.yml create mode 100644 tests/integration/targets/iam_role/tasks/parameter_checks.yml create mode 100644 tests/integration/targets/iam_role/tasks/policy_update.yml create mode 100644 tests/integration/targets/iam_role/tasks/role_removal.yml create mode 100644 tests/integration/targets/iam_role/tasks/tags_update.yml create mode 100644 tests/unit/plugins/modules/test_iam_role.py create mode 100644 tests/unit/plugins/modules/test_iam_role_info.py diff --git a/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml b/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml new file mode 100644 index 00000000000..b297ffcbfb0 --- /dev/null +++ b/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml @@ -0,0 +1,7 @@ +breaking_changes: +- iam_role - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should be updated + to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/amazon.aws/pull/1757). 
diff --git a/meta/runtime.yml b/meta/runtime.yml index c627df5be2b..c88a894d544 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -72,6 +72,8 @@ action_groups: - iam_instance_profile_info - iam_policy - iam_policy_info + - iam_role + - iam_role_info - iam_user - iam_user_info - kms_key diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index b39281e17b9..d8aa8299936 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -45,7 +45,7 @@ description: - A list of managed policy ARNs or friendly names. - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]). - - To embed an inline policy, use M(community.aws.iam_policy). + - To embed an inline policy, use M(amazon.aws.iam_policy). aliases: ['managed_policy'] type: list elements: str @@ -100,7 +100,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create a role with description and tags - community.aws.iam_role: + amazon.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" description: This is My New Role @@ -108,20 +108,20 @@ env: dev - name: "Create a role and attach a managed policy called 'PowerUserAccess'" - community.aws.iam_role: + amazon.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" managed_policies: - arn:aws:iam::aws:policy/PowerUserAccess - name: Keep the role created above but remove all managed policies - community.aws.iam_role: + amazon.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file','policy.json') }}" managed_policies: [] - name: Delete the role - community.aws.iam_role: + amazon.aws.iam_role: name: mynewrole assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" state: absent @@ -161,9 +161,6 @@ assume_role_policy_document: description: - the policy that grants an entity permission to assume the role - - | - note: the case of keys in this dictionary are currently converted from CamelCase to - snake_case.
In a release after 2023-12-01 this behaviour will change type: dict returned: always sample: { @@ -232,7 +229,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule @AWSRetry.jittered_backoff() @@ -241,6 +238,17 @@ def _list_policies(client): return paginator.paginate().build_full_result()["Policies"] +def _wait_iam_role(client, role_name, wait_timeout): + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + waiter = client.get_waiter("role_exists") + waiter.wait( + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, + RoleName=role_name, + ) + + def wait_iam_exists(module, client): if module.check_mode: return @@ -250,15 +258,8 @@ def wait_iam_exists(module, client): role_name = module.params.get("name") wait_timeout = module.params.get("wait_timeout") - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - try: - waiter = client.get_waiter("role_exists") - waiter.wait( - WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, - RoleName=role_name, - ) + _wait_iam_role(client, role_name, wait_timeout) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="Timeout while waiting on IAM role creation") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -529,30 +530,41 @@ def create_or_update_role(module, client): role["AttachedPolicies"] = get_attached_policy_list(module, client, role_name) role["tags"] = get_role_tags(module, client) - camel_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) - camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {}) - module.exit_json(changed=changed, iam_role=camel_role, **camel_role) + camel_role = camel_dict_to_snake_dict(role, ignore_list=["tags", "AssumeRolePolicyDocument"]) + camel_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument", {}) + camel_role["assume_role_policy_document_raw"] = camel_role["assume_role_policy_document"] + module.exit_json(changed=changed, iam_role=camel_role) -def create_instance_profiles(module, client, role_name, path): - # Fetch existing Profiles +def list_instance_profiles_for_role(module, client, name): try: - instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)[ - "InstanceProfiles" - ] + return client.list_instance_profiles_for_role(RoleName=name, aws_retry=True)["InstanceProfiles"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {role_name}") + module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {name}") - # Profile already exists - if any(p["InstanceProfileName"] == role_name for p in instance_profiles): - return False - if module.check_mode: - return True +def delete_instance_profile(module, client, name): + try: + client.delete_instance_profile(InstanceProfileName=name, aws_retry=True) + except is_boto3_error_code("NoSuchEntityException"): + pass + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to remove instance profile 
{name}") - # Make sure an instance profile is created + +def remove_role_from_instance_profile(module, client, role_name, profile_name): + try: + client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"Unable to remove role {role_name} from instance profile {profile_name}") + + +def create_instance_profile(module, client, name, path): try: - client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True) + client.create_instance_profile(InstanceProfileName=name, Path=path, aws_retry=True) except is_boto3_error_code("EntityAlreadyExists"): # If the profile already exists, no problem, move on. # Implies someone's changing things at the same time... @@ -561,48 +573,48 @@ def create_instance_profiles(module, client, role_name, path): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Unable to create instance profile for role {role_name}") + module.fail_json_aws(e, msg=f"Unable to create instance profile for role {name}") - # And attach the role to the profile + +def add_role_to_instance_profile(module, client, name): try: - client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True) + client.add_role_to_instance_profile(InstanceProfileName=name, RoleName=name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to attach role {role_name} to instance profile {role_name}") + module.fail_json_aws(e, msg=f"Unable to attach role {name} to instance profile {name}") + + +def create_instance_profiles(module, client, role_name, path): + # Fetch existing Profiles + instance_profiles = list_instance_profiles_for_role(module, client, role_name) + + # Profile already exists + if any(p["InstanceProfileName"] == role_name for p in instance_profiles): + return False + + if module.check_mode: + return True + + # Make sure an instance profile is created + create_instance_profile(module, client, role_name, path) + + # And attach the role to the profile + add_role_to_instance_profile(module, client, role_name) return True def remove_instance_profiles(module, client, role_name): - delete_profiles = module.params.get("delete_instance_profile") - - try: - instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)[ - "InstanceProfiles" - ] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {role_name}") + if not module.check_mode: + delete_profiles = module.params.get("delete_instance_profile") + instance_profiles = list_instance_profiles_for_role(module, client, role_name) - # Remove the role from the instance profile(s) - for profile in instance_profiles: - profile_name = profile["InstanceProfileName"] - try: - if not module.check_mode: - client.remove_role_from_instance_profile( - aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name - ) - if profile_name == role_name: - if delete_profiles: - try: - client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True) - except is_boto3_error_code("NoSuchEntityException"): - pass - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: 
disable=duplicate-except - module.fail_json_aws(e, msg=f"Unable to remove instance profile {profile_name}") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg=f"Unable to remove role {role_name} from instance profile {profile_name}") + # Remove the role from the instance profile(s) + for profile in instance_profiles: + profile_name = profile["InstanceProfileName"] + remove_role_from_instance_profile(module, client, role_name, profile_name) + if profile_name == role_name: + if delete_profiles: + delete_instance_profile(module, client, profile_name) def destroy_role(module, client): @@ -728,22 +740,6 @@ def main(): supports_check_mode=True, ) - module.deprecate( - "All return values other than iam_role and changed have been deprecated and " - "will be removed in a release after 2023-12-01.", - date="2023-12-01", - collection_name="community.aws", - ) - - module.deprecate( - "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. The " - "iam_role.assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", - collection_name="community.aws", - ) - if module.params.get("boundary"): if module.params.get("create_instance_profile"): module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index d23754d90a0..caab88625f1 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -34,15 +34,15 @@ EXAMPLES = r""" - name: find all existing IAM roles - community.aws.iam_role_info: + amazon.aws.iam_role_info: register: result - name: describe a single role - community.aws.iam_role_info: + amazon.aws.iam_role_info: name: MyIAMRole - name: describe all roles matching a path prefix - community.aws.iam_role_info: + amazon.aws.iam_role_info: path_prefix: /application/path """ @@ -59,10 +59,7 @@ sample: arn:aws:iam::123456789012:role/AnsibleTestRole assume_role_policy_document: description: - - The policy that grants an entity permission to assume the role - - | - Note: the case of keys in this dictionary are currently converted from CamelCase to - snake_case. In a release after 2023-12-01 this behaviour will change. + - The policy that grants an entity permission to assume the role. 
returned: always type: dict assume_role_policy_document_raw: @@ -163,7 +160,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule @AWSRetry.jittered_backoff() @@ -190,6 +187,18 @@ def list_iam_instance_profiles_for_role_with_backoff(client, role_name): return paginator.paginate(RoleName=role_name).build_full_result()["InstanceProfiles"] +def get_role(module, client, name): + try: + return [client.get_role(RoleName=name, aws_retry=True)["Role"]] + except is_boto3_error_code("NoSuchEntity"): + return [] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't get IAM role {name}") + + def describe_iam_role(module, client, role): name = role["RoleName"] try: @@ -216,15 +225,7 @@ def describe_iam_roles(module, client): name = module.params["name"] path_prefix = module.params["path_prefix"] if name: - try: - roles = [client.get_role(RoleName=name, aws_retry=True)["Role"]] - except is_boto3_error_code("NoSuchEntity"): - return [] - except ( - botocore.exceptions.ClientError, - botocore.exceptions.BotoCoreError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg=f"Couldn't get IAM role {name}") + roles = get_role(module, client, name) else: params = dict() if path_prefix: @@ -243,15 +244,18 @@ def describe_iam_roles(module, client): def normalize_profile(profile): new_profile = camel_dict_to_snake_dict(profile) if profile.get("Roles"): - profile["roles"] = [normalize_role(role) for role in profile.get("Roles")] + new_profile["roles"] = [normalize_role(role) for role in profile.get("Roles")] + del new_profile["Roles"] return new_profile def normalize_role(role): - new_role = camel_dict_to_snake_dict(role, ignore_list=["tags"]) - new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") + new_role = camel_dict_to_snake_dict(role, ignore_list=["tags", "AssumeRolePolicyDocument"]) + new_role["assume_role_policy_document"] = role.pop("AssumeRolePolicyDocument", {}) + new_role["assume_role_policy_document_raw"] = new_role["assume_role_policy_document"] if role.get("InstanceProfiles"): - role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] + new_role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] + del new_role["InstanceProfiles"] return new_role @@ -272,15 +276,6 @@ def main(): client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) - module.deprecate( - "In a release after 2023-12-01 the contents of assume_role_policy_document " - "will no longer be converted from CamelCase to snake_case. 
The " - ".assume_role_policy_document_raw return value already returns the " - "policy document in this future format.", - date="2023-12-01", - collection_name="community.aws", - ) - module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client)) diff --git a/tests/integration/targets/iam_role/aliases b/tests/integration/targets/iam_role/aliases new file mode 100644 index 00000000000..483c861158c --- /dev/null +++ b/tests/integration/targets/iam_role/aliases @@ -0,0 +1,9 @@ +# reason: missing-policy +# It should be possible to test iam_role by limiting which policies can be +# attached to the roles. +# Careful review is needed prior to adding this to the main CI. +unsupported + +cloud/aws + +iam_role_info diff --git a/tests/integration/targets/iam_role/defaults/main.yml b/tests/integration/targets/iam_role/defaults/main.yml new file mode 100644 index 00000000000..d496c421636 --- /dev/null +++ b/tests/integration/targets/iam_role/defaults/main.yml @@ -0,0 +1,6 @@ +--- +test_role: '{{ resource_prefix }}-role' +test_path: '/{{ resource_prefix }}/' +safe_managed_policy: 'AWSDenyAll' +custom_policy_name: '{{ resource_prefix }}-denyall' +boundary_policy: 'arn:aws:iam::aws:policy/AWSDenyAll' diff --git a/tests/integration/targets/iam_role/files/deny-all-a.json b/tests/integration/targets/iam_role/files/deny-all-a.json new file mode 100644 index 00000000000..ae62fd1975d --- /dev/null +++ b/tests/integration/targets/iam_role/files/deny-all-a.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*", + "Sid": "DenyA" + } + ] +} diff --git a/tests/integration/targets/iam_role/files/deny-all-b.json b/tests/integration/targets/iam_role/files/deny-all-b.json new file mode 100644 index 00000000000..3a4704a46ab --- /dev/null +++ b/tests/integration/targets/iam_role/files/deny-all-b.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*", + "Sid": "DenyB" + } + ] +} diff --git a/tests/integration/targets/iam_role/files/deny-all.json b/tests/integration/targets/iam_role/files/deny-all.json new file mode 100644 index 00000000000..3d324b9b9c6 --- /dev/null +++ b/tests/integration/targets/iam_role/files/deny-all.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*" + } + ] +} diff --git a/tests/integration/targets/iam_role/files/deny-assume.json b/tests/integration/targets/iam_role/files/deny-assume.json new file mode 100644 index 00000000000..73e87715862 --- /dev/null +++ b/tests/integration/targets/iam_role/files/deny-assume.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { "Service": "ec2.amazonaws.com" }, + "Effect": "Deny" + } + ] +} diff --git a/tests/integration/targets/iam_role/meta/main.yml b/tests/integration/targets/iam_role/meta/main.yml new file mode 100644 index 00000000000..32cf5dda7ed --- /dev/null +++ b/tests/integration/targets/iam_role/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/iam_role/tasks/boundary_policy.yml b/tests/integration/targets/iam_role/tasks/boundary_policy.yml new file mode 100644 index 00000000000..89a983f1564 --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/boundary_policy.yml @@ -0,0 +1,94 @@ +--- +- name: "Create minimal role with no boundary policy" + iam_role: + name: "{{ test_role }}" + 
create_instance_profile: no + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: "Configure Boundary Policy (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + boundary: "{{ boundary_policy }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Configure Boundary Policy" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + boundary: "{{ boundary_policy }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: "Configure Boundary Policy (no change) - check mode" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + boundary: "{{ boundary_policy }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Configure Boundary Policy (no change)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + boundary: "{{ boundary_policy }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after adding boundary policy" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + +- name: "Remove IAM Role" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is changed \ No newline at end of file diff --git a/tests/integration/targets/iam_role/tasks/complex_role_creation.yml b/tests/integration/targets/iam_role/tasks/complex_role_creation.yml new file mode 100644 index 00000000000..c23234ebf1f --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/complex_role_creation.yml @@ -0,0 +1,131 @@ +--- +- name: "Complex IAM Role (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: no + description: "Ansible Test Role {{ resource_prefix }}" + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: "ValueA" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after Complex Role creation in check_mode" + iam_role_info: + name: "{{ test_role }}" + register: role_info +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: "Complex IAM Role" + iam_role: + name: "{{ test_role }}" + 
assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: no + description: "Ansible Test Role {{ resource_prefix }}" + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: "ValueA" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - 'iam_role.iam_role.arn.startswith("arn")' + - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )' + # Would be nice to test the contents... + - '"assume_role_policy_document" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 2 + - iam_role.iam_role.max_session_duration == 43200 + - iam_role.iam_role.path == test_path + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: "Complex IAM role (no change) - check mode" + iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: no + description: "Ansible Test Role {{ resource_prefix }}" + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: "ValueA" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Complex IAM role (no change)" + iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: no + description: "Ansible Test Role {{ resource_prefix }}" + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: "ValueA" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after Role creation" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 2 + - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == test_path + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "ValueA" diff --git 
a/tests/integration/targets/iam_role/tasks/creation_deletion.yml b/tests/integration/targets/iam_role/tasks/creation_deletion.yml new file mode 100644 index 00000000000..78d60cb7a0d --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/creation_deletion.yml @@ -0,0 +1,411 @@ +--- +- name: Try running some rapid fire create/delete tests + block: + - name: "Minimal IAM Role without instance profile (rapid)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + + - name: "Minimal IAM Role without instance profile (rapid)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role_again + + - assert: + that: + - iam_role is changed + - iam_role_again is not changed + + - name: "Remove IAM Role (rapid)" + iam_role: + state: absent + name: "{{ test_role }}" + register: iam_role + + - name: "Remove IAM Role (rapid)" + iam_role: + state: absent + name: "{{ test_role }}" + register: iam_role_again + + - assert: + that: + - iam_role is changed + - iam_role_again is not changed + + - name: "Minimal IAM Role without instance profile (rapid)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + + - name: "Remove IAM Role (rapid)" + iam_role: + state: absent + name: "{{ test_role }}" + + register: iam_role_again + - assert: + that: + - iam_role is changed + - iam_role_again is changed + +# =================================================================== +# Role Creation +# (without Instance profile) +- name: "iam_role_info before Role creation (no args)" + iam_role_info: + register: role_info + +- assert: + that: + - role_info is succeeded + +- name: "iam_role_info before Role creation (search for test role)" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: "Minimal IAM Role (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after Role creation in check_mode" + iam_role_info: + name: "{{ test_role }}" + register: role_info +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: "Minimal IAM Role without instance profile" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - 'iam_role.iam_role.arn.startswith("arn")' + - 'iam_role.iam_role.arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in iam_role.iam_role' + - '"assume_role_policy_document_raw" in iam_role.iam_role' + - iam_role.iam_role.assume_role_policy_document == assume_deny_policy + - iam_role.iam_role.assume_role_policy_document_raw == assume_deny_policy + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == '/' + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: "Minimal IAM Role without instance profile (no change) - check mode" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Minimal IAM Role without instance profile (no change)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: no + register: iam_role + 
+- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after Role creation" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].assume_role_policy_document == assume_deny_policy + - role_info.iam_roles[0].assume_role_policy_document_raw == assume_deny_policy + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: "Remove IAM Role" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after Role deletion" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +# ------------------------------------------------------------------------------------------ + +# (with path) +- name: "Minimal IAM Role with path (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is changed + +- name: "Minimal IAM Role with path" + iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - 'iam_role.iam_role.arn.startswith("arn")' + - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )' + # Would be nice to test the contents... 
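+      # One way to check the contents, mirroring the no-instance-profile case
+      # earlier in this file, would be to compare against the document loaded
+      # in main.yml (illustrative only, not an executed assertion):
+      #   - iam_role.iam_role.assume_role_policy_document == assume_deny_policy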
+ - '"assume_role_policy_document" in iam_role.iam_role' + - '"assume_role_policy_document_raw" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == '{{ test_path }}' + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: "Minimal IAM Role with path (no change) - check mode" + iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Minimal IAM Role with path (no change)" + iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after Role creation" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '{{ test_path }}' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: "iam_role_info after Role creation (searching a path)" + iam_role_info: + path_prefix: "{{ test_path }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].path == '{{ test_path }}' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: "Remove IAM Role" + iam_role: + state: absent + 
name: "{{ test_role }}" + path: "{{ test_path }}" + # If we don't delete the existing profile it'll be reused (with the path) + # by the test below. + delete_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after Role deletion" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +# ------------------------------------------------------------------------------------------ + +# (with Instance profile) +- name: "Minimal IAM Role with instance profile - check mode" + iam_role: + name: "{{ test_role }}" + create_instance_profile: yes + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is changed + +- name: "Minimal IAM Role with instance profile" + iam_role: + name: "{{ test_role }}" + create_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - 'iam_role.iam_role.arn.startswith("arn")' + - 'iam_role.iam_role.arn.endswith("role/" + test_role )' + # Would be nice to test the contents... + - '"assume_role_policy_document" in iam_role.iam_role' + - '"assume_role_policy_document_raw" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == '/' + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: "Minimal IAM Role wth instance profile (no change) - check mode" + iam_role: + name: "{{ test_role }}" + create_instance_profile: yes + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Minimal IAM Role wth instance profile (no change)" + iam_role: + name: "{{ test_role }}" + create_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after Role creation" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/tests/integration/targets/iam_role/tasks/description_update.yml b/tests/integration/targets/iam_role/tasks/description_update.yml new file mode 100644 index 00000000000..85f5e1f56a3 --- /dev/null +++ 
b/tests/integration/targets/iam_role/tasks/description_update.yml @@ -0,0 +1,148 @@ +--- +- name: "Add Description (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role {{ resource_prefix }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Add Description" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role {{ resource_prefix }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}' + +- name: "Add Description (no change) - check mode" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role {{ resource_prefix }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Add Description (no change)" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role {{ resource_prefix }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}' + +- name: "iam_role_info after adding Description" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +# ------------------------------------------------------------------------------------------ + +- name: "Update Description (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role (updated) {{ resource_prefix }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Update Description" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role (updated) {{ resource_prefix }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}' + +- name: "Update Description (no change) - check mode" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role (updated) {{ resource_prefix }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Update Description (no change)" + iam_role: + name: "{{ test_role }}" + description: "Ansible Test Role (updated) {{ resource_prefix }}" + register: iam_role + +- assert: + that: + - 
iam_role is not changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}' + +- name: "iam_role_info after updating Description" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/tests/integration/targets/iam_role/tasks/inline_policy_update.yml b/tests/integration/targets/iam_role/tasks/inline_policy_update.yml new file mode 100644 index 00000000000..d364d87d79f --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/inline_policy_update.yml @@ -0,0 +1,48 @@ +--- +- name: "Attach inline policy a" + iam_policy: + state: present + iam_type: "role" + iam_name: "{{ test_role }}" + policy_name: "inline-policy-a" + policy_json: '{{ lookup("file", "deny-all-a.json") }}' + +- name: "Attach inline policy b" + iam_policy: + state: present + iam_type: "role" + iam_name: "{{ test_role }}" + policy_name: "inline-policy-b" + policy_json: '{{ lookup("file", "deny-all-b.json") }}' + +- name: "iam_role_info after attaching inline policies (using iam_policy)" + iam_role_info: + name: "{{ test_role }}" + register: role_info +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 2 + - '"inline-policy-a" in role_info.iam_roles[0].inline_policies' + - '"inline-policy-b" in role_info.iam_roles[0].inline_policies' + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - 
role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/tests/integration/targets/iam_role/tasks/main.yml b/tests/integration/targets/iam_role/tasks/main.yml new file mode 100644 index 00000000000..821a683eb53 --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/main.yml @@ -0,0 +1,119 @@ +--- +# Tests for iam_role and iam_role_info +# +# Tests: +# - Minimal Role creation +# - Role deletion +# - Fetching a specific role +# - Creating roles w/ and w/o instance profiles +# - Creating roles w/ a path +# - Updating Max Session Duration +# - Updating Description +# - Managing list of managed policies +# - Managing list of inline policies (for testing _info) +# - Managing boundary policy +# +# Notes: +# - Only tests *documented* return values ( RESULT.iam_role ) +# - There are some known timing issues with boto3 returning before actions +# complete. If you hit problems with "changed" status, enabling the +# standard_pauses and paranoid_pauses options is a sensible first step in debugging + + +- name: "Setup AWS connection info" + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + iam_role: + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + collections: + - amazon.aws + - community.general + block: + - set_fact: + assume_deny_policy: '{{ lookup("file", "deny-assume.json") | from_json }}' + # =================================================================== + # Parameter Checks + - include_tasks: parameter_checks.yml + + # =================================================================== + # Supplemental resource pre-creation + - name: "Create Safe IAM Managed Policy" + iam_managed_policy: + state: present + policy_name: "{{ custom_policy_name }}" + policy_description: "A safe (deny-all) managed policy" + policy: "{{ lookup('file', 'deny-all.json') }}" + register: create_managed_policy + + - assert: + that: + - create_managed_policy is succeeded + + # =================================================================== + # Rapid Role Creation and deletion + - include_tasks: creation_deletion.yml + + # =================================================================== + # Max Session Duration Manipulation + - include_tasks: max_session_update.yml + + # =================================================================== + # Description Manipulation + - include_tasks: description_update.yml + + # =================================================================== + # Tag Manipulation + - include_tasks: tags_update.yml + + # =================================================================== + # Policy Manipulation + - include_tasks: policy_update.yml + + # =================================================================== + # Inline Policy (test _info behavior) + - include_tasks: inline_policy_update.yml + + # =================================================================== + # Role Removal + - include_tasks: role_removal.yml + + # =================================================================== + # Boundary Policy (requires create_instance_profile: no) + - 
include_tasks: boundary_policy.yml + + # =================================================================== + # Complex role Creation + - include_tasks: complex_role_creation.yml + + always: + # =================================================================== + # Cleanup + + - name: "Remove IAM Role" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + ignore_errors: true + + - name: "Remove IAM Role (with path)" + iam_role: + state: absent + name: "{{ test_role }}" + path: "{{ test_path }}" + delete_instance_profile: yes + ignore_errors: true + + - name: "iam_role_info after Role deletion" + iam_role_info: + name: "{{ test_role }}" + ignore_errors: true + + - name: "Remove test managed policy" + iam_managed_policy: + state: absent + policy_name: "{{ custom_policy_name }}" diff --git a/tests/integration/targets/iam_role/tasks/max_session_update.yml b/tests/integration/targets/iam_role/tasks/max_session_update.yml new file mode 100644 index 00000000000..8ad3641be62 --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/max_session_update.yml @@ -0,0 +1,71 @@ +--- +- name: "Update Max Session Duration (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Update Max Session Duration" + iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.max_session_duration == 43200 + +- name: "Update Max Session Duration (no change)" + iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role + +- assert: + that: + - iam_role is not changed + +- name: "Update Max Session Duration (no change) - check mode" + iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "iam_role_info after updating Max Session Duration" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/tests/integration/targets/iam_role/tasks/parameter_checks.yml b/tests/integration/targets/iam_role/tasks/parameter_checks.yml new file mode 100644 index 00000000000..57df5436afc --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/parameter_checks.yml @@ -0,0 +1,90 @@ +--- +# Parameter Checks 
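+# The module validates max_session_duration against the IAM limits of 3600 to
+# 43200 seconds (1 to 12 hours); the 3599 and 43201 values below deliberately
+# sit one second outside each bound.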
+- name: "Friendly message when creating an instance profile and adding a boundary profile" + iam_role: + name: "{{ test_role }}" + boundary: "{{ boundary_policy }}" + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"boundary policy" in iam_role.msg' + - '"create_instance_profile" in iam_role.msg' + - '"false" in iam_role.msg' + +- name: "Friendly message when boundary profile is not an ARN" + iam_role: + name: "{{ test_role }}" + boundary: "AWSDenyAll" + create_instance_profile: no + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"Boundary policy" in iam_role.msg' + - '"ARN" in iam_role.msg' + +- name: 'Friendly message when "present" without assume_role_policy_document' + module_defaults: { iam_role: {} } + iam_role: + name: "{{ test_role }}" + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - 'iam_role.msg.startswith("state is present but all of the following are missing")' + - '"assume_role_policy_document" in iam_role.msg' + +- name: "Maximum Session Duration needs to be between 1 and 12 hours" + iam_role: + name: "{{ test_role }}" + max_session_duration: 3599 + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"max_session_duration must be between" in iam_role.msg' + +- name: "Maximum Session Duration needs to be between 1 and 12 hours" + iam_role: + name: "{{ test_role }}" + max_session_duration: 43201 + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"max_session_duration must be between" in iam_role.msg' + +- name: "Role Paths must start with /" + iam_role: + name: "{{ test_role }}" + path: "test/" + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"path must begin and end with /" in iam_role.msg' + +- name: "Role Paths must end with /" + iam_role: + name: "{{ test_role }}" + path: "/test" + register: iam_role + ignore_errors: yes + +- assert: + that: + - iam_role is failed + - '"path must begin and end with /" in iam_role.msg' diff --git a/tests/integration/targets/iam_role/tasks/policy_update.yml b/tests/integration/targets/iam_role/tasks/policy_update.yml new file mode 100644 index 00000000000..a822edf74b6 --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/policy_update.yml @@ -0,0 +1,250 @@ +--- +- name: "Add Managed Policy (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ safe_managed_policy }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Add Managed Policy" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: "Add Managed Policy (no change) - check mode" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Add Managed Policy (no change)" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after adding Managed Policy" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - 
role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" + +# ------------------------------------------------------------------------------------------ + +- name: "Update Managed Policy without purge (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ custom_policy_name }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Update Managed Policy without purge" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: "Update Managed Policy without purge (no change) - check mode" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Update Managed Policy without purge (no change)" + iam_role: + name: "{{ test_role }}" + purge_policies: no + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after updating Managed Policy without purge" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 
'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 2 + - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" + +# ------------------------------------------------------------------------------------------ + +# Managed Policies are purged by default +- name: "Update Managed Policy with purge (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Update Managed Policy with purge" + iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: "Update Managed Policy with purge (no change) - check mode" + iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Update Managed Policy with purge (no change)" + iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: "iam_role_info after updating Managed Policy with purge" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - 
role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/tests/integration/targets/iam_role/tasks/role_removal.yml b/tests/integration/targets/iam_role/tasks/role_removal.yml new file mode 100644 index 00000000000..ebcfd54530a --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/role_removal.yml @@ -0,0 +1,65 @@ +--- +- name: "Remove IAM Role (CHECK MODE)" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after deleting role in check mode" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + +- name: "Remove IAM Role" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "iam_role_info after deleting role" + iam_role_info: + name: "{{ test_role }}" + register: role_info +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: "Remove IAM Role (should be gone already) - check mode" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Remove IAM Role (should be gone already)" + iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: yes + register: iam_role + +- assert: + that: + - iam_role is not changed diff --git a/tests/integration/targets/iam_role/tasks/tags_update.yml b/tests/integration/targets/iam_role/tasks/tags_update.yml new file mode 100644 index 00000000000..5eadd9fdf7e --- /dev/null +++ b/tests/integration/targets/iam_role/tasks/tags_update.yml @@ -0,0 +1,341 @@ +--- +- name: "Add Tag (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Add Tag" + iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.tags | length == 1 + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "ValueA" + +- name: "Add Tag (no change) - check mode" + iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Add Tag (no change)" + iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "ValueA" + +- name: "iam_role_info after adding Tags" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 
'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "ValueA" + +# ------------------------------------------------------------------------------------------ + +- name: "Update Tag (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Update Tag" + iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "AValue" + +- name: "Update Tag (no change) - check mode" + iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Update Tag (no change)" + iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "AValue" + +- name: "iam_role_info after updating Tag" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "AValue" + +# ------------------------------------------------------------------------------------------ + +- name: "Add second Tag without purge (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + purge_tags: no + tags: + TagB: ValueB + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Add second Tag without purge" + iam_role: + name: "{{ test_role }}" + purge_tags: no + tags: + TagB: ValueB + register: iam_role + +- assert: + that: + - iam_role is changed + - 
iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: "Add second Tag without purge (no change) - check mode" + iam_role: + name: "{{ test_role }}" + purge_tags: no + tags: + TagB: ValueB + register: iam_role + check_mode: yes + +- assert: + that: + - iam_role is not changed + +- name: "Add second Tag without purge (no change)" + iam_role: + name: "{{ test_role }}" + purge_tags: no + tags: + TagB: ValueB + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: "iam_role_info after adding second Tag without purge" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 2 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "AValue" + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" + +# ------------------------------------------------------------------------------------------ + +- name: "Purge first tag (CHECK MODE)" + iam_role: + name: "{{ test_role }}" + purge_tags: yes + tags: + TagB: ValueB + check_mode: yes + register: iam_role + +- assert: + that: + - iam_role is changed + +- name: "Purge first tag" + iam_role: + name: "{{ test_role }}" + purge_tags: yes + tags: + TagB: ValueB + register: iam_role + +- assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: "Purge first tag (no change) - check mode" + iam_role: + name: "{{ test_role }}" + purge_tags: yes + tags: + TagB: ValueB + register: iam_role + +- assert: + that: + - iam_role is not changed + +- name: "Purge first tag (no change)" + iam_role: + name: "{{ test_role }}" + purge_tags: yes + tags: + TagB: ValueB + register: iam_role + +- assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: "iam_role_info after purging first Tag" + iam_role_info: + name: "{{ test_role }}" + register: role_info + +- assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - 'role_info.iam_roles[0].arn.startswith("arn")' + - 
'role_info.iam_roles[0].arn.endswith("role/" + test_role )' + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")' + - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)' + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagA" not in role_info.iam_roles[0].tags' + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/tests/unit/plugins/modules/test_iam_role.py b/tests/unit/plugins/modules/test_iam_role.py new file mode 100644 index 00000000000..dac64ef1946 --- /dev/null +++ b/tests/unit/plugins/modules/test_iam_role.py @@ -0,0 +1,598 @@ +# +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest +from botocore.exceptions import BotoCoreError, WaiterError, ClientError + +from unittest.mock import MagicMock, call, patch, ANY +from ansible_collections.amazon.aws.plugins.modules import iam_role + + +mod__list_policies = "ansible_collections.amazon.aws.plugins.modules.iam_role._list_policies" +mod__wait_iam_role = "ansible_collections.amazon.aws.plugins.modules.iam_role._wait_iam_role" +mod_validate_aws_arn = "ansible_collections.amazon.aws.plugins.modules.iam_role.validate_aws_arn" +mod_get_inline_policy_list = "ansible_collections.amazon.aws.plugins.modules.iam_role.get_inline_policy_list" +mod_ansible_dict_to_boto3_tag_list = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role.ansible_dict_to_boto3_tag_list" +) +mod_generate_create_params = "ansible_collections.amazon.aws.plugins.modules.iam_role.generate_create_params" +mod_get_role_with_backoff = "ansible_collections.amazon.aws.plugins.modules.iam_role.get_role_with_backoff" +mod_remove_instance_profiles = "ansible_collections.amazon.aws.plugins.modules.iam_role.remove_instance_profiles" +mod_update_managed_policies = "ansible_collections.amazon.aws.plugins.modules.iam_role.update_managed_policies" +mod_remove_inline_policies = "ansible_collections.amazon.aws.plugins.modules.iam_role.remove_inline_policies" +mod_get_role = "ansible_collections.amazon.aws.plugins.modules.iam_role.get_role" +mod_list_instance_profiles_for_role = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role.list_instance_profiles_for_role" +) +mod_remove_role_from_instance_profile = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role.remove_role_from_instance_profile" +) +mod_delete_instance_profile = "ansible_collections.amazon.aws.plugins.modules.iam_role.delete_instance_profile" +mod_create_instance_profile = "ansible_collections.amazon.aws.plugins.modules.iam_role.create_instance_profile" +mod_add_role_to_instance_profile = ( + 
"ansible_collections.amazon.aws.plugins.modules.iam_role.add_role_to_instance_profile" +) +mod_convert_friendly_names_to_arns = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role.convert_friendly_names_to_arns" +) + + +@patch(mod__wait_iam_role) +def test_wait_iam_exists_check_mode_or_parameter_not_set(m__wait_iam_role): + module = MagicMock() + client = MagicMock() + + module.check_mode = False + module.params = {"wait_timeout": 10} + + m__wait_iam_role.side_effect = SystemExit(1) + + # Test with module parameter not set + iam_role.wait_iam_exists(module, client) + m__wait_iam_role.assert_not_called() + + # Test with check_mode=true + module.check_mode = True + iam_role.wait_iam_exists(module, client) + m__wait_iam_role.assert_not_called() + + +@patch(mod__wait_iam_role) +def test_wait_iam_exists_waiter_error(m__wait_iam_role): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.fail_json_aws.side_effect = SystemExit(1) + module.check_mode = False + wait_timeout = 10 + module.params = {"name": role_name, "wait": True, "wait_timeout": wait_timeout} + waiter_err = WaiterError( + name="IAMCreationError", + reason="Waiter encountered an unexpected error", + last_response=None, + ) + m__wait_iam_role.side_effect = waiter_err + + with pytest.raises(SystemExit): + iam_role.wait_iam_exists(module, client) + m__wait_iam_role.assert_called_once_with(client, role_name, wait_timeout) + module.fail_json_aws.assert_called_once_with(waiter_err, msg="Timeout while waiting on IAM role creation") + + +@patch(mod__list_policies) +@patch(mod_validate_aws_arn) +def test_convert_friendly_names_to_arns_with_valid_iam_arn(m_validate_aws_arn, m__list_policies): + m_validate_aws_arn.side_effect = lambda *ag, **kw: True + m__list_policies.side_effect = SystemExit(1) + + module = MagicMock() + client = MagicMock() + policy_names = [None, "policy-1"] + + assert iam_role.convert_friendly_names_to_arns(module, client, policy_names) == policy_names + m_validate_aws_arn.assert_called_once_with("policy-1", service="iam") + m__list_policies.assert_not_called() + + +@pytest.mark.parametrize( + "policy_names", + [ + ["AWSEC2SpotServiceRolePolicy", "AllowXRayPutTraceSegments"], + ["AWSEC2SpotServiceRolePolicy", "AllowXRayPutTraceSegments", "ThisPolicyDoesNotExists"], + ], +) +@patch(mod__list_policies) +@patch(mod_validate_aws_arn) +def test_convert_friendly_names_to_arns(m_validate_aws_arn, m__list_policies, policy_names): + m_validate_aws_arn.side_effect = lambda *ag, **kw: False + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(1) + client = MagicMock() + + test_policies = [ + { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotServiceRolePolicy", + "PolicyName": "AWSEC2SpotServiceRolePolicy", + }, + { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSServiceRoleForAmazonEKSNodegroup", + "PolicyName": "AWSServiceRoleForAmazonEKSNodegroup", + }, + { + "Arn": "arn:aws:iam::966509639900:policy/AllowXRayPutTraceSegments", + "PolicyName": "AllowXRayPutTraceSegments", + }, + ] + test_policies_names = [policy["PolicyName"] for policy in test_policies] + m__list_policies.return_value = test_policies + if any(policy not in test_policies_names for policy in policy_names if policy is not None): + with pytest.raises(SystemExit): + iam_role.convert_friendly_names_to_arns(module, client, policy_names) + module.fail_json_aws.assert_called_once_with(ANY, msg="Couldn't find policy") + else: + + def _get_policy_arn(policy): + for item in test_policies: + if 
item.get("PolicyName") == policy: + return item.get("Arn") + + expected = [_get_policy_arn(policy) for policy in policy_names if policy is not None] + assert iam_role.convert_friendly_names_to_arns(module, client, policy_names) == expected + m__list_policies.assert_called_once_with(client) + + +def test_attach_policies(): + module = MagicMock() + client = MagicMock() + + module.fail_json_aws.side_effect = SystemExit(1) + role_name = "ansible-test-role" + + # Test: check_mode=true and policies_to_attach = [] + module.check_mode = True + assert not iam_role.attach_policies(module, client, [], role_name) + client.attach_role_policy.assert_not_called() + + # Test: check_mode=true and policies_to_attach != [] + module.check_mode = True + assert iam_role.attach_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + client.attach_role_policy.assert_not_called() + + # Test: check_mode=false and policies_to_attach != [] + module.check_mode = False + assert iam_role.attach_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + client.attach_role_policy.assert_has_calls( + [ + call(RoleName=role_name, PolicyArn="policy-1", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-2", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-3", aws_retry=True), + ] + ) + + # Test: client.attach_role_policy raised botocore exception + error = BotoCoreError(error="AttachRolePolicy", operation="Failed to attach policy to IAM role") + client.attach_role_policy.side_effect = error + with pytest.raises(SystemExit): + iam_role.attach_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + module.fail_json_aws.assert_called_once_with(error, msg=f"Unable to attach policy policy-1 to role {role_name}") + + +def test_remove_policies(): + module = MagicMock() + client = MagicMock() + + module.fail_json_aws.side_effect = SystemExit(1) + role_name = "ansible-test-role" + + # Test: check_mode=true and policies_to_remove = [] + module.check_mode = True + assert not iam_role.remove_policies(module, client, [], role_name) + client.detach_role_policy.assert_not_called() + + # Test: check_mode=true and policies_to_remove != [] + module.check_mode = True + assert iam_role.remove_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + client.detach_role_policy.assert_not_called() + + # Test: check_mode=false and policies_to_attach != [] + module.check_mode = False + assert iam_role.remove_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + client.detach_role_policy.assert_has_calls( + [ + call(RoleName=role_name, PolicyArn="policy-1", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-2", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-3", aws_retry=True), + ] + ) + + # Test: client.attach_role_policy raised botocore exception + error = BotoCoreError(error="DetachRolePolicy", operation="Failed to detach policy to IAM role") + client.detach_role_policy.side_effect = error + with pytest.raises(SystemExit): + iam_role.remove_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + module.fail_json_aws.assert_called_once_with(error, msg=f"Unable to detach policy policy-1 from {role_name}") + + # Test: client.attach_role_policy raised botocore error 'NoSuchEntityException' + nosuch_entity_err = ClientError( + {"Error": {"Code": "NoSuchEntityException"}}, + "DetachRolePolicy", + ) + client.detach_role_policy.side_effect = ( + lambda *args, **kw: nosuch_entity_err if 
kw.get("PolicyArn") == "policy-2" else True + ) + assert iam_role.remove_policies(module, client, ["policy-1", "policy-2", "policy-3"], role_name) + client.detach_role_policy.assert_has_calls( + [ + call(RoleName=role_name, PolicyArn="policy-1", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-2", aws_retry=True), + call(RoleName=role_name, PolicyArn="policy-3", aws_retry=True), + ] + ) + + +@patch(mod_get_inline_policy_list) +def test_remove_inline_policies(m_get_inline_policy_list): + role_name = "ansible-test-role" + module = MagicMock() + client = MagicMock() + + m_get_inline_policy_list.return_value = ["policy-1", "policy-2", "policy-3"] + nosuch_entity_err = ClientError( + {"Error": {"Code": "NoSuchEntityException"}}, + "DetachRolePolicy", + ) + client.detach_role_policy.side_effect = ( + lambda *args, **kw: nosuch_entity_err if kw.get("PolicyArn") == "policy-2" else True + ) + iam_role.remove_inline_policies(module, client, role_name) + client.delete_role_policy.assert_has_calls( + [ + call(RoleName=role_name, PolicyName="policy-1", aws_retry=True), + call(RoleName=role_name, PolicyName="policy-2", aws_retry=True), + call(RoleName=role_name, PolicyName="policy-3", aws_retry=True), + ] + ) + + +@patch(mod_ansible_dict_to_boto3_tag_list) +def test_generate_create_params(m_ansible_dict_to_boto3_tag_list): + module = MagicMock() + path = MagicMock() + name = MagicMock() + policy_document = MagicMock() + description = MagicMock() + max_session_duration = MagicMock() + boundary = MagicMock() + tags = MagicMock() + module.params = { + "path": path, + "name": name, + "assume_role_policy_document": policy_document, + "description": description, + "max_session_duration": max_session_duration, + "boundary": boundary, + "tags": tags, + } + expected = { + "Path": path, + "RoleName": name, + "AssumeRolePolicyDocument": policy_document, + "Description": description, + "MaxSessionDuration": max_session_duration, + "PermissionsBoundary": boundary, + "Tags": tags, + } + + m_ansible_dict_to_boto3_tag_list.return_value = tags + assert iam_role.generate_create_params(module) == expected + m_ansible_dict_to_boto3_tag_list.assert_called_once_with(tags) + + +@patch(mod_get_role_with_backoff) +@patch(mod_generate_create_params) +def test_create_basic_role_check_mode(m_generate_create_params, m_get_role_with_backoff): + module = MagicMock() + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemExit(1) + client = MagicMock() + + module.check_mode = True + with pytest.raises(SystemExit): + iam_role.create_basic_role(module, client) + m_generate_create_params.assert_not_called() + m_get_role_with_backoff.assert_not_called() + + +@patch(mod_get_role_with_backoff) +@patch(mod_generate_create_params) +def test_create_basic_role_with_create_role_error(m_generate_create_params, m_get_role_with_backoff): + role_name = "ansible-test-role" + params = { + "RoleName": role_name, + "Tags": { + "Phase": "dev", + "ansible-test": "units", + }, + } + m_generate_create_params.return_value = params + + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(1) + client = MagicMock() + + module.check_mode = False + create_role_error = BotoCoreError(error="failed", operation="Not enough permission to create role") + client.create_role.side_effect = create_role_error + with pytest.raises(SystemExit): + iam_role.create_basic_role(module, client) + module.fail_json_aws.assert_called_once_with(create_role_error, msg="Unable to create role") + 
m_get_role_with_backoff.assert_not_called() + + +@patch(mod_get_role_with_backoff) +@patch(mod_generate_create_params) +def test_create_basic_role_with_get_role_error(m_generate_create_params, m_get_role_with_backoff): + role_name = "ansible-test-role" + params = { + "RoleName": role_name, + "Tags": { + "Phase": "dev", + "ansible-test": "units", + }, + } + m_generate_create_params.return_value = params + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(1) + client = MagicMock() + + module.check_mode = False + client.create_role.return_value = { + "RoleName": role_name, + } + error = BotoCoreError(error="failed", operation="Unable to get role") + m_get_role_with_backoff.side_effect = error + with pytest.raises(SystemExit): + iam_role.create_basic_role(module, client) + module.fail_json_aws.assert_called_once_with(error, msg="Unable to create role") + client.create_role.assert_called_once_with(aws_retry=True, **params) + + +@patch(mod_get_role_with_backoff) +@patch(mod_generate_create_params) +def test_create_basic_role(m_generate_create_params, m_get_role_with_backoff): + role_name = "ansible-test-role" + params = { + "RoleName": role_name, + "Tags": { + "Phase": "dev", + "ansible-test": "units", + }, + } + m_generate_create_params.return_value = params + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(1) + client = MagicMock() + + module.check_mode = False + client.create_role.return_value = { + "RoleName": role_name, + } + role = { + "RoleName": role_name, + "Description": "Role created for ansible unit testing", + "Tags": { + "Phase": "dev", + "ansible-test": "units", + }, + } + m_get_role_with_backoff.return_value = role + assert iam_role.create_basic_role(module, client) == role + client.create_role.assert_called_once_with(aws_retry=True, **params) + m_get_role_with_backoff.assert_called_once_with(module, client, role_name) + + +@patch(mod_update_managed_policies) +@patch(mod_remove_inline_policies) +@patch(mod_remove_instance_profiles) +@patch(mod_get_role) +def test_destroy_role_unexisting_role( + m_get_role, m_remove_instance_profiles, m_remove_inline_policies, m_update_managed_policies +): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.params = {"name": role_name} + module.check_mode = False + module.exit_json.side_effect = SystemExit(1) + m_get_role.return_value = None + + with pytest.raises(SystemExit): + iam_role.destroy_role(module, client) + m_get_role.assert_called_once_with(module, client, role_name) + module.exit_json.assert_called_once_with(changed=False) + m_remove_instance_profiles.assert_not_called() + m_remove_inline_policies.assert_not_called() + m_update_managed_policies.assert_not_called() + + +@patch(mod_update_managed_policies) +@patch(mod_remove_inline_policies) +@patch(mod_remove_instance_profiles) +@patch(mod_get_role) +def test_destroy_role_check_mode( + m_get_role, m_remove_instance_profiles, m_remove_inline_policies, m_update_managed_policies +): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.params = {"name": role_name} + module.check_mode = True + module.exit_json.side_effect = SystemExit(1) + m_get_role.return_value = MagicMock() + + with pytest.raises(SystemExit): + iam_role.destroy_role(module, client) + m_get_role.assert_called_once_with(module, client, role_name) + module.exit_json.assert_called_once_with(changed=True) + m_remove_instance_profiles.assert_not_called() + m_remove_inline_policies.assert_not_called() + 
m_update_managed_policies.assert_not_called() + + +@patch(mod_update_managed_policies) +@patch(mod_remove_inline_policies) +@patch(mod_remove_instance_profiles) +@patch(mod_get_role) +def test_destroy_role(m_get_role, m_remove_instance_profiles, m_remove_inline_policies, m_update_managed_policies): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.params = {"name": role_name} + module.check_mode = False + module.exit_json.side_effect = SystemExit(1) + m_get_role.return_value = MagicMock() + + with pytest.raises(SystemExit): + iam_role.destroy_role(module, client) + m_get_role.assert_called_once_with(module, client, role_name) + module.exit_json.assert_called_once_with(changed=True) + m_remove_instance_profiles.assert_called_once_with(module, client, role_name) + m_remove_inline_policies.assert_called_once_with(module, client, role_name) + m_update_managed_policies.assert_called_once_with(module, client, role_name, [], True) + + +@patch(mod_update_managed_policies) +@patch(mod_remove_inline_policies) +@patch(mod_remove_instance_profiles) +@patch(mod_get_role) +def test_destroy_role_with_deletion_error( + m_get_role, m_remove_instance_profiles, m_remove_inline_policies, m_update_managed_policies +): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.params = {"name": role_name} + module.check_mode = False + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemExit(1) + m_get_role.return_value = MagicMock() + + error = BotoCoreError(error="failed", operation="Unable to get role") + client.delete_role.side_effect = error + + with pytest.raises(SystemExit): + iam_role.destroy_role(module, client) + m_get_role.assert_called_once_with(module, client, role_name) + module.exit_json.assert_not_called() + module.fail_json_aws.assert_called_once_with(error, msg="Unable to delete role") + m_remove_instance_profiles.assert_called_once_with(module, client, role_name) + m_remove_inline_policies.assert_called_once_with(module, client, role_name) + m_update_managed_policies.assert_called_once_with(module, client, role_name, [], True) + + +@patch(mod_list_instance_profiles_for_role) +@patch(mod_remove_role_from_instance_profile) +@patch(mod_delete_instance_profile) +def test_remove_instance_profiles_check_mode( + m_delete_instance_profile, m_remove_role_from_instance_profile, m_list_instance_profiles_for_role +): + module = MagicMock() + client = MagicMock() + + role_name = "ansible-test-role" + module.check_mode = True + iam_role.remove_instance_profiles(module, client, role_name) + for m_func in (m_delete_instance_profile, m_remove_role_from_instance_profile, m_list_instance_profiles_for_role): + m_func.assert_not_called() + + +@pytest.mark.parametrize("delete_profiles", [True, False]) +@patch(mod_list_instance_profiles_for_role) +@patch(mod_remove_role_from_instance_profile) +@patch(mod_delete_instance_profile) +def test_remove_instance_profiles_with_delete_profile( + m_delete_instance_profile, m_remove_role_from_instance_profile, m_list_instance_profiles_for_role, delete_profiles +): + module = MagicMock() + client = MagicMock() + + module.params = {"delete_instance_profile": delete_profiles} + module.check_mode = False + role_name = "ansible-test-role" + instance_profiles = [ + {"InstanceProfileName": "instance_profile_1"}, + {"InstanceProfileName": "instance_profile_2"}, + {"InstanceProfileName": role_name}, + ] + m_list_instance_profiles_for_role.return_value = instance_profiles + 
iam_role.remove_instance_profiles(module, client, role_name) + m_list_instance_profiles_for_role.assert_called_once_with(module, client, role_name) + m_remove_role_from_instance_profile.assert_has_calls( + [call(module, client, role_name, profile["InstanceProfileName"]) for profile in instance_profiles], + any_order=True, + ) + if delete_profiles: + m_delete_instance_profile.assert_called_once_with(module, client, role_name) + else: + m_delete_instance_profile.assert_not_called() + + +@patch(mod_list_instance_profiles_for_role) +@patch(mod_create_instance_profile) +@patch(mod_add_role_to_instance_profile) +def test_create_instance_profiles_with_existing_profile( + m_add_role_to_instance_profile, m_create_instance_profile, m_list_instance_profiles_for_role +): + module = MagicMock() + client = MagicMock() + path = MagicMock() + + role_name = "ansible-test-role" + m_list_instance_profiles_for_role.return_value = [{"InstanceProfileName": role_name}] + assert not iam_role.create_instance_profiles(module, client, role_name, path) + m_add_role_to_instance_profile.assert_not_called() + m_create_instance_profile.assert_not_called() + + +@patch(mod_list_instance_profiles_for_role) +@patch(mod_create_instance_profile) +@patch(mod_add_role_to_instance_profile) +def test_create_instance_profiles_check_mode( + m_add_role_to_instance_profile, m_create_instance_profile, m_list_instance_profiles_for_role +): + module = MagicMock() + client = MagicMock() + path = MagicMock() + + module.check_mode = True + role_name = "ansible-test-role" + m_list_instance_profiles_for_role.return_value = [{"InstanceProfileName": "instance-profile-1"}] + assert iam_role.create_instance_profiles(module, client, role_name, path) + m_add_role_to_instance_profile.assert_not_called() + m_create_instance_profile.assert_not_called() + + +@patch(mod_list_instance_profiles_for_role) +@patch(mod_create_instance_profile) +@patch(mod_add_role_to_instance_profile) +def test_create_instance_profiles( + m_add_role_to_instance_profile, m_create_instance_profile, m_list_instance_profiles_for_role +): + module = MagicMock() + client = MagicMock() + path = MagicMock() + + module.check_mode = False + role_name = "ansible-test-role" + m_list_instance_profiles_for_role.return_value = [{"InstanceProfileName": "instance-profile-1"}] + assert iam_role.create_instance_profiles(module, client, role_name, path) + m_add_role_to_instance_profile.assert_called_once_with(module, client, role_name) + m_create_instance_profile.assert_called_once_with(module, client, role_name, path) diff --git a/tests/unit/plugins/modules/test_iam_role_info.py b/tests/unit/plugins/modules/test_iam_role_info.py new file mode 100644 index 00000000000..71b23d65a1b --- /dev/null +++ b/tests/unit/plugins/modules/test_iam_role_info.py @@ -0,0 +1,245 @@ +# +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest +from botocore.exceptions import BotoCoreError + +from unittest.mock import MagicMock, call, patch +from ansible_collections.amazon.aws.plugins.modules import iam_role_info + + +mod_list_iam_roles = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.list_iam_roles_with_backoff" +mod_list_iam_role_policies = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role_info.list_iam_role_policies_with_backoff" +) +mod_list_iam_attached_policies = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role_info.list_iam_attached_role_policies_with_backoff" +) +mod_list_iam_instance_profiles_for_role = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role_info.list_iam_instance_profiles_for_role_with_backoff" +) +mod_boto3_tag_list_to_ansible_dict = ( + "ansible_collections.amazon.aws.plugins.modules.iam_role_info.boto3_tag_list_to_ansible_dict" +) +mod_normalize_role = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.normalize_role" +mod_normalize_profile = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.normalize_profile" +mod_camel_dict_to_snake_dict = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.camel_dict_to_snake_dict" +mod_get_role = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.get_role" +mod_describe_iam_role = "ansible_collections.amazon.aws.plugins.modules.iam_role_info.describe_iam_role" + + +def raise_botocore_exception(): + return BotoCoreError(error="failed", operation="Unexpected error while calling botocore api") + + +@pytest.mark.parametrize("list_iam_policies_status", [True, False]) +@pytest.mark.parametrize("list_iam_attached_policies_status", [True, False]) +@pytest.mark.parametrize("list_iam_instance_profiles_for_role_status", [True, False]) +@patch(mod_list_iam_role_policies) +@patch(mod_list_iam_attached_policies) +@patch(mod_list_iam_instance_profiles_for_role) +@patch(mod_boto3_tag_list_to_ansible_dict) +def test_describe_iam_role_with_iam_policies_error( + m_boto3_tag_list_to_ansible_dict, + m_list_iam_instance_profiles_for_role, + m_list_iam_attached_policies, + m_list_iam_role_policies, + list_iam_policies_status, + list_iam_attached_policies_status, + list_iam_instance_profiles_for_role_status, +): + client = MagicMock() + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(1) + + iam_policies = { + "PolicyNames": [ + "policy-1", + ] + } + iam_attached_policies = { + "AttachedPolicies": [ + {"PolicyName": "policy-1", "PolicyArn": "iam:policy:arn:xxx:xxx:xxx"}, + ] + } + iam_instance_profiles = {"InstanceProfiles": ["instance-profile-1"]} + + has_failure = False + if list_iam_policies_status: + m_list_iam_role_policies.return_value = iam_policies + else: + has_failure = True + m_list_iam_role_policies.side_effect = raise_botocore_exception() + + if list_iam_attached_policies_status: + m_list_iam_attached_policies.return_value = iam_attached_policies + else: + has_failure = True + m_list_iam_attached_policies.side_effect = raise_botocore_exception() + module.fail_json_aws.side_effect = SystemExit(1) + + if list_iam_instance_profiles_for_role_status: + m_list_iam_instance_profiles_for_role.return_value = iam_instance_profiles + else: + has_failure = True + m_list_iam_instance_profiles_for_role.side_effect = raise_botocore_exception() + module.fail_json_aws.side_effect = SystemExit(1) + + 
m_boto3_tag_list_to_ansible_dict.side_effect = lambda x: x + + role_name = "ansible-test-role" + role_tags = { + "Environment": "Dev", + "Phase": "Units", + } + test_role = { + "RoleName": role_name, + "Tags": role_tags, + } + + if has_failure: + with pytest.raises(SystemExit): + iam_role_info.describe_iam_role(module, client, test_role) + module.fail_json_aws.assert_called_once() + # validate that each function has at most 1 call + assert m_list_iam_role_policies.call_count <= 1 + assert m_list_iam_attached_policies.call_count <= 1 + assert m_list_iam_instance_profiles_for_role.call_count <= 1 + # validate function call with expected parameters + if m_list_iam_role_policies.call_count == 1: + m_list_iam_role_policies.assert_called_once_with(client, role_name) + if m_list_iam_attached_policies.call_count == 1: + m_list_iam_attached_policies.assert_called_once_with(client, role_name) + if m_list_iam_instance_profiles_for_role.call_count == 1: + m_list_iam_instance_profiles_for_role.assert_called_once_with(client, role_name) + else: + # Everything went well + expected_role = { + "RoleName": role_name, + "InlinePolicies": iam_policies, + "ManagedPolicies": iam_attached_policies, + "InstanceProfiles": iam_instance_profiles, + "tags": role_tags, + } + assert expected_role == iam_role_info.describe_iam_role(module, client, test_role) + m_list_iam_role_policies.assert_called_once_with(client, role_name) + m_list_iam_attached_policies.assert_called_once_with(client, role_name) + m_list_iam_instance_profiles_for_role.assert_called_once_with(client, role_name) + m_boto3_tag_list_to_ansible_dict.assert_called_once_with(role_tags) + + +@patch(mod_normalize_role) +@patch(mod_camel_dict_to_snake_dict) +def test_normalize_profile(m_camel_dict_to_snake_dict, m_normalize_role): + m_camel_dict_to_snake_dict.side_effect = lambda x: x + m_normalize_role.side_effect = lambda x: {"RoleName": x} + + profile = {"Roles": ["role-1", "role-2"]} + expected = {"roles": [{"RoleName": "role-1"}, {"RoleName": "role-2"}]} + assert expected == iam_role_info.normalize_profile(profile) + m_camel_dict_to_snake_dict.assert_called_once_with(profile) + m_normalize_role.assert_has_calls([call("role-1"), call("role-2")]) + + +@patch(mod_normalize_profile) +@patch(mod_camel_dict_to_snake_dict) +def test_normalize_role(m_camel_dict_to_snake_dict, m_normalize_profile): + m_camel_dict_to_snake_dict.side_effect = lambda x, **kwargs: x + m_normalize_profile.side_effect = lambda x: x + + role_policy_document = { + "Statement": [{"Action": "sts:AssumeRole", "Effect": "Deny", "Principal": {"Service": "ec2.amazonaws.com"}}], + "Version": "2012-10-17", + } + role_tags = { + "Environment": "Dev", + "Phase": "Units", + } + role = { + "AssumeRolePolicyDocument": role_policy_document, + "tags": role_tags, + "InstanceProfiles": [ + "profile-1", + "profile-2", + ], + } + expected = { + "assume_role_policy_document": role_policy_document, + "assume_role_policy_document_raw": role_policy_document, + "tags": role_tags, + "instance_profiles": [ + "profile-1", + "profile-2", + ], + } + + assert expected == iam_role_info.normalize_role(role) + m_camel_dict_to_snake_dict.assert_called_once_with(role, ignore_list=["tags", "AssumeRolePolicyDocument"]) + m_normalize_profile.assert_has_calls([call("profile-1"), call("profile-2")]) + + +@patch(mod_get_role) +@patch(mod_list_iam_roles) +@patch(mod_normalize_role) +@patch(mod_describe_iam_role) +def test_describe_iam_roles_with_name(m_describe_iam_role, m_normalize_role, m_list_iam_roles, m_get_role): + 
role_name = "ansible-test-role" + + client = MagicMock() + module = MagicMock() + module.params = { + "name": role_name, + "path_prefix": "path prefix", + } + + m_get_role.return_value = [{"RoleName": role_name}] + expected = {"role_name": role_name, "instance_profiles": ["profile-1", "profile-2"]} + m_describe_iam_role.return_value = expected + m_normalize_role.return_value = expected + + assert [expected] == iam_role_info.describe_iam_roles(module, client) + m_get_role.assert_called_once_with(module, client, role_name) + m_list_iam_roles.assert_not_called() + + m_describe_iam_role.assert_called_once_with(module, client, {"RoleName": role_name}) + m_normalize_role.assert_called_once_with(expected) + + +@pytest.mark.parametrize( + "path_prefix", + [ + "ansible-prefix", + "ansible-prefix/", + "/ansible-prefix", + "/ansible-prefix/", + ], +) +@patch(mod_get_role) +@patch(mod_list_iam_roles) +@patch(mod_normalize_role) +@patch(mod_describe_iam_role) +def test_describe_iam_roles_with_path_prefix( + m_describe_iam_role, m_normalize_role, m_list_iam_roles, m_get_role, path_prefix +): + client = MagicMock() + role = MagicMock() + module = MagicMock() + module.params = { + "name": None, + "path_prefix": path_prefix, + } + + m_list_iam_roles.return_value = {"Roles": [role]} + + m_describe_iam_role.side_effect = lambda m, c, r: r + m_normalize_role.side_effect = lambda x: x + + assert [role] == iam_role_info.describe_iam_roles(module, client) + m_get_role.assert_not_called() + m_list_iam_roles.assert_called_once_with(client, PathPrefix="/ansible-prefix/") + + m_describe_iam_role.assert_called_once_with(module, client, role) + m_normalize_role.assert_called_once_with(role) From ac53162605475ae831c4de81b37a1ecf33f46016 Mon Sep 17 00:00:00 2001 From: abikouo Date: Tue, 19 Sep 2023 10:18:06 +0200 Subject: [PATCH 681/683] adding extra note for promotion --- plugins/modules/iam_role.py | 3 ++- plugins/modules/iam_role_info.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index d8aa8299936..d3ed2095458 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -7,10 +7,11 @@ DOCUMENTATION = r""" --- module: iam_role -version_added: 1.0.0 +version_added: 7.0.0 short_description: Manage AWS IAM roles description: - Manage AWS IAM roles. + - This module was originally added to C(community.aws) in release 1.0.0. author: - "Rob White (@wimnat)" options: diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index caab88625f1..343b81b8295 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -7,10 +7,11 @@ DOCUMENTATION = r""" --- module: iam_role_info -version_added: 1.0.0 +version_added: 7.0.0 short_description: Gather information on IAM roles description: - Gathers information about IAM roles. + - This module was originally added to C(community.aws) in release 1.0.0. 
author: - "Will Thames (@willthames)" options: From cbaf4dc7116492f944e51b2a6e1e386629008ec1 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 19 Sep 2023 16:03:54 +0200 Subject: [PATCH 682/683] use version_added_collection --- plugins/modules/iam_role.py | 4 ++-- plugins/modules/iam_role_info.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index d3ed2095458..011d2a84019 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -7,11 +7,11 @@ DOCUMENTATION = r""" --- module: iam_role -version_added: 7.0.0 +version_added: 1.0.0 +version_added_collection: community.aws short_description: Manage AWS IAM roles description: - Manage AWS IAM roles. - - This module was originally added to C(community.aws) in release 1.0.0. author: - "Rob White (@wimnat)" options: diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index 343b81b8295..6a1c2f22b25 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -7,11 +7,11 @@ DOCUMENTATION = r""" --- module: iam_role_info -version_added: 7.0.0 +version_added: 1.0.0 +version_added_collection: community.aws short_description: Gather information on IAM roles description: - Gathers information about IAM roles. - - This module was originally added to C(community.aws) in release 1.0.0. author: - "Will Thames (@willthames)" options: From f39f2ba78039590defae8bc169131483fe03c7e2 Mon Sep 17 00:00:00 2001 From: abikouo Date: Tue, 19 Sep 2023 18:40:44 +0200 Subject: [PATCH 683/683] update unit tests, some updates after code review --- .../20230915_migrate_iam_role_and_iam_role_info.yml | 2 +- plugins/modules/iam_role_info.py | 4 +--- tests/integration/targets/iam_role/tasks/main.yml | 9 +++++---- tests/unit/plugins/modules/test_iam_role_info.py | 9 +++++++-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml b/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml index b297ffcbfb0..f1c33d0d20b 100644 --- a/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml +++ b/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml @@ -1,4 +1,4 @@ -breaking_changes: +major_changes: - iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/amazon.aws/pull/1757). 
diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index 6a1c2f22b25..eb60b3be5e5 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -246,17 +246,15 @@ def normalize_profile(profile): new_profile = camel_dict_to_snake_dict(profile) if profile.get("Roles"): new_profile["roles"] = [normalize_role(role) for role in profile.get("Roles")] - del new_profile["Roles"] return new_profile def normalize_role(role): new_role = camel_dict_to_snake_dict(role, ignore_list=["tags", "AssumeRolePolicyDocument"]) - new_role["assume_role_policy_document"] = role.pop("AssumeRolePolicyDocument", {}) + new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument", {}) new_role["assume_role_policy_document_raw"] = new_role["assume_role_policy_document"] if role.get("InstanceProfiles"): new_role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")] - del new_role["InstanceProfiles"] return new_role diff --git a/tests/integration/targets/iam_role/tasks/main.yml b/tests/integration/targets/iam_role/tasks/main.yml index 821a683eb53..f79da0d975e 100644 --- a/tests/integration/targets/iam_role/tasks/main.yml +++ b/tests/integration/targets/iam_role/tasks/main.yml @@ -23,10 +23,11 @@ - name: "Setup AWS connection info" module_defaults: group/aws: - access_key: "{{ aws_access_key }}" - secret_key: "{{ aws_secret_key }}" - session_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" + # access_key: "{{ aws_access_key }}" + # secret_key: "{{ aws_secret_key }}" + # session_token: "{{ security_token | default(omit) }}" + # region: "{{ aws_region }}" + aws_profile: ansible iam_role: assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' collections: diff --git a/tests/unit/plugins/modules/test_iam_role_info.py b/tests/unit/plugins/modules/test_iam_role_info.py index 71b23d65a1b..c661bd1ed46 100644 --- a/tests/unit/plugins/modules/test_iam_role_info.py +++ b/tests/unit/plugins/modules/test_iam_role_info.py @@ -134,7 +134,7 @@ def test_describe_iam_role_with_iam_policies_error( @patch(mod_normalize_role) @patch(mod_camel_dict_to_snake_dict) def test_normalize_profile(m_camel_dict_to_snake_dict, m_normalize_role): - m_camel_dict_to_snake_dict.side_effect = lambda x: x + m_camel_dict_to_snake_dict.side_effect = lambda x: {k.lower(): d for k,d in x.items()} m_normalize_role.side_effect = lambda x: {"RoleName": x} profile = {"Roles": ["role-1", "role-2"]} @@ -147,7 +147,7 @@ def test_normalize_profile(m_camel_dict_to_snake_dict, m_normalize_role): @patch(mod_normalize_profile) @patch(mod_camel_dict_to_snake_dict) def test_normalize_role(m_camel_dict_to_snake_dict, m_normalize_profile): - m_camel_dict_to_snake_dict.side_effect = lambda x, **kwargs: x + m_camel_dict_to_snake_dict.side_effect = lambda x, **kwargs: {k.lower() if k not in kwargs.get("ignore_list") else k: d for k,d in x.items()} m_normalize_profile.side_effect = lambda x: x role_policy_document = { @@ -167,9 +167,14 @@ def test_normalize_role(m_camel_dict_to_snake_dict, m_normalize_profile): ], } expected = { + "AssumeRolePolicyDocument": role_policy_document, "assume_role_policy_document": role_policy_document, "assume_role_policy_document_raw": role_policy_document, "tags": role_tags, + "instanceprofiles": [ + "profile-1", + "profile-2", + ], "instance_profiles": [ "profile-1", "profile-2",